ext | sha | content
---|---|---|
py | 1a5aaaf9724d68809e57c688754c997d14e51d59 |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version 3.9.0 Jul 11 2020)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class TrackArcGeneratorParameterDialog
###########################################################################
class TrackArcGeneratorParameterDialog ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 190,196 ), style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( -1, -1 )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
self.label_radius = wx.StaticText( self, wx.ID_ANY, u"Arc radius in µm", wx.DefaultPosition, wx.DefaultSize, 0 )
self.label_radius.Wrap( -1 )
bSizer1.Add( self.label_radius, 0, wx.ALL, 5 )
self.input_radius = wx.TextCtrl( self, wx.ID_ANY, u"3000", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1.Add( self.input_radius, 0, wx.ALL, 5 )
self.label_arc_segments = wx.StaticText( self, wx.ID_ANY, u"Track segments per circle", wx.DefaultPosition, wx.DefaultSize, 0 )
self.label_arc_segments.Wrap( -1 )
bSizer1.Add( self.label_arc_segments, 0, wx.ALL, 5 )
self.input_segments = wx.TextCtrl( self, wx.ID_ANY, u"60", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1.Add( self.input_segments, 0, wx.ALL, 5 )
self.buttom_okay = wx.Button( self, wx.ID_OK, u"Okay", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer1.Add( self.buttom_okay, 0, wx.ALL, 5 )
self.SetSizer( bSizer1 )
self.Layout()
self.Centre( wx.BOTH )
def __del__( self ):
pass
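# Minimal usage sketch (not part of the generated file above): shows the dialog
# modally in a bare wxPython app and reads back the two text fields. The flow and
# values are illustrative only.
if __name__ == "__main__":
    app = wx.App(False)
    dialog = TrackArcGeneratorParameterDialog(None)
    if dialog.ShowModal() == wx.ID_OK:
        print(dialog.input_radius.GetValue(), dialog.input_segments.GetValue())
    dialog.Destroy()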
|
py | 1a5aab709d60e45ee883159b6627c0a3ccb06526 |
import numpy as np
from .customKF import CustomKF
class CustomRTS():
def __init__(self, z, del_t):
self.z = z
self.del_t = del_t
def run(self, initial_mean, initial_variance, Q, sigma_square):
# Forward batch filter
kf = CustomKF(Q, sigma_square)
prior_means, prior_variances, post_means, post_variances = kf.batch_filter(initial_mean, initial_variance, self.z, self.del_t)
num_samples = len(self.z)
# Smoother
S = [0 for _ in range(num_samples)]
smoothed_means = [0 for _ in range(num_samples)]
smoothed_variances = [0 for _ in range(num_samples)]
smoothed_means[-1] = post_means[-1]
smoothed_variances[-1] = post_variances[-1]
for j in range(num_samples - 2, -1, -1):
S[j] = post_variances[j] * (1 / prior_variances[j+1])
smoothed_means[j] = post_means[j] + S[j] * (smoothed_means[j+1] - prior_means[j+1])
smoothed_variances[j] = post_variances[j] + S[j] * S[j] * (smoothed_variances[j+1] - prior_variances[j+1])
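        # Lag-one smoothed covariances M[j] = Cov(eta_j, eta_{j-1}); they feed the
        # cross-moment E[eta_j * eta_{j-1}] computed at the end of this method.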
K = (self.del_t[-1] ** 2) * prior_variances[-1] + (sigma_square ** 2) * self.del_t[-1]
M = [0 for _ in range(num_samples)]
M[-1] = (1 - K * self.del_t[-1]) * post_variances[-2]
for j in range(num_samples-2, 0, -1):
M[j] = post_variances[j] * S[j-1] + S[j] * (M[j+1] - post_variances[j]) * S[j-1]
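        # Posterior expectations under the smoothed distribution:
        # E[eta_j], E[eta_j^2] and E[eta_j * eta_{j-1}].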
expected_η = [0 for _ in range(num_samples)]
expected_η_square = [0 for _ in range(num_samples)]
expected_η_η_1 = [0 for _ in range(num_samples)]
for j in range(num_samples-1, -1, -1):
expected_η[j] = smoothed_means[j]
expected_η_square[j] = smoothed_means[j]**2 + smoothed_variances[j]
if j != 0:
expected_η_η_1[j] = smoothed_means[j] * smoothed_means[j-1] + M[j]
return expected_η, expected_η_square, expected_η_η_1
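# Minimal usage sketch, assuming CustomKF exposes batch_filter(initial_mean,
# initial_variance, z, del_t) exactly as called above (all numbers illustrative):
#
#   rts = CustomRTS(z=[1.0, 1.4, 0.9, 1.1], del_t=[0.1, 0.1, 0.1, 0.1])
#   e_eta, e_eta_sq, e_eta_lag1 = rts.run(initial_mean=0.0, initial_variance=1.0,
#                                         Q=0.01, sigma_square=0.05)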
|
py | 1a5aabb0da00fa189f3728394147eda7f38d9928 |
from setuptools import find_packages, setup
with open("README.md", "r") as f:
README = f.read()
setup(
name='yappa',
version='0.4.19',
url='https://github.com/turokg/yappa',
description='Easy serverless deploy of python web applications',
long_description_content_type="text/markdown",
long_description=README,
author='Egor Korovin',
author_email='[email protected]',
packages=find_packages(),
install_requires=[
'boto3>=1.10',
'click>=8.0',
'httpx>=0.18',
'yandexcloud>=0.102.1',
'boltons>=21.0',
'idna<3,>=2.5',
"PyYAML>=5.0",
"furl>=2.0",
"pytz>=2021"
],
python_requires='>=3.8.0',
entry_points={'console_scripts': ['yappa = yappa.cli:cli']},
license="MIT",
package_data={'yappa': ['*.yaml']},
include_package_data=True,
)
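# Once installed (e.g. `pip install yappa`, or `pip install .` from a checkout),
# the console_scripts entry point above exposes a `yappa` command backed by click,
# so `yappa --help` should print the available subcommands.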
|
py | 1a5aabcc14149b226b9c132430ee8c7b919dd539 |
"""Support for Xiaomi Mi Air Quality Monitor (PM2.5)."""
import logging
from miio import AirQualityMonitor, DeviceException
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_MODEL,
DOMAIN,
MODEL_AIRQUALITYMONITOR_B1,
MODEL_AIRQUALITYMONITOR_S1,
MODEL_AIRQUALITYMONITOR_V1,
)
from .device import XiaomiMiioEntity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Air Quality Monitor"
ATTR_CO2E = "carbon_dioxide_equivalent"
ATTR_TVOC = "total_volatile_organic_compounds"
ATTR_TEMP = "temperature"
ATTR_HUM = "humidity"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
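# Example of the (deprecated) YAML platform configuration accepted by the schema
# above, assuming the integration domain is "xiaomi_miio"; host, token and name
# are hypothetical placeholders (the token must be exactly 32 characters):
#
#   air_quality:
#     - platform: xiaomi_miio
#       host: 192.168.1.42
#       token: 0123456789abcdef0123456789abcdef
#       name: Living Room Air Monitor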
PROP_TO_ATTR = {
"carbon_dioxide_equivalent": ATTR_CO2E,
"total_volatile_organic_compounds": ATTR_TVOC,
"temperature": ATTR_TEMP,
"humidity": ATTR_HUM,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import Miio configuration from YAML."""
_LOGGER.warning(
"Loading Xiaomi Miio Air Quality via platform setup is deprecated. "
"Please remove it from your configuration"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Xiaomi Air Quality from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_DEVICE:
host = config_entry.data[CONF_HOST]
token = config_entry.data[CONF_TOKEN]
name = config_entry.title
model = config_entry.data[CONF_MODEL]
unique_id = config_entry.unique_id
_LOGGER.debug("Initializing with host %s (token %s...)", host, token[:5])
device = AirQualityMonitor(host, token, model=model)
if model == MODEL_AIRQUALITYMONITOR_S1:
entities.append(AirMonitorS1(name, device, config_entry, unique_id))
elif model == MODEL_AIRQUALITYMONITOR_B1:
entities.append(AirMonitorB1(name, device, config_entry, unique_id))
elif model == MODEL_AIRQUALITYMONITOR_V1:
entities.append(AirMonitorV1(name, device, config_entry, unique_id))
else:
_LOGGER.warning("AirQualityMonitor model '%s' is not yet supported", model)
async_add_entities(entities, update_before_add=True)
class AirMonitorB1(XiaomiMiioEntity, AirQualityEntity):
"""Air Quality class for Xiaomi cgllc.airmonitor.b1 device."""
def __init__(self, name, device, entry, unique_id):
"""Initialize the entity."""
super().__init__(name, device, entry, unique_id)
self._icon = "mdi:cloud"
self._available = None
self._air_quality_index = None
self._carbon_dioxide = None
self._carbon_dioxide_equivalent = None
self._particulate_matter_2_5 = None
self._total_volatile_organic_compounds = None
self._temperature = None
self._humidity = None
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide_equivalent = state.co2e
self._particulate_matter_2_5 = round(state.pm25, 1)
self._total_volatile_organic_compounds = round(state.tvoc, 3)
self._temperature = round(state.temperature, 2)
self._humidity = round(state.humidity, 2)
self._available = True
except DeviceException as ex:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
return self._air_quality_index
@property
def carbon_dioxide(self):
"""Return the CO2 (carbon dioxide) level."""
return self._carbon_dioxide
@property
def carbon_dioxide_equivalent(self):
"""Return the CO2e (carbon dioxide equivalent) level."""
return self._carbon_dioxide_equivalent
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._particulate_matter_2_5
@property
def total_volatile_organic_compounds(self):
"""Return the total volatile organic compounds."""
return self._total_volatile_organic_compounds
@property
def temperature(self):
"""Return the current temperature."""
return self._temperature
@property
def humidity(self):
"""Return the current humidity."""
return self._humidity
@property
def extra_state_attributes(self):
"""Return the state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
class AirMonitorS1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._carbon_dioxide = state.co2
self._particulate_matter_2_5 = state.pm25
self._total_volatile_organic_compounds = state.tvoc
self._temperature = state.temperature
self._humidity = state.humidity
self._available = True
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class AirMonitorV1(AirMonitorB1):
"""Air Quality class for Xiaomi cgllc.airmonitor.s1 device."""
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._air_quality_index = state.aqi
self._available = True
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return None
|
py | 1a5aac6f527e8c460158a7b745bb61c209ef5f13 |
from django.conf import settings
MEDIA_SERVER_HOST = getattr(settings, "MEDIA_SERVER_HOST", "")
MEDIA_SERVER_USER = getattr(settings, "MEDIA_SERVER_USER", "")
MEDIA_SERVER_PASSWORD = getattr(settings, "MEDIA_SERVER_PASSWORD", "")
MEDIA_SERVER_PORT = getattr(settings, "MEDIA_SERVER_PORT", 22)
MEDIA_SERVER_VIDEO_BUCKET = getattr(settings, "MEDIA_SERVER_VIDEO_BUCKET", "")
MEDIA_SERVER_AUDIO_BUCKET = getattr(settings, "MEDIA_SERVER_AUDIO_BUCKET", "")
MEDIA_SERVER_AUDIO_PATH = getattr(settings, "MEDIA_SERVER_AUDIO_PATH", "")
MEDIA_SERVER_VIDEO_PATH = getattr(settings, "MEDIA_SERVER_VIDEO_PATH", "")
MULTIMEDIA_NOTIFICATION_EMAIL = getattr(settings, "MULTIMEDIA_NOTIFICATION_EMAIL", "")
DEFAULT_VIDEO_PROFILES = {
'f4v': {
'encode_cmd': 'ffmpeg -y -i "%(input)s" -f mp4 -acodec libfaac -ab 128k -vcodec libx264 -vpre slow -b 690k -ac 1 -s 620x350 -r 30 "%(output)s"',
'encode':True,
'name':'Flash Video',
'container':'f4v',
'thumbnail_cmd': 'ffmpeg -y -itsoffset -%(offset)s -i "%(input)s" -vcodec mjpeg -vframes 1 -an -f rawvideo -s 620x350 "%(output)s"'
},
}
MULTIMEDIA_VIDEO_PROFILES = getattr(settings, "MULTIMEDIA_VIDEO_PROFILES", DEFAULT_VIDEO_PROFILES)
DEFAULT_AUDIO_PROFILES = {
'audio': {
'encode_cmd': 'ffmpeg -y -i "%(input)s" "%(output)s"',
'encode':True,
'name':'MP3 Audio',
'container':'mp3',
},
}
MULTIMEDIA_AUDIO_PROFILES = getattr(settings, "MULTIMEDIA_AUDIO_PROFILES", DEFAULT_AUDIO_PROFILES)
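# Example override in a project's settings.py (a sketch with hypothetical ffmpeg
# options; the %(input)s / %(output)s placeholders are used the same way as in
# the defaults above):
#
#   MULTIMEDIA_AUDIO_PROFILES = {
#       'audio': {
#           'encode': True,
#           'name': 'MP3 Audio (mono)',
#           'container': 'mp3',
#           'encode_cmd': 'ffmpeg -y -i "%(input)s" -ac 1 -ab 96k "%(output)s"',
#       },
#   }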
MULTIMEDIA_APP_LABEL = getattr(settings, "MULTIMEDIA_APP_LABEL", "Multimedia")
|
py | 1a5aada11669d48941521eabb1cbab7b66bb228f |
from cpf_cnpj import Documento
from TelefonesBr import TelefonesBr
from datasbr import DatasBr
from acesso_cep import BuscaEndereco
import requests
exemplo_cpf = "94561576010"
exemplo_cnpj = "35379838000112"
telefone = "11976453329"
cep = "01001000"
cpf_um = Documento.cria_documento(exemplo_cpf)
cnpj_um = Documento.cria_documento(exemplo_cnpj)
telefone_um = TelefonesBr(telefone)
hora_cadastro = DatasBr()
tempo_cadastro = hora_cadastro.tempo_cadastro()
objeto_cep = BuscaEndereco(cep)
logradouro, bairro, cidade, uf = objeto_cep.acessa_via_cep()
print(f'''
CPF: {cpf_um}
CNPJ: {cnpj_um}
Telefone: {telefone_um}
Hora do Cadastro: {hora_cadastro}
Tempo de Cadastro: {tempo_cadastro}
Dados Cadastrais:
Rua: {logradouro}
Bairro: {bairro}
Cidade: {cidade}
Uf: {uf}
''')
|
py | 1a5aaea380cfa1f9056d8842a1b9c81dd45b730a |
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
1.1.0 - Add chap support and minor bug fixes
1.1.1 - Add wait logic for delete volumes
1.1.2 - Update ig to None before delete volume
1.2.0 - Add retype support
"""
VERSION = '1.2.0'
volume_stats = {}
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(
options.cloudbyte_add_qosgroup_opts)
self.configuration.append_config_values(
options.cloudbyte_create_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_update_volume_opts)
self.configuration.append_config_values(
options.cloudbyte_connection_opts)
self.cb_use_chap = self.configuration.use_chap_auth
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.parse.urlencode(api)
return url
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = http_client.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
def _api_request_for_cloudbyte(self, cmd, params, version=None):
"""Make http calls to CloudByte."""
LOG.debug("Executing CloudByte API for command [%s].", cmd)
if version is None:
version = CloudByteISCSIDriver.VERSION
# Below is retrieved from /etc/cinder/cinder.conf
apikey = self.configuration.cb_apikey
if apikey is None:
msg = (_("API key is missing for CloudByte driver."))
raise exception.VolumeBackendAPIException(data=msg)
host = self.configuration.san_ip
# Construct the CloudByte URL with query params
url = self._get_url(cmd, params, apikey)
data = {}
error_details = None
http_status = None
try:
# Execute CloudByte API & frame the response
res_obj = self._execute_and_get_response_details(host, url)
data = res_obj['data']
error_details = res_obj['error']
http_status = res_obj['http_status']
except http_client.HTTPException as ex:
msg = (_("Error executing CloudByte API [%(cmd)s], "
"Error: %(err)s.") %
{'cmd': cmd, 'err': ex})
raise exception.VolumeBackendAPIException(data=msg)
# Check if it was an error response from CloudByte
if http_status != 200:
msg = (_("Failed to execute CloudByte API [%(cmd)s]."
" Http status: %(status)s,"
" Error: %(error)s.") %
{'cmd': cmd, 'status': http_status,
'error': error_details})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
cmd)
return data
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _add_qos_group_request(self, volume, tsmid, volume_name,
qos_group_params):
# Prepare the user input params
params = {
"name": "QoS_" + volume_name,
"tsmid": tsmid
}
# Get qos related params from configuration
params.update(self.configuration.cb_add_qosgroup)
# Override the default configuration by qos specs
if qos_group_params:
params.update(qos_group_params)
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name, file_system_params):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params.update(self.configuration.cb_create_volume)
# Override the default configuration by qos specs
if file_system_params:
params.update(file_system_params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name, account_name):
# Filter required tsm's details
tsms = data['listTsmResponse'].get('listTsm')
if tsms is None:
msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
"for account [%(account)s].") %
{'tsm': tsm_name, 'account': account_name})
raise exception.VolumeBackendAPIException(data=msg)
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
def _retry_volume_operation(self, operation, retries,
max_retries, jobid,
cb_volume):
"""CloudByte async calls via the FixedIntervalLoopingCall."""
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
count = retries['count']
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if result_res is None:
msg = (_(
"Null response received while querying "
"for [%(operation)s] based job [%(job)s] "
"at CloudByte storage.") %
{'operation': operation, 'job': jobid})
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
if status == 1:
LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for "
"volume [%(cb_volume)s]."),
{'operation': operation, 'cb_volume': cb_volume})
raise loopingcall.LoopingCallDone()
elif status == 2:
job_result = result_res.get("jobresult")
err_msg = job_result.get("errortext")
err_code = job_result.get("errorcode")
msg = (_(
"Error in Operation [%(operation)s] "
"for volume [%(cb_volume)s] in CloudByte "
"storage: [%(cb_error)s], "
"error code: [%(error_code)s]."),
{'cb_error': err_msg,
'error_code': err_code,
'cb_volume': cb_volume,
'operation': operation})
raise exception.VolumeBackendAPIException(data=msg)
elif count == max_retries:
# All attempts exhausted
LOG.error(_LE("CloudByte operation [%(operation)s] failed"
" for volume [%(vol)s]. Exhausted all"
" [%(max)s] attempts."),
{'operation': operation,
'vol': cb_volume,
'max': max_retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
count += 1
retries['count'] = count
LOG.debug("CloudByte operation [%(operation)s] for"
" volume [%(vol)s]: retry [%(retry)s] of [%(max)s].",
{'operation': operation,
'vol': cb_volume,
'retry': count,
'max': max_retries})
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Create Volume',
retries,
max_retries,
jobid,
cb_volume_name)
timer.start(interval=retry_interval).wait()
def _wait_for_volume_deletion(self, volume_response, cb_volume_id):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('deleteFileSystemResponse')
if vol_res is None:
msg = _("Null response received while deleting volume [%s] "
"at CloudByte storage.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"delete volume [%s] response.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_delete_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_delete_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Delete Volume',
retries,
max_retries,
jobid,
cb_volume_id)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume, chap):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
# Will provide CHAP Authentication on forthcoming patches/release
model_update['provider_auth'] = None
if chap:
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
{'iqn': volume['iqnname'], 'proid': volume['id']})
return model_update
def _build_provider_details_from_response(self,
cb_volumes,
volume_name,
chap):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol,
chap)
break
return model_update
def _get_initiator_group_id_from_response(self, data, filter):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
if ig.get('initiatorgroup') == filter:
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
if ag_id:
params['authgroupid'] = ag_id
params['authmethod'] = "CHAP"
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot_name, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot_name:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
def _get_auth_group_id_from_response(self, data):
"""Find iSCSI auth group id."""
chap_group = self.configuration.cb_auth_group
ag_list_res = data.get('listiSCSIAuthGroupResponse')
if ag_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi auth groups.")
raise exception.VolumeBackendAPIException(data=msg)
ag_list = ag_list_res.get('authgroup')
if ag_list is None:
msg = _('No iscsi auth groups were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ag_id = None
for ag in ag_list:
if ag.get('name') == chap_group:
ag_id = ag['id']
break
else:
msg = _("Auth group [%s] details not found in "
"CloudByte storage.") % chap_group
raise exception.VolumeBackendAPIException(data=msg)
return ag_id
def _get_auth_group_info(self, account_id, ag_id):
"""Fetch the auth group details."""
params = {"accountid": account_id, "authgroupid": ag_id}
auth_users = self._api_request_for_cloudbyte(
'listiSCSIAuthUser', params)
auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse')
if auth_user_details_res is None:
msg = _("No response was received from CloudByte storage "
"list iSCSI auth user API call.")
raise exception.VolumeBackendAPIException(data=msg)
auth_user_details = auth_user_details_res.get('authuser')
if auth_user_details is None:
msg = _("Auth user details not found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
chapuser = auth_user_details[0].get('chapusername')
chappassword = auth_user_details[0].get('chappassword')
if chapuser is None or chappassword is None:
msg = _("Invalid chap user details found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id}
return data
def _get_chap_info(self, account_id):
"""Fetch the chap details."""
params = {"accountid": account_id}
iscsi_auth_data = self._api_request_for_cloudbyte(
'listiSCSIAuthGroup', params)
ag_id = self._get_auth_group_id_from_response(
iscsi_auth_data)
return self._get_auth_group_info(account_id, ag_id)
def _export(self):
model_update = {'provider_auth': None}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
chap = self._get_chap_info(account_id)
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
return model_update
def _update_initiator_group(self, volume_id, ig_name):
# Get account id of this account
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
# Filter the list of initiator groups with the name
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, ig_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Update the iscsi service with above fetched iscsi_id
self._request_update_iscsi_service(iscsi_id, ig_id, None)
LOG.debug("CloudByte initiator group updated successfully for volume "
"[%(vol)s] with ig [%(ig)s].",
{'vol': volume_id,
'ig': ig_name})
def _get_qos_by_volume_type(self, ctxt, type_id):
"""Get the properties which can be QoS or file system related."""
update_qos_group_params = {}
update_file_system_params = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
extra_specs = volume_type.get('extra_specs')
if qos_specs_id is not None:
specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
# Override extra specs with specs
# Hence specs will prefer QoS than extra specs
extra_specs.update(specs)
for key, value in extra_specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.configuration.cb_update_qos_group:
update_qos_group_params[key] = value
elif key in self.configuration.cb_update_file_system:
update_file_system_params[key] = value
return update_qos_group_params, update_file_system_params
def create_volume(self, volume):
qos_group_params = {}
file_system_params = {}
tsm_name = self.configuration.cb_tsm_name
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
# Set backend storage volume name using OpenStack volume id
cb_volume_name = volume['id'].replace("-", "")
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id is not None:
qos_group_params, file_system_params = (
self._get_qos_by_volume_type(ctxt, type_id))
LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
"at CloudByte storage w.r.t "
"OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_name,
'stack_vol': volume.get('id'),
'tsm': tsm_name})
tsm_data = self._request_tsm_details(account_id)
tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
# Send request to create a qos group before creating a volume
LOG.debug("Creating qos group for CloudByte volume [%s].",
cb_volume_name)
qos_data = self._add_qos_group_request(
volume, tsm_details.get('tsmid'), cb_volume_name, qos_group_params)
# Extract the qos group id from response
qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
LOG.debug("Successfully created qos group for CloudByte volume [%s].",
cb_volume_name)
# Send a create volume request to CloudByte API
vol_data = self._create_volume_request(
volume, tsm_details.get('datasetid'), qosgroupid,
tsm_details.get('tsmid'), cb_volume_name, file_system_params)
# Since create volume is an async call;
# need to confirm the creation before proceeding further
self._wait_for_volume_creation(vol_data, cb_volume_name)
# Fetch iscsi id
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params={})
volume_id = self._get_volume_id_from_response(cb_volumes,
cb_volume_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, 'ALL')
LOG.debug("Updating iscsi service for CloudByte volume [%s].",
cb_volume_name)
ag_id = None
chap_info = {}
if self.cb_use_chap is True:
chap_info = self._get_chap_info(account_id)
ag_id = chap_info['ag_id']
# Update the iscsi service with above fetched iscsi_id & ig_id
self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
LOG.debug("CloudByte volume [%(vol)s] updated with "
"iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
"authentication group [%(ag)s].",
{'vol': cb_volume_name, 'iscsi': iscsi_id,
'ig': ig_id, 'ag': ag_id})
# Provide the model after successful completion of above steps
provider = self._build_provider_details_from_response(
cb_volumes, cb_volume_name, chap_info)
LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
# Search cb_volume_id in CloudByte volumes
# incase it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
# Need to set the initiator group to None before deleting
self._update_initiator_group(cb_volume_id, 'None')
params = {"id": cb_volume_id}
del_res = self._api_request_for_cloudbyte('deleteFileSystem',
params)
self._wait_for_volume_deletion(del_res, cb_volume_id)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
# Set backend storage snapshot name using OpenStack snapshot id
snapshot_name = "snap_" + snapshot['id'].replace("-", "")
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
First it will create a snapshot of the source/parent volume,
then it creates a clone of this newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = src_volume.get('id')
# Generating id for snapshot
# as this is not user entered in this particular usecase
snapshot_id = six.text_type(uuid.uuid1())
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
chap_info = {}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
chap_info = self._get_chap_info(account_id)
model_update = self._build_provider_details_from_volume(cb_vol,
chap_info)
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
# If cb_snapshot_path is 'None'
# then no need to execute CloudByte API
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
def create_export(self, context, volume, connector):
"""Setup the iscsi export info."""
return self._export()
def ensure_export(self, context, volume):
"""Verify the iscsi export info."""
return self._export()
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
def retype(self, ctxt, volume, new_type, diff, host):
"""Retypes a volume, QoS and file system update is only done."""
cb_volume_id = volume.get('provider_id')
if cb_volume_id is None:
message = _("Provider information w.r.t CloudByte storage "
"was not found for OpenStack "
"volume [%s].") % volume['id']
raise exception.VolumeBackendAPIException(message)
update_qos_group_params, update_file_system_params = (
self._get_qos_by_volume_type(ctxt, new_type['id']))
if update_qos_group_params:
list_file_sys_params = {'id': cb_volume_id}
response = self._api_request_for_cloudbyte(
'listFileSystem', list_file_sys_params)
response = response['listFilesystemResponse']
cb_volume_list = response['filesystem']
cb_volume = cb_volume_list[0]
if not cb_volume:
msg = (_("Volume [%(cb_vol)s] was not found at "
"CloudByte storage corresponding to OpenStack "
"volume [%(ops_vol)s].") %
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
raise exception.VolumeBackendAPIException(data=msg)
update_qos_group_params['id'] = cb_volume.get('groupid')
self._api_request_for_cloudbyte(
'updateQosGroup', update_qos_group_params)
if update_file_system_params:
update_file_system_params['id'] = cb_volume_id
self._api_request_for_cloudbyte(
'updateFileSystem', update_file_system_params)
LOG.info(_LI("Successfully updated CloudByte volume [%(cb_vol)s] "
"corresponding to OpenStack volume [%(ops_vol)s]."),
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
return True
|
py | 1a5aaf2d4bcafb32043c8586b1b1ea67156474d7 |
from flask import Flask, render_template, request, redirect, url_for
from music_data_types import Artist, Song, Discography
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html')
@app.route('/', methods=['POST'])
def render_page():
name = request.form['name']
option = request.form['radios']
if name:
if option == "Song":
return redirect(url_for('render_song', name=name))
else:
            return redirect(url_for('render_artist', name=name))
    return redirect(url_for('index'))
@app.route('/song/<name>')
def render_song(name):
song = Song()
song.search_song(name)
song.get_mood()
song.get_keywords()
mood = song.mood
words = song.keywords
song_name = song.name
artist = song.artist
rating = song.rating
album = song.album
genre = song.genre
link = song.lyrics_link
return render_template("song.html", song_name=song_name, mood=mood, words=words, artist=artist, rating=rating,
album=album, genre=genre, link=link)
@app.route('/artist/<name>')
def render_artist(name):
artist = Artist(name)
disc = Discography(artist)
artist_name = artist.name
rating = artist.rating
genre = artist.genre
country = artist.country
words = disc.get_overall_keywords()
moods = disc.get_overall_mood()
songs_num = disc.songs_num
songs = disc.top_songs
link = artist.link
return render_template("artist.html", artist_name=artist_name, moods=moods, words=words, genre=genre, rating=rating,
country=country, link=link, songs_num=songs_num, songs=songs)
@app.errorhandler(500)
def internal_error(error):
return render_template('error.html')
@app.errorhandler(404)
def not_found_error(error):
return render_template('error.html')
if __name__ == '__main__':
app.run()
|
py | 1a5aafd54dd7276cc19ef3d473149abe339d46ee |
from Bio.Seq import Seq
from Bio.SeqUtils import nt_search, GC, molecular_weight
from Bio import SeqIO
seqobj = Seq("ATCGATATATACGCGAT")
print(seqobj.translate(to_stop=True))
patron = Seq("ACG")
resultado = nt_search(str(seqobj), patron)
print(resultado)
print(GC(seqobj))
print(molecular_weight(seqobj))
# Exercise 1: ORFs
# 1. Define the sequence.
sequence = Seq("AGCCATGTAGCTAACTCAGGTTACATGGGGATGACCCCGCGACTTGGATTAGAGTCTCTTTTGGAATAAGCCTGAATGATCCGAGTAGCATCTCAG")
# 2. Search for the start codon.
inicio = Seq("ATG")
posicion = nt_search(str(sequence), inicio)
# 3. Go through the start-codon positions found and take the subsequence from each one.
for i in range(1, len(posicion)):
    seq_prot = sequence[posicion[i]:]
protein = seq_prot.translate(to_stop=True)
# CLASS 2, VERSION 2
# 1. Store record IDs in a list.
mala_calidad = []
umbral = 32
new = open("../docs/records.txt", "w")
# 2. Access the Phred qualities and compute the average for each record.
for record in SeqIO.parse("../docs/sample.fastq", "fastq"):
promedio = sum(record.letter_annotations["phred_quality"]) / len(record.letter_annotations["phred_quality"])
    # 2.1. Add the record ID if the average is below the quality threshold.
if promedio < umbral:
mala_calidad.append((promedio, record.id))
    # 2.2. Save the records that do pass the threshold.
if promedio > umbral:
        new.write(record.id + "\n")
# 3. Print the length of the mala_calidad list.
print(len(mala_calidad))
new.close()
# Gen Bank
'''
for gb_record in SeqIO.parse("../docs/aichi.gb", "genbank"):
print('ID', gb_record.id)
print('Secuencia', str(gb_record.seq)[0:30], '...')
print('Longitud', len(gb_record))
for annotation, value in gb_record.annotations.items():
print(annotation, value)
'''
# Exercise 4.
for gb_record in SeqIO.parse("../docs/virus.gb", "genbank"):
for annotation, value in gb_record.annotations.items():
print(annotation, value)
print(gb_record.annotations['organism'])
print(gb_record.annotations['sequence_version'])
print(gb_record.features[0].location)
# Exercise 5. Extract the country and isolation source of the isolate (source = feature 0).
print(gb_record.features[0].qualifiers['isolation_source'])
print(gb_record.features[0].qualifiers['country'])
# Store the start and end of the sequence.
start = gb_record.features[1].location.nofuzzy_start
end = gb_record.features[1].location.nofuzzy_end
# Keep the sequence between the start and the end.
nueva_seq = gb_record.seq[start:end]
# Translate to protein.
protein = nueva_seq.translate()
print(protein)
# Print data for the L gene.
print(gb_record.features[9].qualifiers['gene'])
start_L = gb_record.features[9].location.nofuzzy_start
end_L = gb_record.features[9].location.nofuzzy_end
sequence_L = gb_record.seq[start_L:end_L]
print(sequence_L)
rna_L = sequence_L.transcribe()
print(rna_L[0:5])
protein_L = sequence_L.translate()
print(protein_L)
number = len(gb_record.features) - 1
while number > -1:
if gb_record.features[number].qualifiers['gene'] == ['L']:
print(gb_record.features[number].qualifiers['gene'])
break
number -= 1
|
py | 1a5ab06b9418a072e116f91c7de6ffcac61dd424 |
from __future__ import print_function
import json
import urllib
import boto3
import logging, logging.config
from botocore.client import Config
# Because Step Functions client uses long polling, read timeout has to be > 60 seconds
sfn_client_config = Config(connect_timeout=50, read_timeout=70)
sfn = boto3.client('stepfunctions', config=sfn_client_config)
sts = boto3.client('sts')
account_id = sts.get_caller_identity().get('Account')
region_name = boto3.session.Session().region_name
def load_log_config():
# Basic config. Replace with your own logging config if required
root = logging.getLogger()
root.setLevel(logging.INFO)
return root
def map_activity_arn(bucket, key):
# Map s3 key to activity ARN based on a convention
# Here, we simply map bucket name plus last element in the s3 object key (i.e. filename) to activity name
key_elements = [x.strip() for x in key.split('/')]
activity_name = '{}-{}'.format(bucket, key_elements[-1])
return 'arn:aws:states:{}:{}:activity:{}'.format(region_name, account_id, activity_name)
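# Illustration of the convention above with hypothetical values:
#   bucket = "my-ingest-bucket", key = "uploads/2021/report.csv"
#   -> activity name "my-ingest-bucket-report.csv"
#   -> "arn:aws:states:<region>:<account-id>:activity:my-ingest-bucket-report.csv"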
# Load logging config and create logger
logger = load_log_config()
def handler(event, context):
logger.info("Received event: " + json.dumps(event, indent=2))
# Get the object from the event and show its content type
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.unquote_plus(event['Records'][0]['s3']['object']['key'].encode('utf8'))
# Based on a naming convention that maps s3 keys to activity ARNs, deduce the activity arn
sfn_activity_arn = map_activity_arn(bucket, key)
sfn_worker_name = 'on_s3_object_created'
try:
try:
response = sfn.get_activity_task(
activityArn=sfn_activity_arn,
workerName=sfn_worker_name
)
except Exception as e:
logger.critical(e.message)
logger.critical(
'Unrecoverable error invoking get_activity_task for {}.'.format(sfn_activity_arn))
raise
# Get the Task Token
sfn_task_token = response.get('taskToken', '')
logger.info('Sending "Task Succeeded" signal to Step Functions..')
# Build an output dict and format it as JSON
task_output_dict = {
'S3BucketName': bucket,
'S3Key': key,
'SFNActivityArn': sfn_activity_arn
}
task_output_json = json.dumps(task_output_dict)
sfn_resp = sfn.send_task_success(
taskToken=sfn_task_token,
output=task_output_json
)
except Exception as e:
logger.critical(e)
raise e
|
py | 1a5ab0e9c7aba81b1a39541b85d4369f68c7850a |
import math
def no_moving_vehicles(object_localizer_inference) -> bool:
no_movement_mdv_max_length = 3
for obstacle in object_localizer_inference:
if obstacle["label"] == "car" or obstacle["label"] == "bicycle":
mdv_length = math.sqrt(obstacle["mdv"][0]**2 + obstacle["mdv"][1]**2 + obstacle["mdv"][2]**2)
if mdv_length > no_movement_mdv_max_length:
return False
    return True  # if every car/bicycle mdv is below the threshold, there is no (or only negligible) movement
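# Minimal usage sketch, assuming the inference format implied above: a list of
# dicts with a "label" string and a 3-component "mdv" motion vector (sample
# values are purely illustrative).
if __name__ == "__main__":
    sample_inference = [
        {"label": "car", "mdv": [0.5, 0.2, 0.1]},     # |mdv| ~ 0.55, below the threshold of 3
        {"label": "person", "mdv": [5.0, 0.0, 0.0]},  # not a car/bicycle, so not checked
    ]
    print(no_moving_vehicles(sample_inference))  # expected output: True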
|
py | 1a5ab22fbcc59470d145d0c3fd491a1bf11d8328 |
from datetime import timedelta
from flask import Flask, redirect, render_template, request, url_for
import json
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
from webpixels import PixelSet, RgbPixel
from webpixels.controller import ColorKinetics
app = Flask(__name__)
ioloop = IOLoop.instance()
config_file = None
channels = {}
pixels = {}
fixtures = {}
presets = {}
last_preset = None
def load_config(config_file):
with open(config_file) as f:
config = json.loads(f.read())
for name, controllerConfig in config['controllers'].items():
controllerType = controllerConfig['type']
if controllerType == 'ColorKinetics':
controller = ColorKinetics(name, controllerConfig['host'])
for channel in controller.channels:
channels[channel.get_name()] = channel
for name, pixelConfig in config['pixels'].items():
chan_set = [channels[channel] for channel in pixelConfig['channels']]
pixel = RgbPixel(name, *chan_set)
pixels[pixel.get_name()] = pixel
for name, fixtureConfig in config['fixtures'].items():
pixel_set = [pixels[pixel] for pixel in fixtureConfig['pixels']]
fixture = PixelSet(name, pixel_set)
fixtures[fixture.get_name()] = fixture
global all_pixel
all_pixel = PixelSet('all', pixels.values())
if 'presets' in config:
presets.update(config['presets'])
def save_config(config_file):
controller_set = set()
saved_controllers = {}
saved_pixels = {}
saved_fixtures = {}
for pixel in pixels.values():
controller_set.update(pixel.get_controllers())
saved_pixels[pixel.get_name()] = {
'channels': [
pixel.red.get_name(),
pixel.green.get_name(),
pixel.blue.get_name()
]
}
for fixture in fixtures.values():
saved_fixtures[fixture.get_name()] = {
'pixels': [subpixel.get_name() for subpixel in fixture.get_pixels()]
}
for controller in controller_set:
if isinstance(controller, ColorKinetics):
controller_type = "ColorKinetics"
saved_controllers[controller.get_name()] = {
'host': controller.host,
'type': controller_type
}
save_data = json.dumps({
'controllers': saved_controllers,
'pixels': saved_pixels,
'fixtures': saved_fixtures,
'presets': presets
}, sort_keys=True, indent=2, separators=(',', ': '))
with open(config_file, 'w') as f:
f.write(save_data)
def redirect_url():
return redirect(request.args.get('next') or \
request.referrer or \
url_for('index'))
fade_in_progress = False
def fade_step():
global fade_in_progress
need_more = False
controller_set = set()
for pixel in pixels.values():
if pixel.step():
need_more = True
controller_set.update(pixel.get_controllers())
for controller in controller_set:
controller.sync()
if need_more:
ioloop.add_timeout(timedelta(milliseconds=25), fade_step)
else:
fade_in_progress = False
def start_fade():
global fade_in_progress
if fade_in_progress:
return
fade_in_progress = True
fade_step()
@app.route('/', methods=['GET'])
def index():
fixture_list = []
for name, fixture in fixtures.items():
subpixels = [(pixel.get_name(), pixel.get_html_color())
for pixel in fixture.get_pixels()]
fixture_list.append((name, fixture.get_html_color(), subpixels))
fixture_list.sort(key=lambda fixture: fixture[0])
return render_template('index.html',
all=all_pixel.get_html_color(),
fixtures=fixture_list)
@app.route('/pixel/<name>', methods=['GET', 'POST'])
def pixel(name):
if name == 'all':
pixel = all_pixel
else:
pixel = fixtures.get(name)
if pixel is None:
pixel = pixels[name]
if request.method == 'POST':
return pixel_post(pixel)
else:
return pixel_get(pixel)
def pixel_post(pixel):
r = int(request.form['r'])
g = int(request.form['g'])
b = int(request.form['b'])
pixel.set_target(r, g, b)
start_fade()
return ""
def pixel_get(pixel):
r, g, b = pixel.get()
return render_template('pixel.html',
pixel=pixel.get_name(),
r=r, g=g, b=b)
@app.route('/presets', methods=['GET'])
def preset_list():
preset_list = list(presets.keys())
preset_list.sort()
return render_template('presets.html',
presets=preset_list,
last_preset=last_preset)
@app.route('/preset/save', methods=['POST'])
def preset_save():
preset = {}
for name, pixel in pixels.items():
preset[name] = pixel.get()
presets[request.form['name']] = preset
save_config(config_file)
global last_preset
last_preset = request.form['name']
return ""
@app.route('/preset/apply', methods=['POST'])
def preset_apply():
name = request.form['preset']
preset = presets[name]
for name, value in preset.items():
pixel = pixels[name]
pixel.set_target(*value)
start_fade()
global last_preset
last_preset = name
return ""
@app.route('/preset/delete', methods=['POST'])
def preset_delete():
del presets[request.form['name']]
save_config(config_file)
return ""
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print("Usage: python server.py config.json")
config_file = sys.argv[1]
load_config(config_file)
app.debug = True
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(80)
ioloop.start()
|
py
|
1a5ab24e547471b9f153643507ec4058aab9b92c
|
# -*- coding: utf-8 -*-
"""
Project: Psychophysics_exps
Creator: Miao
Create time: 2021-01-21 10:49
IDE: PyCharm
Introduction: Results exp1: deviation scores as a function of numerosity, plotted separately for each numerosity range (window size)
"""
import exp1_radial_display2
import sys
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from src.analysis.exp1_alignment_analysis import add_color_code_5levels, get_analysis_dataframe
from src.analysis.exp1_analysis import get_data_to_analysis
from src.commons.process_dataframe import keep_valid_columns, get_col_names, insert_new_col_from_two_cols, \
get_sub_df_according2col_value
from src.constants.exp1_constants import KEPT_COL_NAMES_STIMU_DF2, KEPT_COL_NAMES2
if __name__ == '__main__':
is_debug = True
# TODO set parameters
# read stimuli and data
PATH_DATA = "../data/exp1_rerun_data/"
FILENAME_DATA = "cleanedTotalData_fullinfo_v3.xlsx"
stimuli_to_merge = exp1_radial_display2.stimuli_df
data_to_merge = pd.read_excel(PATH_DATA + FILENAME_DATA)
# remove duplicated cols
stimuli_to_merge = keep_valid_columns(stimuli_to_merge, KEPT_COL_NAMES_STIMU_DF2)
# merge stimuli file with data
all_df = pd.merge(data_to_merge,
stimuli_to_merge,
how = "left",
on = ["index_stimuliInfo", "N_disk", "crowdingcons", "winsize"])
# %% preprocess
my_data = keep_valid_columns(all_df, KEPT_COL_NAMES2)
# color coded
insert_new_col_from_two_cols(my_data, "N_disk", "crowdingcons", "colorcode5levels", add_color_code_5levels)
# %% for each numerosity range
winsizes = [0.3, 0.4, 0.5, 0.6, 0.7]
# 5 df in a list
data_sep_ws = [get_sub_df_according2col_value(my_data, "winsize", winsize) for winsize in winsizes]
# transform each df: groupby
data_sep_ws_to_plot = [get_data_to_analysis(sub_df, "deviation_score", "N_disk", "participant_N", "crowdingcons", "colorcode5levels") for sub_df in data_sep_ws]
# %%
# initialise the plot
sns.set(style = "white", color_codes = True)
sns.set_style("ticks", {"xtick.major.size": 5, "ytick.major.size": 3})
# some parameters
x = "N_disk"
y = "deviation_score"
hue = "crowdingcons"
errwidth = 1
capsize = 0.01
alpha = 0.5
palette = ["royalblue", "orangered"]
ci = 68
# plot starts here
fig, axes = plt.subplots(2, 3, figsize = (13, 6), sharex = False, sharey = True)
axes = axes.ravel()
for i, ax in enumerate(axes):
if i < 5:
sns.barplot(x = x, y = y, data = data_sep_ws_to_plot[i], ax = ax, hue = hue, capsize = capsize, errwidth = errwidth, palette = palette, alpha = alpha, ci = ci)
# remove default legend
# ax.get_legend().set_visible(False)
if i == 1:
handles, labels = ax.get_legend_handles_labels()
labels = ["no-crowding", "crowding"]
ax.legend(handles, labels, loc = "best", fontsize = 12)
# set x,y label
if i < 4:
ax.set(xlabel = "", ylabel = "")
elif i == 4:
ax.set(xlabel = "Numerosity", ylabel = "")
ax.xaxis.label.set_size(20)
fig.text(0.08, 0.5, 'Deviation Score', va = 'center', rotation = 'vertical', fontsize = 20)
plt.show()
if is_debug:
col_names = get_col_names(stimuli_to_merge)
|
py
|
1a5ab3d6621a3be1193c48fd2ca805b39bef62c0
|
from gmaps import Gmaps
|
py
|
1a5ab5c9d389302670b82400090e3b3158f70f15
|
import gevent
import json
import unittest2
import base64
import os
import tempfile
import urllib2
from psdash.run import PsDashRunner
try:
import httplib
except ImportError:
# support for python 3
import http.client as httplib
class TestBasicAuth(unittest2.TestCase):
default_username = 'tester'
default_password = 'secret'
def setUp(self):
self.app = PsDashRunner().app
self.client = self.app.test_client()
def _enable_basic_auth(self, username, password):
self.app.config['PSDASH_AUTH_USERNAME'] = username
self.app.config['PSDASH_AUTH_PASSWORD'] = password
def _create_auth_headers(self, username, password):
data = base64.b64encode(':'.join([username, password]))
headers = [('Authorization', 'Basic %s' % data)]
return headers
def test_missing_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
resp = self.client.get('/')
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_correct_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
headers = self._create_auth_headers(self.default_username, self.default_password)
resp = self.client.get('/', headers=headers)
self.assertEqual(resp.status_code, httplib.OK)
def test_incorrect_credentials(self):
self._enable_basic_auth(self.default_username, self.default_password)
headers = self._create_auth_headers(self.default_username, 'wrongpass')
resp = self.client.get('/', headers=headers)
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
class TestAllowedRemoteAddresses(unittest2.TestCase):
def test_correct_remote_address(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
def test_incorrect_remote_address(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_multiple_remote_addresses(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': '127.0.0.1, 10.0.0.1'})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.124.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_multiple_remote_addresses_using_list(self):
r = PsDashRunner({'PSDASH_ALLOWED_REMOTE_ADDRESSES': ['127.0.0.1', '10.0.0.1']})
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '127.0.0.1'})
self.assertEqual(resp.status_code, httplib.OK)
resp = r.app.test_client().get('/', environ_overrides={'REMOTE_ADDR': '10.124.0.1'})
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
class TestEnvironmentWhitelist(unittest2.TestCase):
def test_show_only_whitelisted(self):
r = PsDashRunner({'PSDASH_ENVIRON_WHITELIST': ['USER']})
resp = r.app.test_client().get('/process/%d/environment' % os.getpid())
self.assertTrue(os.environ['USER'] in resp.data)
self.assertTrue('*hidden by whitelist*' in resp.data)
class TestUrlPrefix(unittest2.TestCase):
def setUp(self):
self.default_prefix = '/subfolder/'
def test_page_not_found_on_root(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': self.default_prefix})
resp = r.app.test_client().get('/')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_works_on_prefix(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': self.default_prefix})
resp = r.app.test_client().get(self.default_prefix)
self.assertEqual(resp.status_code, httplib.OK)
def test_multiple_level_prefix(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': '/use/this/folder/'})
resp = r.app.test_client().get('/use/this/folder/')
self.assertEqual(resp.status_code, httplib.OK)
def test_missing_starting_slash_works(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': 'subfolder/'})
resp = r.app.test_client().get('/subfolder/')
self.assertEqual(resp.status_code, httplib.OK)
def test_missing_trailing_slash_works(self):
r = PsDashRunner({'PSDASH_URL_PREFIX': '/subfolder'})
resp = r.app.test_client().get('/subfolder/')
self.assertEqual(resp.status_code, httplib.OK)
class TestHttps(unittest2.TestCase):
def _run(self, https=False):
options = {'PSDASH_PORT': 5051}
if https:
options.update({
'PSDASH_HTTPS_KEYFILE': os.path.join(os.path.dirname(__file__), 'keyfile'),
'PSDASH_HTTPS_CERTFILE': os.path.join(os.path.dirname(__file__), 'cacert.pem')
})
self.r = PsDashRunner(options)
self.runner = gevent.spawn(self.r.run)
gevent.sleep(0.3)
def tearDown(self):
self.r.server.close()
self.runner.kill()
gevent.sleep(0.3)
def test_https_dont_work_without_certs(self):
self._run()
self.assertRaises(urllib2.URLError, urllib2.urlopen, 'https://127.0.0.1:5051')
def test_https_works_with_certs(self):
self._run(https=True)
resp = urllib2.urlopen('https://127.0.0.1:5051')
self.assertEqual(resp.getcode(), httplib.OK)
class TestEndpoints(unittest2.TestCase):
def setUp(self):
self.r = PsDashRunner()
self.app = self.r.app
self.client = self.app.test_client()
self.pid = os.getpid()
self.r.get_local_node().net_io_counters.update()
def test_index(self):
resp = self.client.get('/')
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf('TRAVIS' in os.environ, 'Functionality not supported on Travis CI')
def test_disks(self):
resp = self.client.get('/disks')
self.assertEqual(resp.status_code, httplib.OK)
def test_network(self):
resp = self.client.get('/network')
self.assertEqual(resp.status_code, httplib.OK)
def test_processes(self):
resp = self.client.get('/processes')
self.assertEqual(resp.status_code, httplib.OK)
def test_process_overview(self):
resp = self.client.get('/process/%d' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf(os.environ.get('USER') == 'root', 'It would fail as root as we would have access to pid 1')
def test_process_no_access(self):
resp = self.client.get('/process/1') # pid 1 == init
self.assertEqual(resp.status_code, httplib.UNAUTHORIZED)
def test_process_non_existing_pid(self):
resp = self.client.get('/process/0')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_process_children(self):
resp = self.client.get('/process/%d/children' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_connections(self):
resp = self.client.get('/process/%d/connections' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_environment(self):
resp = self.client.get('/process/%d/environment' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_files(self):
resp = self.client.get('/process/%d/files' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_threads(self):
resp = self.client.get('/process/%d/threads' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_memory(self):
resp = self.client.get('/process/%d/memory' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
@unittest2.skipIf('TRAVIS' in os.environ, 'Functionality not supported on Travis CI')
def test_process_limits(self):
resp = self.client.get('/process/%d/limits' % self.pid)
self.assertEqual(resp.status_code, httplib.OK)
def test_process_invalid_section(self):
resp = self.client.get('/process/%d/whatnot' % self.pid)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_non_existing(self):
resp = self.client.get('/prettywronghuh')
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
def test_connection_filters(self):
resp = self.client.get('/network?laddr=127.0.0.1')
self.assertEqual(resp.status_code, httplib.OK)
def test_register_node(self):
resp = self.client.get('/register?name=examplehost&port=500')
self.assertEqual(resp.status_code, httplib.OK)
def test_register_node_all_params_required(self):
resp = self.client.get('/register?name=examplehost')
self.assertEqual(resp.status_code, httplib.BAD_REQUEST)
resp = self.client.get('/register?port=500')
self.assertEqual(resp.status_code, httplib.BAD_REQUEST)
class TestLogs(unittest2.TestCase):
def _create_log_file(self):
fd, filename = tempfile.mkstemp()
fp = os.fdopen(fd, 'w')
fp.write('woha\n' * 100)
fp.write('something\n')
fp.write('woha\n' * 100)
fp.flush()
return filename
def setUp(self):
self.r = PsDashRunner()
self.app = self.r.app
self.client = self.app.test_client()
self.filename = self._create_log_file()
self.r.get_local_node().logs.add_available(self.filename)
def test_logs(self):
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_logs_removed_file(self):
filename = self._create_log_file()
self.r.get_local_node().logs.add_available(filename)
# first visit to make sure the logs are properly initialized
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
os.unlink(filename)
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_logs_removed_file_uninitialized(self):
filename = self._create_log_file()
self.r.get_local_node().logs.add_available(filename)
os.unlink(filename)
resp = self.client.get('/logs')
self.assertEqual(resp.status_code, httplib.OK)
def test_view(self):
resp = self.client.get('/log?filename=%s' % self.filename)
self.assertEqual(resp.status_code, httplib.OK)
def test_search(self):
resp = self.client.get('/log/search?filename=%s&text=%s' % (self.filename, 'something'),
environ_overrides={'HTTP_X_REQUESTED_WITH': 'xmlhttprequest'})
self.assertEqual(resp.status_code, httplib.OK)
try:
data = json.loads(resp.data)
self.assertIn('something', data['content'])
except ValueError:
self.fail('Log search did not return valid json data')
def test_read(self):
resp = self.client.get('/log?filename=%s' % self.filename,
environ_overrides={'HTTP_X_REQUESTED_WITH': 'xmlhttprequest'})
self.assertEqual(resp.status_code, httplib.OK)
def test_read_tail(self):
resp = self.client.get('/log?filename=%s&seek_tail=1' % self.filename)
self.assertEqual(resp.status_code, httplib.OK)
def test_non_existing_file(self):
filename = "/var/log/surelynotaroundright.log"
resp = self.client.get('/log?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/search?filename=%s&text=%s' % (filename, 'something'))
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/read?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
resp = self.client.get('/log/read_tail?filename=%s' % filename)
self.assertEqual(resp.status_code, httplib.NOT_FOUND)
if __name__ == '__main__':
unittest2.main()
|
py
|
1a5ab61c2a4d800eeb1ce004a25b393c1e61e91b
|
#!/usr/bin/env python3
import sys
import tempfile
import warnings
from lxml import etree as ET
def qname(ns, key, name):
if key in ns:
return "{{{}}}{}".format(ns[key], name)
return name
def create_naf(sofatext, sofaid, xminame):
naf = ET.Element("NAF")
naf.set('version', 'v1.naf')
naf.set('{http://www.w3.org/XML/1998/namespace}lang', 'fr')
nafHeader = ET.SubElement(naf, 'nafHeader')
linguisticProcessors = ET.SubElement(nafHeader, 'linguisticProcessors')
linguisticProcessors.set('layer', 'xmi')
lp = ET.SubElement(linguisticProcessors, 'lp')
lp.set('name', xminame)
lp.set('version', sofaid)
raw = ET.SubElement(naf, 'raw')
raw.text = ET.CDATA(sofatext)
return naf
def search_text(xmi):
ns = xmi.nsmap.copy()
rawtext = ""
sofaid = "-1"
sofatag = qname(ns, 'cas', 'Sofa')
sofas = xmi.findall(sofatag)
if len(sofas) == 0:
return rawtext, sofaid
id = sofas[0].get(qname(ns, 'xmi', 'id'))
if id is not None:
sofaid = id
rawtext = sofas[0].get('sofaString')
for i in range(1, len(sofas)):
sofa = sofas[i]
if sofa.get('sofaID') != '_InitialView':
continue
id = sofa.get(qname(ns, 'xmi', 'id'))
if id is not None:
sofaid = id
rawtext = sofa.get('sofaString')
break
return rawtext, sofaid
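# Minimal illustration of the XMI shape search_text() expects; the namespace URIs
# are illustrative, only the 'cas'/'xmi' prefixes and the attribute names matter here.
EXAMPLE_XMI = (
    '<xmi:XMI xmlns:xmi="http://www.omg.org/XMI" xmlns:cas="http:///uima/cas.ecore">'
    '<cas:Sofa xmi:id="1" sofaID="_InitialView" sofaString="Bonjour le monde."/>'
    '</xmi:XMI>'
)
# search_text(ET.fromstring(EXAMPLE_XMI)) would return ("Bonjour le monde.", "1").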
def emptyNAF():
naf = ET.Element("NAF")
naf.set('version', 'v1.naf')
return naf
def main():
try:
tree = ET.parse(sys.stdin)
xmi = tree.getroot()
rawtext, sofaid = search_text(xmi)
xmiTemp = tempfile.NamedTemporaryFile(delete=False)
tree.write(xmiTemp)
naf = create_naf(rawtext, sofaid, xmiTemp.name)
except Exception as e:
msg = "Warning: an exception occured: {}".format(e)
warnings.warn(msg)
naf = emptyNAF()
#print(xmiTemp.name)
# write
a = ET.tostring(naf, encoding="utf-8")
print(a.decode("utf-8"))
main()
|
py
|
1a5ab66cfe37518d98fb246086340c9b0661f238
|
from multiprocessing import Process, Queue
from Queue import Empty
from ansible_server import ansible_server
# DON'T USE THIS UNLESS YOU KNOW WHAT YOU'RE DOING
# Low-level message sending. For high-level messaging, use send_message.
def send(msg):
send_queue.put_nowait(msg)
# Use this one instead of send
def send_message(msg_type, content):
send({
'header': {'msg_type': msg_type},
'content': content
})
# Receives a message, or None if there is no current message.
def recv():
try:
return recv_queue.get_nowait()
except Empty:
return None
# Start up the Flask-SocketIO server
send_queue = Queue()
recv_queue = Queue()
ansible_p = Process(target=ansible_server, args=(send_queue, recv_queue))
ansible_p.start()
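# Example of how another module might use this wrapper (the import name and the
# message type are illustrative; they depend on what this file is called in your project):
#
#     import messaging
#     messaging.send_message('status', {'state': 'ready'})
#     msg = messaging.recv()   # a dict, or None when nothing is queued
#     if msg is not None:
#         print(msg['header']['msg_type'], msg['content'])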
|
py
|
1a5ab66f116559a011b7b5bafc26ac6f5ef1669e
|
from django import forms
from subscriptions.models import Subscription
from django.utils.translation import gettext as _
class SubscriptionForm(forms.ModelForm):
# STATUS_CHOICES mirrors Subscription.STATUS_CHOICES but omits the "unknown" option so users cannot select it
STATUS_CHOICES = (
(Subscription.STATUS_CHOICE_SUBSCRIBED, _('Subscribed')),
(Subscription.STATUS_CHOICE_UNSUBSCRIBED, _('Unsubscribed')),
)
status = forms.ChoiceField(choices=STATUS_CHOICES)
class Meta:
model = Subscription
fields = [
'status',
]
def save(self, commit=True):
usersubscription = super().save(commit=False)
usersubscription.status = self.cleaned_data['status']
if commit:
usersubscription.save()
return usersubscription
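# A minimal usage sketch for a view function (the view itself is illustrative and
# not part of this app); it relies only on the standard ModelForm flow and returns
# the bound form rather than an HttpResponse to keep the sketch framework-minimal.
def example_subscription_view(request, subscription):
    """Bind the form to POST data for an existing Subscription and save it."""
    form = SubscriptionForm(request.POST or None, instance=subscription)
    if request.method == 'POST' and form.is_valid():
        form.save()  # persists the chosen status via save() above
    return form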
|
py
|
1a5ab6869c9bcfe23ec67cbf8003584a8bc6ef49
|
# pylint: skip-file
# flake8: noqa
# pylint: disable=wrong-import-position,too-many-branches,invalid-name
import json
from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
# NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
args = ['atomic', 'install', '--system', '--system-package=no',
'--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
else:
changed = "Extracting" in out
return rc, out, err, changed
def _uninstall(module, name):
''' uninstall an atomic container by its name. '''
args = ['atomic', 'uninstall', name]
rc, out, err = module.run_command(args, check_rc=False)
return rc, out, err, False
def do_install(module, container, image, values_list):
''' install a container and exit the module. '''
rc, out, err, changed = _install(module, container, image, values_list)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
module.exit_json(msg=out, changed=changed)
def do_uninstall(module, name):
''' uninstall a container and exit the module. '''
rc, out, err, changed = _uninstall(module, name)
if rc != 0:
module.fail_json(rc=rc, msg=err)
module.exit_json(msg=out, changed=changed)
def do_update(module, container, old_image, image, values_list):
''' update a container and exit the module. If the container uses a different
image than the current installed one, then first uninstall the old one '''
# the image we want is different than the installed one
if old_image != image:
rc, out, err, _ = _uninstall(module, container)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return do_install(module, container, image, values_list)
# if the image didn't change, use "atomic containers update"
args = ['atomic', 'containers', 'update'] + values_list + [container]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Extracting" in out
module.exit_json(msg=out, changed=changed)
def do_rollback(module, name):
''' move to the previous deployment of the container, if present, and exit the module. '''
args = ['atomic', 'containers', 'rollback', name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
else:
changed = "Rolling back" in out
module.exit_json(msg=out, changed=changed)
def core(module):
''' entrypoint for the module. '''
name = module.params['name']
image = module.params['image']
values = module.params['values']
state = module.params['state']
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
out = {}
err = {}
rc = 0
values_list = ["--set=%s" % x for x in values] if values else []
args = ['atomic', 'containers', 'list', '--json', '--all', '-f', 'container=%s' % name]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
module.fail_json(rc=rc, msg=err)
return
# NOTE: "or '[]' is a workaround until atomic containers list --json
# provides an empty list when no containers are present.
containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
if state == 'present' and present:
module.exit_json(msg=out, changed=False)
elif (state in ['latest', 'present']) and not present:
do_install(module, name, image, values_list)
elif state == 'latest':
do_update(module, name, old_image, image, values_list)
elif state == 'absent':
if not present:
module.exit_json(msg="", changed=False)
else:
do_uninstall(module, name)
elif state == 'rollback':
do_rollback(module, name)
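# Example of the parameters core() works with (all values are illustrative), and the
# command _install() would build from them:
#   module.params == {'name': 'etcd', 'image': 'registry.example.com/etcd:latest',
#                     'state': 'latest', 'values': ['ETCD_DATA_DIR=/var/lib/etcd']}
#   -> atomic install --system --system-package=no --name=etcd \
#        --set=ETCD_DATA_DIR=/var/lib/etcd registry.example.com/etcd:latest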
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, required=True),
image=dict(default=None, required=True),
state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
values=dict(type='list', default=[]),
),
)
# Verify that the platform supports atomic command
rc, _, err = module.run_command('atomic -v', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
try:
core(module)
except Exception as e: # pylint: disable=broad-except
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
py
|
1a5ab81b96f2891d0341e3959e9e22536793724c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'han'
import os
import re
import zipfile
import spacy
import json
import h5py
import logging
import numpy as np
from functools import reduce
from utils.functions import pad_sequences
from .doc_text import DocText, Space
logger = logging.getLogger(__name__)
class PreprocessData:
"""
preprocess dataset and glove embedding to hdf5 files
"""
padding = '__padding__' # id = 0
padding_idx = 0 # all the features padding idx, exclude answer_range
answer_padding_idx = -1
_compress_option = dict(compression="gzip", compression_opts=9, shuffle=False)
def __init__(self, global_config):
# data config
self._dev_path = ''
self._train_path = ''
self._export_squad_path = ''
self._glove_path = ''
self._embedding_size = 300
self._ignore_max_len = 10000
self._load_config(global_config)
# preprocess config
self._max_answer_len = 0
# temp data
self._word2id = {self.padding: 0}
self._char2id = {self.padding: 0, '`': 1} # because nltk word tokenize will replace '"' with '``'
self._pos2id = {self.padding: 0}
self._ent2id = {self.padding: 0}
self._word2vec = {self.padding: [0. for i in range(self._embedding_size)]}
self._oov_num = 0
# data need to store in hdf5 file
self._meta_data = {'id2vec': [[0. for i in range(self._embedding_size)]],
'id2word': [self.padding],
'id2char': [self.padding, '`'],
'id2pos': [self.padding],
'id2ent': [self.padding]}
self._data = {}
self._attr = {}
self._nlp = spacy.load('en')
self._nlp.remove_pipe('parser')
if not any([self._use_em_lemma, self._use_pos, self._use_ent]):
self._nlp.remove_pipe('tagger')
if not self._use_ent:
self._nlp.remove_pipe('ner')
def _load_config(self, global_config):
"""
load config from a dictionary, such as dataset path
:param global_config: dictionary
:return:
"""
data_config = global_config['data']
self._train_path = data_config['dataset']['train_path']
self._dev_path = data_config['dataset']['dev_path']
self._export_squad_path = data_config['dataset_h5']
self._glove_path = data_config['embedding_path']
self.preprocess_config = global_config['preprocess']
self._ignore_max_len = self.preprocess_config['ignore_max_len']
self._use_char = self.preprocess_config['use_char']
self._use_pos = self.preprocess_config['use_pos']
self._use_ent = self.preprocess_config['use_ent']
self._use_em = self.preprocess_config['use_em']
self._use_em_lemma = self.preprocess_config['use_em_lemma']
self._embedding_size = int(self.preprocess_config['word_embedding_size'])
def _read_json(self, path):
"""
read json format file from raw squad text
:param path: squad file path
:return:
"""
with open(path, 'r') as f:
data = json.load(f)
version = data['version']
data_list_tmp = [ele['paragraphs'] for ele in data['data']]
contexts_qas = reduce(lambda a, b: a + b, data_list_tmp)
self._attr['dataset_name'] = 'squad-' + version
return contexts_qas
def _build_data(self, contexts_qas, training):
"""
handle squad data to (context, question, answer_range) with word id representation
:param contexts_qas: a context with several question-answers
:return:
"""
contexts_doc = []
questions_doc = []
answers_range_wid = []  # each answer uses the [start, end] representation; all answers for a question are concatenated horizontally
samples_id = []
cnt = 0
# every context
for question_grp in contexts_qas:
cur_context = question_grp['context']
cur_qas = question_grp['qas']
cur_context_doc = DocText(self._nlp, cur_context, self.preprocess_config)
if training and len(cur_context_doc) > self._ignore_max_len: # some context token len too large
continue
if self._use_char:
self._update_to_char(cur_context)
cur_context_ids = self._doctext_to_id(cur_context_doc)
# every question-answer
for qa in cur_qas:
cur_question = qa['question']
if self._use_char:
self._update_to_char(cur_question)
cur_question_doc = DocText(self._nlp, cur_question, self.preprocess_config)
cur_question_ids = self._doctext_to_id(cur_question_doc)
# get em feature
if self._use_em or self._use_em_lemma:
cur_context_doc.update_em(cur_question_doc)
cur_question_doc.update_em(cur_context_doc)
cur_context_ids['em'] = cur_context_doc.em
cur_context_ids['em_lemma'] = cur_context_doc.em_lemma
cur_question_ids['em'] = cur_question_doc.em
cur_question_ids['em_lemma'] = cur_question_doc.em_lemma
contexts_doc.append(cur_context_ids)
questions_doc.append(cur_question_ids)
samples_id.append(qa['id'])
# find all the answer positions
cur_answers = qa['answers']
self._max_answer_len = max(self._max_answer_len, len(cur_answers) * 2)
cur_ans_range_ids = [0 for i in range(len(cur_answers) * 2)]
for idx, cur_ans in enumerate(cur_answers):
cur_ans_start = cur_ans['answer_start']
cur_ans_text = cur_ans['text']
pos_s, pos_e = self.find_ans_start_end(cur_context, cur_context_doc, cur_ans_text, cur_ans_start)
if pos_e < pos_s:
logger.error("Answer start position can't bigger than end position." +
"\nContext:" + cur_context +
"\nQuestion:" + cur_question +
"\nAnswer:" + cur_ans_text)
continue
gen_ans = ''.join(cur_context_doc.token[pos_s:(pos_e + 1)]).replace(' ', '')
true_ans = Space.remove_white_space(cur_ans['text'])
if true_ans not in gen_ans:
logger.error("Answer position wrong." +
"\nContext:" + cur_context +
"\nQuestion:" + cur_question +
"\nAnswer:" + cur_ans_text)
continue
cur_ans_range_ids[(idx * 2):(idx * 2 + 2)] = [pos_s, pos_e]
answers_range_wid.append(cur_ans_range_ids)
cnt += 1
if cnt % 100 == 0:
logger.info('No.%d sample handled.' % cnt)
return {'context': contexts_doc,
'question': questions_doc,
'answer_range': answers_range_wid,
'samples_id': samples_id}
def find_ans_start_end(self, context_text, context_doc, answer_text, answer_start):
# find answer start position
pre_ans_len = len(Space.remove_white_space(context_text[:answer_start]))
tmp_len = 0
pos_s = 0
for i in range(len(context_doc)):
tmp_len += len(context_doc.token[i])
if tmp_len > pre_ans_len:
pos_s = i
break
# find answer end position
pos_e = 0
tmp_str = ""
tmp_ans = Space.remove_white_space(answer_text)
if tmp_ans[0] == '.':  # the SQuAD dataset has some mistakes
tmp_ans = tmp_ans[1:]
for i in range(pos_s, len(context_doc)):
s = context_doc.token[i]
tmp_str += s
if tmp_ans in tmp_str:
pos_e = i
break
return pos_s, pos_e
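# Worked example (assuming the DocText tokens are plain whitespace tokens here):
#   context_text = "The cat sat", context_doc.token = ['The', 'cat', 'sat']
#   answer_text = "cat", answer_start = 4
#   pre_ans_len = len("The") = 3; cumulative token lengths are 3, 6 -> pos_s = 1
#   accumulating tokens from pos_s, "cat" already contains the answer -> pos_e = 1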
def _doctext_to_id(self, doc_text):
"""
transform a sentence to word index id representation
:param doc_text: DocText
:return: word ids
"""
sentence = {'token': [], 'pos': [], 'ent': [], 'right_space': doc_text.right_space}
for i in range(len(doc_text)):
# word
word = doc_text.token[i]
if word not in self._word2id:
self._word2id[word] = len(self._word2id)
self._meta_data['id2word'].append(word)
# whether OOV
if word in self._word2vec:
self._meta_data['id2vec'].append(self._word2vec[word])
else:
self._oov_num += 1
logger.debug('No.%d OOV word %s' % (self._oov_num, word))
self._meta_data['id2vec'].append([0. for i in range(self._embedding_size)])
sentence['token'].append(self._word2id[word])
# pos
if self._use_pos:
pos = doc_text.pos[i]
if pos not in self._pos2id:
self._pos2id[pos] = len(self._pos2id)
self._meta_data['id2pos'].append(pos)
sentence['pos'].append(self._pos2id[pos])
# ent
if self._use_ent:
ent = doc_text.ent[i]
if ent not in self._ent2id:
self._ent2id[ent] = len(self._ent2id)
self._meta_data['id2ent'].append(ent)
sentence['ent'].append(self._ent2id[ent])
return sentence
def _update_to_char(self, sentence):
"""
update char2id
:param sentence: raw sentence
"""
for ch in sentence:
if ch not in self._char2id:
self._char2id[ch] = len(self._char2id)
self._meta_data['id2char'].append(ch)
def _handle_glove(self):
"""
load glove embeddings into a dictionary
:return:
"""
logger.info("read glove from text file %s" % self._glove_path)
with zipfile.ZipFile(self._glove_path, 'r') as zf:
if len(zf.namelist()) != 1:
raise ValueError('glove file "%s" not recognized' % self._glove_path)
glove_name = zf.namelist()[0]
word_num = 0
with zf.open(glove_name) as f:
for line in f:
line_split = line.decode('utf-8').split(' ')
self._word2vec[line_split[0]] = [float(x) for x in line_split[1:]]
word_num += 1
if word_num % 10000 == 0:
logger.info('handle word No.%d' % word_num)
def _export_squad_hdf5(self):
"""
export squad dataset to hdf5 file
:return:
"""
f = h5py.File(self._export_squad_path, 'w')
# variable-length string dtype (Python 2 used vlen=unicode)
str_dt = h5py.special_dtype(vlen=str)
# attributes
for attr_name in self._attr:
f.attrs[attr_name] = self._attr[attr_name]
# meta_data
f_meta_data = f.create_group('meta_data')
for key in ['id2word', 'id2char', 'id2pos', 'id2ent']:
value = np.array(self._meta_data[key], dtype=np.str)
meta_data = f_meta_data.create_dataset(key, value.shape, dtype=str_dt, **self._compress_option)
meta_data[...] = value
id2vec = np.array(self._meta_data['id2vec'], dtype=np.float32)
meta_data = f_meta_data.create_dataset('id2vec', id2vec.shape, dtype=id2vec.dtype, **self._compress_option)
meta_data[...] = id2vec
# data
f_data = f.create_group('data')
for key, value in self._data.items():
data_grp = f_data.create_group(key)
for sub_key, sub_value in value.items():
if isinstance(sub_value, dict):
sub_grp = data_grp.create_group(sub_key)
for subsub_key, subsub_value in sub_value.items():
if len(subsub_value) == 0:
continue
cur_dtype = str_dt if subsub_value.dtype.type is np.str_ else subsub_value.dtype
data = sub_grp.create_dataset(subsub_key, subsub_value.shape, dtype=cur_dtype,
**self._compress_option)
data[...] = subsub_value
else:
cur_dtype = str_dt if sub_value.dtype.type is np.str_ else sub_value.dtype
data = data_grp.create_dataset(sub_key, sub_value.shape, dtype=cur_dtype,
**self._compress_option)
data[...] = sub_value
f.flush()
f.close()
def run(self):
"""
main function to generate hdf5 file
:return:
"""
logger.info('handle glove file...')
self._handle_glove()
logger.info('read squad json...')
train_context_qas = self._read_json(self._train_path)
dev_context_qas = self._read_json(self._dev_path)
logger.info('transform word to id...')
train_cache_nopad = self._build_data(train_context_qas, training=True)
dev_cache_nopad = self._build_data(dev_context_qas, training=False)
self._attr['train_size'] = len(train_cache_nopad['answer_range'])
self._attr['dev_size'] = len(dev_cache_nopad['answer_range'])
self._attr['word_dict_size'] = len(self._word2id)
self._attr['char_dict_size'] = len(self._char2id)
self._attr['pos_dict_size'] = len(self._pos2id)
self._attr['ent_dict_size'] = len(self._ent2id)
self._attr['embedding_size'] = self._embedding_size
self._attr['oov_word_num'] = self._oov_num
logger.info('padding id vectors...')
self._data['train'] = {
'context': dict2array(train_cache_nopad['context']),
'question': dict2array(train_cache_nopad['question']),
'answer_range': np.array(train_cache_nopad['answer_range']),
'samples_id': np.array(train_cache_nopad['samples_id'])
}
self._data['dev'] = {
'context': dict2array(dev_cache_nopad['context']),
'question': dict2array(dev_cache_nopad['question']),
'answer_range': pad_sequences(dev_cache_nopad['answer_range'],
maxlen=self._max_answer_len,
padding='post',
value=self.answer_padding_idx),
'samples_id': np.array(dev_cache_nopad['samples_id'])
}
logger.info('export to hdf5 file...')
self._export_squad_hdf5()
logger.info('finished.')
def dict2array(data_doc):
"""
transform dict to numpy array
:param data_doc: [{'token': [], 'pos': [], 'ent': [], 'em': [], 'em_lemma': [], 'right_space': []}]
:return:
"""
data = {'token': [], 'pos': [], 'ent': [], 'em': [], 'em_lemma': [], 'right_space': []}
max_len = 0
for ele in data_doc:
assert ele.keys() == data.keys()
if len(ele['token']) > max_len:
max_len = len(ele['token'])
for k in ele.keys():
if len(ele[k]) > 0:
data[k].append(ele[k])
for k in data.keys():
if len(data[k]) > 0:
data[k] = pad_sequences(data[k],
maxlen=max_len,
padding='post',
value=PreprocessData.padding_idx)
return data
|
py
|
1a5ab986605de812ee497a523f25e1d809de24fc
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
`generalFunctions.py`
=================
Containing general purpose Python functions for small bits of manipulation.
Import it: import generalFunctions
Depends
=======
datetime
'''
import datetime
def empty(string):
if string in ['', ' ', None]:
return True
return False
def formatInteger(integerstring):
if empty(integerstring):
return None
return int(integerstring)
def formatString(string):
# NOTE: be careful stripping encoded strings, which may have empty values
# representing unknown values for particular fields
if empty(string):
return None
return string
def formatDate(datestring):
'''Returns a datetime.date object when given a date as a string of the form
DD/MM/YYYY (e.g. 30/01/2014)'''
if empty(datestring):
return None
else:
try:
return datetime.datetime.strptime(datestring, "%d/%m/%Y").date()
except ValueError:
# Poorly formatted date in source data
return None
def ordinal(n):
return str(n)+("th" if 4<=n%100<=20 else {1:"st",2:"nd",3:"rd"}.get(n%10, "th"))
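# e.g. ordinal(1) == '1st', ordinal(2) == '2nd', ordinal(11) == '11th', ordinal(21) == '21st'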
def formatNiceDate(datetime):
'''Takes a datetime.datetime (e.g. datetime(2014,1,1)) and returns a nice
string representation (e.g. "1st January 2014")'''
if empty(datetime):
return None
return ordinal(datetime.day) + " %s %d" % (datetime.strftime("%B"), datetime.year)
def formatNiceTime(time):
'''Takes a datetime.time (e.g. time(12,0,0)) and returns a nice string representation
(e.g. 12:00). Seconds are ignored, and not even considered for rounding.'''
if empty(time):
return ''
t = str(time).split(":")
return "%s:%s" % (t[0],t[1])
def formatCrashTime(crashtime, dateobj):
'''Returns a datetime.time object when given a time as a string from the
`row`. These are purportedly recorded "in 24-hour time", but are lacking
leading zeros in the dataset, which is addressed here.'''
if empty(crashtime):
return None
return datetime.datetime.strptime(str(dateobj)+" "+'0'*(4-len(crashtime))+crashtime,'%Y-%m-%d %H%M').time()
def check_offroad(crash_road):
'''Applies a check for 'Z': the flag for the off-road indicator, and corrects
strings representing these places so that they're a bit nicer to read.'''
if 'Z' in crash_road.split(' '):
# The crash was off-road
# Apply some special formatting to make this read nicely
# 1. Remove the now-superfluous 'Z'
crash_road = crash_road.split(' ')
crash_road.remove('Z')
# 2. Special exception for the use of 'Beach' at the beginning of some locations
if crash_road[0] == 'Beach' and len(crash_road) > 1:
crash_road = crash_road[1:] + [crash_road[0]]
#. 3. Expand the off-road abbreviations
patterns = {'CPK': 'Carpark',
'BCH': 'Beach',
'DWY': 'Driveway',
'DWAY': 'Driveway',
'FCT': 'Forecourt'}
for i, r in enumerate(crash_road):
if r.upper() in patterns.keys():
crash_road = crash_road[:i] + crash_road[i+1:] + [patterns[r.upper()], '(off-roadway)']
break
# Join it back up to a proper description
crash_road = ' '.join(crash_road)
return crash_road
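# Worked example of the rewriting above:
#   check_offroad('Beach Z CPK') -> 'Beach Carpark (off-roadway)'
#   ('Z' is removed, 'CPK' is expanded to 'Carpark' and tagged as off-roadway)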
def streetExpander(road,streetdecoder):
'''Input: 'St John St' (for example)
Output: St John Street'''
# First, check St isn't the first element in a street name
check = road.replace('near ','').replace('at ','')
if check.split(' ')[0] == 'St' and 'St' not in check.split(' ')[1:]:
# Then don't replace the St, as it means Saint and there is no Street in the title
return road
# Otherwise, there are two instances of "St" and we want to only replace the second one
road = road.split(' ')
processed = []
road.reverse() # Flip order
for i, elem in enumerate(road):
# Do it in reverse so only the last instance of a road-shortening trope
# gets expanded. This prevents "St John St" becoming "Street John
# Street" rather than "St John Street"
if (elem in streetdecoder.keys()) and (elem not in processed):
processed.append(elem)
road[i] = streetdecoder[elem]
road.reverse() # Back to original order
return ' '.join(road)
def formatNiceRoad(road):
'''Takes a location expressed as a road, or a street or a highway... and
makes some cosmetic changes. This includes taking State Highway linear
references and returning something understandable to people.
Listed expressions from the documentation:
CPK = car park
BCH = beach
DWY = driveway
DWAY = driveway'''
def striplinearref(linref):
'''Fixes references to State Highways, by removing the linear referencing information'''
if '/' not in linref:
# Not a SH
return linref
elif '/' in linref:
try:
int(linref[0])
except:
# Not a SH, just has a slash
return linref
# Remaining are State Highways
if len(linref.split(' ')) > 1 and ' at ' not in linref:
# There is other location information included
linref = linref.split(' ')[0] + ' (%s)' % ' '.join(linref.split(' ')[1:]).replace(' SH ',' State Highway ')
if ' at ' not in linref:
# SH without an intersection
SH = linref.split(' ')
SH = "State Highway %s " % SH[0].split('/')[0] + ' '.join(SH[1:])
else:
# SH with an intersection
linref = linref.split(' at ')
linref = [linref[0],'at',linref[1]]
for i, r in enumerate(linref):
if '/' in r:
linref[i] = "State Highway %s" % r.split('/')[0]
SH = ' '.join(linref)
return SH
def expander(road):
'''Takes `road' (street of crash as ordered list of strings) and runs
them past checks for acronyms and abbreviations known to exist in the data.
Acronyms are kept as acronyms, and abbreviations are expanded. Returns a
string (not the list), joined with spaces.'''
knownAcronyms = ['BP', 'VTNZ'] # Ensure acronyms stay acronyms
knownAbbreviations = {'Coun': 'Countdown',
'C/Down': 'Countdown',
'Reserv': 'Reserve',
'Stn': 'Station',
'Roa': 'Road',
'S': 'South',
'E': 'East',
'W': 'West',
'N': 'North',
'Riv': 'River',
'Br': 'Bridge',
'Wbd': 'Westbound',
'Ebd': 'Eastbound',
'Nbd': 'Northbound',
'Sbd': 'Southbound',
'Obr': 'Overbridge',
'Off': 'Off-ramp',
'On': 'On-ramp',
'Xing': 'Crossing',
'Mckays': 'McKays',
'Rly': 'Railway',
'Int': 'Interchange'}
for i, r in enumerate(road):
# Check for "knownAbbreviations" requires no brackets around term
rd, left, right = r, False, False
if '(' in rd:
left = True
rd = rd.replace('(','')
if ')' in rd:
right = True
rd = rd.replace(')','')
# Check acronyms
if rd.upper() in knownAcronyms:
rd = rd.upper()
# Check abbreviations
if rd.title() in knownAbbreviations.keys():
rd = knownAbbreviations[rd.title()]
# Put brackets back, if neccessary
if left:
rd = '(%s' % rd
if right:
rd = '%s)' % rd
# Update the element in road with the changes
road[i] = rd
# Join road to a single string and return
return ' '.join(road)
return expander(striplinearref(road).split(' '))
def formatStringList(listofstrings, delim=None):
'''Returns a list of strings given a string representation of a list data
structure, separated by `delim`.
Example:
input: '308A 371A 727B 929'
output: ['308A', '371A', '727B', '929']
If delim is None, each character of the string is assumed to be an
independent value'''
if listofstrings == None or listofstrings == []:
return None
if delim != None:
return [str(s) for s in listofstrings.split(delim) if not empty(s)]
elif delim == None:
return list(listofstrings)
def round_down(integer, base):
'''Rounds an `integer` down to the nearest `base`
E.g. round_down(19,10) >>> 10
round_down(19,5) >>> 15
round_down(10,10) >>> 10'''
return integer - (integer % base)
def grammar(singular, plural, integer):
'''Returns the string `singular` if integer == 1; otherwise returns `plural`.
Example:
grammar('person', 'people', 1) >>> 'person'
grammar('person', 'people', 3) >>> 'people'
'''
if integer == 1:
return singular
else:
return plural
|
py
|
1a5ab99b5a94f96e83773ce88863ee03873f6cd9
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
Script to generate contributor and pull request lists
This script generates contributor and pull request lists for release
announcements using Github v3 protocol. Use requires an authentication token in
order to have sufficient bandwidth, you can get one following the directions at
`<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>_
Don't add any scope, as the default is read access to public information. The
token may be stored in an environment variable as you only get one chance to
see it.
Usage::
$ ./scripts/announce.py <token> <revision range>
The output is utf8 rst.
Dependencies
------------
- gitpython
- pygithub
Some code was copied from scipy `tools/gh_lists.py` and `tools/authors.py`.
Examples
--------
From the bash command line with $GITHUB token.
$ ./scripts/announce.py $GITHUB v1.11.0..v1.11.1 > announce.rst
"""
import codecs
import os
import re
import textwrap
from git import Repo
UTF8Writer = codecs.getwriter("utf8")
this_repo = Repo(os.path.join(os.path.dirname(__file__), "..", ".."))
author_msg = """\
A total of %d people contributed patches to this release. People with a
"+" by their names contributed a patch for the first time.
"""
pull_request_msg = """\
A total of %d pull requests were merged for this release.
"""
def get_authors(revision_range):
pat = "^.*\\t(.*)$"
lst_release, cur_release = [r.strip() for r in revision_range.split("..")]
# authors, in current release and previous to current release.
cur = set(re.findall(pat, this_repo.git.shortlog("-s", revision_range), re.M))
pre = set(re.findall(pat, this_repo.git.shortlog("-s", lst_release), re.M))
# Homu is the author of auto merges, clean him out.
cur.discard("Homu")
pre.discard("Homu")
# Append '+' to new authors.
authors = [s + " +" for s in cur - pre] + [s for s in cur & pre]
authors.sort()
return authors
def get_pull_requests(repo, revision_range):
prnums = []
# From regular merges
merges = this_repo.git.log("--oneline", "--merges", revision_range)
issues = re.findall("Merge pull request \\#(\\d*)", merges)
prnums.extend(int(s) for s in issues)
# From Homu merges (Auto merges)
issues = re.findall("Auto merge of \\#(\\d*)", merges)
prnums.extend(int(s) for s in issues)
# From fast forward squash-merges
commits = this_repo.git.log(
"--oneline", "--no-merges", "--first-parent", revision_range
)
issues = re.findall("^.*\\(\\#(\\d+)\\)$", commits, re.M)
prnums.extend(int(s) for s in issues)
# get PR data from github repo
prnums.sort()
prs = [repo.get_pull(n) for n in prnums]
return prs
def build_components(revision_range, heading="Contributors"):
lst_release, cur_release = [r.strip() for r in revision_range.split("..")]
authors = get_authors(revision_range)
return {
"heading": heading,
"author_message": author_msg % len(authors),
"authors": authors,
}
def build_string(revision_range, heading="Contributors"):
components = build_components(revision_range, heading=heading)
components["uline"] = "=" * len(components["heading"])
components["authors"] = "* " + "\n* ".join(components["authors"])
tpl = textwrap.dedent(
"""\
{heading}
{uline}
{author_message}
{authors}"""
).format(**components)
return tpl
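# For example, with two authors (one of them new) build_string() returns roughly:
#
#   Contributors
#   ============
#   A total of 2 people contributed patches to this release. People with a
#   "+" by their names contributed a patch for the first time.
#
#   * Jane Doe +
#   * John Smith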
def main(revision_range):
# document authors
text = build_string(revision_range)
print(text)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Generate author lists for release")
parser.add_argument("revision_range", help="<revision>..<revision>")
args = parser.parse_args()
main(args.revision_range)
|
py
|
1a5aba322c3e2ae1b47bdd509562890af336e245
|
# -*- coding: utf-8 -*-
"""Pyramid request argument parsing.
Example usage: ::
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
from marshmallow import fields
from webargs.pyramidparser import use_args
hello_args = {
'name': fields.Str(missing='World')
}
@use_args(hello_args)
def hello_world(request, args):
return Response('Hello ' + args['name'])
if __name__ == '__main__':
config = Configurator()
config.add_route('hello', '/')
config.add_view(hello_world, route_name='hello')
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 6543, app)
server.serve_forever()
"""
import collections
import functools
from webob.multidict import MultiDict
from pyramid.httpexceptions import exception_response
from marshmallow.compat import text_type
from webargs import core
class PyramidParser(core.Parser):
"""Pyramid request argument parser."""
__location_map__ = dict(
matchdict='parse_matchdict',
**core.Parser.__location_map__)
def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.GET, name, field)
def parse_form(self, req, name, field):
"""Pull a form value from the request."""
return core.get_value(req.POST, name, field)
def parse_json(self, req, name, field):
"""Pull a json value from the request."""
try:
json_data = req.json_body
except ValueError:
return core.missing
return core.get_value(json_data, name, field, allow_many_nested=True)
def parse_cookies(self, req, name, field):
"""Pull the value from the cookiejar."""
return core.get_value(req.cookies, name, field)
def parse_headers(self, req, name, field):
"""Pull a value from the header data."""
return core.get_value(req.headers, name, field)
def parse_files(self, req, name, field):
"""Pull a file from the request."""
files = ((k, v) for k, v in req.POST.items() if hasattr(v, 'file'))
return core.get_value(MultiDict(files), name, field)
def parse_matchdict(self, req, name, field):
"""Pull a value from the request's `matchdict`."""
return core.get_value(req.matchdict, name, field)
def handle_error(self, error):
"""Handles errors during parsing. Aborts the current HTTP request and
responds with a 400 error.
"""
status_code = getattr(error, 'status_code', 422)
raise exception_response(status_code, detail=text_type(error))
def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
as_kwargs=False, validate=None):
"""Decorator that injects parsed arguments into a view callable.
Supports the *Class-based View* pattern where `request` is saved as an instance
attribute on a view class.
:param dict argmap: Either a `marshmallow.Schema`, a `dict`
of argname -> `marshmallow.fields.Field` pairs, or a callable
which accepts a request and returns a `marshmallow.Schema`.
:param req: The request object to parse. Pulled off of the view by default.
:param tuple locations: Where on the request to search for values.
:param bool as_kwargs: Whether to insert arguments as keyword arguments.
:param callable validate: Validation function that receives the dictionary
of parsed arguments. If the function returns ``False``, the parser
will raise a :exc:`ValidationError`.
"""
locations = locations or self.locations
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
if isinstance(argmap, collections.Mapping):
argmap = core.argmap2schema(argmap)()
def decorator(func):
@functools.wraps(func)
def wrapper(obj, *args, **kwargs):
# The first argument is either `self` or `request`
try: # get self.request
request = req or obj.request
except AttributeError: # first arg is request
request = obj
# NOTE: At this point, argmap may be a Schema, callable, or dict
parsed_args = self.parse(argmap, req=request,
locations=locations, validate=validate,
force_all=as_kwargs)
if as_kwargs:
kwargs.update(parsed_args)
return func(obj, *args, **kwargs)
else:
return func(obj, parsed_args, *args, **kwargs)
return wrapper
return decorator
parser = PyramidParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
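# Example (names are illustrative) of pulling a URL placeholder from Pyramid's
# matchdict with the parser defined above:
#
#     from marshmallow import fields
#
#     @use_kwargs({'user_id': fields.Int(required=True)}, locations=('matchdict',))
#     def user_detail(request, user_id):
#         ...
#
# With a route such as config.add_route('user', '/users/{user_id}'), the
# 'user_id' segment is read by parse_matchdict() and injected as a keyword argument.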
|
py
|
1a5abcb4f5e674bee59e34b0552c3584756f2126
|
a = raw_input()
b = []
for i in range(len(a)):
if ord(a[i]) == 65:
b.append('X')
elif ord(a[i]) == 66:
b.append('Y')
elif ord(a[i]) == 67:
b.append('Z')
else:
b.append(chr(ord(a[i]) - 3))
print "".join(b)
|
py
|
1a5abcefcd67523991b3bb1f96b62146ee3e30a5
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 09:45:44 2016
@author: Arturo
"""
import signal
import sys
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
def interruptHandler(signal, frame):
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, interruptHandler)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
sensortemp = grove.GroveTemp(0)
colorR = 255
colorG = 0
colorB = 0
myLcd.setColor(colorR,colorG,colorB)
# Read the input and print, waiting 1/2 second between readings
while True:
valorSensor = sensortemp.value()
myLcd.setCursor(0,0)
myLcd.write('%6d'% valorSensor)
time.sleep(0.5)
del sensortemp
|
py
|
1a5abe81c096615667d59d5b1854d563dfc5732d
|
#!/usr/bin/env python
"""
This program will generate .pyi files for all the VTK modules
in the "vtkmodules" package (or whichever package you specify).
These files are used for type checking and autocompletion in
some Python IDEs.
The VTK modules must be in Python's path when you run this script.
Options are as follows:
-p PACKAGE The package to generate .pyi files for [vtkmodules]
-o OUTPUT The output directory [default is the package directory]
-e EXT The file suffix [.pyi]
-h HELP
With no arguments, the script runs with the defaults (the .pyi files
are put inside the existing vtkmodules package). This is equivalent
to the following:
vtk_generate_pyi.py -p vtkmodules
To put the pyi files somewhere else, perhaps with a different suffix:
vtk_generate_pyi.py -o /path/to/vtkmodules -e .pyi
To generate pyi files for just one or two modules:
vtk_generate_pyi.py -p vtkmodules vtkCommonCore vtkCommonDataModel
To generate pyi files for your own modules in your own package:
vtk_generate_pyi.py -p mypackage mymodule [mymodule2 ...]
"""
from vtkmodules.vtkCommonCore import vtkObject, vtkSOADataArrayTemplate
import sys
import os
import re
import argparse
import builtins
import inspect
import importlib
# ==== For type inspection ====
# list expected non-vtk type names
types = set()
for m,o in builtins.__dict__.items():
if isinstance(o, type):
types.add(m)
for m in ['Any', 'Buffer', 'Callback', 'None', 'Pointer', 'Template', 'Union']:
types.add(m)
# basic type checking methods
ismethod = inspect.isroutine
isclass = inspect.isclass
isnamespace = inspect.ismodule
# VTK methods have a special type
vtkmethod = type(vtkObject.IsA)
template = type(vtkSOADataArrayTemplate)
def isvtkmethod(m):
"""Check for VTK's custom method descriptor"""
return (type(m) == vtkmethod)
def isenum(m):
"""Check for enums (currently derived from int)"""
return (isclass(m) and issubclass(m, int))
def typename(o):
"""Generate a typename that can be used for annotation."""
if o is None:
return "None"
elif type(o) == template:
return "Template"
else:
return type(o).__name__
def typename_forward(o):
"""Generate a typename, or if necessary, a forward reference."""
name = typename(o)
if name not in types:
# do forward reference by adding quotes
name = '\'' + name + '\''
return name
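# e.g. typename_forward(1) returns 'int' (a known builtin type name), while an
# instance of a VTK class such as vtkObject yields the quoted forward reference
# "'vtkObject'" so the .pyi file can mention it before it is defined.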
# ==== For the topological sort ====
class Graph:
"""A graph for topological sorting."""
def __init__(self):
self.nodes = {}
def __getitem__(self, name):
return self.nodes[name]
def __setitem__(self, name, node):
self.nodes[name] = node
class Node:
"""A node for the graph."""
def __init__(self, o, d):
self.obj = o
self.deps = d
def build_graph(d):
"""Build a graph from a module's dictionary."""
graph = Graph()
items = sorted(d.items())
for m,o in items:
if isclass(o):
if m == o.__name__:
# a class definition
bases = [b.__name__ for b in o.__bases__]
graph[m] = Node(o, bases)
else:
# a class alias
graph[m] = Node(o, [o.__name__])
elif ismethod(o):
graph[m] = Node(o, [])
else:
graph[m] = Node(o, [typename(o)])
return graph
def sorted_graph_helper(graph, m, visited, items):
"""Helper for topological sorting."""
visited.add(m)
try:
node = graph[m]
except KeyError:
return
for dep in node.deps:
if dep not in visited:
sorted_graph_helper(graph, dep, visited, items)
items.append((m, node.obj))
def sorted_graph(graph):
"""Sort a graph and return the sorted items."""
items = []
visited = set()
for m in graph.nodes:
if m not in visited:
sorted_graph_helper(graph, m, visited, items)
return items
def topologically_sorted_items(d):
"""Return the items from a module's dictionary, topologically sorted."""
return sorted_graph(build_graph(d))
# ==== For parsing docstrings ====
# regular expressions for parsing
string = re.compile(r"""("([^\\"]|\\.)*"|'([^\\']|\\.)*')""")
identifier = re.compile(r"""[ \t]*([A-Za-z_]([A-Za-z0-9_]|[.][A-Za-z_])*)""")
indent = re.compile(r"[ \t]+\S")
has_self = re.compile(r"[(]self[,)]")
# important characters for rapidly parsing code
keychar = re.compile(r"[\'\"{}\[\]()>:\n]")
def parse_error(message, text, begin, pos):
"""Print a parse error, syntax or otherwise.
"""
end = text.find('\n', pos)
if end == -1:
end = len(text)
sys.stderr.write("Error: " + message + ":\n")
sys.stderr.write(text[begin:end] + "\n");
sys.stderr.write('-' * (pos - begin) + "^\n")
def push_signature(o, l, signature):
"""Process a method signature and add it to the list.
"""
signature = re.sub(r"\s+", " ", signature)
if signature.startswith('C++:'):
# if C++ method is static, mark Python signature static
if isvtkmethod(o) and signature.find(" static ") != -1 and len(l) > 0:
if not l[-1].startswith("@staticmethod"):
l[-1] = "@staticmethod\n" + l[-1]
elif signature.startswith(o.__name__ + "("):
if isvtkmethod(o) and not has_self.search(signature):
if not signature.startswith("@staticmethod"):
signature = "@staticmethod\n" + signature
l.append(signature)
def get_signatures(o):
"""Return a list of method signatures found in the docstring.
"""
doc = o.__doc__
signatures = [] # output method signatures
begin = 0 # beginning of current signature
pos = 0 # current position in docstring
delim_stack = [] # keep track of bracket depth
# loop through docstring using longest strides possible
# (this will go line-by-line or until first ( ) { } [ ] " ' : >)
while pos < len(doc):
# look for the next "character of interest" in the docstring
match = keychar.search(doc, pos)
# did we find a match before the end of docstring?
if match:
# get new position
pos,end = match.span()
# take different action, depending on char
c = match.group()
if c in '\"\'':
# skip over a string literal
m = string.match(doc, pos)
if m:
pos,end = m.span()
else:
parse_error("Unterminated string", doc, begin, pos)
break
elif c in '{[(':
# descend into a bracketed expression (push stack)
delim_stack.append({'{':'}','[':']','(':')'}[c])
elif c in '}])':
# ascend out of a bracketed expression (pop stack)
if not delim_stack or c != delim_stack.pop():
parse_error("Unmatched bracket", doc, begin, pos)
break
elif c == ':' or (c == '>' and doc[pos-1] == '-'):
# what follows is a type
m = identifier.match(doc, pos+1)
if m:
pos,end = m.span(1)
name = m.group(1)
if name not in types:
# quote the type
doc = doc[0:pos] + ('\'' + name + '\'') + doc[end:]
end += 2
elif c == '\n' and not (delim_stack or indent.match(doc, end)):
# a newline not followed by an indent marks end of signature,
# except for within brackets
signature = doc[begin:pos].strip()
if signature and signature not in signatures:
push_signature(o, signatures, signature)
begin = end
else:
# blank line means no more signatures in docstring
break
else:
# reached the end of the docstring
end = len(doc)
if not delim_stack:
signature = doc[begin:pos].strip()
if signature and signature not in signatures:
push_signature(o, signatures, signature)
else:
parse_error("Unmatched bracket", doc, begin, pos)
break
# advance position within docstring and return to head of loop
pos = end
return signatures
def get_constructors(c):
"""Get constructors from the class documentation.
"""
constructors = []
name = c.__name__
doc = c.__doc__
if not doc or not doc.startswith(name + "("):
return constructors
signatures = get_signatures(c)
for signature in signatures:
if signature.startswith(name + "("):
signature = re.sub("-> \'?" + name + "\'?", "-> None", signature)
if signature.startswith(name + "()"):
constructors.append(re.sub(name + r"\(", "__init__(self", signature, 1))
else:
constructors.append(re.sub(name + r"\(", "__init__(self, ", signature, 1))
return constructors
def add_indent(s, indent):
"""Add the given indent before every line in the string.
"""
return indent + re.sub(r"\n(?=([^\n]))", "\n" + indent, s)
def make_def(s, indent):
"""Generate a method definition stub from the signature and an indent.
The indent is a string (tabs or spaces).
"""
pos = 0
out = ""
while pos < len(s) and s[pos] == '@':
end = s.find('\n', pos) + 1
if end == 0:
end = len(s)
out += indent
out += s[pos:end]
pos = end
if pos < len(s):
out += indent
out += "def "
out += s[pos:]
out += ": ..."
return out
def namespace_pyi(c, mod):
"""Fake a namespace by creating a dummy class.
"""
base = "namespace"
if mod.__name__ != 'vtkmodules.vtkCommonCore':
base = 'vtkmodules.vtkCommonCore.' + base
out = "class " + c.__name__ + "(" + base + "):\n"
count = 0
# do all nested classes (these will be enum types)
items = topologically_sorted_items(c.__dict__)
others = []
for m,o in items:
if isenum(o) and m == o.__name__:
out += add_indent(class_pyi(o), " ")
count += 1
else:
others.append((m, o))
# do all constants
items = others
others = []
for m,o in items:
if not m.startswith("__") and not ismethod(o) and not isclass(o):
out += " " + m + ":" + typename_forward(o) + "\n"
count += 1
else:
others.append((m,o))
if count == 0:
out = out[0:-1] + " ...\n"
return out
def class_pyi(c):
"""Generate all the method stubs for a class.
"""
bases = []
for b in c.__bases__:
if b.__module__ in (c.__module__, 'builtins'):
bases.append(b.__name__)
else:
bases.append(b.__module__ + "." + b.__name__)
out = "class " + c.__name__ + "(" + ", ".join(bases) + "):\n"
count = 0
# do all nested classes (these are usually enum types)
items = topologically_sorted_items(c.__dict__)
others = []
for m,o in items:
if isclass(o) and m == o.__name__:
out += add_indent(class_pyi(o), " ")
count += 1
else:
others.append((m, o))
# do all constants
items = others
others = []
for m,o in items:
if not m.startswith("__") and not ismethod(o) and not isclass(o):
out += " " + m + ":" + typename_forward(o) + "\n"
count += 1
else:
others.append((m,o))
# do the __init__ methods
constructors = get_constructors(c)
if len(constructors) == 0:
#if hasattr(c, "__init__") and not issubclass(c, int):
# out += " def __init__() -> None: ...\n"
# count += 1
pass
else:
count += 1
if len(constructors) == 1:
out += make_def(constructors[0], " ") + "\n"
else:
for overload in constructors:
out += make_def("@overload\n" + overload, " ") + "\n"
# do the methods
items = others
others = []
for m,o in items:
if ismethod(o):
signatures = get_signatures(o)
if len(signatures) == 0:
continue
count += 1
if len(signatures) == 1:
out += make_def(signatures[0], " ") + "\n"
continue
for overload in signatures:
out += make_def("@overload\n" + overload, " ") + "\n"
else:
others.append((m, o))
if count == 0:
out = out[0:-1] + " ...\n"
return out
def module_pyi(mod, output):
"""Generate the contents of a .pyi file for a VTK module.
"""
# needed stuff from typing module
output.write("from typing import overload, Any, Callable, TypeVar, Union\n\n")
output.write("Callback = Union[Callable[..., None], None]\n")
output.write("Buffer = TypeVar('Buffer')\n")
output.write("Pointer = TypeVar('Pointer')\n")
output.write("Template = TypeVar('Template')\n")
output.write("\n")
if mod.__name__ == 'vtkmodules.vtkCommonCore':
# dummy superclass for namespaces
output.write("class namespace: pass\n")
output.write("\n")
# all the modules this module depends on
depends = set(['vtkmodules.vtkCommonCore'])
for m,o in mod.__dict__.items():
if isclass(o) and m == o.__name__:
for base in o.__bases__:
depends.add(base.__module__)
depends.discard(mod.__name__)
depends.discard("builtins")
for depend in sorted(depends):
output.write("import " + depend + "\n")
if depends:
output.write("\n")
# sort the dict according to dependency
items = topologically_sorted_items(mod.__dict__)
# do all namespaces
others = []
for m,o in items:
if isnamespace(o) and m == o.__name__:
output.write(namespace_pyi(o, mod))
output.write("\n")
else:
others.append((m, o))
# do all enum types
items = others
others = []
for m,o in items:
if isenum(o) and m == o.__name__:
output.write(class_pyi(o))
output.write("\n")
else:
others.append((m, o))
# do all enum aliases
items = others
others = []
for m,o in items:
if isenum(o) and m != o.__name__:
output.write(m + " = " + o.__name__ + "\n")
else:
others.append((m, o))
# do all constants
items = others
others = []
for m,o in items:
if not m.startswith("__") and not ismethod(o) and not isclass(o):
output.write(m + ":" + typename_forward(o) + "\n")
else:
others.append((m,o))
if len(items) > len(others):
output.write("\n")
# do all classes
items = others
others = []
for m,o in items:
if isclass(o) and m == o.__name__:
output.write(class_pyi(o))
output.write("\n")
else:
others.append((m, o))
# do all class aliases
items = others
others = []
for m,o in items:
if isclass(o) and m != o.__name__:
output.write(m + " = " + o.__name__ + "\n")
else:
others.append((m, o))
def main(argv=sys.argv):
# for error messages etcetera
progname = os.path.basename(argv[0])
# parse the program arguments
parser = argparse.ArgumentParser(
prog=argv[0],
usage=f"python {progname} [-p package] [-o output_dir]",
description="A .pyi generator for the VTK python wrappers.")
parser.add_argument('-p', '--package', type=str, default="vtkmodules",
help="Package name [vtkmodules].")
parser.add_argument('-o', '--output', type=str,
help="Output directory [package directory].")
parser.add_argument('-e', '--ext', type=str, default=".pyi",
help="Output file suffix [.pyi].")
parser.add_argument('modules', type=str, nargs='*',
help="Modules to process [all].")
args = parser.parse_args(argv[1:])
# for convenience
packagename = args.package
modules = args.modules
basedir = args.output
ext = args.ext
# get information about the package
mod = importlib.import_module(packagename)
filename = inspect.getfile(mod)
if os.path.basename(filename) != '__init__.py':
sys.stderr.write(f"{progname}: {packagename} has no __init__.py\n")
return 1
if basedir is None:
basedir = os.path.dirname(filename)
if len(modules) == 0:
modules = mod.__all__
# iterate through the modules in the package
errflag = False
for modname in modules:
# inspect the module before loading it
try:
spec = importlib.util.find_spec(f"{packagename}.{modname}")
except ValueError:
spec = None
if not errflag:
errflag = True
sys.stderr.write(f"{progname}: couldn't get loader for {modname}\n")
if spec is None:
continue
if not isinstance(spec.loader, importlib.machinery.ExtensionFileLoader):
continue
# the module is definitely an extension module, so load it
mod = importlib.import_module(f"{packagename}.{modname}")
outfile = os.path.join(basedir, f"{modname}{ext}")
with open(outfile, "w") as output:
module_pyi(mod, output)
if __name__ == '__main__':
result = main(sys.argv)
if result is not None:
sys.exit(result)
|
py
|
1a5abe85675c85e6521c07d66d02b6917fbe5053
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX.json')
def test_storage_encoding_KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1JH9GCs3Y3kiLEuu2n9bAJev2UaGJbVaJX(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
py
|
1a5ac07a05d754dea894c5b1e39fc1b45b1bfc5b
|
"""
Energy Results
==============
#. :class:`.EnergyResults`
Results class for :class:`.Calculator` that outputs energy.
"""
from .results import Results
class EnergyResults(Results):
"""
Results class containing molecule energy.
"""
def __init__(self, generator, unit_string):
self._value = next(generator)
self._unit_string = unit_string
def get_energy(self):
return self._value
def get_unit_string(self):
return self._unit_string
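# Minimal usage sketch (illustrative values only; in practice the generator
# comes from a Calculator implementation):
#
#     results = EnergyResults(iter([-76.4]), 'hartree')
#     results.get_energy()       # -76.4
#     results.get_unit_string()  # 'hartree'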
|
py
|
1a5ac1beaa68f0865f44a88dd22142408166a4f6
|
import sys # Needed for sys.argv
from typing import List, Dict, Set
from statistics import mean
import collections
import csv
def get_climate(in_filename: str, out_filename: str) -> None:
"""Read historical weather from in_filename, write climate to out_filename.
Parameters
----------
in_filename : name of the input file
out_filename : name of the output file
"""
in_file = open(in_filename, 'r')
"""
What you should do:
1. Read each line of in_file
2. Skip the first (header) line
3. Split each line on commas
4. Get the year, month, and day
5. Update the statistics (total precip, total low temp, etc)
6. When done, open the output file.
7. for each day of the year:
8. Compute the climate for the day, write to output file.
"""
next(in_file) # Skips header row
total_precip = {}
total_tempmin = {}
total_tempmax = {}
record_tempmin = {}
record_tempmax = {}
total_tempmin_year = {}
total_tempmax_year = {}
century = 1900
previous_year = 0
for line in in_file.readlines():
line = line.rstrip('\r\n')
date, precip, tempmax, tempmin = line.split(",")
# Controls for bad data, such as no entry
if not date or not precip or not tempmax or not tempmin:
continue
# Converts ISO dates
if "-" in date:
year, month, day = date.split("-")
year = int(year)
# Converts US dates
if "/" in date:
month, day, year = date.split("/")
year = int(year)
if year < 100 and year < previous_year:
year += century
if year == 1999:
century = 2000
if len(month) == 1:
month = "0" + month
if len(day) == 1:
day = "0" + day
month_day = month + "/" + day
        # Skips leap days (Feb 29)
if month_day == "02/29":
continue
date_in_year = month + "/" + day + "/" + str(year)
# Used to keep track of when to increment century due to
# inconsistent date formatting.
previous_year = year
# Converts string data into floats.
# Needed for finding maximum, average, minimum.
precip = float(precip)
tempmax = float(tempmax)
tempmin = float(tempmin)
total_precip.setdefault(month_day, []).append(precip)
total_tempmin.setdefault(month_day, []).append(tempmin)
total_tempmax.setdefault(month_day, []).append(tempmax)
        # Track (temperature, year) pairs per day so the year of each
        # day's record low/high can be reported in the output.
        total_tempmin_year.setdefault(month_day, []).append((tempmin, year))
        total_tempmax_year.setdefault(month_day, []).append((tempmax, year))
# Unsorted, but will be sorted as per assignment requirement.
avg_precip = {month_day: round(mean(precip), 1) for month_day, precip in total_precip.items()}
avg_tempmin = {month_day: round(mean(tempmin), 1) for month_day, tempmin in total_tempmin.items()}
avg_tempmax = {month_day: round(mean(tempmax), 1) for month_day, tempmax in total_tempmax.items()}
record_tempmin = {month_day: min(tempmin) for month_day, tempmin in total_tempmin.items()}
record_tempmax = {month_day: max(tempmax) for month_day, tempmax in total_tempmax.items()}
    record_tempmin_year = {month_day: min(pairs)[1] for month_day, pairs in total_tempmin_year.items()}
    record_tempmax_year = {month_day: max(pairs)[1] for month_day, pairs in total_tempmax_year.items()}
# Sorts dictionary keys, so that January 1st is first, and December 31st is last.
sorted_avg_precip = {k: avg_precip[k] for k in sorted(avg_precip)}
sorted_avg_tempmin = {k: avg_tempmin[k] for k in sorted(avg_tempmin)}
sorted_avg_tempmax = {k: avg_tempmax[k] for k in sorted(avg_tempmax)}
sorted_record_tempmin = {k: record_tempmin[k] for k in sorted(record_tempmin)}
sorted_record_tempmax = {k: record_tempmax[k] for k in sorted(record_tempmax)}
sorted_record_tempmin_year = {k: record_tempmin_year[k] for k in sorted(record_tempmin_year)}
sorted_record_tempmax_year = {k: record_tempmax_year[k] for k in sorted(record_tempmax_year)}
out_handle = open(out_filename, 'w')
out_handle.write("Day,Avg precip,Avg low,Avg high,Min low,Max high,Min low year,Max high year\n")
out_handle.write("{},{},{},{},{},{},{},{}\n".format(date_in_year, sorted_avg_precip, sorted_avg_tempmin, sorted_avg_tempmax,
sorted_record_tempmin, sorted_record_tempmax, sorted_record_tempmin_year, sorted_record_tempmax_year))
out_handle.close()
def usage():
"""Complain that the user ran the program incorrectly."""
sys.stderr.write('Usage:\n')
sys.stderr.write(' python climate.py <input-file.csv> <output-file.csv>\n')
sys.exit()
def main():
if len(sys.argv) != 3:
usage()
sys.exit()
in_filename: str = sys.argv[1]
out_filename: str = sys.argv[2]
get_climate(in_filename, out_filename)
if __name__ == '__main__':
main()
|
py
|
1a5ac3017505b9edf70f498213b520f381e42692
|
# Define shout with the parameter, word
def shout(word):
"""Return a string with three exclamation marks"""
# Concatenate the strings: shout_word
    shout_word = word + '!!!'
# Replace print with return
return shout_word
# Pass 'congratulations' to shout: yell
yell = shout('congratulations')
# Print yell
print(yell)
|
py
|
1a5ac39d9f02ce7329a6a88fe442fce8474fbd79
|
#!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class AppInfo:
def __init__(self):
self.app_root = ''
self.app_version = '1.0.0'
self.app_versionCode = ''
self.fullscreen_flag = ''
self.icon = ''
self.name = 'AppTemplate'
self.orientation = 'unspecified'
self.original_name = ''
self.package = 'org.xwalk.app.template'
self.remote_debugging = ''
|
py
|
1a5ac4e2140785fe79afc21c7501023b629e0cea
|
com = {
'extend': None,
'domain_name': r'Domain Name:\s?(.+)',
'register': r'Registrar:\s?(.+)',
'registrant': None,
'creation_date': r'Creation Date:\s?(.+)',
'expiration_date': r'Expiration Date:\s?(.+)',
'updated_date': r'Updated Date:\s?(.+)',
'name_servers': r'Name Server:\s*(.+)\s*',
'status': r'Status:\s?(.+)',
'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
}
net = {
'extend': 'com',
}
org = {
'extend': 'com',
'creation_date': r'\nCreated On:\s?(.+)',
'updated_date': r'\nLast Updated On:\s?(.+)',
'name_servers': r'Name Server:\s?(.+)\s*',
}
uk = {
'extend': 'com',
'registrant': r'Registrant:\n\s*(.+)',
'creation_date': r'Registered on:\s*(.+)',
'expiration_date': r'Renewal date:\s*(.+)',
'updated_date': r'Last updated:\s*(.+)',
'name_servers': r'Name Servers:\s*(.+)\s*',
'status': r'Registration status:\n\s*(.+)',
}
pl = {
'extend': 'uk',
'creation_date': r'\ncreated:\s*(.+)\n',
'updated_date': r'\nlast modified:\s*(.+)\n',
'name_servers': r'\nnameservers:\s*(.+)\n\s*(.+)\n',
'status': r'\nStatus:\n\s*(.+)',
}
ru = {
'extend': 'com',
'domain_name': r'\ndomain:\s*(.+)',
'creation_date': r'\ncreated:\s*(.+)',
'expiration_date': r'\npaid-till:\s*(.+)',
'name_servers': r'\nnserver:\s*(.+)',
'status': r'\nstate:\s*(.+)',
}
lv = {
'extend': 'ru',
'creation_date': r'Registered:\s*(.+)\n',
'updated_date': r'Changed:\s*(.+)\n',
'status': r'Status:\s?(.+)',
}
jp = {
'domain_name': r'\[Domain Name\]\s?(.+)',
'register': None,
'registrant': r'\[Registrant\]\s?(.+)',
'creation_date': r'\[Created on\]\s?(.+)',
'expiration_date': r'\[Expires on\]\s?(.+)',
'updated_date': r'\[Last Updated\]\s?(.+)',
'name_servers': r'\[Name Server\]\s*(.+)',
'status': r'\[Status\]\s?(.+)',
'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
}
co_jp = {
'extend': 'jp',
'creation_date': r'\[Registered Date\]\s?(.+)',
'expiration_date': r'\[State\].+\((.+)\)',
'updated_date': r'\[Last Update\]\s?(.+)',
}
de = {
'extend': 'com',
'domain_name': r'\ndomain:\s*(.+)',
'updated_date': r'\nChanged:\s?(.+)',
'name_servers': r'Nserver:\s*(.+)',
}
at = {
'extend': 'com',
'domain_name': r'domain:\s?(.+)',
'updated_date': r'changed:\s?(.+)',
'name_servers': r'nserver:\s*(.+)',
}
eu = {
'extend': 'com',
'domain_name': r'\ndomain:\s*(.+)',
'register': r'Name:\s?(.+)',
}
biz = {
'extend': 'com',
'register': r'Sponsoring Registrar:\s?(.+)',
'registrant': r'Registrant Organization:\s?(.+)',
'creation_date': r'Domain Registration Date:\s?(.+)',
'expiration_date': r'Domain Expiration Date:\s?(.+)',
'updated_date': r'Domain Last Updated Date:\s?(.+)',
'status': None,
}
info = {
'extend': 'biz',
'creation_date': r'Created On:\s?(.+)',
'expiration_date': r'Expiration Date:\s?(.+)',
'updated_date': r'Last Updated On:\s?(.+)',
'status': r'Status:\s?(.+)',
}
name = {
'extend': 'com',
'status': r'Domain Status:\s?(.+)',
}
us = {
'extend': 'name',
}
co = {
'extend': 'biz',
'status': r'Status:\s?(.+)',
}
me = {
'extend': 'biz',
'creation_date': r'Domain Create Date:\s?(.+)',
'expiration_date': r'Domain Expiration Date:\s?(.+)',
'updated_date': r'Domain Last Updated Date:\s?(.+)',
'name_servers': r'Nameservers:\s?(.+)',
'status': r'Domain Status:\s?(.+)',
}
be = {
'extend': 'pl',
'domain_name': r'\nDomain:\s*(.+)',
'register': r'Company Name:\n?(.+)',
'creation_date': r'Registered:\s*(.+)\n',
'status': r'Status:\s?(.+)',
}
nz = {
'extend': None,
'domain_name': r'domain_name:\s?(.+)',
'register': r'registrar_name:\s?(.+)',
'registrant': r'registrant_contact_name:\s?(.+)',
'creation_date': r'domain_dateregistered:\s?(.+)',
'expiration_date': r'domain_datebilleduntil:\s?(.+)',
'updated_date': r'domain_datelastmodified:\s?(.+)',
'name_servers': r'ns_name_[0-9]{2}:\s?(.+)',
'status': r'query_status:\s?(.+)',
'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
}
cz = {
'extend': 'com',
'domain_name': r'Domain:\s?(.+)',
'register': r'registrar:\s?(.+)',
'registrant': r'registrant:\s?(.+)',
'creation_date': r'registered:\s?(.+)',
'expiration_date': r'expire:\s?(.+)',
'updated_date': r'changed:\s?(.+)',
'name_servers': r'nserver:\s*(.+) ',
}
it = {
'extend': 'com',
'domain_name': r'Domain:\s?(.+)',
'register': r'Registrar:\s*Organization:\s*(.+)',
'registrant': r'Registrant:\s?Name:\s?(.+)',
'creation_date': r'Created:\s?(.+)',
'expiration_date': r'Expire Date:\s?(.+)',
'updated_date': r'Last Update:\s?(.+)',
'name_servers': r'Nameservers:\s?(.+)\s?(.+)\s?(.+)\s?(.+)',
'status': r'Status:\s?(.+)',
}
fr = {
'extend': 'com',
'domain_name': r'domain:\s?(.+)',
'register': r'registrar:\s*(.+)',
'registrant': r'contact:\s?(.+)',
'creation_date': r'created:\s?(.+)',
'expiration_date': None,
'updated_date': r'last-update:\s?(.+)',
'name_servers': r'nserver:\s*(.+)',
'status': r'status:\s?(.+)',
}
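# The 'extend' key chains one TLD's patterns onto another's (e.g. net -> com,
# co_jp -> jp). A minimal sketch of how such a chain could be flattened;
# 'resolve_tld' is a hypothetical helper, not part of the original module:
def resolve_tld(name):
    """Merge a TLD's patterns with those of every ancestor named by 'extend'."""
    rules = globals()[name]
    parent = rules.get('extend')
    merged = {} if parent is None else resolve_tld(parent)
    merged.update({k: v for k, v in rules.items() if k != 'extend'})
    return merged
# e.g. resolve_tld('co_jp') keeps jp's name_servers pattern but overrides
# creation_date, expiration_date and updated_date.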
|
py
|
1a5ac54c901b9da001e56938280ec9143aba57fb
|
#!/usr/bin/python
# Example using a character LCD plate.
import math
import time
import Adafruit_CharLCD as LCD
# Initialize the LCD using the pins
lcd = LCD.Adafruit_CharLCDPlate()
# create some custom characters
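# Each pattern below is 8 rows of 5-bit pixel data (the HD44780's 5x8
# custom-glyph format); slots 1-7 are later recalled with '\x01'-'\x07'.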
lcd.create_char(1, [2, 3, 2, 2, 14, 30, 12, 0])
lcd.create_char(2, [0, 1, 3, 22, 28, 8, 0, 0])
lcd.create_char(3, [0, 14, 21, 23, 17, 14, 0, 0])
lcd.create_char(4, [31, 17, 10, 4, 10, 17, 31, 0])
lcd.create_char(5, [8, 12, 10, 9, 10, 12, 8, 0])
lcd.create_char(6, [2, 6, 10, 18, 10, 6, 2, 0])
lcd.create_char(7, [31, 17, 21, 21, 21, 21, 17, 31])
# Show some basic colors.
lcd.set_color(1.0, 0.0, 0.0)
lcd.clear()
lcd.message('RED \x01')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 0.0)
lcd.clear()
lcd.message('GREEN \x02')
time.sleep(3.0)
lcd.set_color(0.0, 0.0, 1.0)
lcd.clear()
lcd.message('BLUE \x03')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 0.0)
lcd.clear()
lcd.message('YELLOW \x04')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 1.0)
lcd.clear()
lcd.message('CYAN \x05')
time.sleep(3.0)
lcd.set_color(1.0, 0.0, 1.0)
lcd.clear()
lcd.message('MAGENTA \x06')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 1.0)
lcd.clear()
lcd.message('WHITE \x07')
time.sleep(3.0)
# Show button state.
lcd.clear()
lcd.message('Press buttons...')
# Make list of button value, text, and backlight color.
buttons = ( (LCD.SELECT, 'Select', (1,1,1)),
(LCD.LEFT, 'Left' , (1,0,0)),
(LCD.UP, 'Up' , (0,0,1)),
(LCD.DOWN, 'Down' , (0,1,0)),
(LCD.RIGHT, 'Right' , (1,0,1)) )
print('Press Ctrl-C to quit.')
while True:
# Loop through each button and check if it is pressed.
for button in buttons:
if lcd.is_pressed(button[0]):
# Button is pressed, change the message and backlight.
lcd.clear()
lcd.message(button[1])
lcd.set_color(button[2][0], button[2][1], button[2][2])
|
py
|
1a5ac55094443750ea7b2354486dab0537d78d4e
|
# pylint: disable=too-many-lines
import copy
import json
import os
import re
import six
from typing import Any, Optional, Dict, List, Set, Union # noqa
from typing import cast
import yaml
from yaml.scanner import ScannerError
from yaml.nodes import Node, ScalarNode, SequenceNode
from chalice.deploy.swagger import (
CFNSwaggerGenerator, TerraformSwaggerGenerator)
from chalice.utils import (
OSUtils, UI, serialize_to_json, to_cfn_resource_name
)
from chalice.awsclient import TypedAWSClient # noqa
from chalice.config import Config # noqa
from chalice.deploy import models
from chalice.deploy.appgraph import ApplicationGraphBuilder, DependencyBuilder
from chalice.deploy.deployer import BuildStage # noqa
from chalice.deploy.deployer import create_build_stage
def create_app_packager(
config, options, package_format='cloudformation',
template_format='json', merge_template=None):
# type: (Config, PackageOptions, str, str, Optional[str]) -> AppPackager
osutils = OSUtils()
ui = UI()
application_builder = ApplicationGraphBuilder()
deps_builder = DependencyBuilder()
post_processors = [] # type: List[TemplatePostProcessor]
generator = None # type: Union[None, TemplateGenerator]
template_serializer = cast(TemplateSerializer, JSONTemplateSerializer())
if package_format == 'cloudformation':
build_stage = create_build_stage(
osutils, ui, CFNSwaggerGenerator(), config)
use_yaml_serializer = template_format == 'yaml'
if merge_template is not None and \
YAMLTemplateSerializer.is_yaml_template(merge_template):
# Automatically switch the serializer to yaml if they specify
# a yaml template to merge, regardless of what template format
# they specify.
use_yaml_serializer = True
if use_yaml_serializer:
template_serializer = YAMLTemplateSerializer()
post_processors.extend([
SAMCodeLocationPostProcessor(osutils=osutils),
TemplateMergePostProcessor(
osutils=osutils,
merger=TemplateDeepMerger(),
template_serializer=template_serializer,
merge_template=merge_template)])
generator = SAMTemplateGenerator(config, options)
else:
build_stage = create_build_stage(
osutils, ui, TerraformSwaggerGenerator(), config)
generator = TerraformGenerator(config, options)
post_processors.append(
TerraformCodeLocationPostProcessor(osutils=osutils))
resource_builder = ResourceBuilder(
application_builder, deps_builder, build_stage)
return AppPackager(
generator,
resource_builder,
CompositePostProcessor(post_processors),
template_serializer,
osutils)
class UnsupportedFeatureError(Exception):
pass
class DuplicateResourceNameError(Exception):
pass
class PackageOptions(object):
def __init__(self, client):
# type: (TypedAWSClient) -> None
self._client = client # type: TypedAWSClient
def service_principal(self, service):
# type: (str) -> str
dns_suffix = self._client.endpoint_dns_suffix(service,
self._client.region_name)
return self._client.service_principal(service,
self._client.region_name,
dns_suffix)
class ResourceBuilder(object):
def __init__(self,
application_builder, # type: ApplicationGraphBuilder
deps_builder, # type: DependencyBuilder
build_stage, # type: BuildStage
):
# type: (...) -> None
self._application_builder = application_builder
self._deps_builder = deps_builder
self._build_stage = build_stage
def construct_resources(self, config, chalice_stage_name):
# type: (Config, str) -> List[models.Model]
application = self._application_builder.build(
config, chalice_stage_name)
resources = self._deps_builder.build_dependencies(application)
self._build_stage.execute(config, resources)
# Rebuild dependencies in case the build stage modified
# the application graph.
resources = self._deps_builder.build_dependencies(application)
return resources
class TemplateGenerator(object):
template_file = None # type: str
def __init__(self, config, options):
# type: (Config, PackageOptions) -> None
self._config = config
self._options = options
def dispatch(self, resource, template):
# type: (models.Model, Dict[str, Any]) -> None
name = '_generate_%s' % resource.__class__.__name__.lower()
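        # e.g. a models.RestAPI resource dispatches to '_generate_restapi';
        # models without a matching handler fall through to self._default.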
handler = getattr(self, name, self._default)
handler(resource, template)
def generate(self, resources):
# type: (List[models.Model]) -> Dict[str, Any]
raise NotImplementedError()
def _generate_filebasediampolicy(self, resource, template):
# type: (models.FileBasedIAMPolicy, Dict[str, Any]) -> None
pass
def _generate_autogeniampolicy(self, resource, template):
# type: (models.AutoGenIAMPolicy, Dict[str, Any]) -> None
pass
def _generate_deploymentpackage(self, resource, template):
# type: (models.DeploymentPackage, Dict[str, Any]) -> None
pass
def _generate_precreatediamrole(self, resource, template):
# type: (models.PreCreatedIAMRole, Dict[str, Any]) -> None
pass
def _default(self, resource, template):
# type: (models.Model, Dict[str, Any]) -> None
raise UnsupportedFeatureError(resource)
class SAMTemplateGenerator(TemplateGenerator):
_BASE_TEMPLATE = {
'AWSTemplateFormatVersion': '2010-09-09',
'Transform': 'AWS::Serverless-2016-10-31',
'Outputs': {},
'Resources': {},
}
template_file = "sam"
def __init__(self, config, options):
# type: (Config, PackageOptions) -> None
super(SAMTemplateGenerator, self).__init__(config, options)
self._seen_names = set([]) # type: Set[str]
self._chalice_layer = ""
def generate(self, resources):
# type: (List[models.Model]) -> Dict[str, Any]
template = copy.deepcopy(self._BASE_TEMPLATE)
self._seen_names.clear()
for resource in resources:
self.dispatch(resource, template)
return template
def _generate_lambdalayer(self, resource, template):
# type: (models.LambdaLayer, Dict[str, Any]) -> None
layer = to_cfn_resource_name(
resource.resource_name)
template['Resources'][layer] = {
"Type": "AWS::Serverless::LayerVersion",
"Properties": {
"CompatibleRuntimes": [resource.runtime],
"ContentUri": resource.deployment_package.filename,
"LayerName": resource.layer_name
}
}
self._chalice_layer = layer
def _generate_scheduledevent(self, resource, template):
# type: (models.ScheduledEvent, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
event_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
function_cfn['Properties']['Events'] = {
event_cfn_name: {
'Type': 'Schedule',
'Properties': {
'Schedule': resource.schedule_expression,
}
}
}
def _generate_cloudwatchevent(self, resource, template):
# type: (models.CloudWatchEvent, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
event_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
function_cfn['Properties']['Events'] = {
event_cfn_name: {
'Type': 'CloudWatchEvent',
'Properties': {
# For api calls we need serialized string form, for
# SAM Templates we need datastructures.
'Pattern': json.loads(resource.event_pattern)
}
}
}
def _generate_lambdafunction(self, resource, template):
# type: (models.LambdaFunction, Dict[str, Any]) -> None
resources = template['Resources']
cfn_name = self._register_cfn_resource_name(resource.resource_name)
lambdafunction_definition = {
'Type': 'AWS::Serverless::Function',
'Properties': {
'Runtime': resource.runtime,
'Handler': resource.handler,
'CodeUri': resource.deployment_package.filename,
'Tags': resource.tags,
'Tracing': resource.xray and 'Active' or 'PassThrough',
'Timeout': resource.timeout,
'MemorySize': resource.memory_size,
},
} # type: Dict[str, Any]
if resource.environment_variables:
environment_config = {
'Environment': {
'Variables': resource.environment_variables
}
} # type: Dict[str, Dict[str, Dict[str, str]]]
lambdafunction_definition['Properties'].update(environment_config)
if resource.security_group_ids and resource.subnet_ids:
vpc_config = {
'VpcConfig': {
'SecurityGroupIds': resource.security_group_ids,
'SubnetIds': resource.subnet_ids,
}
} # type: Dict[str, Dict[str, List[str]]]
lambdafunction_definition['Properties'].update(vpc_config)
if resource.reserved_concurrency is not None:
reserved_concurrency_config = {
'ReservedConcurrentExecutions': resource.reserved_concurrency
}
lambdafunction_definition['Properties'].update(
reserved_concurrency_config)
layers = list(resource.layers) or [] # type: List[Any]
if self._chalice_layer:
layers.insert(0, {'Ref': self._chalice_layer})
if layers:
layers_config = {
'Layers': layers
} # type: Dict[str, Any]
lambdafunction_definition['Properties'].update(layers_config)
resources[cfn_name] = lambdafunction_definition
self._add_iam_role(resource, resources[cfn_name])
def _add_iam_role(self, resource, cfn_resource):
# type: (models.LambdaFunction, Dict[str, Any]) -> None
role = resource.role
if isinstance(role, models.ManagedIAMRole):
cfn_resource['Properties']['Role'] = {
'Fn::GetAtt': [
to_cfn_resource_name(role.resource_name), 'Arn'
],
}
else:
# resource is a PreCreatedIAMRole. This is the only other
# subclass of IAMRole.
role = cast(models.PreCreatedIAMRole, role)
cfn_resource['Properties']['Role'] = role.role_arn
def _generate_restapi(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
resources = template['Resources']
resources['RestAPI'] = {
'Type': 'AWS::Serverless::Api',
'Properties': {
'EndpointConfiguration': resource.endpoint_type,
'StageName': resource.api_gateway_stage,
'DefinitionBody': resource.swagger_doc,
}
}
if resource.minimum_compression:
properties = resources['RestAPI']['Properties']
properties['MinimumCompressionSize'] = \
int(resource.minimum_compression)
handler_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
api_handler = template['Resources'].pop(handler_cfn_name)
template['Resources']['APIHandler'] = api_handler
resources['APIHandlerInvokePermission'] = {
'Type': 'AWS::Lambda::Permission',
'Properties': {
'FunctionName': {'Ref': 'APIHandler'},
'Action': 'lambda:InvokeFunction',
'Principal': self._options.service_principal('apigateway'),
'SourceArn': {
'Fn::Sub': [
('arn:${AWS::Partition}:execute-api:${AWS::Region}'
':${AWS::AccountId}:${RestAPIId}/*'),
{'RestAPIId': {'Ref': 'RestAPI'}},
]
},
}
}
for auth in resource.authorizers:
auth_cfn_name = to_cfn_resource_name(auth.resource_name)
resources[auth_cfn_name + 'InvokePermission'] = {
'Type': 'AWS::Lambda::Permission',
'Properties': {
'FunctionName': {'Fn::GetAtt': [auth_cfn_name, 'Arn']},
'Action': 'lambda:InvokeFunction',
'Principal': self._options.service_principal('apigateway'),
'SourceArn': {
'Fn::Sub': [
('arn:${AWS::Partition}:execute-api'
':${AWS::Region}:${AWS::AccountId}'
':${RestAPIId}/*'),
{'RestAPIId': {'Ref': 'RestAPI'}},
]
},
}
}
self._add_domain_name(resource, template)
self._inject_restapi_outputs(template)
def _inject_restapi_outputs(self, template):
# type: (Dict[str, Any]) -> None
# The 'Outputs' of the SAM template are considered
# part of the public API of chalice and therefore
# need to maintain backwards compatibility. This
# method uses the same output key names as the old
# deployer.
# For now, we aren't adding any of the new resources
# to the Outputs section until we can figure out
        # a consistent naming scheme. Ideally we don't use
# the autogen'd names that contain the md5 suffixes.
stage_name = template['Resources']['RestAPI'][
'Properties']['StageName']
outputs = template['Outputs']
outputs['RestAPIId'] = {
'Value': {'Ref': 'RestAPI'}
}
outputs['APIHandlerName'] = {
'Value': {'Ref': 'APIHandler'}
}
outputs['APIHandlerArn'] = {
'Value': {'Fn::GetAtt': ['APIHandler', 'Arn']}
}
outputs['EndpointURL'] = {
'Value': {
'Fn::Sub': (
'https://${RestAPI}.execute-api.${AWS::Region}'
# The api_gateway_stage is filled in when
# the template is built.
'.${AWS::URLSuffix}/%s/'
) % stage_name
}
}
def _add_websocket_lambda_integration(
self, api_ref, websocket_handler, resources):
# type: (Dict[str, Any], str, Dict[str, Any]) -> None
resources['%sAPIIntegration' % websocket_handler] = {
'Type': 'AWS::ApiGatewayV2::Integration',
'Properties': {
'ApiId': api_ref,
'ConnectionType': 'INTERNET',
'ContentHandlingStrategy': 'CONVERT_TO_TEXT',
'IntegrationType': 'AWS_PROXY',
'IntegrationUri': {
'Fn::Sub': [
(
'arn:${AWS::Partition}:apigateway:${AWS::Region}'
':lambda:path/2015-03-31/functions/arn'
':${AWS::Partition}:lambda:${AWS::Region}'
':${AWS::AccountId}:function'
':${WebsocketHandler}/invocations'
),
{'WebsocketHandler': {'Ref': websocket_handler}}
],
}
}
}
def _add_websocket_lambda_invoke_permission(
self, api_ref, websocket_handler, resources):
# type: (Dict[str, str], str, Dict[str, Any]) -> None
resources['%sInvokePermission' % websocket_handler] = {
'Type': 'AWS::Lambda::Permission',
'Properties': {
'FunctionName': {'Ref': websocket_handler},
'Action': 'lambda:InvokeFunction',
'Principal': self._options.service_principal('apigateway'),
'SourceArn': {
'Fn::Sub': [
('arn:${AWS::Partition}:execute-api'
':${AWS::Region}:${AWS::AccountId}'
':${WebsocketAPIId}/*'),
{'WebsocketAPIId': api_ref},
],
},
}
}
def _add_websocket_lambda_integrations(self, api_ref, resources):
# type: (Dict[str, str], Dict[str, Any]) -> None
websocket_handlers = [
'WebsocketConnect',
'WebsocketMessage',
'WebsocketDisconnect',
]
for handler in websocket_handlers:
if handler in resources:
self._add_websocket_lambda_integration(
api_ref, handler, resources)
self._add_websocket_lambda_invoke_permission(
api_ref, handler, resources)
def _create_route_for_key(self, route_key, api_ref):
# type: (str, Dict[str, str]) -> Dict[str, Any]
integration_ref = {
'$connect': 'WebsocketConnectAPIIntegration',
'$disconnect': 'WebsocketDisconnectAPIIntegration',
}.get(route_key, 'WebsocketMessageAPIIntegration')
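        # e.g. '$connect' maps to the connect integration, '$disconnect' to the
        # disconnect integration, and any other route key (such as '$default')
        # falls back to the message integration.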
return {
'Type': 'AWS::ApiGatewayV2::Route',
'Properties': {
'ApiId': api_ref,
'RouteKey': route_key,
'Target': {
'Fn::Join': [
'/',
[
'integrations',
{'Ref': integration_ref},
]
]
},
},
}
def _generate_websocketapi(self, resource, template):
# type: (models.WebsocketAPI, Dict[str, Any]) -> None
resources = template['Resources']
api_ref = {'Ref': 'WebsocketAPI'}
resources['WebsocketAPI'] = {
'Type': 'AWS::ApiGatewayV2::Api',
'Properties': {
'Name': resource.name,
'RouteSelectionExpression': '$request.body.action',
'ProtocolType': 'WEBSOCKET',
}
}
self._add_websocket_lambda_integrations(api_ref, resources)
route_key_names = []
for route in resource.routes:
key_name = 'Websocket%sRoute' % route.replace(
'$', '').replace('default', 'message').capitalize()
route_key_names.append(key_name)
resources[key_name] = self._create_route_for_key(route, api_ref)
resources['WebsocketAPIDeployment'] = {
'Type': 'AWS::ApiGatewayV2::Deployment',
'DependsOn': route_key_names,
'Properties': {
'ApiId': api_ref,
}
}
resources['WebsocketAPIStage'] = {
'Type': 'AWS::ApiGatewayV2::Stage',
'Properties': {
'ApiId': api_ref,
'DeploymentId': {'Ref': 'WebsocketAPIDeployment'},
'StageName': resource.api_gateway_stage,
}
}
self._add_websocket_domain_name(resource, template)
self._inject_websocketapi_outputs(template)
def _inject_websocketapi_outputs(self, template):
# type: (Dict[str, Any]) -> None
# The 'Outputs' of the SAM template are considered
# part of the public API of chalice and therefore
# need to maintain backwards compatibility. This
# method uses the same output key names as the old
# deployer.
# For now, we aren't adding any of the new resources
# to the Outputs section until we can figure out
        # a consistent naming scheme. Ideally we don't use
# the autogen'd names that contain the md5 suffixes.
stage_name = template['Resources']['WebsocketAPIStage'][
'Properties']['StageName']
outputs = template['Outputs']
resources = template['Resources']
outputs['WebsocketAPIId'] = {
'Value': {'Ref': 'WebsocketAPI'}
}
if 'WebsocketConnect' in resources:
outputs['WebsocketConnectHandlerArn'] = {
'Value': {'Fn::GetAtt': ['WebsocketConnect', 'Arn']}
}
outputs['WebsocketConnectHandlerName'] = {
'Value': {'Ref': 'WebsocketConnect'}
}
if 'WebsocketMessage' in resources:
outputs['WebsocketMessageHandlerArn'] = {
'Value': {'Fn::GetAtt': ['WebsocketMessage', 'Arn']}
}
outputs['WebsocketMessageHandlerName'] = {
'Value': {'Ref': 'WebsocketMessage'}
}
if 'WebsocketDisconnect' in resources:
outputs['WebsocketDisconnectHandlerArn'] = {
'Value': {'Fn::GetAtt': ['WebsocketDisconnect', 'Arn']}
            }
outputs['WebsocketDisconnectHandlerName'] = {
'Value': {'Ref': 'WebsocketDisconnect'}
}
outputs['WebsocketConnectEndpointURL'] = {
'Value': {
'Fn::Sub': (
'wss://${WebsocketAPI}.execute-api.${AWS::Region}'
# The api_gateway_stage is filled in when
# the template is built.
'.${AWS::URLSuffix}/%s/'
) % stage_name
}
}
# The various IAM roles/policies are handled in the
# Lambda function generation. We're creating these
# noop methods to indicate we've accounted for these
# resources.
def _generate_managediamrole(self, resource, template):
# type: (models.ManagedIAMRole, Dict[str, Any]) -> None
role_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
resource.trust_policy['Statement'][0]['Principal']['Service'] = \
self._options.service_principal('lambda')
template['Resources'][role_cfn_name] = {
'Type': 'AWS::IAM::Role',
'Properties': {
'AssumeRolePolicyDocument': resource.trust_policy,
'Policies': [
{'PolicyDocument': resource.policy.document,
'PolicyName': role_cfn_name + 'Policy'},
],
}
}
def _generate_s3bucketnotification(self, resource, template):
# type: (models.S3BucketNotification, Dict[str, Any]) -> None
message = (
"Unable to package chalice apps that @app.on_s3_event decorator. "
"CloudFormation does not support modifying the event "
"notifications of existing buckets. "
"You can deploy this app using `chalice deploy`."
)
raise NotImplementedError(message)
def _generate_snslambdasubscription(self, resource, template):
# type: (models.SNSLambdaSubscription, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
sns_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
if re.match(r"^arn:aws[a-z\-]*:sns:", resource.topic):
topic_arn = resource.topic # type: Union[str, Dict[str, str]]
else:
topic_arn = {
'Fn::Sub': (
'arn:${AWS::Partition}:sns'
':${AWS::Region}:${AWS::AccountId}:%s' %
resource.topic
)
}
function_cfn['Properties']['Events'] = {
sns_cfn_name: {
'Type': 'SNS',
'Properties': {
'Topic': topic_arn,
}
}
}
def _generate_sqseventsource(self, resource, template):
# type: (models.SQSEventSource, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
sqs_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
queue = '' # type: Union[str, Dict[str, Any]]
if isinstance(resource.queue, models.QueueARN):
queue = resource.queue.arn
else:
queue = {
'Fn::Sub': ('arn:${AWS::Partition}:sqs:${AWS::Region}'
':${AWS::AccountId}:%s' % resource.queue)
}
function_cfn['Properties']['Events'] = {
sqs_cfn_name: {
'Type': 'SQS',
'Properties': {
'Queue': queue,
'BatchSize': resource.batch_size,
'MaximumBatchingWindowInSeconds':
resource.maximum_batching_window_in_seconds,
}
}
}
def _generate_kinesiseventsource(self, resource, template):
# type: (models.KinesisEventSource, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
kinesis_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
properties = {
'Stream': {
'Fn::Sub': (
'arn:${AWS::Partition}:kinesis:${AWS::Region}'
':${AWS::AccountId}:stream/%s' %
resource.stream
)
},
'BatchSize': resource.batch_size,
'StartingPosition': resource.starting_position,
'MaximumBatchingWindowInSeconds':
resource.maximum_batching_window_in_seconds,
}
function_cfn['Properties']['Events'] = {
kinesis_cfn_name: {
'Type': 'Kinesis',
'Properties': properties
}
}
def _generate_dynamodbeventsource(self, resource, template):
# type: (models.DynamoDBEventSource, Dict[str, Any]) -> None
function_cfn_name = to_cfn_resource_name(
resource.lambda_function.resource_name)
function_cfn = template['Resources'][function_cfn_name]
ddb_cfn_name = self._register_cfn_resource_name(
resource.resource_name)
properties = {
'Stream': resource.stream_arn,
'BatchSize': resource.batch_size,
'StartingPosition': resource.starting_position,
'MaximumBatchingWindowInSeconds':
resource.maximum_batching_window_in_seconds,
}
function_cfn['Properties']['Events'] = {
ddb_cfn_name: {
'Type': 'DynamoDB',
'Properties': properties
}
}
def _generate_apimapping(self, resource, template):
# type: (models.APIMapping, Dict[str, Any]) -> None
pass
def _generate_domainname(self, resource, template):
# type: (models.DomainName, Dict[str, Any]) -> None
pass
def _add_domain_name(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
if resource.domain_name is None:
return
domain_name = resource.domain_name
endpoint_type = resource.endpoint_type
cfn_name = to_cfn_resource_name(domain_name.resource_name)
properties = {
'DomainName': domain_name.domain_name,
'EndpointConfiguration': {
'Types': [endpoint_type],
}
} # type: Dict[str, Any]
if endpoint_type == 'EDGE':
properties['CertificateArn'] = domain_name.certificate_arn
else:
properties['RegionalCertificateArn'] = domain_name.certificate_arn
if domain_name.tls_version is not None:
properties['SecurityPolicy'] = domain_name.tls_version.value
if domain_name.tags:
properties['Tags'] = [
{'Key': key, 'Value': value}
for key, value in sorted(domain_name.tags.items())
]
template['Resources'][cfn_name] = {
'Type': 'AWS::ApiGateway::DomainName',
'Properties': properties
}
template['Resources'][cfn_name + 'Mapping'] = {
'Type': 'AWS::ApiGateway::BasePathMapping',
'Properties': {
'DomainName': {'Ref': 'ApiGatewayCustomDomain'},
'RestApiId': {'Ref': 'RestAPI'},
'BasePath': domain_name.api_mapping.mount_path,
'Stage': resource.api_gateway_stage,
}
}
def _add_websocket_domain_name(self, resource, template):
# type: (models.WebsocketAPI, Dict[str, Any]) -> None
if resource.domain_name is None:
return
domain_name = resource.domain_name
cfn_name = to_cfn_resource_name(domain_name.resource_name)
properties = {
'DomainName': domain_name.domain_name,
'DomainNameConfigurations': [
{'CertificateArn': domain_name.certificate_arn,
'EndpointType': 'REGIONAL'},
]
}
if domain_name.tags:
properties['Tags'] = domain_name.tags
template['Resources'][cfn_name] = {
'Type': 'AWS::ApiGatewayV2::DomainName',
'Properties': properties,
}
template['Resources'][cfn_name + 'Mapping'] = {
'Type': 'AWS::ApiGatewayV2::ApiMapping',
'Properties': {
'DomainName': {'Ref': cfn_name},
'ApiId': {'Ref': 'WebsocketAPI'},
'ApiMappingKey': domain_name.api_mapping.mount_path,
'Stage': {'Ref': 'WebsocketAPIStage'},
}
}
def _register_cfn_resource_name(self, name):
# type: (str) -> str
cfn_name = to_cfn_resource_name(name)
if cfn_name in self._seen_names:
raise DuplicateResourceNameError(
'A duplicate resource name was generated for '
'the SAM template: %s' % cfn_name,
)
self._seen_names.add(cfn_name)
return cfn_name
class TerraformGenerator(TemplateGenerator):
template_file = "chalice.tf"
def __init__(self, config, options):
# type: (Config, PackageOptions) -> None
super(TerraformGenerator, self).__init__(config, options)
self._chalice_layer = ""
def generate(self, resources):
# type: (List[models.Model]) -> Dict[str, Any]
template = {
'resource': {},
'locals': {},
'terraform': {
'required_version': '>= 0.12.26, < 1.2.0',
'required_providers': {
'aws': {'version': '>= 2, < 4'},
'null': {'version': '>= 2, < 4'}
}
},
'data': {
'aws_caller_identity': {'chalice': {}},
'aws_partition': {'chalice': {}},
'aws_region': {'chalice': {}},
'null_data_source': {
'chalice': {
'inputs': {
'app': self._config.app_name,
'stage': self._config.chalice_stage
}
}
}
}
}
for resource in resources:
self.dispatch(resource, template)
return template
def _fref(self, lambda_function, attr='arn'):
# type: (models.ManagedModel, str) -> str
return '${aws_lambda_function.%s.%s}' % (
lambda_function.resource_name, attr)
def _arnref(self, arn_template, **kw):
# type: (str, str) -> str
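        # Illustrative call: _arnref('arn:%(partition)s:sqs:%(region)s'
        # ':%(account_id)s:%(queue)s', queue='jobs') fills the partition,
        # region and account id from the Terraform data sources declared
        # in generate(), and 'jobs' for the queue.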
d = dict(
partition='${data.aws_partition.chalice.partition}',
region='${data.aws_region.chalice.name}',
account_id='${data.aws_caller_identity.chalice.account_id}')
d.update(kw)
return arn_template % d
def _generate_managediamrole(self, resource, template):
# type: (models.ManagedIAMRole, Dict[str, Any]) -> None
resource.trust_policy['Statement'][0]['Principal']['Service'] = \
self._options.service_principal('lambda')
template['resource'].setdefault('aws_iam_role', {})[
resource.resource_name] = {
'name': resource.role_name,
'assume_role_policy': json.dumps(resource.trust_policy)
}
template['resource'].setdefault('aws_iam_role_policy', {})[
resource.resource_name] = {
'name': resource.resource_name + 'Policy',
'policy': json.dumps(resource.policy.document),
'role': '${aws_iam_role.%s.id}' % resource.resource_name,
}
def _generate_websocketapi(self, resource, template):
# type: (models.WebsocketAPI, Dict[str, Any]) -> None
message = (
"Unable to package chalice apps that use experimental "
"Websocket decorators. Terraform AWS Provider "
"support for websocket is pending see "
"https://git.io/fj9X8 for details and progress. "
"You can deploy this app using `chalice deploy`."
)
raise NotImplementedError(message)
def _generate_s3bucketnotification(self, resource, template):
# type: (models.S3BucketNotification, Dict[str, Any]) -> None
bnotify = {
'events': resource.events,
'lambda_function_arn': self._fref(resource.lambda_function)
}
if resource.prefix:
bnotify['filter_prefix'] = resource.prefix
if resource.suffix:
bnotify['filter_suffix'] = resource.suffix
        # we use the bucket name here because we need to aggregate
        # all the notification subscribers for a bucket.
        # Due to cyclic references to buckets created in terraform,
        # we also try to detect and resolve them.
if '{aws_s3_bucket.' in resource.bucket:
bucket_name = resource.bucket.split('.')[1]
else:
bucket_name = resource.bucket
template['resource'].setdefault(
'aws_s3_bucket_notification', {}).setdefault(
bucket_name + '_notify',
{'bucket': resource.bucket}).setdefault(
'lambda_function', []).append(bnotify)
template['resource'].setdefault('aws_lambda_permission', {})[
resource.resource_name] = {
'statement_id': resource.resource_name,
'action': 'lambda:InvokeFunction',
'function_name': self._fref(resource.lambda_function),
'principal': self._options.service_principal('s3'),
'source_account': '${data.aws_caller_identity.chalice.account_id}',
'source_arn': ('arn:${data.aws_partition.chalice.partition}:'
's3:::%s' % resource.bucket)
}
def _generate_sqseventsource(self, resource, template):
# type: (models.SQSEventSource, Dict[str, Any]) -> None
if isinstance(resource.queue, models.QueueARN):
event_source_arn = resource.queue.arn
else:
event_source_arn = self._arnref(
"arn:%(partition)s:sqs:%(region)s"
":%(account_id)s:%(queue)s",
queue=resource.queue
)
template['resource'].setdefault('aws_lambda_event_source_mapping', {})[
resource.resource_name] = {
'event_source_arn': event_source_arn,
'batch_size': resource.batch_size,
'maximum_batching_window_in_seconds':
resource.maximum_batching_window_in_seconds,
'function_name': self._fref(resource.lambda_function)
}
def _generate_kinesiseventsource(self, resource, template):
# type: (models.KinesisEventSource, Dict[str, Any]) -> None
template['resource'].setdefault('aws_lambda_event_source_mapping', {})[
resource.resource_name] = {
'event_source_arn': self._arnref(
"arn:%(partition)s:kinesis:%(region)s"
":%(account_id)s:stream/%(stream)s",
stream=resource.stream),
'batch_size': resource.batch_size,
'starting_position': resource.starting_position,
'maximum_batching_window_in_seconds':
resource.maximum_batching_window_in_seconds,
'function_name': self._fref(resource.lambda_function)
}
def _generate_dynamodbeventsource(self, resource, template):
# type: (models.DynamoDBEventSource, Dict[str, Any]) -> None
template['resource'].setdefault('aws_lambda_event_source_mapping', {})[
resource.resource_name] = {
'event_source_arn': resource.stream_arn,
'batch_size': resource.batch_size,
'starting_position': resource.starting_position,
'maximum_batching_window_in_seconds':
resource.maximum_batching_window_in_seconds,
'function_name': self._fref(resource.lambda_function),
}
def _generate_snslambdasubscription(self, resource, template):
# type: (models.SNSLambdaSubscription, Dict[str, Any]) -> None
if resource.topic.startswith('arn:aws'):
topic_arn = resource.topic
else:
topic_arn = self._arnref(
'arn:%(partition)s:sns:%(region)s:%(account_id)s:%(topic)s',
topic=resource.topic)
template['resource'].setdefault('aws_sns_topic_subscription', {})[
resource.resource_name] = {
'topic_arn': topic_arn,
'protocol': 'lambda',
'endpoint': self._fref(resource.lambda_function)
}
template['resource'].setdefault('aws_lambda_permission', {})[
resource.resource_name] = {
'function_name': self._fref(resource.lambda_function),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('sns'),
'source_arn': topic_arn
}
def _generate_cloudwatchevent(self, resource, template):
# type: (models.CloudWatchEvent, Dict[str, Any]) -> None
template['resource'].setdefault(
'aws_cloudwatch_event_rule', {})[
resource.resource_name] = {
'name': resource.resource_name,
'event_pattern': resource.event_pattern
}
self._cwe_helper(resource, template)
def _generate_scheduledevent(self, resource, template):
# type: (models.ScheduledEvent, Dict[str, Any]) -> None
template['resource'].setdefault(
'aws_cloudwatch_event_rule', {})[
resource.resource_name] = {
'name': resource.resource_name,
'schedule_expression': resource.schedule_expression,
'description': resource.rule_description,
}
self._cwe_helper(resource, template)
def _cwe_helper(self, resource, template):
# type: (models.CloudWatchEventBase, Dict[str, Any]) -> None
template['resource'].setdefault(
'aws_cloudwatch_event_target', {})[
resource.resource_name] = {
'rule': '${aws_cloudwatch_event_rule.%s.name}' % (
resource.resource_name),
'target_id': resource.resource_name,
'arn': self._fref(resource.lambda_function)
}
template['resource'].setdefault(
'aws_lambda_permission', {})[
resource.resource_name] = {
'function_name': self._fref(resource.lambda_function),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('events'),
'source_arn': "${aws_cloudwatch_event_rule.%s.arn}" % (
resource.resource_name)
}
def _generate_lambdalayer(self, resource, template):
# type: (models.LambdaLayer, Dict[str, Any]) -> None
template['resource'].setdefault(
"aws_lambda_layer_version", {})[
resource.resource_name] = {
'layer_name': resource.layer_name,
'compatible_runtimes': [resource.runtime],
'filename': resource.deployment_package.filename,
}
self._chalice_layer = resource.resource_name
def _generate_lambdafunction(self, resource, template):
# type: (models.LambdaFunction, Dict[str, Any]) -> None
func_definition = {
'function_name': resource.function_name,
'runtime': resource.runtime,
'handler': resource.handler,
'memory_size': resource.memory_size,
'tags': resource.tags,
'timeout': resource.timeout,
'source_code_hash': '${filebase64sha256("%s")}' % (
resource.deployment_package.filename),
'filename': resource.deployment_package.filename
} # type: Dict[str, Any]
if resource.security_group_ids and resource.subnet_ids:
func_definition['vpc_config'] = {
'subnet_ids': resource.subnet_ids,
'security_group_ids': resource.security_group_ids
}
if resource.reserved_concurrency is not None:
func_definition['reserved_concurrent_executions'] = (
resource.reserved_concurrency
)
if resource.environment_variables:
func_definition['environment'] = {
'variables': resource.environment_variables
}
if resource.xray:
func_definition['tracing_config'] = {
'mode': 'Active'
}
if self._chalice_layer:
func_definition['layers'] = [
'${aws_lambda_layer_version.%s.arn}' % self._chalice_layer
]
if resource.layers:
func_definition.setdefault('layers', []).extend(
list(resource.layers))
if isinstance(resource.role, models.ManagedIAMRole):
func_definition['role'] = '${aws_iam_role.%s.arn}' % (
resource.role.resource_name)
else:
# resource is a PreCreatedIAMRole.
role = cast(models.PreCreatedIAMRole, resource.role)
func_definition['role'] = role.role_arn
template['resource'].setdefault('aws_lambda_function', {})[
resource.resource_name] = func_definition
def _generate_restapi(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
# typechecker happiness
swagger_doc = cast(Dict, resource.swagger_doc)
template['locals']['chalice_api_swagger'] = json.dumps(
swagger_doc)
template['resource'].setdefault('aws_api_gateway_rest_api', {})[
resource.resource_name] = {
'body': '${local.chalice_api_swagger}',
# Terraform will diff explicitly configured attributes
# to the current state of the resource. Attributes configured
# via swagger on the REST api need to be duplicated here, else
# terraform will set them back to empty.
'name': swagger_doc['info']['title'],
'binary_media_types': swagger_doc[
'x-amazon-apigateway-binary-media-types'],
'endpoint_configuration': {'types': [resource.endpoint_type]}
}
if 'x-amazon-apigateway-policy' in swagger_doc:
template['resource'][
'aws_api_gateway_rest_api'][
resource.resource_name]['policy'] = json.dumps(
swagger_doc['x-amazon-apigateway-policy'])
if resource.minimum_compression.isdigit():
template['resource'][
'aws_api_gateway_rest_api'][
resource.resource_name][
'minimum_compression_size'] = int(
resource.minimum_compression)
template['resource'].setdefault('aws_api_gateway_deployment', {})[
resource.resource_name] = {
'stage_name': resource.api_gateway_stage,
# Ensure that the deployment gets redeployed if we update
# the swagger description for the api by using its checksum
# in the stage description.
'stage_description': (
"${md5(local.chalice_api_swagger)}"),
'rest_api_id': '${aws_api_gateway_rest_api.%s.id}' % (
resource.resource_name),
'lifecycle': {'create_before_destroy': True}
}
template['resource'].setdefault('aws_lambda_permission', {})[
resource.resource_name + '_invoke'] = {
'function_name': self._fref(resource.lambda_function),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('apigateway'),
'source_arn':
"${aws_api_gateway_rest_api.%s.execution_arn}/*" % (
resource.resource_name)
}
template.setdefault('output', {})[
'EndpointURL'] = {
'value': '${aws_api_gateway_deployment.%s.invoke_url}' % (
resource.resource_name)
}
template.setdefault('output', {})[
'RestAPIId'] = {
'value': '${aws_api_gateway_rest_api.%s.id}' % (
resource.resource_name)
}
for auth in resource.authorizers:
template['resource']['aws_lambda_permission'][
auth.resource_name + '_invoke'] = {
'function_name': self._fref(auth),
'action': 'lambda:InvokeFunction',
'principal': self._options.service_principal('apigateway'),
'source_arn': (
"${aws_api_gateway_rest_api.%s.execution_arn}" % (
resource.resource_name) + "/*"
)
}
self._add_domain_name(resource, template)
def _add_domain_name(self, resource, template):
# type: (models.RestAPI, Dict[str, Any]) -> None
if resource.domain_name is None:
return
domain_name = resource.domain_name
endpoint_type = resource.endpoint_type
properties = {
'domain_name': domain_name.domain_name,
'endpoint_configuration': {'types': [endpoint_type]},
}
if endpoint_type == 'EDGE':
properties['certificate_arn'] = domain_name.certificate_arn
else:
properties[
'regional_certificate_arn'] = domain_name.certificate_arn
if domain_name.tls_version is not None:
properties['security_policy'] = domain_name.tls_version.value
if domain_name.tags:
properties['tags'] = domain_name.tags
template['resource']['aws_api_gateway_domain_name'] = {
domain_name.resource_name: properties
}
template['resource']['aws_api_gateway_base_path_mapping'] = {
domain_name.resource_name + '_mapping': {
'stage_name': resource.api_gateway_stage,
'domain_name': domain_name.domain_name,
'api_id': '${aws_api_gateway_rest_api.%s.id}' % (
resource.resource_name)
}
}
self._add_domain_name_outputs(domain_name.resource_name, endpoint_type,
template)
def _add_domain_name_outputs(self, domain_resource_name,
endpoint_type, template):
# type: (str, str, Dict[str, Any]) -> None
base = (
'aws_api_gateway_domain_name.%s' % domain_resource_name
)
if endpoint_type == 'EDGE':
alias_domain_name = '${%s.cloudfront_domain_name}' % base
hosted_zone_id = '${%s.cloudfront_zone_id}' % base
else:
alias_domain_name = '${%s.regional_domain_name}' % base
hosted_zone_id = '${%s.regional_zone_id}' % base
template.setdefault('output', {})['AliasDomainName'] = {
'value': alias_domain_name
}
template.setdefault('output', {})['HostedZoneId'] = {
'value': hosted_zone_id
}
def _generate_apimapping(self, resource, template):
# type: (models.APIMapping, Dict[str, Any]) -> None
pass
def _generate_domainname(self, resource, template):
# type: (models.DomainName, Dict[str, Any]) -> None
pass
class AppPackager(object):
def __init__(self,
templater, # type: TemplateGenerator
resource_builder, # type: ResourceBuilder
post_processor, # type: TemplatePostProcessor
template_serializer, # type: TemplateSerializer
osutils, # type: OSUtils
):
# type: (...) -> None
self._templater = templater
self._resource_builder = resource_builder
self._template_post_processor = post_processor
self._template_serializer = template_serializer
self._osutils = osutils
def _to_json(self, doc):
# type: (Any) -> str
return serialize_to_json(doc)
def _to_yaml(self, doc):
# type: (Any) -> str
return yaml.dump(doc, allow_unicode=True)
def package_app(self, config, outdir, chalice_stage_name):
# type: (Config, str, str) -> None
# Deployment package
resources = self._resource_builder.construct_resources(
config, chalice_stage_name)
template = self._templater.generate(resources)
if not self._osutils.directory_exists(outdir):
self._osutils.makedirs(outdir)
self._template_post_processor.process(
template, config, outdir, chalice_stage_name)
contents = self._template_serializer.serialize_template(template)
extension = self._template_serializer.file_extension
filename = os.path.join(
outdir, self._templater.template_file) + '.' + extension
self._osutils.set_file_contents(
filename=filename,
contents=contents,
binary=False
)
class TemplatePostProcessor(object):
def __init__(self, osutils):
# type: (OSUtils) -> None
self._osutils = osutils
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
raise NotImplementedError()
class SAMCodeLocationPostProcessor(TemplatePostProcessor):
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
self._fixup_deployment_package(template, outdir)
def _fixup_deployment_package(self, template, outdir):
# type: (Dict[str, Any], str) -> None
# NOTE: This isn't my ideal way to do this. I'd like
# to move this into the build step where something
# copies the DeploymentPackage.filename over to the
# outdir. That would require plumbing through user
# provided params such as "outdir" into the build stage
# somehow, which isn't currently possible.
copied = False
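        # All Serverless::Function resources share one deployment package, so
        # the zip only needs to be copied into the outdir once; every function
        # is then pointed at the copied './deployment.zip'.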
for resource in template['Resources'].values():
if resource['Type'] == 'AWS::Serverless::Function':
original_location = resource['Properties']['CodeUri']
new_location = os.path.join(outdir, 'deployment.zip')
if not copied:
self._osutils.copy(original_location, new_location)
copied = True
resource['Properties']['CodeUri'] = './deployment.zip'
elif resource['Type'] == 'AWS::Serverless::LayerVersion':
original_location = resource['Properties']['ContentUri']
new_location = os.path.join(outdir, 'layer-deployment.zip')
self._osutils.copy(original_location, new_location)
resource['Properties']['ContentUri'] = './layer-deployment.zip'
class TerraformCodeLocationPostProcessor(TemplatePostProcessor):
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
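        # As with the SAM post-processor, all Lambda functions share a single
        # deployment package: copy it into the outdir once and rewrite each
        # function's filename/source_code_hash to reference the copied asset.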
copied = False
resources = template['resource']
for r in resources.get('aws_lambda_function', {}).values():
if not copied:
asset_path = os.path.join(outdir, 'deployment.zip')
self._osutils.copy(r['filename'], asset_path)
copied = True
r['filename'] = "${path.module}/deployment.zip"
r['source_code_hash'] = \
'${filebase64sha256("${path.module}/deployment.zip")}'
copied = False
for r in resources.get('aws_lambda_layer_version', {}).values():
if not copied:
asset_path = os.path.join(outdir, 'layer-deployment.zip')
self._osutils.copy(r['filename'], asset_path)
copied = True
r['filename'] = "${path.module}/layer-deployment.zip"
r['source_code_hash'] = \
'${filebase64sha256("${path.module}/layer-deployment.zip")}'
class TemplateMergePostProcessor(TemplatePostProcessor):
def __init__(self,
osutils, # type: OSUtils
merger, # type: TemplateMerger
template_serializer, # type: TemplateSerializer
merge_template=None, # type: Optional[str]
):
# type: (...) -> None
super(TemplateMergePostProcessor, self).__init__(osutils)
self._merger = merger
self._template_serializer = template_serializer
self._merge_template = merge_template
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
if self._merge_template is None:
return
loaded_template = self._load_template_to_merge()
merged = self._merger.merge(loaded_template, template)
template.clear()
template.update(merged)
def _load_template_to_merge(self):
# type: () -> Dict[str, Any]
template_name = cast(str, self._merge_template)
filepath = os.path.abspath(template_name)
if not self._osutils.file_exists(filepath):
raise RuntimeError('Cannot find template file: %s' % filepath)
template_data = self._osutils.get_file_contents(filepath, binary=False)
loaded_template = self._template_serializer.load_template(
template_data, filepath)
return loaded_template
class CompositePostProcessor(TemplatePostProcessor):
def __init__(self, processors):
# type: (List[TemplatePostProcessor]) -> None
self._processors = processors
def process(self, template, config, outdir, chalice_stage_name):
# type: (Dict[str, Any], Config, str, str) -> None
for processor in self._processors:
processor.process(template, config, outdir, chalice_stage_name)
class TemplateMerger(object):
def merge(self, file_template, chalice_template):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
raise NotImplementedError('merge')
class TemplateDeepMerger(TemplateMerger):
def merge(self, file_template, chalice_template):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
return self._merge(file_template, chalice_template)
def _merge(self, file_template, chalice_template):
# type: (Any, Any) -> Any
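        # Dicts are merged recursively; for any other type the value from the
        # user-provided merge template takes precedence over the generated one.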
if isinstance(file_template, dict) and \
isinstance(chalice_template, dict):
return self._merge_dict(file_template, chalice_template)
return file_template
def _merge_dict(self, file_template, chalice_template):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
merged = chalice_template.copy()
for key, value in file_template.items():
merged[key] = self._merge(value, chalice_template.get(key))
return merged
class TemplateSerializer(object):
file_extension = ''
def load_template(self, file_contents, filename=''):
# type: (str, str) -> Dict[str, Any]
        raise NotImplementedError()
def serialize_template(self, contents):
# type: (Dict[str, Any]) -> str
        raise NotImplementedError()
class JSONTemplateSerializer(TemplateSerializer):
file_extension = 'json'
def serialize_template(self, contents):
# type: (Dict[str, Any]) -> str
return serialize_to_json(contents)
def load_template(self, file_contents, filename=''):
# type: (str, str) -> Dict[str, Any]
try:
return json.loads(file_contents)
except ValueError:
raise RuntimeError(
'Expected %s to be valid JSON template.' % filename)
class YAMLTemplateSerializer(TemplateSerializer):
file_extension = 'yaml'
@classmethod
def is_yaml_template(cls, template_name):
# type: (str) -> bool
file_extension = os.path.splitext(template_name)[1].lower()
return file_extension in [".yaml", ".yml"]
def serialize_template(self, contents):
# type: (Dict[str, Any]) -> str
return yaml.safe_dump(contents, allow_unicode=True)
def load_template(self, file_contents, filename=''):
# type: (str, str) -> Dict[str, Any]
yaml.SafeLoader.add_multi_constructor(
tag_prefix='!', multi_constructor=self._custom_sam_instrinsics)
try:
return yaml.load(
file_contents,
Loader=yaml.SafeLoader,
)
except ScannerError:
raise RuntimeError(
'Expected %s to be valid YAML template.' % filename)
def _custom_sam_instrinsics(self, loader, tag_prefix, node):
# type: (yaml.SafeLoader, str, Node) -> Dict[str, Any]
tag = node.tag[1:]
if tag not in ['Ref', 'Condition']:
tag = 'Fn::%s' % tag
value = self._get_value(loader, node)
return {tag: value}
def _get_value(self, loader, node):
# type: (yaml.SafeLoader, Node) -> Any
if node.tag[1:] == 'GetAtt' and isinstance(node.value,
six.string_types):
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
value = loader.construct_sequence(node)
else:
value = loader.construct_mapping(node)
return value
|
py
|
1a5ac6eda64a671054e302d79724f8f97ecab722
|
import MagicClick.KCrawler as crawler
import time
if __name__ == "__main__":
    # 1. Daily K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_day_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_day_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_day_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
    # 2. Weekly K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_week_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_week_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_week_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
    # 3. Monthly K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_month_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_month_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_month_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
    # 4. 5-minute K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_5_minutes_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_5_minutes_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_5_minutes_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
    # 5. 15-minute K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_15_minutes_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_15_minutes_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_15_minutes_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
    # 6. 30-minute K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_30_minutes_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_30_minutes_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_30_minutes_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
    # 7. 60-minute K-line data retrieval backtest
start = time.time()
df_sh_600000 = crawler.get_60_minutes_k_data_pre_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_60_minutes_k_data_post_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
start = time.time()
df_sh_600000 = crawler.get_60_minutes_k_data_no_adjust("sh.600000", [])
print("time cost: ", time.time() - start)
print(df_sh_600000)
|
py
|
1a5ac79e8d80c97727b9f56723c4f965298a7f85
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for retinanet."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official import vision
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.projects.qat.vision.configs import common
from official.projects.qat.vision.configs import retinanet as qat_exp_cfg
from official.vision.configs import retinanet as exp_cfg
class RetinaNetConfigTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
('retinanet_spinenet_mobile_coco_qat',),
)
def test_retinanet_configs(self, config_name):
config = exp_factory.get_exp_config(config_name)
self.assertIsInstance(config, cfg.ExperimentConfig)
self.assertIsInstance(config.task, qat_exp_cfg.RetinaNetTask)
self.assertIsInstance(config.task.model, exp_cfg.RetinaNet)
self.assertIsInstance(config.task.quantization, common.Quantization)
self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
config.validate()
config.task.train_data.is_training = None
with self.assertRaisesRegex(KeyError, 'Found inconsistncy between key'):
config.validate()
if __name__ == '__main__':
tf.test.main()
|
py
|
1a5ac7ae27f1ba38d7d970c6bd0600bc72e9786a
|
import os
import logging
import random
from typing import List, Optional
import itertools
import numpy as np
from config import save_path
from ..abstract_system import abstract_system
from .controlloop import controlloop
class system(abstract_system):
def __init__(self, cl: List[controlloop], trap_state=False):
        if not all(type(i) == controlloop for i in cl):
            raise ValueError('All specified controlloops should be of the enumerative type')
super().__init__(cl)
self.states = {}
self.actions = {}
self.transitions = {}
self.outputs = {}
self.output_map = {}
self._trap_state = trap_state or any([not c._label_split for c in cl])
self.scheduler = None
def post(self, x: dict, u: dict = None):
"""
Calculates the set of next states for given action(s) or all actions if actions is not given
:param x: set of state(s)
:param u: set of actions
:return: set of next states
"""
r = set()
if u is None:
u = self.actions
for i in x:
for j in u:
r.update(self.transitions[i][j])
return r
def compose(self):
"""
Creates the sets and dictionaries describing all the NFA's in parallel.
If R is True, use the partition systems, otherwise
use the original systems.
:return: None
"""
self.states = self._c_dict([o.states for o in self.control_loops])
self.outputs = self._c_dict([o._outputs for o in self.control_loops])
self.actions = self._c_dict([o.actions for o in self.control_loops])
self.output_map = self._c_dict([o.output_map for o in self.control_loops])
self.transitions = {x: {u: set() for u in self.actions} for x in self.states}
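        # Transitions of the composed system are taken component-wise; when a
        # trap state is used, any joint action in which two or more loops
        # trigger ('t') simultaneously is redirected to the 'trap' state.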
for xxx in self.states:
for uuu in self.actions:
if self._trap_state and uuu.count('t') >= 2:
self.transitions[xxx][uuu].update({'trap'})
else:
s = [o.transitions[x][u] for (o, x, u) in zip(self.control_loops, xxx, uuu)]
ls = set(itertools.product(*s))
self.transitions[xxx][uuu].update(ls)
if self._trap_state:
self.transitions['trap'] = {u: set() for u in self.actions}
self.states.update({'trap': -1})
def safe_set(self) -> Optional[dict]:
"""
Creates a dict describing the safe set, defined as (x1,...,xn) in W if at most one of the outputs
of xi is 'T'.
:return: BDD function describing the safe set W
"""
if len(self.states) == 0:
print("Compose the system before generating the safe set.")
return dict()
def isSafe(out: tuple):
numT = 0
numX = 0
for i in out:
if type(i) != tuple:
numT += (i == 'T' or i == 'T1')
else:
numT += (i[0] == 'T' or i[0] == 'T1')
numX += (i[1] == 'X')
return (numX == 0 and numT <= 1)
if self._trap_state:
return {k: v for (k, v) in self.states.items() if k != 'trap'}
else:
W = {k: v for (k, v) in self.states.items() if isSafe(self.output_map[k])}
return W
def safety_game(self, W=None):
"""
Solve Safety Game for the NFA with safe set W using fixed-point iterations
:param W: The safe set. If it is not specified, it is first created.
:return: Solution to the Safety Game
"""
if self._trap_state:
F_old = dict()
F_new = self.states
it = 1
while F_old != F_new:
logging.info(f'Safety Game Iteration: {it}')
F_old = F_new
F_new = self.__safety_operator_trap(F_old)
it += 1
if F_old == {}:
return None
return F_old
else:
if W is None:
W = self.safe_set()
F_old = dict()
F_new = self.states
it = 1
while F_old != F_new:
logging.info(f'Safety Game Iteration: {it}')
F_old = F_new
F_new = self.__safety_operator(W, F_old)
it += 1
if F_old == {}:
return None
return F_old
# TODO: Add possibility to return full scheduler transition system
def create_controller(self, Z: dict, StatesOnlyZ=True, convert_blocks=True):
"""
Creates a controller
        :param Z: Winning set resulting from the safety game
        :param StatesOnlyZ: Specifies whether to only use the states in Z for the controller
        :param convert_blocks: If partition-based (block) systems are used, convert the controller domain back to the original states
        :return: Ux, Optional[Block->State Mapping]
"""
if StatesOnlyZ:
c_states = Z.copy()
else:
c_states = self.states.copy()
U_c = {x: set() for x in c_states}
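        # An action u is kept for state x only if it has at least one successor
        # and every successor lies inside the winning set Z.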
for x in c_states:
for u in self.actions:
p = self.transitions[x][u]
if len(p) > 0 and set(Z.keys()).issuperset(p):
U_c[x].add(u)
if not any([s._is_part for s in self.control_loops]):
return U_c, None
elif convert_blocks and any([s._is_part for s in self.control_loops]):
U_c_n = {}
for (b, uuu) in U_c.items():
if b != 'trap':
U_c_n.update({x:uuu for x in itertools.product(*[xx.keys() for xx in self.states[b]])})
return U_c_n, None
else:
# Additionally supply look-up for the blocks
invBs = [{x:b for (b,xx) in cl.states.items() for x in xx} for cl in self.control_loops]
return U_c, invBs
def simulate(self, Ts:float = 0.01, Tmax:float = 1, x0=None, use_scheduler=True, random_inputs=False):
# Check correct/enough initial conditions
if x0 is None:
x0 = [np.random.uniform(low=-4, high=4, size=(cl.abstraction.plant.nx,)) for cl in self.control_loops]
else:
if len(x0) != len(self.control_loops):
print('Supply initial conditions for each control loop.')
return
for x0i, cl in zip(x0, self.control_loops):
if len(x0i) != cl.abstraction.plant.nx:
print(f'Initial condition dimension ({len(x0i)}) does not correspond to the expected ({cl.abstraction.plant.nx}).')
return
x0 = [np.array(x) for x in x0]
# Clip Ts such that it becomes a multiple of h
t = int(Ts/self.h)
Ts = t*self.h
# 3D Matrix storing the evolution of the continuous states over time.
x = [[np.array(x0i)] for x0i in x0]
xhat = [[np.array(x0i)] for x0i in x0]
u_hist = [[] for i in range(0, self.ns)] # continuous inputs
# Evolution of the traffic model regions over time
regions = [[cl.abstraction.region_of_state(x0i)] for (x0i, cl) in zip(x0, self.control_loops)]
for i in range(0, self.ns):
print(f'Controlloop {i} starts in region {regions[i][0]}')
# 3D Matrix storing the evolution of the transitions sytem states over time.
if self.state2block is None:
s = [[f"T{'_'.join([str(l) for l in i[0]])}"] for i in regions]
else:
b = [self.state2block[j][f"T{'_'.join([str(l) for l in i[0]])}"] for (i,j) in zip(regions, range(0, self.ns))]
s = [[b[i]] for i in range(0, self.ns)]
v = [[[]] for i in range(0, self.ns)] # inputs (w/t/lw)
TriggerTimes = [[0] for i in range(0, self.ns)]
TriggerTimesEarly = [[] for i in range(0, self.ns)]
CollisionTimes = {}
N = int(Tmax/Ts) # Number of samples
import scipy
from scipy import integrate
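        # Precompute, for each plant, the integral of expm(A*s) over one sample
        # period Ts (used for the zero-order-hold discretization of the input).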
I = [scipy.integrate.quad_vec(lambda s: scipy.linalg.expm(cl.abstraction.plant.A * s), 0, Ts)[0] for cl in self.control_loops]
for t in range(0, N):
# Step 1: Update the continuous states
utemp = [cl.abstraction.controller.K @ xn[-1] for (cl, xn) in zip(self.control_loops, xhat)]
xn = [scipy.linalg.expm(cl.abstraction.plant.A * Ts) @ xi[-1] + integral @ cl.abstraction.plant.B @ ui
for (cl, xi, ui, integral) in zip(self.control_loops, x, utemp, I)]
for i in range(0, self.ns):
x[i].append(xn[i])
for i in range(0, self.ns):
xhat[i].append(xhat[i][-1])
for i in range(0, self.ns):
u_hist[i].append(utemp[i])
## Step 2: Check triggering conditions
# If a scheduler is defined use that
if self.scheduler is not None and use_scheduler:
ss = tuple(q[-1] for q in s)
u_ts = self.scheduler[ss]
if random_inputs:
u_ts = random.choice(list(u_ts))
else:
all_w = tuple('w' for i in range(0, self.ns))
if all_w in u_ts:
u_ts = all_w
else:
u_ts = random.choice(list(u_ts))
for i in range(0, self.ns):
if u_ts[i] == 't':
reg = self.control_loops[i].abstraction.region_of_state(x[i][-1])
si = f"T{'_'.join([str(l) for l in reg])}"
if self.state2block is not None:
si = self.state2block[i][si]
s[i].append(si)
xhat[i][-1] = xn[i]
regions[i].append(reg)
if t * Ts - TriggerTimes[i][-1] < self.control_loops[i].kmax:
TriggerTimesEarly[i].append(t * Ts)
else:
TriggerTimes[i].append(t * Ts)
else:
# reg = self.control_loops[i].abstraction.region_of_state(x[i][-1])
regions[i].append(regions[i][-1])
sn = self.control_loops[i].post({s[i][-1]}, u_ts[i])
sn = random.choice(list(sn))
s[i].append(sn)
# for
else:
triggers = set()
for i in range(0, self.ns):
xx = np.block([x[i][-1].T, xhat[i][-1]])
if xx.T @ self.control_loops[i].abstraction.trigger.Qbar @ xx.T > 0 or (t*Ts - TriggerTimes[i][-1]) >= self.h*self.control_loops[i].kmax:
xhat[i][-1] = xn[i]
TriggerTimes[i].append(t*Ts)
triggers.add(i)
reg = self.control_loops[i].abstraction.region_of_state(x[i][-1])
regions[i].append(reg)
if len(triggers) > 1:
CollisionTimes[t * Ts] = triggers
for i in range(0, self.ns):
TriggerTimes[i].pop(-1)
import matplotlib.pyplot as plt
name = 'safety_scheduler_'
if not use_scheduler:
name = 'no_scheduler_'
dur = np.arange(0, Ts * N, Ts)
for i in range(0, self.ns):
plt.plot(dur, x[i][0:len(dur)], '--')
plt.gca().set_prop_cycle(None)
plt.plot(dur, xhat[i][0:len(dur)])
plt.title(f'Controlloop {i + 1}: $x(t)$ and $x_e(t)$.')
plt.savefig(os.path.join(save_path, f'{name}simulation_Controlloop_{i + 1}_states.pdf'))
plt.show()
plt.clf()
for i in range(0, self.ns):
plt.plot(dur, u_hist[i][0:len(dur)])
plt.title(f'Controlloop {i + 1}: $u(t)$.')
plt.savefig(os.path.join(save_path, f'{name}simulation_Controlloop_{i + 1}_inputs.pdf'))
plt.show()
plt.clf()
for i in range(0, self.ns):
plt.plot(TriggerTimes[i], i * np.ones(len(TriggerTimes[i])), 'x')
plt.plot(TriggerTimesEarly[i], i * np.ones(len(TriggerTimesEarly[i])), 'o')
for t, ii in CollisionTimes.items():
for i in ii:
plt.plot(t, i, 'dk')
plt.title('Trigger times')
plt.yticks(range(0, self.ns), [f'Controlloop {i}' for i in range(1, self.ns + 1)])
plt.savefig(os.path.join(save_path, f'{name}simulation_trigger_events.pdf'))
plt.show()
plt.clf()
for i in range(0, self.ns):
plt.plot(dur, regions[i][0:len(dur)])
plt.title('Traffic Model Regions')
plt.legend([f'Controlloop {i}' for i in range(1, self.ns + 1)], loc='upper left')
plt.savefig(os.path.join(save_path, f'{name}simulation_traffic_model_regions.pdf'))
plt.show()
plt.clf()
""" Private Helper Methods """
def __safety_operator_trap(self, Z:dict):
F = dict()
for (x, v) in Z.items():
if x == 'trap':
continue
else:
for (uk, uv) in self.actions.items():
p = self.transitions[x][uk]
if len(p) == 0:
continue
elif not set(Z.keys()).issuperset(p):
continue
else:
F.update({x: v})
return F
def __safety_operator(self, W: dict, Z: dict):
"""
:param W:
:param Z:
:return:
"""
F = dict()
for (x, v) in Z.items():
if x not in W:
continue
else:
for (uk, uv) in self.actions.items():
p = self.transitions[x][uk]
if len(p) == 0:
continue
elif not set(Z.keys()).issuperset(p):
continue
else:
F.update({x: v})
return F
@staticmethod
def _c_dict(l: list):
"""
Combination of list of dicts. I.e. l = [{a:1, b:2}, {c:3, d:4}]
-> res = {(a,c):(1,3), (a,d):(1,4)...}
:param l: List of dict's
:return:
"""
a = [[key for key in d] for d in l]
b = [[val for val in d.values()] for d in l]
la = itertools.product(*a)
lb = itertools.product(*b)
return {a: b for (a, b) in zip(la, lb)}
|
py
|
1a5ac7d5cf7f4dfd1e86ec95e9fd235d3c5737ee
|
# -*- coding: utf-8 -*-
import prefect # base import is required for prefect context
from prefect import task, Flow, Parameter
from prefect.storage import Module
from simmate.calculators.vasp.tasks.relaxation.third_party.mit import MITRelaxationTask
from simmate.workflows.common_tasks.all import load_input
from simmate.configuration.django import setup_full # sets database connection
from simmate.database.local_calculations.relaxation import (
MITIonicStep,
MITRelaxation,
)
# --------------------------------------------------------------------------------------
# THIS SECTION SETS UP OUR TASKS
# we initialize the task here so we can use it in the Prefect flow below
relax_structure = MITRelaxationTask()
@task
def save_results(result_and_corrections):
# split our results and corrections (which are given as a tuple) into
# separate variables
vasprun, corrections = result_and_corrections
# initialize the MITRelaxation with the Prefect run info
calculation = MITRelaxation.from_prefect_context(prefect.context)
calculation.save()
# now update the calculation entry with our results
calculation.update_from_vasp_run(vasprun, corrections, MITIonicStep)
return calculation.id
# --------------------------------------------------------------------------------------
# THIS SECTION PUTS OUR TASKS TOGETHER TO MAKE A WORKFLOW
# now make the overall workflow
with Flow("MIT Relaxation") as workflow:
# These are the input parameters for the overall workflow
structure = Parameter("structure")
vasp_command = Parameter("vasp_command", default="vasp_std > vasp.out")
# load the structure to a pymatgen object
structure_pmg = load_input(structure)
# Run the calculation after we have saved the input
result_and_corrections = relax_structure(
structure=structure_pmg,
command=vasp_command,
)
# pass these results and corrections into our final task which saves
# everything to the database
calculation_id = save_results(result_and_corrections)
# For when this workflow is registered with Prefect Cloud, we indicate that
# it can be imported from a python module. Note __name__ provides the path
# to this module.
workflow.storage = Module(__name__)
# --------------------------------------------------------------------------------------
|
py
|
1a5ac8097bac83a43430365ca34a189126bd30f2
|
# -*- coding: utf-8 -*-
""" S3 Synchronization: Peer Repository Adapter for ADASHI
@copyright: 2011-2020 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
from gluon import *
from ..s3sync import S3SyncBaseAdapter
# =============================================================================
class S3SyncAdapter(S3SyncBaseAdapter):
"""
ADASHI Synchronization Adapter (passive)
http://www.adashisystems.com
"""
# -------------------------------------------------------------------------
def register(self):
"""
Register this site at the peer repository
@return: True to indicate success, otherwise False
"""
# No registration required (passive adapter)
return True
# -------------------------------------------------------------------------
def login(self):
"""
Login at the peer repository
@return: None if successful, otherwise the error
"""
# No login required (passive adapter)
return None
# -------------------------------------------------------------------------
def pull(self, task, onconflict=None):
"""
Outgoing pull
@param task: the task (sync_task Row)
"""
repository = self.repository
log = repository.log
# Import path
PATH = os.path.join(current.request.folder, "uploads", "adashi_feeds")
# Read names from path
try:
files_list = os.listdir(PATH)
except os.error:
message = "Upload path does not exist or can not be accessed"
log.write(repository_id = repository.id,
resource_name = "mixed",
transmission = log.IN,
mode = log.PUSH,
action = "read files from %s" % PATH,
remote = False,
result = log.FATAL,
message = message,
)
return message, None
# Add path to file names, filter for .xml files, sort by mtime
files = [os.path.join(PATH, f)
for f in files_list if f[-4:] == ".xml"]
files = sorted(filter(os.path.isfile, files), key=os.path.getmtime)
# Strategy and Policies
from ..s3import import S3ImportItem
default_update_policy = S3ImportItem.POLICY.NEWER
default_conflict_policy = S3ImportItem.POLICY.MASTER
strategy = task.strategy
update_policy = task.update_policy or default_update_policy
conflict_policy = task.conflict_policy or default_conflict_policy
        # Default to None so last_sync is always defined for receive()
        last_sync = None
        if update_policy not in ("THIS", "OTHER"):
            last_sync = task.last_pull
# Import files
for f in files:
current.log.debug("ADASHI Sync: importing %s" % f)
try:
with open(f, "r") as source:
result = self.receive([source],
None,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
onconflict=onconflict,
last_sync=last_sync,
mixed=True,
)
except IOError:
continue
# Log the operation
log.write(repository_id = repository.id,
resource_name = "mixed",
transmission = log.IN,
mode = log.PUSH,
action = "import %s" % f,
remote = result["remote"],
result = result["status"],
message = result["message"],
)
# Remove the file
try:
os.remove(f)
except os.error:
current.log.error("ADASHI Sync: can not delete %s" % f)
return None, current.request.utcnow
# -------------------------------------------------------------------------
def push(self, task):
"""
Outgoing push
@param task: the sync_task Row
"""
repository = self.repository
# Log the operation
message = "Push to ADASHI currently not supported"
log = repository.log
log.write(repository_id = repository.id,
resource_name = task.resource_name,
transmission = log.OUT,
mode = log.PUSH,
action = None,
remote = False,
result = log.FATAL,
message = message,
)
output = current.xml.json_message(False, 400, message)
return output, None
# -------------------------------------------------------------------------
def send(self,
resource,
start=None,
limit=None,
msince=None,
filters=None,
mixed=False,
pretty_print=False):
"""
Respond to an incoming pull from a peer repository
@param resource: the resource to be synchronized
@param start: index of the first record to send
@param limit: maximum number of records to send
@param msince: minimum modification date/time for records to send
@param filters: URL filters for record extraction
@param mixed: negotiate resource with peer (disregard resource)
@param pretty_print: make the output human-readable
"""
if not resource or mixed:
msg = "Mixed resource synchronization not supported"
return {"status": self.log.FATAL,
"message": msg,
"response": current.xml.json_message(False, 400, msg),
}
# Export the data as S3XML
stylesheet = os.path.join(current.request.folder,
"static", "formats", "georss", "export.xsl")
output = resource.export_xml(start=start,
limit=limit,
filters=filters,
msince=msince,
stylesheet=stylesheet,
pretty_print=pretty_print,
)
count = resource.results
msg = "Data sent to peer (%s records)" % count
# Set content type header
headers = current.response.headers
headers["Content-Type"] = "text/xml"
return {"status": self.log.SUCCESS,
"message": msg,
"response": output,
}
# -------------------------------------------------------------------------
def receive(self,
source,
resource,
strategy=None,
update_policy=None,
conflict_policy=None,
onconflict=None,
last_sync=None,
mixed=False):
"""
Respond to an incoming push from the peer repository
@param source: the input stream (list of file-like objects)
@param resource: the target resource
@param strategy: the import strategy
@param update_policy: the update policy
@param conflict_policy: the conflict resolution policy
@param onconflict: callback for conflict resolution
@param last_sync: the last synchronization date/time for the peer
@param mixed: negotiate resource with peer (disregard resource)
"""
s3db = current.s3db
xml = current.xml
log = self.log
remote = False
# Sync always has only one source per request
source = source[0]
# Parse the feed
tree = xml.parse(source)
if not tree:
# Parser error
msg = xml.error if xml.error else "Invalid source"
return {"status": log.FATAL,
"message": msg,
"remote": remote,
"response": xml.json_message(False, 400, msg),
}
# Identify feed category
category = tree.findall("//channel/category")
if not category:
msg = "Feed category missing"
return {"status": log.ERROR,
"message": msg,
"remote": remote,
"response": xml.json_message(False, 400, msg),
}
category = category[0].text
# Instantiate target resource after feed category
if category == "AVL":
resource = s3db.resource("pr_group")
elif category == "Incidents":
resource = s3db.resource("event_incident")
resource.configure(oncommit_import_item = self.update_assignments)
else:
msg = "Unknown feed category"
return {"status": log.WARNING,
"message": msg,
"remote": remote,
"response": xml.json_message(False, 400, msg),
}
# Store source data?
repository = self.repository
if repository.keep_source:
self.keep_source(tree, category)
# Import transformation stylesheet
stylesheet = os.path.join(current.request.folder,
"static",
"formats",
"georss",
"import.xsl",
)
# Import parameters
if onconflict:
onconflict_callback = lambda item: onconflict(item,
repository,
resource,
)
else:
onconflict_callback = None
ignore_errors = True
# Import
# Flag to let audit know the repository
s3 = current.response.s3
s3.repository_id = self.repository.id
output = resource.import_xml(tree,
format = "xml",
stylesheet = stylesheet,
ignore_errors = ignore_errors,
strategy = strategy,
update_policy = update_policy,
conflict_policy = conflict_policy,
last_sync = last_sync,
onconflict = onconflict_callback,
source_type = "adashi",
)
s3.repository_id = None
# Process validation errors, if any
if resource.error_tree is not None:
result = log.WARNING if ignore_errors else log.FATAL
message = "%s" % resource.error
for element in resource.error_tree.findall("resource"):
error_msg = element.get("error", "unknown error")
error_fields = element.findall("data[@error]")
if error_fields:
for field in error_fields:
error_msg = field.get("error", "unknown error")
if error_msg:
msg = "(UID: %s) %s.%s=%s: %s" % \
(element.get("uuid", None),
element.get("name", None),
field.get("field", None),
field.get("value", field.text),
error_msg)
message = "%s, %s" % (message, msg)
else:
msg = "(UID: %s) %s: %s" % \
(element.get("uuid", None),
element.get("name", None),
error_msg)
message = "%s, %s" % (message, msg)
else:
result = log.SUCCESS
message = "Data received from peer"
return {"status": result,
"remote": remote,
"message": message,
"response": output,
}
# -------------------------------------------------------------------------
@staticmethod
def update_assignments(item):
"""
Deactivate all previous unit assignments (event_team) for
an incident which are not in this feed update.
@param item: the import item
@note: this assumes that the list of incident resources in
the feed update is complete (confirmed for ADASHI)
@note: must not deactivate assignments which are newer
than the feed update (Sync policy NEWER)
"""
if item.tablename == "event_incident" and \
item.id and \
item.method == item.METHOD.UPDATE:
job = item.job
mtime = item.data.get("modified_on")
if not job or not mtime:
return
get_item = job.items.get
# Get the unit names of all current assignments in the feed
team_names = set()
add_name = team_names.add
for citem in item.components:
if citem.tablename == "event_team":
for ref in citem.references:
entry = ref.entry
team_item_id = entry.item_id
if entry.tablename == "pr_group" and team_item_id:
team_item = get_item(team_item_id)
team_name = team_item.data.get("name")
if team_name:
add_name(team_name)
break
s3db = current.s3db
ltable = s3db.event_team
gtable = s3db.pr_group
# Get all active assignments in the database which are older
# than the feed update and which are not in the feed update,
# and deactivate them
left = gtable.on(ltable.group_id == gtable.id)
query = (ltable.incident_id == item.id) & \
(ltable.modified_on <= mtime) & \
(ltable.status == 3) & \
(~(gtable.name.belongs(team_names)))
rows = current.db(query).select(ltable.id, left=left)
inactive = set(row.id for row in rows)
current.db(ltable.id.belongs(inactive)).update(status=4)
# -------------------------------------------------------------------------
def keep_source(self, tree, category):
"""
Helper method to store source data in file system
@param tree: the XML element tree of the source
@param category: the feed category
"""
repository = self.repository
# Log the operation
log = repository.log
log.write(repository_id = repository.id,
resource_name = None,
transmission = log.IN,
mode = log.PUSH,
action = "receive",
remote = False,
result = log.WARNING,
message = "'Keep Source Data' active for this repository!",
)
request = current.request
folder = os.path.join(request.folder, "uploads", "adashi")
dt = request.utcnow.replace(microsecond=0).isoformat()
dt = dt.replace(":", "").replace("-", "")
filename = os.path.join(folder,
"%s_%s.xml" % (category, dt),
)
if not os.path.exists(folder):
try:
os.mkdir(folder)
except OSError:
return
if filename:
try:
with open(filename, "w") as f:
tree.write(f, pretty_print=True)
except IOError:
return
# End =========================================================================
|
py
|
1a5aca461b2af7b354a7d152c81aeca9817eed82
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'COVID19_Outbreak_Simulation.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py
|
1a5acaaaabd9b87f5bfb50f76b6b9daa67b8eed2
|
"""
Luna API.
API is written via FastAPI.
"""
from fastapi import FastAPI, HTTPException, Response
from pydantic import BaseModel
from typing import List, Optional
from natsort import natsorted, ns
from luna.db.db_util import DbConnection
from luna.db import bucket
from luna.db import vignette
from luna.db import cellular_annotation as ann
from luna.db import scatter_plot as sca
from luna.db.base import DB_DELIM
from starlette.middleware.cors import CORSMiddleware
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class Bucket(BaseModel):
"""Bucket Object."""
slug: str
name: str
description: Optional[str] = None
url: Optional[str] = None
class Vignettes(BaseModel):
"""Vignettes Object."""
content: str
class Annotation(BaseModel):
"""Annotation Object."""
slug: str
label: str
class AnnotationBundle(Annotation):
"""Annotation Bundle Object."""
values_distinct: List[str]
values_ordered: List[str]
class ExpressionBundle(BaseModel):
"""Expression Bundle Object."""
gene: str
max_expression: float
values_ordered: List[float]
class Coordinate(BaseModel):
"""Coordinate Object."""
x: float
y: float
@app.get("/buckets", response_model=List[Bucket])
def get_buckets():
"""Get list of all data buckets."""
session = _init_db_connection()
try:
sql_bucket_list = session.query(bucket.Bucket).all()
api_bucket_list = []
for sql_bucket in sql_bucket_list:
api_bucket = Bucket(
name=sql_bucket.name,
description=sql_bucket.description,
url=sql_bucket.url,
slug=sql_bucket.slug,
)
api_bucket_list.append(api_bucket)
return api_bucket_list
finally:
session.close()
@app.get("/annotation_list/{bucket_slug}", response_model=List[Annotation])
def get_annotation_list(bucket_slug: str):
"""Get the list of annotations for the specified bucket."""
session = _init_db_connection()
target_type = ann.CellularAnnotationType.OTHER
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record_list = (
session.query(ann.CellularAnnotation)
.filter_by(bucket_id=bucket_id, type=target_type)
.order_by(ann.CellularAnnotation.slug)
.all()
)
if len(record_list) == 0:
raise HTTPException(status_code=404, detail="No annotations.")
annotation_list = []
for r in record_list:
current_annotation = Annotation(label=r.label, slug=r.slug)
annotation_list.append(current_annotation)
return annotation_list
finally:
session.close()
@app.get(
"/annotation/{bucket_slug}/{annotation_slug}",
response_model=AnnotationBundle,
)
def get_annotation_values(bucket_slug: str, annotation_slug: str):
"""Get the list of all values for the specified annotation."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = session.query(ann.CellularAnnotation)
record = record.filter_by(
bucket_id=bucket_id, slug=annotation_slug
).first()
if record is None:
raise HTTPException(status_code=404, detail="ID not found.")
value_list = record.value_list.split(DB_DELIM)
distinct_list = list({value.strip() for value in value_list})
distinct_list = natsorted(distinct_list, alg=ns.IGNORECASE)
current_annotation = AnnotationBundle(
label=record.label,
slug=record.slug,
values_distinct=distinct_list,
values_ordered=value_list,
)
return current_annotation
finally:
session.close()
@app.get("/expression/{bucket_slug}/{gene}", response_model=ExpressionBundle)
def get_expression_values(bucket_slug: str, gene: str):
"""Get the expression data for the specified gene."""
gene = gene.lower()
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(ann.CellularAnnotation.value_list)
.filter_by(bucket_id=bucket_id, slug=gene)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
value_list = record.value_list.split(DB_DELIM)
expression_bundle = ExpressionBundle(
gene=gene,
            max_expression=max(float(v) for v in value_list),
values_ordered=value_list,
)
return expression_bundle
finally:
session.close()
@app.get("/umap/{bucket_slug}", response_model=List[Coordinate])
def get_umap_coordinates(bucket_slug: str):
"""Get the UMAP coordinates for the specified bucket."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(sca.ScatterPlot.coordinate_list)
.filter_by(bucket_id=bucket_id, type=sca.ScatterPlotType.UMAP)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
return _extract_coordinates(record)
finally:
session.close()
@app.get("/tsne/{bucket_slug}", response_model=List[Coordinate])
def get_tsne_coordinates(bucket_slug: str):
"""Get the TSNE coordinates for the specified bucket."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(sca.ScatterPlot.coordinate_list)
.filter_by(bucket_id=bucket_id, type=sca.ScatterPlotType.TSNE)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
return _extract_coordinates(record)
finally:
session.close()
@app.get("/vignettes/{bucket_slug}")
def get_vignettes(bucket_slug: str):
"""Get all Vignettes for the specified bucket."""
session = _init_db_connection()
try:
bucket_id = _get_bucket_id(session, bucket_slug)
record = (
session.query(vignette.Vignette)
.filter_by(bucket_id=bucket_id)
.first()
)
if record is None:
raise HTTPException(status_code=404, detail="No data found.")
return Response(content=record.json, media_type="application/json")
finally:
session.close()
def _get_bucket_id(session, bucket_slug):
record = session.query(bucket.Bucket).filter_by(slug=bucket_slug).first()
if record:
return record.id
else:
raise HTTPException(status_code=404, detail="Bucket not found")
def _extract_coordinates(record):
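    # The coordinate_list column stores points as delimiter-separated "x,y" pairs.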
response_list = []
value_list = record.coordinate_list.split(DB_DELIM)
for pair_str in value_list:
if len(pair_str) > 0:
parts = pair_str.split(",")
current_value = Coordinate(x=float(parts[0]), y=float(parts[1]))
response_list.append(current_value)
return response_list
def _init_db_connection():
db_connection = DbConnection()
return db_connection.session
|
py
|
1a5acc376d402c59342000c5babf558e6fe668e7
|
from pythagoras import calculate_next_triplets
from queue import SimpleQueue
def p39(triplet_sum_stop: int) -> int:
initial_triplet = (3, 4, 5)
q = SimpleQueue()
q.put(initial_triplet)
triplet_sum_counts = [0] * triplet_sum_stop
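    # Breadth-first traversal over Pythagorean triplets generated from (3, 4, 5);
    # for each triplet whose perimeter is below the limit, that perimeter and all
    # of its multiples below the limit are counted as right-triangle perimeters.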
while not q.empty():
triplet = q.get()
triplet_sum = sum(triplet)
if triplet_sum < triplet_sum_stop:
for triplet_sum_multiple in range(triplet_sum, triplet_sum_stop, triplet_sum):
triplet_sum_counts[triplet_sum_multiple] += 1
a, b, c = calculate_next_triplets(triplet)
q.put(a)
q.put(b)
q.put(c)
largest_triplet_sum = 0
largest_triplet_sum_count = 0
for triplet_sum in range(12, triplet_sum_stop):
triplet_sum_count = triplet_sum_counts[triplet_sum]
if triplet_sum_count > largest_triplet_sum_count:
largest_triplet_sum_count = triplet_sum_count
largest_triplet_sum = triplet_sum
return largest_triplet_sum
if __name__ == '__main__':
print(p39(121))
print(p39(1001))
|
py
|
1a5acc82f484704c054e6997ac5804695d49f4c1
|
import time
import os
import sys
if len(sys.argv) != 2:
    print("Please specify filename to read", file=sys.stderr)
    sys.exit(1)

filename = sys.argv[1]

if not os.path.isfile(filename):
    print("Given file: \"%s\" is not a file" % filename, file=sys.stderr)
    sys.exit(1)
with open(filename,'r') as file:
# Move to the end of file
filesize = os.stat(filename)[6]
file.seek(filesize)
# endlessly loop
while True:
where = file.tell()
# try reading a line
line = file.readline()
# if empty, go back
if not line:
time.sleep(1)
file.seek(where)
else:
            # end='' prevents print from adding a newline, as readline()
            # already kept the one read from the file.
            print(line, end='')
|
py
|
1a5acdceb52d1d1c3ad09468c8ee66a54b0e546a
|
from examples.moodle_client import simple_client
def calendar():
# get a moodle calendar
# Returns a List of Events
calendar_events = client.calendar()
for event in calendar_events:
print(event.name, end=' ')
print(event.eventtype, end=' ')
print(event.timesort)
# its also possible to limit the events
calendar_events = client.calendar(limit=10)
print(calendar_events)
if __name__ == '__main__':
client = simple_client()
calendar()
|
py
|
1a5ace82a24ee148a890b50f01b07f2239a3361e
|
import smart_imports
smart_imports.all()
class BaseEffect(object):
__slots__ = ()
def __init__(self):
pass
def apply_positive(self, actor_type, actor_name, place, person, positive_heroes, negative_heroes, job_power):
raise NotImplementedError()
def apply_negative(self, actor_type, actor_name, place, person, positive_heroes, negative_heroes, job_power):
raise NotImplementedError()
def positive_effect_value(self, job_power):
raise NotImplementedError()
def negative_effect_value(self, job_power):
raise NotImplementedError()
def short_effect_description(self, value):
raise NotImplementedError()
def effect_type(self):
raise NotImplementedError()
def power_required(self, normal_power):
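        # Effects with a higher priority require proportionally less job power.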
return int(math.ceil(normal_power / self.effect_type().priority))
def apply_to_heroes(self, actor_type, effect, method_names, method_kwargs, positive_heroes, negative_heroes, direction):
from the_tale.game.heroes import logic as heroes_logic
heroes_to_accounts = heroes_logic.get_heroes_to_accounts_map(positive_heroes | negative_heroes)
positive_kwargs = dict(message_type=self.message_type(actor_type, effect, direction, 'friends'), **method_kwargs)
after_update_operations = []
for hero_id in positive_heroes:
if hero_id not in heroes_to_accounts:
continue # skip removed fast accounts
operation = self.invoke_hero_method(account_id=heroes_to_accounts[hero_id],
hero_id=hero_id,
method_name=method_names[0],
method_kwargs=positive_kwargs)
after_update_operations.append(operation)
negative_kwargs = dict(message_type=self.message_type(actor_type, effect, direction, 'enemies'), **method_kwargs)
for hero_id in negative_heroes:
if hero_id not in heroes_to_accounts:
continue # skip removed fast accounts
operation = self.invoke_hero_method(account_id=heroes_to_accounts[hero_id],
hero_id=hero_id,
method_name=method_names[1],
method_kwargs=negative_kwargs)
after_update_operations.append(operation)
return after_update_operations
def invoke_hero_method(self, account_id, hero_id, method_name, method_kwargs):
logic_task = heroes_postponed_tasks.InvokeHeroMethodTask(hero_id=hero_id,
method_name=method_name,
method_kwargs=method_kwargs)
task = PostponedTaskPrototype.create(logic_task)
return lambda: amqp_environment.environment.workers.supervisor.cmd_logic_task(account_id=account_id, task_id=task.id)
def message_type(self, actor, effect, direction, group):
return 'job_diary_{actor}_{effect}_{direction}_{group}'.format(actor=actor,
effect=effect.name.lower(),
direction=direction,
group=group)
class ChangePlaceAttribute(BaseEffect):
__slots__ = ('attribute', 'base_value')
def __init__(self, attribute, base_value, **kwargs_view):
super(ChangePlaceAttribute, self).__init__(**kwargs_view)
self.attribute = attribute
self.base_value = base_value
def effect_delta(self, value):
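        # Per-step decay rate chosen so the effect wears off over
        # PLACE_STANDARD_EFFECT_LENGTH days (assuming 24 steps per day).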
return value * (1.0 / (24 * c.PLACE_STANDARD_EFFECT_LENGTH))
def positive_effect_value(self, job_power):
return self.base_value * job_power
def negative_effect_value(self, job_power):
return -self.base_value * job_power * c.JOB_NEGATIVE_POWER_MULTIPLIER
def short_effect_description(self, value):
return '{} от {}{} до 0{} на {} дней'.format(self.attribute.text,
self.attribute.formatter(value),
self.attribute.verbose_units,
self.attribute.verbose_units,
c.PLACE_STANDARD_EFFECT_LENGTH)
def effect_type(self):
return getattr(EFFECT, 'PLACE_{}'.format(self.attribute.name))
def apply_positive(self, actor_type, actor_name, place, person, positive_heroes, negative_heroes, job_power):
from the_tale.game.places import logic as places_logic
value = self.positive_effect_value(job_power)
delta = self.effect_delta(value)
places_logic.register_effect(place_id=place.id,
attribute=self.attribute,
value=value,
name=actor_name,
delta=delta,
refresh_effects=True,
refresh_places=False,
info={'source': 'jobs'})
return self.apply_to_heroes(actor_type=actor_type,
effect=self.effect_type(),
method_names=('job_message', 'job_message'),
method_kwargs={'place_id': place.id,
'person_id': person.id if person else None},
positive_heroes=positive_heroes,
negative_heroes=negative_heroes,
direction='positive')
def apply_negative(self, actor_type, actor_name, place, person, positive_heroes, negative_heroes, job_power):
from the_tale.game.places import logic as places_logic
value = self.negative_effect_value(job_power)
delta = self.effect_delta(abs(value))
places_logic.register_effect(place_id=place.id,
attribute=self.attribute,
value=value,
name=actor_name,
delta=delta,
refresh_effects=True,
refresh_places=False,
info={'source': 'jobs'})
return self.apply_to_heroes(actor_type=actor_type,
effect=self.effect_type(),
method_names=('job_message', 'job_message'),
method_kwargs={'place_id': place.id,
'person_id': person.id if person else None},
positive_heroes=positive_heroes,
negative_heroes=negative_heroes,
direction='negative')
class HeroMethod(BaseEffect):
__slots__ = ('effect_name', 'method_name',)
EFFECT_NAME = NotImplemented
METHOD_NAME = NotImplemented
def effect_type(self):
return getattr(EFFECT, self.EFFECT_NAME)
def apply_positive(self, actor_type, actor_name, place, person, positive_heroes, negative_heroes, job_power):
return self.apply_to_heroes(actor_type=actor_type,
effect=self.effect_type(),
method_names=(self.METHOD_NAME, 'job_message'),
method_kwargs={'place_id': place.id,
'person_id': person.id if person else None,
'effect_value': self.positive_effect_value(job_power)},
positive_heroes=positive_heroes,
negative_heroes=negative_heroes,
direction='positive')
def apply_negative(self, actor_type, actor_name, place, person, positive_heroes, negative_heroes, job_power):
return self.apply_to_heroes(actor_type=actor_type,
effect=self.effect_type(),
method_names=('job_message', self.METHOD_NAME),
method_kwargs={'place_id': place.id,
'person_id': person.id if person else None,
'effect_value': self.negative_effect_value(job_power)},
positive_heroes=positive_heroes,
negative_heroes=negative_heroes,
direction='negative')
class HeroMoney(HeroMethod):
__slots__ = ()
EFFECT_NAME = 'HERO_MONEY'
METHOD_NAME = 'job_money'
TARGET_LEVEL = f.lvl_after_time(3 * 365 * 24)
def money(self, job_power):
return max(1, int(math.ceil(f.normal_action_price(self.TARGET_LEVEL) * job_power * c.NORMAL_JOB_LENGTH)))
def positive_effect_value(self, job_power):
return self.money(job_power)
def negative_effect_value(self, job_power):
return self.money(job_power * c.JOB_NEGATIVE_POWER_MULTIPLIER)
def short_effect_description(self, value):
return f'герой получает монеты: {value}'
class HeroExperience(HeroMethod):
__slots__ = ()
EFFECT_NAME = 'HERO_EXPERIENCE'
METHOD_NAME = 'job_experience'
def experience(self, job_power):
from the_tale.game.places import storage as places_storage
return max(1, int(math.ceil(f.experience_for_quest__real(places_storage.places.expected_minimum_quest_distance()) *
job_power *
c.NORMAL_JOB_LENGTH)))
def positive_effect_value(self, job_power):
return self.experience(job_power)
def negative_effect_value(self, job_power):
return self.experience(job_power * c.JOB_NEGATIVE_POWER_MULTIPLIER)
def short_effect_description(self, value):
return f'герой получает опыт: {value}'
class HeroArtifact(HeroMethod):
__slots__ = ()
EFFECT_NAME = 'HERO_ARTIFACT'
METHOD_NAME = 'job_artifact'
def priority(self, job_power):
return {artifacts_relations.RARITY.RARE.value: c.RARE_ARTIFACT_PROBABILITY,
artifacts_relations.RARITY.EPIC.value: c.EPIC_ARTIFACT_PROBABILITY * job_power}
def positive_effect_value(self, job_power):
return self.priority(job_power)
def negative_effect_value(self, job_power):
return self.priority(job_power * c.JOB_NEGATIVE_POWER_MULTIPLIER)
def short_effect_description(self, value):
percents = utils_logic.normalize_dict(dict(value))
rare_percents = round(percents[artifacts_relations.RARITY.RARE.value] * 100)
epic_percents = round(percents[artifacts_relations.RARITY.EPIC.value] * 100)
return f'герой получает редкий ({rare_percents}%) или эпический ({epic_percents}%) артефакт'
class HeroCards(HeroMethod):
__slots__ = ()
EFFECT_NAME = 'HERO_CARDS'
METHOD_NAME = 'job_cards'
def cards_number(self, job_power):
return max(1, int(math.ceil(24.0 / tt_cards_constants.NORMAL_RECEIVE_TIME * c.NORMAL_JOB_LENGTH * job_power)))
def positive_effect_value(self, job_power):
return self.cards_number(job_power)
def negative_effect_value(self, job_power):
return self.cards_number(job_power * c.JOB_NEGATIVE_POWER_MULTIPLIER)
def short_effect_description(self, value):
return f'Хранитель получает карты судьбы: {value}'
def place_attribute(id, attribute_name, base_value, attribute_text, priority):
attribute = getattr(places_relations.ATTRIBUTE, attribute_name)
return ('PLACE_{}'.format(attribute_name),
id,
f'{attribute.text} для города',
ChangePlaceAttribute(attribute=attribute, base_value=base_value),
EFFECT_GROUP.ON_PLACE,
f'При удачном завершении проекта, временно улучшает {attribute_text} города, в случае неудачи — ухудшает.',
priority)
def hero_profit(id, EffectClass, text, priority, description):
return (EffectClass.EFFECT_NAME,
id,
text,
EffectClass(),
EFFECT_GROUP.ON_HEROES,
description,
priority)
class EFFECT_GROUP(rels_django.DjangoEnum):
priority = rels.Column(unique=False)
records = (('ON_PLACE', 0, 'для города', 1),
('ON_HEROES', 1, 'для героев', 1))
class EFFECT(rels_django.DjangoEnum):
logic = rels.Column(single_type=False)
group = rels.Column(unique=False)
description = rels.Column()
priority = rels.Column(unique=False, single_type=False)
records = (place_attribute(1, 'PRODUCTION', base_value=c.JOB_PRODUCTION_BONUS, attribute_text='производство', priority=1),
place_attribute(2, 'SAFETY', base_value=c.JOB_SAFETY_BONUS, attribute_text='безопасность', priority=0.5),
place_attribute(3, 'TRANSPORT', base_value=c.JOB_TRANSPORT_BONUS, attribute_text='транспорт', priority=0.5),
place_attribute(4, 'FREEDOM', base_value=c.JOB_FREEDOM_BONUS, attribute_text='свободу', priority=1),
place_attribute(5, 'STABILITY', base_value=c.JOB_STABILITY_BONUS, attribute_text='стабильность', priority=0.25),
hero_profit(6, HeroMoney, 'золото для героев', 1,
'В случае удачного завершения проекта, высылает деньги помогающим героям из ближнего круга. В случае неудачи деньги достаются мешающим героям.'),
hero_profit(7, HeroArtifact, 'артефакты для героев', 0.75,
'В случае удачного завершения проекта, высылает по артефакту помогающим героям из ближнего круга. В случае неудачи артефакты достаются мешающим героям.'),
hero_profit(8, HeroExperience, 'опыт для героев', 1,
'В случае удачного завершения проекта, помогающие герои из ближнего круга получают немного опыта. В случае неудачи опыт достаётся мешающим героям.'),
hero_profit(9, HeroCards, 'карты судьбы для Хранителя', 0.5,
'В случае удачного завершения проекта, Хранители помогающих героев из ближнего круга получают карты судьбы. В случае неудачи карты достаются Хранителям мешающих героев.'),
place_attribute(10, 'CULTURE', base_value=c.JOB_STABILITY_BONUS, attribute_text='культуру', priority=1))
|
py
|
1a5ace9fdb69e1e579259b9abe164595372baeea
|
# Generated by Django 2.1.15 on 2020-03-02 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0010_auto_20200302_1224'),
]
operations = [
migrations.AlterField(
model_name='news',
name='slug',
field=models.SlugField(default='', editable=False),
),
]
|
py
|
1a5acf411ce45fdadfb5bd2ad5cf1865eb1ffd74
|
# -*- coding: utf-8 -*-
class WriterRegistry:
def __init__(self, listener):
self.storage = {}
self.id = 0
self.next_id = 0
self.listener = listener
    def get_id(self, value):
        try:
            return self.storage[value]
        except KeyError:
            # Lazily register unknown values, then return the freshly assigned id.
            self.register(value)
            return self.storage[value]
    def register(self, value):
        if value not in self.storage:
            new_id = self.next_id + 1
            self.next_id += 1
            self.storage[value] = new_id
            self.listener.on_new_registry_entry(value, new_id)
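# --- Usage sketch (added for illustration; not part of the original module) ---
# A hypothetical listener: any object exposing on_new_registry_entry(value, id)
# can be passed to WriterRegistry.
class _ExamplePrintingListener:
    def on_new_registry_entry(self, value, value_id):
        # Called exactly once per newly registered value.
        print('registered', value, 'as id', value_id)
def _example_usage():
    registry = WriterRegistry(_ExamplePrintingListener())
    assert registry.get_id('alice') == 1  # first value triggers registration
    assert registry.get_id('bob') == 2    # next value gets the following id
    assert registry.get_id('alice') == 1  # repeated lookups reuse the stored id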
|
py
|
1a5acfb3a5719fe353fb38905e37947b47e4b6be
|
# Generated by Django 2.2.24 on 2021-12-08 17:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20210420_1721'),
]
operations = [
migrations.CreateModel(
name='ConfirmEmailToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='When was this token generated')),
('key', models.CharField(db_index=True, max_length=64, unique=True, verbose_name='Key')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='confirm_email_tokens', to=settings.AUTH_USER_MODEL, verbose_name='The User which is associated to this password reset token')),
],
options={
'verbose_name': 'Токен подтверждения Email',
'verbose_name_plural': 'Токены подтверждения Email',
},
),
]
|
py
|
1a5ad00ca8bcec0ef3d5387f895b2ecf77ed70e5
|
"""Host API required for Work Files.
# TODO @iLLiCiT implement functions:
has_unsaved_changes
"""
from openpype.pipeline import (
HOST_WORKFILE_EXTENSIONS,
legacy_io,
)
from .lib import (
execute_george,
execute_george_through_file
)
from .pipeline import save_current_workfile_context
def open_file(filepath):
"""Open the scene file in Blender."""
george_script = "tv_LoadProject '\"'\"{}\"'\"'".format(
filepath.replace("\\", "/")
)
return execute_george_through_file(george_script)
def save_file(filepath):
"""Save the open scene file."""
# Store context to workfile before save
context = {
"project": legacy_io.Session["AVALON_PROJECT"],
"asset": legacy_io.Session["AVALON_ASSET"],
"task": legacy_io.Session["AVALON_TASK"]
}
save_current_workfile_context(context)
# Execute george script to save workfile.
george_script = "tv_SaveProject {}".format(filepath.replace("\\", "/"))
return execute_george(george_script)
def current_file():
"""Return the path of the open scene file."""
george_script = "tv_GetProjectName"
return execute_george(george_script)
def has_unsaved_changes():
"""Does the open scene file have unsaved changes?"""
return False
def file_extensions():
"""Return the supported file extensions for Blender scene files."""
return HOST_WORKFILE_EXTENSIONS["tvpaint"]
def work_root(session):
"""Return the default root to browse for work files."""
return session["AVALON_WORKDIR"]
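# --- Usage sketch (added for illustration; not part of the original host API) ---
# Rough outline of how a workfiles tool might call into this module. The
# `session` dict and `workfile_path` arguments below are assumptions for the example.
def _example_save_and_reopen(session, workfile_path):
    root = work_root(session)  # e.g. the AVALON_WORKDIR for the current task
    print('browsing work files under', root)
    save_file(workfile_path)   # stores the task context, then runs tv_SaveProject
    return open_file(workfile_path)  # reopens the project via tv_LoadProject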
|
py
|
1a5ad084c895e9d32382e69dabfc05a8764374b6
|
# Generated by Django 3.2.12 on 2022-03-05 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='content',
field=models.TextField(default='sailu'),
preserve_default=False,
),
migrations.AddField(
model_name='article',
name='title',
field=models.TextField(default='nada'),
preserve_default=False,
),
]
|
py
|
1a5ad0aca7a9d8a7067e1738a732fca1225b00a2
|
from setuptools import setup, find_packages
import sys
if sys.version_info.major != 3:
print('This Python is only compatible with Python 3, but you are running '
'Python {}. The installation will likely fail.'.format(sys.version_info.major))
setup(name='baselines',
packages=[package for package in find_packages()
if package.startswith('baselines')],
install_requires=[
'gym[atari,classic_control,robotics,mujoco]',
'scipy',
'tqdm',
'joblib',
'zmq',
'dill',
'progressbar2',
'mpi4py',
'cloudpickle',
'tensorflow-gpu==1.6.0',
'click',
],
description='OpenAI baselines: high quality implementations of reinforcement learning algorithms',
author='OpenAI',
url='https://github.com/openai/baselines',
author_email='[email protected]',
version='0.1.5')
|
py
|
1a5ad12444ff84ae731de063787e716178ccc32e
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import signal
import sys
import traceback
import threading
from typing import Optional, TYPE_CHECKING
try:
import PyQt5
except Exception:
    sys.exit("Error: Could not import PyQt5. On Linux systems, you may try 'sudo apt-get install python3-pyqt5'")
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import (QApplication, QSystemTrayIcon, QWidget, QMenu,
QMessageBox)
from PyQt5.QtCore import QObject, pyqtSignal, QTimer
import PyQt5.QtCore as QtCore
from electrum_ltc.i18n import _, set_language
from electrum_ltc.plugin import run_hook
from electrum_ltc.base_wizard import GoBack
from electrum_ltc.util import (UserCancelled, profiler,
WalletFileException, BitcoinException, get_new_wallet_name)
from electrum_ltc.wallet import Wallet, Abstract_Wallet
from electrum_ltc.logging import Logger
from .installwizard import InstallWizard, WalletAlreadyOpenInMemory
from .util import get_default_language, read_QIcon, ColorScheme, custom_message_box
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .stylesheet_patcher import patch_qt_stylesheet
from .lightning_dialog import LightningDialog
from .watchtower_dialog import WatchtowerDialog
if TYPE_CHECKING:
from electrum_ltc.daemon import Daemon
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.plugin import Plugins
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class QElectrumApplication(QApplication):
new_window_signal = pyqtSignal(str, object)
class QNetworkUpdatedSignalObject(QObject):
network_updated_signal = pyqtSignal(str, object)
class ElectrumGui(Logger):
@profiler
def __init__(self, config: 'SimpleConfig', daemon: 'Daemon', plugins: 'Plugins'):
set_language(config.get('language', get_default_language()))
Logger.__init__(self)
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum-ltc.desktop')
self.gui_thread = threading.current_thread()
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QElectrumApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.app.setWindowIcon(read_QIcon("electrum-ltc.png"))
# timer
self.timer = QTimer(self.app)
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.network_dialog = None
self.lightning_dialog = None
self.watchtower_dialog = None
self.network_updated_signal_obj = QNetworkUpdatedSignalObject()
self._num_wizards_in_progress = 0
self._num_wizards_lock = threading.Lock()
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Electrum-LTC')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.new_window_signal.connect(self.start_new_window)
self.set_dark_theme_if_needed()
run_hook('init_qt', self)
def set_dark_theme_if_needed(self):
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
except BaseException as e:
use_dark_theme = False
self.logger.warning(f'Error setting dark theme: {repr(e)}')
# Apply any necessary stylesheet patches
patch_qt_stylesheet(use_dark_theme=use_dark_theme)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
network = self.daemon.network
m.addAction(_("Network"), self.show_network_dialog)
if network.lngossip:
m.addAction(_("Lightning Network"), self.show_lightning_dialog)
if network.local_watchtower:
m.addAction(_("Local Watchtower"), self.show_watchtower_dialog)
for window in self.windows:
name = window.wallet.basename()
submenu = m.addMenu(name)
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Electrum-LTC"), self.close)
def tray_icon(self):
if self.dark_icon:
return read_QIcon('electrum_dark_icon.png')
else:
return read_QIcon('electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
if self.network_dialog:
self.network_dialog.close()
if self.lightning_dialog:
self.lightning_dialog.close()
if self.watchtower_dialog:
self.watchtower_dialog.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.new_window_signal.emit(path, uri)
def show_lightning_dialog(self):
if not self.lightning_dialog:
self.lightning_dialog = LightningDialog(self)
self.lightning_dialog.bring_to_top()
def show_watchtower_dialog(self):
if not self.watchtower_dialog:
self.watchtower_dialog = WatchtowerDialog(self)
self.watchtower_dialog.bring_to_top()
def show_network_dialog(self):
if self.network_dialog:
self.network_dialog.on_update()
self.network_dialog.show()
self.network_dialog.raise_()
return
self.network_dialog = NetworkDialog(self.daemon.network, self.config,
self.network_updated_signal_obj)
self.network_dialog.show()
def _create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
w.warn_if_testnet()
w.warn_if_watching_only()
return w
def count_wizards_in_progress(func):
def wrapper(self: 'ElectrumGui', *args, **kwargs):
with self._num_wizards_lock:
self._num_wizards_in_progress += 1
try:
return func(self, *args, **kwargs)
finally:
with self._num_wizards_lock:
self._num_wizards_in_progress -= 1
return wrapper
@count_wizards_in_progress
def start_new_window(self, path, uri, *, app_is_starting=False):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it'''
wallet = None
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (1):\n' + repr(e))
            # if the app is starting, still let the wizard appear
if not app_is_starting:
return
if not wallet:
try:
wallet = self._start_wizard_to_select_or_create_wallet(path)
except (WalletFileException, BitcoinException) as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot load wallet') + ' (2):\n' + repr(e))
if not wallet:
return
# create or raise window
try:
for window in self.windows:
if window.wallet.storage.path == wallet.storage.path:
break
else:
window = self._create_window_for_wallet(wallet)
except BaseException as e:
self.logger.exception('')
custom_message_box(icon=QMessageBox.Warning,
parent=None,
title=_('Error'),
text=_('Cannot create window for wallet') + ':\n' + repr(e))
if app_is_starting:
wallet_dir = os.path.dirname(path)
path = os.path.join(wallet_dir, get_new_wallet_name(wallet_dir))
self.start_new_window(path, uri)
return
if uri:
window.pay_to_URI(uri)
window.bring_to_top()
window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
window.activateWindow()
return window
def _start_wizard_to_select_or_create_wallet(self, path) -> Optional[Abstract_Wallet]:
wizard = InstallWizard(self.config, self.app, self.plugins)
try:
path, storage = wizard.select_storage(path, self.daemon.get_wallet)
# storage is None if file does not exist
if storage is None:
wizard.path = path # needed by trustedcoin plugin
wizard.run('new')
storage = wizard.create_storage(path)
else:
wizard.run_upgrades(storage)
except (UserCancelled, GoBack):
return
except WalletAlreadyOpenInMemory as e:
return e.wallet
finally:
wizard.terminate()
# return if wallet creation is not complete
if storage is None or storage.get_action():
return
wallet = Wallet(storage, config=self.config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
return wallet
def close_window(self, window: ElectrumWindow):
if window in self.windows:
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if not self.windows:
self.config.save_last_wallet(window.wallet)
run_hook('on_close_window', window)
self.daemon.stop_wallet(window.wallet.storage.path)
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins)
wizard.init_network(self.daemon.network)
wizard.terminate()
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
self.logger.exception('')
return
self.timer.start()
path = self.config.get_wallet_path(use_gui_last_wallet=True)
if not self.start_new_window(path, self.config.get('url'), app_is_starting=True):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
def quit_after_last_window():
# keep daemon running after close
if self.config.get('daemon'):
return
# check if a wizard is in progress
with self._num_wizards_lock:
if self._num_wizards_in_progress > 0 or len(self.windows) > 0:
return
if self.config.get('persist_daemon'):
return
self.app.quit()
self.app.setQuitOnLastWindowClosed(False) # so _we_ can decide whether to quit
self.app.lastWindowClosed.connect(quit_after_last_window)
def clean_up():
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
def stop(self):
self.logger.info('closing GUI')
self.app.quit()
|
py
|
1a5ad1b53f94e98eb2833d6ba8871ef582abc252
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from confluent_kafka.cimpl import KafkaException, KafkaError
from confluent_kafka.serialization import SerializationError
class ConsumeError(KafkaException):
"""
Wraps all errors encountered during the consumption of a message.
Note:
In the event of a serialization error the original message contents
may be retrieved from the ``message`` attribute.
Args:
error_code (KafkaError): Error code indicating the type of error.
exception(Exception, optional): The original exception
message (Message, optional): The Kafka Message returned from the broker.
"""
def __init__(self, error_code, exception=None, message=None):
if exception is not None:
kafka_error = KafkaError(error_code, repr(exception))
self.exception = exception
else:
kafka_error = KafkaError(error_code)
self.exception = None
super(ConsumeError, self).__init__(kafka_error)
self.message = message
    @property
    def code(self):
        # Delegate to the wrapped KafkaError (stored as the first exception
        # argument) rather than recursively re-invoking this property.
        return self.args[0].code()
    @property
    def name(self):
        return self.args[0].name()
class KeyDeserializationError(ConsumeError, SerializationError):
"""
Wraps all errors encountered during the deserialization of a Kafka
Message's key.
Args:
exception(Exception, optional): The original exception
message (Message, optional): The Kafka Message returned from the broker.
"""
def __init__(self, exception=None, message=None):
super(KeyDeserializationError, self).__init__(
KafkaError._KEY_DESERIALIZATION, exception=exception, message=message)
class ValueDeserializationError(ConsumeError, SerializationError):
"""
Wraps all errors encountered during the deserialization of a Kafka
Message's value.
Args:
exception(Exception, optional): The original exception
message (Message, optional): The Kafka Message returned from the broker.
"""
def __init__(self, exception=None, message=None):
super(ValueDeserializationError, self).__init__(
KafkaError._VALUE_DESERIALIZATION, exception=exception, message=message)
class ProduceError(KafkaException):
"""
Wraps all errors encountered when Producing messages.
Args:
error_code (KafkaError): Error code indicating the type of error.
exception(Exception, optional): The original exception.
"""
def __init__(self, error_code, exception=None):
if exception is not None:
kafka_error = KafkaError(error_code, repr(exception))
self.exception = exception
else:
kafka_error = KafkaError(error_code)
self.exception = None
super(ProduceError, self).__init__(kafka_error)
    @property
    def code(self):
        # Delegate to the wrapped KafkaError (stored as the first exception
        # argument) rather than recursively re-invoking this property.
        return self.args[0].code()
    @property
    def name(self):
        return self.args[0].name()
class KeySerializationError(ProduceError, SerializationError):
"""
Wraps all errors encountered during the serialization of a Message key.
Args:
exception (Exception): The exception that occurred during serialization.
"""
def __init__(self, exception=None):
super(KeySerializationError, self).__init__(
KafkaError._KEY_SERIALIZATION, exception=exception)
class ValueSerializationError(ProduceError, SerializationError):
"""
Wraps all errors encountered during the serialization of a Message value.
Args:
exception (Exception): The exception that occurred during serialization.
"""
def __init__(self, exception=None):
super(ValueSerializationError, self).__init__(
KafkaError._VALUE_SERIALIZATION, exception=exception)
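# --- Usage sketch (added for illustration; not part of the original module) ---
# How application code might react to these wrappers when polling a
# deserializing consumer. The `consumer` object is assumed to be configured
# elsewhere; only the error-handling pattern is shown.
def _example_poll_loop(consumer):
    while True:
        try:
            msg = consumer.poll(1.0)
        except ValueDeserializationError as err:
            # The raw Kafka Message that failed to deserialize is still available.
            print('bad message value:', err.message, 'error code:', err.code)
            continue
        except ConsumeError as err:
            print('consume failed:', err.name)
            break
        if msg is None:
            continue
        # ... process msg ...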
|
py
|
1a5ad20d32ef4d617cbb27b7c4f54569e3d5b538
|
# Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
from __future__ import division
import warnings
import itertools
import numpy as np
import numpy.linalg as la
from scipy import sparse, stats
from scipy.sparse import random as sparse_random
import pytest
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_allclose_dense_sparse
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.preprocessing.data import PowerTransformer
from sklearn.preprocessing.data import power_transform
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn.utils import shuffle
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
[u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_polynomial_feature_array_order():
X = np.arange(10).reshape(5, 2)
def is_c_contiguous(a):
return np.isfortran(a.T)
assert is_c_contiguous(PolynomialFeatures().fit_transform(X))
assert is_c_contiguous(PolynomialFeatures(order='C').fit_transform(X))
assert np.isfortran(PolynomialFeatures(order='F').fit_transform(X))
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(1, True, False, int),
(2, True, False, int),
(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64),
(4, False, False, np.float64),
(4, False, True, np.float64)])
def test_polynomial_features_csc_X(deg, include_bias, interaction_only, dtype):
rng = np.random.RandomState(0)
X = rng.randint(0, 2, (100, 2))
X_csc = sparse.csc_matrix(X)
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csc = est.fit_transform(X_csc.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csc, sparse.csc_matrix)
assert Xt_csc.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csc.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(1, True, False, int),
(2, True, False, int),
(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64)])
def test_polynomial_features_csr_X(deg, include_bias, interaction_only, dtype):
rng = np.random.RandomState(0)
X = rng.randint(0, 2, (100, 2))
X_csr = sparse.csr_matrix(X)
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'include_bias', 'interaction_only', 'dtype'],
[(2, True, False, np.float32),
(2, True, False, np.float64),
(3, False, False, np.float64),
(3, False, True, np.float64)])
def test_polynomial_features_csr_X_floats(deg, include_bias,
interaction_only, dtype):
X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(deg, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr.astype(dtype))
Xt_dense = est.fit_transform(X.astype(dtype))
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['zero_row_index', 'deg', 'interaction_only'],
[(0, 2, True), (1, 2, True), (2, 2, True),
(0, 3, True), (1, 3, True), (2, 3, True),
(0, 2, False), (1, 2, False), (2, 2, False),
(0, 3, False), (1, 3, False), (2, 3, False)])
def test_polynomial_features_csr_X_zero_row(zero_row_index, deg,
interaction_only):
X_csr = sparse_random(3, 10, 1.0, random_state=0).tocsr()
X_csr[zero_row_index, :] = 0.0
X = X_csr.toarray()
est = PolynomialFeatures(deg, include_bias=False,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
# This degree should always be one more than the highest degree supported by
# _csr_expansion.
@pytest.mark.parametrize(['include_bias', 'interaction_only'],
[(True, True), (True, False),
(False, True), (False, False)])
def test_polynomial_features_csr_X_degree_4(include_bias, interaction_only):
X_csr = sparse_random(1000, 10, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(4, include_bias=include_bias,
interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
@pytest.mark.parametrize(['deg', 'dim', 'interaction_only'],
[(2, 1, True),
(2, 2, True),
(3, 1, True),
(3, 2, True),
(3, 3, True),
(2, 1, False),
(2, 2, False),
(3, 1, False),
(3, 2, False),
(3, 3, False)])
def test_polynomial_features_csr_X_dim_edges(deg, dim, interaction_only):
X_csr = sparse_random(1000, dim, 0.5, random_state=0).tocsr()
X = X_csr.toarray()
est = PolynomialFeatures(deg, interaction_only=interaction_only)
Xt_csr = est.fit_transform(X_csr)
Xt_dense = est.fit_transform(X)
assert isinstance(Xt_csr, sparse.csr_matrix)
assert Xt_csr.dtype == Xt_dense.dtype
assert_array_almost_equal(Xt_csr.A, Xt_dense)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_standard_scaler_dtype():
# Ensure scaling does not affect dtype
rng = np.random.RandomState(0)
n_samples = 10
n_features = 3
for dtype in [np.float16, np.float32, np.float64]:
X = rng.randn(n_samples, n_features).astype(dtype)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X)
assert X.dtype == X_scaled.dtype
assert scaler.mean_.dtype == np.float64
assert scaler.scale_.dtype == np.float64
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.full(8, np.log(1e-5), dtype=np.float64)
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
    # with 2 more samples, the std computation runs into numerical issues:
x = np.full(10, np.log(1e-5), dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.full(10, 1e-100, dtype=np.float64)
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.full(10, 1e100, dtype=np.float64)
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert X_scaled is not X
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert X_scaled is not X
X_scaled = scaler.fit(X).transform(X, copy=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is X
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is not X
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert not s1[0] == s2[0]
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of partial fits, and check the sample counters
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
        # Test std until the end of partial fits, and check the sample counters
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
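# --- Illustrative sketch (added): the batch vs. incremental equivalence that the
# partial_fit tests above rely on, shown outside the test harness. The array
# shape and chunking below are arbitrary choices for the example.
def _demo_partial_fit_equivalence():
    rng_demo = np.random.RandomState(1)
    X_demo = rng_demo.randn(100, 3)
    batch_scaler = StandardScaler().fit(X_demo)
    incr_scaler = StandardScaler()
    for start in range(0, X_demo.shape[0], 10):
        incr_scaler.partial_fit(X_demo[start:start + 10])
    # Means and variances agree up to floating point error.
    assert_allclose(incr_scaler.mean_, batch_scaler.mean_)
    assert_allclose(incr_scaler.var_, batch_scaler.var_)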
def test_standard_scaler_partial_fit_numerical_stability():
    # Test whether the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of the absolute values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 digits
tol = 10 ** (-6)
assert scaler.mean_ is not None
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_transform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.finfo(float).eps
    assert_array_less(zero, scaler_incr.var_ + epsilon)  # epsilon makes this a less-or-equal check
assert_array_less(zero, scaler_incr.scale_ + epsilon)
    # (i + 1) because the scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
@pytest.mark.parametrize("array_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
def test_scaler_n_samples_seen_with_nan(with_mean, with_std,
array_constructor):
X = np.array([[0, 1, 3],
[np.nan, 6, 10],
[5, 4, np.nan],
[8, 0, np.nan]],
dtype=np.float64)
X = array_constructor(X)
if sparse.issparse(X) and with_mean:
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
transformer.fit(X)
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
def _check_identity_scalers_attributes(scaler_1, scaler_2):
assert scaler_1.mean_ is scaler_2.mean_ is None
assert scaler_1.var_ is scaler_2.var_ is None
assert scaler_1.scale_ is scaler_2.scale_ is None
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
def test_scaler_return_identity():
    # test that the scaler returns the identity when with_mean and with_std are
    # False
X_dense = np.array([[0, 1, 3],
[5, 6, 0],
[8, 0, 10]],
dtype=np.float64)
X_csr = sparse.csr_matrix(X_dense)
X_csc = X_csr.tocsc()
transformer_dense = StandardScaler(with_mean=False, with_std=False)
X_trans_dense = transformer_dense.fit_transform(X_dense)
transformer_csr = clone(transformer_dense)
X_trans_csr = transformer_csr.fit_transform(X_csr)
transformer_csc = clone(transformer_dense)
X_trans_csc = transformer_csc.fit_transform(X_csc)
assert_allclose_dense_sparse(X_trans_csr, X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_csc)
assert_allclose(X_trans_dense, X_dense)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.partial_fit(X_dense)
transformer_csr.partial_fit(X_csr)
transformer_csc.partial_fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.fit(X_dense)
transformer_csr.fit(X_csr)
transformer_csc.fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
def test_scaler_int():
    # test that the scaler converts integer input to floating point
    # for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains infinity or a value too large",
scale, X)
def test_robust_scaler_error_sparse():
X_sparse = sparse.rand(1000, 10)
scaler = RobustScaler(with_centering=True)
err_msg = "Cannot center sparse matrices"
with pytest.raises(ValueError, match=err_msg):
scaler.fit(X_sparse)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("X", [np.random.randn(10, 3),
sparse.rand(10, 3, density=0.5)])
def test_robust_scaler_attributes(X, with_centering, with_scaling):
# check consistent type of attributes
if with_centering and sparse.issparse(X):
pytest.skip("RobustScaler cannot center sparse matrix")
scaler = RobustScaler(with_centering=with_centering,
with_scaling=with_scaling)
scaler.fit(X)
if with_centering:
assert isinstance(scaler.center_, np.ndarray)
else:
assert scaler.center_ is None
if with_scaling:
assert isinstance(scaler.scale_, np.ndarray)
else:
assert scaler.scale_ is None
def test_robust_scaler_col_zero_sparse():
# check that the scaler works when there is no data materialized in a
# column of a sparse matrix
X = np.random.randn(10, 5)
X[:, 0] = 0
X = sparse.csr_matrix(X)
scaler = RobustScaler(with_centering=False)
scaler.fit(X)
assert scaler.scale_[0] == pytest.approx(1)
X_trans = scaler.transform(X)
assert_allclose(X[:, 0].toarray(), X_trans[:, 0].toarray())
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1])
@pytest.mark.parametrize("strictly_signed",
['positive', 'negative', 'zeros', None])
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
# Check the equivalence of the fitting with dense and sparse matrices
X_sparse = sparse.rand(1000, 5, density=density).tocsc()
if strictly_signed == 'positive':
X_sparse.data = np.abs(X_sparse.data)
elif strictly_signed == 'negative':
X_sparse.data = - np.abs(X_sparse.data)
elif strictly_signed == 'zeros':
X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
X_dense = X_sparse.toarray()
scaler_sparse = RobustScaler(with_centering=False)
scaler_dense = RobustScaler(with_centering=False)
scaler_sparse.fit(X_sparse)
scaler_dense.fit(X_dense)
assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
# which contain negative value; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
# check that an error is raised if input is scalar
assert_raise_message(ValueError,
'Expected 2D array, got scalar array instead',
transformer.transform, 10)
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
# dense case -> warning raised
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
# using a uniform output, each entry of X should be mapped between 0 and 1
# and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_subsampling():
# Test that subsampling the input yields consistent results. We check
# that the computed quantiles are almost mapped to a [0, 1] vector where
# values are equally spaced. The infinity norm is checked to be smaller
# than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-2
inf_norm_arr.append(inf_norm)
# each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
# sparse support
X = sparse.rand(n_samples, 1, density=.99, format='csc', random_state=0)
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-1
inf_norm_arr.append(inf_norm)
# each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
# Lower and upper bounds are manually mapped. We check that in the case
# of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert_equal(transformer.transform([[-10]]),
transformer.transform([[np.min(X)]]))
assert_equal(transformer.transform([[10]]),
transformer.transform([[np.max(X)]]))
assert_equal(transformer.inverse_transform([[-10]]),
transformer.inverse_transform(
[[np.min(transformer.references_)]]))
assert_equal(transformer.inverse_transform([[10]]),
transformer.inverse_transform(
[[np.max(transformer.references_)]]))
def test_quantile_transform_and_inverse():
# iris dataset
X = iris.data
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_nan():
X = np.array([[np.nan, 0, 0, 1],
[np.nan, np.nan, 0, 0.5],
[np.nan, 1, 1, 0]])
transformer = QuantileTransformer(n_quantiles=10, random_state=42)
transformer.fit_transform(X)
# check that the quantiles of the first column are all NaN
assert np.isnan(transformer.quantiles_[:, 0]).all()
# all other columns should not contain NaN
assert not np.isnan(transformer.quantiles_[:, 1:]).any()
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, r'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert not np.any(np.isnan(X_scaled))
X_csr_scaled = scale(X_csr, with_mean=False)
assert not np.any(np.isnan(X_csr_scaled.data))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been modified (copy)
assert X_scaled is not X
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_1d_array():
X = iris.data[:, 1]
X_trans = robust_scale(X)
assert_array_almost_equal(np.median(X_trans), 0)
q = np.percentile(X_trans, q=(25, 75))
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test max_abs_ at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test max_abs_ after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
# Test max_abs_ until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert X_norm is X
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='max', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert X_bin is not X
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert X_bin is not X
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert X_bin is X
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert X_bin is X_float
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr",
SVR(gamma='scale'))])
# did the pipeline set the _pairwise attribute?
assert pipeline._pairwise
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_coo(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_csc(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert sparse.isspmatrix_csr(X), X
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
@pytest.mark.parametrize("method", ['box-cox', 'yeo-johnson'])
def test_power_transformer_notfitted(method):
pt = PowerTransformer(method=method)
X = np.abs(X_1col)
assert_raises(NotFittedError, pt.transform, X)
assert_raises(NotFittedError, pt.inverse_transform, X)
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
@pytest.mark.parametrize('X', [X_1col, X_2d])
def test_power_transformer_inverse(method, standardize, X):
# Make sure we get the original input when applying transform and then
# inverse transform
X = np.abs(X) if method == 'box-cox' else X
pt = PowerTransformer(method=method, standardize=standardize)
X_trans = pt.fit_transform(X)
assert_almost_equal(X, pt.inverse_transform(X_trans))
def test_power_transformer_1d():
X = np.abs(X_1col)
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
X_trans = pt.fit_transform(X)
X_trans_func = power_transform(
X, method='box-cox',
standardize=standardize
)
X_expected, lambda_expected = stats.boxcox(X.flatten())
if standardize:
X_expected = scale(X_expected)
assert_almost_equal(X_expected.reshape(-1, 1), X_trans)
assert_almost_equal(X_expected.reshape(-1, 1), X_trans_func)
assert_almost_equal(X, pt.inverse_transform(X_trans))
assert_almost_equal(lambda_expected, pt.lambdas_[0])
assert len(pt.lambdas_) == X.shape[1]
assert isinstance(pt.lambdas_, np.ndarray)
def test_power_transformer_2d():
X = np.abs(X_2d)
for standardize in [True, False]:
pt = PowerTransformer(method='box-cox', standardize=standardize)
X_trans_class = pt.fit_transform(X)
X_trans_func = power_transform(
X, method='box-cox',
standardize=standardize
)
for X_trans in [X_trans_class, X_trans_func]:
for j in range(X_trans.shape[1]):
X_expected, lmbda = stats.boxcox(X[:, j].flatten())
if standardize:
X_expected = scale(X_expected)
assert_almost_equal(X_trans[:, j], X_expected)
assert_almost_equal(lmbda, pt.lambdas_[j])
# Test inverse transformation
X_inv = pt.inverse_transform(X_trans)
assert_array_almost_equal(X_inv, X)
assert len(pt.lambdas_) == X.shape[1]
assert isinstance(pt.lambdas_, np.ndarray)
def test_power_transformer_boxcox_strictly_positive_exception():
# Exceptions should be raised for negative arrays and zero arrays when
# method is boxcox
pt = PowerTransformer(method='box-cox')
pt.fit(np.abs(X_2d))
X_with_negatives = X_2d
not_positive_message = 'strictly positive'
assert_raise_message(ValueError, not_positive_message,
pt.transform, X_with_negatives)
assert_raise_message(ValueError, not_positive_message,
pt.fit, X_with_negatives)
assert_raise_message(ValueError, not_positive_message,
power_transform, X_with_negatives, 'box-cox')
assert_raise_message(ValueError, not_positive_message,
pt.transform, np.zeros(X_2d.shape))
assert_raise_message(ValueError, not_positive_message,
pt.fit, np.zeros(X_2d.shape))
assert_raise_message(ValueError, not_positive_message,
power_transform, np.zeros(X_2d.shape), 'box-cox')
@pytest.mark.parametrize('X', [X_2d, np.abs(X_2d), -np.abs(X_2d),
np.zeros(X_2d.shape)])
def test_power_transformer_yeojohnson_any_input(X):
# Yeo-Johnson method should support any kind of input
power_transform(X, method='yeo-johnson')
@pytest.mark.parametrize("method", ['box-cox', 'yeo-johnson'])
def test_power_transformer_shape_exception(method):
pt = PowerTransformer(method=method)
X = np.abs(X_2d)
pt.fit(X)
# Exceptions should be raised for arrays with different num_columns
# than during fitting
wrong_shape_message = 'Input data has a different number of features'
assert_raise_message(ValueError, wrong_shape_message,
pt.transform, X[:, 0:1])
assert_raise_message(ValueError, wrong_shape_message,
pt.inverse_transform, X[:, 0:1])
def test_power_transformer_method_exception():
pt = PowerTransformer(method='monty-python')
X = np.abs(X_2d)
# An exception should be raised if PowerTransformer.method isn't valid
bad_method_message = "'method' must be one of"
assert_raise_message(ValueError, bad_method_message,
pt.fit, X)
def test_power_transformer_lambda_zero():
pt = PowerTransformer(method='box-cox', standardize=False)
X = np.abs(X_2d)[:, 0:1]
# Test the lambda = 0 case
pt.lambdas_ = np.array([0])
X_trans = pt.transform(X)
assert_array_almost_equal(pt.inverse_transform(X_trans), X)
def test_power_transformer_lambda_one():
# Make sure lambda = 1 corresponds to the identity for yeo-johnson
pt = PowerTransformer(method='yeo-johnson', standardize=False)
X = np.abs(X_2d)[:, 0:1]
pt.lambdas_ = np.array([1])
X_trans = pt.transform(X)
assert_array_almost_equal(X_trans, X)
@pytest.mark.parametrize("method, lmbda", [('box-cox', .1),
('box-cox', .5),
('yeo-johnson', .1),
('yeo-johnson', .5),
('yeo-johnson', 1.),
])
def test_optimization_power_transformer(method, lmbda):
# Test the optimization procedure:
# - set a predefined value for lambda
# - apply inverse_transform to a normal dist (we get X_inv)
# - apply fit_transform to X_inv (we get X_inv_trans)
# - check that X_inv_trans is roughly equal to X
rng = np.random.RandomState(0)
n_samples = 20000
X = rng.normal(loc=0, scale=1, size=(n_samples, 1))
pt = PowerTransformer(method=method, standardize=False)
pt.lambdas_ = [lmbda]
X_inv = pt.inverse_transform(X)
pt = PowerTransformer(method=method, standardize=False)
X_inv_trans = pt.fit_transform(X_inv)
assert_almost_equal(0, np.linalg.norm(X - X_inv_trans) / n_samples,
decimal=2)
assert_almost_equal(0, X_inv_trans.mean(), decimal=1)
assert_almost_equal(1, X_inv_trans.std(), decimal=1)
def test_yeo_johnson_darwin_example():
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
X = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
X = np.array(X).reshape(-1, 1)
lmbda = PowerTransformer(method='yeo-johnson').fit(X).lambdas_
assert np.allclose(lmbda, 1.305, atol=1e-3)
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
def test_power_transformer_nans(method):
# Make sure lambda estimation is not influenced by NaN values
# and that transform() supports NaN silently
X = np.abs(X_1col)
pt = PowerTransformer(method=method)
pt.fit(X)
lmbda_no_nans = pt.lambdas_[0]
# concat nans at the end and check lambda stays the same
X = np.concatenate([X, np.full_like(X, np.nan)])
X = shuffle(X, random_state=0)
pt.fit(X)
lmbda_nans = pt.lambdas_[0]
assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5)
X_trans = pt.transform(X)
assert_array_equal(np.isnan(X_trans), np.isnan(X))
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_fit_transform(method, standardize):
# check that fit_transform() and fit().transform() return the same values
X = X_1col
if method == 'box-cox':
X = np.abs(X)
pt = PowerTransformer(method, standardize)
assert_array_almost_equal(pt.fit(X).transform(X), pt.fit_transform(X))
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_copy_True(method, standardize):
# Check that neither fit, transform, fit_transform nor inverse_transform
# modify X inplace when copy=True
X = X_1col
if method == 'box-cox':
X = np.abs(X)
X_original = X.copy()
assert X is not X_original # sanity checks
assert_array_almost_equal(X, X_original)
pt = PowerTransformer(method, standardize, copy=True)
pt.fit(X)
assert_array_almost_equal(X, X_original)
X_trans = pt.transform(X)
assert X_trans is not X
X_trans = pt.fit_transform(X)
assert_array_almost_equal(X, X_original)
assert X_trans is not X
X_inv_trans = pt.inverse_transform(X_trans)
assert X_trans is not X_inv_trans
@pytest.mark.parametrize('method', ['box-cox', 'yeo-johnson'])
@pytest.mark.parametrize('standardize', [True, False])
def test_power_transformer_copy_False(method, standardize):
# check that when copy=False fit doesn't change X inplace but transform,
# fit_transform and inverse_transform do.
X = X_1col
if method == 'box-cox':
X = np.abs(X)
X_original = X.copy()
assert X is not X_original # sanity checks
assert_array_almost_equal(X, X_original)
pt = PowerTransformer(method, standardize, copy=False)
pt.fit(X)
assert_array_almost_equal(X, X_original) # fit didn't change X
X_trans = pt.transform(X)
assert X_trans is X
if method == 'box-cox':
X = np.abs(X)
X_trans = pt.fit_transform(X)
assert X_trans is X
X_inv_trans = pt.inverse_transform(X_trans)
assert X_trans is X_inv_trans
def test_power_transform_default_method():
X = np.abs(X_2d)
future_warning_message = (
"The default value of 'method' "
"will change from 'box-cox'"
)
assert_warns_message(FutureWarning, future_warning_message,
power_transform, X)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
X_trans_default = power_transform(X)
X_trans_boxcox = power_transform(X, method='box-cox')
assert_array_equal(X_trans_boxcox, X_trans_default)
|
py
|
1a5ad25f56d5aa9057e676d93118692145e1f951
|
from django.urls import path
from .views import account_session, get_csrf, register, account_login, account_logout
urlpatterns = [
path('get_session/', account_session),
path('get_csrf/', get_csrf),
path('register/', register, name='account_register'),
path('login/', account_login, name="account_login"),
path('logout/', account_logout),
]
|
py
|
1a5ad271298c3d6b4560839ae6a249f32410c422
|
import math
from typing import Tuple
import torch
import torch.nn as nn
from cached_property import cached_property
from torch.nn.modules.transformer import (
TransformerDecoder,
TransformerDecoderLayer,
TransformerEncoder,
TransformerEncoderLayer,
)
from kobe.data.dataset import Batched, EncodedBatch
from kobe.data.vocab import BOS_ID, EOS_ID, PAD_ID
from kobe.utils import helpers
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
"""
initialization of required variables and functions
:param dropout: dropout probability
:param dim: hidden size
:param max_len: maximum length
"""
super(PositionalEncoding, self).__init__()
# positional encoding initialization
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
# term to divide
div_term = torch.exp(
(torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))
)
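# div_term equals 1 / 10000^(2i/dim) for the even indices 2i, i.e. the
# sinusoidal frequencies from "Attention Is All You Need"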
# sinusoidal positional encoding
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(1)
self.register_buffer("pe", pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb):
"""
add positional encoding to the word embedding
:param emb: word embedding, shape [len, batch, size]
:return: embedding with positional encoding added and dropout applied
"""
emb *= math.sqrt(self.dim)
emb = emb + self.pe[: emb.size(0)] # [len, batch, size]
emb = self.dropout(emb)
return emb
class Encoder(nn.Module):
@staticmethod
def from_args(args) -> "Encoder":
return Encoder(
args.text_vocab_size + args.cond_vocab_size,
args.max_seq_len,
args.d_model,
args.nhead,
args.num_encoder_layers,
args.dropout,
args.mode,
)
def __init__(
self,
vocab_size: int,
max_seq_len: int,
d_model: int,
nhead: int,
num_layers: int,
dropout: float,
mode: str,
):
super().__init__()
self.d_model = d_model
self.max_seq_len = max_seq_len
self.input_embedding = nn.Embedding(vocab_size, d_model)
self.pos_encoder = PositionalEncoding(dropout, d_model)
encoder_layer = TransformerEncoderLayer(
d_model, nhead, d_model * 4, dropout, norm_first=True
)
self.encoder = TransformerEncoder(
encoder_layer, num_layers, nn.LayerNorm(d_model)
)
self.mode = mode
@cached_property
def device(self):
return list(self.parameters())[0].device
def forward(self, batched: Batched) -> EncodedBatch:
src, src_key_padding_mask = Encoder._get_input(batched, self.mode)
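# src holds token ids with the sequence dimension first ([seq_len, batch]);
# src_key_padding_mask is presumably True at PAD positions, which is the
# convention TransformerEncoder uses to ignore those tokens in attention.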
src = self.input_embedding(src)
src = self.pos_encoder(src)
token_encodings = self.encoder.forward(
src=src, src_key_padding_mask=src_key_padding_mask
)
return EncodedBatch(
context_encodings=token_encodings,
context_encodings_mask=src_key_padding_mask,
)
@staticmethod
def _get_input(batched: Batched, mode: str) -> Tuple[torch.Tensor, torch.Tensor]:
return {
helpers.BASELINE: (batched.title_token_ids, batched.title_token_ids_mask),
helpers.KOBE_ATTRIBUTE: (
batched.cond_title_token_ids,
batched.cond_title_token_ids_mask,
),
helpers.KOBE_KNOWLEDGE: (
batched.title_fact_token_ids,
batched.title_fact_token_ids_mask,
),
helpers.KOBE_FULL: (
batched.cond_title_fact_token_ids,
batched.cond_title_fact_token_ids_mask,
),
}[mode]
class Decoder(nn.Module):
@staticmethod
def from_args(args) -> "Decoder":
return Decoder(
args.text_vocab_size,
args.max_seq_len,
args.d_model,
args.nhead,
args.num_encoder_layers,
args.dropout,
)
def __init__(
self,
vocab_size: int,
max_seq_len: int,
d_model: int,
nhead: int,
num_layers: int,
dropout: float,
):
super(Decoder, self).__init__()
self.max_seq_len = max_seq_len
self.embedding = nn.Embedding(vocab_size, d_model)
self.pos_encoder = PositionalEncoding(dropout, d_model)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, 4 * d_model, dropout, norm_first=True
)
self.decoder = TransformerDecoder(
decoder_layer, num_layers, nn.LayerNorm(d_model)
)
self.output = nn.Linear(d_model, vocab_size)
def forward(self, batch: Batched, encoded_batch: EncodedBatch) -> torch.Tensor:
tgt = self.embedding(batch.description_token_ids[:-1])
tgt = self.pos_encoder(tgt)
tgt_mask = Decoder.generate_square_subsequent_mask(tgt.shape[0], tgt.device)
outputs = self.decoder(
tgt=tgt,
tgt_mask=tgt_mask,
tgt_key_padding_mask=batch.description_token_ids_mask[:, :-1],
memory=encoded_batch.context_encodings,
memory_key_padding_mask=encoded_batch.context_encodings_mask,
)
return self.output(outputs)
def predict(self, encoded_batch: EncodedBatch, decoding_strategy: str):
batch_size = encoded_batch.context_encodings.shape[1]
tgt = torch.tensor(
[BOS_ID] * batch_size, device=encoded_batch.context_encodings.device
).unsqueeze(dim=0)
tgt_mask = Decoder.generate_square_subsequent_mask(self.max_seq_len, tgt.device)
pred_all = []
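# Autoregressive decoding: at each step the tokens generated so far are
# re-embedded and passed through the decoder, the next-token distribution is
# read from the last position, and once a sequence has produced EOS (or PAD)
# all of its later steps are forced to PAD; decoding stops early when every
# sequence in the batch is padded.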
for idx in range(self.max_seq_len):
tgt_emb = self.pos_encoder(self.embedding(tgt))
outputs = self.decoder(
tgt_emb,
tgt_mask=tgt_mask[: idx + 1, : idx + 1],
memory=encoded_batch.context_encodings,
memory_key_padding_mask=encoded_batch.context_encodings_mask,
)
logits = self.output(outputs[-1])
if decoding_strategy == "greedy":
pred_step = logits.argmax(dim=1).tolist()
elif decoding_strategy == "nucleus":
pred_step = [
helpers.top_k_top_p_sampling(logits[i], top_p=0.95)
for i in range(batch_size)
]
else:
raise NotImplementedError
for b in range(batch_size):
if pred_all and pred_all[-1][b].item() in [EOS_ID, PAD_ID]:
pred_step[b] = PAD_ID
if all([pred == PAD_ID for pred in pred_step]):
break
pred_step = torch.tensor(pred_step, device=tgt.device)
pred_all.append(pred_step)
if idx < self.max_seq_len - 1:
tgt_step = pred_step.unsqueeze(dim=0)
tgt = torch.cat([tgt, tgt_step], dim=0)
preds = torch.stack(pred_all)
return preds
@staticmethod
def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:
r"""
Generate a square mask for the sequence. The masked positions are filled with
float('-inf').
Unmasked positions are filled with float(0.0).
"""
return torch.triu(
torch.full((sz, sz), float("-inf"), device=device), diagonal=1
)
|
py
|
1a5ad4854df357452ca217d887e17296cd753374
|
"""
Generic product and Venus specific objects definitions
"""
__author__ = "jerome.colin'at'cesbio.cnes.fr"
__license__ = "MIT"
__version__ = "1.0.3"
import zipfile
import sys
import os
import subprocess, shlex
import osgeo.gdal as gdal
import glob
import numpy as np
try:
import utilities
except ModuleNotFoundError:
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(this_dir)
import utilities
class Product:
def __init__(self, path, logger, sensor="venus"):
"""
Create a product object
:param path: product path, or product file name for ZIP and HDF products
:param logger: logger instance
:param sensor: sensor name, defaults to "venus"
"""
self.path = path
self.logger = logger
self.sre_scalef = 1.
# Consistency check
# TODO: move this test to Collection and use subclasses
try:
if zipfile.is_zipfile(self.path):
logger.info("ZIP file found")
self.ptype = "ZIP"
elif os.path.isdir(path):
logger.info("Directory based product found")
self.ptype = "DIR"
elif path[-3:] == "hdf" or path[-3:] == "HDF":
logger.info("HDF file found")
self.ptype = "HDF"
else:
if not os.path.isfile(self.path):
logger.error("Unknown product or file not found: %s" % self.path)
sys.exit(2)
except FileNotFoundError as err:
logger.error(err)
sys.exit(1)
self.get_content_list()
def find_band(self, band):
fband_name = [b for b in self.content_list if band in b]
if len(fband_name) == 1:
self.logger.info("Found file %s for band name %s" % (fband_name[0], band))
return fband_name[0]
else:
self.logger.error("%i match found for band name %s in content list: %s" % (len(fband_name), band, self.content_list))
sys.exit(2)
def get_band(self, band, scalef=None, layer=None, tiny=False):
"""
Return a band as a numpy array, read either from a plain image file or from within a zip archive
:param band: image filename (or archive member name) from content_list
:param scalef: optional scale factor applied to the pixel values
:param layer: optional layer index to extract from a multi-layer raster
:param tiny: if True, rescale by 10000 and cast to uint16
:return: numpy array
"""
self.logger.debug('Gdal.Open using %s' % (band))
if self.ptype == "DIR":
if scalef is not None:
band_arr = gdal.Open('%s' % (band)).ReadAsArray() / scalef
else:
band_arr = gdal.Open('%s' % (band)).ReadAsArray()
if self.ptype == "ZIP":
if scalef is not None:
band_arr = gdal.Open('/vsizip/%s/%s' % (self.path, band)).ReadAsArray() / scalef
else:
band_arr = gdal.Open('/vsizip/%s/%s' % (self.path, band)).ReadAsArray()
if tiny:
band_arr = (band_arr*10000).astype(np.uint16)
if layer is not None:
return band_arr[layer,:,:]
else:
return band_arr
def get_band_subset(self, band, roi=None, ulx=None, uly=None, lrx=None, lry=None, scalef=None, layer=None):
"""Extract a subset from an image file
:param band: product image filename from content_list
:param ulx: upper left x
:param uly: upper left y
:param lrx: lower right x
:param lry: lower right y
:return: a Gdal object
"""
if roi is not None:
ulx = roi.ulx
uly = roi.uly
lrx = roi.lrx
lry = roi.lry
try:
if self.ptype == "ZIP":
translate = 'gdal_translate -projwin %s %s %s %s /vsizip/%s/%s %s' % (
ulx, uly, lrx, lry, self.path, band, ".tmp.tif")
else:
translate = 'gdal_translate -projwin %s %s %s %s %s %s' % (
ulx, uly, lrx, lry, band, ".tmp.tif")
self.logger.debug(translate)
args = shlex.split(translate)
prog = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = prog.communicate()
self.logger.debug('Gdal_translate stdout: %s' % out)
self.logger.debug('Gdal_translate stderr: %s' % err)
img = gdal.Open(".tmp.tif")
os.remove(".tmp.tif")
except RuntimeError as err:
self.logger.error('ERROR: Unable to open ' + band)
self.logger.error(err)
sys.exit(1)
if scalef is not None:
band_arr = img.ReadAsArray() / scalef
else:
band_arr = img.ReadAsArray()
if layer is not None:
return band_arr[layer,:,:]
else:
return band_arr
def get_content_list(self):
self.content_list = glob.glob(self.path + '/*')
def get_mask(self, clm, edg, stats=False, use_nodata=False):
"""
Return a 'validity mask' such that valid pixel is 1, non-valid pixel is 0
:param clm: cloud mask numpy array
:param edg: edge mask numpy array
:param stats: if True, also return the fraction of valid pixels in percent
:param use_nodata: if True, NaN in masks are used instead of 0
:return: an array with 'valid' = 1, 'non-valid' = 0 (and the validity ratio in percent if stats is True)
"""
# TODO: consider revising (numpy.ma ?)
dummy = np.zeros_like(clm) + 1
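# start from an all-valid mask of ones, then zero out pixels flagged in the
# cloud (clm) or edge (edg) masks below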
self.logger.debug("Dummy size=%i, sum=%i" % (np.size(dummy), np.sum(dummy)))
if use_nodata:
dummy[~np.isnan(clm)] = 0
dummy[~np.isnan(edg)] = 0
else:
dummy[np.nonzero(clm)] = 0
dummy[np.nonzero(edg)] = 0
validity_ratio = np.nansum(dummy) / np.size(clm) * 100
self.logger.debug("Product.get_mask: NaN in clm=%i, NaN in edg=%i, Non-zero in mask=%i, result=%i, ratio=%4.2f%%" %
(utilities.count_nan(clm), utilities.count_nan(edg), np.count_nonzero(dummy), np.nansum(dummy),
validity_ratio))
if stats:
return dummy, validity_ratio
else:
return dummy
class Product_dir_maja(Product):
"""
Sub-class of Product for Maja specific methods
TODO: Add R1 & R2 to band_names
TODO: Preload CLM_R1, CLM_R2, EDG_R1 and EDG_R2 to create a boolean MASK_R1 and MASK_R2 to use in get_band to return a numpy.ma
"""
def __init__(self, path, logger):
super().__init__(path, logger)
self.band_names = ["SRE_B1.",
"SRE_B2.",
"SRE_B3.",
"SRE_B4.",
"SRE_B5.",
"SRE_B6.",
"SRE_B7.",
"SRE_B8.",
"SRE_B8A."
"SRE_B9.",
"SRE_B10.",
"SRE_B11.",
"SRE_B12.", ]
self.sre_scalef = 10000
self.aot_scalef = 200
self.vap_scalef = 20
self.clm_name = "CLM_R1"
self.edg_name = "EDG_R1"
class Product_hdf(Product):
"""
Sub-class of Product for HDF specific methods
"""
def __init__(self, path, logger):
super().__init__(path, logger)
def find_band(self, band):
"""
Overriding mother class method
:param band: band name substring to look for among the HDF subdatasets
:return: index of the matching subdataset in content_list
"""
is_unique = 0
subds_id = -1
for b in range(len(self.content_list)):
subds_name = self.content_list[b][1]
if subds_name.find(band) != -1:
subds_id = b
is_unique += 1
self.logger.info("Found %s in subdataset %s" % (band, subds_name))
if is_unique == 0:
self.logger.error("No subdataset found for band name %s in %s" % (band, self.path))
if is_unique > 1:
self.logger.error("Many subdataset found for band name %s in %s" % (band, self.path))
if is_unique == 1:
return subds_id
def get_band(self, fband, scalef=None, tiny=False):
"""
Overriding mother class method
:param fband: subdataset index as returned by find_band
:param scalef: optional scale factor applied to the pixel values
:param tiny: if True, rescale by 10000 and cast to uint16
:return: numpy array
"""
if scalef is not None:
if tiny:
return (gdal.Open(self.content_list[fband][0], gdal.GA_ReadOnly).ReadAsArray() / scalef * 10000).astype(np.uint16)
else:
return gdal.Open(self.content_list[fband][0], gdal.GA_ReadOnly).ReadAsArray() / scalef
else:
return gdal.Open(self.content_list[fband][0], gdal.GA_ReadOnly).ReadAsArray().astype(np.int16)
def get_band_subset(self):
self.logger.warning("Product_hdf.get_band_subset not yet implemented !")
def get_content_list(self):
hdf_ds = gdal.Open(self.path, gdal.GA_ReadOnly)
self.content_list = hdf_ds.GetSubDatasets()
class Product_hdf_acix(Product_hdf):
"""
Subclass of Product_hdf for ACIX reference products
"""
def __init__(self, path, logger):
super().__init__(path, logger)
self.sre_scalef = 10000
class Product_zip(Product):
"""
Product subclass for zip
"""
def __init__(self, path, logger):
super().__init__(path, logger)
def get_content_list(self):
"""
:return: a list of files within a zip
"""
self.name = self.path.split('/')[-1]
with zipfile.ZipFile(self.path, 'r') as zip_file:
self.content_list = zip_file.namelist()
self.logger.info("Looking into ZIP file content")
for element in self.content_list:
self.logger.debug(element)
class Product_zip_venus(Product_zip):
"""
Sub-class of Product_zip for Venus specific methods
"""
def __init__(self, path, logger):
super().__init__(path, logger)
self.band_names = ["SRE_B1.",
"SRE_B2.",
"SRE_B3.",
"SRE_B4.",
"SRE_B5.",
"SRE_B6.",
"SRE_B7.",
"SRE_B8.",
"SRE_B9.",
"SRE_B10.",
"SRE_B11.",
"SRE_B12.", ]
self.sre_scalef = 1000
self.aot_scalef = 200
self.vap_scalef = 20
self.aot_name = "ATB_XS"
self.vap_name = "ATB_XS"
self.aot_layer = 1
self.vap_layer = 0
self.clm_name = "CLM_XS"
self.edg_name = "EDG_XS"
|
py
|
1a5ad493db3135ef9a2fac0e5e1234676e5afdb6
|
# -*- coding: future_fstrings -*-
class GroupUser:
group_user_access_right_key = 'groupUserAccessRight'
email_address_key = 'emailAddress'
display_name_key = 'displayName'
identifier_key = 'identifier'
principal_type_key = 'principalType'
def __init__(
self,
group_user_access_right,
email_address="",
display_name="",
identifier="",
principal_type=None
):
"""Constructs a GroupUser object
:param group_user_access_right: Enum GroupUserAccessRight - The access right to assign to the GroupUser
:param email_address: str - E-mail address of the user if principal type is user
:param display_name: str - Display name of the principal
:param identifier: str - Identifier of the principal
:param principal_type: Enum PrincipalType - The principal type
"""
self.group_user_access_right = group_user_access_right
self.email_address = email_address
self.display_name = display_name
self.identifier = identifier
self.principal_type = principal_type
def as_set_values_dict(self):
"""Convert GroupUser object to dict with only values that are actually set. This dict can be used for
groups.add_group_user requests.
:return: Dict with object attributes in camelCase as keys, and attribute values as values.
"""
group_user_dict = dict()
if self.group_user_access_right:
group_user_dict[self.group_user_access_right_key] = self.group_user_access_right.value
if self.email_address:
group_user_dict[self.email_address_key] = self.email_address
if self.display_name:
group_user_dict[self.display_name_key] = self.display_name
if self.identifier:
group_user_dict[self.identifier_key] = self.identifier
if self.principal_type:
group_user_dict[self.principal_type_key] = self.principal_type.value
return group_user_dict
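# Illustrative sketch (not part of the original module; assumes a GroupUserAccessRight
# enum with a member such as ADMIN whose .value is the string expected by the API):
#   user = GroupUser(GroupUserAccessRight.ADMIN, email_address="user@example.com")
#   body = user.as_set_values_dict()
#   # -> {'groupUserAccessRight': <ADMIN value>, 'emailAddress': 'user@example.com'}
# Only the attributes that were actually set appear in the resulting dict.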
|
py
|
1a5ad4b101d5677b1b6545733d5bdc6364f130d9
|
# -*- coding: ascii -*-
"""
app.utils
~~~~~~~~~
Utils. for the application.
"""
import re
import unicodedata
from functools import partial
from Levenshtein import distance
__all__ = [
'parse_db_uri',
'parse_citations',
'parse_doi',
'normalize',
'doi_normalize',
'matching'
]
# Find citations from text
find_citations = [
# APA style
re.compile(
(
r'((?#authors)[\w-]{2,}(?: *,(?: *[A-Z]\.)+|(?: +[\w-]+)+)?'
r'(?: *,(?: *(?:&|\.{3}))? *[\w-]{2,}(?: *,(?: *[A-Z]\.)+|(?: +[\w-]+)+)?)*(?:(?<=\.)|(?<!\.) *\.)'
r'(?#date) *\( *\d{4}(?: *, *\w+(?: +\d+(?: *- *\d+)?)?)? *\) *\.'
r'(?#title)[^\n]+(?:(?<=\.)|(?<!\.)\.)'
r'(?#journal|location)(?<=\.)(?:[^\n]+?(?=, *\d+ *\([\w-]+\)|, *\w+(?:-\w+)? *\.|\.)'
r'(?#journal:volume)(?:, *\d+ *\([\w-]+\))?(?#journal:pages)(?:, *\w+(?:-\w+)?)? *\.)?'
r'(?#doi)(?: *(?:doi: *|http://dx\.doi\.org/)[^\s]+)?)'
),
flags=re.IGNORECASE + re.DOTALL
).findall,
# AMA style
re.compile(
(
r'(?:\n|^)'
r'((?#authors)(?:[\w-]{2,}(?: +[A-Z]+)?(?: *, *[\w-]{2,}(?: +[A-Z]+)?)* *\.)?'
r'(?#title) *\w{2}[^\n;.]+\.(?#title:journal|conference) *\w{2}[^\n;.]+'
r'(?:(?#journal)\.(?#date) *(?:[a-z]{3}(?: +\d{1,2})? *, *)?\d{4}'
r'(?#volume)(?: *;(?: *\d+)?(?: *\( *[\w-]+ *\))?)?'
r'(?#page)(?: *: *\w+(?: *- *\w+)?)?|(?#conference)'
r'(?#date); *(?:[a-z]{3}(?: +\d+(?: *- *(?:\d+|[a-z]{3} +\d+))?)? *, *)?\d{4}'
r'(?#location)(?: *; *\w{2}[^\n;.]+)?) *\.'
r'(?#doi)(?: *(?:doi: *|http://dx\.doi\.org/)[^\s]+)?)'
),
flags=re.IGNORECASE + re.DOTALL
).findall
]
# Parse DOI in citation
parse_doi = re.compile(
r'(?:doi: *|http://dx\.doi\.org/)([^\s]+)',
flags=re.IGNORECASE
).findall
def parse_citations(text):
"""Parse text into list of citations"""
ret = []
for finder in find_citations:
ret.extend(finder(text))
return ret
def parse_db_uri(conf):
"""
Parse input database config into database URI format
:param conf: input database config
:type conf: dict
:return: string of database config in URI format
:rtype: str
"""
# Input config must be a dict
assert isinstance(conf, dict)
# Key 'dbname' is required in config
if 'dbname' not in conf:
raise ValueError('No database specified')
# Read and parse config
dbname = str(conf['dbname'])
host = str(conf.get('host', '127.0.0.1') or '127.0.0.1')
port = str(conf.get('port', ''))
user = str(conf.get('user', ''))
passwd = str(conf.get('passwd', ''))
driver = str(conf.get('driver', 'postgresql')).lower() or 'postgresql'
if user and passwd:
user = '%s:%s@' % (user, passwd)
elif user:
user = '%s@' % user
elif passwd:
raise ValueError('A password was given without a user')
if port:
if not port.isdigit():
raise ValueError('Database port must be a number')
host = '%s:%s' % (host, port)
# Return parsed config in URI format
return '{}://{}{}/{}'.format(driver, user, host, dbname)
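# Example (illustrative only; values are made up):
#   parse_db_uri({'dbname': 'citations', 'user': 'app', 'passwd': 's3cret', 'port': 5432})
#   # -> 'postgresql://app:s3cret@127.0.0.1:5432/citations'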
def normalize(text, case=True, spaces=True, unicode=True):
"""
Normalize text
:param text: input text
:type text: str
:param case: normalize to lower case, default is True
:type case: bool
:param spaces: normalize spaces, default is True
:type spaces: bool
:param unicode: convert unicode characters to ascii, default is True
:type unicode: bool
:return: normalized text
:rtype: str
"""
# Normalize unicode
if unicode:
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()
# Normalize case
if case:
text = text.lower()
# Normalize spaces
if spaces:
text = ' '.join(text.split())
# Return normalized text
return text
# Normalize DOI
doi_normalize = partial(normalize, case=True, spaces=False, unicode=False)
def mark_exact(citation):
"""Highlight exact matches"""
return '<mark class="exact-match">%s</mark>' % citation
def mark_approx(citation):
"""Highlight approximate matches"""
return '<mark class="approx-match">%s</mark>' % citation
def doi_matched(citation, dois):
"""
Parse DOI value from the input citation, check if the DOI value exists in the list of DOIs
:param citation: input citation
:type citation: str
:param dois: input list of DOIs
:type dois: set or list or tuple
:return: True if it exists, else False
:rtype: bool
"""
# Parse DOI in citation
doi = parse_doi(citation)
# DOI found
if doi:
return doi_normalize(doi[0]) in dois
# DOI not found
return False
def ld_matched(citation, citations, max_distance):
"""
Return the minimum Levenshtein distance between the citation and
the list of available citations when that distance does not exceed
max_distance; otherwise return None.
:param citation: input citation
:type citation: str
:param citations: list of available citations being matched against
:type citations: list or tuple
:param max_distance: maximum edit distance
:type max_distance: int
:return: minimum edit distance number if match found, else None
:rtype: int or None
"""
# Create a generator of edit distance numbers
distances = (distance(normalize(citation), normalize(c.value)) for c in citations)
# Filter distance numbers based on input max_distance
candidates = filter(lambda x: x <= max_distance, distances)
# Return min number of filtered distance numbers, or None
return min(candidates, default=None)
def matching(citation, dois, citations, max_distance):
"""
Main function for matching citation. Returns markup based
on result from matching.
:param citation: citation for doing matching
:type citation: str
:param dois: list of DOIs
:type dois: set or list or tuple
:param citations: list of available citations
:type citations: list or tuple
:param max_distance: maximum edit distance
:type max_distance: int
:return: markup text for input citation
:rtype: str
"""
# Match using DOI
if doi_matched(citation, dois):
return mark_exact(citation)
# Match using Levenshtein Edit Distance
else:
min_distance = ld_matched(citation, citations, max_distance)
if min_distance is None:
return citation # no match found
elif min_distance == 0:
return mark_exact(citation) # exact match
else:
return mark_approx(citation) # approx. match
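# Illustrative sketch of the overall flow (not part of the original module; the citation
# text and reference data below are made up):
#   dois = {'10.1000/xyz123'}
#   refs = []  # objects with a .value attribute holding known citation strings
#   marked = matching('Doe, J. (2020). A title. Journal. doi:10.1000/xyz123',
#                     dois, refs, max_distance=10)
#   # The DOI is found in `dois`, so the citation is wrapped in an exact-match <mark> tag.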
|
py
|
1a5ad82ab41520c62fd1cf8f2ab9b9e713d40b28
|
import os
import psycopg2
import environ
env = environ.Env()
# reading .env file
environ.Env.read_env()
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
DOCUMENT_DIR = os.path.join(PROJECT_ROOT, 'Documenten')
ON_LIVE_SERVER = False  # if set to True, changes will be made on the live server!
if "QRL" in DOCUMENT_DIR and not ON_LIVE_SERVER:
hostname = 'localhost'
username = 'postgres'
password = 'postgres'
database = 'qrl'
port = '5432'
connection = psycopg2.connect(host=hostname, user=username, password=password, dbname=database, port=port)
cur = connection.cursor()
else:
hostname = env('DATABASE_HOST')
username = env('DATABASE_USER')
password = env('DATABASE_PASSWORD')
database = env('DATABASE_NAME')
port = env('DATABASE_PORT')
connection = psycopg2.connect(host=hostname, user=username, password=password, dbname=database, port=port)
cur = connection.cursor()
|
py
|
1a5ad86fd8f19c3c6d7be40cd5ee460312829e47
|
# Generated by Django 3.2 on 2021-06-26 18:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_filter'),
]
operations = [
migrations.AlterField(
model_name='filter',
name='facet',
field=models.CharField(blank=True, choices=[(None, '----------'), ('identities', 'Self Identity'), ('availability', 'Availability by County'), ('component_categories', 'USDA Meal Component'), ('physical_counties', 'Producer Location: County'), ('delivery_methods', 'Delivery Methods'), ('product_categories', 'Product Categories'), ('product_forms', 'Product details')], default=None, max_length=255, unique=True),
),
]
|
py
|
1a5ad924ae4aa094cd08c7652bce59ae431330fb
|
import operator
import re
import sys
from typing import Optional
from packaging import version
# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
ops = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
if got_ver is None:
raise ValueError("got_ver is None")
if want_ver is None:
raise ValueError("want_ver is None")
if not ops[op](version.parse(got_ver), version.parse(want_ver)):
raise ImportError(
f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
)
def require_version(requirement: str, hint: Optional[str] = None) -> None:
"""
Perform a runtime check of the dependency versions, using the exact same syntax used by pip.
The installed module version comes from the *site-packages* dir via *importlib_metadata*.
Args:
requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
hint (`str`, *optional*): what suggestion to print in case of requirements not being met
Example:
```python
require_version("pandas>1.1.2")
require_version("numpy>1.18.5", "this is important to have for whatever reason")
```"""
hint = f"\n{hint}" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$", requirement):
pkg, op, want_ver = requirement, None, None
else:
match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
pkg, want_full = match[0]
# there could be multiple requirements
want_range = want_full.split(",")
wanted = {}
for w in want_range:
match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
if not match:
raise ValueError(
f"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but got {requirement}"
)
op, want_ver = match[0]
wanted[op] = want_ver
if op not in ops:
raise ValueError(
f"{requirement}: need one of {list(ops.keys())}, but got {op}")
# special case
if pkg == "python":
got_ver = ".".join([str(x) for x in sys.version_info[:3]])
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
return
# check if any version is installed
try:
got_ver = importlib_metadata.version(pkg)
except importlib_metadata.PackageNotFoundError:
raise importlib_metadata.PackageNotFoundError(
f"The '{requirement}' distribution was not found and is required by this application. {hint}"
)
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
"""require_version wrapper which emits a core-specific hint on failure"""
hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(requirement, hint)
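# Illustrative usage (mirrors the docstring above; the package names are examples only):
#   require_version("numpy>=1.18.5", "needed for array handling")
#   require_version_core("tqdm>=4.27")  # same check, with the core-specific hint on failure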
|
py
|
1a5ad9ef5a9bbfc0c459a8da9f73f90b9fa96b56
|
from datetime import timedelta
def default_timedelta_deserializer(obj: float,
cls: type = float,
**kwargs) -> timedelta:
"""
Deserialize a float to a timedelta instance.
:param obj: the float that is to be deserialized.
:param cls: not used.
:param kwargs: not used.
:return: a ``datetime.timedelta`` instance.
"""
return timedelta(seconds=obj)
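# Example (illustrative): 90.5 seconds deserializes to 1 minute 30.5 seconds.
#   default_timedelta_deserializer(90.5)  # -> timedelta(seconds=90, microseconds=500000)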
|
py
|
1a5ada1470ac3d1c6bb56940e087226c8d4989da
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""
:authors:
Guannan Ma @mythmgn
:create_date:
2016/06/07
:description:
heartbeat service
"""
from cup.services import heartbeat
class HeartbeatService(heartbeat.HeartbeatService):
"""
heartbeat service. not in use yet
"""
def __init__(self, judge_lost_in_sec, keep_lost=False):
heartbeat.HeartbeatService.__init__(self, judge_lost_in_sec, keep_lost)
self._judge_lost_in_sec = judge_lost_in_sec
self._keep_lost = keep_lost
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
py
|
1a5ada2a8fee87f8f0a569e665abc4653acb8d97
|
"""
Loads bot configuration from YAML files.
By default, this simply loads the default configuration located at `config-default.yml`.
If a file called `config.yml` is found in the project directory, the default configuration
is recursively updated with any settings from the custom configuration. Any settings left
out in the custom user configuration will stay their default values from `config-default.yml`.
"""
import logging
import os
from collections.abc import Mapping
from pathlib import Path
from typing import Union
import yaml
log = logging.getLogger(__name__)
def _env_var_constructor(loader, node):
"""
Implements a custom YAML tag for loading optional environment variables.
If the environment variable is set, returns the value of it.
Otherwise, returns `None`.
Example usage in the YAML configuration:
# Optional app configuration. Set `MY_APP_KEY` in the environment to use it.
application:
key: !ENV 'MY_APP_KEY'
"""
default = None
# Check if the node is a plain string value
if node.id == "scalar":
value = loader.construct_scalar(node)
key = str(value)
else:
# The node value is a list
value = loader.construct_sequence(node)
if len(value) >= 2:
# If we have at least two values, then we have both a key and a default value
default = value[1]
key = value[0]
else:
# Otherwise, we just have a key
key = value[0]
return os.getenv(key, default)
def _join_var_constructor(loader, node):
"""Implements a custom YAML tag for concatenating other tags in the document to strings.
This allows for a much more DRY configuration file.
"""
fields = loader.construct_sequence(node)
return "".join(str(x) for x in fields)
yaml.SafeLoader.add_constructor("!ENV", _env_var_constructor)
yaml.SafeLoader.add_constructor("!JOIN", _join_var_constructor)
with open("config-default.yml", encoding="UTF-8") as file:
_CONFIG_YAML = yaml.safe_load(file)
def _recursive_update(original, new):
"""
Helper method which implements a recursive `dict.update` method, used for updating the
original configuration with configuration specified by the user.
"""
for key, value in original.items():
if key not in new:
continue
if isinstance(value, Mapping):
if not any(isinstance(subvalue, Mapping) for subvalue in value.values()):
original[key].update(new[key])
_recursive_update(original[key], new[key])
else:
original[key] = new[key]
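# Illustrative sketch of the merge behaviour (not part of the original module):
#   original = {"bot": {"prefixes": {"guild": "!", "direct_message": ""}}, "log": "INFO"}
#   new = {"bot": {"prefixes": {"guild": "?"}}}
#   _recursive_update(original, new)
#   # original["bot"]["prefixes"]["guild"] is now "?"; everything else keeps its default.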
if Path("config.yml").exists():
# Overwriting default config with new config.
log.info("Found `config.yml` file, loading constants from it.")
with open("config.yml", encoding="UTF-8") as file:
user_config = yaml.safe_load(file)
_recursive_update(_CONFIG_YAML, user_config)
def check_required_keys(keys):
"""
Verifies that keys that are set to be required are present in the
loaded configuration.
"""
for key_path in keys:
lookup = _CONFIG_YAML
try:
for key in key_path.split("."):
lookup = lookup[key]
if lookup is None:
raise KeyError(key)
except KeyError:
log.critical(
f"A configuration for `{key_path}` is required, but was not found. "
"Please set it in `config.yml` or setup an environment variable and try again."
)
raise
try:
required_keys = _CONFIG_YAML["config"]["required_keys"]
except KeyError:
pass
else:
check_required_keys(required_keys)
class YAMLGetter(type):
"""
Implements a custom metaclass used for accessing
configuration data by simply accessing class attributes.
Supports getting configuration from up to two levels
of nested configuration through `section` and `subsection`.
`section` specifies the YAML configuration section (or "key")
in which the configuration lives, and must be set.
`subsection` is an optional attribute specifying the section
within the section from which configuration should be loaded.
Example Usage:
# config.yml
bot:
prefixes:
direct_message: ''
guild: '!'
# config.py
class Prefixes(metaclass=YAMLGetter):
section = "bot"
subsection = "prefixes"
# Usage in Python code
from config import Prefixes
def get_prefix(bot, message):
if isinstance(message.channel, PrivateChannel):
return Prefixes.direct_message
return Prefixes.guild
"""
subsection = None
def __getattr__(cls, name):
name = name.lower()
try:
if cls.subsection is not None:
return _CONFIG_YAML[cls.section][cls.subsection][name]
return _CONFIG_YAML[cls.section][name]
except KeyError:
dotted_path = ".".join(
(cls.section, cls.subsection, name)
if cls.subsection is not None
else (cls.section, name)
)
log.critical(
f"Tried accessing configuration variable at `{dotted_path}`, but it could not be found."
)
raise
def __getitem__(cls, name):
return cls.__getattr__(name)
def __iter__(cls):
"""Return generator of key: value pairs of current constants class' config values."""
for name in cls.__annotations__:
yield name, getattr(cls, name)
# Dataclasses
class Bot(metaclass=YAMLGetter):
"""Type hints of `config.yml` "bot"."""
section = "bot"
prefix: str
token: str
id: int
log_level: Union[str, int]
class Db(metaclass=YAMLGetter):
section = "db"
host: str
user: str
password: str
class Sentry(metaclass=YAMLGetter):
section = "sentry"
dsn_key: str
class Finnhub(metaclass=YAMLGetter):
section = "finnhub"
token: str
url: str
class Shop_emoji(metaclass=YAMLGetter):
section = "shop"
subsection = "emoji"
buy_price: int
delete_price: int
class Colours(metaclass=YAMLGetter):
section = "style"
subsection = "colours"
soft_red: int
soft_green: int
soft_orange: int
bright_green: int
class Emojis(metaclass=YAMLGetter):
section = "style"
subsection = "emojis"
status_online: str
status_offline: str
status_idle: str
status_dnd: str
incident_actioned: str
incident_unactioned: str
incident_investigating: str
bullet: str
new: str
pencil: str
cross_mark: str
check_mark: str
first: str
previous: str
next: str
last: str
close: str
class Icons(metaclass=YAMLGetter):
section = "style"
subsection = "icons"
crown_blurple: str
crown_green: str
crown_red: str
defcon_denied: str
defcon_disabled: str
defcon_enabled: str
defcon_updated: str
filtering: str
hash_blurple: str
hash_green: str
hash_red: str
message_bulk_delete: str
message_delete: str
message_edit: str
sign_in: str
sign_out: str
user_ban: str
user_unban: str
user_update: str
user_mute: str
user_unmute: str
user_verified: str
user_warn: str
pencil: str
remind_blurple: str
remind_green: str
remind_red: str
questionmark: str
voice_state_blue: str
voice_state_green: str
voice_state_red: str
# Paths
BOT_DIR = os.path.dirname(__file__)
PROJECT_ROOT = os.path.abspath(os.path.join(BOT_DIR, os.pardir))
|
py
|
1a5adc9716687457a521e562a896499552c7bc5d
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module provides the Constraint class for handling
filters and pivots in a modular fashion. This enable easy
constraint application.
An implementation of :mod:`trappy.plotter.AbstractDataPlotter`
is expected to use the :mod:`trappy.plotter.Constraint.ConstraintManager`
class to pivot and filter data and handle multiple column,
trace and event inputs.
The underlying object that encapsulates a unique set of
a data column, data event and the requisite filters is
:mod:`trappy.plotter.Constraint.Constraint`
"""
# pylint: disable=R0913
from trappy.plotter.Utils import decolonize, normalize_list
from trappy.utils import listify
from trappy.plotter import AttrConf
class Constraint(object):
"""
What is a Constraint?
It is a collection of data defined by three things:
- A Pivot
- A Set of Filters
- A Data Column
For Example a :mod:`pandas.DataFrame`
===== ======== =========
Time CPU Latency
===== ======== =========
1 x <val>
2 y <val>
3 z <val>
4 a <val>
===== ======== =========
The resultant data will be split for each unique pivot value
with the filters applied
::
result["x"] = pd.Series.filtered()
result["y"] = pd.Series.filtered()
result["z"] = pd.Series.filtered()
result["a"] = pd.Series.filtered()
:param trappy_trace: Input Data
:type trappy_trace: :mod:`pandas.DataFrame` or a class derived from
:mod:`trappy.trace.BareTrace`
:param column: The data column
:type column: str
:param template: TRAPpy Event
:type template: :mod:`trappy.base.Base` event
:param trace_index: The index of the trace/data in the overall constraint
data
:type trace_index: int
:param filters: A dictionary of filter values
:type filters: dict
:param window: A time window to apply to the constraint.
E.g. window=(5, 20) will constraint to events that happened
between Time=5 to Time=20.
:type window: tuple of two ints
"""
def __init__(self, trappy_trace, pivot, column, template, trace_index,
filters, window):
self._trappy_trace = trappy_trace
self._filters = filters
self._pivot = pivot
self.column = column
self._template = template
self._dup_resolved = False
self._data = self.populate_data_frame()
if window:
# We want to include the previous value before the window
# and the next after the window in the dataset
min_idx = self._data.loc[:window[0]].index.max()
max_idx = self._data.loc[window[1]:].index.min()
self._data = self._data.loc[min_idx:max_idx]
self.result = self._apply()
self.trace_index = trace_index
def _apply(self):
"""This method applies the filter on the resultant data
on the input column.
"""
data = self._data
result = {}
try:
values = data[self.column]
except KeyError:
return result
if self._pivot == AttrConf.PIVOT:
pivot_vals = [AttrConf.PIVOT_VAL]
else:
pivot_vals = self.pivot_vals(data)
for pivot_val in pivot_vals:
criterion = values.map(lambda x: True)
for key in self._filters.keys():
if key != self._pivot and key in data.columns:
criterion = criterion & data[key].map(
lambda x: x in self._filters[key])
if pivot_val != AttrConf.PIVOT_VAL:
criterion &= data[self._pivot] == pivot_val
val_series = values[criterion]
if len(val_series) != 0:
result[pivot_val] = val_series
return result
def _uses_trappy_trace(self):
if not self._template:
return False
else:
return True
def populate_data_frame(self):
"""Return the populated :mod:`pandas.DataFrame`"""
if not self._uses_trappy_trace():
return self._trappy_trace
data_container = getattr(
self._trappy_trace,
decolonize(self._template.name))
return data_container.data_frame
def pivot_vals(self, data):
"""This method returns the unique pivot values for the
Constraint's pivot and the column
:param data: Input Data
:type data: :mod:`pandas.DataFrame`
"""
if self._pivot == AttrConf.PIVOT:
return AttrConf.PIVOT_VAL
if self._pivot not in data.columns:
return []
pivot_vals = set(data[self._pivot])
if self._pivot in self._filters:
pivot_vals = pivot_vals & set(self._filters[self._pivot])
return list(pivot_vals)
def __str__(self):
name = self.get_data_name()
if not self._uses_trappy_trace():
return name + ":" + str(self.column)
return name + ":" + \
self._template.name + ":" + self.column
def get_data_name(self):
"""Get name for the data member. This method
relies on the "name" attribute for the name.
If the name attribute is absent, it associates
a numeric name to the respective data element
:returns: The name of the data member
"""
if self._uses_trappy_trace():
if self._trappy_trace.name != "":
return self._trappy_trace.name
else:
return "Trace {}".format(self.trace_index)
else:
return "DataFrame {}".format(self.trace_index)
class ConstraintManager(object):
"""A class responsible for converting inputs
to constraints and also ensuring sanity
:param traces: Input Trace data
:type traces: :mod:`trappy.trace.BareTrace`, list(:mod:`trappy.trace.BareTrace`)
(or a class derived from :mod:`trappy.trace.BareTrace`)
:param columns: The column values from the corresponding
:mod:`pandas.DataFrame`
:type columns: str, list(str)
:param pivot: The column around which the data will be
pivoted:
:type pivot: str
:param templates: TRAPpy events
:type templates: :mod:`trappy.base.Base`
:param filters: A dictionary of values to be applied on the
respective columns
:type filters: dict
:param window: A time window to apply to the constraints
:type window: tuple of ints
:param zip_constraints: Permutes the columns and traces instead
of a one-to-one correspondence
:type zip_constraints: bool
"""
def __init__(self, traces, columns, templates, pivot, filters,
window=None, zip_constraints=True):
self._ip_vec = []
self._ip_vec.append(listify(traces))
self._ip_vec.append(listify(columns))
self._ip_vec.append(listify(templates))
self._lens = map(len, self._ip_vec)
self._max_len = max(self._lens)
self._pivot = pivot
self._filters = filters
self.window = window
self._constraints = []
self._trace_expanded = False
self._expand()
if zip_constraints:
self._populate_zip_constraints()
else:
self._populate_constraints()
def _expand(self):
"""This is really important. We need to
meet the following criteria for constraint
expansion:
::
Len[traces] == Len[columns] == Len[templates]
Or:
::
Permute(
Len[traces] = 1
Len[columns] = 1
Len[templates] != 1
)
Permute(
Len[traces] = 1
Len[columns] != 1
Len[templates] != 1
)
"""
min_len = min(self._lens)
max_pos_comp = [
i for i,
j in enumerate(
self._lens) if j != self._max_len]
if self._max_len == 1 and min_len != 1:
raise RuntimeError("Essential Arg Missing")
if self._max_len > 1:
# Are they all equal?
if len(set(self._lens)) == 1:
return
if min_len > 1:
raise RuntimeError("Cannot Expand a list of Constraints")
for val in max_pos_comp:
if val == 0:
self._trace_expanded = True
self._ip_vec[val] = normalize_list(self._max_len,
self._ip_vec[val])
def _populate_constraints(self):
"""Populate the constraints creating one for each column in
each trace
In a multi-trace, multicolumn scenario, constraints are created for
all the columns in each of the traces. _populate_constraints()
creates one constraint for the first trace and first column, the
next for the second trace and second column,... This function
creates a constraint for every combination of traces and columns
possible.
"""
for trace_idx, trace in enumerate(self._ip_vec[0]):
for col in self._ip_vec[1]:
template = self._ip_vec[2][trace_idx]
constraint = Constraint(trace, self._pivot, col, template,
trace_idx, self._filters, self.window)
self._constraints.append(constraint)
def get_column_index(self, constraint):
return self._ip_vec[1].index(constraint.column)
def _populate_zip_constraints(self):
"""Populate the expanded constraints
In a multitrace, multicolumn scenario, create constraints for
the first trace and the first column, second trace and second
column,... that is, as if you run zip(traces, columns)
"""
for idx in range(self._max_len):
if self._trace_expanded:
trace_idx = 0
else:
trace_idx = idx
trace = self._ip_vec[0][idx]
col = self._ip_vec[1][idx]
template = self._ip_vec[2][idx]
self._constraints.append(
Constraint(trace, self._pivot, col, template, trace_idx,
self._filters, self.window))
def generate_pivots(self, permute=False):
"""Return a union of the pivot values
:param permute: Permute the Traces and Columns
:type permute: bool
"""
pivot_vals = []
for constraint in self._constraints:
pivot_vals += constraint.result.keys()
p_list = list(set(pivot_vals))
traces = range(self._lens[0])
try:
sorted_plist = sorted(p_list, key=int)
except (ValueError, TypeError):
try:
sorted_plist = sorted(p_list, key=lambda x: int(x, 16))
except (ValueError, TypeError):
sorted_plist = sorted(p_list)
if permute:
pivot_gen = ((trace_idx, pivot) for trace_idx in traces for pivot in sorted_plist)
return pivot_gen, len(sorted_plist) * self._lens[0]
else:
return sorted_plist, len(sorted_plist)
def constraint_labels(self):
"""
:return: string to represent the
set of Constraints
"""
return map(str, self._constraints)
def __len__(self):
return len(self._constraints)
def __iter__(self):
return iter(self._constraints)
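# Illustrative sketch (not part of the original module; assumes plain pandas DataFrames
# may be passed as traces with templates=None, and the data below is made up):
#   import pandas as pd
#   df = pd.DataFrame({"cpu": [0, 1, 0, 1], "load": [10, 20, 30, 40]})
#   cm = ConstraintManager(df, "load", None, "cpu", filters={})
#   # Each resulting Constraint's .result maps a pivot value (here cpu 0 and 1)
#   # to the "load" series restricted to that pivot value.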
|
py
|
1a5adcc5ca584fd0e177c0bb53f1da3b64c708ad
|
from glinterface import GLPluginInterface
from glprogram import GLProgram,GLPluginProgram
import math
from OpenGL.GL import *
class GLWidgetPlugin(GLPluginInterface):
"""A GL plugin that sends user events to one or more Klamp't widgets.
To use, add this to a GLPluginProgram and call addWidget to add widgets"""
def __init__(self):
from ..robotsim import WidgetSet
GLPluginInterface.__init__(self)
self.klamptwidgetbutton = 2
self.klamptwidgetmaster = WidgetSet()
self.klamptwidgetdragging = False
def addWidget(self,widget):
self.klamptwidgetmaster.add(widget)
def widgetchangefunc(self,event):
"""Called whenever a widget is clicked or dragged.
event can be 'mousedown', 'mousedrag', 'mouseup'.
Subclasses can use this to respond to widget click events"""
pass
def widgethoverfunc(self):
"""Called whenever a widget changes appearance due to hover.
Subclasses can use this to respond to widget click events"""
pass
def display(self):
self.klamptwidgetmaster.drawGL(self.viewport())
return False
def keyboardfunc(self,c,x,y):
if len(c)==1:
self.klamptwidgetmaster.keypress(c)
return False
def keyboardupfunc(self,c,x,y):
return False
def mousefunc(self,button,state,x,y):
if button == self.klamptwidgetbutton:
if state == 0: #down
if self.klamptwidgetmaster.beginDrag(x,self.view.h-y,self.viewport()):
self.widgetchangefunc("mousedown")
self.klamptwidgetdragging = True
else:
if self.klamptwidgetdragging:
self.widgetchangefunc("mouseup")
self.klamptwidgetmaster.endDrag()
self.klamptwidgetdragging = False
if self.klamptwidgetmaster.wantsRedraw():
self.refresh()
return True
return False
def motionfunc(self,x,y,dx,dy):
if self.klamptwidgetdragging:
self.klamptwidgetmaster.drag(dx,-dy,self.viewport())
self.widgetchangefunc("mousedrag")
if self.klamptwidgetmaster.wantsRedraw():
self.refresh()
return True
else:
self.klamptwidgetmaster.hover(x,self.view.h-y,self.viewport())
if self.klamptwidgetmaster.wantsRedraw():
self.widgethoverfunc()
self.refresh()
return False
def idlefunc(self):
self.klamptwidgetmaster.idle()
return True
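#Illustrative sketch (not part of the original module): per the class docstring the
#intended wiring is roughly
#   program = GLPluginProgram()
#   plugin = GLWidgetPlugin()
#   plugin.addWidget(some_widget)   # some_widget: any Klamp't widget instance (assumed)
#   program.setPlugin(plugin)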
class GLMultiViewportProgram(GLProgram):
def __init__(self):
GLProgram.__init__(self)
self.views = []
self.activeView = None
self.dragging = False
self.sizePolicy = 'fit'
self.broadcast = False
self.defaultSizes = []
#self.height = self.view.h
def initialize(self):
if not GLProgram.initialize(self): return False
for v in self.views:
v.window = self.window
if not v.initialize():
return False
return True
def addView(self,view):
if isinstance(view,GLPluginInterface):
plugin = view
pview = GLPluginProgram()
pview.window = self.window
pview.setPlugin(view)
view = pview
assert isinstance(view,GLProgram)
self.views.append(view)
#spoofs reshape, motion functions
view.window = self
self.defaultSizes.append((view.view.w,view.view.h))
self.fit()
#print "Added a view, total",len(self.views),"size now",self.view.w,self.view.h
return view
def removeView(self,view):
view.window = None
for i,p in enumerate(self.views):
if p is view:
self.views.pop(i)
self.defaultSizes.pop(i)
self.fit()
self.activeView = None
return
def updateActive(self,x,y):
if not self.view.contains(x,y):
return
self.activeView = None
for i,p in enumerate(self.views):
if p.view.contains(x,y):
#print "Selecting view",x,y,":",i
self.activeView = i
return
return
def fit(self):
rowlen = int(math.ceil(math.sqrt(len(self.views))))
assert rowlen > 0
rowheights = [0]*int(math.ceil(float(len(self.views))/rowlen))
colwidths = [0]*rowlen
for i,p in enumerate(self.views):
col = i % rowlen
row = int(i / rowlen)
rowheights[row] = max(self.defaultSizes[i][1],rowheights[row])
colwidths[col] = max(self.defaultSizes[i][0],colwidths[col])
cumrowheights = [0]
cumcolwidths = [0]
for h in rowheights:
cumrowheights.append(cumrowheights[-1]+h)
for w in colwidths:
cumcolwidths.append(cumcolwidths[-1]+w)
if self.sizePolicy == 'fit':
self.view.w = sum(colwidths)
self.view.h = sum(rowheights)
for i,p in enumerate(self.views):
col = i % rowlen
row = int(i / rowlen)
p.view.x,p.view.y = (cumcolwidths[col],cumrowheights[row])
self.height = self.view.h
if self.window != None:
self.window.reshape(self.view.w,self.view.h)
else:
#squeeze
for i,p in enumerate(self.views):
col = i % rowlen
row = int(i / rowlen)
p.view.x = float(self.view.w)*float(cumcolwidths[col])/float(cumcolwidths[-1])
p.view.y = float(self.view.h)*float(cumrowheights[row])/float(cumrowheights[-1])
p.view.w = float(self.view.w)*float(colwidths[col]) / float(cumcolwidths[-1])
p.view.h = float(self.view.h)*float(rowheights[row]) / float(cumrowheights[-1])
p.view.x = self.view.x+int(p.view.x)
p.view.y = self.view.y+int(p.view.y)
p.view.w = int(p.view.w)
p.view.h = int(p.view.h)
#print "View",i,"shape",(p.view.x,p.view.y,p.view.w,p.view.h)
p.reshapefunc(p.view.w,p.view.h)
if self.window != None:
self.refresh()
def reshapefunc(self,w,h):
if (w,h) != (self.view.w,self.view.h):
self.view.w,self.view.h = w,h
self.height = self.view.h
self.sizePolicy = 'squash'
self.fit()
return True
def displayfunc(self):
anyTrue = False
for p in self.views:
try:
if p.displayfunc():
anyTrue = True
except Exception:
print "Error running displayfunc() for plugin",p.__class__.__name__
raise
return anyTrue
def display(self):
anyTrue = False
for p in self.views:
try:
if p.display():
anyTrue = True
except Exception:
print "Error running display() for plugin",p.__class__.__name__
raise
return anyTrue
def display_screen(self):
anyTrue = False
for p in self.views:
try:
if p.display_screen():
anyTrue = True
except Exception:
print "Error running display_screen() for plugin",p.__class__.__name__
raise
return anyTrue
def keyboardfunc(self,c,x,y):
if self.broadcast:
for p in self.views:
p.keyboardfunc(c,x,y)
return True
self.updateActive(x,y)
if self.activeView != None:
return True if self.views[self.activeView].keyboardfunc(c,x,y) else False
return False
def keyboardupfunc(self,c,x,y):
if self.broadcast:
for p in self.views:
p.keyboardupfunc(c,x,y)
return True
self.updateActive(x,y)
if self.activeView != None:
return True if self.views[self.activeView].keyboardupfunc(c,x,y) else False
return False
def mousefunc(self,button,state,x,y):
if self.broadcast:
for p in self.views:
p.mousefunc(button,state,x,y)
return True
if state == 0:
#button down
self.updateActive(x,y)
self.dragging = True
else:
self.dragging = False
if self.activeView != None:
return True if self.views[self.activeView].mousefunc(button,state,x,y) else False
return False
def motionfunc(self,x,y,dx,dy):
if self.broadcast:
for p in self.views:
p.motionfunc(x,y,dx,dy)
return True
if not self.dragging:
self.updateActive(x,y)
if self.activeView != None:
return True if self.views[self.activeView].motionfunc(x,y,dx,dy) else False
return False
def idlefunc(self):
for p in self.views:
p.idlefunc()
return True
def reshape(self,w,h):
"""Spoofs the window's reshape function"""
raise NotImplementedError("Can't have a viewport reshaping a multi-viewport yet")
self.sizePolicy = 'squash'
self.fit()
return True
def draw_text(self,point,text,size=12,color=None):
"""Draws text of the given size and color at the given point. Usually
called during display_screen."""
#if self.activeView == None:
self.window.draw_text(point,text,size,color)
#the GL viewport should be setup already
#else:
# ox,oy = self.pluginOrigins[self.activeView]
# self.window.draw_text(ox+x,oy+y,text,size,color)
def click_ray(self,x,y):
#print "Getting click ray"
if self.activeView == None:
return self.window.click_ray(x,y)
else:
return self.views[self.activeView].click_ray(x,y)
def get_view(self):
#print "Getting viewport..."
if self.activeView == None:
return self.window.get_view()
else:
return self.views[self.activeView].get_view()
def set_view(self,vp):
#print "Getting viewport..."
if self.activeView == None:
return self.window.get_view(vp)
else:
return self.views[self.activeView].get_view(vp)
_CACHED_DISPLAY_LISTS = set()
_CACHED_WARN_THRESHOLD = 1000
_CACHED_DELETED_LISTS = list()
class CachedGLObject:
"""An object whose drawing is accelerated by means of a display list.
The draw function may draw the object in the local frame, and the
object may be transformed without having to recompile the display list.
"""
def __init__(self):
self.name = ""
#OpenGL display list
self.glDisplayList = None
#marker for recursive calls
self.makingDisplayList = False
#parameters for render function
self.displayListRenderArgs = None
#other parameters for display lists
self.displayListParameters = None
#dirty bit to indicate whether the display list should be recompiled
self.changed = False
def __del__(self):
self.destroy()
def destroy(self):
"""Must be called to free up resources used by this object"""
if self.glDisplayList != None:
global _CACHED_DELETED_LISTS,_CACHED_DISPLAY_LISTS
if len(_CACHED_DELETED_LISTS) > 100:
for dl in _CACHED_DELETED_LISTS:
glDeleteLists(dl,1)
_CACHED_DELETED_LISTS = list()
else:
_CACHED_DELETED_LISTS.append(self.glDisplayList)
_CACHED_DISPLAY_LISTS.remove(self.glDisplayList)
self.glDisplayList = None
def markChanged(self):
"""Marked by an outside source to indicate the object has changed and
should be redrawn."""
self.changed = True
def draw(self,renderFunction,transform=None,args=None,parameters=None):
"""Given the function that actually makes OpenGL calls, this
will draw the object.
If parameters is given, the object's local appearance is assumed
to be defined deterministically from these parameters. The display
list will be redrawn if the parameters change.
"""
from ..math import se3
if args == None:
args = ()
if self.makingDisplayList:
renderFunction(*args)
return
if self.glDisplayList == None or self.changed or parameters != self.displayListParameters or args != self.displayListRenderArgs:
self.displayListRenderArgs = args
self.displayListParameters = parameters
self.changed = False
if self.glDisplayList == None:
#print "Generating new display list",self.name
global _CACHED_WARN_THRESHOLD,_CACHED_DISPLAY_LISTS,_CACHED_DELETED_LISTS
if len(_CACHED_DELETED_LISTS) > 0:
self.glDisplayList = _CACHED_DELETED_LISTS[-1]
_CACHED_DELETED_LISTS.pop(-1)
else:
self.glDisplayList = glGenLists(1)
_CACHED_DISPLAY_LISTS.add(self.glDisplayList)
if len(_CACHED_DISPLAY_LISTS) > _CACHED_WARN_THRESHOLD:
print "GLCachedObject: Creating",len(_CACHED_DISPLAY_LISTS),"GL objects",self.glDisplayList,"watch me for memory usage..."
_CACHED_WARN_THRESHOLD += 1000
#print "Compiling display list",self.name
if transform:
glPushMatrix()
glMultMatrixf(sum(zip(*se3.homogeneous(transform)),()))
glNewList(self.glDisplayList,GL_COMPILE_AND_EXECUTE)
self.makingDisplayList = True
try:
renderFunction(*args)
except GLError:
import traceback
print "Error encountered during draw, display list",self.glDisplayList
traceback.print_exc()
self.makingDisplayList = False
glEndList()
if transform:
glPopMatrix()
else:
if transform:
glPushMatrix()
glMultMatrixf(sum(zip(*se3.homogeneous(transform)),()))
glCallList(self.glDisplayList)
if transform:
glPopMatrix()
|
py
|
1a5ade9c5eea4589eea38e04c7af0d9316884711
|
import discord
from discord.ext import commands
import stackprinter as sp
from bin import zb
class onmemberremoveCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Events on member join
@commands.Cog.listener()
async def on_member_remove(self, member):
try:
# If any bot
if member.bot:
return
# try:
# info = await member.guild.fetch_ban(member)
# print(info)
# except:
# pass
# banlist = await member.guild.bans()
# for banned in banlist:
# if member.id == banned.id:
# return
embed=discord.Embed(description=member.mention + " " +
member.name, color=0xff470f)
embed.add_field(name="Join Date", value=member.joined_at,
inline=False)
if not zb.is_pattern(member.display_name,r'^[A-Z]\w+[0-9]{3,}'):
embed.set_thumbnail(url=member.avatar_url)
embed.set_author(name="Member Left",
icon_url=member.avatar_url)
await zb.print_log(self,member,embed)
# junk1, junk2 = zb.del_all_special_role(member.guild,member.id)
except Exception as e:
await zb.bot_errors(self,sp.format(e))
def setup(bot):
bot.add_cog(onmemberremoveCog(bot))
|
py
|
1a5adeea87190311622c2f2cd234f177a65d3a78
|
'''
This script resets the escpos printer
'''
import sys
from escpos.printer import Usb
from escpos import exceptions
VENDOR_ID = 0x0456
PRODUCT_ID = 0x0808
P_INTERFACE = 4
P_IN_ENDPOINT = 0x81
P_OUT_ENDPOINT = 0x03
p = Usb(VENDOR_ID, PRODUCT_ID, P_INTERFACE, P_IN_ENDPOINT, P_OUT_ENDPOINT)
reset_cmd = b'\x1b?\n\x00'
try:
p._raw(reset_cmd)
except Exception as e:
print(e)
sys.exit(1)
|
py
|
1a5adef4ffd9d0a0d08495e118b10a5200ec9d57
|
import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='jc',
version='1.17.1',
author='Kelly Brazil',
author_email='[email protected]',
description='Converts the output of popular command-line tools and file-types to JSON.',
install_requires=[
'ruamel.yaml>=0.15.0',
'xmltodict>=0.12.0',
'Pygments>=2.3.0'
],
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3.6',
url='https://github.com/kellyjonbrazil/jc',
packages=setuptools.find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
entry_points={
'console_scripts': [
'jc=jc.cli:main'
]
},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Utilities'
]
)
|
py
|
1a5adf7202a2340f7ba61b6c0cfdfa99eb62b169
|
import json
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_text(request):
print("Received request")
text = json.loads(request.body)["text"]
return JsonResponse({"response": "You said: %s" % text})
|
py
|
1a5ae112a55f9abc9f37968ed5cb10023adaf9ae
|
"""django_lti_tests URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls')),
]
|
py
|
1a5ae1635835c54e375ac1ba0bd46a22215156a9
|
import numpy as np
from KPI import KPI
def calc(inp):
return inp[:, 9]
def gap(open_price, close_price, init_money):
return 1.0 * (close_price / open_price - 1) * init_money
def gap_colume(open_price, close_price, colume):
return 1.0 * (close_price - open_price) * colume
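# Example (illustrative numbers): holding 100 shares bought at 10.0 and now priced at 10.5
# gives gap_colume(10.0, 10.5, 100) = 50.0, while gap(10.0, 10.5, 10000) = 500.0 is the
# same move expressed on an initial cash amount of 10000.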
def RSI(data, paras, standard_data_file):
TIME_PERIOD = 14
HIGH_RSI = 85
LOW_RSI = 30
ORDER_PERCENT = 0.3
money = paras['InitMoney']
cash = money
start_money = money
# Cumulative capital
str_money = money
std_money = money
# Benchmark data
standard_data = np.load(standard_data_file)
# Daily strategy returns and benchmark returns
strategy_daily = []
standard_daily_reward = []
strategy_daily_reward = []
standard_daily_ratio = []
strategy_daily_ratio = []
std_cur_open = standard_data[0][1]
NrOfShare = data.shape[0]
hold_colume = np.zeros(NrOfShare, 'float32')
length = np.zeros(NrOfShare, 'float32')
p_pre = np.zeros(NrOfShare, 'float32')
for i in range(data.shape[1]):
if i < 14:
continue
# Benchmark return calculation
std_cur_close = standard_data[i][3]
# Compute the benchmark's daily return
std_gap_money = gap(std_cur_open, std_cur_close, init_money=std_money)
# Add to the benchmark's running total
std_money += std_gap_money
# Append the daily return to the list
standard_daily_reward.append(std_gap_money)
# Append the daily return ratio to the list
standard_daily_ratio.append(1.0 * (std_cur_close - std_cur_open) / std_cur_open)
RSI_val = data[:, i-13:i+1, 2] - data[:, i-14:i, 2]
RSI_positive = []
for j in range(RSI_val.shape[0]):
RSI_positive.append(np.sum(RSI_val[j, RSI_val[j,:] > 0]))
RSI_positive = np.array(RSI_positive)
RSI_negative = []
for j in range(RSI_val.shape[0]):
RSI_negative.append(np.sum(RSI_val[j, RSI_val[j, :] < 0]))
RSI_negative = np.array(RSI_negative)
sell_share = RSI_positive / (RSI_positive - RSI_negative) * 100 > HIGH_RSI
buy_share = RSI_positive / (RSI_positive - RSI_negative) * 100 < LOW_RSI
hold_index = hold_colume > 0
str_cur_close = data[hold_index, i - 1, 2]
str_pre_close = data[hold_index, i, 2]
str_gap_money = gap_colume(str_pre_close, str_cur_close, hold_colume[hold_index])
str_money += np.sum(str_gap_money)
strategy_daily_reward.append(np.sum(str_gap_money))
if np.sum(hold_index) != 0:
strategy_daily_ratio.append(1.0 * np.mean((str_cur_close - str_pre_close) / str_pre_close))
else:
strategy_daily_ratio.append(0)
if np.sum(buy_share) > 0 and cash > 100:
money_each_share = cash // np.sum(buy_share)
hold_colume[buy_share] += money_each_share // (data[buy_share, i, 2] * 100) * 100
cash -= np.sum(money_each_share // (data[buy_share, i, 2] * 100) * 100 * data[buy_share, i, 2])
if np.sum(sell_share) > 0:
sell_index = hold_index & sell_share
cash += np.sum(hold_colume[sell_index] * data[sell_index, i, 2])
hold_colume[sell_share] = np.zeros(np.sum(sell_share))
p_pre = calc(data[:, i, :])
std_cur_open = std_cur_close
N = data.shape[1]
for i in range(500 - N):
npzero = np.array([0.0])
strategy_daily_reward = np.append(npzero, strategy_daily_reward)
strategy_daily_ratio = np.append(npzero, strategy_daily_ratio)
standard_daily_reward = np.append(npzero, standard_daily_reward)
standard_daily_ratio = np.append(npzero, standard_daily_ratio)
N -= TIME_PERIOD
return start_money, str_money, std_money, N, strategy_daily_reward, strategy_daily_ratio, standard_daily_reward, standard_daily_ratio
if __name__ == '__main__':
data = np.load('../saved files/data_zjz.npy')[:, :500, :]
standard_data = '../saved files/standard_data.npy'
init_money, str_money, std_money, N, strategy_daily_reward, strategy_daily_ratio, standard_daily_reward, standard_daily_ratio = RSI(
data, {'InitMoney': 1000000}, standard_data)
for i in range(500 - len(strategy_daily_reward)):
npzero = np.array([0.0])
strategy_daily_reward = np.append(npzero, strategy_daily_reward)
strategy_daily_ratio = np.append(npzero, strategy_daily_ratio)
standard_daily_reward = np.append(npzero, standard_daily_reward)
standard_daily_ratio = np.append(npzero, standard_daily_ratio)
print('init_money shape:{}'.format(init_money))
print('str_money shape:{}'.format(str_money))
print('std_money shape:{}'.format(std_money))
print('N shape:{}'.format(N))
print('strategy_daily_reward shape:{}'.format(np.array(strategy_daily_reward).shape))
print('strategy_daily_ratio shape:{}'.format(np.array(strategy_daily_ratio).shape))
print('standard_daily_reward shape:{}'.format(np.array(standard_daily_reward).shape))
print('standard_daily_ratio shape:{}'.format(np.array(standard_daily_ratio).shape))
kpi = KPI(
init_money=init_money,
str_money=str_money,
std_money=std_money,
N=N,
strategy_daily_reward=strategy_daily_reward,
strategy_daily_ratio=strategy_daily_ratio,
standard_daily_reward=standard_daily_reward,
standard_daily_ratio=standard_daily_ratio
)
all_filed = kpi.get_kpi()
money1 = 1000000.0
money2 = 1000000.0
daily_reward1 = strategy_daily_reward
daily_reward2 = standard_daily_reward
str_daily_reward_list = []
std_daily_reward_list = []
for i in range(len(daily_reward1)):
money1 += daily_reward1[i]
str_daily_reward_list.append(money1)
for i in range(len(daily_reward2)):
money2 += daily_reward2[i]
std_daily_reward_list.append(money2)
print(str_daily_reward_list)
print(std_daily_reward_list)
daily = []
daily.append(np.array(str_daily_reward_list))
daily.append(np.array(std_daily_reward_list))
np.save('../saved files/strategy_0_daily.npy', np.array(daily))
|
py
|
1a5ae198b5c2ba059b6178b4cbd53246d68a1a8f
|
from __future__ import absolute_import, print_function
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from zerver.models import UserProfile
import argparse
from datetime import datetime
import requests
import ujson
from typing import Any
class Command(BaseCommand):
help = """Add users to a MailChimp mailing list."""
def add_arguments(self, parser):
# type: (argparse.ArgumentParser) -> None
parser.add_argument('--api-key',
dest='api_key',
type=str,
help='MailChimp API key.')
parser.add_argument('--list-id',
dest='list_id',
type=str,
help='List ID of the MailChimp mailing list.')
parser.add_argument('--optin-time',
dest='optin_time',
type=str,
default=datetime.isoformat(timezone.now().replace(microsecond=0)),
help='Opt-in time of the users.')
def handle(self, *args, **options):
# type: (*Any, **str) -> None
if options['api_key'] is None:
try:
if settings.MAILCHIMP_API_KEY is None:
print('MAILCHIMP_API_KEY is None. Check your server settings file.')
exit(1)
options['api_key'] = settings.MAILCHIMP_API_KEY
except AttributeError:
print('Please supply a MailChimp API key to --api-key, or add a '
'MAILCHIMP_API_KEY to your server settings file.')
exit(1)
if options['list_id'] is None:
try:
if settings.ZULIP_FRIENDS_LIST_ID is None:
print('ZULIP_FRIENDS_LIST_ID is None. Check your server settings file.')
exit(1)
options['list_id'] = settings.ZULIP_FRIENDS_LIST_ID
except AttributeError:
print('Please supply a MailChimp List ID to --list-id, or add a '
'ZULIP_FRIENDS_LIST_ID to your server settings file.')
exit(1)
endpoint = "https://%s.api.mailchimp.com/3.0/lists/%s/members" % \
(options['api_key'].split('-')[1], options['list_id'])
for user in UserProfile.objects.filter(is_bot=False, is_active=True) \
.values('email', 'full_name', 'realm_id') \
.filter(full_name='Zoe'):
data = {
'email_address': user['email'],
'list_id': options['list_id'],
'status': 'subscribed',
'merge_fields': {
'NAME': user['full_name'],
'REALM_ID': user['realm_id'],
'OPTIN_TIME': options['optin_time'],
},
}
r = requests.post(endpoint, auth=('apikey', options['api_key']), json=data, timeout=10)
if r.status_code == 400 and ujson.loads(r.text)['title'] == 'Member Exists':
print("%s is already a part of the list." % (data['email_address'],))
elif r.status_code >= 400:
print(r.text)
|
py
|
1a5ae1b60e4f6d65150cd56f2a80b1770d650017
|
# -*- coding: utf-8 -*-
# website: http://30daydo.com
# @Time : 2019/10/24 0:03
# @File : new_stock_fund.py
# Fetch data on funds that participate in new-share (IPO) subscriptions
import requests
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import logging
from scrapy.selector import Selector
logger = logging.getLogger()
PATH = r'C:\OneDrive\Python\selenium\chromedriver.exe'
class TianTianFund():
def __init__(self):
# Not yet listed
self.wss_url='http://fund.eastmoney.com/data/dxgjj_xgccjjyl.html#wss;SUMPLACE;desc;1'
options = webdriver.ChromeOptions()
options.add_argument(
'--user-agent=Mozilla/5.0 (Windows NT 999999.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')
self.driver = webdriver.Chrome(executable_path=PATH,
chrome_options=options)
def get_fund(self):
self.driver.get(self.wss_url)
time.sleep(5)
text=self.driver.page_source
response = Selector(text=text)
nodes = response.xpath('//tbody[@id="datalistwss_body"]/tr')
for node in nodes:
code = node.xpath('.//td[2]/a/text()').extract_first()
name = node.xpath('.//td[3]/a/text()').extract_first()
hit_count = node.xpath('.//td[6]/a[1]/text()').extract_first()
fund_url = node.xpath('.//td[6]/a[1]/@href').extract_first()
full_url = 'http://fund.eastmoney.com/data/'+fund_url
new_stock_amount = node.xpath('.//td[6]/text()').extract_first()
self.driver.get(full_url)
time.sleep(5)
sub_response = Selector(text=self.driver.page_source)
sub_nodes = sub_response.xpath('//tbody[@id="datalist_body"]/tr')
new_stock_list = []
for sub_node in sub_nodes:
d={}
stock_code = sub_node.xpath('.//td[2]/a/text()').extract_first()
stock_name = sub_node.xpath('.//td[3]/a/text()').extract_first()
assign_mount = sub_node.xpath('.//td[9]/text()').extract_first()
d['新股代码']=stock_code
d['新股名称']=stock_name
d['中的金额-万元']=assign_mount
new_stock_list.append(d)
print(new_stock_list)
def start(self):
self.get_fund()
self.driver.close()
if __name__=='__main__':
fund = TianTianFund()
fund.start()
|
py
|
1a5ae24e33b8d80516d11ee4b422a3095fa1d804
|
import numpy as np
from mygrad.operation_base import BroadcastableOp, Operation
__all__ = ["GetItem", "SetItem"]
class GetItem(Operation):
""" Defines the __getitem__ interface for a Tensor, supporting back-propagation
Supports back-propagation through all valid numpy-indexing (basic, advanced, mixed, etc.)"""
def __call__(self, a, index):
""" ``a[index]``
Parameters
----------
a : mygrad.Tensor
The tensor whose entries are being accessed.
index : valid-array-index
An n-dimensional index for specifying entries or subregions of `a`.
All means of numpy-array indexing (basic, advanced, mixed, etc) are
supported.
Returns
-------
numpy.ndarray
The array returned by the get-item operation"""
self.variables = (a,)
self.index = index
return a.data[index]
def backward_var(self, grad, index, **kwargs):
a = self.variables[index]
out = np.zeros_like(a.data)
np.add.at(out, self.index, grad)
return out
def _arr(*shape):
""" Construct an array of a specified consisting of values [0, _arr.size)
filled in row-major order.
Parameters
----------
*shape : int
Returns
-------
numpy.ndarray"""
return np.arange(np.prod(shape)).reshape(shape)
def _is_int_array_index(index):
""" Returns True if `index` contains any array-like integer-valued sequences
Parameters
----------
index : Tuple[Any]
Returns
-------
bool """
return any(
np.issubdtype(np.asarray(ind).dtype, np.int_) and np.asarray(ind).ndim
for ind in index
)
def _is_bool_array_index(index):
""" Returns True if `index` solely contains a boolean-valued array
Parameters
----------
index : Tuple[Any]
Returns
-------
bool """
return len(index) == 1 and np.issubdtype(np.asarray(index[0]).dtype, np.bool_)
class SetItem(BroadcastableOp):
""" Defines the __setitem__ interface for a Tensor, supporting back-propagation through
    both the tensor being set and the tensor whose values are used in the assignment.
    Supports back-propagation through all valid numpy-indexing (basic, advanced, mixed, etc.),
    as well as broadcasting of the assigned values."""
def __call__(self, a, b, index):
""" a[index] = b
Parameters
----------
a : mygrad.Tensor
The tensor whose entries are being set. A copy of the underlying
data is made if `a` is a non-constant tensor.
b : mygrad.Tensor
`b` must be broadcast-compatible with `a[index]`
index : valid-array-index
An n-dimensional index for specifying entries or subregions of `a`.
All means of numpy-array indexing (basic, advanced, mixed, etc) are
supported.
Notes
-----
Additional computational overhead is required for back-propagation when
`index` contains any integer-valued arrays, to accommodate for the scenario
in which a single element is set multiple times."""
out = np.copy(a.data) if not a.constant else a.data
self.variables = (a, b)
self.index = index if isinstance(index, tuple) else (index,)
out[index] = b.data
return out
def backward_var(self, grad, index, **kwargs):
a, b = self.variables
if index == 0:
grad = np.copy(grad)
grad[self.index] = 0
return grad
elif index == 1:
grad_sel = np.asarray(grad[self.index])
# Basic indexing and indexing with a single boolean-array is trivial. The
# gradient into b can just be accessed by indexing into `grad`.
# Indexing with integer-valued arrays can be problematic, as the same
            # item can be specified multiple times for "setting"; here only the last set-item
# for that element has an effect. For example:
# x[np.array([0, 0])] = np.array([2, 3]) # `3` gets set to x[0]; 2 has no effect
# Thus only that corresponding element in `grad` (that corresponding to `3`)
# should be propagated back into b. Thus we must check to see if any items are
# being set redundantly, and mask out any elements in `grad` corresponding to
# the elements in `b` that weren't actually set.
if (
not np.shares_memory(grad_sel, grad)
and grad_sel.size > 0
and grad_sel.ndim > 0
and not _is_bool_array_index(self.index)
and _is_int_array_index(self.index)
):
# create an array of unique elements, and see if indexing into it produces
# any redundant elements
unique = _arr(*grad.shape)
sub_sel = unique[self.index].flat
elements, first_inds, = np.unique(
np.flip(sub_sel, axis=0), return_index=True
)
if len(first_inds) < len(sub_sel):
# one or more elements were set redundantly, identify the entries in `b`
# that actually were set to those elements (the last-most set-item calls
# for those elements) and propagate only the corresponding elements from grad
first_inds = (len(sub_sel) - 1) - first_inds
mask = np.zeros_like(sub_sel)
mask[first_inds] = 1
mask = mask.reshape(grad_sel.shape)
grad_sel *= mask
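                    # Illustrative walk-through (hypothetical values): for index
                    # (np.array([0, 0]),) and a 1-D grad, sub_sel is [0, 0]; the
                    # flipped np.unique keeps only the last occurrence, so mask
                    # becomes [0, 1] and the gradient of the overwritten first
                    # assignment is zeroed out.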
# handle the edge case of "projecting down" on setitem. E.g:
# x = Tensor([0, 1, 2])
# y = Tensor([3])
# x[0] = y # this is legal since x[0] and y have the same size
if grad_sel.ndim < b.ndim:
if grad_sel.size == b.size:
grad_sel = grad_sel.reshape(b.shape)
else:
# Broadcasting occurred during set-item and `b` contains
# excess leading singleton dimensions. Make `grad_sel`
# commensurate with `b` for subsequent `reduce_broadcast`
# to work
grad_sel = grad_sel[(np.newaxis,) * (b.ndim - grad_sel.ndim)]
return grad_sel
else:
raise IndexError() # pragma: no cover
|
py
|
1a5ae255fc3e078a2fc3731be810114e53631d64
|
# Copyright 2020 Allan Feldman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
py
|
1a5ae27b3ebe40eb37f286cc6c1e2a5106365970
|
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Google Cloud Vision API Python Beta Snippets
Example Usage:
python beta_snippets.py -h
python beta_snippets.py object-localization INPUT_IMAGE
python beta_snippets.py object-localization-uri gs://...
python beta_snippets.py handwritten-ocr INPUT_IMAGE
python beta_snippets.py handwritten-ocr-uri gs://...
python beta_snippets.py batch-annotate-files INPUT_PDF
python beta_snippets.py batch-annotate-files-uri gs://...
python beta_snippets.py batch-annotate-images-uri gs://... gs://...
For more information, see the documentation at
https://cloud.google.com/vision/docs.
"""
import argparse
import io
# [START vision_localize_objects_beta]
def localize_objects(path):
"""Localize objects in the local image.
Args:
path: The path to the local file.
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
with open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_beta]
# [START vision_localize_objects_gcs_beta]
def localize_objects_uri(uri):
"""Localize objects in the image on Google Cloud Storage
Args:
uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
image = vision.types.Image()
image.source.image_uri = uri
objects = client.object_localization(
image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
print('\n{} (confidence: {})'.format(object_.name, object_.score))
print('Normalized bounding polygon vertices: ')
for vertex in object_.bounding_poly.normalized_vertices:
print(' - ({}, {})'.format(vertex.x, vertex.y))
# [END vision_localize_objects_gcs_beta]
# [START vision_handwritten_ocr_beta]
def detect_handwritten_ocr(path):
"""Detects handwritten characters in a local image.
Args:
path: The path to the local file.
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
# Language hint codes for handwritten OCR:
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
# Note: Use only one language hint code per request for handwritten OCR.
image_context = vision.types.ImageContext(
language_hints=['en-t-i0-handwrit'])
response = client.document_text_detection(image=image,
image_context=image_context)
print('Full Text: {}'.format(response.full_text_annotation.text))
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_handwritten_ocr_beta]
# [START vision_handwritten_ocr_gcs_beta]
def detect_handwritten_ocr_uri(uri):
"""Detects handwritten characters in the file located in Google Cloud
Storage.
Args:
uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision_v1p3beta1 as vision
client = vision.ImageAnnotatorClient()
image = vision.types.Image()
image.source.image_uri = uri
# Language hint codes for handwritten OCR:
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
# Note: Use only one language hint code per request for handwritten OCR.
image_context = vision.types.ImageContext(
language_hints=['en-t-i0-handwrit'])
response = client.document_text_detection(image=image,
image_context=image_context)
print('Full Text: {}'.format(response.full_text_annotation.text))
for page in response.full_text_annotation.pages:
for block in page.blocks:
print('\nBlock confidence: {}\n'.format(block.confidence))
for paragraph in block.paragraphs:
print('Paragraph confidence: {}'.format(
paragraph.confidence))
for word in paragraph.words:
word_text = ''.join([
symbol.text for symbol in word.symbols
])
print('Word text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print('\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_handwritten_ocr_gcs_beta]
# [START vision_batch_annotate_files_beta]
def detect_batch_annotate_files(path):
"""Detects document features in a PDF/TIFF/GIF file.
While your PDF file may have several pages,
this API can process up to 5 pages only.
Args:
path: The path to the local file.
"""
from google.cloud import vision_v1p4beta1 as vision
client = vision.ImageAnnotatorClient()
with open(path, 'rb') as pdf_file:
content = pdf_file.read()
    # Other supported mime_types: 'image/tiff' or 'image/gif'
mime_type = 'application/pdf'
input_config = vision.types.InputConfig(
content=content, mime_type=mime_type)
feature = vision.types.Feature(
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
# Annotate the first two pages and the last one (max 5 pages)
# First page starts at 1, and not 0. Last page is -1.
pages = [1, 2, -1]
request = vision.types.AnnotateFileRequest(
input_config=input_config,
features=[feature],
pages=pages)
response = client.batch_annotate_files(requests=[request])
for image_response in response.responses[0].responses:
for page in image_response.full_text_annotation.pages:
for block in page.blocks:
print(u'\nBlock confidence: {}\n'.format(block.confidence))
for par in block.paragraphs:
print(u'\tParagraph confidence: {}'.format(par.confidence))
for word in par.words:
symbol_texts = [symbol.text for symbol in word.symbols]
word_text = ''.join(symbol_texts)
print(u'\t\tWord text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print(u'\t\t\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_batch_annotate_files_beta]
# [START vision_batch_annotate_files_gcs_beta]
def detect_batch_annotate_files_uri(gcs_uri):
"""Detects document features in a PDF/TIFF/GIF file.
While your PDF file may have several pages,
this API can process up to 5 pages only.
Args:
        gcs_uri: The path to the file in Google Cloud Storage (gs://...)
"""
from google.cloud import vision_v1p4beta1 as vision
client = vision.ImageAnnotatorClient()
    # Other supported mime_types: 'image/tiff' or 'image/gif'
mime_type = 'application/pdf'
input_config = vision.types.InputConfig(
gcs_source=vision.types.GcsSource(uri=gcs_uri), mime_type=mime_type)
feature = vision.types.Feature(
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
# Annotate the first two pages and the last one (max 5 pages)
# First page starts at 1, and not 0. Last page is -1.
pages = [1, 2, -1]
request = vision.types.AnnotateFileRequest(
input_config=input_config,
features=[feature],
pages=pages)
response = client.batch_annotate_files(requests=[request])
for image_response in response.responses[0].responses:
for page in image_response.full_text_annotation.pages:
for block in page.blocks:
print(u'\nBlock confidence: {}\n'.format(block.confidence))
for par in block.paragraphs:
print(u'\tParagraph confidence: {}'.format(par.confidence))
for word in par.words:
symbol_texts = [symbol.text for symbol in word.symbols]
word_text = ''.join(symbol_texts)
print(u'\t\tWord text: {} (confidence: {})'.format(
word_text, word.confidence))
for symbol in word.symbols:
print(u'\t\t\tSymbol: {} (confidence: {})'.format(
symbol.text, symbol.confidence))
# [END vision_batch_annotate_files_gcs_beta]
# [START vision_async_batch_annotate_images_beta]
def async_batch_annotate_images_uri(input_image_uri, output_uri):
"""Batch annotation of images on Google Cloud Storage asynchronously.
Args:
input_image_uri: The path to the image in Google Cloud Storage (gs://...)
output_uri: The path to the output path in Google Cloud Storage (gs://...)
"""
import re
from google.cloud import storage
from google.protobuf import json_format
from google.cloud import vision_v1p4beta1 as vision
client = vision.ImageAnnotatorClient()
# Construct the request for the image(s) to be annotated:
image_source = vision.types.ImageSource(image_uri=input_image_uri)
image = vision.types.Image(source=image_source)
features = [
vision.types.Feature(type=vision.enums.Feature.Type.LABEL_DETECTION),
vision.types.Feature(type=vision.enums.Feature.Type.TEXT_DETECTION),
vision.types.Feature(type=vision.enums.Feature.Type.IMAGE_PROPERTIES),
]
requests = [
vision.types.AnnotateImageRequest(image=image, features=features),
]
gcs_destination = vision.types.GcsDestination(uri=output_uri)
output_config = vision.types.OutputConfig(
gcs_destination=gcs_destination, batch_size=2)
operation = client.async_batch_annotate_images(
requests=requests, output_config=output_config)
print('Waiting for the operation to finish.')
operation.result(timeout=10000)
# Once the request has completed and the output has been
# written to Google Cloud Storage, we can list all the output files.
storage_client = storage.Client()
match = re.match(r'gs://([^/]+)/(.+)', output_uri)
bucket_name = match.group(1)
prefix = match.group(2)
bucket = storage_client.get_bucket(bucket_name)
# Lists objects with the given prefix.
blob_list = list(bucket.list_blobs(prefix=prefix))
print('Output files:')
for blob in blob_list:
print(blob.name)
# Processes the first output file from Google Cloud Storage.
# Since we specified batch_size=2, the first response contains
# annotations for the first two annotate image requests.
output = blob_list[0]
json_string = output.download_as_string()
response = json_format.Parse(json_string,
vision.types.BatchAnnotateImagesResponse())
# Prints the actual response for the first annotate image request.
print(u'The annotation response for the first request: {}'.format(
response.responses[0]))
# [END vision_async_batch_annotate_images_beta]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
object_parser = subparsers.add_parser(
'object-localization', help=localize_objects.__doc__)
object_parser.add_argument('path')
object_uri_parser = subparsers.add_parser(
'object-localization-uri', help=localize_objects_uri.__doc__)
object_uri_parser.add_argument('uri')
handwritten_parser = subparsers.add_parser(
'handwritten-ocr', help=detect_handwritten_ocr.__doc__)
handwritten_parser.add_argument('path')
handwritten_uri_parser = subparsers.add_parser(
'handwritten-ocr-uri', help=detect_handwritten_ocr_uri.__doc__)
handwritten_uri_parser.add_argument('uri')
batch_annotate_parser = subparsers.add_parser(
'batch-annotate-files', help=detect_batch_annotate_files.__doc__)
batch_annotate_parser.add_argument('path')
batch_annotate_uri_parser = subparsers.add_parser(
'batch-annotate-files-uri',
help=detect_batch_annotate_files_uri.__doc__)
batch_annotate_uri_parser.add_argument('uri')
batch_annotate__image_uri_parser = subparsers.add_parser(
'batch-annotate-images-uri',
help=async_batch_annotate_images_uri.__doc__)
batch_annotate__image_uri_parser.add_argument('uri')
batch_annotate__image_uri_parser.add_argument('output')
args = parser.parse_args()
if 'uri' in args.command:
if 'object-localization-uri' in args.command:
localize_objects_uri(args.uri)
elif 'handwritten-ocr-uri' in args.command:
detect_handwritten_ocr_uri(args.uri)
elif 'batch-annotate-files-uri' in args.command:
detect_batch_annotate_files_uri(args.uri)
elif 'batch-annotate-images-uri' in args.command:
async_batch_annotate_images_uri(args.uri, args.output)
else:
if 'object-localization' in args.command:
localize_objects(args.path)
elif 'handwritten-ocr' in args.command:
detect_handwritten_ocr(args.path)
elif 'batch-annotate-files' in args.command:
detect_batch_annotate_files(args.path)
|
py
|
1a5ae2e5a7bb313d7f3674571637ffbc91f9da0e
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('', include(router.urls))
]
|
py
|
1a5ae307dff95b418f2b753690bae798fcb49a94
|
import os
from src.antlr_utils import parse
from src.grammar_cnf import GrammarCNF
import pytest
@pytest.mark.parametrize("grammar", [GrammarCNF.from_txt("dbql_grammar.txt")])
@pytest.mark.parametrize("test_input, expected", [
(
'''
connect "azat/home/db" ;
select edges
from query term("s")*|term("b")+.term("c")?;
''',
True
),
(
'''
select edges from name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges from name "sparsegraph_256.txt" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal(set(1, 2, 3), set (4, 5, 6))
of name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal(set(1, 2, 3), set (4, 5, 6)) of name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select filter edges with
( u, l, v ) satisfies isStart(u) and isFinal(v)
from name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select filter edges with
( u, l, v ) satisfies labelIs("ar") or (isStart(u) and isFinal(v))
from name "sparsegraph" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select filter edges with
( u, l, v ) satisfies labelIs("ar") or (isStart(u) and isFinal(v))
from name "sparsegraph.txt" ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from query term("s")*|term("b")+.term("c")? ;
''',
True
),
(
'''
connect "azat/home/db" ;
select edges
from name "sparsegraph" intersect query term("a") alt term("b") ;
''',
True
),
# graph expression with multiple levels:
(
'''
connect "home/db" ;
select count edges
from startAndFinal(set(1, 2, 3), set(4, 5, 6))
of name "fullgraph" intersect query term("a") star concat term("b");
''',
True
),
(
'''
connect "home/db" ;
select count edges
from startAndFinal(range(1, 3), set(4, 5, 6))
of name "fullgraph" intersect query term("a") star concat term("b");
''',
True
),
# edge expressions with multiple levels:
(
'''
connect "azat/home/db" ;
select count filter edges
with ( u, e, v ) satisfies not isStart(u) and isFinal(v)
from name "worstcase" ;
''',
True
),
(
'''
connect "azat/home/db" ;
define
term("a").var("s").term("b").var("s")
as "s" ;
define
term("a").var("s1").term("b")
as "s1" ;
select edges
from name "sparsegraph256.txt" ;
''',
True
),
(
'''
connect "azat/home/db" ;
define
term("a").var("s").term("b").var("s")
as "s" ;
select edges
from name "sparsegraph"
intersect query term("a") | term("b");
''',
True
),
# the rest are False test cases ( when grammar shouldn't accept )
# mismatched brackets in pattern:
(
'''
connect "azat/home/db" ;
select edges
from term("a")*.(term("b")?.var("s")+ ;
''',
False
),
(
'''
connect "azat/home/db" ;
select edges
from query term("a"*.term("b")?.var("s")+ ;
''',
False
),
# wrong data type in range:
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal ( range( "typo", 3 ), set(4, 5, 6) ) of name "sparsegraph" ;
''',
False
),
# wrong data type in set:
(
'''
connect "azat/home/db" ;
select edges
from startAndFinal ( range(1, 3 ), set(typo, 5, 6)) of name "sparsegraph" ;
''',
False
),
# not specified term or var in pattern:
(
'''
connect "azat/home/db" ;
select edges
from query "a" star alt "a" opt concat "c" plus ;
''',
False
),
])
# tests graph DB query language
def test_grammar_antlr(test_input, expected, grammar):
assert expected == parse(test_input)
|
py
|
1a5ae39677ca06641890ba6d14738e949079aac7
|
from functools import partial
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid as fluid
import paddle.v2.fluid.layers as layers
from config import TrainTaskConfig, input_data_names, pos_enc_param_names
# FIXME(guosheng): Remove out the batch_size from the model.
batch_size = TrainTaskConfig.batch_size
def position_encoding_init(n_position, d_pos_vec):
"""
Generate the initial values for the sinusoid position encoding table.
"""
position_enc = np.array([[
pos / np.power(10000, 2 * (j // 2) / d_pos_vec)
for j in range(d_pos_vec)
] if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # dim 2i+1
return position_enc.astype("float32")
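# Illustrative usage of position_encoding_init (kept as a comment so importing
# this module stays side-effect free); the sizes below are hypothetical:
#   pe_table = position_encoding_init(5, 4)   # numpy array of shape (5, 4)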
def multi_head_attention(queries,
keys,
values,
attn_bias,
d_key,
d_value,
d_model,
num_heads=1,
dropout_rate=0.):
"""
    Multi-Head Attention. Note that attn_bias is added to the logit before
    computing the softmax activation to mask certain selected positions so
    that they will not be considered in the attention weights.
"""
if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
raise ValueError(
"Inputs: quries, keys and values should all be 3-D tensors.")
def __compute_qkv(queries, keys, values, num_heads, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
q = layers.fc(input=queries,
size=d_key * num_heads,
bias_attr=False,
num_flatten_dims=2)
k = layers.fc(input=keys,
size=d_key * num_heads,
bias_attr=False,
num_flatten_dims=2)
v = layers.fc(input=values,
size=d_value * num_heads,
bias_attr=False,
num_flatten_dims=2)
return q, k, v
def __split_heads(x, num_heads):
"""
        Reshape the last dimension of input tensor x so that it becomes two
dimensions and then transpose. Specifically, input a tensor with shape
[bs, max_sequence_length, num_heads * hidden_dim] then output a tensor
with shape [bs, num_heads, max_sequence_length, hidden_dim].
"""
if num_heads == 1:
return x
hidden_size = x.shape[-1]
# FIXME(guosheng): Decouple the program desc with batch_size.
reshaped = layers.reshape(
x=x, shape=[batch_size, -1, num_heads, hidden_size // num_heads])
        # permute the dimensions into:
# [batch_size, num_heads, max_sequence_len, hidden_size_per_head]
return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
def __combine_heads(x):
"""
        Transpose and then reshape the last two dimensions of input tensor x
        so that it becomes one dimension, which is the reverse of __split_heads.
"""
if len(x.shape) == 3: return x
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# FIXME(guosheng): Decouple the program desc with batch_size.
return layers.reshape(
x=trans_x,
shape=map(int,
[batch_size, -1, trans_x.shape[2] * trans_x.shape[3]]))
def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
"""
Scaled Dot-Product Attention
"""
        # FIXME(guosheng): Optimize the shape in reshape_op or softmax_op.
        # The current implementation of softmax_op only supports 2-D tensors,
        # so it cannot be used directly here. The reshape_op does not help
        # either, because the shape of `product` inferred at compile time is
        # not the actual run-time shape and therefore cannot be used to set
        # the attribute of reshape_op.
        # So a softmax is defined here as a temporary solution.
def __softmax(x, eps=1e-9):
exp_out = layers.exp(x=x)
sum_out = layers.reduce_sum(exp_out, dim=-1, keep_dim=False)
return layers.elementwise_div(x=exp_out, y=sum_out, axis=0)
scaled_q = layers.scale(x=q, scale=d_key**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
weights = __softmax(layers.elementwise_add(x=product, y=attn_bias))
if dropout_rate:
weights = layers.dropout(
weights, dropout_prob=dropout_rate, is_test=False)
out = layers.matmul(weights, v)
return out
q, k, v = __compute_qkv(queries, keys, values, num_heads, d_key, d_value)
q = __split_heads(q, num_heads)
k = __split_heads(k, num_heads)
v = __split_heads(v, num_heads)
ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
proj_out = layers.fc(input=out,
size=d_model,
bias_attr=False,
num_flatten_dims=2)
return proj_out
def positionwise_feed_forward(x, d_inner_hid, d_hid):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with a ReLU activation
in between, which is applied to each position separately and identically.
"""
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act="relu")
out = layers.fc(input=hidden, size=d_hid, num_flatten_dims=2)
return out
def pre_post_process_layer(prev_out, out, process_cmd, dropout=0.):
"""
    Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(out, begin_norm_axis=len(out.shape) - 1)
elif cmd == "d": # add dropout
if dropout:
out = layers.dropout(out, dropout_prob=dropout, is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def prepare_encoder(src_word,
src_pos,
src_vocab_size,
src_emb_dim,
src_pad_idx,
src_max_len,
dropout=0.,
pos_pad_idx=0,
pos_enc_param_name=None):
"""Add word embeddings and position encodings.
The output tensor has a shape of:
[batch_size, max_src_length_in_batch, d_model].
This module is used at the bottom of the encoder stacks.
"""
src_word_emb = layers.embedding(
src_word, size=[src_vocab_size, src_emb_dim], padding_idx=src_pad_idx)
src_pos_enc = layers.embedding(
src_pos,
size=[src_max_len, src_emb_dim],
padding_idx=pos_pad_idx,
param_attr=fluid.ParamAttr(
name=pos_enc_param_name, trainable=False))
enc_input = src_word_emb + src_pos_enc
# FIXME(guosheng): Decouple the program desc with batch_size.
enc_input = layers.reshape(x=enc_input, shape=[batch_size, -1, src_emb_dim])
return layers.dropout(
enc_input, dropout_prob=dropout,
is_test=False) if dropout else enc_input
prepare_encoder = partial(
prepare_encoder, pos_enc_param_name=pos_enc_param_names[0])
prepare_decoder = partial(
prepare_encoder, pos_enc_param_name=pos_enc_param_names[1])
def encoder_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""The encoder layers that can be stacked to form a deep encoder.
    This module consists of a multi-head (self) attention followed by
    position-wise feed-forward networks, and both components are accompanied
    by the post_process_layer to add residual connection, layer normalization
    and dropout.
"""
attn_output = multi_head_attention(enc_input, enc_input, enc_input,
attn_bias, d_key, d_value, d_model,
n_head, dropout_rate)
attn_output = post_process_layer(enc_input, attn_output, "dan",
dropout_rate)
ffd_output = positionwise_feed_forward(attn_output, d_inner_hid, d_model)
return post_process_layer(attn_output, ffd_output, "dan", dropout_rate)
def encoder(enc_input,
attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""
for i in range(n_layer):
enc_output = encoder_layer(enc_input, attn_bias, n_head, d_key, d_value,
d_model, d_inner_hid, dropout_rate)
enc_input = enc_output
return enc_output
def decoder_layer(dec_input,
enc_output,
slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
""" The layer to be stacked in decoder part.
The structure of this module is similar to that in the encoder part except
a multi-head attention is added to implement encoder-decoder attention.
"""
slf_attn_output = multi_head_attention(
dec_input,
dec_input,
dec_input,
slf_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate, )
slf_attn_output = post_process_layer(
dec_input,
slf_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
enc_attn_output = multi_head_attention(
slf_attn_output,
enc_output,
enc_output,
dec_enc_attn_bias,
d_key,
d_value,
d_model,
n_head,
dropout_rate, )
enc_attn_output = post_process_layer(
slf_attn_output,
enc_attn_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
ffd_output = positionwise_feed_forward(
enc_attn_output,
d_inner_hid,
d_model, )
dec_output = post_process_layer(
enc_attn_output,
ffd_output,
"dan", # residual connection + dropout + layer normalization
dropout_rate, )
return dec_output
def decoder(dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate=0.):
"""
The decoder is composed of a stack of identical decoder_layer layers.
"""
for i in range(n_layer):
dec_output = decoder_layer(
dec_input,
enc_output,
dec_slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
dec_input = dec_output
return dec_output
def transformer(
src_vocab_size,
trg_vocab_size,
max_length,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate,
src_pad_idx,
trg_pad_idx,
pos_pad_idx, ):
# The shapes here act as placeholder.
# The shapes set here is to pass the infer-shape in compile time. The actual
# shape of src_word in run time is:
# [batch_size * max_src_length_in_a_batch, 1].
src_word = layers.data(
name=input_data_names[0],
shape=[batch_size * max_length, 1],
dtype="int64",
append_batch_size=False)
# The actual shape of src_pos in runtime is:
# [batch_size * max_src_length_in_a_batch, 1].
src_pos = layers.data(
name=input_data_names[1],
shape=[batch_size * max_length, 1],
dtype="int64",
append_batch_size=False)
    # The actual shape of trg_word in runtime is:
# [batch_size * max_trg_length_in_a_batch, 1].
trg_word = layers.data(
name=input_data_names[2],
shape=[batch_size * max_length, 1],
dtype="int64",
append_batch_size=False)
# The actual shape of trg_pos in runtime is:
# [batch_size * max_trg_length_in_a_batch, 1].
trg_pos = layers.data(
name=input_data_names[3],
shape=[batch_size * max_length, 1],
dtype="int64",
append_batch_size=False)
# The actual shape of src_slf_attn_bias in runtime is:
# [batch_size, n_head, max_src_length_in_a_batch, max_src_length_in_a_batch].
# This input is used to remove attention weights on paddings.
src_slf_attn_bias = layers.data(
name=input_data_names[4],
shape=[batch_size, n_head, max_length, max_length],
dtype="float32",
append_batch_size=False)
# The actual shape of trg_slf_attn_bias in runtime is:
# [batch_size, n_head, max_trg_length_in_batch, max_trg_length_in_batch].
# This is used to remove attention weights on paddings and subsequent words.
trg_slf_attn_bias = layers.data(
name=input_data_names[5],
shape=[batch_size, n_head, max_length, max_length],
dtype="float32",
append_batch_size=False)
# The actual shape of trg_src_attn_bias in runtime is:
# [batch_size, n_head, max_trg_length_in_batch, max_src_length_in_batch].
# This is used to remove attention weights on paddings.
trg_src_attn_bias = layers.data(
name=input_data_names[6],
shape=[batch_size, n_head, max_length, max_length],
dtype="float32",
append_batch_size=False)
enc_input = prepare_encoder(
src_word,
src_pos,
src_vocab_size,
d_model,
src_pad_idx,
max_length,
dropout_rate, )
enc_output = encoder(
enc_input,
src_slf_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
dec_input = prepare_decoder(
trg_word,
trg_pos,
trg_vocab_size,
d_model,
trg_pad_idx,
max_length,
dropout_rate, )
dec_output = decoder(
dec_input,
enc_output,
trg_slf_attn_bias,
trg_src_attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
dropout_rate, )
# TODO(guosheng): Share the weight matrix between the embedding layers and
# the pre-softmax linear transformation.
predict = layers.reshape(
x=layers.fc(input=dec_output,
size=trg_vocab_size,
bias_attr=False,
num_flatten_dims=2),
shape=[-1, trg_vocab_size],
act="softmax")
# The actual shape of gold in runtime is:
# [batch_size * max_trg_length_in_a_batch, 1].
gold = layers.data(
name=input_data_names[7],
shape=[batch_size * max_length, 1],
dtype="int64",
append_batch_size=False)
cost = layers.cross_entropy(input=predict, label=gold)
return layers.mean(x=cost)
|
py
|
1a5ae3f6858f5f6af89f9b23c9c518f0c7c1ded3
|
#!/usr/bin/env python
"""beanstalkc3 - A beanstalkd Client Library for Python"""
import os
import logging
import socket
import sys
__license__ = '''
Copyright (C) 2008-2016 Andreas Bolka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__version__ = '0.1.0'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 11300
DEFAULT_PRIORITY = 2 ** 31
DEFAULT_TTR = 120
DEFAULT_TUBE_NAME = 'default'
class BeanstalkcException(Exception): pass
class UnexpectedResponse(BeanstalkcException): pass
class CommandFailed(BeanstalkcException): pass
class DeadlineSoon(BeanstalkcException): pass
class SocketError(BeanstalkcException):
@staticmethod
def wrap(wrapped_function, *args, **kwargs):
try:
return wrapped_function(*args, **kwargs)
except socket.error:
err = sys.exc_info()[1]
raise SocketError(err)
class Connection(object):
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, parse_yaml=True,
connect_timeout=socket.getdefaulttimeout()):
if parse_yaml is True:
try:
parse_yaml = __import__('yaml').load
except ImportError:
logging.error('Failed to load PyYAML, will not parse YAML')
parse_yaml = False
self._connect_timeout = connect_timeout
self._parse_yaml = parse_yaml or (lambda x: x)
self.host = host
self.port = port
self.connect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def connect(self):
"""Connect to beanstalkd server."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._connect_timeout)
SocketError.wrap(self._socket.connect, (self.host, self.port))
self._socket.settimeout(None)
self._socket_file = self._socket.makefile('rb')
def close(self):
"""Close connection to server."""
try:
self._socket.sendall(b'quit\r\n')
except socket.error:
pass
try:
self._socket.close()
except socket.error:
pass
def reconnect(self):
"""Re-connect to server."""
self.close()
self.connect()
def _interact(self, command, expected_ok, expected_err=[]):
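        # Generic request/response round trip: send the raw command bytes,
        # read one status line, and dispatch on the returned status token.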
assert isinstance(command, bytes), 'command must be a bytes instance'
SocketError.wrap(self._socket.sendall, command)
status, results = self._read_response()
if status in expected_ok:
return results
elif status in expected_err:
raise CommandFailed(command.split()[0], status, results)
else:
raise UnexpectedResponse(command.split()[0], status, results)
def _read_response(self):
line = SocketError.wrap(self._socket_file.readline)
if not line:
raise SocketError()
response = line.split()
return response[0], response[1:]
def _read_body(self, size):
body = SocketError.wrap(self._socket_file.read, size)
SocketError.wrap(self._socket_file.read, 2) # trailing crlf
if size > 0 and not body:
raise SocketError()
return body
def _interact_value(self, command, expected_ok, expected_err=[]):
return self._interact(command, expected_ok, expected_err)[0]
def _interact_job(self, command, expected_ok, expected_err, reserved=True):
jid, size = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return Job(self, int(jid), body, reserved)
def _interact_yaml(self, command, expected_ok, expected_err=[]):
size, = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return self._parse_yaml(body)
def _interact_peek(self, command):
try:
return self._interact_job(command, [b'FOUND'], [b'NOT_FOUND'], False)
except CommandFailed:
return None
# -- public interface --
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
"""Put a job into the current tube. Returns job id."""
assert isinstance(body, bytes), 'Job body must be a bytes instance'
jid = self._interact_value(b'put %d %d %d %d\r\n%s\r\n' % (
priority, delay, ttr, len(body), body),
[b'INSERTED'],
[b'JOB_TOO_BIG', b'BURIED', b'DRAINING'])
return int(jid)
def reserve(self, timeout=None):
"""Reserve a job from one of the watched tubes, with optional timeout
in seconds. Returns a Job object, or None if the request times out."""
if timeout is not None:
command = b'reserve-with-timeout %d\r\n' % timeout
else:
command = b'reserve\r\n'
try:
return self._interact_job(command,
[b'RESERVED'],
[b'DEADLINE_SOON', b'TIMED_OUT'])
except CommandFailed:
exc = sys.exc_info()[1]
_, status, results = exc.args
if status == b'TIMED_OUT':
return None
elif status == b'DEADLINE_SOON':
raise DeadlineSoon(results)
def kick(self, bound=1):
"""Kick at most bound jobs into the ready queue."""
return int(self._interact_value(b'kick %d\r\n' % bound, [b'KICKED']))
def kick_job(self, jid):
"""Kick a specific job into the ready queue."""
self._interact(b'kick-job %d\r\n' % jid, [b'KICKED'], [b'NOT_FOUND'])
def peek(self, jid):
"""Peek at a job. Returns a Job, or None."""
return self._interact_peek(b'peek %d\r\n' % jid)
def peek_ready(self):
"""Peek at next ready job. Returns a Job, or None."""
return self._interact_peek(b'peek-ready\r\n')
def peek_delayed(self):
"""Peek at next delayed job. Returns a Job, or None."""
return self._interact_peek(b'peek-delayed\r\n')
def peek_buried(self):
"""Peek at next buried job. Returns a Job, or None."""
return self._interact_peek(b'peek-buried\r\n')
def tubes(self):
"""Return a list of all existing tubes."""
return self._interact_yaml(b'list-tubes\r\n', [b'OK'])
def using(self):
"""Return the tube currently being used."""
return self._interact_value(b'list-tube-used\r\n', [b'USING'])
def use(self, name):
"""Use a given tube."""
return self._interact_value(b'use %s\r\n' % name, [b'USING'])
def watching(self):
"""Return a list of all tubes being watched."""
return self._interact_yaml(b'list-tubes-watched\r\n', [b'OK'])
def watch(self, name):
"""Watch a given tube."""
return int(self._interact_value(b'watch %s\r\n' % name, [b'WATCHING']))
def ignore(self, name):
"""Stop watching a given tube."""
try:
return int(self._interact_value(b'ignore %s\r\n' % name,
[b'WATCHING'],
[b'NOT_IGNORED']))
except CommandFailed:
# Tried to ignore the only tube in the watchlist, which failed.
return 0
def stats(self):
"""Return a dict of beanstalkd statistics."""
return self._interact_yaml(b'stats\r\n', [b'OK'])
def stats_tube(self, name):
"""Return a dict of stats about a given tube."""
return self._interact_yaml(b'stats-tube %s\r\n' % name,
[b'OK'],
[b'NOT_FOUND'])
def pause_tube(self, name, delay):
"""Pause a tube for a given delay time, in seconds."""
self._interact(b'pause-tube %s %d\r\n' % (name, delay),
[b'PAUSED'],
[b'NOT_FOUND'])
# -- job interactors --
def delete(self, jid):
"""Delete a job, by job id."""
self._interact(b'delete %d\r\n' % jid, [b'DELETED'], [b'NOT_FOUND'])
def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
"""Release a reserved job back into the ready queue."""
self._interact(b'release %d %d %d\r\n' % (jid, priority, delay),
                       [b'RELEASED', b'BURIED'],
[b'NOT_FOUND'])
def bury(self, jid, priority=DEFAULT_PRIORITY):
"""Bury a job, by job id."""
self._interact(b'bury %d %d\r\n' % (jid, priority),
[b'BURIED'],
[b'NOT_FOUND'])
def touch(self, jid):
"""Touch a job, by job id, requesting more time to work on a reserved
job before it expires."""
self._interact(b'touch %d\r\n' % jid, [b'TOUCHED'], [b'NOT_FOUND'])
def stats_job(self, jid):
"""Return a dict of stats about a job, by job id."""
return self._interact_yaml(b'stats-job %d\r\n' % jid,
[b'OK'],
[b'NOT_FOUND'])
class Job(object):
def __init__(self, conn, jid, body, reserved=True):
self.conn = conn
self.jid = jid
self.body = body
self.reserved = reserved
def _priority(self):
stats = self.stats()
if isinstance(stats, dict):
return stats['pri']
return DEFAULT_PRIORITY
# -- public interface --
def delete(self):
"""Delete this job."""
self.conn.delete(self.jid)
self.reserved = False
def release(self, priority=None, delay=0):
"""Release this job back into the ready queue."""
if self.reserved:
self.conn.release(self.jid, priority or self._priority(), delay)
self.reserved = False
def bury(self, priority=None):
"""Bury this job."""
if self.reserved:
self.conn.bury(self.jid, priority or self._priority())
self.reserved = False
def kick(self):
"""Kick this job alive."""
self.conn.kick_job(self.jid)
def touch(self):
"""Touch this reserved job, requesting more time to work on it before
it expires."""
if self.reserved:
self.conn.touch(self.jid)
def stats(self):
"""Return a dict of stats about this job."""
return self.conn.stats_job(self.jid)
if __name__ == '__main__':
import nose
nose.main(argv=['nosetests', '-c', '.nose.cfg', '-l', 'DEBUG', '--debug-log', '/tmp/log_nose.log'])
#conn = Connection(host=b'localhost', port=11300)
#print(u'conn', conn)
#tubes = conn.tubes()
#print(tubes)
#conn.put(b'hello');
#job = conn.reserve()
#print(job)
#print(job.body)
#job.delete()
|
py
|
1a5ae42e5d3817a71ec9b75e3cb77e59db9db256
|
from random import randint
from epidemic_simulation.simulation import SimulationManager
import pytest
@pytest.fixture
def test_data():
test_bodies=[{'position': (748, 634), 'state': 'INFECTIOUS'}, {'position': (1137, 351), 'state': 'SUSCEPTIBLE'}, {'position': (1017, 464), 'state': 'INFECTIOUS'}, {'position': (901, 368), 'state': 'INFECTIOUS'}, {'position': (1227, 549), 'state': 'REMOVED'}, {'position': (1193, 194), 'state': 'REMOVED'}, {'position': (654, 165), 'state': 'SUSCEPTIBLE'}, {'position': (1212, 260), 'state': 'INFECTIOUS'}, {'position': (820, 198), 'state': 'SUSCEPTIBLE'}, {'position': (826, 480), 'state': 'INFECTIOUS'}, {'position': (955, 58), 'state': 'REMOVED'}, {'position': (914, 78), 'state': 'INFECTIOUS'}, {'position': (1239, 86), 'state': 'SUSCEPTIBLE'}, {'position': (1132, 532), 'state': 'SUSCEPTIBLE'}, {'position': (1042, 41), 'state': 'REMOVED'}, {'position': (713, 590), 'state': 'SUSCEPTIBLE'}, {'position': (1169, 572), 'state': 'REMOVED'}, {'position': (778, 70), 'state': 'SUSCEPTIBLE'}, {'position': (906, 554), 'state': 'SUSCEPTIBLE'}, {'position': (797, 598), 'state': 'INFECTIOUS'}]
test_calc=SimulationManager(test_bodies,{'infection_r':100,'infection_p':0.99,'sickness_duration':6})
return test_calc
def test_infect_susceptibles(test_data):
SUS_bodies_pre_function = test_data.susceptibles
test_data.calculate_subjects_to_change()
to_change_bodies = test_data.subjects_to_change
test_data.infect_susceptibles()
SUS_bodies_post_function=[body for body in test_data.subjects if body['state']=='SUSCEPTIBLE']
assert len(SUS_bodies_pre_function)-len(to_change_bodies)==len(SUS_bodies_post_function)
|
py
|
1a5ae442eb9a041256317a660ed22a3e67b9a6a8
|
# encoding: UTF-8
__author__ = 'CHENXY'
# Dictionary mapping C++ types to Python types
type_dict = {
'int': 'int',
'char': 'string',
'double': 'float',
'short': 'int'
}
def process_line(line):
"""处理每行"""
if '///' in line: # 注释
py_line = process_comment(line)
elif 'typedef' in line: # 类型申明
py_line = process_typedef(line)
elif '#define' in line: # 定义常量
py_line = process_define(line)
elif line == '\n': # 空行
py_line = line
else:
py_line = ''
return py_line
def process_comment(line):
"""处理注释"""
# if line[3] == '/':
# py_line = ''
# else:
# py_line = '#' + line[3:]
py_line = '#' + line[3:]
return py_line
def process_typedef(line):
"""处理类型申明"""
content = line.split(' ')
type_ = type_dict[content[1]]
keyword = content[2]
if '[' in keyword:
i = keyword.index('[')
keyword = keyword[:i]
else:
        keyword = keyword.replace(';\n', '')  # strip the trailing semicolon
py_line = 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
return py_line
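# Illustrative input/output for process_typedef (the type name below is only
# an example):
#   'typedef char TUstpFtdcInvestorIDType[19];\n'
#   -> 'typedefDict["TUstpFtdcInvestorIDType"] = "string"\n'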
def process_define(line):
"""处理定义常量"""
content = line.split(' ')
constant = content[1]
if len(content)>2:
value = content[-1]
py_line = 'defineDict["%s"] = %s' % (constant, value)
else:
py_line = ''
return py_line
def main():
"""主函数"""
try:
fcpp = open('USTPFtdcUserApiDataType.h','r')
fpy = open('femas_data_type.py', 'w')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('defineDict = {}\n')
fpy.write('typedefDict = {}\n')
fpy.write('\n')
for line in fcpp:
py_line = process_line(line)
if py_line:
fpy.write(py_line.decode('gbk').encode('utf-8'))
fcpp.close()
fpy.close()
        print('data_type.py generation completed')
    except Exception as e:
        print('data_type.py generation failed:', e)
if __name__ == '__main__':
main()
|
py
|
1a5ae4596e0dc08c39663f7b532324d1f61ef32d
|
import unittest
from programy.clients.events.console.config import ConsoleConfiguration
from programy.config.file.yaml_file import YamlConfigurationFile
from programy.storage.stores.file.config import FileStoreConfiguration
class FileStoreConfigurationTests(unittest.TestCase):
def test_with_files_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
files:
sets:
dirs: $BOT_ROOT/sets
extension: .txt
subdirs: false
format: text
encoding: utf-8
delete_on_start: true
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
files_config = yaml.get_section("files", brain_config)
self.assertIsNotNone(files_config)
sets_config = FileStoreConfiguration("sets")
sets_config.load_config_section(yaml, files_config, ".")
self.assertFalse(sets_config.has_single_file())
self.assertTrue(sets_config.has_multiple_dirs())
self.assertEqual(["./sets"], sets_config.dirs)
self.assertEqual(".txt", sets_config.extension)
self.assertFalse(sets_config.subdirs)
self.assertEqual("utf-8", sets_config.encoding)
self.assertEqual("text", sets_config.format)
self.assertTrue(sets_config.delete_on_start)
def test_with_file_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
files:
sets:
file: $BOT_ROOT/sets/test.txt
format: text
encoding: utf-8
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
files_config = yaml.get_section("files", brain_config)
self.assertIsNotNone(files_config)
sets_config = FileStoreConfiguration("sets")
sets_config.load_config_section(yaml, files_config, ".")
self.assertTrue(sets_config.has_single_file())
self.assertFalse(sets_config.has_multiple_dirs())
self.assertEqual(["./sets/test.txt"], sets_config.dirs)
self.assertEqual("text", sets_config.format)
self.assertEqual("utf-8", sets_config.encoding)
def test_with_file_no_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
files:
sets:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
files_config = yaml.get_section("files", brain_config)
self.assertIsNotNone(files_config)
sets_config = FileStoreConfiguration("sets")
sets_config.load_config_section(yaml, files_config, ".")
self.assertFalse(sets_config.has_single_file())
self.assertTrue(sets_config.has_multiple_dirs())
self.assertIsNone(sets_config.dirs)
self.assertIsNone(sets_config.format)
self.assertIsNone(sets_config.encoding)
def test_with_file_no_config_no_data(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
files:
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
files_config = yaml.get_section("files", brain_config)
self.assertIsNone(files_config)
def test_to_yaml_defaults(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
files:
sets:
dirs: $BOT_ROOT/sets
extension: .txt
subdirs: false
format: text
encoding: utf-8
delete_on_start: true
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
files_config = yaml.get_section("files", brain_config)
self.assertIsNotNone(files_config)
sets_config = FileStoreConfiguration("sets")
sets_config.load_config_section(yaml, files_config, ".")
data = {}
sets_config.to_yaml(data, True)
self.assertFalse(data['delete_on_start'])
self.assertEqual(data['dirs'], './storage/sets')
self.assertIsNone(data['encoding'])
self.assertEqual(data['extension'], '.txt')
self.assertIsNone(data['format'])
self.assertFalse(data['subdirs'])
def test_to_yaml_no_defaults(self):
yaml = YamlConfigurationFile()
self.assertIsNotNone(yaml)
yaml.load_from_text("""
brain:
files:
sets:
dirs: $BOT_ROOT/sets
extension: .txt
subdirs: false
format: text
encoding: utf-8
delete_on_start: true
""", ConsoleConfiguration(), ".")
brain_config = yaml.get_section("brain")
self.assertIsNotNone(brain_config)
files_config = yaml.get_section("files", brain_config)
self.assertIsNotNone(files_config)
sets_config = FileStoreConfiguration("sets")
sets_config.load_config_section(yaml, files_config, ".")
data = {}
sets_config.to_yaml(data, False)
self.assertTrue(data['delete_on_start'])
self.assertEqual(data['dirs'], ['./sets'])
self.assertEqual(data['encoding'], 'utf-8')
self.assertEqual(data['extension'], '.txt')
self.assertEqual(data['format'], 'text')
self.assertFalse(data['subdirs'])
|
py
|
1a5ae4e60032aaf036dde718182741e59c943819
|
import os
import pefile
import hashlib
import pickle
import time
import pandas as pd
from config import settings as cnst
from collections import OrderedDict
from utils import embedder
all_sections = OrderedDict({".header": 0})
def raw_pe_to_pkl(path, is_benign, unprocessed, processed):
list_idx = []
for src_dir, dirs, files in os.walk(path):
for file_ in files:
file_data = {}
try:
src_file = os.path.join(src_dir, file_)
src_file_size = os.stat(src_file).st_size
if src_file_size > cnst.MAX_FILE_SIZE_LIMIT:
print("Skipping as file size exceeds ", cnst.MAX_FILE_SIZE_LIMIT, "[ Unprocessed / Skipped Count: "+str(unprocessed)+"]")
unprocessed += 1
continue
else:
file_data["size_byte"] = src_file_size
pe = pefile.PE(src_file)
pe_name = "pe_" + str(processed) + ".pkl"
with open(src_file, 'rb') as fhandle:
file_byte_data = fhandle.read()
fid = [pe_name
, 0 if is_benign else 1
, file_
, hashlib.md5(file_byte_data).hexdigest()
, hashlib.sha1(file_byte_data).hexdigest()
, hashlib.sha256(file_byte_data).hexdigest()]
file_data["whole_bytes"] = list(file_byte_data)
wb_size = len(file_data["whole_bytes"])
file_data["whole_bytes_size"] = wb_size
file_data["benign"] = is_benign
# file_data["num_of_sections"] = pe.FILE_HEADER.NumberOfSections
file_data["section_info"] = {}
for section in pe.sections:
section_name = section.Name.strip(b'\x00').decode("utf-8").strip()
section_data = {}
section_data["section_data"] = list(section.get_data())
section_data["section_size_byte"] = section.SizeOfRawData
section_data["section_bounds"] = {}
section_data["section_bounds"]["start_offset"] = section.PointerToRawData
section_data["section_bounds"]["end_offset"] = section.PointerToRawData + section.SizeOfRawData - 1
file_data["section_info"][section_name] = section_data
file_data["section_info"][".header"] = {
"section_data": list(pe.header),
"section_size_byte": len(pe.header),
"section_bounds": {
"start_offset": 0,
"end_offset": len(pe.header)
}}
t1_pkl = {"whole_bytes": file_data["whole_bytes"], "benign": file_data["benign"]}
sections_end = 0
keys = file_data["section_info"].keys()
for key in keys:
if file_data["section_info"][key]['section_bounds']["end_offset"] > sections_end:
sections_end = file_data["section_info"][key]['section_bounds']["end_offset"]
if sections_end <= 0:
print("[OVERLAY DATA NOT ADDED] Invalid section end found - ", sections_end)
elif sections_end < wb_size - 1:
data = file_data["whole_bytes"][sections_end + 1:wb_size]
section_data = dict()
section_data["section_data"] = data
section_data["section_size_byte"] = len(data)
# section_bounds
section_data["section_bounds"] = {}
section_data["section_bounds"]["start_offset"] = sections_end + 1
section_data["section_bounds"]["end_offset"] = wb_size - 1
file_data["section_info"][cnst.TAIL] = section_data
del file_data["whole_bytes"]
t2_pkl = file_data
with open(t1_dst_folder + pe_name, "wb") as t1handle:
pickle.dump(t1_pkl, t1handle)
with open(t2_dst_folder + pe_name, "wb") as t2handle:
pickle.dump(t2_pkl, t2handle)
list_idx.append(fid)
processed += 1
for section in file_data["section_info"].keys():
if section in all_sections:
all_sections[section] += 1
else:
all_sections[section] = 1
all_sections['.header'] += 1
print("Total Count:", processed, "Unprocessed/Skipped:", unprocessed)
# Test saved data
# with open(pkl_file, "rb") as pkl:
# print(pickle.load(pkl)["num_of_sections"])
except Exception as e:
unprocessed += 1
print("parse failed . . . [ Unprocessed #:", str(unprocessed), "] [ ERROR: " + str(e) + " ] [ FILE: ", src_file, "] ")
if processed % 1000 == 0:
print("# files processed:", processed)
pd.DataFrame(list_idx).to_csv(cnst.DATASET_BACKUP_FILE, index=False, header=None, mode='a')
return unprocessed, processed
if __name__ == '__main__':
total_processed = 0
total_unprocessed = 0
start_time = time.time()
t1_dst_folder = cnst.PKL_SOURCE_PATH + "t1" + cnst.ESC
t2_dst_folder = cnst.PKL_SOURCE_PATH + "t2" + cnst.ESC
if not os.path.exists(t1_dst_folder):
os.makedirs(t1_dst_folder)
if not os.path.exists(t2_dst_folder):
os.makedirs(t2_dst_folder)
if os.path.exists(cnst.DATASET_BACKUP_FILE):
os.remove(cnst.DATASET_BACKUP_FILE)
for dir in cnst.RAW_SAMPLE_DIRS.keys():
total_unprocessed, total_processed = raw_pe_to_pkl(dir, cnst.RAW_SAMPLE_DIRS[dir], total_unprocessed, total_processed)
end_time = time.time()
print("\nData collection completed for all given paths.")
print("\nTotal:", total_processed+total_unprocessed, "\tprocessed: ", total_processed, "unprocessed:", total_unprocessed)
print("Time elapsed: {0:.3f}".format((end_time - start_time) / 60), "minute(s)")
# collect list of available sections from pkl and store mapping to their embedding
pd.DataFrame.from_dict([all_sections.keys()]).to_csv(cnst.PKL_SOURCE_PATH + cnst.ESC + 'available_sections.csv', index=False, header=None)
embedder.embed_section_names()
|
py
|
1a5ae582b25df8b17e1fbe371aa6a002821abde7
|
# MIT License
#
# Copyright (c) 2015-2021 Iakiv Kramarenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from selene.core.exceptions import TimeoutException
from tests.integration.helpers.givenpage import GivenPage
def test_waits_for_visibility(session_browser):
page = GivenPage(session_browser.driver)
page.opened_with_body(
'''
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>'''
).execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";', 500
)
session_browser.all('a')[0].click()
assert "second" in session_browser.driver.current_url
def test_waits_for_present_in_dom_and_visibility(session_browser):
page = GivenPage(session_browser.driver)
page.opened_with_body(
'''
<h2 id="second">Heading 2</h2>'''
)
page.load_body_with_timeout(
'''
<a href="#second">go to Heading 2</a>
<h2 id="second">Heading 2</h2>''',
500,
)
session_browser.all('a')[0].click()
assert "second" in session_browser.driver.current_url
def test_waits_first_for_present_in_dom_then_visibility(session_browser):
page = GivenPage(session_browser.driver)
page.opened_with_body(
'''
<h2 id="second">Heading 2</h2>'''
)
page.load_body_with_timeout(
'''
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>''',
250,
).execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";', 500
)
session_browser.all('a')[0].click()
assert "second" in session_browser.driver.current_url
def test_fails_on_timeout_during_waiting_for_visibility(session_browser):
browser = session_browser.with_(timeout=0.25)
page = GivenPage(browser.driver)
page.opened_with_body(
'''
<a href='#second' style='display:none'>go to Heading 2</a>
<h2 id='second'>Heading 2</h2>'''
).execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";', 500
)
with pytest.raises(TimeoutException):
browser.all('a')[0].click()
assert "second" not in session_browser.driver.current_url
def test_fails_on_timeout_during_waits_for_present_in_dom_and_visibility(
session_browser,
):
browser = session_browser.with_(timeout=0.25)
page = GivenPage(browser.driver)
page.opened_with_body(
'''
<h2 id="second">Heading 2</h2>'''
)
page.load_body_with_timeout(
'''
<a href="#second">go to Heading 2</a>
<h2 id="second">Heading 2</h2>''',
500,
)
with pytest.raises(TimeoutException):
browser.all('a')[0].click()
assert "second" not in session_browser.driver.current_url
def test_fails_on_timeout_during_waits_first_for_present_in_dom_then_visibility(
session_browser,
):
browser = session_browser.with_(timeout=0.25)
page = GivenPage(browser.driver)
page.opened_with_body(
'''
<h2 id="second">Heading 2</h2>'''
)
page.load_body_with_timeout(
'''
<a href="#second" style="display:none">go to Heading 2</a>
<h2 id="second">Heading 2</h2>''',
250,
).execute_script_with_timeout(
'document.getElementsByTagName("a")[0].style = "display:block";', 500
)
with pytest.raises(TimeoutException):
browser.all('a')[0].click()
assert "second" not in session_browser.driver.current_url
|
py
|
1a5ae706cbc44215f2cefd31e5720eddeef72faa
|
from typing import List


class Solution:
def mostCompetitive(self, nums: List[int], k: int) -> List[int]:
St = []
remove = len(nums) - k
for num in nums:
while St and num < St[-1] and remove > 0:
St.pop()
remove -= 1
St.append(num)
return St[:len(St) - remove]
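

# A minimal usage sketch (not part of the original solution; the example input is an
# assumption chosen for illustration). The monotonic stack pops larger elements while
# `remove` still has budget, which yields the lexicographically smallest subsequence.
if __name__ == "__main__":
    print(Solution().mostCompetitive([3, 5, 2, 6], 2))  # expected output: [2, 6]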
|
py
|
1a5ae83c36142232567c150d3c526b98f8f6ed32
|
from cpc import CPCStateMachine as CPCwithTG
from cpc import CPCStateMachineL4 as CPCwithTGL4
from cic.states import CICStateMachineLvl2 as CICwithCG
from cic.states import CICStateMachineLvl4 as CICwithCGL4
from cic.states import CICStateMachineLvl1 as CICwithCGL1
from mp.state_machines import MPStateMachine as MPwithPG
from residual_learning import residual_state_machines as rsm
from cic import parameters_new_grasp as cic_parameters_new_grasp
from mp import states, base_policies
from cpc import parameters as cpc_params
from combined_code import mix_and_match as mm
state_machines = {
'mp-pg-l1': MPwithPG,
'mp-pg-l2': MPwithPG,
'mp-pg-l3': MPwithPG,
'mp-pg-l4': MPwithPG,
'cic-cg-l1': CICwithCGL1,
'cic-cg-l2': CICwithCG,
'cic-cg-l3': CICwithCG,
'cic-cg-l4': CICwithCGL4,
'cpc-tg-l1': CPCwithTG,
'cpc-tg-l2': CPCwithTG,
'cpc-tg-l3': CPCwithTG,
'cpc-tg-l4': CPCwithTGL4,
'residual-mp-pg-l3': rsm.ResidualMP_with_PG_LVL3,
'residual-mp-pg-l4': rsm.ResidualMP_with_PG_LVL4,
'residual-cic-cg-l3': rsm.ResidualCIC_with_CG_LVL3,
'residual-cic-cg-l4': rsm.ResidualCIC_with_CG_LVL4,
'residual-cpc-tg-l3': rsm.ResidualCPC_with_TG_LVL3,
'residual-cpc-tg-l4': rsm.ResidualCPC_with_TG_LVL4,
'mp-cg-l4': mm.MPwithCG,
'mp-tg-l4': mm.MPwithTG,
'cic-pg-l4': mm.CICwithPG,
'cic-tg-l4': mm.CICwithTG,
'cpc-pg-l4': mm.CPCwithPG,
'cpc-cg-l4': mm.CPCwithCG,
}
def create_state_machine(difficulty, method, env, residual=False, bo=False):
if residual:
if method not in ['mp-pg', 'cic-cg', 'cpc-tg'] and difficulty in [1, 2]:
raise ValueError("Residual policies are only available for methods "
"'mp-pg', 'cic-cg', 'cpc-tg' and difficulties 3 and 4."
f"Method: {method}, difficulty: {difficulty}.")
if bo:
if method not in ['mp-pg', 'cic-cg', 'cpc-tg'] and difficulty in [1, 2]:
raise ValueError("BO optimized parameters are only available for methods "
"'mp-pg', 'cic-cg', 'cpc-tg' and difficulties 3 and 4."
f"Method: {method}, difficulty: {difficulty}.")
if method not in ['mp-pg', 'cic-cg', 'cpc-tg'] and difficulty != 4:
raise ValueError(f'{method} is only implemented for difficulty 4.')
id = method + f'-l{difficulty}'
if residual:
id = 'residual-' + id
if id not in state_machines:
raise ValueError(
f"Unknown method: {method}. Options are: "
"mp-pg, cic-cg, cpc-tg, mp-cg, mp-tg, cic-pg, cic-tg, cpc-pg, cpc-cg."
)
if bo:
return create_bo_state_machine(id, env, difficulty)
else:
return state_machines[id](env)
def create_bo_state_machine(id, env, difficulty):
if 'mp-pg' in id:
return mp_bo_wrapper(id, env, difficulty)
elif 'cic-cg' in id:
return cic_bo_wrapper(id, env, difficulty)
else:
return cpc_bo_wrapper(id, env, difficulty)
def mp_bo_wrapper(id, env, difficulty):
if difficulty == 3:
if (env.simulation):
states.MoveToGoalState.BO_action_repeat = 10
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 184
else:
states.MoveToGoalState.BO_action_repeat = 26 # 12 # (int) [1, 100], default: 12
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 63 # 50 # (int) [10, 200], default: 50
elif difficulty == 4:
if (env.simulation):
states.MoveToGoalState.BO_action_repeat = 13
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 161
else:
states.MoveToGoalState.BO_action_repeat = 29 # 12 # (int) [1, 100], default: 12
base_policies.PlanningAndForceControlPolicy.BO_num_tipadjust_steps = 182 # 50 # (int) [10, 200], default: 50
return state_machines[id](env)
def cic_bo_wrapper(id, env, difficulty):
if difficulty == 3:
parameters = cic_parameters_new_grasp.CubeLvl2Params(env)
elif difficulty == 4:
parameters = cic_parameters_new_grasp.CubeLvl4Params(env)
if (env.simulation):
parameters.orient_grasp_xy_lift = -0.01932485358
parameters.orient_grasp_h_lift = 0.0167107629776001
parameters.orient_gain_xy_lift_lift = 500.0
parameters.orient_gain_z_lift_lift = 974.5037078857422
parameters.orient_pos_gain_impedance_lift_lift = 0.015002169609069825
parameters.orient_force_factor_lift = 0.6673897802829742
parameters.orient_force_factor_rot_lift = 0.010000000000000002
parameters.orient_int_orient_gain = 0.0003590885430574417
parameters.orient_int_pos_gain = 0.008034629583358766
else:
parameters.orient_grasp_xy_lift = -0.03926035182
parameters.orient_grasp_h_lift = -0.005355795621871948
parameters.orient_gain_xy_lift_lift = 895.7465827465057
parameters.orient_gain_z_lift_lift = 1500.0
parameters.orient_pos_gain_impedance_lift_lift = 0.01427580736577511
parameters.orient_force_factor_lift = 0.49047523438930507
parameters.orient_force_factor_rot_lift = 0.0022044302672147753
parameters.orient_int_orient_gain = 0.027903699278831486
parameters.orient_int_pos_gain = 0.013680822849273681
return state_machines[id](env, parameters=parameters)
def cpc_bo_wrapper(id, env, difficulty):
if difficulty == 3:
parameters = cpc_params.CubeParams(env)
if (env.simulation):
parameters.interval = 9
parameters.gain_increase_factor = 1.1110639113783836
parameters.k_p_goal = 0.5408251136541367
parameters.k_p_into = 0.17404515892267228
parameters.k_i_goal = 0.00801944613456726
else:
parameters.interval = 3000 # 1800 # Range: 500 - 3000 not super important
parameters.gain_increase_factor = 1.7353031241893768 # 1.04 # Range: 1.01 - 2.0
parameters.k_p_goal = 0.5804646849632262 # 0.75 # Range: 0.3 - 1.5, same for l4
parameters.k_p_into = 0.1 # 0.2 # Range: 0.1 - 0.6, same for l4
parameters.k_i_goal = 0.00801206259727478 # 0.005 # Range: 0.0008 - 0.1, same for l4
if difficulty == 4:
parameters = cpc_params.CubeLvl4Params(env)
if (env.simulation):
parameters.interval = 10
parameters.gain_increase_factor = 1.2431243617534635
parameters.k_p_goal = 0.4393719419836998
parameters.k_p_into = 0.21185509711503983
parameters.k_i_goal = 0.008012341380119324
parameters.k_p_ang = 0.02238279849290848
parameters.k_i_ang = 0.0019905194759368898
else:
parameters.interval = 579
parameters.gain_increase_factor = 1.07002716961503
parameters.k_p_goal = 0.6011996507644652
parameters.k_p_into = 0.13088179603219033
parameters.k_i_goal = 0.006161301851272583
parameters.k_p_ang = 0.06160478860139847
parameters.k_i_ang = 0.0007573306798934938
return state_machines[id](env, parameters=parameters)
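

# A non-executable usage sketch (the robot environment object `env` is an assumption;
# it is constructed elsewhere in the project):
#
#   sm = create_state_machine(difficulty=4, method='cpc-tg', env=env, bo=True)
#
# dispatches through create_bo_state_machine() to cpc_bo_wrapper() above and returns
# the 'cpc-tg-l4' state machine with the BO-tuned parameters for either simulation
# or the real robot.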
|
py
|
1a5ae85d5863dc174688c7b5a5aa381be94f41b5
|
import numpy as np
class Config(object):
imgDirPath = '/data/rtao/Xspine/data'
labelDirPath = '/data/rtao/Xspine/data'
# Weight path or none
weightFile = "none"
# Loss visualization
# ON or OFF
tensorboard = False
logsDir = "runs"
# Train params
# model save path
backupDir = "backup"
max_epochs = 6000
save_interval = 10
# e.g. 0,1,2,3
gpus = [0]
# multithreading
num_workers = 2
batch_size = 1
# Solver params
    # adam or sgd
solver = "adam"
steps = [8000, 16000]
scales = [0.1, 0.1]
    learning_rate = 3e-4  # 1e-5
    momentum = 0.9
    decay = 0  # 5e-4
betas = (0.9, 0.98)
# YoloNet params
num_classes = 1
in_channels = 1
init_width = 448
init_height = 800
# # anchors1 = [77, 87, 120, 64, 91, 164]
# # anchors2 = [66, 57, 59, 81, 44, 142]
# # anchors3 = [22, 35, 44, 48, 45, 68]
anchors1 = [88, 98, 93, 110, 99, 124]
anchors2 = [63, 90, 76, 90, 78, 109]
anchors3 = [33, 42, 44, 54, 63, 72]
def get_anchors(self):
anchor1 = []
anchor2 = []
anchor3 = []
for i in range(len(self.anchors1)):
anchor1.append(self.anchors1[i] / 32)
for i in range(len(self.anchors2)):
anchor2.append(self.anchors2[i] / 16)
for i in range(len(self.anchors3)):
anchor3.append(self.anchors3[i] / 8)
anchors = np.array([anchor1, anchor2, anchor3])
return anchors
# anchors = np.array([
# [[1.25, 1.625], [2.0, 3.75], [4.125, 2.875]],
# [[1.875, 3.8125], [3.875, 2.8125], [3.6875, 7.4375]],
# [[3.625, 2.8125], [4.875, 6.1875], [11.65625, 10.1875]]
# ])
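

# A minimal usage sketch (assumed example, not part of the original file): anchors are
# specified in pixels and rescaled by get_anchors() to the three detection strides
# (32, 16 and 8).
if __name__ == "__main__":
    cfg = Config()
    print(cfg.get_anchors().shape)  # expected: (3, 6) for the anchor lists above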
|
py
|
1a5ae89719a9acfcdf9014ed4b9d4a8dc8b96206
|
"""
(c) 2020 Spencer Rose, MIT Licence
Python Landscape Classification Tool (PyLC)
Reference: An evaluation of deep learning semantic segmentation
for land cover classification of oblique ground-based photography,
MSc. Thesis 2020.
<http://hdl.handle.net/1828/12156>
Spencer Rose <[email protected]>, June 2020
University of Victoria
Module: Profiler
File: profile.py
"""
import torch
import torch.nn.functional
from tqdm import tqdm
from utils.metrics import m2, jsd
import numpy as np
def get_profile(dset):
"""
Computes dataset statistical profile
- probability class distribution for database at db_path
- sample metrics and statistics
- image mean / standard deviation
Parameters
------
dset: MLPDataset
Image/mask dataset.
    Returns
    ------
    meta: Metadata
        Updated dataset metadata profile, including the fields below.

    Metadata fields
    ---------------
        meta.id: int
            Identifier.
        meta.ch: int
            Number of channels.
        meta.schema: str
            Path to schema JSON file.
        meta.output: str
            Output path.
        meta.n_samples
            Number of samples.
        meta.tile_size: int
            Tile size.
        meta.scales: list
            Image scaling factors.
        meta.stride: int
            Stride.
        meta.m2: float
            M2 variance metric.
        meta.jsd: float
            JSD coefficient.
        meta.px_mean: np.array
            Pixel mean value.
        meta.px_std: np.array
            Pixel standard deviation value.
        meta.px_dist: np.array
            Tile pixel frequency distribution.
        meta.tile_px_count: int
            Tile pixel count.
        meta.dset_px_dist: np.array
            Dataset pixel frequency distribution.
        meta.dset_px_count: int
            Dataset pixel count.
        meta.probs: np.array
            Dataset probability distribution.
        meta.weights:
            Dataset inverse weights.
"""
# update local metadata with dataset metadata
meta = dset.get_meta()
# get data loader
loader, n_batches = dset.loader(
batch_size=1,
n_workers=0,
drop_last=False
)
meta.n_samples = dset.size
# initialize global stats
px_dist = []
px_mean = torch.zeros(meta.ch)
px_std = torch.zeros(meta.ch)
# load images and masks
for i, (img, mask) in tqdm(enumerate(loader), total=n_batches, desc="Profiling: ", unit=' batches'):
# Compute dataset pixel global mean / standard deviation
if meta.ch == 3:
px_mean += torch.mean(img, (0, 2, 3))
px_std += torch.std(img, (0, 2, 3))
else:
px_mean += torch.mean(img)
px_std += torch.std(img)
# convert mask to one-hot encoding
mask_1hot = torch.nn.functional.one_hot(mask, num_classes=meta.n_classes).permute(0, 3, 1, 2)
px_dist_sample = [np.sum(mask_1hot.numpy(), axis=(2, 3))]
px_dist += px_dist_sample
# Divide by dataset size
px_mean /= meta.n_samples
px_std /= meta.n_samples
# Calculate sample pixel distribution / sample pixel count
px_dist = np.concatenate(px_dist)
# Calculate dataset pixel distribution / dataset total pixel count
dset_px_dist = np.sum(px_dist, axis=0)
dset_px_count = np.sum(dset_px_dist)
probs = dset_px_dist / dset_px_count
assert dset_px_count / meta.tile_px_count == meta.n_samples, \
"Pixel distribution does not match tile count."
# Calculate class weight balancing
weights = 1 / (np.log(1.02 + probs))
weights = weights / np.max(weights)
# initialize balanced distributions [n]
balanced_px_prob = np.empty(meta.n_classes)
balanced_px_prob.fill(1 / meta.n_classes)
# Calculate JSD and M2 metrics
meta.m2 = m2(probs, meta.n_classes)
meta.jsd = jsd(probs, balanced_px_prob)
# store metadata values
meta.px_mean = px_mean.tolist()
meta.px_std = px_std.tolist()
meta.px_dist = px_dist.tolist()
meta.tile_px_count = meta.tile_size * meta.tile_size
meta.probs = probs.tolist()
meta.weights = weights.tolist()
meta.dset_px_count = int(dset_px_count)
meta.dset_px_dist = dset_px_dist.tolist()
return meta
def print_meta(meta):
"""
Prints profile metadata to console
"""
hline = '\n' + '_' * 70
readout = '\n{}'.format('Profile Metadata')
readout += hline
readout += '\n {:30s}{}'.format('ID', meta.id)
readout += '\n {:30s}{} ({})'.format('Channels', meta.ch, 'Grayscale' if meta.ch == 1 else 'Colour')
readout += '\n {:30s}{}'.format('Classes', meta.n_classes)
readout += '\n {:30s}{}'.format('Samples', meta.n_samples)
readout += '\n {:30s}{}px x {}px'.format('Tile size (WxH)', meta.tile_size, meta.tile_size)
# RGB/Grayscale mean
    px_mean = 'R{:3s} G{:3s} B{:3s}'.format(
        str(round(meta.px_mean[0], 3)), str(round(meta.px_mean[1], 3)), str(round(meta.px_mean[2], 3))) \
        if meta.ch == 3 else str(round(meta.px_mean[0], 3))
readout += '\n {:30s}{}'.format('Pixel mean', px_mean)
# RGB/Grayscale std-dev
px_std = 'R{:3s} G{:3s} B{:3s}'.format(
str(round(meta.px_std[0], 3)), str(round(meta.px_std[1], 3)), str(round(meta.px_std[2], 3))) \
if meta.ch == 3 else str(round(meta.px_std[0], 3))
readout += '\n {:30s}{}'.format('Pixel std-dev', px_std)
readout += '\n {:30s}{}'.format('M2', str(round(meta.m2, 3)))
readout += '\n {:30s}{}'.format('JSD', str(round(meta.jsd, 3)))
# palette
readout += '\n\n{} ({})'.format('Palette', meta.schema)
readout += hline
readout += '\n {:8s}{:25s}{:20s}{:15s}'.format('Code', 'Name', 'RGB', 'Hex')
readout += hline
for i, rgb_colour in enumerate(meta.palette_rgb):
rgb = 'R{:3s} G{:3s} B{:3s}'.format(
str(rgb_colour[0]), str(rgb_colour[1]), str(rgb_colour[2]))
readout += '\n {:8s}{:25s}{:20s}{:15s}'.format(
meta.class_codes[i], meta.class_labels[i], rgb, meta.palette_hex[i])
readout += hline
# class weights
readout += '\n\n{:30s}'.format('Distribution')
readout += hline
readout += '\n {:30s}{:10s}{:10s}'.format('Class', 'Probs', 'Weights')
readout += hline
for i, w in enumerate(meta.weights):
readout += '\n {:25s}{:10f} {:10f}'.format(
meta.class_labels[i], round(meta.probs[i], 4), round(w, 4))
readout += hline
readout += '\n{:25s}{:,}'.format('Tile pixel count', int(meta.tile_px_count))
readout += '\n{:25s}{:,}'.format('Dataset pixel count', int(meta.dset_px_count))
readout += hline + '\n'
print(readout)
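

# A non-executable usage sketch (the MLPDataset instance is an assumption; it is
# constructed elsewhere in the project):
#
#   meta = get_profile(dset)   # accumulate pixel statistics and class distribution
#   print_meta(meta)           # render the profile report to the console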
|
py
|
1a5ae8ef32967b731d7b0e8ea6a12bae879c5750
|
## @ingroupMethods-Noise-Fidelity_One-Propeller
# noise_propeller_low_fidelty.py
#
# Created: Mar 2021, M. Clarke
# Modified: Jul 2021, E. Botero
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Data
import numpy as np
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools.decibel_arithmetic import pressure_ratio_to_SPL_arithmetic
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import SPL_arithmetic
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import SPL_spectra_arithmetic
from SUAVE.Methods.Noise.Fidelity_One.Noise_Tools import compute_point_source_coordinates
from SUAVE.Methods.Noise.Fidelity_One.Propeller.compute_broadband_noise import compute_broadband_noise
from SUAVE.Methods.Noise.Fidelity_One.Propeller.compute_harmonic_noise import compute_harmonic_noise
# -------------------------------------------------------------------------------------
# Medium Fidelity Frequency Domain Methods for Acoustic Noise Prediction
# -------------------------------------------------------------------------------------
## @ingroupMethods-Noise-Fidelity_One-Propeller
def propeller_mid_fidelity(network,auc_opts,segment,settings,source = 'propeller'):
    ''' This computes the acoustic signature (sound pressure level, weighted sound pressure levels,
    and frequency spectrums) of a system of rotating blades (i.e. propellers and lift_rotors)
Assumptions:
None
Source:
None
Inputs:
network - vehicle energy network data structure [None]
segment - flight segment data structure [None]
        mic_loc - microphone location [m]
propeller - propeller class data structure [None]
auc_opts - data structure of acoustic data [None]
        settings - acoustic settings [None]
Outputs:
Results.
SPL - SPL [dB]
SPL_dBA - dbA-Weighted SPL [dBA]
SPL_bb_spectrum - broadband contribution to total SPL [dB]
SPL_spectrum - 1/3 octave band SPL [dB]
SPL_tonal_spectrum - harmonic contribution to total SPL [dB]
SPL_bpfs_spectrum - 1/3 octave band harmonic contribution to total SPL [dB]
Properties Used:
N/A
'''
# unpack
conditions = segment.state.conditions
microphone_locations = conditions.noise.total_microphone_locations
angle_of_attack = conditions.aerodynamics.angle_of_attack
velocity_vector = conditions.frames.inertial.velocity_vector
freestream = conditions.freestream
harmonics = settings.harmonics
if not network.identical_propellers:
        raise NotImplementedError('This method currently only works with identical propellers')
# Because the propellers are identical, get the first propellers results
auc_opts = auc_opts[list(auc_opts.keys())[0]]
# create data structures for computation
Noise = Data()
Results = Data()
# compute position vector of microphones
position_vector = compute_point_source_coordinates(conditions,network,microphone_locations,source)
# Harmonic Noise
compute_harmonic_noise(harmonics,freestream,angle_of_attack,position_vector,velocity_vector,network,auc_opts,settings,Noise,source)
# Broadband Noise
compute_broadband_noise(freestream,angle_of_attack,position_vector, velocity_vector,network,auc_opts,settings,Noise,source)
# Combine Rotational(periodic/tonal) and Broadband Noise
Noise.SPL_prop_bpfs_spectrum = Noise.SPL_r
Noise.SPL_prop_spectrum = 10*np.log10( 10**(Noise.SPL_prop_h_spectrum/10) + 10**(Noise.SPL_prop_bb_spectrum/10))
Noise.SPL_prop_spectrum[np.isnan(Noise.SPL_prop_spectrum)] = 0
    # Pressure ratios are used to combine A-weighted sound levels, since decibel arithmetic
    # does not work for broadband noise, which is a continuous spectrum
total_p_pref_dBA = np.concatenate((Noise.p_pref_r_dBA,Noise.p_pref_bb_dBA), axis=3)
Noise.SPL_dBA_prop = pressure_ratio_to_SPL_arithmetic(total_p_pref_dBA)
Noise.SPL_dBA_prop[np.isinf(Noise.SPL_dBA_prop)] = 0
Noise.SPL_dBA_prop[np.isnan(Noise.SPL_dBA_prop)] = 0
    # Summation of spectra from propellers into one SPL
Results.bpfs = Noise.f[:,0,0,0,:] # blade passing frequency harmonics
Results.SPL = SPL_arithmetic(SPL_arithmetic(Noise.SPL_prop_spectrum))
Results.SPL_dBA = SPL_arithmetic(Noise.SPL_dBA_prop)
Results.SPL_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_spectrum) # 1/3 octave band
    Results.SPL_bpfs_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_bpfs_spectrum) # blade passing frequency spectrum
Results.SPL_tonal_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_tonal_spectrum)
Results.SPL_bb_spectrum = SPL_spectra_arithmetic(Noise.SPL_prop_bb_spectrum)
auc_opts.bpfs = Results.bpfs
auc_opts.SPL = Results.SPL
auc_opts.SPL_dBA = Results.SPL_dBA
auc_opts.SPL_spectrum = Results.SPL_spectrum
auc_opts.SPL_bpfs_spectrum = Results.SPL_bpfs_spectrum
auc_opts.SPL_tonal_spectrum = Results.SPL_tonal_spectrum
auc_opts.SPL_bb_spectrum = Results.SPL_bb_spectrum
return Results
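

# A non-executable usage sketch (the network/segment/settings objects are assumptions
# here; they are normally assembled by SUAVE's mission and noise analyses):
#
#   results = propeller_mid_fidelity(network, auc_opts, segment, settings, source='propeller')
#   # results.SPL, results.SPL_dBA, results.SPL_spectrum, ... per microphone location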
|
py
|
1a5aeb38485006e202ffaa3e1346fd5e72229585
|
from struct import pack, unpack
import hashlib
import sys
import traceback
from electrum import bitcoin
from electrum.bitcoin import TYPE_ADDRESS, int_to_hex, var_int
from electrum.i18n import _
from electrum.plugins import BasePlugin
from electrum.keystore import Hardware_KeyStore
from electrum.transaction import Transaction
from ..hw_wallet import HW_PluginBase
from electrum.util import print_error, is_verbose, bfh, bh2u, versiontuple
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
BTCHIP = True
BTCHIP_DEBUG = is_verbose
except ImportError:
BTCHIP = False
MSG_NEEDS_FW_UPDATE_GENERIC = _('Firmware version too old. Please update at') + \
' https://www.ledgerwallet.com'
MSG_NEEDS_FW_UPDATE_SEGWIT = _('Firmware version (or "Bitcoin" app) too old for Segwit support. Please update at') + \
' https://www.ledgerwallet.com'
MULTI_OUTPUT_SUPPORT = '1.1.4'
SEGWIT_SUPPORT = '1.1.10'
SEGWIT_SUPPORT_SPECIAL = '1.0.4'
class Ledger_Client():
def __init__(self, hidDevice):
self.dongleObject = btchip(hidDevice)
self.preflightDone = False
def is_pairable(self):
return True
def close(self):
self.dongleObject.dongle.close()
def timeout(self, cutoff):
pass
def is_initialized(self):
return True
def label(self):
return ""
def i4b(self, x):
return pack('>I', x)
def has_usable_connection_with_device(self):
try:
self.dongleObject.getFirmwareVersion()
except BaseException:
return False
return True
def test_pin_unlocked(func):
"""Function decorator to test the Ledger for being unlocked, and if not,
raise a human-readable exception.
"""
def catch_exception(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except BTChipException as e:
if e.sw == 0x6982:
raise Exception(_('Your Ledger is locked. Please unlock it.'))
else:
raise
return catch_exception
@test_pin_unlocked
def get_xpub(self, bip32_path, xtype):
self.checkDevice()
# bip32_path is of the form 44'/0'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
if xtype in ['p2wpkh', 'p2wsh'] and not self.supports_native_segwit():
raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
if xtype in ['p2wpkh-p2sh', 'p2wsh-p2sh'] and not self.supports_segwit():
raise Exception(MSG_NEEDS_FW_UPDATE_SEGWIT)
splitPath = bip32_path.split('/')
if splitPath[0] == 'm':
splitPath = splitPath[1:]
bip32_path = bip32_path[2:]
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
childnum = int(lastChild[0]) if len(lastChild) == 1 else 0x80000000 | int(lastChild[0])
xpub = bitcoin.serialize_xpub(xtype, nodeData['chainCode'], publicKey, depth, self.i4b(fingerprint), self.i4b(childnum))
return xpub
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
except BTChipException as e:
if e.sw == 0x6d00:
return False
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
except BTChipException as e:
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def supports_multi_output(self):
return self.multiOutputSupported
def supports_segwit(self):
return self.segwitSupported
def supports_native_segwit(self):
return self.nativeSegwitSupported
def perform_hw1_preflight(self):
try:
firmwareInfo = self.dongleObject.getFirmwareVersion()
firmware = firmwareInfo['version']
self.multiOutputSupported = versiontuple(firmware) >= versiontuple(MULTI_OUTPUT_SUPPORT)
self.nativeSegwitSupported = versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT)
self.segwitSupported = self.nativeSegwitSupported or (firmwareInfo['specialVersion'] == 0x20 and versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT_SPECIAL))
if not checkFirmware(firmwareInfo):
self.dongleObject.dongle.close()
raise Exception(MSG_NEEDS_FW_UPDATE_GENERIC)
try:
self.dongleObject.getOperationMode()
except BTChipException as e:
if (e.sw == 0x6985):
self.dongleObject.dongle.close()
self.handler.get_setup( )
# Acquire the new client on the next run
else:
raise e
if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler is not None):
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
if remaining_attempts != 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise Exception('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
except BTChipException as e:
if (e.sw == 0x6faa):
raise Exception("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise Exception("Invalid PIN - please unplug the dongle and plug it again before retrying")
if e.sw == 0x6f00 and e.message == 'Invalid channel':
# based on docs 0x6f00 might be a more general error, hence we also compare message to be sure
raise Exception("Invalid channel.\n"
"Please make sure that 'Browser support' is disabled on your device.")
raise e
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
if (e.sw == 0x6d00 or e.sw == 0x6700):
raise Exception(_("Device not in Bitcoin mode")) from e
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode':0,'pair':''})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_derivation(self):
return self.derivation
def get_client(self):
return self.plugin.get_client(self).dongleObject
def get_client_electrum(self):
return self.plugin.get_client(self)
def give_error(self, message, clear_client = False):
print_error(message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise Exception(message)
def set_and_unset_signing(func):
"""Function decorator to set and unset self.signing."""
def wrapper(self, *args, **kwargs):
try:
self.signing = True
return func(self, *args, **kwargs)
finally:
self.signing = False
return wrapper
def address_id_stripped(self, address):
# Strip the leading "m/"
change, index = self.get_address_index(address)
derivation = self.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
return address_path[2:]
def decrypt_message(self, pubkey, message, password):
raise RuntimeError(_('Encryption and decryption are currently not supported for {}').format(self.device))
@set_and_unset_signing
def sign_message(self, sequence, message, password):
message = message.encode('utf8')
message_hash = hashlib.sha256(message).hexdigest().upper()
# prompt for the PIN before displaying the dialog if necessary
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message("Signing message ...\r\nMessage hash: "+message_hash)
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
pin = self.handler.get_auth( info ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = self.get_client().signMessageSign(pin)
except BTChipException as e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
elif e.sw == 0x6985: # cancelled by user
return b''
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return b''
except Exception as e:
self.give_error(e, True)
finally:
self.handler.finished()
# Parse the ASN.1 signature
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
# And convert it
return bytes([27 + 4 + (signature[0] & 0x01)]) + r + s
@set_and_unset_signing
def sign_transaction(self, tx, password):
if tx.is_complete():
return
client = self.get_client()
inputs = []
inputsPaths = []
pubKeys = []
chipInputs = []
redeemScripts = []
signatures = []
preparedTrustedInputs = []
changePath = ""
changeAmount = None
output = None
outputAmount = None
p2shTransaction = False
segwitTransaction = False
pin = ""
self.get_client() # prompt for the PIN before displaying the dialog if necessary
# Fetch inputs of the transaction to sign
derivations = self.get_tx_derivations(tx)
for txin in tx.inputs():
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] in ['p2sh']:
p2shTransaction = True
if txin['type'] in ['p2wpkh-p2sh', 'p2wsh-p2sh']:
if not self.get_client_electrum().supports_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
if txin['type'] in ['p2wpkh', 'p2wsh']:
if not self.get_client_electrum().supports_native_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
for i, x_pubkey in enumerate(x_pubkeys):
if x_pubkey in derivations:
signingPos = i
s = derivations.get(x_pubkey)
hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
redeemScript = Transaction.get_preimage_script(txin)
if txin.get('prev_tx') is None: # and not Transaction.is_segwit_input(txin):
# note: offline signing does not work atm even with segwit inputs for ledger
raise Exception(_('Offline signing with {} is not supported.').format(self.device))
inputs.append([txin['prev_tx'].raw, txin['prevout_n'], redeemScript, txin['prevout_hash'], signingPos, txin.get('sequence', 0xffffffff - 1) ])
inputsPaths.append(hwAddress)
pubKeys.append(pubkeys)
# Sanity check
if p2shTransaction:
for txin in tx.inputs():
if txin['type'] != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for txout in tx.outputs():
output_type, addr, amount = txout
txOutput += int_to_hex(amount, 8)
script = tx.pay_script(output_type, addr)
txOutput += var_int(len(script)//2)
txOutput += script
txOutput = bfh(txOutput)
# Recognize outputs - only one output and one change is authorized
if not p2shTransaction:
if not self.get_client_electrum().supports_multi_output():
if len(tx.outputs()) > 2:
self.give_error("Transaction with more than 2 outputs not supported")
for _type, address, amount in tx.outputs():
assert _type == TYPE_ADDRESS
info = tx.output_info.get(address)
if (info is not None) and len(tx.outputs()) > 1 \
and info[0][0] == 1: # "is on 'change' branch"
index, xpubs, m = info
changePath = self.get_derivation()[2:] + "/%d/%d"%index
changeAmount = amount
else:
output = address
outputAmount = amount
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
sequence = int_to_hex(utxo[5], 4)
if segwitTransaction:
txtmp = bitcoinTransaction(bfh(utxo[0]))
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
tmp += txtmp.outputs[utxo[1]].amount
chipInputs.append({'value' : tmp, 'witness' : True, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
elif not p2shTransaction:
txtmp = bitcoinTransaction(bfh(utxo[0]))
trustedInput = self.get_client().getTrustedInput(txtmp, utxo[1])
trustedInput['sequence'] = sequence
chipInputs.append(trustedInput)
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
chipInputs.append({'value' : tmp, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize()
self.get_client().enableAlternate2fa(False)
if segwitTransaction:
self.get_client().startUntrustedTransaction(True, inputIndex,
chipInputs, redeemScripts[inputIndex])
if changePath:
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
while inputIndex < len(inputs):
singleInput = [ chipInputs[inputIndex] ]
self.get_client().startUntrustedTransaction(False, 0,
singleInput, redeemScripts[inputIndex])
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
else:
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex])
if changePath:
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
else:
outputData = self.get_client().finalizeInputFull(txOutput)
outputData['outputData'] = txOutput
if firstTransaction:
transactionOutput = outputData['outputData']
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth( outputData ) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
if pin != 'paired':
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
return
else:
traceback.print_exc(file=sys.stderr)
self.give_error(e, True)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.give_error(e, True)
finally:
self.handler.finished()
for i, txin in enumerate(tx.inputs()):
signingPos = inputs[i][4]
txin['signatures'][signingPos] = bh2u(signatures[i])
tx.raw = tx.serialize()
@set_and_unset_signing
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d"%sequence
self.handler.show_message(_("Showing address ..."))
segwit = Transaction.is_segwit_inputtype(txin_type)
segwitNative = txin_type == 'p2wpkh'
try:
client.getWalletPublicKey(address_path, showOnScreen=True, segwit=segwit, segwitNative=segwitNative)
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
pass
else:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
finally:
self.handler.finished()
class LedgerPlugin(HW_PluginBase):
libraries_available = BTCHIP
keystore_class = Ledger_KeyStore
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001) # Nano-S
]
def __init__(self, parent, config, name):
self.segwit = config.get("segwit")
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def get_btchip_device(self, device):
ledger = False
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c:
ledger = True
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c:
ledger = True
if device.product_key[0] == 0x2c97:
if device.interface_number == 0 or device.usage_page == 0xffa0:
ledger = True
else:
return None # non-compatible interface of a Nano S or Blue
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
def create_client(self, device, handler):
if handler:
self.handler = handler
client = self.get_btchip_device(device)
if client is not None:
client = Ledger_Client(client)
return client
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.get_xpub("m/44'/8'", 'standard') # TODO replace by direct derivation once Nano S > 1.1
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.checkDevice()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
#if client:
# client.used()
if client is not None:
client.checkDevice()
return client
def show_address(self, wallet, address):
sequence = wallet.get_address_index(address)
txin_type = wallet.get_txin_type(address)
wallet.get_keystore().show_address(sequence, txin_type)
|
py
|
1a5aeb4252592248ac27f863219911a84d8730bb
|
import os
import random
import string
import time
from collections import defaultdict
from contextlib import contextmanager
import pendulum
import pytest
from dagster import (
Any,
Field,
ModeDefinition,
daily_partitioned_config,
fs_io_manager,
graph,
pipeline,
repository,
solid,
)
from dagster.core.definitions import Partition, PartitionSetDefinition
from dagster.core.definitions.reconstructable import ReconstructableRepository
from dagster.core.execution.api import execute_pipeline
from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill
from dagster.core.host_representation import (
ExternalRepositoryOrigin,
InProcessRepositoryLocationOrigin,
)
from dagster.core.storage.pipeline_run import PipelineRunStatus, RunsFilter
from dagster.core.storage.tags import BACKFILL_ID_TAG, PARTITION_NAME_TAG, PARTITION_SET_TAG
from dagster.core.test_utils import create_test_daemon_workspace, instance_for_test
from dagster.core.workspace.load_target import PythonFileTarget
from dagster.daemon import get_default_daemon_logger
from dagster.daemon.backfill import execute_backfill_iteration
from dagster.seven import IS_WINDOWS, get_system_temp_directory
from dagster.utils import touch_file
from dagster.utils.error import SerializableErrorInfo
default_mode_def = ModeDefinition(resource_defs={"io_manager": fs_io_manager})
def _failure_flag_file():
return os.path.join(get_system_temp_directory(), "conditionally_fail")
def _step_events(instance, run):
events_by_step = defaultdict(set)
logs = instance.all_logs(run.run_id)
for record in logs:
if not record.is_dagster_event or not record.step_key:
continue
        events_by_step[record.step_key].add(record.dagster_event.event_type_value)
return events_by_step
@solid
def always_succeed(_):
return 1
@graph()
def comp_always_succeed():
always_succeed()
@daily_partitioned_config(start_date="2021-05-05")
def my_config(_start, _end):
return {}
always_succeed_job = comp_always_succeed.to_job(config=my_config)
@solid
def fail_solid(_):
raise Exception("blah")
@solid
def conditionally_fail(_, _input):
if os.path.isfile(_failure_flag_file()):
raise Exception("blah")
return 1
@solid
def after_failure(_, _input):
return 1
@pipeline(mode_defs=[default_mode_def])
def the_pipeline():
always_succeed()
@pipeline(mode_defs=[default_mode_def])
def conditional_failure_pipeline():
after_failure(conditionally_fail(always_succeed()))
@pipeline(mode_defs=[default_mode_def])
def partial_pipeline():
always_succeed.alias("step_one")()
always_succeed.alias("step_two")()
always_succeed.alias("step_three")()
@pipeline(mode_defs=[default_mode_def])
def parallel_failure_pipeline():
fail_solid.alias("fail_one")()
fail_solid.alias("fail_two")()
fail_solid.alias("fail_three")()
always_succeed.alias("success_four")()
@solid(config_schema=Field(Any))
def config_solid(_):
return 1
@pipeline(mode_defs=[default_mode_def])
def config_pipeline():
config_solid()
simple_partition_set = PartitionSetDefinition(
name="simple_partition_set",
pipeline_name="the_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
conditionally_fail_partition_set = PartitionSetDefinition(
name="conditionally_fail_partition_set",
pipeline_name="conditional_failure_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
partial_partition_set = PartitionSetDefinition(
name="partial_partition_set",
pipeline_name="partial_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
parallel_failure_partition_set = PartitionSetDefinition(
name="parallel_failure_partition_set",
pipeline_name="parallel_failure_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
)
def _large_partition_config(_):
REQUEST_CONFIG_COUNT = 50000
def _random_string(length):
return "".join(random.choice(string.ascii_lowercase) for x in range(length))
return {
"solids": {
"config_solid": {
"config": {
"foo": {
_random_string(10): _random_string(20) for i in range(REQUEST_CONFIG_COUNT)
}
}
}
}
}
large_partition_set = PartitionSetDefinition(
name="large_partition_set",
pipeline_name="config_pipeline",
partition_fn=lambda: [Partition("one"), Partition("two"), Partition("three")],
run_config_fn_for_partition=_large_partition_config,
)
def _unloadable_partition_set_origin():
working_directory = os.path.dirname(__file__)
recon_repo = ReconstructableRepository.for_file(__file__, "doesnt_exist", working_directory)
return ExternalRepositoryOrigin(
InProcessRepositoryLocationOrigin(recon_repo), "fake_repository"
).get_partition_set_origin("doesnt_exist")
@repository
def the_repo():
return [
the_pipeline,
conditional_failure_pipeline,
partial_pipeline,
config_pipeline,
simple_partition_set,
conditionally_fail_partition_set,
partial_partition_set,
large_partition_set,
always_succeed_job,
parallel_failure_partition_set,
parallel_failure_pipeline,
]
@contextmanager
def default_repo():
load_target = workspace_load_target()
origin = load_target.create_origins()[0]
with origin.create_single_location() as location:
yield location.get_repository("the_repo")
def workspace_load_target():
return PythonFileTarget(
python_file=__file__,
attribute=None,
working_directory=os.path.dirname(__file__),
location_name="test_location",
)
@contextmanager
def instance_for_context(external_repo_context, overrides=None):
with instance_for_test(overrides) as instance:
with create_test_daemon_workspace(
workspace_load_target=workspace_load_target()
) as workspace:
with external_repo_context() as external_repo:
yield (instance, workspace, external_repo)
def step_did_not_run(instance, run, step_name):
step_events = _step_events(instance, run)[step_name]
return len(step_events) == 0
def step_succeeded(instance, run, step_name):
step_events = _step_events(instance, run)[step_name]
return "STEP_SUCCESS" in step_events
def step_failed(instance, run, step_name):
step_events = _step_events(instance, run)[step_name]
return "STEP_FAILURE" in step_events
def wait_for_all_runs_to_start(instance, timeout=10):
start_time = time.time()
while True:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for runs to start")
time.sleep(0.5)
pending_states = [
PipelineRunStatus.NOT_STARTED,
PipelineRunStatus.STARTING,
PipelineRunStatus.STARTED,
]
pending_runs = [run for run in instance.get_runs() if run.status in pending_states]
if len(pending_runs) == 0:
break
def wait_for_all_runs_to_finish(instance, timeout=10):
start_time = time.time()
FINISHED_STATES = [
PipelineRunStatus.SUCCESS,
PipelineRunStatus.FAILURE,
PipelineRunStatus.CANCELED,
]
while True:
if time.time() - start_time > timeout:
raise Exception("Timed out waiting for runs to start")
time.sleep(0.5)
not_finished_runs = [
run for run in instance.get_runs() if run.status not in FINISHED_STATES
]
if len(not_finished_runs) == 0:
break
def test_simple_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("simple_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "simple"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert two.tags[BACKFILL_ID_TAG] == "simple"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert three.tags[BACKFILL_ID_TAG] == "simple"
assert three.tags[PARTITION_NAME_TAG] == "three"
def test_canceled_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("simple_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
iterator = execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
next(iterator)
assert instance.get_runs_count() == 1
backfill = instance.get_backfills()[0]
assert backfill.status == BulkActionStatus.REQUESTED
instance.update_backfill(backfill.with_status(BulkActionStatus.CANCELED))
list(iterator)
backfill = instance.get_backfill(backfill.backfill_id)
assert backfill.status == BulkActionStatus.CANCELED
assert instance.get_runs_count() == 1
def test_failure_backfill():
output_file = _failure_flag_file()
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set(
"conditionally_fail_partition_set"
)
instance.add_backfill(
PartitionBackfill(
backfill_id="shouldfail",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
try:
touch_file(output_file)
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
finally:
os.remove(output_file)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "shouldfail"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == PipelineRunStatus.FAILURE
assert step_succeeded(instance, one, "always_succeed")
assert step_failed(instance, one, "conditionally_fail")
assert step_did_not_run(instance, one, "after_failure")
assert two.tags[BACKFILL_ID_TAG] == "shouldfail"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == PipelineRunStatus.FAILURE
assert step_succeeded(instance, two, "always_succeed")
assert step_failed(instance, two, "conditionally_fail")
assert step_did_not_run(instance, two, "after_failure")
assert three.tags[BACKFILL_ID_TAG] == "shouldfail"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == PipelineRunStatus.FAILURE
assert step_succeeded(instance, three, "always_succeed")
assert step_failed(instance, three, "conditionally_fail")
assert step_did_not_run(instance, three, "after_failure")
instance.add_backfill(
PartitionBackfill(
backfill_id="fromfailure",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=True,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert not os.path.isfile(_failure_flag_file())
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 6
from_failure_filter = RunsFilter(tags={BACKFILL_ID_TAG: "fromfailure"})
assert instance.get_runs_count(filters=from_failure_filter) == 3
runs = instance.get_runs(filters=from_failure_filter)
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "fromfailure"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == PipelineRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
assert two.tags[BACKFILL_ID_TAG] == "fromfailure"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == PipelineRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
assert three.tags[BACKFILL_ID_TAG] == "fromfailure"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == PipelineRunStatus.SUCCESS
assert step_did_not_run(instance, one, "always_succeed")
assert step_succeeded(instance, one, "conditionally_fail")
assert step_succeeded(instance, one, "after_failure")
@pytest.mark.skipif(IS_WINDOWS, reason="flaky in windows")
def test_partial_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("partial_partition_set")
# create full runs, where every step is executed
instance.add_backfill(
PartitionBackfill(
backfill_id="full",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 3
runs = instance.get_runs()
three, two, one = runs
assert one.tags[BACKFILL_ID_TAG] == "full"
assert one.tags[PARTITION_NAME_TAG] == "one"
assert one.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, one, "step_one")
assert step_succeeded(instance, one, "step_two")
assert step_succeeded(instance, one, "step_three")
assert two.tags[BACKFILL_ID_TAG] == "full"
assert two.tags[PARTITION_NAME_TAG] == "two"
assert two.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, two, "step_one")
assert step_succeeded(instance, two, "step_two")
assert step_succeeded(instance, two, "step_three")
assert three.tags[BACKFILL_ID_TAG] == "full"
assert three.tags[PARTITION_NAME_TAG] == "three"
assert three.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, three, "step_one")
assert step_succeeded(instance, three, "step_two")
assert step_succeeded(instance, three, "step_three")
# delete one of the runs, the partial reexecution should still succeed because the steps
# can be executed independently, require no input/output config
instance.delete_run(one.run_id)
assert instance.get_runs_count() == 2
# create partial runs
instance.add_backfill(
PartitionBackfill(
backfill_id="partial",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=["step_one"],
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == 5
partial_filter = RunsFilter(tags={BACKFILL_ID_TAG: "partial"})
assert instance.get_runs_count(filters=partial_filter) == 3
runs = instance.get_runs(filters=partial_filter)
three, two, one = runs
assert one.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, one, "step_one")
assert step_did_not_run(instance, one, "step_two")
assert step_did_not_run(instance, one, "step_three")
assert two.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, two, "step_one")
assert step_did_not_run(instance, two, "step_two")
assert step_did_not_run(instance, two, "step_three")
assert three.status == PipelineRunStatus.SUCCESS
assert step_succeeded(instance, three, "step_one")
assert step_did_not_run(instance, three, "step_two")
assert step_did_not_run(instance, three, "step_three")
def test_large_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set("large_partition_set")
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
def test_unloadable_backfill():
with instance_for_context(default_repo) as (
instance,
workspace,
_external_repo,
):
unloadable_origin = _unloadable_partition_set_origin()
instance.add_backfill(
PartitionBackfill(
backfill_id="simple",
partition_set_origin=unloadable_origin,
status=BulkActionStatus.REQUESTED,
partition_names=["one", "two", "three"],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 0
backfill = instance.get_backfill("simple")
assert backfill.status == BulkActionStatus.FAILED
assert isinstance(backfill.error, SerializableErrorInfo)
def test_backfill_from_partitioned_job():
partition_name_list = [
partition.name for partition in my_config.partitions_def.get_partitions()
]
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
external_partition_set = external_repo.get_external_partition_set(
"comp_always_succeed_partition_set"
)
instance.add_backfill(
PartitionBackfill(
backfill_id="partition_schedule_from_job",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=partition_name_list[:3],
from_failure=False,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
assert instance.get_runs_count() == 0
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 3
runs = reversed(instance.get_runs())
for idx, run in enumerate(runs):
assert run.tags[BACKFILL_ID_TAG] == "partition_schedule_from_job"
assert run.tags[PARTITION_NAME_TAG] == partition_name_list[idx]
assert run.tags[PARTITION_SET_TAG] == "comp_always_succeed_partition_set"
def test_backfill_from_failure_for_subselection():
with instance_for_context(default_repo) as (
instance,
workspace,
external_repo,
):
partition = parallel_failure_partition_set.get_partition("one")
run_config = parallel_failure_partition_set.run_config_for_partition(partition)
tags = parallel_failure_partition_set.tags_for_partition(partition)
external_partition_set = external_repo.get_external_partition_set(
"parallel_failure_partition_set"
)
execute_pipeline(
parallel_failure_pipeline,
run_config=run_config,
tags=tags,
instance=instance,
solid_selection=["fail_three", "success_four"],
raise_on_error=False,
)
assert instance.get_runs_count() == 1
wait_for_all_runs_to_finish(instance)
run = instance.get_runs()[0]
assert run.status == PipelineRunStatus.FAILURE
instance.add_backfill(
PartitionBackfill(
backfill_id="fromfailure",
partition_set_origin=external_partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=["one"],
from_failure=True,
reexecution_steps=None,
tags=None,
backfill_timestamp=pendulum.now().timestamp(),
)
)
list(
execute_backfill_iteration(
instance, workspace, get_default_daemon_logger("BackfillDaemon")
)
)
assert instance.get_runs_count() == 2
run = instance.get_runs(limit=1)[0]
assert run.solids_to_execute
assert run.solid_selection
assert len(run.solids_to_execute) == 2
assert len(run.solid_selection) == 2
|
py
|
1a5aeb88c3fd9b5c342ca141d11b4407c8a6f674
|
# coding=utf-8
#
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from f5.bigip.resource import AsmResource
from f5.bigip.resource import Collection
from icontrol.exceptions import iControlUnexpectedHTTPError
class Signatures_s(Collection):
"""BIG-IP® ASM Signatures collection."""
def __init__(self, asm):
super(Signatures_s, self).__init__(asm)
self._meta_data['object_has_stats'] = False
self._meta_data['allowed_lazy_attributes'] = [Signature]
self._meta_data['attribute_registry'] = {
'tm:asm:signatures:signaturestate': Signature
}
class Signature(AsmResource):
"""BIG-IP® ASM Signature resource.
.. note:: Only user-created signatures can be modified/deleted.
Default signatures are READ-ONLY.
"""
def __init__(self, signatures_s):
super(Signature, self).__init__(signatures_s)
self._meta_data['required_json_kind'] = 'tm:asm:signatures:signaturestate'
self._meta_data['required_creation_parameters'].update(
('attackTypeReference', 'rule')
)
def create(self, **kwargs):
"""Custom creation logic to handle edge cases
This shouldn't be needed, but ASM has a tendency to raise various errors that
are painful to handle from a customer point-of-view. These errors are especially
pronounced when doing things concurrently with asm.
The errors themselves are described in the _check_exception handler.
To address these failures, we try a number of exception handling cases to catch
and reliably deal with the error.
:param kwargs:
:return:
"""
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._create(**kwargs)
except iControlUnexpectedHTTPError as err:
if self._check_exception(err):
continue
else:
raise
raise ex
def delete(self, **kwargs):
"""Custom deletion logic to handle edge cases
This shouldn't be needed, but ASM has a tendency to raise various errors that
are painful to handle from a customer point-of-view. These errors are especially
pronounced when doing things concurrently with asm.
The errors themselves are described in the _check_exception handler.
To address these failures, we try a number of exception handling cases to catch
and reliably deal with the error.
:param kwargs:
:return:
"""
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._delete(**kwargs)
except iControlUnexpectedHTTPError as err:
if self._check_exception(err):
continue
else:
raise
raise ex
def modify(self, **kwargs):
ex = iControlUnexpectedHTTPError(
"Failed to modify the signature"
)
for _ in range(0, 30):
try:
return self._modify(**kwargs)
except iControlUnexpectedHTTPError as err:
if self._check_exception(err):
continue
else:
raise
raise ex
def update(self, **kwargs):
ex = iControlUnexpectedHTTPError(
"Failed to delete the signature"
)
for _ in range(0, 30):
try:
return self._update(**kwargs)
except iControlUnexpectedHTTPError as err:
if self._check_exception(err):
continue
else:
raise
raise ex
def _check_exception(self, ex):
"""Check for exceptions in action responses
In versions of ASM < v12, the REST API is quite unstable and therefore
needs some additional supporting retries to ensure that actions function
as expected. In particular versions 11.5.4 and 11.6.0 are affected.
This method handles checking for various exceptions and allowing the
given command to retry itself.
:param ex:
:return:
"""
retryable = [
# iControlUnexpectedHTTPError: 500 Unexpected Error: Internal Server Error ...
# {
# "code": 500,
# "message": "Could not add_signature the Attack Signature. "
# "Failed on insert to PLC.NEGSIG_SET_SIGNATURES "
# "(DBD::mysql::db do failed: Lock wait timeout exceeded; "
# "try restarting transaction)
#
'Lock wait timeout exceeded',
# {
# "code": 500,
# "message": "DBD::mysql::db do failed: Deadlock found when "
# "trying to get lock; try restarting transaction"
#
'Deadlock found when',
# {
# "code": 404,
# "message": "Could not add_signature the Attack Signature, "
# "internal data inconsistency was detected.",
'internal data inconsistency',
]
if any(x in str(ex) for x in retryable):
time.sleep(3)
return True
elif 'errorStack' in ex:
stack = ' '.join(ex['errorStack'])
if any(x in stack for x in retryable):
time.sleep(3)
return True
else:
return False
else:
return False
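# Hedged usage sketch (not part of this module): the create()/delete()/modify()/update()
# overrides above transparently retry the transient ASM database errors listed in
# _check_exception(). Host, credentials, property values and the attack-type link below
# are placeholders, not values taken from this file.
#
# from f5.bigip import ManagementRoot
#
# mgmt = ManagementRoot('192.0.2.10', 'admin', 'admin')
# sig = mgmt.tm.asm.signatures_s.signature.create(
#     name='example_signature',
#     rule='content:"example-pattern"; depth:100;',
#     attackTypeReference={'link': 'https://localhost/mgmt/tm/asm/attack-types/<id>'},
# )
# sig.modify(description='updated by script')  # retried automatically on lock/deadlock errors
# sig.delete()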
|
py
|
1a5aeba651b4a1ee5a6857aeef55d9e7d62427e1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import inspect
from pandapower.auxiliary import _check_bus_index_and_print_warning_if_high, \
_check_gen_index_and_print_warning_if_high, _init_runpp_options, _init_rundcopp_options, \
_init_rundcpp_options, _init_runopp_options, _internal_stored
from pandapower.opf.validate_opf_input import _check_necessary_opf_parameters
from pandapower.optimal_powerflow import _optimal_powerflow
from pandapower.powerflow import _powerflow, _recycled_powerflow
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def set_user_pf_options(net, overwrite=False, **kwargs):
"""
This function sets the 'user_pf_options' dict for net. These options overrule
net.__internal_options once they are added to net. These options are used to configure the
load flow calculation.
At the same time, user-defined arguments for pandapower.runpp() always have a higher priority.
To remove user_pf_options, set overwrite=True and provide no additional arguments
:param net: pandaPower network
:param overwrite: specifies whether the user_pf_options is removed before setting new options
:param kwargs: load flow options, e.g. tolerance_mva = 1e-3
:return: None
"""
standard_parameters = ['calculate_voltage_angles', 'trafo_model', 'check_connectivity', 'mode',
'copy_constraints_to_ppc', 'switch_rx_ratio', 'enforce_q_lims',
'recycle', 'voltage_depend_loads', 'consider_line_temperature', 'delta',
'trafo3w_losses', 'init_vm_pu', 'init_va_degree', 'init_results',
'tolerance_mva', 'trafo_loading', 'numba', 'ac', 'algorithm',
'max_iteration', 'v_debug', 'run_control']
if overwrite or 'user_pf_options' not in net.keys():
net['user_pf_options'] = dict()
net.user_pf_options.update({key: val for key, val in kwargs.items()
if key in standard_parameters})
additional_kwargs = {key: val for key, val in kwargs.items()
if key not in standard_parameters}
# this part is to inform user and to make typos in parameters visible
if len(additional_kwargs) > 0:
logger.info('parameters %s are not in the list of standard options' % list(
additional_kwargs.keys()))
net.user_pf_options.update(additional_kwargs)
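# Hedged usage sketch: "net" stands for any pandapower network object. Options stored this
# way are picked up by later runpp() calls unless they are overridden by explicit arguments.
#
# set_user_pf_options(net, calculate_voltage_angles=True, init="dc", tolerance_mva=1e-6)
# runpp(net)  # runs with the stored options
# set_user_pf_options(net, overwrite=True)  # clears all stored options again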
def runpp(net, algorithm='nr', calculate_voltage_angles="auto", init="auto",
max_iteration="auto", tolerance_mva=1e-8, trafo_model="t",
trafo_loading="current", enforce_q_lims=False, check_connectivity=True,
voltage_depend_loads=True, consider_line_temperature=False,
run_control=False, **kwargs):
"""
Runs a power flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**algorithm** (str, "nr") - algorithm that is used to solve the power flow problem.
The following algorithms are available:
- "nr" Newton-Raphson (pypower implementation with numba accelerations)
- "iwamoto_nr" Newton-Raphson with Iwamoto multiplier (maybe slower than NR but more robust)
- "bfsw" backward/forward sweep (specially suited for radial and weakly-meshed networks)
- "gs" gauss-seidel (pypower implementation)
- "fdbx" fast-decoupled (pypower implementation)
- "fdxb" fast-decoupled (pypower implementation)
**calculate_voltage_angles** (bool, "auto") - consider voltage angles in loadflow calculation
If True, voltage angles of ext_grids and transformer shifts are considered in the
loadflow calculation. Considering the voltage angles is only necessary in meshed
networks that are usually found in higher voltage levels. calculate_voltage_angles
in "auto" mode defaults to:
- True, if the network voltage level is above 70 kV
- False otherwise
The network voltage level is defined as the maximum rated voltage of any bus in the network that
is connected to a line.
**init** (str, "auto") - initialization method of the loadflow
pandapower supports four methods for initializing the loadflow:
- "auto" - init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
- "flat"- flat start with voltage of 1.0pu and angle of 0° at all PQ-buses and 0° for PV buses as initial solution
- "dc" - initial DC loadflow before the AC loadflow. The results of the DC loadflow are used as initial solution for the AC loadflow.
- "results" - voltage vector of last loadflow from net.res_bus is used as initial solution. This can be useful to accelerate convergence in iterative loadflows like time series calculations.
Considering the voltage angles might lead to non-convergence of the power flow in flat start.
That is why in "auto" mode, init defaults to "dc" if calculate_voltage_angles is True or "flat" otherwise
**max_iteration** (int, "auto") - maximum number of iterations carried out in the power flow algorithm.
In "auto" mode, the default value depends on the power flow solver:
- 10 for "nr"
- 100 for "bfsw"
- 1000 for "gs"
- 30 for "fdbx"
- 30 for "fdxb"
**tolerance_mva** (float, 1e-8) - loadflow termination condition referring to P / Q mismatch of node power in MVA
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model.
- "pi" - transformer is modeled as equivalent PI-model. This is not recommended, since it is less exact than the T-model. It is only recommended for valdiation with other software that uses the pi-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**enforce_q_lims** (bool, False) - respect generator reactive power limits
If True, the reactive power limits in net.gen.max_q_mvar/min_q_mvar are respected in the
loadflow. This is done by running a second loadflow if reactive power limits are
violated at any generator, so that the runtime for the loadflow will increase if reactive
power has to be curtailed.
Note: enforce_q_lims only works if algorithm="nr"!
**check_connectivity** (bool, True) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is performed.
If the check finds unsupplied buses, they are set out of service in the ppc
**voltage_depend_loads** (bool, True) - consideration of voltage-dependent loads. If False, net.load.const_z_percent and net.load.const_i_percent are not considered, i.e. net.load.p_mw and net.load.q_mvar are considered as constant-power loads.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided
line temperature. If True, net.line must contain a column "temperature_degree_celsius".
The temperature dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**KWARGS:
**numba** (bool, True) - Activation of numba JIT compiler in the newton solver
If set to True, the numba JIT compiler is used to generate matrices for the powerflow,
which leads to significant speed improvements.
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**delta_q** - Reactive power tolerance for option "enforce_q_lims" in kvar - helps convergence in some cases.
**trafo3w_losses** - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**v_debug** (bool, False) - if True, voltage values in each newton-raphson iteration are logged in the ppc
**init_vm_pu** (string/float/array/Series, None) - Allows to define initialization specifically for voltage magnitudes. Only works with init == "auto"!
- "auto": all buses are initialized with the mean value of all voltage controlled elements in the grid
- "flat" for flat start from 1.0
- "results": voltage magnitude vector is taken from result table
- a float with which all voltage magnitudes are initialized
- an iterable with a voltage magnitude value for each bus (length and order has to match with the buses in net.bus)
- a pandas Series with a voltage magnitude value for each bus (indexes have to match the indexes in net.bus)
**init_va_degree** (string/float/array/Series, None) - Allows to define initialization specifically for voltage angles. Only works with init == "auto"!
- "auto": voltage angles are initialized from DC power flow if angles are calculated or as 0 otherwise
- "dc": voltage angles are initialized from DC power flow
- "flat" for flat start from 0
- "results": voltage angle vector is taken from result table
- a float with which all voltage angles are initialized
- an iterable with a voltage angle value for each bus (length and order has to match with the buses in net.bus)
- a pandas Series with a voltage angle value for each bus (indexes have to match the indexes in net.bus)
**recycle** (dict, none) - Reuse of internal powerflow variables for time series calculation
Contains a dict with the following parameters:
bus_pq: If True PQ values of buses are updated
trafo: If True trafo relevant variables, e.g., the Ybus matrix, is recalculated
gen: If True Sbus and the gen table in the ppc are recalculated
**neglect_open_switch_branches** (bool, False) - If True no auxiliary buses are created for branches when switches are opened at the branch. Instead branches are set out of service
"""
# if dict 'user_pf_options' is present in net, these options overrule the net.__internal_options
# except for parameters that are passed by user
recycle = kwargs.get("recycle", None)
if isinstance(recycle, dict) and _internal_stored(net):
_recycled_powerflow(net, **kwargs)
return
if run_control and net.controller.in_service.any():
from pandapower.control import run_control
parameters = {**locals(), **kwargs}
# disable run control for inner loop to avoid infinite loop
parameters["run_control"] = False
run_control(**parameters)
else:
passed_parameters = _passed_runpp_parameters(locals())
_init_runpp_options(net, algorithm=algorithm, calculate_voltage_angles=calculate_voltage_angles,
init=init, max_iteration=max_iteration, tolerance_mva=tolerance_mva,
trafo_model=trafo_model, trafo_loading=trafo_loading,
enforce_q_lims=enforce_q_lims, check_connectivity=check_connectivity,
voltage_depend_loads=voltage_depend_loads,
consider_line_temperature=consider_line_temperature,
passed_parameters=passed_parameters, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
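# Hedged usage sketch: AC power flow on one of the shipped example grids
# (pandapower.networks is assumed to be available alongside this module).
#
# import pandapower.networks as nw
# net = nw.example_simple()
# runpp(net, algorithm="nr", init="auto")
# print(net.res_bus.vm_pu)  # per-unit bus voltage magnitudes of the solved case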
def rundcpp(net, trafo_model="t", trafo_loading="current", recycle=None, check_connectivity=True,
switch_rx_ratio=2, trafo3w_losses="hv", **kwargs):
"""
Runs PANDAPOWER DC Flow
INPUT:
**net** - The pandapower format network
OPTIONAL:
**trafo_model** (str, "t") - transformer equivalent circuit model
pandapower provides two equivalent circuit models for the transformer:
- "t" - transformer is modeled as equivalent with the T-model. This is consistent with PowerFactory and is also more accurate than the PI-model. We recommend using this transformer model.
- "pi" - transformer is modeled as equivalent PI-model. This is consistent with Sincal, but the method is questionable since the transformer is physically T-shaped. We therefore recommend the use of the T-model.
**trafo_loading** (str, "current") - mode of calculation for transformer loading
Transformer loading can be calculated relative to the rated current or the rated power. In both cases the overall transformer loading is defined as the maximum loading on the two sides of the transformer.
- "current"- transformer loading is given as ratio of current flow and rated current of the transformer. This is the recommended setting, since thermal as well as magnetic effects in the transformer depend on the current.
- "power" - transformer loading is given as ratio of apparent power flow to the rated apparent power of the transformer.
**check_connectivity** (bool, True) - Perform an extra connectivity test after the conversion from pandapower to PYPOWER
If True, an extra connectivity test based on SciPy Compressed Sparse Graph Routines is performed.
If the check finds unsupplied buses, they are put out of service in the PYPOWER matrix
**switch_rx_ratio** (float, 2) - rx_ratio of bus-bus-switches. If impedance is zero, buses connected by a closed bus-bus switch are fused to model an ideal bus. Otherwise, they are modelled as branches with resistance defined as z_ohm column in switch table and this parameter
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
****kwargs** - options to use for PYPOWER.runpf
"""
_init_rundcpp_options(net, trafo_model=trafo_model, trafo_loading=trafo_loading,
recycle=recycle, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_powerflow(net, **kwargs)
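# Hedged usage sketch: DC power flow on the same kind of example grid as in the runpp sketch.
#
# import pandapower.networks as nw
# net = nw.example_simple()
# rundcpp(net)
# print(net.res_bus.va_degree)  # bus voltage angles from the DC approximation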
def runopp(net, verbose=False, calculate_voltage_angles=False, check_connectivity=True,
suppress_warnings=True, switch_rx_ratio=2, delta=1e-10, init="flat", numba=True,
trafo3w_losses="hv", consider_line_temperature=False, **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities can be defined in net.sgen / net.gen / net.load / net.storage.
net.sgen.controllable signals whether a static generator is controllable. If False,
the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.gen.min_p_mw / net.gen.max_p_mw
- net.gen.min_q_mvar / net.gen.max_q_mvar
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.sgen.min_q_mvar / net.sgen.max_q_mvar
- net.dcline.max_p_mw
- net.dcline.min_q_to_mvar / net.dcline.max_q_to_mvar / net.dcline.min_q_from_mvar / net.dcline.max_q_from_mvar
- net.ext_grid.min_p_mw / net.ext_grid.max_p_mw
- net.ext_grid.min_q_mvar / net.ext_grid.max_q_mvar
- net.load.min_p_mw / net.load.max_p_mw
- net.load.min_q_mvar / net.load.max_q_mvar
- net.storage.min_p_mw / net.storage.max_p_mw
- net.storage.min_q_mvar / net.storage.max_q_mvar
Controllable loads behave just like controllable static generators. It must be stated if they are controllable.
Otherwise, they are not respected as flexibilities.
DC lines are controllable by default.
Network constraints can be defined for buses, lines and transformers in the following columns of the element tables:
- net.bus.min_vm_pu / net.bus.max_vm_pu
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
How these costs are combined into a cost function depends on the cost_function parameter.
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**init** (str, "flat") - init of starting opf vector. Options are "flat" or "pf"
Starting solution vector (x0) for opf calculations is determined by this flag. Options are:
"flat" (default): starting vector is (upper bound - lower bound) / 2
"pf": a power flow is executed prior to the opf and the pf solution is the starting vector. This may improve
convergence, but takes longer to run (the overhead is probably negligible for opf calculations)
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
**consider_line_temperature** (bool, False) - adjustment of line impedance based on provided
line temperature. If True, net.line must contain a column "temperature_degree_celsius".
The temperature dependency coefficient alpha must be provided in the net.line.alpha
column, otherwise the default value of 0.004 is used
**kwargs** - Pypower / Matpower keyword arguments: - OPF_VIOLATION (5e-6) constraint violation tolerance
- PDIPM_COSTTOL (1e-6) optimality tolerance
- PDIPM_GRADTOL (1e-6) gradient tolerance
- PDIPM_COMPTOL (1e-6) complementarity condition (inequality) tolerance
- PDIPM_FEASTOL (set to OPF_VIOLATION if not specified) feasibility (equality) tolerance
- PDIPM_MAX_IT (150) maximum number of iterations
- SCPDIPM_RED_IT(20) maximum number of step size reductions per iteration
"""
_check_necessary_opf_parameters(net, logger)
_init_runopp_options(net, calculate_voltage_angles=calculate_voltage_angles,
check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta, init=init, numba=numba,
trafo3w_losses=trafo3w_losses,
consider_line_temperature=consider_line_temperature, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
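# Hedged usage sketch: "net" is assumed to be a pandapower network with at least one
# controllable generator; the element index 0 and the cost coefficient are placeholders.
#
# import pandapower as pp
# net.gen["controllable"] = True
# net.gen["min_p_mw"], net.gen["max_p_mw"] = 0.0, 80.0
# net.bus["min_vm_pu"], net.bus["max_vm_pu"] = 0.95, 1.05
# net.line["max_loading_percent"] = 100.0
# pp.create_poly_cost(net, element=0, et="gen", cp1_eur_per_mw=10.0)
# runopp(net)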
def rundcopp(net, verbose=False, check_connectivity=True, suppress_warnings=True,
switch_rx_ratio=0.5, delta=1e-10, trafo3w_losses="hv", **kwargs):
"""
Runs the pandapower Optimal Power Flow.
Flexibilities, constraints and cost parameters are defined in the pandapower element tables.
Flexibilities for generators can be defined in net.sgen / net.gen.
net.sgen.controllable / net.gen.controllable signals if a generator is controllable. If False,
the active and reactive power are assigned as in a normal power flow. If True, the following
flexibilities apply:
- net.sgen.min_p_mw / net.sgen.max_p_mw
- net.gen.min_p_mw / net.gen.max_p_mw
- net.load.min_p_mw / net.load.max_p_mw
Network constraints can be defined for buses, lines and transformers in the following columns of the element tables:
- net.line.max_loading_percent
- net.trafo.max_loading_percent
- net.trafo3w.max_loading_percent
INPUT:
**net** - The pandapower format network
OPTIONAL:
**verbose** (bool, False) - If True, some basic information is printed
**suppress_warnings** (bool, True) - suppress warnings in pypower
If set to True, warnings are disabled during the loadflow. Because of the way data is
processed in pypower, ComplexWarnings are raised during the loadflow.
These warnings are suppressed by this option, however keep in mind all other pypower
warnings are suppressed, too.
**delta** (float, 1e-10) - power tolerance
**trafo3w_losses** (str, "hv") - defines where open loop losses of three-winding transformers are considered. Valid options are "hv", "mv", "lv" for HV/MV/LV side or "star" for the star point.
"""
if (not net.sgen.empty) & ("controllable" not in net.sgen.columns):
logger.warning('Warning: Please specify sgen["controllable"]\n')
if (not net.load.empty) & ("controllable" not in net.load.columns):
logger.warning('Warning: Please specify load["controllable"]\n')
_init_rundcopp_options(net, check_connectivity=check_connectivity,
switch_rx_ratio=switch_rx_ratio, delta=delta,
trafo3w_losses=trafo3w_losses, **kwargs)
_check_bus_index_and_print_warning_if_high(net)
_check_gen_index_and_print_warning_if_high(net)
_optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
def _passed_runpp_parameters(local_parameters):
"""
Internal function to distinguish arguments for pandapower.runpp() that are explicitly passed by
the user.
:param local_parameters: locals() in the runpp() function
:return: dictionary of explicitly passed parameters
"""
net = local_parameters.pop("net")
if not ("user_pf_options" in net.keys() and len(net.user_pf_options) > 0):
return None
try:
default_parameters = {k: v.default for k, v in inspect.signature(runpp).parameters.items()}
except:
args, varargs, keywords, defaults = inspect.getfullargspec(runpp)
default_parameters = dict(zip(args[-len(defaults):], defaults))
default_parameters.update({"init": "auto"})
passed_parameters = {
key: val for key, val in local_parameters.items()
if key in default_parameters.keys() and val != default_parameters.get(key, None)}
return passed_parameters
|
py
|
1a5aec0e00ce0e428a384c7fbfb291aef159ab4f
|
import argparse
import gym
import numpy as np
import os
import tensorflow as tf
import tempfile
import time
import json
import random
import rlattack.common.tf_util as U
from rlattack import logger
from rlattack import deepq
from rlattack.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from rlattack.common.misc_util import (
boolean_flag,
pickle_load,
pretty_eta,
relatively_safe_pickle_dump,
set_global_seeds,
RunningAvg,
SimpleMonitor
)
from rlattack.common.schedules import LinearSchedule, PiecewiseSchedule
# when updating this to non-deprecated ones, it is important to
# copy over LazyFrames
from rlattack.common.atari_wrappers_deprecated import wrap_dqn
from rlattack.common.azure_utils import Container
from model import model, dueling_model
from statistics import statistics
def parse_args():
parser = argparse.ArgumentParser("DQN experiments for Atari games")
# Environment
parser.add_argument("--env", type=str, default="Pong",
help="name of the game")
parser.add_argument("--seed", type=int, default=42,
help="which seed to use")
# Core DQN parameters
parser.add_argument("--replay-buffer-size", type=int, default=int(1e6),
help="replay buffer size")
parser.add_argument("--lr", type=float, default=1e-4,
help="learning rate for Adam optimizer")
parser.add_argument("--num-steps", type=int, default=int(2e8),
help="total number of steps to \
run the environment for")
parser.add_argument("--batch-size", type=int, default=32,
help="number of transitions to optimize \
at the same time")
parser.add_argument("--learning-freq", type=int, default=4,
help="number of iterations between \
every optimization step")
parser.add_argument("--target-update-freq", type=int, default=40000,
help="number of iterations between \
every target network update")
# Bells and whistles
boolean_flag(parser, "noisy", default=False,
help="whether or not to NoisyNetwork")
boolean_flag(parser, "double-q", default=True,
help="whether or not to use double q learning")
boolean_flag(parser, "dueling", default=False,
help="whether or not to use dueling model")
boolean_flag(parser, "prioritized", default=False,
help="whether or not to use prioritized replay buffer")
parser.add_argument("--prioritized-alpha", type=float, default=0.6,
help="alpha parameter for prioritized replay buffer")
parser.add_argument("--prioritized-beta0", type=float, default=0.4,
help="initial value of beta \
parameters for prioritized replay")
parser.add_argument("--prioritized-eps", type=float, default=1e-6,
help="eps parameter for prioritized replay buffer")
# Checkpointing
parser.add_argument("--save-dir", type=str, default=None, required=True,
help="directory in which \
training state and model should be saved.")
parser.add_argument("--save-azure-container", type=str, default=None,
help="It present data will saved/loaded from Azure. \
Should be in format ACCOUNT_NAME:ACCOUNT_KEY:\
CONTAINER")
parser.add_argument("--save-freq", type=int, default=1e6,
help="save model once every time this many \
iterations are completed")
boolean_flag(parser, "load-on-start", default=True,
help="if true and model was previously saved then training \
will be resumed")
# V: Attack Arguments #
parser.add_argument("--attack", type=str, default=None,
help="Method to attack the model.")
parser.add_argument("--attack-init", type=int, default=0,
help="Iteration no. to begin attacks")
parser.add_argument("--attack-prob", type=float, default=0.0,
help="Probability of attack at each step, \
float in range 0 - 1.0")
return parser.parse_args()
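# Hedged usage sketch (the module file name is an assumption): a typical invocation that
# enables the dueling architecture and prioritized replay while checkpointing to ./ckpt.
#
# python train.py --env Pong --save-dir ./ckpt --dueling --prioritized --lr 1e-4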
def make_env(game_name):
env = gym.make(game_name + "NoFrameskip-v4")
monitored_env = SimpleMonitor(env)
env = wrap_dqn(monitored_env)
return env, monitored_env
def maybe_save_model(savedir, container, state):
if savedir is None:
return
start_time = time.time()
model_dir = "model-{}".format(state["num_iters"])
U.save_state(os.path.join(savedir, model_dir, "saved"))
if container is not None:
container.put(os.path.join(savedir, model_dir), model_dir)
relatively_safe_pickle_dump(state,
os.path.join(savedir,
'training_state.pkl.zip'),
compression=True)
if container is not None:
container.put(os.path.join(savedir, 'training_state.pkl.zip'),
'training_state.pkl.zip')
relatively_safe_pickle_dump(state["monitor_state"],
os.path.join(savedir, 'monitor_state.pkl'))
if container is not None:
container.put(os.path.join(savedir, 'monitor_state.pkl'),
'monitor_state.pkl')
logger.log("Saved model in {} seconds\n".format(time.time() - start_time))
def maybe_load_model(savedir, container):
"""Load model if present at the specified path."""
if savedir is None:
return
state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
if container is not None:
logger.log("Attempting to download model from Azure")
found_model = container.get(savedir, 'training_state.pkl.zip')
else:
found_model = os.path.exists(state_path)
if found_model:
state = pickle_load(state_path, compression=True)
model_dir = "model-{}".format(state["num_iters"])
if container is not None:
container.get(savedir, model_dir)
U.load_state(os.path.join(savedir, model_dir, "saved"))
logger.log("Loaded models checkpoint at {} iterations".format(
state["num_iters"]))
return state
if __name__ == '__main__':
args = parse_args()
# Parse savedir and azure container.
savedir = args.save_dir
if args.save_azure_container is not None:
account_name, account_key, container_name = \
args.save_azure_container.split(":")
container = Container(
account_name=account_name,
account_key=account_key,
container_name=container_name,
maybe_create=True
)
if savedir is None:
# Careful! This will not get cleaned up.
savedir = tempfile.TemporaryDirectory().name
else:
container = None
# Create and seed the env.
env, monitored_env = make_env(args.env)
if args.seed > 0:
set_global_seeds(args.seed)
env.unwrapped.seed(args.seed)
# V: Save arguments, configure log dump path to savedir #
if savedir:
with open(os.path.join(savedir, 'args.json'), 'w') as f:
json.dump(vars(args), f)
logger.configure(dir=savedir) # log to savedir
with U.make_session(4) as sess:
# Create training graph and replay buffer
act, train, update_target, debug, craft_adv = deepq.build_train(
make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape,
name=name),
q_func=dueling_model if args.dueling else model,
num_actions=env.action_space.n,
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=args.lr,
epsilon=1e-4),
gamma=0.99,
grad_norm_clipping=10,
double_q=args.double_q,
noisy=args.noisy,
attack=args.attack
)
approximate_num_iters = args.num_steps / 4
exploration = PiecewiseSchedule([
(0, 1.0),
(approximate_num_iters / 50, 0.1),
(approximate_num_iters / 5, 0.01)
], outside_value=0.01)
if args.prioritized:
replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size,
args.prioritized_alpha)
beta_schedule = LinearSchedule(approximate_num_iters,
initial_p=args.prioritized_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(args.replay_buffer_size)
U.initialize()
update_target()
num_iters = 0
# Load the model
state = maybe_load_model(savedir, container)
if state is not None:
num_iters, replay_buffer = state["num_iters"], state[
"replay_buffer"],
monitored_env.set_state(state["monitor_state"])
start_time, start_steps = None, None
steps_per_iter = RunningAvg(0.999)
iteration_time_est = RunningAvg(0.999)
obs = env.reset()
# Record the mean of the \sigma
sigma_name_list = []
sigma_list = []
for param in tf.compat.v1.trainable_variables():
# only record the \sigma in the action network
if 'sigma' in param.name \
and 'deepq/q_func/action_value' in param.name:
summary_name = \
param.name.replace(
'deepq/q_func/action_value/', '').replace(
'/', '.').split(':')[0]
sigma_name_list.append(summary_name)
sigma_list.append(tf.reduce_mean(input_tensor=tf.abs(param)))
f_mean_sigma = U.function(inputs=[], outputs=sigma_list)
# Statistics
writer = tf.compat.v1.summary.FileWriter(savedir, sess.graph)
im_stats = statistics(scalar_keys=['action', 'im_reward', 'td_errors',
'huber_loss'] + sigma_name_list)
ep_stats = statistics(scalar_keys=['ep_reward', 'ep_length'])
# Main training loop
ep_length = 0
while True:
num_iters += 1
ep_length += 1
# V: Perturb observation if we are past the init stage
# and at a designated attack step
# if craft_adv != None and (num_iters >= args.attack_init)
# and ((num_iters - args.attack_init) % args.attack_freq == 0) :
if craft_adv is not None and (num_iters >= args.attack_init) and (
random.random() <= args.attack_prob):
obs = craft_adv(np.array(obs)[None])[0]
# Take action and store transition in the replay buffer.
if args.noisy:
# greedily choose
action = act(np.array(obs)[None], stochastic=False)[0]
else:
# epsilon greedy
action = act(np.array(obs)[None],
update_eps=exploration.value(num_iters))[0]
new_obs, rew, done, info = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
if done:
obs = env.reset()
if (num_iters > max(5 * args.batch_size,
args.replay_buffer_size // 20) and
num_iters % args.learning_freq == 0):
# Sample a bunch of transitions from replay buffer
if args.prioritized:
experience = replay_buffer.sample(args.batch_size,
beta=beta_schedule.value(
num_iters))
(obses_t, actions, rewards, obses_tp1, dones, weights,
batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = \
replay_buffer.sample(args.batch_size)
weights = np.ones_like(rewards)
# Minimize the Bellman error and compute the TD-error
td_errors, huber_loss = train(obses_t, actions, rewards,
obses_tp1, dones, weights)
# Update the priorities in the replay buffer
if args.prioritized:
new_priorities = np.abs(td_errors) + args.prioritized_eps
replay_buffer.update_priorities(
batch_idxes, new_priorities
)
# Write summary
mean_sigma = f_mean_sigma()
im_stats.add_all_summary(writer,
[action, rew, np.mean(td_errors),
np.mean(huber_loss)] + mean_sigma,
num_iters)
# Update target network.
if num_iters % args.target_update_freq == 0:
update_target()
if start_time is not None:
steps_per_iter.update(info['steps'] - start_steps)
iteration_time_est.update(time.time() - start_time)
start_time, start_steps = time.time(), info["steps"]
# Save the model and training state.
if num_iters > 0 and (num_iters % args.save_freq == 0 or info[
"steps"] > args.num_steps):
maybe_save_model(savedir, container, {
'replay_buffer': replay_buffer,
'num_iters': num_iters,
'monitor_state': monitored_env.get_state()
})
if info["steps"] > args.num_steps:
break
if done:
steps_left = args.num_steps - info["steps"]
completion = np.round(info["steps"] / args.num_steps, 1)
mean_ep_reward = np.mean(info["rewards"][-100:])
logger.record_tabular("% completion", completion)
logger.record_tabular("steps", info["steps"])
logger.record_tabular("iters", num_iters)
logger.record_tabular("episodes", len(info["rewards"]))
logger.record_tabular("reward (100 epi mean)",
np.mean(info["rewards"][-100:]))
if not args.noisy:
logger.record_tabular("exploration",
exploration.value(num_iters))
if args.prioritized:
logger.record_tabular("max priority",
replay_buffer._max_priority)
fps_estimate = (
float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
if steps_per_iter._value is not None else "calculating:")
logger.dump_tabular()
logger.log()
logger.log("ETA: " +
pretty_eta(int(steps_left / fps_estimate)))
logger.log()
# add summary for one episode
ep_stats.add_all_summary(writer, [mean_ep_reward, ep_length],
num_iters)
ep_length = 0
|
py
|
1a5aecea0642a7c9a25aac8bac76304075b0df1b
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MonitorClientConfiguration(Configuration):
"""Configuration for MonitorClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription Id.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(MonitorClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2017-04-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-eventhub/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
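# Hedged usage sketch (azure-identity is assumed to be installed; the subscription id is a placeholder):
#
# from azure.identity import DefaultAzureCredential
# config = MonitorClientConfiguration(
#     credential=DefaultAzureCredential(),
#     subscription_id="00000000-0000-0000-0000-000000000000",
# )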
|
py
|
1a5aed44c3b3273dca7b191e35e20c19c93ee726
|
from django.db import models
from django import forms
class KhufuForm(models.Model):
title = models.CharField(max_length=100)
message = models.CharField(max_length=100)
def __unicode__(self):
return self.title
class Entry(models.Model):
title = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published',blank=True)
#khufuforms = models.ForeignKey(KhufuForm)
slug = models.SlugField(
unique_for_date='pub_date',
help_text='Automatically built from the title.'
)
class Meta:
ordering = ('-pub_date',)
# get_latest_by = 'pub_date'
# db_table = "blog_entry"
def get_absolute_url(self):
return "/hello/%s/%s/" % (self.pub_date.strftime("%Y/%b/%d").lower(), self.title)
def __unicode__(self):
return self.title
# Create your models here.
|
py
|
1a5aed6b8cfc82d94d708a80e77fd8d307c2a606
|
import logging
from flask import Flask
from flask_appbuilder import AppBuilder, SQLA
logging.basicConfig(format="%(asctime)s:%(levelname)s:%(name)s:%(message)s")
logging.getLogger().setLevel(logging.DEBUG)
app = Flask(__name__)
app.config.from_object("config")
db = SQLA(app)
appbuilder = AppBuilder(app, db.session)
from . import models, views # noqa
|