ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 7df6f2d8d268e5387c0c7bf9dc8a31d906fbc9f8 | # oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2019, Gluu
#
# Author: Jose Gonzalez
from java.util import Collections, HashMap, HashSet, ArrayList, Arrays, Date
from java.nio.charset import Charset
from org.apache.http.params import CoreConnectionPNames
from org.oxauth.persistence.model.configuration import GluuConfiguration
from org.gluu.oxauth.security import Identity
from org.gluu.oxauth.service import AuthenticationService, UserService, EncryptionService, AppInitializer
from org.gluu.oxauth.service.custom import CustomScriptService
from org.gluu.oxauth.service.net import HttpService
from org.gluu.oxauth.util import ServerUtil
from org.gluu.model import SimpleCustomProperty
from org.gluu.model.casa import ApplicationConfiguration
from org.gluu.model.custom.script import CustomScriptType
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.util import StringHelper
try:
import json
except ImportError:
import simplejson as json
import sys
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
self.ACR_SG = "super_gluu"
self.ACR_SMS = "twilio_sms"
self.ACR_OTP = "otp"
self.ACR_U2F = "u2f"
self.modulePrefix = "casa-external_"
def init(self, configurationAttributes):
print "Casa. init called"
self.authenticators = {}
self.uid_attr = self.getLocalPrimaryKey()
custScriptService = CdiUtil.bean(CustomScriptService)
self.scriptsList = custScriptService.findCustomScripts(Collections.singletonList(CustomScriptType.PERSON_AUTHENTICATION), "oxConfigurationProperty", "displayName", "oxEnabled", "oxLevel")
dynamicMethods = self.computeMethods(self.scriptsList)
if len(dynamicMethods) > 0:
print "Casa. init. Loading scripts for dynamic modules: %s" % dynamicMethods
for acr in dynamicMethods:
moduleName = self.modulePrefix + acr
try:
external = __import__(moduleName, globals(), locals(), ["PersonAuthentication"], -1)
module = external.PersonAuthentication(self.currentTimeMillis)
print "Casa. init. Got dynamic module for acr %s" % acr
configAttrs = self.getConfigurationAttributes(acr, self.scriptsList)
if acr == self.ACR_U2F:
u2f_application_id = configurationAttributes.get("u2f_app_id").getValue2()
configAttrs.put("u2f_application_id", SimpleCustomProperty("u2f_application_id", u2f_application_id))
elif acr == self.ACR_SG:
application_id = configurationAttributes.get("supergluu_app_id").getValue2()
configAttrs.put("application_id", SimpleCustomProperty("application_id", application_id))
if module.init(configAttrs):
module.configAttrs = configAttrs
self.authenticators[acr] = module
else:
print "Casa. init. Call to init in module '%s' returned False" % moduleName
except:
print "Casa. init. Failed to load module %s" % moduleName
print "Exception: ", sys.exc_info()[1]
mobile_methods = configurationAttributes.get("mobile_methods")
self.mobile_methods = [] if mobile_methods == None else StringHelper.split(mobile_methods.getValue2(), ",")
print "Casa. init. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Casa. Destroyed called"
return True
def getApiVersion(self):
return 2
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
print "Casa. isValidAuthenticationMethod called"
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
print "Casa. authenticate %s" % str(step)
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
identity = CdiUtil.bean(Identity)
if step == 1:
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
foundUser = userService.getUserByAttribute(self.uid_attr, user_name)
#foundUser = userService.getUser(user_name)
if foundUser == None:
print "Casa. authenticate for step 1. Unknown username"
else:
platform_data = self.parsePlatformData(requestParameters)
mfaOff = foundUser.getAttribute("oxPreferredMethod") == None
logged_in = False
if mfaOff:
logged_in = authenticationService.authenticate(user_name, user_password)
else:
acr = self.getSuitableAcr(foundUser, platform_data)
if acr != None:
module = self.authenticators[acr]
logged_in = module.authenticate(module.configAttrs, requestParameters, step)
if logged_in:
foundUser = authenticationService.getAuthenticatedUser()
if foundUser == None:
print "Casa. authenticate for step 1. Cannot retrieve logged user"
else:
if mfaOff:
identity.setWorkingParameter("skip2FA", True)
else:
#Determine whether to skip 2FA based on policy defined (global or user custom)
skip2FA = self.determineSkip2FA(userService, identity, foundUser, platform_data)
identity.setWorkingParameter("skip2FA", skip2FA)
identity.setWorkingParameter("ACR", acr)
return True
else:
print "Casa. authenticate for step 1 was not successful"
return False
else:
user = authenticationService.getAuthenticatedUser()
if user == None:
print "Casa. authenticate for step 2. Cannot retrieve logged user"
return False
#see casa.xhtml
alter = ServerUtil.getFirstValue(requestParameters, "alternativeMethod")
if alter != None:
#bypass the rest of this step if an alternative method was provided. Current step will be retried (see getNextStep)
self.simulateFirstStep(requestParameters, alter)
return True
session_attributes = identity.getSessionId().getSessionAttributes()
acr = session_attributes.get("ACR")
#this working parameter is used in casa.xhtml
identity.setWorkingParameter("methods", ArrayList(self.getAvailMethodsUser(user, acr)))
success = False
if acr in self.authenticators:
module = self.authenticators[acr]
success = module.authenticate(module.configAttrs, requestParameters, step)
#Update the list of trusted devices if 2fa passed
if success:
print "Casa. authenticate. 2FA authentication was successful"
tdi = session_attributes.get("trustedDevicesInfo")
if tdi == None:
print "Casa. authenticate. List of user's trusted devices was not updated"
else:
user.setAttribute("oxTrustedDevicesInfo", tdi)
userService.updateUser(user)
else:
print "Casa. authenticate. 2FA authentication failed"
return success
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
print "Casa. prepareForStep %s" % str(step)
if step == 1:
return True
else:
identity = CdiUtil.bean(Identity)
session_attributes = identity.getSessionId().getSessionAttributes()
authenticationService = CdiUtil.bean(AuthenticationService)
user = authenticationService.getAuthenticatedUser()
if user == None:
print "Casa. prepareForStep. Cannot retrieve logged user"
return False
acr = session_attributes.get("ACR")
print "Casa. prepareForStep. ACR = %s" % acr
identity.setWorkingParameter("methods", ArrayList(self.getAvailMethodsUser(user, acr)))
if acr in self.authenticators:
module = self.authenticators[acr]
return module.prepareForStep(module.configAttrs, requestParameters, step)
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
print "Casa. getExtraParametersForStep %s" % str(step)
if step > 1:
list = ArrayList()
acr = CdiUtil.bean(Identity).getWorkingParameter("ACR")
if acr in self.authenticators:
module = self.authenticators[acr]
params = module.getExtraParametersForStep(module.configAttrs, step)
if params != None:
list.addAll(params)
list.addAll(Arrays.asList("ACR", "methods", "trustedDevicesInfo"))
print "extras are %s" % list
return list
return None
def getCountAuthenticationSteps(self, configurationAttributes):
print "Casa. getCountAuthenticationSteps called"
if CdiUtil.bean(Identity).getWorkingParameter("skip2FA"):
return 1
acr = CdiUtil.bean(Identity).getWorkingParameter("ACR")
if acr in self.authenticators:
module = self.authenticators[acr]
return module.getCountAuthenticationSteps(module.configAttrs)
else:
return 2
print "Casa. getCountAuthenticationSteps. Could not determine the step count for acr %s" % acr
def getPageForStep(self, configurationAttributes, step):
print "Casa. getPageForStep called %s" % str(step)
if step > 1:
acr = CdiUtil.bean(Identity).getWorkingParameter("ACR")
if acr in self.authenticators:
module = self.authenticators[acr]
page = module.getPageForStep(module.configAttrs, step)
else:
page=None
return page
return ""
def getNextStep(self, configurationAttributes, requestParameters, step):
print "Casa. getNextStep called %s" % str(step)
if step > 1:
acr = ServerUtil.getFirstValue(requestParameters, "alternativeMethod")
if acr != None:
print "Casa. getNextStep. Use alternative method %s" % acr
CdiUtil.bean(Identity).setWorkingParameter("ACR", acr)
#retry step with different acr
return 2
return -1
def logout(self, configurationAttributes, requestParameters):
print "Casa. logout called"
return True
# Miscellaneous
def getLocalPrimaryKey(self):
entryManager = CdiUtil.bean(AppInitializer).createPersistenceEntryManager()
config = GluuConfiguration()
config = entryManager.find(config.getClass(), "ou=configuration,o=gluu")
#Pick (one) attribute where user id is stored (e.g. uid/mail)
uid_attr = config.getOxIDPAuthentication().get(0).getConfig().getPrimaryKey()
print "Casa. init. uid attribute is '%s'" % uid_attr
return uid_attr
def getSettings(self):
entryManager = CdiUtil.bean(AppInitializer).createPersistenceEntryManager()
config = ApplicationConfiguration()
config = entryManager.find(config.getClass(), "ou=casa,ou=configuration,o=gluu")
settings = None
try:
settings = json.loads(config.getSettings())
except:
print "Casa. getSettings. Failed to parse casa settings from DB"
return settings
def computeMethods(self, scriptList):
methods = []
mapping = {}
cmConfigs = self.getSettings()
if cmConfigs != None and 'acr_plugin_mapping' in cmConfigs:
mapping = cmConfigs['acr_plugin_mapping']
for m in mapping:
for customScript in scriptList:
if customScript.getName() == m and customScript.isEnabled():
methods.append(m)
print "Casa. computeMethods. %s" % methods
return methods
def getConfigurationAttributes(self, acr, scriptsList):
configMap = HashMap()
for customScript in scriptsList:
if customScript.getName() == acr and customScript.isEnabled():
for prop in customScript.getConfigurationProperties():
configMap.put(prop.getValue1(), SimpleCustomProperty(prop.getValue1(), prop.getValue2()))
print "Casa. getConfigurationAttributes. %d configuration properties were found for %s" % (configMap.size(), acr)
return configMap
def getAvailMethodsUser(self, user, skip=None):
methods = HashSet()
for method in self.authenticators:
try:
module = self.authenticators[method]
if module.hasEnrollments(module.configAttrs, user):
methods.add(method)
except:
print "Casa. getAvailMethodsUser. hasEnrollments call could not be issued for %s module" % method
try:
if skip != None:
# skip is guaranteed to be a member of methods (if hasEnrollments routines are properly implemented).
# A call to remove strangely crashes when skip is absent
methods.remove(skip)
except:
print "Casa. getAvailMethodsUser. methods list does not contain %s" % skip
print "Casa. getAvailMethodsUser %s" % methods.toString()
return methods
def simulateFirstStep(self, requestParameters, acr):
#To simulate 1st step, there is no need to call:
# getPageforstep (no need as user/pwd won't be shown again)
# isValidAuthenticationMethod (by restriction, it returns True)
# prepareForStep (by restriction, it returns True)
# getExtraParametersForStep (by restriction, it returns None)
print "Casa. simulateFirstStep. Calling authenticate (step 1) for %s module" % acr
if acr in self.authenticators:
module = self.authenticators[acr]
auth = module.authenticate(module.configAttrs, requestParameters, 1)
print "Casa. simulateFirstStep. returned value was %s" % auth
# 2FA policy enforcement
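# The 2FA policy evaluated below is a comma-separated combination of:
#   EVERY_LOGIN       - always require a second factor
#   LOCATION_UNKNOWN  - skip 2FA only if the request's city already appears in oxTrustedDevicesInfo
#   DEVICE_UNKNOWN    - skip 2FA only if the browser/OS combination is already trusted
#   CUSTOM            - defer to the per-user oxStrongAuthPolicy attribute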
def parsePlatformData(self, requestParameters):
try:
#Find device info passed in HTTP request params (see index.xhtml)
platform = ServerUtil.getFirstValue(requestParameters, "loginForm:platform")
deviceInf = json.loads(platform)
except:
print "Casa. parsePlatformData. Error parsing platform data"
deviceInf = None
return deviceInf
def getSuitableAcr(self, user, deviceInf):
onMobile = deviceInf != None and 'isMobile' in deviceInf and deviceInf['isMobile']
id = user.getUserId()
strongest = -1
acr = None
user_methods = self.getAvailMethodsUser(user)
for s in self.scriptsList:
name = s.getName()
if user_methods.contains(name) and name in self.authenticators and s.getLevel() > strongest and (not onMobile or name in self.mobile_methods):
acr = name
strongest = s.getLevel()
print "Casa. getSuitableAcr. On mobile = %s" % onMobile
if acr == None and onMobile:
print "Casa. getSuitableAcr. No mobile-friendly authentication method available for user %s" % id
# user_methods is not empty when this function is called, so just pick any
acr = user_methods.get(0)
print "Casa. getSuitableAcr. %s was selected for user %s" % (acr, id)
return acr
def determineSkip2FA(self, userService, identity, foundUser, deviceInf):
cmConfigs = self.getSettings()
if cmConfigs == None:
print "Casa. determineSkip2FA. Failed to read policy_2fa"
return False
missing = False
if not 'plugins_settings' in cmConfigs:
missing = True
elif not 'strong-authn-settings' in cmConfigs['plugins_settings']:
missing = True
else:
cmConfigs = cmConfigs['plugins_settings']['strong-authn-settings']
policy2FA = 'EVERY_LOGIN'
if not missing and 'policy_2fa' in cmConfigs:
policy2FA = ','.join(cmConfigs['policy_2fa'])
print "Casa. determineSkip2FA with general policy %s" % policy2FA
policy2FA += ','
skip2FA = False
if 'CUSTOM,' in policy2FA:
#read setting from user profile
policy = foundUser.getAttribute("oxStrongAuthPolicy")
if policy == None:
policy = 'EVERY_LOGIN,'
else:
policy = policy.upper() + ','
print "Casa. determineSkip2FA. Using user's enforcement policy %s" % policy
else:
#If it's not custom, then apply the global setting admin defined
policy = policy2FA
if not 'EVERY_LOGIN,' in policy:
locationCriterion = 'LOCATION_UNKNOWN,' in policy
deviceCriterion = 'DEVICE_UNKNOWN,' in policy
if locationCriterion or deviceCriterion:
if deviceInf == None:
print "Casa. determineSkip2FA. No user device data. Forcing 2FA to take place..."
else:
skip2FA = self.process2FAPolicy(identity, foundUser, deviceInf, locationCriterion, deviceCriterion)
if skip2FA:
print "Casa. determineSkip2FA. Second factor is skipped"
#Update attribute if authentication will not have second step
devInf = identity.getWorkingParameter("trustedDevicesInfo")
if devInf != None:
foundUser.setAttribute("oxTrustedDevicesInfo", devInf)
userService.updateUser(foundUser)
else:
print "Casa. determineSkip2FA. Unknown %s policy: cannot skip 2FA" % policy
return skip2FA
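# oxTrustedDevicesInfo stores an encrypted JSON list of trusted devices. Each entry has the
# shape built further below (illustrative):
#   {"browser": {"name": "...", "version": "..."},
#    "os": {"family": "...", "version": "..."},
#    "addedOn": <epoch millis>,
#    "origins": [{"city": "...", "country": "...", "timestamp": <epoch millis>}]}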
def process2FAPolicy(self, identity, foundUser, deviceInf, locationCriterion, deviceCriterion):
skip2FA = False
#Retrieve user's devices info
devicesInfo = foundUser.getAttribute("oxTrustedDevicesInfo")
#do geolocation
geodata = self.getGeolocation(identity)
if geodata == None:
print "Casa. process2FAPolicy: Geolocation data not obtained. 2FA skipping based on location cannot take place"
try:
encService = CdiUtil.bean(EncryptionService)
if devicesInfo == None:
print "Casa. process2FAPolicy: There are no trusted devices for user yet"
#Simulate empty list
devicesInfo = "[]"
else:
devicesInfo = encService.decrypt(devicesInfo)
devicesInfo = json.loads(devicesInfo)
partialMatch = False
idx = 0
#Try to find a match for device only
for device in devicesInfo:
partialMatch = device['browser']['name']==deviceInf['name'] and device['os']['version']==deviceInf['os']['version'] and device['os']['family']==deviceInf['os']['family']
if partialMatch:
break
idx+=1
matchFound = False
#At least one of locationCriterion or deviceCriterion is True
if locationCriterion and not deviceCriterion:
#this check makes sense if there is city data only
if geodata!=None:
for device in devicesInfo:
#Search all registered cities that are found in trusted devices
for origin in device['origins']:
matchFound = matchFound or origin['city']==geodata['city']
elif partialMatch:
#In this branch deviceCriterion is True
if not locationCriterion:
matchFound = True
elif geodata!=None:
for origin in devicesInfo[idx]['origins']:
matchFound = matchFound or origin['city']==geodata['city']
skip2FA = matchFound
now = Date().getTime()
#Update attribute oxTrustedDevicesInfo accordingly
if partialMatch:
#Update an existing record (update timestamp in city, or else add it)
if geodata != None:
partialMatch = False
idxCity = 0
for origin in devicesInfo[idx]['origins']:
partialMatch = origin['city']==geodata['city']
if partialMatch:
break
idxCity+=1
if partialMatch:
devicesInfo[idx]['origins'][idxCity]['timestamp'] = now
else:
devicesInfo[idx]['origins'].append({"city": geodata['city'], "country": geodata['country'], "timestamp": now})
else:
#Create a new entry
browser = {"name": deviceInf['name'], "version": deviceInf['version']}
os = {"family": deviceInf['os']['family'], "version": deviceInf['os']['version']}
if geodata == None:
origins = []
else:
origins = [{"city": geodata['city'], "country": geodata['country'], "timestamp": now}]
obj = {"browser": browser, "os": os, "addedOn": now, "origins": origins}
devicesInfo.append(obj)
enc = json.dumps(devicesInfo, separators=(',',':'))
enc = encService.encrypt(enc)
identity.setWorkingParameter("trustedDevicesInfo", enc)
except:
print "Casa. process2FAPolicy. Error!", sys.exc_info()[1]
return skip2FA
def getGeolocation(self, identity):
session_attributes = identity.getSessionId().getSessionAttributes()
if session_attributes.containsKey("remote_ip"):
remote_ip = session_attributes.get("remote_ip")
if StringHelper.isNotEmpty(remote_ip):
httpService = CdiUtil.bean(HttpService)
http_client = httpService.getHttpsClient()
http_client_params = http_client.getParams()
http_client_params.setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 4 * 1000)
geolocation_service_url = "http://ip-api.com/json/%s?fields=country,city,status,message" % remote_ip
geolocation_service_headers = { "Accept" : "application/json" }
try:
http_service_response = httpService.executeGet(http_client, geolocation_service_url, geolocation_service_headers)
http_response = http_service_response.getHttpResponse()
except:
print "Casa. Determine remote location. Exception: ", sys.exc_info()[1]
return None
try:
if not httpService.isResponseStastusCodeOk(http_response):
print "Casa. Determine remote location. Get non 200 OK response from server:", str(http_response.getStatusLine().getStatusCode())
httpService.consume(http_response)
return None
response_bytes = httpService.getResponseContent(http_response)
response_string = httpService.convertEntityToString(response_bytes, Charset.forName("UTF-8"))
httpService.consume(http_response)
finally:
http_service_response.closeConnection()
if response_string == None:
print "Casa. Determine remote location. Get empty response from location server"
return None
response = json.loads(response_string)
if not StringHelper.equalsIgnoreCase(response['status'], "success"):
print "Casa. Determine remote location. Get response with status: '%s'" % response['status']
return None
return response
return None
|
py | 7df6f4ce2ed2cb871b8c1967f11700413e065e95 | """Service calls related dependencies for LCN component."""
import pypck
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS,
CONF_BRIGHTNESS,
CONF_HOST,
CONF_STATE,
CONF_UNIT_OF_MEASUREMENT,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType, ServiceCallType
from .const import (
CONF_KEYS,
CONF_LED,
CONF_OUTPUT,
CONF_PCK,
CONF_RELVARREF,
CONF_ROW,
CONF_SETPOINT,
CONF_TABLE,
CONF_TEXT,
CONF_TIME,
CONF_TIME_UNIT,
CONF_TRANSITION,
CONF_VALUE,
CONF_VARIABLE,
DOMAIN,
LED_PORTS,
LED_STATUS,
OUTPUT_PORTS,
RELVARREF,
SENDKEYCOMMANDS,
SETPOINTS,
THRESHOLDS,
TIME_UNITS,
VAR_UNITS,
VARIABLES,
)
from .helpers import (
DeviceConnectionType,
get_device_connection,
is_address,
is_states_string,
)
class LcnServiceCall:
"""Parent class for all LCN service calls."""
schema = vol.Schema({vol.Required(CONF_ADDRESS): is_address})
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize service call."""
self.hass = hass
def get_device_connection(self, service: ServiceCallType) -> DeviceConnectionType:
"""Get address connection object."""
address, host_name = service.data[CONF_ADDRESS]
for config_entry in self.hass.config_entries.async_entries(DOMAIN):
if config_entry.data[CONF_HOST] == host_name:
device_connection = get_device_connection(
self.hass, address, config_entry
)
if device_connection is None:
raise ValueError("Wrong address.")
return device_connection
raise ValueError("Invalid host name.")
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
raise NotImplementedError
class OutputAbs(LcnServiceCall):
"""Set absolute brightness of output port in percent."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[service.data[CONF_OUTPUT]]
brightness = service.data[CONF_BRIGHTNESS]
transition = pypck.lcn_defs.time_to_ramp_value(
service.data[CONF_TRANSITION] * 1000
)
device_connection = self.get_device_connection(service)
await device_connection.dim_output(output.value, brightness, transition)
class OutputRel(LcnServiceCall):
"""Set relative brightness of output port in percent."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=-100, max=100)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[service.data[CONF_OUTPUT]]
brightness = service.data[CONF_BRIGHTNESS]
device_connection = self.get_device_connection(service)
await device_connection.rel_output(output.value, brightness)
class OutputToggle(LcnServiceCall):
"""Toggle output port."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[service.data[CONF_OUTPUT]]
transition = pypck.lcn_defs.time_to_ramp_value(
service.data[CONF_TRANSITION] * 1000
)
device_connection = self.get_device_connection(service)
await device_connection.toggle_output(output.value, transition)
class Relays(LcnServiceCall):
"""Set the relays status."""
schema = LcnServiceCall.schema.extend({vol.Required(CONF_STATE): is_states_string})
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
states = [
pypck.lcn_defs.RelayStateModifier[state]
for state in service.data[CONF_STATE]
]
device_connection = self.get_device_connection(service)
await device_connection.control_relays(states)
class Led(LcnServiceCall):
"""Set the led state."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_LED): vol.All(vol.Upper, vol.In(LED_PORTS)),
vol.Required(CONF_STATE): vol.All(vol.Upper, vol.In(LED_STATUS)),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
led = pypck.lcn_defs.LedPort[service.data[CONF_LED]]
led_state = pypck.lcn_defs.LedStatus[service.data[CONF_STATE]]
device_connection = self.get_device_connection(service)
await device_connection.control_led(led, led_state)
class VarAbs(LcnServiceCall):
"""Set absolute value of a variable or setpoint.
Variable has to be set as counter!
Regulator setpoints can also be set using R1VARSETPOINT, R2VARSETPOINT.
"""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_VARIABLE): vol.All(
vol.Upper, vol.In(VARIABLES + SETPOINTS)
),
vol.Optional(CONF_VALUE, default=0): cv.positive_int,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
var = pypck.lcn_defs.Var[service.data[CONF_VARIABLE]]
value = service.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(service.data[CONF_UNIT_OF_MEASUREMENT])
device_connection = self.get_device_connection(service)
await device_connection.var_abs(var, value, unit)
class VarReset(LcnServiceCall):
"""Reset value of variable or setpoint."""
schema = LcnServiceCall.schema.extend(
{vol.Required(CONF_VARIABLE): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS))}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
var = pypck.lcn_defs.Var[service.data[CONF_VARIABLE]]
device_connection = self.get_device_connection(service)
await device_connection.var_reset(var)
class VarRel(LcnServiceCall):
"""Shift value of a variable, setpoint or threshold."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_VARIABLE): vol.All(
vol.Upper, vol.In(VARIABLES + SETPOINTS + THRESHOLDS)
),
vol.Optional(CONF_VALUE, default=0): int,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
vol.Optional(CONF_RELVARREF, default="current"): vol.All(
vol.Upper, vol.In(RELVARREF)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
var = pypck.lcn_defs.Var[service.data[CONF_VARIABLE]]
value = service.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(service.data[CONF_UNIT_OF_MEASUREMENT])
value_ref = pypck.lcn_defs.RelVarRef[service.data[CONF_RELVARREF]]
device_connection = self.get_device_connection(service)
await device_connection.var_rel(var, value, unit, value_ref)
class LockRegulator(LcnServiceCall):
"""Locks a regulator setpoint."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(SETPOINTS)),
vol.Optional(CONF_STATE, default=False): bool,
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
setpoint = pypck.lcn_defs.Var[service.data[CONF_SETPOINT]]
state = service.data[CONF_STATE]
reg_id = pypck.lcn_defs.Var.to_set_point_id(setpoint)
device_connection = self.get_device_connection(service)
await device_connection.lock_regulator(reg_id, state)
class SendKeys(LcnServiceCall):
"""Sends keys (which executes bound commands)."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_KEYS): vol.All(
vol.Upper, cv.matches_regex(r"^([A-D][1-8])+$")
),
vol.Optional(CONF_STATE, default="hit"): vol.All(
vol.Upper, vol.In(SENDKEYCOMMANDS)
),
vol.Optional(CONF_TIME, default=0): cv.positive_int,
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.All(
vol.Upper, vol.In(TIME_UNITS)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
device_connection = self.get_device_connection(service)
keys = [[False] * 8 for i in range(4)]
key_strings = zip(service.data[CONF_KEYS][::2], service.data[CONF_KEYS][1::2])
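# e.g. CONF_KEYS == "A1C3" yields the pairs ("A", "1") and ("C", "3"), which set
# keys[0][0] and keys[2][2] to True (tables A-D map to rows 0-3, keys 1-8 to columns 0-7).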
for table, key in key_strings:
table_id = ord(table) - 65
key_id = int(key) - 1
keys[table_id][key_id] = True
delay_time = service.data[CONF_TIME]
if delay_time != 0:
hit = pypck.lcn_defs.SendKeyCommand.HIT
if pypck.lcn_defs.SendKeyCommand[service.data[CONF_STATE]] != hit:
raise ValueError(
"Only hit command is allowed when sending deferred keys."
)
delay_unit = pypck.lcn_defs.TimeUnit.parse(service.data[CONF_TIME_UNIT])
await device_connection.send_keys_hit_deferred(keys, delay_time, delay_unit)
else:
state = pypck.lcn_defs.SendKeyCommand[service.data[CONF_STATE]]
await device_connection.send_keys(keys, state)
class LockKeys(LcnServiceCall):
"""Lock keys."""
schema = LcnServiceCall.schema.extend(
{
vol.Optional(CONF_TABLE, default="a"): vol.All(
vol.Upper, cv.matches_regex(r"^[A-D]$")
),
vol.Required(CONF_STATE): is_states_string,
vol.Optional(CONF_TIME, default=0): cv.positive_int,
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.All(
vol.Upper, vol.In(TIME_UNITS)
),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
device_connection = self.get_device_connection(service)
states = [
pypck.lcn_defs.KeyLockStateModifier[state]
for state in service.data[CONF_STATE]
]
table_id = ord(service.data[CONF_TABLE]) - 65
delay_time = service.data[CONF_TIME]
if delay_time != 0:
if table_id != 0:
raise ValueError(
"Only table A is allowed when locking keys for a specific time."
)
delay_unit = pypck.lcn_defs.TimeUnit.parse(service.data[CONF_TIME_UNIT])
await device_connection.lock_keys_tab_a_temporary(
delay_time, delay_unit, states
)
else:
await device_connection.lock_keys(table_id, states)
handler = device_connection.status_requests_handler
await handler.request_status_locked_keys_timeout()
class DynText(LcnServiceCall):
"""Send dynamic text to LCN-GTxD displays."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_ROW): vol.All(int, vol.Range(min=1, max=4)),
vol.Required(CONF_TEXT): vol.All(str, vol.Length(max=60)),
}
)
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
row_id = service.data[CONF_ROW] - 1
text = service.data[CONF_TEXT]
device_connection = self.get_device_connection(service)
await device_connection.dyn_text(row_id, text)
class Pck(LcnServiceCall):
"""Send arbitrary PCK command."""
schema = LcnServiceCall.schema.extend({vol.Required(CONF_PCK): str})
async def async_call_service(self, service: ServiceCallType) -> None:
"""Execute service call."""
pck = service.data[CONF_PCK]
device_connection = self.get_device_connection(service)
await device_connection.pck(pck)
SERVICES = (
("output_abs", OutputAbs),
("output_rel", OutputRel),
("output_toggle", OutputToggle),
("relays", Relays),
("var_abs", VarAbs),
("var_reset", VarReset),
("var_rel", VarRel),
("lock_regulator", LockRegulator),
("led", Led),
("send_keys", SendKeys),
("lock_keys", LockKeys),
("dyn_text", DynText),
("pck", Pck),
)
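# Illustrative sketch (not part of this module): one way the SERVICES tuple could be wired
# into Home Assistant; the helper name below is hypothetical.
#
#   async def async_setup_services(hass: HomeAssistantType) -> None:
#       """Register one handler per (name, class) pair in SERVICES."""
#       for name, service_cls in SERVICES:
#           service = service_cls(hass)
#           hass.services.async_register(
#               DOMAIN, name, service.async_call_service, schema=service.schema
#           )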
|
py | 7df6f5c4443dd80238d0eb5a905cd3fce4e8ee5b |
class Pedido(object):
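"""Order ("pedido") record holding the customer name, address, and order notes."""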
def __init__(self, nome, endereco, observacoes):
self.nome = nome
self.endereco = endereco
self.observacoes = observacoes
|
py | 7df6f7b75c7b13581d4d10d7cf4117d43363a2b5 | import json
import os.path
import glob as glob
import numpy as np
import jsonlines
from tabulate import tabulate
import re
from tqdm import tqdm
import sqlite3
import editdistance
from collections import defaultdict
import pprint
from itertools import product
from copy import deepcopy
def set_seed(val):
np.random.seed(val)
set_seed(42)
timepat = re.compile(r"\d{1,2}[:]\d{1,2}")
pricepat = re.compile(r"\d{1,3}[.]\d{1,2}")
fin = open("mapping.pair","r")
replacements = []
for line in fin.readlines():
tok_from, tok_to = line.replace('\n', '').split('\t')
replacements.append((' ' + tok_from + ' ', ' ' + tok_to + ' '))
def insertSpace(token, text):
sidx = 0
while True:
sidx = text.find(token, sidx)
if sidx == -1:
break
if sidx + 1 < len(text) and re.match('[0-9]', text[sidx - 1]) and \
re.match('[0-9]', text[sidx + 1]):
sidx += 1
continue
if text[sidx - 1] != ' ':
text = text[:sidx] + ' ' + text[sidx:]
sidx += 1
if sidx + len(token) < len(text) and text[sidx + len(token)] != ' ':
text = text[:sidx + 1] + ' ' + text[sidx + 1:]
sidx += 1
return text
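# e.g. insertSpace('?', "how much?") -> "how much ?"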
def normalize(text):
# lower case every word
text = text.lower()
# replace white spaces in front and end
text = re.sub(r'^\s*|\s*$', '', text)
# hotel domain pfb30
text = re.sub(r"b&b", "bed and breakfast", text)
text = re.sub(r"b and b", "bed and breakfast", text)
text = re.sub(r"gueshouses", "guesthouse", text)
text = re.sub(r"guest house", "guesthouse", text)
text = re.sub(r"rosas bed and breakfast", "rosa s bed and breakfast", text)
text = re.sub(r"el shaddia guesthouse", "el shaddai", text)
# normalize phone number
ms = re.findall('\(?(\d{3})\)?[-.\s]?(\d{3})[-.\s]?(\d{4,5})', text)
if ms:
sidx = 0
for m in ms:
sidx = text.find(m[0], sidx)
if text[sidx - 1] == '(':
sidx -= 1
eidx = text.find(m[-1], sidx) + len(m[-1])
text = text.replace(text[sidx:eidx], ''.join(m))
# normalize postcode
ms = re.findall('([a-z]{1}[\. ]?[a-z]{1}[\. ]?\d{1,2}[, ]+\d{1}[\. ]?[a-z]{1}[\. ]?[a-z]{1}|[a-z]{2}\d{2}[a-z]{2})',
text)
if ms:
sidx = 0
for m in ms:
sidx = text.find(m, sidx)
eidx = sidx + len(m)
text = text[:sidx] + re.sub('[,\. ]', '', m) + text[eidx:]
# weird unicode bug
text = re.sub(u"(\u2018|\u2019)", "'", text)
# replace time and and price
# text = re.sub(timepat, ' [value_time] ', text)
# text = re.sub(pricepat, ' [value_price] ', text)
#text = re.sub(pricepat2, '[value_price]', text)
# replace st.
text = text.replace(';', ',')
text = re.sub('$\/', '', text)
text = text.replace('/', ' and ')
# replace other special characters
text = text.replace('-', ' ')
text = re.sub('[\":\<>@\(\)]', '', text)
# insert white space before and after tokens:
for token in ['?', '.', ',', '!']:
text = insertSpace(token, text)
# insert white space for 's
text = insertSpace('\'s', text)
# replace it's, does't, you'd ... etc
text = re.sub('^\'', '', text)
text = re.sub('\'$', '', text)
text = re.sub('\'\s', ' ', text)
text = re.sub('\s\'', ' ', text)
for fromx, tox in replacements:
text = ' ' + text + ' '
text = text.replace(fromx, tox)[1:-1]
# remove multiple spaces
text = re.sub(' +', ' ', text)
# concatenate numbers
tmp = text
tokens = text.split()
i = 1
while i < len(tokens):
if re.match(u'^\d+$', tokens[i]) and \
re.match(u'\d+$', tokens[i - 1]):
tokens[i - 1] += tokens[i]
del tokens[i]
else:
i += 1
text = ' '.join(tokens)
return text
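# Illustrative (exact output also depends on the mapping.pair replacements):
#   normalize("Guest house near Christ's College?") -> "guesthouse near christ college ?"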
def get_splits(data,test_split,val_split):
train = {}
valid = {}
test = {}
for k, v in data.items():
if(k in test_split):
test[k] = v
elif(k in val_split):
valid[k] = v
else:
train[k] = v
return train, valid, test
def substringSieve(string_list):
string_list.sort(key=lambda s: len(s), reverse=True)
out = []
for s in string_list:
if not any([s in o for o in out]):
out.append(s)
return out
def to_query(domain, dic, reqt):
if reqt:
q = f"SELECT {','.join(reqt)} FROM {domain} where"
else:
q = f"SELECT * FROM {domain} where"
for k,v in dic.items():
if v == "swimmingpool": v = "swimming pool"
if v == "nightclub": v = "night club"
if v == "the golden curry": v = "golden curry"
if v == "mutliple sports": v = "multiple sports"
if v == "the cambridge chop house": v = "cambridge chop house"
if v == "the fitzwilliam museum": v = "fitzwilliam museum"
if v == "the good luck chinese food takeaway": v = "good luck chinese food takeaway"
if v == "the cherry hinton village centre": v = "cherry hinton village centre"
if v == "the copper kettle": v = "copper kettle"
if v == "pizza express Fen Ditton": v = "pizza express"
if v == "shiraz restaurant": v = "shiraz"
if v == "christ's college": v = "christ college"
if v == "good luck chinese food takeaway": v = "chinese"
if k == 'leaveAt':
hour, minute = v.split(":")
v = int(hour)*60 + int(minute)
q += f' {k}>{v} and'
elif k == 'arriveBy':
hour, minute = v.split(":")
v = int(hour)*60 + int(minute)
q += f' {k}<{v} and'
else:
q += f' {k}="{v}" and'
q = q[:-3] ## this is just to remove the trailing AND from the query
return q
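# Illustrative: to_query("train", {"day": "monday", "leaveAt": "09:00"}, ["trainID", "leaveAt"])
# -> 'SELECT trainID,leaveAt FROM train where day="monday" and leaveAt>540'
# (leaveAt/arriveBy values are compared as minutes since midnight).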
def convert_time_int_to_time(all_rows,clmn):#leaveAt_id,arriveBy_id):
leaveAt_id = -1
arriveBy_id = -1
if('leaveAt' in clmn):
leaveAt_id = clmn.index('leaveAt')
if('arriveBy' in clmn):
arriveBy_id = clmn.index('arriveBy')
if(leaveAt_id!= -1):
for i in range(len(all_rows)):
all_rows[i] = list(all_rows[i])
time = int(all_rows[i][leaveAt_id])
mins=int(time%60)
hours=int(time/60)
if(len(str(hours)))==1: hours = "0"+str(hours)
if(len(str(mins)))==1: mins = "0"+str(mins)
all_rows[i][leaveAt_id] = str(hours)+str(mins)
if(arriveBy_id!= -1):
for i in range(len(all_rows)):
all_rows[i] = list(all_rows[i])
time = int(all_rows[i][arriveBy_id])
mins=int(time%60)
hours=int(time/60)
if(len(str(hours)))==1: hours = "0"+str(hours)
if(len(str(mins)))==1: mins = "0"+str(mins)
all_rows[i][arriveBy_id] = str(hours)+str(mins)
return all_rows
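# e.g. a stored leaveAt of 540 (minutes since midnight) is rendered back as "0900".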
def get_entity_by_type(info,clmn,post_fix="-info"):
### get goal information
query = to_query("train", info, clmn)
database.execute(query)
all_rows = database.fetchall()
all_rows = convert_time_int_to_time(all_rows,clmn)
entity_by_type = {c+post_fix:set() for c in clmn}
for rows in all_rows:
for i,c in enumerate(clmn):
entity_by_type[c+post_fix].add(rows[i])
# entity_by_type["number_of_options"] = [len(all_rows)]
return entity_by_type
def parse_results(dic_data,semi,domain):
book_query = str(domain)
if(domain == "taxi"):
for k, t in semi.items():
if k in ["leaveAt","destination","departure","arriveBy"]:
book_query += f" {k} = '{normalize(t)}'"
if(domain == "hotel"):
if dic_data["day"]== "" or dic_data["stay"]== "" or dic_data["people"]== "":
return None,None
results = None
if(len(dic_data['booked'])>0):
if(domain == "train" and 'trainID' in dic_data['booked'][0]):
book_query += f" trainID = '{normalize(dic_data['booked'][0]['trainID'])}'"
results = dic_data['booked'][0]['reference']
elif(domain != "taxi" and 'name' in dic_data['booked'][0]):
book_query += f" name = '{normalize(dic_data['booked'][0]['name'])}'"
results = dic_data['booked'][0]['reference']
else:
results = dic_data['booked'][0]
elif(domain == "hotel" and semi['name']!="not mentioned"):
book_query += f" name = '{normalize(semi['name'])}'"
for k, t in dic_data.items():
if(k != 'booked'):
book_query += f" {k} = '{normalize(t)}'"
return book_query, results
def check_metadata(dic, state):
for d,v in dic.items():
if(state[d]==0 or state[d]!= v['book']['booked']):
if(len(v['book']['booked'])>0):
state[d] = v['book']['booked']
return parse_results(v['book'],v['semi'],d), state
for k, v1 in v['book'].items():
if(k != 'booked' and v1 != ""):
return parse_results(v['book'],v['semi'],d), state
return (None, None), state
def get_booking_query(text):
domain = {"global":set(),"train":[],"attraction":[],"hotel":[],"restaurant":[],"taxi":[],
"police":[],"hospital":[],"generic":[]}
domain[text.split()[0]] = re.findall(r"'(.*?)'", text)
return domain
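# e.g. get_booking_query("hotel name = 'alexander bed and breakfast' day = 'monday'")
# fills domain["hotel"] with ["alexander bed and breakfast", "monday"].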
def delexer(turns,dictionary,entity_info):
text_delex = normalize(turns['text'])
### first search using SPEECH ACT
for k,v in turns['dialog_act'].items():
for [att,val] in v:
if( (att not in ["none","Ref","People","Ticket"] and val not in ["-","?"]) or (k=="Train-Inform" and att=="Ticket") ):
if(att in ["Leave","Arrive"]):
if( normalize(val).isdecimal() and len(normalize(val))==4):
dictionary[att.lower()].append(normalize(val))
elif(att=="Ticket"):
dictionary[att.lower()].append(normalize(val).replace(" each","").replace(" per person",""))
else:
dictionary[att.lower()].append(normalize(val))
if("street" in val.lower()):
dictionary[att.lower()].append(normalize(val).replace(" street",""))
for k,v in entity_info.items():
for val in v:
if(type(val)==int and str(val) in text_delex):
dictionary[k].append(str(val))
else:
if(normalize(val) in text_delex):
dictionary[k].append(normalize(val))
elif("street" in val.lower() and normalize(val).replace(" street","") in text_delex):
dictionary[k].append(normalize(val).replace(" street",""))
turns['text'] = turns['text'].replace(normalize(val).replace(" street",""),normalize(val))
return text_delex
def query_TRAINID_and_filter(entity_correct_train,r_delex_dictionary):
# 'duration-correct', 'leaveAt-correct']
if "leaveAt-info" in r_delex_dictionary or "leave" in r_delex_dictionary:
if entity_correct_train['leaveAt-correct'] not in r_delex_dictionary.get("leaveAt-info",[]) and entity_correct_train['leaveAt-correct'] not in r_delex_dictionary.get("leave",[]):
del entity_correct_train['leaveAt-correct']
else:
if(entity_correct_train['leaveAt-correct'] in r_delex_dictionary.get("leaveAt-info",[])):
del r_delex_dictionary["leaveAt-info"][r_delex_dictionary["leaveAt-info"].index(entity_correct_train['leaveAt-correct'])]
if(entity_correct_train['leaveAt-correct'] in r_delex_dictionary.get("leave",[])):
del r_delex_dictionary["leave"][r_delex_dictionary["leave"].index(entity_correct_train['leaveAt-correct'])]
else:
del entity_correct_train['leaveAt-correct']
if "arriveBy-info" in r_delex_dictionary or "arrive" in r_delex_dictionary:
if entity_correct_train['arriveBy-correct'] not in r_delex_dictionary.get("arriveBy-info",[]) and entity_correct_train['arriveBy-correct'] not in r_delex_dictionary.get("arrive",[]):
del entity_correct_train['arriveBy-correct']
else:
if(entity_correct_train['arriveBy-correct'] in r_delex_dictionary.get("arriveBy-info",[])):
del r_delex_dictionary["arriveBy-info"][r_delex_dictionary["arriveBy-info"].index(entity_correct_train['arriveBy-correct'])]
if(entity_correct_train['arriveBy-correct'] in r_delex_dictionary.get("arrive",[])):
del r_delex_dictionary["arrive"][r_delex_dictionary["arrive"].index(entity_correct_train['arriveBy-correct'])]
else:
del entity_correct_train['arriveBy-correct']
if "day-info" in r_delex_dictionary or "day" in r_delex_dictionary:
if entity_correct_train['day-correct'] not in r_delex_dictionary.get("day-info",[]) and entity_correct_train['day-correct'] not in r_delex_dictionary.get("day",[]):
del entity_correct_train['day-correct']
else:
if(entity_correct_train['day-correct'] in r_delex_dictionary.get("day-info",[])):
del r_delex_dictionary["day-info"][r_delex_dictionary["day-info"].index(entity_correct_train['day-correct'])]
if(entity_correct_train['day-correct'] in r_delex_dictionary.get("day",[])):
del r_delex_dictionary["day"][r_delex_dictionary["day"].index(entity_correct_train['day-correct'])]
else:
del entity_correct_train['day-correct']
if "departure-info" in r_delex_dictionary or "depart" in r_delex_dictionary:
if entity_correct_train['departure-correct'] not in r_delex_dictionary.get("departure-info",[]) and entity_correct_train['departure-correct'] not in r_delex_dictionary.get("depart",[]):
del entity_correct_train['departure-correct']
else:
if(entity_correct_train['departure-correct'] in r_delex_dictionary.get("departure-info",[])):
del r_delex_dictionary["departure-info"][r_delex_dictionary["departure-info"].index(entity_correct_train['departure-correct'])]
if(entity_correct_train['departure-correct'] in r_delex_dictionary.get("depart",[])):
del r_delex_dictionary["depart"][r_delex_dictionary["depart"].index(entity_correct_train['departure-correct'])]
else:
del entity_correct_train['departure-correct']
if "destination-info" in r_delex_dictionary or "dest" in r_delex_dictionary:
if entity_correct_train['destination-correct'] not in r_delex_dictionary.get("destination-info",[]) and entity_correct_train['destination-correct'] not in r_delex_dictionary.get("dest",[]):
del entity_correct_train['destination-correct']
else:
if(entity_correct_train['destination-correct'] in r_delex_dictionary.get("destination-info",[])):
del r_delex_dictionary["destination-info"][r_delex_dictionary["destination-info"].index(entity_correct_train['destination-correct'])]
if(entity_correct_train['destination-correct'] in r_delex_dictionary.get("dest",[])):
del r_delex_dictionary["dest"][r_delex_dictionary["dest"].index(entity_correct_train['destination-correct'])]
else:
del entity_correct_train['destination-correct']
if "ticket" in r_delex_dictionary:
if entity_correct_train['price-correct'] not in r_delex_dictionary["ticket"]:
del entity_correct_train['price-correct']
else:
del r_delex_dictionary["ticket"][r_delex_dictionary["ticket"].index(entity_correct_train['price-correct'])]
else:
del entity_correct_train['price-correct']
if "time" in r_delex_dictionary:
if entity_correct_train['duration-correct'] not in r_delex_dictionary["time"]:
del entity_correct_train['duration-correct']
else:
del r_delex_dictionary["time"][r_delex_dictionary["time"].index(entity_correct_train['duration-correct'])]
else:
del entity_correct_train['duration-correct']
if entity_correct_train['trainID-correct'] not in r_delex_dictionary["id"]:
del entity_correct_train['trainID-correct']
else:
del r_delex_dictionary["id"][r_delex_dictionary["id"].index(entity_correct_train['trainID-correct'])]
r_delex_dictionary = {k:v for k,v in r_delex_dictionary.items() if len(v)>0}
return entity_correct_train,r_delex_dictionary
def get_name_hotel(conv,dict_delex):
for conv_turn in reversed(conv):
if "name" in dict_delex.keys():
for ids_v, v in enumerate(r_delex_dictionary["name"]):
if(v in conv_turn["text"]):
return v, ids_v
if(v.replace("the ","") in conv_turn["text"]):
return v, ids_v
return None, None
def get_trainID_train(conv,dict_delex):
for conv_turn in reversed(conv):
if "id" in dict_delex.keys():
for ids_v, v in enumerate(r_delex_dictionary["id"]):
if(v in conv_turn["text"]):
return v, ids_v
if(v in conv_turn["text"]):
return v, ids_v
return None, None
def get_start_end_ACT(ACT):
dic = {}
mapper = {"one":1,"two":2,"three":3,"3-star":3,"four":4,"five":5}
for span in ACT:
if(span[1]=="Stars"):
if(span[2] in mapper.keys()):
dic[mapper[span[2]]] = [span[3],span[4]]
else:
dic[span[2]] = [span[3],span[4]]
return dic
pp = pprint.PrettyPrinter(indent=4)
conn = sqlite3.connect('MWOZ.db')
database = conn.cursor()
all_arriveBy = [r[0] for r in database.execute("SELECT DISTINCT arriveBy FROM train").fetchall()]
all_day = [r[0] for r in database.execute("SELECT DISTINCT day FROM train").fetchall()]
all_departure = [r[0] for r in database.execute("SELECT DISTINCT departure FROM train").fetchall()]
all_destination = [r[0] for r in database.execute("SELECT DISTINCT destination FROM train").fetchall()]
all_leaveAt = [r[0] for r in database.execute("SELECT DISTINCT leaveAt FROM train").fetchall()]
all_trainID = [r[0] for r in database.execute("SELECT DISTINCT trainID FROM train").fetchall()]
dialogue_mwoz = json.load(open("MultiWOZ_2.1/data.json"))
test_split = open("MultiWOZ_2.1/testListFile.txt","r").read()
val_split = open("MultiWOZ_2.1/valListFile.txt","r").read()
train, valid, test = get_splits(dialogue_mwoz,test_split,val_split)
split_by_single_and_domain = json.load(open("dialogue_by_domain.json"))
all_arriveBy_choice = []
all_leaveAt_choice = []
for k, dial in train.items():
if(k.lower() in split_by_single_and_domain["train_single"]):
goal = dial["goal"]['train']['info']
if('leaveAt' in goal):
all_leaveAt_choice.append(goal['leaveAt'])
if('arriveBy' in goal):
all_arriveBy_choice.append(goal['arriveBy'])
all_arriveBy_choice = list(set(all_arriveBy_choice))
all_leaveAt_choice = list(set(all_leaveAt_choice))
all_arriveBy_choice.sort()
all_leaveAt_choice.sort()
# print(all_leaveAt_choice)
def generate_all_query(r_delex_dictionary, entity_correct_train,info):
contrains = [all_day,all_departure,all_destination,[None]]
name = ['day','departure','destination']
if('leaveAt' in info):
contrains[3] = all_leaveAt_choice
name.append('leaveAt')
elif('arriveBy' in info):
contrains[3] = all_arriveBy_choice
name.append('arriveBy')
clmn = [k.replace("-correct","")for k in entity_correct_train.keys()]
lexicalized = []
all_combo = list(product(*contrains))
all_combo.sort()
index = np.random.choice(len(all_combo), 500).tolist()
list_combo = [ all_combo[indx] for indx in index ]
for combo in list_combo:
query = {name[i_c]:c for i_c, c in enumerate(combo)}
database.execute(to_query("train", query, clmn))
all_rows = database.fetchall()
if(len(all_rows)>0):
choice = str(len(all_rows))
if('leaveAt' in entity_correct_train.keys()):
min_time = min([int(row[clmn.index("leaveAt")]) for row in all_rows])
all_rows = [ row for row in all_rows if int(row[clmn.index("leaveAt")])== min_time ]
if('arriveBy' in entity_correct_train.keys()):
max_time = max([int(row[clmn.index("arriveBy")]) for row in all_rows])
all_rows = [ row for row in all_rows if int(row[clmn.index("arriveBy")])== max_time ]
all_rows = convert_time_int_to_time(all_rows.copy(),clmn)
for row in all_rows:
results_correct = entity_correct_train.copy()
r_dictionary = r_delex_dictionary.copy()
for k in results_correct.keys():
results_correct[k] = row[clmn.index(k.replace("-correct",""))]
if("choice" in r_dictionary):
r_dictionary["choice"] = choice
if('leave' in r_dictionary):
r_dictionary["leave"] = normalize(combo[-1])
elif('arrive' in r_dictionary):
r_dictionary["arrive"] = normalize(combo[-1])
lexicalized.append([results_correct,r_dictionary])
return lexicalized
clmn_train = ["trainID",'day','departure','destination','leaveAt']
cnt_single_entity_templates = []
al = 0
good = 0
count_flag = 0
data = []
skip1, skip2, skip3, skip4 = 0,0,0,0
for i, (k, dial) in enumerate(train.items()):
if(k.lower() in split_by_single_and_domain["train_single"]):
id_dialogue = k.lower()
goal = dial["goal"]['train']
# pp.pprint(goal)
dictionary = defaultdict(list)
if("trainID" in goal['info']):
dictionary["trainID"].append(normalize(goal['info']["trainID"]))
entity_info = get_entity_by_type(goal['info'],list(goal['info'].keys()))
conversation = []
train_ID_BOOKED = ""
state = {"train":0,"attraction":0,"hotel":0,"restaurant":0,"hospital":0,"police":0,"taxi":0,"bus":0}
span_info_list = []
for turns in dial["log"]:
if(turns['metadata']):
text_delex = delexer(turns,dictionary,entity_info)
(book, results), state = check_metadata(turns['metadata'],state)
if(book):
entities_by_domain_book = get_booking_query(book)
book_delex = book
conversation.append({"entities":entities_by_domain_book,"spk":"SYS-API","text":book,"text_delex":book_delex})
span_info_list.append(turns["span_info"])
dom_API = book.split()[0] ## first token is the API domain
train_ID_BOOKED = book.split()[3].replace("'", "")
## THIS IS A SIMULATION OF AN API RESULTS
if("dialog_act" in turns and dom_API == "train" and "Train-OfferBooked" in turns["dialog_act"]):
for elem_ in turns["dialog_act"]["Train-OfferBooked"]:
if(elem_[0]=="Ticket" and elem_[1] != "None"):
results = str(results)
results += " "+ str(elem_[1])
conversation.append({"spk":"API","text":str(results).lower(),"text_delex":str(results).lower()})
span_info_list.append(turns["span_info"])
conversation.append({"spk":"SYS","text":normalize(turns['text']),"text_delex":text_delex,"span_info": turns["span_info"]})
span_info_list.append(turns["span_info"])
else:
text_delex = delexer(turns,dictionary,entity_info)
conversation.append({"spk":"USR","text":normalize(turns['text']),"text_delex":text_delex,"span_info": turns["span_info"]})
span_info_list.append(turns["span_info"])
# for conv_turn in conversation:
# print(f"{conv_turn['spk']} >>> {conv_turn['text']}")
r_delex_dictionary = {}
len_r_DICT = {}
# if(i == 487):
# print(dictionary.items())
for key, d in dictionary.items():
r_delex_dictionary[key] = list(set(substringSieve(d)))
r_delex_dictionary[key].sort()
len_r_DICT[key] = len(r_delex_dictionary[key])
r_delex_dictionary[key].sort()
al += 1
train_id, idx_id = get_trainID_train(conversation,r_delex_dictionary)
# print(train_id,idx_id)
if(train_id==None or (train_id[0]!="t" and train_id[1]!="r") or train_id[2]==" " or len(train_id)!=6):
skip1+=1
continue
# print(">", train_id.lower())
if(train_ID_BOOKED!=""):
if train_ID_BOOKED.lower()!=train_id.lower():
skip2+=1
continue
# input()
entity_correct_train = get_entity_by_type({"trainID":train_id.upper()},["arriveBy","day","departure","destination","duration","leaveAt","price","trainID"],"-correct")
new_entity = {}
for k,v in entity_correct_train.items():
v = list(v)
v.sort()
new_entity[k] = v[0].lower()
entity_correct_train = new_entity
entity_correct_train, r_delex_dictionary = query_TRAINID_and_filter(entity_correct_train,r_delex_dictionary)
total_lexer = {**{k:[v] for k,v in entity_correct_train.items()}, **r_delex_dictionary}
if(len(r_delex_dictionary.keys())>2):
skip3+=1
continue
if("leaveAt-correct" not in entity_correct_train.keys() and "arriveBy-correct" not in entity_correct_train.keys()):
skip4+=1
continue
flag = True
if("leave" in r_delex_dictionary and "choice" in r_delex_dictionary and "leaveAt" in goal['info']):
flag = False
if("arrive" in r_delex_dictionary and "choice" in r_delex_dictionary and "arriveBy" in goal['info']):
flag = False
if("leave" in r_delex_dictionary and len(r_delex_dictionary.keys())==1 and "leaveAt" in goal['info']):
flag = False
if("arrive" in r_delex_dictionary and len(r_delex_dictionary.keys())==1 and "arriveBy" in goal['info']):
flag = False
count_flag += 1
if(flag):
continue
good += 1
# print(entity_correct_train)
# print(r_delex_dictionary)
# r_delex_dictionary = {k:[v] for k,v in entity_correct_train.items()}
lexicalized = generate_all_query(r_delex_dictionary, entity_correct_train, goal['info'])
lexicalized = [{**{k:[v] for k,v in l[0].items()}, **l[1]} for l in lexicalized]
# lexicalized = []
# pp.pprint(total_lexer)
# print()
# print(len(conversation), len(span_info_list))
rdelex_conv = []
flag = True
for i, conv_turn in enumerate(conversation):
text_rdelex = conv_turn["text"]
if conv_turn["spk"] in ["USR","SYS"]:
if "trainID-correct" in total_lexer.keys():
for ids_v, v in enumerate(total_lexer["trainID-correct"]):
text_rdelex = text_rdelex.replace(v,f"[trainID-correct_{ids_v}]")
text_rdelex = text_rdelex.replace(v.replace("the ",""),f"[trainID-correct_{ids_v}]")
for ty,val in total_lexer.items():
if(ty != "choice"):
for ids_v, v in enumerate(sorted(val, reverse=True, key=lambda item: len(item))):
text_rdelex = text_rdelex.replace(v,f"[{ty}_{ids_v}]")
if "choice" in total_lexer.keys():
# print(">>>", span_info_list[i])
for info in span_info_list[i]:
if info[1] == "Choice":
# print(text_rdelex)
start_span, end_span = info[3], info[4]
value = info[4]
words = text_rdelex.split()
for t in range(len(words)):
if t == start_span:
words[t] = '[choice_0]'
elif t > start_span and t <= end_span:
words[t] = ''
# print(">before:" , text_rdelex)
text_rdelex = ' '.join(words)
# print(">after: ",text_rdelex)
if("cambridge towninfo centre" not in text_rdelex and "towninfo centre" not in text_rdelex and "cambridge" not in text_rdelex):
# all_arriveBy
# all_day
# all_leaveAt
for day in all_day:
if(day in text_rdelex):
# print(day,text_rdelex)
flag = False
continue
for lat in all_leaveAt:
if(" "+str(lat) in text_rdelex):
# print(lat,text_rdelex)
flag = False
continue
for arb in all_arriveBy:
if(" "+str(arb) in text_rdelex):
# print(arb,text_rdelex)
flag = False
continue
for dest in all_destination:
if(" "+dest in text_rdelex):
# print(dest,text_rdelex)
flag = False
continue
for dpt in all_departure:
if(" "+dpt in text_rdelex):
# print(dpt,text_rdelex)
flag = False
continue
for id_tr in all_trainID:
if(id_tr in text_rdelex):
# print(id_tr,text_rdelex)
flag = False
continue
rdelex_conv.append({"spk":conv_turn["spk"],"text":conv_turn["text"],"text_rdelex":text_rdelex})
elif conv_turn["spk"] in ["SYS-API"]:
if "trainID-correct" in total_lexer.keys():
for ids_v, v in enumerate(total_lexer["trainID-correct"]):
text_rdelex = text_rdelex.replace(v,f"[trainID-correct_{ids_v}]")
text_rdelex = text_rdelex.replace(v.replace("the ",""),f"[trainID-correct_{ids_v}]")
for id_tr in all_trainID:
if(id_tr in text_rdelex):
# print(id_tr,text_rdelex)
flag = False
continue
rdelex_conv.append({"spk":conv_turn["spk"],"text":conv_turn["text"],"text_rdelex":text_rdelex})
else:
rdelex_conv.append({"spk":conv_turn["spk"],"text":conv_turn["text"],"text_rdelex":text_rdelex})
if(flag):
data.append({"id":id_dialogue,"conv":rdelex_conv, "lexer":lexicalized, "dict_original":total_lexer})
# for conv_turn in rdelex_conv:
# print(f"{conv_turn['spk']} >>> {conv_turn['text_rdelex']}")
# print()
# print()
# print()
# input()
with open('MultiWOZ_2.1/TRAIN_SINGLE_TEMPLATE.json', 'w') as fp:
json.dump(data, fp, indent=4)
print(good, count_flag, skip1, skip2, skip3, skip4)
print(len(data))
print(good)
print(al)
print(good/float(al))
|
py | 7df6f7e349779fb9a171dbd9eb4836e56b694c44 | # Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import re
import tensorflow as tf
import torch
from argparse import ArgumentParser
from os.path import abspath
from transformers.utils import logging
from ..config.bert import PreTrained
from ...models.bert import Model
logging.set_verbosity_info()
log = logging.get_logger(__name__)
def load_src_weights(model, src_path, config):
src_path = abspath(src_path)
log.info(f"Loading from: {src_path}")
xs = tf.train.list_variables(src_path)
ns, ws = _load_weights(xs, src_path)
for n in ns:
ss = n.split("/")
p = model
trace = []
for s in ss:
if s == ".ATTRIBUTES":
break
if s.startswith("layer_with_weights"):
layer_num = int(s.split("-")[-1])
if layer_num <= 2:
continue
elif layer_num == 3:
trace.extend(["embeddings", "LayerNorm"])
p = getattr(p, "embeddings")
p = getattr(p, "LayerNorm")
elif layer_num > 3 and layer_num < config.n_lays + 4:
trace.extend(["encoder", "layer", str(layer_num - 4)])
p = getattr(p, "encoder")
p = getattr(p, "layer")
p = p[layer_num - 4]
elif layer_num == config.n_lays + 4:
trace.extend(["pooler", "dense"])
p = getattr(p, "pooler")
p = getattr(p, "dense")
elif s == "embeddings":
trace.append("embeddings")
p = getattr(p, "embeddings")
if layer_num == 0:
trace.append("tok_embed")
p = getattr(p, "tok_embed")
elif layer_num == 1:
trace.append("pos_embed")
p = getattr(p, "pos_embed")
elif layer_num == 2:
trace.append("token_type_embeddings")
p = getattr(p, "token_type_embeddings")
else:
raise ValueError("Unknown embedding layer with name {full_name}")
trace.append("weight")
p = getattr(p, "weight")
elif s == "_attention_layer":
trace.extend(["attention", "self"])
p = getattr(p, "attention")
p = getattr(p, "self")
elif s == "_attention_layer_norm":
trace.extend(["attention", "output", "LayerNorm"])
p = getattr(p, "attention")
p = getattr(p, "output")
p = getattr(p, "LayerNorm")
elif s == "_attention_output_dense":
trace.extend(["attention", "output", "dense"])
p = getattr(p, "attention")
p = getattr(p, "output")
p = getattr(p, "dense")
elif s == "_output_dense":
trace.extend(["output", "dense"])
p = getattr(p, "output")
p = getattr(p, "dense")
elif s == "_output_layer_norm":
trace.extend(["output", "LayerNorm"])
p = getattr(p, "output")
p = getattr(p, "LayerNorm")
elif s == "_key_dense":
trace.append("key")
p = getattr(p, "key")
elif s == "_query_dense":
trace.append("query")
p = getattr(p, "query")
elif s == "_value_dense":
trace.append("value")
p = getattr(p, "value")
elif s == "_intermediate_dense":
trace.extend(["intermediate", "dense"])
p = getattr(p, "intermediate")
p = getattr(p, "dense")
elif s == "_output_layer_norm":
trace.append("output")
p = getattr(p, "output")
elif s in ["bias", "beta"]:
trace.append("bias")
p = getattr(p, "bias")
elif s in ["kernel", "gamma"]:
trace.append("weight")
p = getattr(p, "weight")
else:
log.warning(f"Ignored {s}")
trace = ".".join(trace)
w = ws[n]
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
r"(\S+)\.attention\.output\.dense\.weight", trace
):
w = w.reshape(p.data.shape)
if "kernel" in n:
w = w.transpose()
assert p.shape == w.shape
p.data = torch.from_numpy(w)
return model
def _load_weights(xs, src_path):
ns = []
    ws = {}  # weights keyed by the normalized variable name
ds = []
for n, _ in xs:
ss = n.split("/")
if n == "_CHECKPOINTABLE_OBJECT_GRAPH" or ss[0] in [
"global_step",
"save_counter",
]:
log.info(f"Skipping non-model layer {n}")
continue
if "optimizer" in n:
log.info(f"Skipping optimization layer {n}")
continue
if ss[0] == "model":
ss = ss[1:]
d = 0
for s in ss:
if s.startswith("layer_with_weights"):
d += 1
else:
break
ds.append(d)
        name = "/".join(ss)
        ns.append(name)
        ws[name] = tf.train.load_variable(src_path, n)
log.info(f"Read {len(ws):,} layers")
if len(set(ds)) != 1:
raise ValueError(f"Found layers with different depths (layer depth {list(set(ds))})")
ds = list(set(ds))[0]
if ds != 1:
raise ValueError("Found more than just the embedding/encoder layers")
return ns, ws
def to_pytorch(src_path, cfg_path, save_path):
cfg = PreTrained.from_json_file(cfg_path)
print(f"Building from config: {cfg}")
m = Model(cfg)
load_src_weights(m, src_path, cfg)
print(f"Saving to: {save_path}")
torch.save(m.state_dict(), save_path)
if __name__ == "__main__":
x = ArgumentParser()
x.add_argument("--src_path", type=str, required=True)
x.add_argument("--cfg_path", type=str, required=True)
x.add_argument("--save_path", type=str, required=True)
y = x.parse_args()
to_pytorch(y.src_path, y.cfg_path, y.save_path)
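# Minimal usage sketch. The paths below are placeholders, not files shipped with
# this repo; the checkpoint must contain only the embedding/encoder (and pooler)
# variables, otherwise _load_weights() raises a ValueError.
#
#   python <this_script>.py \
#       --src_path  /path/to/tf2_checkpoint/ckpt-0 \
#       --cfg_path  /path/to/bert_config.json \
#       --save_path /path/to/pytorch_model.bin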
|
py | 7df6f91947609247ac42adca68c7ac83c7094f22 | import os
from bs4 import BeautifulSoup
from aws_allowlister.shared.utils import chomp, chomp_keep_single_spaces
from aws_allowlister.database.raw_scraping_data import RawScrapingData
from aws_allowlister.scrapers.aws_docs import get_aws_html
from aws_allowlister.scrapers.common import get_table_ids, clean_status_cell, clean_sdks, get_service_name
from sqlalchemy.orm.session import Session
def scrape_standard_table(db_session: Session, link: str, destination_folder: str, file_name: str, download: bool = True):
results = []
html_file_path = os.path.join(destination_folder, file_name)
if download:
if os.path.exists(html_file_path):
os.remove(html_file_path)
get_aws_html(link, html_file_path)
raw_scraping_data = RawScrapingData()
with open(html_file_path, "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
table_ids = get_table_ids(this_soup=soup)
# these_results = []
for this_table_id in table_ids:
table = soup.find(id=this_table_id)
# Get the standard name based on the "tab" name
tab = table.contents[1]
standard_name = chomp_keep_single_spaces(str(tab.contents[0]))
# Skip certain cases based on inconsistent formatting
exclusions = ["FedRAMP", "DoD CC SRG", "HIPAA BAA", "MTCS", "HITRUST CSF"]
if standard_name in exclusions:
continue
print(f"Scraping table for {standard_name}")
rows = table.find_all("tr")
if len(rows) == 0:
continue
# Scrape it
for row in rows:
cells = row.find_all("td")
# Skip the first row, the rest are the same
if len(cells) == 0 or len(cells) == 1:
continue
# Cell 0: Service name
this_service_name = get_service_name(cells)
# Cell 1: SDKs
# For the HIPAA BAA compliance standard, there are only two columns 🙄 smh at inconsistency
these_sdks = clean_sdks(cells)
# Cell 2: Status cell
# This will contain a checkmark (✓). Let's just mark as true if it is non-empty
this_status, this_status_cell_contents = clean_status_cell(cells)
result = dict(
service_name=this_service_name,
sdks=these_sdks,
status=this_status,
status_text=this_status_cell_contents,
)
for sdk in these_sdks:
raw_scraping_data.add_entry_to_database(
db_session=db_session,
compliance_standard_name=standard_name,
sdk=sdk,
service_name=this_service_name,
)
results.append(result)
return results
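# Minimal usage sketch (hypothetical values; the real CLI wires up its own
# SQLAlchemy session and the AWS "services in scope" page URL):
#
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#
#   session = sessionmaker(bind=create_engine("sqlite:///aws_allowlister.db"))()
#   scrape_standard_table(
#       db_session=session,
#       link="https://aws.amazon.com/compliance/services-in-scope/",
#       destination_folder=".",
#       file_name="services-in-scope.html",
#   )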
|
py | 7df6f92a7a77baf957d2a6a54a3c8448c5cdddbb | from card import create_image, GameCards
import cmd2 as cmd
import rule
import user
INIT_MONEY = 1000
INIT_NUM_USER_CARDS = 10
INIT_NUM_BADAK_CARDS = 8
NUM_USERS = 2
class GsGame:
def __init__(self, num_users, play_order_list):
self.current_player = 0
self.play_order = play_order_list
self.gs_users = []
self.num_users = num_users
for i in range(num_users):
self.gs_users.append(user.User(i))
def prepare_game(self, gs_cards, num_user_cards, num_badak_cards):
self.badak_cards = gs_cards.get_cards_in_random(num_badak_cards)
for i in range(self.num_users):
user_cards = gs_cards.get_cards_in_random(num_user_cards)
self.gs_users[i].set_cards(user_cards)
def start(self):
winner = -1
self.current_player = self.get_first_player_number()
for _ in range(NUM_USERS):
player = self.get_next_player()
num_of_4cards = len(rule.check_4cards(player.cards_in_hand))
player.score = 7*num_of_4cards
if num_of_4cards > 0:
rule.decide_go_stop(player)
while True:
# TODO add cmd2 console here
pass
# self.gs_users[self.current_player].select_card()
return winner
def get_first_player_number(self):
# TODO: replace 0 to winner
return 0
def get_next_player(self):
i = self.current_player
self.current_player += 1
return self.gs_users[i]
def gostop(num_users, play_order_list, money_list):
gs_id = 0
gs_game = GsGame(NUM_USERS, play_order_list)
while True:
gs_cards = GameCards()
gs_game.prepare_game(gs_cards, INIT_NUM_USER_CARDS, INIT_NUM_BADAK_CARDS)
# TODO
# - check if any card set has all 4 set cards.
# - money_list
print(len(gs_cards))
create_image(sorted(gs_game.badak_cards), image_name="../html/badak.png", overlap=0)
create_image(sorted(gs_game.gs_users[gs_id].cards_in_hand),
image_name="../html/user_in_hand.png", number=True)
for i in range(NUM_USERS):
create_image(sorted(gs_game.gs_users[i].cards_earned),
image_name=f"../html/cards_earned{i}.png", number=True, overlap=0)
gs_game.start()
break
if __name__ == '__main__':
play_order_list = []
for i in range(NUM_USERS):
play_order_list.append(i)
gostop(NUM_USERS, play_order_list, [INIT_MONEY]*NUM_USERS) |
py | 7df6f9434420f9176f01ab4c1c7129a5eb01f05b | from flask import (Flask, render_template, request, redirect, url_for,
make_response, flash, jsonify)
app = Flask(__name__)
from sqlalchemy import create_engine, asc, desc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
from flask import session as login_session
import random
import string
import json
import httplib2
import requests
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Restaurant Menu Application"
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def create_user(login_session):
new_user = User(name=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(new_user)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def get_user_info(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def get_user_ID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except Exception:
return None
def get_user_from_session(login_session):
if 'email' in login_session:
return get_user_info(get_user_ID(login_session['email']))
return None
@app.route("/")
def list_latest():
categories = session.query(Category).order_by(asc(Category.name))
title_items = 'Latest Items'
items = session.query(Item).order_by(desc(Item.time_created))
user = get_user_from_session(login_session)
return render_template('categories.html', categories=categories,
title_items=title_items, items=items, user=user)
@app.route('/login')
def show_login():
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is already' +
' connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
user_id = get_user_ID(login_session['email'])
if not user_id:
user_id = create_user(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += (' " style = "width: 300px; height: 300px;border-radius:' +
' 150px;-webkit-border-radius: 150px;-moz-border-radius:' +
' 150px;"> ')
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session['access_token']
    print 'In gdisconnect access token is %s' % access_token
print 'User name is: '
print login_session['username']
if access_token is None:
print 'Access Token is None'
response = make_response(json.dumps(
'Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = ('https://accounts.google.com/o/oauth2/revoke?token=%s' %
login_session['access_token'])
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
flash('Successfully disconnected.')
return redirect(url_for('list_latest'))
else:
        response = make_response(json.dumps(
            'Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
@app.route("/new", methods=['GET', 'POST'])
def new_item():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'GET':
user = get_user_from_session(login_session)
categories = session.query(Category).order_by(asc(Category.name))
return render_template('newitem.html', categories=categories,
user=user)
if request.method == 'POST':
item = Item(name=request.form['name'],
description=request.form['description'],
category_id=int(request.form['category']),
user_id=login_session['user_id'])
session.add(item)
session.commit()
        flash('Item added successfully')
return redirect(url_for('list_latest'))
@app.route("/catalog/item/<int:item_id>")
def item_details(item_id):
item = session.query(Item).filter_by(id=item_id).one()
user = get_user_from_session(login_session)
return render_template('itemdetails.html', item=item, user=user)
@app.route("/catalog/<int:category_id>")
def items_category(category_id):
categories = session.query(Category).order_by(asc(Category.name))
items = session.query(Item).filter_by(category_id=category_id)
category = session.query(Category).filter_by(id=category_id).one()
user = get_user_from_session(login_session)
return render_template('categories.html', title_items=category.name,
items=items, categories=categories, user=user)
@app.route("/delete/item/<int:item_id>", methods=['GET', 'POST'])
def delete_item(item_id):
if 'username' not in login_session:
return redirect('/login')
item = session.query(Item).filter_by(id=item_id).one()
if login_session['email'] != item.user.email:
        return 'You cannot delete an item that isn\'t yours'
if request.method == 'GET':
user = get_user_from_session(login_session)
return render_template('deleteitem.html', item=item, user=user)
if request.method == 'POST':
session.delete(item)
session.commit()
        flash('Item deleted successfully')
return redirect(url_for('list_latest'))
@app.route("/edit/item/<int:item_id>", methods=['GET', 'POST'])
def edit_item(item_id):
if 'username' not in login_session:
return redirect('/login')
item = session.query(Item).filter_by(id=item_id).one()
if login_session['email'] != item.user.email:
        return 'You cannot edit an item that isn\'t yours'
if request.method == 'GET':
user = get_user_from_session(login_session)
categories = session.query(Category).order_by(asc(Category.name))
return render_template('edititem.html', item=item,
categories=categories, user=user)
if request.method == 'POST':
item.name = request.form['name']
item.description = request.form['description']
item.category_id = request.form['category']
session.add(item)
session.commit()
flash('Item edited successfully')
return redirect(url_for('item_details', item_id=item.id))
@app.route("/categories/JSON")
def categories_json():
categories = session.query(Category).order_by(asc(Category.name))
return jsonify(Categories=[category.serialize for category in categories])
@app.route("/catalog/<int:category_id>/JSON")
def items_category_json(category_id):
items = session.query(Item).filter_by(category_id=category_id)
return jsonify(Items=[item.serialize for item in items])
@app.route("/catalog/item/<int:item_id>/JSON")
def item_details_json(item_id):
item = session.query(Item).filter_by(id=item_id).one()
return jsonify(Item=item.serialize)
if __name__ == "__main__":
app.secret_key = 'SUPER_SECRET_KEY'
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
py | 7df6f97fc53ccecd10d1f92d4555190d168681e9 | import os,sys,glob,subprocess, urllib2, tarfile, shutil, platform
failures = []
def get_options():
import os, os.path
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--force", dest="force", help="reinstall even existing plugins", action="store_true", default=False)
(options, args) = parser.parse_args()
return options, args
def install_url(f, url, subdir=None, plugin=True, force=False):
print "install url: package=", f, "url= ", url
if os.path.exists(f):
if force:
shutil.rmtree(f)
else:
print "Path exists: ", f
            return
response = urllib2.urlopen(url)
thetarfile = tarfile.open(fileobj=response, mode="r|gz")
thetarfile.extractall()
dirname = glob.glob("*%s*" % (f))
curdir = os.getcwd()
dirname = dirname[0]
os.rename(dirname, f) # potential bug if something exists/not empty?
if (subdir == None):
os.chdir(f)
else:
os.chdir(subdir)
res = 0
try:
if (plugin):
res = subprocess.call(["plugin", "install"])
else:
if platform.system() == 'Windows':
res = subprocess.call(["python", "setup.py", "config", "--compiler=mingw32", "build", "--compiler=mingw32", "install"])
else:
res = subprocess.call(["python","setup.py","install"])
print "subprocess returned ", res
except:
print "plugin %s FAILED to install correctly" % f
failures.append(f)
if res != 0:
print "plugin %s FAILED to install correctly" % f
if (dirname not in failures):
failures.append(f)
os.chdir(curdir)
options, args = get_options()
#files = ["Turbine_CostsSE", "CommonSE", "Plant_CostsSE", "Plant_FinanceSE", "Plant_EnergySE"]
files = ["Turbine_CostsSE", "CommonSE", "Plant_CostsSE", "Plant_FinanceSE", "Plant_EnergySE",
"AeroelasticSE", "AirfoilPreppy", "CCBlade", "DriveSE", "DriveWPACT", "NREL_CSM", "RotorSE",
"TowerSE", "pyFrame3DD", "JacketSE", "akima", "pBEAM"]
#files = ["pBEAM"]
wis = "http://github.com/WISDEM/"
subdir = "plugins"
# install pandas and algopy
subprocess.call(["easy_install", "pandas"])
subprocess.call(["easy_install", "algopy"])
subprocess.call(["easy_install", "zope.interface"])
subprocess.call(["easy_install", "sphinx"])
subprocess.call(["easy_install", "xlrd"])
subprocess.call(["easy_install", "pyopt"])
if platform.system() == 'Windows':
subprocess.call(["easy_install", "py2exe"])
subprocess.call(["easy_install", "pyzmq"])
subprocess.call(["easy_install", "sphinxcontrib-bibtex"])
subprocess.call(["easy_install", "sphinxcontrib-napoleon"])
#subprocess.call(["easy_install", "sphinxcontrib-zopeext"])
subprocess.call(["easy_install", "numpydoc"])
subprocess.call(["easy_install", "ipython"])
subprocess.call(["easy_install", "python-dateutil"])
# make plug in dir and cd to it:
rootdir = os.getcwd()
if not os.path.exists(subdir):
os.mkdir(subdir)
os.chdir(subdir)
# install fused wind
f = "fusedwind"
#subdir = os.path.join(f,f) # fusedwind is nested!... not anymore, I guess
subdir = f
url = "http://github.com/FUSED-Wind/fusedwind/tarball/develop" ## note, develop branch
install_url(f,url,force=options.force)
# download and install all the necessary WISDEM plugins
for f in files:
url = "%s%s/tarball/0.1" % (wis, f)
install_url(f, url,force=options.force)
# finally install WISDEM itself
os.chdir(rootdir)
os.system("plugin install")
# summary
print
print
print "Attempted to install WISDEM and its sub-plugins: ", files
print "Failed to install: ", failures
|
py | 7df6f9d3d50a3f0a457f75e4465ce663027feb12 | import unittest
import numpy as np
from numpy.core.fromnumeric import size
import paddle
import interpretdl as it
from interpretdl.common.file_utils import *
from interpretdl.common.paddle_utils import FeatureExtractor
from tests.utils import assert_arrays_almost_equal
class TestFileUtils(unittest.TestCase):
def test_file_utils(self):
download_and_decompress("https://bj.bcebos.com/paddlex/interpret/pre_models.tar.gz")
assert md5check('pre_models.tar.gz', '9375cab3b7200365b01b1dd2bc025935')
assert md5check('pre_models.tar.gz', 'aaa') is False
os.remove('pre_models.tar.gz')
def test_mv(self):
os.makedirs('tmp/s1')
os.makedirs('tmp/s2')
move_and_merge_tree('tmp', 'tmp')
shutil.rmtree('tmp')
class TestPaddleUtils(unittest.TestCase):
def test_feature_extractor(self):
fe = FeatureExtractor()
paddle.enable_static()
fe.session_prepare()
paddle.disable_static()
shutil.rmtree('pre_models')
if __name__ == '__main__':
unittest.main() |
py | 7df6fa31df11802056fd91caf97e3f8e5851efca | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""A JIT-compatible library for QDWH-based polar decomposition.
QDWH is short for QR-based dynamically weighted Halley iteration. The Halley
iteration implemented through QR decompositions does not require matrix
inversion. This is desirable for multicore and heterogeneous computing systems.
Reference: Nakatsukasa, Yuji, Zhaojun Bai, and François Gygi.
"Optimizing Halley's iteration for computing the matrix polar decomposition."
SIAM Journal on Matrix Analysis and Applications 31, no. 5 (2010): 2700-2720.
https://epubs.siam.org/doi/abs/10.1137/090774999
"""
import functools
import jax
from jax import core
import jax.numpy as jnp
from jax._src.lax import linalg as lax_linalg
def _use_qr(u, params):
"""Uses QR decomposition."""
a, b, c = params
m, n = u.shape
y = jnp.concatenate([jnp.sqrt(c) * u, jnp.eye(n, dtype=jnp.dtype(u))])
q, _ = lax_linalg.qr(y, full_matrices=False)
q1 = q[:m, :]
q2 = (q[m:, :]).T.conj()
e = b / c
u = (e * u + (a - e) / jnp.sqrt(c) * jnp.einsum('ij,jk->ik', q1, q2))
return u
def _use_cholesky(u, params):
"""Uses Cholesky decomposition."""
a, b, c = params
_, n = u.shape
x = c * (u.T.conj() @ u) + jnp.eye(n, dtype=jnp.dtype(u))
# `y` is lower triangular.
y = lax_linalg.cholesky(x, symmetrize_input=False)
z = lax_linalg.triangular_solve(
y, u.T, left_side=True, lower=True, conjugate_a=True).conj()
z = lax_linalg.triangular_solve(y, z, left_side=True, lower=True,
transpose_a=True, conjugate_a=True).T.conj()
e = b / c
u = e * u + (a - e) * z
return u
@functools.partial(jax.jit, static_argnums=(1, 2))
def _qdwh(x, is_symmetric, max_iterations):
"""QR-based dynamically weighted Halley iteration for polar decomposition."""
# Estimates `alpha` and `beta = alpha * l`, where `alpha` is an estimate of
# norm(x, 2) such that `alpha >= norm(x, 2)` and `beta` is a lower bound for
# the smallest singular value of x.
eps = jnp.finfo(x.dtype).eps
alpha = jnp.sqrt(jnp.linalg.norm(x, ord=1) * jnp.linalg.norm(x, ord=jnp.inf))
l = eps
u = x / alpha
# Iteration tolerances.
tol_l = 10.0 * eps / 2.0
tol_norm = jnp.cbrt(tol_l)
def cond_fun(state):
_, _, _, is_unconverged, is_not_max_iteration = state
return jnp.logical_and(is_unconverged, is_not_max_iteration)
def body_fun(state):
u, l, iter_idx, _, _ = state
u_prev = u
# Computes parameters.
l2 = l**2
dd = jnp.cbrt(4.0 * (1.0 / l2 - 1.0) / l2)
sqd = jnp.sqrt(1.0 + dd)
a = (sqd + jnp.sqrt(8.0 - 4.0 * dd + 8.0 * (2.0 - l2) / (l2 * sqd)) / 2)
a = jnp.real(a)
b = (a - 1.0)**2 / 4.0
c = a + b - 1.0
# Updates l.
l = l * (a + b * l2) / (1.0 + c * l2)
# Uses QR or Cholesky decomposition.
def true_fn(u):
return _use_qr(u, params=(a, b, c))
def false_fn(u):
return _use_cholesky(u, params=(a, b, c))
u = jax.lax.cond(c > 100, true_fn, false_fn, operand=(u))
if is_symmetric:
u = (u + u.T.conj()) / 2.0
# Checks convergence.
iterating_l = jnp.abs(1.0 - l) > tol_l
iterating_u = jnp.linalg.norm((u-u_prev)) > tol_norm
is_unconverged = jnp.logical_or(iterating_l, iterating_u)
is_not_max_iteration = iter_idx < max_iterations
return u, l, iter_idx + 1, is_unconverged, is_not_max_iteration
iter_idx = 1
is_unconverged = True
is_not_max_iteration = True
u, _, num_iters, is_unconverged, _ = jax.lax.while_loop(
cond_fun=cond_fun, body_fun=body_fun,
init_val=(u, l, iter_idx, is_unconverged, is_not_max_iteration))
# Applies Newton-Schulz refinement for better accuracy.
u = 1.5 * u - 0.5 * u @ (u.T.conj() @ u)
h = u.T.conj() @ x
h = (h + h.T.conj()) / 2.0
# Converged within the maximum number of iterations.
is_converged = jnp.logical_not(is_unconverged)
return u, h, num_iters - 1, is_converged
# TODO: Add pivoting.
def qdwh(x, is_symmetric, max_iterations=10):
"""QR-based dynamically weighted Halley iteration for polar decomposition.
Args:
x: A full-rank matrix of shape `m x n` with `m >= n`.
is_symmetric: True if `x` is symmetric.
max_iterations: The predefined maximum number of iterations.
Returns:
A four-tuple of (u, h, num_iters, is_converged) containing the
polar decomposition of `x = u * h`, the number of iterations to compute `u`,
and `is_converged`, whose value is `True` when the convergence is achieved
within the maximum number of iterations.
"""
m, n = x.shape
if m < n:
raise ValueError('The input matrix of shape m x n must have m >= n.')
max_iterations = core.concrete_or_error(
int, max_iterations, 'The `max_iterations` argument must be statically '
'specified to use `qdwh` within JAX transformations.')
is_symmetric = core.concrete_or_error(
bool, is_symmetric, 'The `is_symmetric` argument must be statically '
'specified to use `qdwh` within JAX transformations.')
if is_symmetric:
eps = jnp.finfo(x.dtype).eps
tol = 50.0 * eps
relative_diff = jnp.linalg.norm(x - x.T.conj()) / jnp.linalg.norm(x)
if relative_diff > tol:
raise ValueError('The input `x` is NOT symmetric because '
'`norm(x-x.H) / norm(x)` is {}, which is greater than '
'the tolerance {}.'.format(relative_diff, tol))
with jax.default_matmul_precision('float32'):
u, h, num_iters, is_converged = _qdwh(x, is_symmetric, max_iterations)
return u, h, num_iters, is_converged
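# Minimal usage sketch (illustrative values only): polar-decompose a small
# symmetric matrix and inspect the result.
#
#   a = jnp.array([[2.0, 1.0], [1.0, 3.0]], dtype=jnp.float32)
#   u, h, num_iters, is_converged = qdwh(a, is_symmetric=True)
#   # u is (approximately) orthogonal, h is Hermitian PSD, and a ≈ u @ h.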
|
py | 7df6fbd2a6628ea05912a50d11bafb40809321dd | #! /usr/bin/env python
#
# Copyright (C) 2011-2014 Alexandre Gramfort
# Michael Waskom
# Scott Burns
# Martin Luessi
# Eric Larson
import os
from setuptools import setup
descr = """PySurfer: cortical surface visualization using Python."""
# deal with MPL sandbox violations during easy_install
os.environ['MPLCONFIGDIR'] = '.'
# get the version, don't import surfer here so setup works on headless systems
version = None
with open(os.path.join('surfer', '__init__.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('"')
break
if version is None:
raise RuntimeError('Could not determine version')
DISTNAME = 'pysurfer'
DESCRIPTION = descr
LONG_DESCRIPTION = descr
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://pysurfer.github.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/nipy/PySurfer'
VERSION = version
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
setup(name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=True,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
platforms='any',
packages=['surfer', 'surfer.tests'],
scripts=['bin/pysurfer'],
install_requires=['numpy', 'scipy', 'matplotlib', 'nibabel >= 1.2', 'mayavi'],
extras_require={'save_movie': ['imageio >= 1.5']},
)
|
py | 7df6fc21705d89ba3c3f650c52e8509923b027b8 | n = int(input())
powers_odd = []
powers_even = []
amount = 0
for i in range(1, n + 1):
if i % 2:
powers_odd.append(i)
amount += pow(i, sum(powers_odd))
else:
powers_even.append(i)
amount += pow(i, sum(powers_even))
print(amount)
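# Worked example for n = 3 (each term raises i to the running sum of same-parity indices):
#   i = 1 (odd):  powers_odd  = [1],    amount += 1 ** 1  -> amount = 1
#   i = 2 (even): powers_even = [2],    amount += 2 ** 2  -> amount = 5
#   i = 3 (odd):  powers_odd  = [1, 3], amount += 3 ** 4  -> amount = 86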
|
py | 7df6fc329a7a16d7863bd4c0b19cf035d5be8d6b | # -*- coding: utf-8 -*-
from datetime import timedelta
import uuid
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from userprofiles.settings import up_settings
def generate_token():
return str(uuid.uuid4())
def generate_confirm_expire_date():
return timezone.now() + timedelta(days=up_settings.EMAIL_VERIFICATION_DAYS)
class EmailVerification(models.Model):
user = models.ForeignKey(User, verbose_name=_('User'), blank=False)
old_email = models.EmailField(_('Old e-mail address'))
new_email = models.EmailField(_('New e-mail address'))
token = models.CharField(_('Token'), max_length=40, default=generate_token)
code = models.CharField(_('Code'), max_length=40, default=generate_token)
is_approved = models.BooleanField(_('Approved'), default=False)
is_expired = models.BooleanField(_('Expired'), default=False)
expiration_date = models.DateTimeField(_('Expiration date'),
default=generate_confirm_expire_date)
def __unicode__(self):
return '%s - %s/%s' % (self.user, self.old_email, self.new_email)
def save(self, *args, **kwargs):
if self.is_approved:
EmailVerification.objects.filter(
user=self.user, is_approved=False).update(is_expired=True)
self.is_expired = True
if self.user.email == self.old_email:
self.user.email = self.new_email
self.user.save()
return super(EmailVerification, self).save(*args, **kwargs)
class Meta:
app_label = 'userprofiles'
verbose_name = _('E-mail verification')
verbose_name_plural = _('E-mail verifications')
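# Minimal usage sketch (field values are illustrative):
#
#   verification = EmailVerification.objects.create(
#       user=some_user, old_email=some_user.email, new_email="new@example.com")
#   # When the user confirms the new address:
#   verification.is_approved = True
#   verification.save()  # expires other pending requests and updates user.email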
|
py | 7df6fcc97db1126438b2c4b14155c93bd83df0f3 | import djcelery
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z&+kudpyz-5jt9qv%2u5a=c*t&-*6nx!k%x=mcs32xr+@w=4fj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Custom
'schedule',
# Third party
'corsheaders',
'djcelery',
'rest_framework',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scale.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"), os.path.dirname(os.path.join(BASE_DIR, "templates"))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scale.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
# 'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 50
}
CORS_ORIGIN_WHITELIST = (
'http://localhost:3000',
)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Celery setting
djcelery.setup_loader()
BROKER_URL = 'amqp://paradox:paradox@[email protected]:5672//'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Katmandu'
CELERY_IMPORTS = ('schedule.tasks',)
# Sending Email
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
EMAIL_PORT = 587
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
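# The CELERY_IMPORTS setting above expects a schedule/tasks.py module. A minimal
# sketch of such a task (illustrative only; the real tasks are project-specific):
#
#   from celery import shared_task
#   from django.core.mail import send_mail
#
#   @shared_task
#   def send_reminder(subject, body, recipient):
#       # from_email=None falls back to DEFAULT_FROM_EMAIL configured above
#       send_mail(subject, body, None, [recipient])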
|
py | 7df6fe139840a881cb620d132565cdb2b4b28d98 | from unittest.mock import patch, MagicMock
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import HttpRequest
from django.test import TestCase
from civil_registry.models import Citizen
from civil_registry.tests.factories import CitizenFactory
from libya_elections.utils import refresh_model, get_permission_object_by_name
from libya_site.tests.factories import UserFactory
from register.tests.factories import RegistrationCenterFactory
from ..forms import ChangesetForm
from ..models import Changeset, APPROVE_CHANGESET_PERMISSION
from ..tests.factories import ChangesetFactory
class ChangesetFormTest(TestCase):
def setUp(self):
self.center1 = RegistrationCenterFactory()
self.center2 = RegistrationCenterFactory()
self.center3 = RegistrationCenterFactory()
self.data = {
'name': 'My Changeset',
'change': str(Changeset.CHANGE_CENTER),
'how_to_select': str(Changeset.SELECT_CENTERS),
# django-selectable appends "_1" to the field name for the form name that
# has the actual data submitted:
'selected_centers_1': [str(self.center1.pk), str(self.center2.pk)],
'target_center_1': str(self.center3.pk),
'justification': 'Just because',
}
def test_create_changeset(self):
# We can create a changeset
mock_request = MagicMock(user=UserFactory())
form = ChangesetForm(data=self.data, request=mock_request)
self.assertTrue(form.is_valid(), msg=str(form.errors))
changeset = form.save()
changeset = refresh_model(changeset)
self.assertEqual(Changeset.CHANGE_CENTER, changeset.change)
self.assertEqual(Changeset.SELECT_CENTERS, changeset.how_to_select)
self.assertIn(self.center1, changeset.selected_centers.all())
self.assertIn(self.center2, changeset.selected_centers.all())
def test_cannot_move_to_same_center(self):
# We prevent moving from a center to the same center
self.data['target_center_1'] = str(self.center1.pk)
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('target_center', form.errors)
def test_select_center_must_specify_selected_centers(self):
# If how_to_select is SELECT_CENTERS, you have to specify some
del self.data['selected_centers_1']
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('selected_centers', form.errors)
def test_change_center_must_specify_target_center(self):
# If changing centers, must give a target
del self.data['target_center_1']
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('target_center', form.errors)
def test_select_other_changeset_requires_other_changeset(self):
# If how_to_select is SELECT_OTHER_CHANGESET, you have to give another changeset
data = self.data
data['how_to_select'] = str(Changeset.SELECT_OTHER_CHANGESET)
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('other_changeset', form.errors)
def test_rollback_requires_other_changeset(self):
# Rollback always requires another changeset
data = self.data
data['change'] = str(Changeset.CHANGE_ROLLBACK)
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('other_changeset', form.errors)
def test_rollback_other_changeset_must_not_have_failed(self):
# You can't rollback a failed changeset
data = self.data
data['change'] = str(Changeset.CHANGE_ROLLBACK)
changeset2 = ChangesetFactory(status=Changeset.STATUS_FAILED)
data['other_changeset'] = str(changeset2.pk)
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('other_changeset', form.errors)
def test_rollback_other_changeset_can_be_partially_successful(self):
# You can rollback a partially successful changeset
data = self.data
data['change'] = str(Changeset.CHANGE_ROLLBACK)
changeset2 = ChangesetFactory(status=Changeset.STATUS_PARTIALLY_SUCCESSFUL)
data['other_changeset'] = str(changeset2.pk)
data['how_to_select'] = str(Changeset.SELECT_OTHER_CHANGESET)
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertTrue(form.is_valid(), msg=form.errors)
def test_select_uploaded_nids_requires_uploaded_file(self):
# If how_to_select is upload NIDs, you must upload a file
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
form = ChangesetForm(data=data, files={}, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
def test_max_upload_errors(self):
# We stop reporting upload errors beyond MAX_ERRORS
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
filebytes = b"1\n2\n3\n4\n"
upload_file = SimpleUploadedFile('my_filename', filebytes)
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
with patch('changesets.forms.MAX_ERRORS', 1):
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
self.assertIn('Stopping', str(form.errors))
def test_upload_file(self):
# We can successfully upload a file of NIDs and look up the citizens
# and blank lines are okay
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
citizen1 = CitizenFactory()
citizen2 = CitizenFactory()
filebytes = "{nid1}\n\n{nid2}\n".format(nid1=citizen1.national_id,
nid2=citizen2.national_id).encode()
upload_file = SimpleUploadedFile('my_filename', filebytes)
mock_request = MagicMock(user=UserFactory())
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=mock_request)
self.assertTrue(form.is_valid(), msg=str(form.errors))
changeset = form.save()
self.assertIn(citizen1, changeset.selected_citizens.all())
self.assertIn(citizen2, changeset.selected_citizens.all())
def test_upload_file_not_number(self):
# We catch non-numbers in the upload file
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
citizen1 = CitizenFactory()
citizen2 = CitizenFactory()
nid1 = str(citizen1.national_id)
nid1 = nid1[0] + '.' + nid1[2:]
filebytes = "{nid1}\n{nid2}\n".format(nid1=nid1, nid2=citizen2.national_id).encode()
upload_file = SimpleUploadedFile('my_filename', filebytes)
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
def test_upload_file_invalid_nid(self):
# We catch invalid NIDs in the upload file
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
citizen1 = CitizenFactory()
citizen2 = CitizenFactory()
nid1 = str(citizen1.national_id)
nid1 = '3' + nid1[1:]
filebytes = "{nid1}\n{nid2}\n".format(nid1=nid1, nid2=citizen2.national_id).encode()
upload_file = SimpleUploadedFile('my_filename', filebytes)
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
def test_upload_file_no_such_citizen(self):
# We catch non-existent citizens in the upload file
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
citizen1 = CitizenFactory()
citizen2 = CitizenFactory()
filebytes = "{nid1}\n{nid2}\n".format(nid1=citizen1.national_id + 27,
nid2=citizen2.national_id).encode()
upload_file = SimpleUploadedFile('my_filename', filebytes)
with patch('changesets.forms.get_citizen_by_national_id') as mock_get_citizen:
mock_get_citizen.return_value = None # No such citizen
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
def test_upload_file_might_have_to_lookup_citizen(self):
# Upload file can have a citizen we don't have a Citizen record for yet
# (e.g. if we're blocking a citizen who hasn't tried to register)
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
nid1 = "199999999999"
filebytes = "{nid1}\n".format(nid1=nid1).encode()
upload_file = SimpleUploadedFile('my_filename', filebytes)
with patch('changesets.forms.get_citizen_by_national_id') as mock_get_citizen:
# Make a Citizen but don't save it in the database, so the form validation
# won't initially find it
citizen = Citizen(national_id=nid1)
mock_get_citizen.return_value = citizen
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
self.assertTrue(form.is_valid())
def test_upload_file_invalid_line(self):
# We notice a file with too short a line
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
citizen1 = CitizenFactory()
citizen2 = CitizenFactory()
filebytes = "1{nid1}\n{nid2}\n".format(nid1=citizen1.national_id,
nid2=citizen2.national_id).encode()
upload_file = SimpleUploadedFile('my_filename', filebytes)
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
def test_upload_file_empty(self):
# We don't allow an empty file
data = self.data
data['how_to_select'] = str(Changeset.SELECT_UPLOADED_NIDS)
filebytes = b""
upload_file = SimpleUploadedFile('my_filename', filebytes)
form = ChangesetForm(data=data, files={'upload_file': upload_file}, request=MagicMock())
self.assertFalse(form.is_valid())
self.assertIn('upload_file', form.errors)
self.assertIn("The submitted file is empty.", str(form.errors))
def test_approval_warning_checkbox(self):
# If the changeset has approvals, we include the approval warning checkbox
# If not, we don't.
# New changeset
form = ChangesetForm(data=self.data, request=MagicMock())
self.assertNotIn('okay_to_remove_approvals', form.fields)
# Existing changeset without approvals
changeset = ChangesetFactory()
form = ChangesetForm(instance=changeset, data=self.data, request=MagicMock())
self.assertNotIn('okay_to_remove_approvals', form.fields)
# Approvals
self.approver = UserFactory()
self.approver.user_permissions.add(
get_permission_object_by_name(APPROVE_CHANGESET_PERMISSION))
changeset.approve(self.approver)
form = ChangesetForm(instance=changeset, data=self.data, request=MagicMock())
self.assertIn('okay_to_remove_approvals', form.fields)
# And we must check it
self.assertFalse(form.is_valid())
self.assertIn('okay_to_remove_approvals', form.errors)
# If we save, it removes the approvals
self.data['okay_to_remove_approvals'] = True
mock_request = MagicMock()
mock_request.__class__ = HttpRequest
form = ChangesetForm(instance=changeset, data=self.data, request=mock_request)
self.assertTrue(form.is_valid(), form.errors)
changeset = form.save()
self.assertEqual(0, changeset.number_of_approvals)
|
py | 7df6fe1ea2b65847f447c2f9cd2b5b13e71d4aef | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2015-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import functools
import typing
from edb.schema import name as sn
from edb.schema import objects as so
from edb.edgeql import ast as qlast
class Type:
def __init__(self, name):
self.name = name
def get_name(self, schema):
return self.name
class ObjectType(Type):
def __init__(self, name):
super().__init__(name)
self.pointers = {}
def is_pointer(self):
return False
def getptr(self, schema, name):
return self.pointers.get(name)
class UnionType:
def __init__(self, types):
self.types = types
class Pointer:
def __init__(self, name, *, source=None, target=None):
self.name = name
self.source = source
self.target = target
self.pointers = {}
def is_pointer(self):
return True
def getptr(self, schema, name):
return self.pointers.get(name)
def get_target(self, schema):
return self.target
def get_name(self, schema):
return self.name
def trace_refs(
qltree: qlast.Base,
*,
schema,
source: typing.Optional[sn.Name] = None,
subject: typing.Optional[sn.Name] = None,
path_prefix: typing.Optional[sn.Name] = None,
module: typing.Optional[str] = None,
objects: typing.Dict[str, object],
) -> typing.FrozenSet[sn.Name]:
"""Return a list of schema item names used in an expression."""
ctx = TracerContext(schema, module, objects,
source, subject, path_prefix)
trace(qltree, ctx=ctx)
return frozenset(ctx.refs)
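# Minimal usage sketch (assumes `qltree` is an already-parsed qlast expression and
# that `schema`/`objects` come from the surrounding declaration-processing code):
#
#   refs = trace_refs(
#       qltree,
#       schema=schema,
#       module="default",
#       objects={"default::User": ObjectType("default::User")},
#   )
#   # `refs` is a frozenset of fully-qualified names the expression depends on.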
class TracerContext:
def __init__(self, schema, module, objects, source, subject, path_prefix):
self.schema = schema
self.refs = set()
self.module = module
self.objects = objects
self.source = source
self.subject = subject
self.path_prefix = path_prefix
def get_ref_name(self, ref: qlast.ObjectRef) -> sn.Name:
if ref.module:
return sn.Name(module=ref.module, name=ref.name)
elif f'{self.module}::{ref.name}' in self.objects:
return sn.Name(module=self.module, name=ref.name)
else:
return sn.Name(module="std", name=ref.name)
@functools.singledispatch
def trace(node: qlast.Base, *,
ctx: TracerContext) -> typing.Optional[so.Object]:
raise NotImplementedError(f"do not know how to trace {node!r}")
@trace.register
def trace_none(node: type(None), *, ctx: TracerContext) -> None:
pass
@trace.register
def trace_Constant(node: qlast.BaseConstant, *, ctx: TracerContext) -> None:
pass
@trace.register
def trace_Array(node: qlast.Array, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el, ctx=ctx)
@trace.register
def trace_Set(node: qlast.Set, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el, ctx=ctx)
@trace.register
def trace_Tuple(node: qlast.Tuple, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el, ctx=ctx)
@trace.register
def trace_NamedTuple(node: qlast.NamedTuple, *, ctx: TracerContext) -> None:
for el in node.elements:
trace(el.val, ctx=ctx)
@trace.register
def trace_BinOp(node: qlast.BinOp, *, ctx: TracerContext) -> None:
trace(node.left, ctx=ctx)
trace(node.right, ctx=ctx)
@trace.register
def trace_UnaryOp(node: qlast.UnaryOp, *, ctx: TracerContext) -> None:
trace(node.operand, ctx=ctx)
@trace.register
def trace_Detached(node: qlast.DetachedExpr, *, ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
@trace.register
def trace_TypeCast(node: qlast.TypeCast, *, ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
if not node.type.subtypes:
ctx.refs.add(ctx.get_ref_name(node.type.maintype))
@trace.register
def trace_IsOp(node: qlast.IsOp, *, ctx: TracerContext) -> None:
trace(node.left, ctx=ctx)
if not node.right.subtypes:
ctx.refs.add(ctx.get_ref_name(node.right.maintype))
@trace.register
def trace_Introspect(node: qlast.Introspect, *, ctx: TracerContext) -> None:
if not node.type.subtypes:
ctx.refs.add(ctx.get_ref_name(node.type.maintype))
@trace.register
def trace_FunctionCall(node: qlast.FunctionCall, *,
ctx: TracerContext) -> None:
for arg in node.args:
trace(arg, ctx=ctx)
for arg in node.kwargs.values():
trace(arg, ctx=ctx)
@trace.register
def trace_Indirection(node: qlast.Indirection, *, ctx: TracerContext) -> None:
for indirection in node.indirection:
trace(indirection, ctx=ctx)
trace(node.arg, ctx=ctx)
@trace.register
def trace_Index(node: qlast.Index, *, ctx: TracerContext) -> None:
trace(node.index, ctx=ctx)
@trace.register
def trace_Slice(node: qlast.Slice, *, ctx: TracerContext) -> None:
trace(node.start, ctx=ctx)
trace(node.stop, ctx=ctx)
@trace.register
def trace_Path(node: qlast.Path, *,
ctx: TracerContext) -> typing.Optional[so.Object]:
tip = None
ptr = None
plen = len(node.steps)
for i, step in enumerate(node.steps):
if isinstance(step, qlast.ObjectRef):
refname = ctx.get_ref_name(step)
if refname in ctx.objects:
ctx.refs.add(refname)
tip = ctx.objects[refname]
else:
tip = ctx.schema.get(refname)
elif isinstance(step, qlast.Ptr):
if i == 0:
# Abbreviated path.
if ctx.path_prefix in ctx.objects:
tip = ctx.objects[ctx.path_prefix]
else:
# We can't reason about this path.
return
if step.type == 'property':
lprop = ptr.getptr(ctx.schema, step.ptr.name)
if lprop is None:
# Invalid link property reference, bail.
return
if isinstance(lprop, Pointer):
ctx.refs.add(f'{lprop.source}@{step.ptr.name}')
else:
if step.direction == '<':
if plen > i + 1 and isinstance(node.steps[i + 1],
qlast.TypeIndirection):
# A reverse link traversal with a type filter,
# process it on the next step.
pass
else:
# otherwise we cannot say anything about the target,
# so bail.
return
else:
if tip is None:
# We can't reason about this path.
return
ptr = tip.getptr(ctx.schema, step.ptr.name)
if ptr is None:
# Invalid pointer reference, bail.
return
if ptr.source == tip:
tip_name = tip.get_name(ctx.schema)
ctx.refs.add(f'{tip_name}@{step.ptr.name}')
tip = ptr.get_target(ctx.schema)
elif isinstance(step, qlast.TypeIndirection):
tip = _resolve_type_expr(step.type, ctx=ctx)
prev_step = node.steps[i - 1]
if prev_step.direction == '<':
ptr = tip.getptr(ctx.schema, prev_step.ptr.name)
if ptr is None:
# Invalid pointer reference, bail.
return
if isinstance(tip, Type):
tip_name = tip.get_name(ctx.schema)
ctx.refs.add(f'{tip_name}@{prev_step.ptr.name}')
tip = ptr.get_target(ctx.schema)
else:
tr = trace(step, ctx=ctx)
if tr is not None:
tip = tr
if isinstance(tip, Pointer):
ptr = tip
return tip
@trace.register
def trace_Source(node: qlast.Source, *, ctx: TracerContext) -> so.Object:
return ctx.objects[ctx.source]
@trace.register
def trace_Subject(node: qlast.Subject, *,
ctx: TracerContext) -> typing.Optional[so.Object]:
# Apparently for some paths (of length 1) ctx.subject may be None.
if ctx.subject is not None:
return ctx.objects[ctx.subject]
def _resolve_type_expr(
texpr: qlast.TypeExpr, *,
ctx: TracerContext
) -> typing.Union[so.Object, UnionType]:
if isinstance(texpr, qlast.TypeName):
if texpr.subtypes:
return Type(name=texpr.maintype.name)
else:
refname = ctx.get_ref_name(texpr.maintype)
obj = ctx.objects.get(refname)
if obj is None:
obj = ctx.schema.get(refname)
else:
ctx.refs.add(refname)
return obj
elif isinstance(texpr, qlast.TypeOp):
if texpr.op == '|':
return UnionType([
_resolve_type_expr(texpr.left, ctx=ctx),
_resolve_type_expr(texpr.right, ctx=ctx),
])
else:
raise NotImplementedError(
f'unsupported type operation: {texpr.op}')
else:
raise NotImplementedError(
f'unsupported type expression: {texpr!r}'
)
@trace.register
def trace_TypeIndirection(node: qlast.TypeIndirection, *,
ctx: TracerContext) -> None:
trace(node.type, ctx=ctx)
@trace.register
def trace_TypeOf(node: qlast.TypeOf, *, ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
@trace.register
def trace_TypeName(node: qlast.TypeName, *, ctx: TracerContext) -> None:
if node.subtypes:
for st in node.subtypes:
trace(st, ctx=ctx)
else:
fq_name = node.maintype.name
if node.maintype.module:
fq_name = f'{node.maintype.module}::{fq_name}'
ctx.refs.add(fq_name)
@trace.register
def trace_TypeOp(node: qlast.TypeOp, *, ctx: TracerContext) -> None:
trace(node.left, ctx=ctx)
trace(node.right, ctx=ctx)
@trace.register
def trace_IfElse(node: qlast.IfElse, *, ctx: TracerContext) -> None:
trace(node.if_expr, ctx=ctx)
trace(node.else_expr, ctx=ctx)
trace(node.condition, ctx=ctx)
@trace.register
def trace_Shape(node: qlast.Shape, *, ctx: TracerContext) -> None:
if isinstance(node.expr, qlast.Path):
tip = trace(node.expr, ctx=ctx)
orig_prefix = ctx.path_prefix
if tip is not None:
ctx.path_prefix = tip.get_name(ctx.schema)
else:
ctx.path_prefix = None
else:
trace(node.expr, ctx=ctx)
for element in node.elements:
trace(element, ctx=ctx)
if isinstance(node.expr, qlast.Path):
ctx.path_prefix = orig_prefix
@trace.register
def trace_ShapeElement(node: qlast.ShapeElement, *,
ctx: TracerContext) -> None:
trace(node.expr, ctx=ctx)
for element in node.elements:
trace(element, ctx=ctx)
trace(node.where, ctx=ctx)
for element in node.orderby:
trace(element, ctx=ctx)
trace(node.offset, ctx=ctx)
trace(node.limit, ctx=ctx)
trace(node.compexpr, ctx=ctx)
@trace.register
def trace_Select(node: qlast.SelectQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.result, ctx=ctx)
if node.where is not None:
trace(node.where, ctx=ctx)
if node.orderby:
for expr in node.orderby:
trace(expr, ctx=ctx)
if node.offset is not None:
trace(node.offset, ctx=ctx)
if node.limit is not None:
trace(node.limit, ctx=ctx)
@trace.register
def trace_SortExpr(node: qlast.SortExpr, *, ctx: TracerContext) -> None:
trace(node.path, ctx=ctx)
@trace.register
def trace_InsertQuery(node: qlast.InsertQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.subject, ctx=ctx)
for element in node.shape:
trace(element, ctx=ctx)
@trace.register
def trace_UpdateQuery(node: qlast.UpdateQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.subject, ctx=ctx)
for element in node.shape:
trace(element, ctx=ctx)
trace(node.where, ctx=ctx)
@trace.register
def trace_DeleteQuery(node: qlast.DeleteQuery, *, ctx: TracerContext) -> None:
for alias in node.aliases:
if isinstance(alias, qlast.AliasedExpr):
trace(alias.expr, ctx=ctx)
trace(node.subject, ctx=ctx)
if node.where is not None:
trace(node.where, ctx=ctx)
if node.orderby:
for expr in node.orderby:
trace(expr, ctx=ctx)
if node.offset is not None:
trace(node.offset, ctx=ctx)
if node.limit is not None:
trace(node.limit, ctx=ctx)
@trace.register
def trace_DescribeStmt(
node: qlast.DescribeStmt, *,
ctx: TracerContext,
) -> None:
if node.object:
fq_name = node.object.name
if node.object.module:
fq_name = f'{node.object.module}::{fq_name}'
ctx.refs.add(fq_name)
|
py | 7df6fe223ce5fc4ef421db799cd95e8110f0d7d1 | import asyncio
import asyncpg
import functools
import json
import logging
import os
import random
import subprocess
from timeit import default_timer
from aiohttp import (
web,
ClientSession,
ClientRequest,
ClientResponse,
ClientError,
ClientTimeout,
)
from .utils import flatten, log_json, log_msg, log_timer, output_reader
LOGGER = logging.getLogger(__name__)
DEFAULT_POSTGRES = bool(os.getenv("POSTGRES"))
DEFAULT_INTERNAL_HOST = "127.0.0.1"
DEFAULT_EXTERNAL_HOST = "localhost"
DEFAULT_BIN_PATH = "../bin"
DEFAULT_PYTHON_PATH = ".."
START_TIMEOUT = float(os.getenv("START_TIMEOUT", 30.0))
RUN_MODE = os.getenv("RUNMODE")
GENESIS_URL = os.getenv("GENESIS_URL")
LEDGER_URL = os.getenv("LEDGER_URL")
if RUN_MODE == "docker":
DEFAULT_INTERNAL_HOST = os.getenv("DOCKERHOST") or "host.docker.internal"
DEFAULT_EXTERNAL_HOST = DEFAULT_INTERNAL_HOST
DEFAULT_BIN_PATH = "./bin"
DEFAULT_PYTHON_PATH = "."
elif RUN_MODE == "pwd":
# DEFAULT_INTERNAL_HOST =
DEFAULT_EXTERNAL_HOST = os.getenv("DOCKERHOST") or "host.docker.internal"
DEFAULT_BIN_PATH = "./bin"
DEFAULT_PYTHON_PATH = "."
async def default_genesis_txns():
genesis = None
try:
if GENESIS_URL:
async with ClientSession() as session:
async with session.get(GENESIS_URL) as resp:
genesis = await resp.text()
elif RUN_MODE == "docker":
async with ClientSession() as session:
async with session.get(
f"http://{DEFAULT_EXTERNAL_HOST}:9000/genesis"
) as resp:
genesis = await resp.text()
else:
with open("local-genesis.txt", "r") as genesis_file:
genesis = genesis_file.read()
except Exception:
LOGGER.exception("Error loading genesis transactions:")
return genesis
class DemoAgent:
def __init__(
self,
ident: str,
http_port: int,
admin_port: int,
internal_host: str = None,
external_host: str = None,
genesis_data: str = None,
seed: str = "random",
label: str = None,
color: str = None,
prefix: str = None,
timing: bool = False,
timing_log: str = None,
postgres: bool = None,
extra_args=None,
**params,
):
self.ident = ident
self.http_port = http_port
self.admin_port = admin_port
self.internal_host = internal_host or DEFAULT_INTERNAL_HOST
self.external_host = external_host or DEFAULT_EXTERNAL_HOST
self.genesis_data = genesis_data
self.label = label or ident
self.color = color
self.prefix = prefix
self.timing = timing
self.timing_log = timing_log
self.postgres = DEFAULT_POSTGRES if postgres is None else postgres
self.extra_args = extra_args
if RUN_MODE == "pwd":
self.endpoint = f"http://{self.external_host}".replace(
"{PORT}", str(http_port)
)
else:
self.endpoint = f"http://{self.external_host}:{http_port}"
self.admin_url = f"http://{self.internal_host}:{admin_port}"
self.webhook_port = None
self.webhook_url = None
self.webhook_site = None
self.params = params
self.proc = None
self.client_session: ClientSession = ClientSession()
rand_name = str(random.randint(100_000, 999_999))
self.seed = (
("my_seed_000000000000000000000000" + rand_name)[-32:]
if seed == "random"
else seed
)
self.storage_type = params.get("storage_type")
self.wallet_type = params.get("wallet_type", "indy")
self.wallet_name = (
params.get("wallet_name") or self.ident.lower().replace(" ", "") + rand_name
)
self.wallet_key = params.get("wallet_key") or self.ident + rand_name
self.did = None
self.wallet_stats = []
async def register_schema_and_creddef(self, schema_name, version, schema_attrs):
# Create a schema
schema_body = {
"schema_name": schema_name,
"schema_version": version,
"attributes": schema_attrs,
}
schema_response = await self.admin_POST("/schemas", schema_body)
# log_json(json.dumps(schema_response), label="Schema:")
schema_id = schema_response["schema_id"]
log_msg("Schema ID:", schema_id)
# Create a cred def for the schema
credential_definition_body = {"schema_id": schema_id}
credential_definition_response = await self.admin_POST(
"/credential-definitions", credential_definition_body
)
credential_definition_id = credential_definition_response[
"credential_definition_id"
]
log_msg("Cred def ID:", credential_definition_id)
return (schema_id, credential_definition_id)
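    # Illustrative usage sketch (inside an async caller; the schema name and
    # attribute names below are hypothetical):
    #   schema_id, cred_def_id = await agent.register_schema_and_creddef(
    #       "degree schema", "1.0", ["name", "date", "degree"])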
def get_agent_args(self):
result = [
("--endpoint", self.endpoint),
("--label", self.label),
"--auto-ping-connection",
"--auto-respond-messages",
("--inbound-transport", "http", "0.0.0.0", str(self.http_port)),
("--outbound-transport", "http"),
("--admin", "0.0.0.0", str(self.admin_port)),
"--admin-insecure-mode",
("--wallet-type", self.wallet_type),
("--wallet-name", self.wallet_name),
("--wallet-key", self.wallet_key),
]
if self.genesis_data:
result.append(("--genesis-transactions", self.genesis_data))
if self.seed:
result.append(("--seed", self.seed))
if self.storage_type:
result.append(("--storage-type", self.storage_type))
if self.timing:
result.append("--timing")
if self.timing_log:
result.append(("--timing-log", self.timing_log))
if self.postgres:
result.extend(
[
("--wallet-storage-type", "postgres_storage"),
("--wallet-storage-config", json.dumps(self.postgres_config)),
("--wallet-storage-creds", json.dumps(self.postgres_creds)),
]
)
if self.webhook_url:
result.append(("--webhook-url", self.webhook_url))
if self.extra_args:
result.extend(self.extra_args)
return result
@property
def prefix_str(self):
if self.prefix:
return f"{self.prefix:10s} |"
async def register_did(self, ledger_url: str = None, alias: str = None):
self.log(f"Registering {self.ident} with seed {self.seed}")
if not ledger_url:
ledger_url = LEDGER_URL
if not ledger_url:
ledger_url = f"http://{self.external_host}:9000"
data = {"alias": alias or self.ident, "seed": self.seed, "role": "TRUST_ANCHOR"}
async with self.client_session.post(
ledger_url + "/register", json=data
) as resp:
if resp.status != 200:
raise Exception(f"Error registering DID, response code {resp.status}")
nym_info = await resp.json()
self.did = nym_info["did"]
self.log(f"Got DID: {self.did}")
def handle_output(self, *output, source: str = None, **kwargs):
end = "" if source else "\n"
if source == "stderr":
color = "fg:ansired"
elif not source:
color = self.color or "fg:ansiblue"
else:
color = None
log_msg(*output, color=color, prefix=self.prefix_str, end=end, **kwargs)
def log(self, *msg, **kwargs):
self.handle_output(*msg, **kwargs)
def log_json(self, data, label: str = None, **kwargs):
log_json(data, label=label, prefix=self.prefix_str, **kwargs)
def log_timer(self, label: str, show: bool = True, **kwargs):
return log_timer(label, show, logger=self.log, **kwargs)
def _process(self, args, env, loop):
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
encoding="utf-8",
)
loop.run_in_executor(
None,
output_reader,
proc.stdout,
functools.partial(self.handle_output, source="stdout"),
)
loop.run_in_executor(
None,
output_reader,
proc.stderr,
functools.partial(self.handle_output, source="stderr"),
)
return proc
def get_process_args(self, bin_path: str = None):
cmd_path = "aca-py"
if bin_path is None:
bin_path = DEFAULT_BIN_PATH
if bin_path:
cmd_path = os.path.join(bin_path, cmd_path)
return list(flatten((["python3", cmd_path, "start"], self.get_agent_args())))
async def start_process(
self, python_path: str = None, bin_path: str = None, wait: bool = True
):
my_env = os.environ.copy()
python_path = DEFAULT_PYTHON_PATH if python_path is None else python_path
if python_path:
my_env["PYTHONPATH"] = python_path
agent_args = self.get_process_args(bin_path)
# start agent sub-process
loop = asyncio.get_event_loop()
self.proc = await loop.run_in_executor(
None, self._process, agent_args, my_env, loop
)
if wait:
await asyncio.sleep(1.0)
await self.detect_process()
def _terminate(self):
if self.proc and self.proc.poll() is None:
self.proc.terminate()
try:
self.proc.wait(timeout=0.5)
self.log(f"Exited with return code {self.proc.returncode}")
except subprocess.TimeoutExpired:
msg = "Process did not terminate in time"
self.log(msg)
raise Exception(msg)
async def terminate(self):
loop = asyncio.get_event_loop()
if self.proc:
await loop.run_in_executor(None, self._terminate)
await self.client_session.close()
if self.webhook_site:
await self.webhook_site.stop()
async def listen_webhooks(self, webhook_port):
self.webhook_port = webhook_port
if RUN_MODE == "pwd":
self.webhook_url = f"http://localhost:{str(webhook_port)}/webhooks"
else:
self.webhook_url = (
f"http://{self.external_host}:{str(webhook_port)}/webhooks"
)
app = web.Application()
app.add_routes([web.post("/webhooks/topic/{topic}/", self._receive_webhook)])
runner = web.AppRunner(app)
await runner.setup()
self.webhook_site = web.TCPSite(runner, "0.0.0.0", webhook_port)
await self.webhook_site.start()
    async def _receive_webhook(self, request: web.Request):
topic = request.match_info["topic"]
payload = await request.json()
await self.handle_webhook(topic, payload)
return web.Response(text="")
async def handle_webhook(self, topic: str, payload):
if topic != "webhook": # would recurse
handler = f"handle_{topic}"
method = getattr(self, handler, None)
if method:
await method(payload)
else:
log_msg(
f"Error: agent {self.ident} "
f"has no method {handler} "
f"to handle webhook on topic {topic}"
)
async def admin_request(
self, method, path, data=None, text=False, params=None
) -> ClientResponse:
params = {k: v for (k, v) in (params or {}).items() if v is not None}
async with self.client_session.request(
method, self.admin_url + path, json=data, params=params
) as resp:
resp.raise_for_status()
resp_text = await resp.text()
if not resp_text and not text:
return None
if not text:
try:
return json.loads(resp_text)
except json.JSONDecodeError as e:
raise Exception(f"Error decoding JSON: {resp_text}") from e
return resp_text
async def admin_GET(self, path, text=False, params=None) -> ClientResponse:
try:
return await self.admin_request("GET", path, None, text, params)
except ClientError as e:
self.log(f"Error during GET {path}: {str(e)}")
raise
async def admin_POST(
self, path, data=None, text=False, params=None
) -> ClientResponse:
try:
return await self.admin_request("POST", path, data, text, params)
except ClientError as e:
self.log(f"Error during POST {path}: {str(e)}")
raise
async def detect_process(self):
async def fetch_status(url: str, timeout: float):
text = None
start = default_timer()
async with ClientSession(timeout=ClientTimeout(total=3.0)) as session:
while default_timer() - start < timeout:
try:
async with session.get(url) as resp:
if resp.status == 200:
text = await resp.text()
break
except (ClientError, asyncio.TimeoutError):
pass
await asyncio.sleep(0.5)
return text
status_url = self.admin_url + "/status"
status_text = await fetch_status(status_url, START_TIMEOUT)
if not status_text:
raise Exception(
"Timed out waiting for agent process to start. "
+ f"Admin URL: {status_url}"
)
ok = False
try:
status = json.loads(status_text)
ok = isinstance(status, dict) and "version" in status
except json.JSONDecodeError:
pass
if not ok:
raise Exception(
f"Unexpected response from agent process. Admin URL: {status_url}"
)
async def fetch_timing(self):
status = await self.admin_GET("/status")
return status.get("timing")
    def format_timing(self, timing: dict):
result = []
for name, count in timing["count"].items():
result.append(
(
name[:35],
count,
timing["total"][name],
timing["avg"][name],
timing["min"][name],
timing["max"][name],
)
)
result.sort(key=lambda row: row[2], reverse=True)
yield "{:35} | {:>12} {:>12} {:>10} {:>10} {:>10}".format(
"", "count", "total", "avg", "min", "max"
)
yield "=" * 96
yield from (
"{:35} | {:12d} {:12.3f} {:10.3f} {:10.3f} {:10.3f}".format(*row)
for row in result
)
yield ""
async def reset_timing(self):
await self.admin_POST("/status/reset", text=True)
@property
def postgres_config(self):
return {
"url": f"{self.internal_host}:5432",
"tls": "None",
"max_connections": 5,
"min_idle_time": 0,
"connection_timeout": 10,
}
@property
def postgres_creds(self):
return {
"account": "postgres",
"password": "mysecretpassword",
"admin_account": "postgres",
"admin_password": "mysecretpassword",
}
async def collect_postgres_stats(self, ident: str, vacuum_full: bool = True):
creds = self.postgres_creds
conn = await asyncpg.connect(
host=self.internal_host,
port="5432",
user=creds["admin_account"],
password=creds["admin_password"],
database=self.wallet_name,
)
tables = ("items", "tags_encrypted", "tags_plaintext")
for t in tables:
await conn.execute(f"VACUUM FULL {t}" if vacuum_full else f"VACUUM {t}")
sizes = await conn.fetch(
"""
SELECT relname AS "relation",
pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname = 'public'
ORDER BY pg_total_relation_size(C.oid) DESC;
"""
)
results = {k: [0, "0B"] for k in tables}
for row in sizes:
if row["relation"] in results:
results[row["relation"]][1] = row["total_size"].replace(" ", "")
for t in tables:
row = await conn.fetchrow(f"""SELECT COUNT(*) AS "count" FROM {t}""")
results[t][0] = row["count"]
self.wallet_stats.append((ident, results))
await conn.close()
def format_postgres_stats(self):
if not self.wallet_stats:
return
yield "{:30} | {:>17} | {:>17} | {:>17}".format(
f"{self.wallet_name} DB", "items", "tags_encrypted", "tags_plaintext"
)
yield "=" * 90
for ident, stats in self.wallet_stats:
yield "{:30} | {:8d} {:>8} | {:8d} {:>8} | {:8d} {:>8}".format(
ident,
stats["items"][0],
stats["items"][1],
stats["tags_encrypted"][0],
stats["tags_encrypted"][1],
stats["tags_plaintext"][0],
stats["tags_plaintext"][1],
)
yield ""
def reset_postgres_stats(self):
self.wallet_stats.clear()
|
py | 7df6fe8a527db90d00acb2b8654afb09b9d47fe1 | from services.proto import database_pb2
from services.proto import mdc_pb2
def md_to_html(md, body):
convert_req = mdc_pb2.MDRequest(md_body=body)
res = md.MarkdownToHTML(convert_req)
return res.html_body
def convert_to_tags_string(tags_array):
    # Tags are joined with "|", so any literal "|" inside a tag is URL-encoded first
tags_array = [x.replace("|", "%7C") for x in tags_array]
return "|".join(tags_array)
def get_article(logger, db, global_id=None, ap_id=None):
"""
Retrieve a single PostEntry from the database.
Returns None on error.
"""
logger.info("Getting article global_id: %s, ap_id: %s", global_id, ap_id)
resp = db.Posts(database_pb2.PostsRequest(
request_type=database_pb2.PostsRequest.FIND,
match=database_pb2.PostsEntry(
global_id=global_id,
ap_id=ap_id,
)
))
if resp.result_type != database_pb2.PostsResponse.OK:
logger.error("Error getting article: %s", resp.error)
return None
elif len(resp.results) == 0:
logger.error("Could not find article")
return None
return resp.results[0]
def delete_article(logger, db, global_id=None, ap_id=None):
"""
Deletes an article from the database safely (removing all references).
Returns True on success and False on error.
"""
logger.info("Deleting post global_id: %s, ap_id: %s", global_id, ap_id)
resp = db.SafeRemovePost(database_pb2.PostsEntry(
global_id=global_id,
ap_id=ap_id,
))
if resp.result_type != database_pb2.PostsResponse.OK:
logger.error("Error deleting from DB: %s", resp.error)
return False
return True
def get_sharers_of_article(logger, db, global_id):
logger.info("Getting sharers of article %d", global_id)
resp = db.GetSharersOfPost(database_pb2.SharesEntry(
global_id=global_id,
))
if resp.result_type != database_pb2.SharesResponse.OK:
logger.error("Error getting sharers: %s", resp.error)
return None
return list(e.sharer_id for e in resp.results)
|
py | 7df6fed7b45aa7cfaef3020894f9bdabe0b13006 | #
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import logging
import json
import bibot_helpers as helpers
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def lambda_handler(event, context):
logger.debug('<<BIBot>> Lex event info = ' + json.dumps(event))
session_attributes = event['sessionAttributes']
if session_attributes is None:
session_attributes = {}
logger.debug('<<BIBot>> lambda_handler: session_attributes = ' + json.dumps(session_attributes))
return hello_intent_handler(event, session_attributes)
def hello_intent_handler(intent_request, session_attributes):
session_attributes['resetCount'] = '0'
session_attributes['finishedCount'] = '0'
# don't alter session_attributes['lastIntent'], let BIBot remember the last used intent
askCount = helpers.increment_counter(session_attributes, 'greetingCount')
# build response string
if askCount == 1: response_string = "Hello! How can I help?"
elif askCount == 2: response_string = "I'm here"
elif askCount == 3: response_string = "I'm listening"
elif askCount == 4: response_string = "Yes?"
elif askCount == 5: response_string = "Really?"
else: response_string = 'Ok'
return helpers.close(session_attributes, 'Fulfilled', {'contentType': 'PlainText','content': response_string})
|
py | 7df6feeb09f4e9a9fe66fc79aba26380c229c9d5 | from GifFinder import GifFinder
gif_finder = GifFinder(word_vector_datafile = '../Data/WordVectors/word_dict.pkl')
test_phrase = 'party'
search_result_inds = gif_finder.FindGif(test_phrase)
print('Sample search query: ' + test_phrase)
for i in range(min(3, len(search_result_inds))):
print('option ' + str(i + 1) + ': ' + gif_finder.gif_titles[search_result_inds[i]])
print('try another search query')
while True:
phrase = input()
search_result_inds = gif_finder.FindGif(phrase)
for i in range(min(3, len(search_result_inds))):
print('option ' + str(i + 1) + ': ' + gif_finder.gif_titles[search_result_inds[i]])
|
py | 7df700106ecd55ed250ed503be57d9747edbf9db | def detectCapitalUse(self, word):
    # Valid when the word is all caps, all lower case, or capitalized.
    return word.isupper() or word.islower() or word.istitle()
    # Equivalent alternative:
    # return word in [word.upper(), word.lower(), word.capitalize()] |
py | 7df7006b9fe82963c36bfcabbca251aca0137385 | import gensim
import sys
from gensim import models
import itertools
from gensim import utils
class DoubleTaggedLineDocument(object):
def __init__(self, corpaFile, messagesFile):
self.source = corpaFile
self.messageReader = MessageReader(messagesFile)
def processFile(self):
try:
self.source.seek(0)
for item_no, line in enumerate(self.source):
yield models.doc2vec.TaggedDocument(utils.to_unicode(line).split(), [item_no])
except AttributeError:
with utils.smart_open(self.source) as fin:
for item_no, line in enumerate(fin):
yield models.doc2vec.TaggedDocument(utils.to_unicode(line).split(), [item_no])
def __iter__(self):
return itertools.chain(self.messageReader.processFile(),self.processFile())
class MessageReader():
def __init__(self, messageFile):
self.messageFile = messageFile
def method_name(self, line):
splitted = utils.to_unicode(line).split()
return models.doc2vec.TaggedDocument(splitted[3:], splitted[:3])
def processFile(self):
try:
            self.messageFile.seek(0)
for item_no, line in enumerate(self.messageFile):
yield self.method_name(line)
except AttributeError:
with utils.smart_open(self.messageFile) as fin:
for item_no, line in enumerate(fin):
yield self.method_name(line)
def __iter__(self):
return self.processFile()
wordVecSize = int(sys.argv[3])
sentences = DoubleTaggedLineDocument(sys.argv[1],sys.argv[2])
messages = list(sentences.messageReader)
count = len(messages)
model = models.doc2vec.Doc2Vec(sentences,size = wordVecSize)
docvecs = list(model.docvecs)[:count]
for idx,element in enumerate(docvecs):
    print(u'{0} {1}'.format(' '.join(messages[idx].tags), ' '.join(str(x) for x in element))) |
py | 7df70104b4bb27d2f273a46fe6137b4b1a146618 | from copy import deepcopy
class SecureList():
def __init__(self, l):
self.list=[i for i in l]
def __getitem__(self, index):
temp=self.list[index]
self.list.pop(index)
return temp
def __str__(self):
temp=deepcopy(self.list)
self.list.clear()
return "["+", ".join(str(i) for i in temp)+"]"
def __len__(self):
return len(self.list)
def __repr__(self):
temp=deepcopy(self.list)
self.list.clear()
return str(temp) |
py | 7df702d0084b530bf9d1a07ee20d068615deec01 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
from functools import wraps
__all__ = list(map(str, ['ChainMap']))
# pylint:disable=unused-import
try:
    from collections import ChainMap as _ChainMap
except ImportError:
from chainmap import ChainMap as _ChainMap
# Make sure that `ChainMap` is a new-style class.
@wraps(_ChainMap, updated=())
class ChainMap(_ChainMap, object):
__slots__ = ()
|
py | 7df7037ac7b92f10a02c7818b5a6f2fd9cd10ea7 | from celery import task
@task(name='astral.settings')
def settings(holder):
return holder
@task(name='astral.mixes')
def mixes(holder):
return None |
py | 7df7046e8d3333312902cc8e723192420931b320 | """
Copyright 2021-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import datetime
import bcrypt
from sqlalchemy import func
from flask_restx import Namespace, fields
from backend import app
from backend.app.shared import client
class User(app.db.Model):
username = app.db.Column(app.db.VARCHAR(
255), primary_key=True, nullable=False)
password = app.db.Column(app.db.VARCHAR(255), nullable=False)
role = app.db.Column(app.db.VARCHAR(255), nullable=False)
timestamp = app.db.Column(app.db.DateTime, server_default=func.now())
def __init__(self, username, password, role):
self.username = username
password_hash = bcrypt.hashpw(
password.encode('utf8'), bcrypt.gensalt(14))
# Decode Hash to Prevent Encoding Twice (POSTGRESQL Encodes By Default)
self.password = password_hash.decode('utf8')
self.registered_on = datetime.datetime.now()
self.role = role
REGION = os.environ['AWS_DEFAULT_REGION']
PREFIX = os.environ['PREFIX']
# SM_CLIENT = SecretsManager(REGION)
PRIVATE_KEY = client.get_secret(f'{PREFIX}/otter/private_key', region=REGION)
PUBLIC_KEY = client.get_secret(f'{PREFIX}/otter/public_key', region=REGION)
rbac = ['ADMIN', 'DEVELOPER', 'PRIVILEGED']
admin_permissions = ['ADMIN']
privileged_permissions = ['ADMIN', 'PRIVILEGED']
developer_permissions = ['ADMIN', 'PRIVILEGED', 'DEVELOPER']
# ADMIN: Create Users/Delete Users/List Users
# PRIVILEGED: Delete Assets/Create Assets/Update Assets
# DEVELOPER: Get Assets/Rotate Certificate
# Admin Namespace
admin_namespace = Namespace('admin', description='Admin Operations')
# Authentication Parser
authentication_parser = admin_namespace.parser()
authentication_parser.add_argument(
'Authorization', location='headers', required=True, type=str, help='Bearer Access Token')
# Database Object
user_model = admin_namespace.model(
'user_object', {
'username': fields.String(),
'role': fields.String(),
'timestamp': fields.DateTime()
}
)
# Login Response
user_token = admin_namespace.model(
'token', {
'token': fields.String(description='JSON Web Token')
}
)
# User Creation Object
create_user = admin_namespace.model(
'create_user',
{
'username': fields.String(description='Username', required=True),
'password': fields.String(description='Password', required=True),
'role': fields.String(description='[ADMIN, DEVELOPER, PRIVILEGED]', required=True)
}
)
# API Namespace
api_namespace = Namespace('api', description='API Operations')
# Asset Object
asset_output = api_namespace.model(
'asset_object', {
'system_name': fields.String(),
'common_name': fields.String(),
'certificate_authority': fields.String(),
'certificate_expiration': fields.String(),
'data_center': fields.String(),
'device_model': fields.String(),
'host_platform': fields.String(),
'ip_address': fields.String(),
'os_version': fields.String(),
'origin': fields.String(),
'subject_alternative_name': fields.List(fields.String())
}
)
asset_input = api_namespace.model(
'asset_input', {
'system_name': fields.String(),
'common_name': fields.String(),
'certificate_authority': fields.String(),
'data_center': fields.String(),
'device_model': fields.String(),
'host_platform': fields.String(),
'ip_address': fields.String(),
'os_version': fields.String(),
'subject_alternative_name': fields.List(fields.String()),
}
)
# User Namespace
user_namespace = Namespace('user', description='User Operations')
# User Login Body
user_login = admin_namespace.model(
'user_login', {
'username': fields.String(description='Username', required=True),
'password': fields.String(description='User Password', required=True),
}
)
# User Update Password
user_credentials = admin_namespace.model(
'user_credentials',
{
'username': fields.String(description='Username', required=True),
'password': fields.String(description='Current User Password', required=True),
'updated_password': fields.String(description='Updated User Password', required=True)
}
)
|
py | 7df704c37d8a2eae804fa645b978bdf724cdb258 | '''
Created on Oct 8, 2012
@author: Shunping Huang
'''
from lapels import cigarutils
from lapels import cigarbuilder
from lapels.regionutils import makeReadRegion
import unittest
class TestCigarBuilder(unittest.TestCase):
def test0(self):
regions = []
add = regions.append
## an inserted I_1 before any M_1
add(makeReadRegion(1, '2I', -1, -2))
add(makeReadRegion(0, '4M', 0, 3))
## an inserted I_1 after an M_1
add(makeReadRegion(1, '2I', -1, -1))
## an inserted I_1 after an inserted I_1
add(makeReadRegion(1, '3I', -1, -1))
## a gap
add(makeReadRegion(0, '1M', 5, 5))
## an inserted I_1 after a match I_1
add(makeReadRegion(1, '4I', 6, 5))
## a gap here
add(makeReadRegion(0, '2M', 7, 8))
## an M_1 that is an insertion
add(makeReadRegion(0, '2I', 9, 8))
add(makeReadRegion(1, '3I', 9, 8))
## an M_1 that is an insertion
add(makeReadRegion(0, '4I', 9, 8))
add(makeReadRegion(1, '5I', 9, 8))
## a gap here
add(makeReadRegion(0, '5M', 10, 14))
cb = cigarbuilder.CigarBuilder()
cigar = cb.build(regions)
print(cigar)
self.assertEqual(cigar, [(1, 2), (0, 4), (1, 2), (1, 3), (2, 1),
(0, 1), (1, 4), (2, 1), (0, 2), (1, 2),
(1, 3), (1, 4), (1, 5), (2, 1), (0, 5)])
self.assertEqual(cigarutils.toString(cigar),
'2I,4M,2I,3I,1D,1M,4I,1D,2M,2I,3I,4I,5I,1D,5M')
def test1(self):
regions = []
add = regions.append
## an inserted I_1 before any M_1
add(makeReadRegion(1, '2I', -1, -2))
add(makeReadRegion(0, '4M', 0, 3))
## an inserted I_1 after an M_1
add(makeReadRegion(1, '2I', -1, -1))
## an inserted I_1 after an inserted I_1
add(makeReadRegion(1, '3I', -1, -1))
## a gap
##
add(makeReadRegion(1, '1M', 5, 5))
## an inserted I_1 after a match I_1
add(makeReadRegion(1, '4I', 6, 5))
## a gap here
add(makeReadRegion(0, '2M', 7, 8))
## an M_1 that is an insertion
add(makeReadRegion(0, '2I', 9, 8))
add(makeReadRegion(1, '3I', 9, 8))
## an M_1 that is an insertion
add(makeReadRegion(0, '4I', 9, 8))
add(makeReadRegion(1, '5I', 9, 8))
## a gap here
add(makeReadRegion(0, '5M', 10, 14))
cb = cigarbuilder.CigarBuilder()
cigar = cb.build(regions)
print(cigar)
self.assertEqual(cigar, [(1, 2), (0, 4), (1, 2), (1, 3), (2, 1),
(1, 5), (2, 1), (0, 2), (1, 2),
(1, 3), (1, 4), (1, 5), (2, 1), (0, 5)])
self.assertEqual(cigarutils.toString(cigar),
'2I,4M,2I,3I,1D,1M,4I,1D,2M,2I,3I,4I,5I,1D,5M')
# def test2(self):
# regions = []
# add = regions.append
# add(makeReadRegion(1, '1I', -1, -2))
# add(makeReadRegion(2, '10I', 5, 4)) ## D_1 of I_0
# add(makeReadRegion(1, '2I', 5, 4))
# add(makeReadRegion(1, '3I', 5, 4))
# add(makeReadRegion(2, '20I', 5, 4,)) ## D_1 of I_0
# add(makeReadRegion(1, '4I', 5, 4 ))
# add(makeReadRegion(1, '4I', 7, 6 ))
# cb = cigarbuilder.CigarBuilder()
# cigar = cb.build(regions)
# self.assertEqual(cigar, [(1, 1), (1, 2), (1, 3), (1, 4), (1, 20), (2, 2), (1, 4)])
# def test3(self):
# regions = []
# add = regions.append
# add(makeReadRegion(0, '25M', 10, 34))
# add(makeReadRegion(1, '1I', 35, 34))
# add(makeReadRegion(3, '1N', 35, 35))
# add(makeReadRegion(0, '74M', 36, 109))
# cb = cigarbuilder.CigarBuilder()
# cigar = cb.build(regions)
# self.assertEqual(cigarutils.simplify(cigar),[(0, 25), (1, 1), (3, 1), (0, 74)])
# def test4(self):
# regions = []
# add = regions.append
# add(makeReadRegion(0, '25M', 10, 34))
# add(makeReadRegion(1, '1M,1I', 35, 35))
# add(makeReadRegion(3, '1N', 35, 35))
# add(makeReadRegion(0, '74M', 36, 109))
# cb = cigarbuilder.CigarBuilder()
# cigar = cb.build(regions)
# self.assertEqual(cigarutils.simplify(cigar),[(0,26),(1,1),(0,74)])
# def test5(self):
# regions = []
# add = regions.append
# add(makeReadRegion(0, '25M', 10, 34))
# add(makeReadRegion(3, '1N', 35, 35))
# add(makeReadRegion(1, '1M,1I', 35, 35))
# add(makeReadRegion(0, '74M', 36, 109))
# cb = cigarbuilder.CigarBuilder()
# cigar = cb.build(regions)
# self.assertEqual(cigarutils.simplify(cigar),[(0,26),(1,1),(0,74)])
if __name__ == '__main__':
unittest.main()
|
py | 7df705981bd4f013ce2613d37e88dd70fbf3d653 | import fnmatch
import os
def custom_import_module(full_config_path):
"""
Import and execute a python file as a module. Useful for import the experiment module and the
analysis module.
Args:
full_config_path: Full path to the python file.
Returns: The python file as a module
"""
import importlib.util
spec = importlib.util.spec_from_file_location("mod", full_config_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
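# Illustrative usage sketch (the path and attribute below are hypothetical):
#     experiment = custom_import_module('/path/to/experiment.py')
#     experiment.run()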
def checkpoint_from_trained_directory(full_trained_directory, checkpoint_desired):
"""
Return the checkpoint directory to load the policy. If checkpoint_desired is specified and
found, then return that policy. Otherwise, return the last policy.
"""
checkpoint_dirs = find_dirs_in_dir('checkpoint*', full_trained_directory)
# Try to load the desired checkpoint
if checkpoint_desired is not None: # checkpoint specified
for checkpoint in checkpoint_dirs:
if checkpoint_desired == int(checkpoint.split('/')[-1].split('_')[-1]):
return checkpoint, checkpoint_desired
import warnings
warnings.warn(
f'Could not find checkpoint_{checkpoint_desired}. Attempting to load the last '
'checkpoint.'
)
# Load the last checkpoint
max_checkpoint = None
max_checkpoint_value = 0
for checkpoint in checkpoint_dirs:
checkpoint_value = int(checkpoint.split('/')[-1].split('_')[-1])
if checkpoint_value > max_checkpoint_value:
max_checkpoint_value = checkpoint_value
max_checkpoint = checkpoint
if max_checkpoint is None:
raise FileNotFoundError("Did not find a checkpoint file in the given directory.")
return max_checkpoint, max_checkpoint_value
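# Illustrative sketch (hypothetical directory layout): if ./results/run1 holds
# checkpoint_10 and checkpoint_250, then
#     checkpoint_from_trained_directory('./results/run1', None)
# returns ('./results/run1/checkpoint_250', 250), while checkpoint_desired=10
# selects checkpoint_10 instead.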
def find_dirs_in_dir(pattern, path):
"""
Traverse the path looking for directories that match the pattern.
Return: list of paths that match
"""
result = []
for root, dirs, files in os.walk(path):
for name in dirs:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
|
py | 7df70609332e071ba97fbfb8589b51dc20ae0157 | # Generated by Django 3.0.4 on 2020-04-29 10:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('SmartNewsApp', '0002_comment'),
]
operations = [
migrations.AddField(
model_name='comment',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
py | 7df707b2034b88c607cc2818f6c37c1fa1db9d2e | """Supports loading :class:`.Submission` directly from classic data."""
import copy
from itertools import groupby
from operator import attrgetter
from typing import List, Optional, Iterable, Dict
from arxiv.base import logging
from arxiv.license import LICENSES
from ... import domain
from . import models
from .patch import patch_withdrawal, patch_jref, patch_cross, patch_hold
logger = logging.getLogger(__name__)
logger.propagate = False
def load(rows: Iterable[models.Submission]) -> Optional[domain.Submission]:
"""
Load a submission entirely from its classic database rows.
Parameters
----------
rows : list
Items are :class:`.models.Submission` rows loaded from the classic
database belonging to a single arXiv e-print/submission group.
Returns
-------
:class:`.domain.Submission` or ``None``
Aggregated submission object (with ``.versions``). If there is no
representation (e.g. all rows are deleted), returns ``None``.
"""
versions: List[domain.Submission] = []
submission_id: Optional[int] = None
# We want to work within versions, and (secondarily) in order of creation
# time.
rows = sorted(rows, key=lambda o: o.version)
logger.debug('Load from rows %s', [r.submission_id for r in rows])
for version, version_rows in groupby(rows, key=attrgetter('version')):
# Creation time isn't all that precise in the classic database, so
# we'll use submission ID instead.
these_version_rows = sorted([v for v in version_rows],
key=lambda o: o.submission_id)
logger.debug('Version %s: %s', version, version_rows)
# We use the original ID to track the entire lifecycle of the
# submission in NG.
if version == 1:
submission_id = these_version_rows[0].submission_id
logger.debug('Submission ID: %s', submission_id)
# Find the creation row. There may be some false starts that have been
# deleted, so we need to advance to the first non-deleted 'new' or
# 'replacement' row.
version_submission: Optional[domain.Submission] = None
while version_submission is None:
try:
row = these_version_rows.pop(0)
except IndexError:
break
if row.is_new_version() and \
(row.type == row.NEW_SUBMISSION or not row.is_deleted()):
# Get the initial state of the version.
version_submission = to_submission(row, submission_id)
logger.debug('Got initial state: %s', version_submission)
if version_submission is None:
logger.debug('Nothing to work with for this version')
continue
# If this is not the first version, carry forward any requests.
if len(versions) > 0:
logger.debug('Bring user_requests forward from last version')
version_submission.user_requests.update(versions[-1].user_requests)
for row in these_version_rows: # Remaining rows, since we popped the others.
# We are treating JREF submissions as though there is no approval
# process; so we can just ignore deleted JREF rows.
if row.is_jref() and not row.is_deleted():
# This should update doi, journal_ref, report_num.
version_submission = patch_jref(version_submission, row)
# For withdrawals and cross-lists, we want to get data from
# deleted rows since we keep track of all requests in the NG
# submission.
elif row.is_withdrawal():
# This should update the reason_for_withdrawal (if applied),
# and add a WithdrawalRequest to user_requests.
version_submission = patch_withdrawal(version_submission, row)
elif row.is_crosslist():
# This should update the secondary classifications (if applied)
# and add a CrossListClassificationRequest to user_requests.
version_submission = patch_cross(version_submission, row)
# We want hold information represented as a Hold on the submission
# object, not just the status.
if version_submission.is_on_hold:
version_submission = patch_hold(version_submission, row)
versions.append(version_submission)
if not versions:
return None
submission = copy.deepcopy(versions[-1])
submission.versions = [ver for ver in versions if ver and ver.is_announced]
return submission
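# Illustrative usage sketch (assumes a SQLAlchemy session bound to the classic
# database; the identifier below is hypothetical):
#     rows = session.query(models.Submission) \
#         .filter(models.Submission.doc_paper_id == '1901.00123').all()
#     submission = load(rows)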
def to_submission(row: models.Submission,
submission_id: Optional[int] = None) -> domain.Submission:
"""
Generate a representation of submission state from a DB instance.
Parameters
----------
row : :class:`.models.Submission`
Database row representing a :class:`.domain.submission.Submission`.
submission_id : int or None
If provided the database value is overridden when setting
:attr:`domain.Submission.submission_id`.
Returns
-------
:class:`.domain.submission.Submission`
"""
status = status_from_classic(row.status)
primary = row.primary_classification
if row.submitter is None:
submitter = domain.User(native_id=row.submitter_id,
email=row.submitter_email)
else:
submitter = row.get_submitter()
if submission_id is None:
submission_id = row.submission_id
license: Optional[domain.License] = None
if row.license:
label = LICENSES[row.license]['label']
license = domain.License(uri=row.license, name=label)
primary_clsn: Optional[domain.Classification] = None
if primary and primary.category:
_category = domain.Category(primary.category)
primary_clsn = domain.Classification(category=_category)
secondary_clsn = [
domain.Classification(category=domain.Category(db_cat.category))
for db_cat in row.categories if not db_cat.is_primary
]
content: Optional[domain.SubmissionContent] = None
if row.package:
if row.package.startswith('fm://'):
identifier, checksum = row.package.split('://', 1)[1].split('@', 1)
else:
identifier = row.package
checksum = ""
source_format = domain.SubmissionContent.Format(row.source_format)
content = domain.SubmissionContent(identifier=identifier,
compressed_size=0,
uncompressed_size=row.source_size,
checksum=checksum,
source_format=source_format)
assert status is not None
submission = domain.Submission(
submission_id=submission_id,
creator=submitter,
owner=submitter,
status=status,
created=row.get_created(),
updated=row.get_updated(),
source_content=content,
submitter_is_author=bool(row.is_author),
submitter_accepts_policy=bool(row.agree_policy),
submitter_contact_verified=bool(row.userinfo),
is_source_processed=not bool(row.must_process),
submitter_confirmed_preview=bool(row.viewed),
metadata=domain.SubmissionMetadata(title=row.title,
abstract=row.abstract,
comments=row.comments,
report_num=row.report_num,
doi=row.doi,
msc_class=row.msc_class,
acm_class=row.acm_class,
journal_ref=row.journal_ref),
license=license,
primary_classification=primary_clsn,
secondary_classification=secondary_clsn,
arxiv_id=row.doc_paper_id,
version=row.version
)
if row.sticky_status == row.ON_HOLD or row.status == row.ON_HOLD:
submission = patch_hold(submission, row)
elif row.is_withdrawal():
submission = patch_withdrawal(submission, row)
elif row.is_crosslist():
submission = patch_cross(submission, row)
return submission
def status_from_classic(classic_status: int) -> Optional[str]:
"""Map classic status codes to domain submission status."""
return STATUS_MAP.get(classic_status)
# Map classic status to Submission domain status.
STATUS_MAP: Dict[int, str] = {
models.Submission.NOT_SUBMITTED: domain.Submission.WORKING,
models.Submission.SUBMITTED: domain.Submission.SUBMITTED,
models.Submission.ON_HOLD: domain.Submission.SUBMITTED,
models.Submission.NEXT_PUBLISH_DAY: domain.Submission.SCHEDULED,
models.Submission.PROCESSING: domain.Submission.SCHEDULED,
models.Submission.PROCESSING_SUBMISSION: domain.Submission.SCHEDULED,
models.Submission.NEEDS_EMAIL: domain.Submission.SCHEDULED,
models.Submission.ANNOUNCED: domain.Submission.ANNOUNCED,
models.Submission.DELETED_ANNOUNCED: domain.Submission.ANNOUNCED,
models.Submission.USER_DELETED: domain.Submission.DELETED,
models.Submission.DELETED_EXPIRED: domain.Submission.DELETED,
models.Submission.DELETED_ON_HOLD: domain.Submission.DELETED,
models.Submission.DELETED_PROCESSING: domain.Submission.DELETED,
models.Submission.DELETED_REMOVED: domain.Submission.DELETED,
models.Submission.DELETED_USER_EXPIRED: domain.Submission.DELETED,
models.Submission.ERROR_STATE: domain.Submission.ERROR
}
|
py | 7df707f2f07a8f107a953752e6af14b1d04475dc | import hashlib
# Floyd's algorithm (https://en.wikipedia.org/wiki/Cycle_detection)
# We want to find collisions between two strings that begin with this prefix.
my_prefix = '\x41\x61\x99'
# Get the first x bytes of the double md5 hash value
def hash_function(message, x=14, prefix='\x41\x61\x99', debug=False):
temp_hash = hashlib.md5(hashlib.md5(prefix + message).digest()).digest()
if debug is True:
print(message, x, prefix, temp_hash)
return temp_hash[:x]
def hash_function2(message, x=14, prefix='\x41\x61\x99', debug=False):
temp_hash = hashlib.md5(hashlib.md5(prefix + message).digest()).hexdigest()
if debug is True:
print(message, x, prefix, temp_hash)
return temp_hash[:x]
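# Note on the two helpers above: hash_function keeps the first x bytes of the
# double-MD5 digest, while hash_function2 keeps the first x characters of the
# hex digest, so x truncated bytes correspond to 2*x hex characters
# (e.g. x=7 bytes <-> 14 hex characters, as used in save_results below).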
def floyd(x, initial):
# Set a few temp values to zero or none
x0 = initial
m0 = None
m1 = None
# Start
tortoise = hash_function(x0, x, my_prefix)
hare = hash_function(tortoise, x, my_prefix)
# First loop until our hashes are equal
while tortoise != hare:
tortoise = hash_function(tortoise, x, my_prefix)
hare = hash_function(hash_function(hare, x, my_prefix), x, my_prefix)
# Set pointer to initial value
tortoise = x0
    # Second loop
while tortoise != hare:
m0 = tortoise
tortoise = hash_function(tortoise, x, my_prefix)
hare = hash_function(hare, x, my_prefix)
    # Third loop: advance around the cycle until we find the second colliding value
hare = hash_function(tortoise, x, my_prefix)
while tortoise != hare:
m1 = hare
hare = hash_function(hare, x, my_prefix)
# Save results to the file
save_results(m0, m1, hash_function(m0, x, my_prefix), hash_function2(m0, x*2, my_prefix))
def save_results(m0, m1, hash1, hash2):
"""Save results to file."""
with open("message0.bin", "wb") as file:
# Save first message:
file.write(my_prefix + m0)
with open("message1.bin", "wb") as file:
# Save second message:
file.write(my_prefix + m1)
with open("hash_1.bin", "wb") as file:
        # Save binary hash of messages:
file.write(hash1)
with open("hash_2.txt", "w") as file:
        # Save messages and hash as readable text:
file.write("Message 1:\n")
file.write(my_prefix + m0)
file.write("\n\nMessage 2:\n")
file.write(my_prefix + m1)
file.write("\n\nHash:\n")
file.write(hash2)
# Execute the floyd function with an initial value
floyd(x=7, initial="123hsdshd9fh933")
"""
0x416199 -> 4284825
Message A: 42848253cf02b27a4a781
Message B: 42848259dd4879a8d98bb
They both hash to: 0a05085732df91
"""
|
py | 7df708af4cec495aa7b279500687e6de3fb7663e | import numpy as np
from .common import run_monitored, set_mem_rlimit, Benchmark, safe_import
with safe_import():
from scipy.stats import spearmanr
with safe_import():
import scipy.interpolate as interpolate
class Leaks(Benchmark):
unit = "relative increase with repeats"
def track_leaks(self):
set_mem_rlimit()
# Setup temp file, make it fit in memory
repeats = [2, 5, 10, 50, 200]
peak_mems = []
for repeat in repeats:
code = """
import numpy as np
from scipy.interpolate import griddata
def func(x, y):
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
for t in range(%(repeat)d):
for method in ['nearest', 'linear', 'cubic']:
griddata(points, values, (grid_x, grid_y), method=method)
""" % dict(repeat=repeat)
_, peak_mem = run_monitored(code)
peak_mems.append(peak_mem)
corr, p = spearmanr(repeats, peak_mems)
if p < 0.05:
print("*"*79)
print("PROBABLE MEMORY LEAK")
print("*"*79)
else:
print("PROBABLY NO MEMORY LEAK")
return max(peak_mems) / min(peak_mems)
class BenchPPoly(Benchmark):
def setup(self):
np.random.seed(1234)
m, k = 55, 3
x = np.sort(np.random.random(m+1))
c = np.random.random((3, m))
self.pp = interpolate.PPoly(c, x)
npts = 100
self.xp = np.linspace(0, 1, npts)
def time_evaluation(self):
self.pp(self.xp)
class GridData(Benchmark):
param_names = ['n_grids', 'method']
params = [
[10j, 100j, 1000j],
['nearest', 'linear', 'cubic']
]
def setup(self, n_grids, method):
self.func = lambda x, y: x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
self.grid_x, self.grid_y = np.mgrid[0:1:n_grids, 0:1:n_grids]
self.points = np.random.rand(1000, 2)
self.values = self.func(self.points[:, 0], self.points[:, 1])
def time_evaluation(self, n_grids, method):
interpolate.griddata(self.points, self.values, (self.grid_x, self.grid_y), method=method)
class Interpolate1d(Benchmark):
param_names = ['n_samples', 'method']
params = [
[10, 50, 100, 1000, 10000],
['linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'],
]
def setup(self, n_samples, method):
self.x = np.arange(n_samples)
self.y = np.exp(-self.x/3.0)
self.interpolator = interpolate.interp1d(self.x, self.y, kind=method)
self.xp = np.linspace(self.x[0], self.x[-1], 4*n_samples)
def time_interpolate(self, n_samples, method):
"""Time the construction overhead."""
interpolate.interp1d(self.x, self.y, kind=method)
def time_interpolate_eval(self, n_samples, method):
"""Time the evaluation."""
self.interpolator(self.xp)
class Interpolate2d(Benchmark):
param_names = ['n_samples', 'method']
params = [
[10, 50, 100],
['linear', 'cubic', 'quintic'],
]
def setup(self, n_samples, method):
r_samples = n_samples / 2.
self.x = np.arange(-r_samples, r_samples, 0.25)
self.y = np.arange(-r_samples, r_samples, 0.25)
self.xx, self.yy = np.meshgrid(self.x, self.y)
self.z = np.sin(self.xx**2+self.yy**2)
def time_interpolate(self, n_samples, method):
interpolate.interp2d(self.x, self.y, self.z, kind=method)
class Rbf(Benchmark):
param_names = ['n_samples', 'function']
params = [
[10, 50, 100],
['multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', 'thin_plate']
]
def setup(self, n_samples, function):
self.x = np.arange(n_samples)
self.y = np.sin(self.x)
r_samples = n_samples / 2.
self.X = np.arange(-r_samples, r_samples, 0.25)
self.Y = np.arange(-r_samples, r_samples, 0.25)
self.z = np.exp(-self.X**2-self.Y**2)
def time_rbf_1d(self, n_samples, function):
interpolate.Rbf(self.x, self.y, function=function)
def time_rbf_2d(self, n_samples, function):
interpolate.Rbf(self.X, self.Y, self.z, function=function)
class RBFInterpolator(Benchmark):
param_names = ['neighbors', 'n_samples', 'kernel']
params = [
[None, 50],
[10, 100, 1000],
['linear', 'thin_plate_spline', 'cubic', 'quintic', 'multiquadric',
'inverse_multiquadric', 'inverse_quadratic', 'gaussian']
]
def setup(self, neighbors, n_samples, kernel):
rng = np.random.RandomState(0)
self.y = rng.uniform(-1, 1, (n_samples, 2))
self.x = rng.uniform(-1, 1, (n_samples, 2))
self.d = np.sum(self.y, axis=1)*np.exp(-6*np.sum(self.y**2, axis=1))
def time_rbf_interpolator(self, neighbors, n_samples, kernel):
interp = interpolate.RBFInterpolator(
self.y,
self.d,
neighbors=neighbors,
epsilon=5.0,
kernel=kernel
)
interp(self.x)
class UnivariateSpline(Benchmark):
param_names = ['n_samples', 'degree']
params = [
[10, 50, 100],
[3, 4, 5]
]
def setup(self, n_samples, degree):
r_samples = n_samples / 2.
self.x = np.arange(-r_samples, r_samples, 0.25)
self.y = np.exp(-self.x**2) + 0.1 * np.random.randn(*self.x.shape)
def time_univariate_spline(self, n_samples, degree):
interpolate.UnivariateSpline(self.x, self.y, k=degree)
class BivariateSpline(Benchmark):
"""
Author: josef-pktd and scipy mailinglist example
'http://scipy-user.10969.n7.nabble.com/BivariateSpline-examples\
-and-my-crashing-python-td14801.html'
"""
param_names = ['n_samples']
params = [
[10, 20, 30]
]
def setup(self, n_samples):
x = np.arange(0, n_samples, 0.5)
y = np.arange(0, n_samples, 0.5)
x, y = np.meshgrid(x, y)
x = x.ravel()
y = y.ravel()
xmin = x.min()-1
xmax = x.max()+1
ymin = y.min()-1
ymax = y.max()+1
s = 1.1
self.yknots = np.linspace(ymin+s, ymax-s, 10)
self.xknots = np.linspace(xmin+s, xmax-s, 10)
self.z = np.sin(x) + 0.1*np.random.normal(size=x.shape)
self.x = x
self.y = y
def time_smooth_bivariate_spline(self, n_samples):
interpolate.SmoothBivariateSpline(self.x, self.y, self.z)
def time_lsq_bivariate_spline(self, n_samples):
interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)
class Interpolate(Benchmark):
"""
Linear Interpolate in scipy and numpy
"""
param_names = ['n_samples', 'module']
params = [
[10, 50, 100],
['numpy', 'scipy']
]
def setup(self, n_samples, module):
self.x = np.arange(n_samples)
self.y = np.exp(-self.x/3.0)
self.z = np.random.normal(size=self.x.shape)
def time_interpolate(self, n_samples, module):
if module == 'scipy':
interpolate.interp1d(self.x, self.y, kind="linear")
else:
np.interp(self.z, self.x, self.y)
|
py | 7df708e5ae422d91e0f2697878f0bffaa90f6a77 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2018 David Tellenbach <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# For parsing cli arguments
import argparse
# For parsing JSON files
import json
# Plotting library
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
# To access more matplotlib functionality, i.e., default calculated figure
# size
from pylab import rcParams
_version = 0.2
def getVersion(parser):
    '''Return program name, description and current version'''
return "{} - {} - Version {}".format(parser.prog, parser.description, _version)
class PlottingConfiguration:
'''Configuration of the benchmark plot'''
def __init__(self, args):
self.inputFile = args.inputFile
self.outputFile = args.outputFile
self.plotTitle = args.plotTitle
self.timeUnit = args.timeUnit
self.xValue = args.xValue
self.yValue = args.yValue
if args.xLabel is None:
self.xLabel = args.xValue
else:
self.xLabel = args.xLabel
if args.yLabel is None:
self.yLabel = "Time in {}".format(args.timeUnit)
else:
self.yLabel = args.yLabel
self.xTickBegin = args.xTickBegin
self.xTickEnd = args.xTickEnd
self.xTickStep = args.xTickStep
self.benchmarkDescription = args.benchmarkDescription
self.xSize = args.xSize
self.ySize = args.ySize
self.dpi = args.dpi
def convertTimeUnit(value, src, dest):
'''Convert time units'''
    # This function is necessary since popular libraries like datetime cannot
    # handle nanoseconds
if src == dest:
return value
if src == "ns":
if dest == "us":
return value / 1000
if dest == "ms":
return value / 1000000
elif src == "us":
if dest == "ns":
return value * 1000
if dest == "ms":
return value / 1000
elif src == "ms":
if dest == "ns":
return value * 1000000
if dest == "us":
            return value * 1000
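# Quick sanity check of the conversion factors above (doctest-style sketch):
#     >>> convertTimeUnit(1500, "ns", "us")
#     1.5
#     >>> convertTimeUnit(2, "ms", "ns")
#     2000000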
def parseJSON(configuration):
'''Parses JSON file containing benchmark results'''
with open(configuration.inputFile) as fd:
data = json.load(fd)
ret = []
for bench in data["benchmarks"]:
# Convert time units if necessary
if bench["time_unit"] != configuration.timeUnit:
bench[configuration.yValue] = convertTimeUnit(bench[configuration.yValue],
bench["time_unit"],
configuration.timeUnit)
ret.append((bench["benchmark_visualizer_group"], bench[configuration.xValue],
bench[configuration.yValue], configuration.timeUnit))
return ret
def plot(data, configuration):
benchmarkDict = dict()
for bench in data:
# If no list for this benchmark (group) exist, we create one
if bench[0] not in benchmarkDict:
benchmarkDict.update({bench[0]: ([], [])})
# Append x value if necessary
if bench[1] not in benchmarkDict[bench[0]][0]:
benchmarkDict[bench[0]][0].append(bench[1])
# Append y value
benchmarkDict[bench[0]][1].append(bench[2])
# Use passed arguments if possible, otherwise use automatically calculated
# figure size
    if configuration.xSize is None and configuration.ySize is None:
pyplot.figure(dpi=configuration.dpi)
elif configuration.xSize is None:
pyplot.figure(figsize=(rcParams['figure.figsize'][0],
float(configuration.ySize)),
dpi=configuration.dpi)
elif configuration.ySize is None:
pyplot.figure(figsize=(float(configuration.xSize),
rcParams['figure.figsize'][1]),
dpi=configuration.dpi)
else:
pyplot.figure(figsize=(float(configuration.xSize),
float(configuration.ySize)),
dpi=configuration.dpi)
for key, value in benchmarkDict.items():
# Add plotting data
pyplot.plot(value[0], value[1], marker='o',
label=configuration.benchmarkDescription[int(key)])
pyplot.title(configuration.plotTitle)
pyplot.ylabel(configuration.yLabel)
pyplot.xlabel(configuration.xLabel)
pyplot.legend()
pyplot.grid()
# If no end for the x values is set, just take the maximum of them
if configuration.xTickEnd == -1:
for key, val in benchmarkDict.items():
if max(val[0]) > configuration.xTickEnd:
configuration.xTickEnd = max(val[0])
if configuration.xTickStep != "auto":
pyplot.xticks(range(int(configuration.xTickBegin),
int(configuration.xTickEnd)+1, int(configuration.xTickStep)))
pyplot.savefig(configuration.outputFile, bbox_inches='tight')
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description = "Visualize Google Benchmark.",
prog = "Benchmark Visualizer")
parser.add_argument("--version", "-v",
version = getVersion(parser),
action = "version")
parser.add_argument("--input_file", "-i",
metavar = "FILE",
help = "Path to JSON file with benchmark results",
dest = "inputFile",
required = True)
parser.add_argument("--output_file", "-o",
metavar = "FILE",
help = "Path to file where the image of the diagram will "
"be stored.",
dest = "outputFile",
required = True)
parser.add_argument("--title",
metavar = "TITLE",
help = "Diagram title",
dest = "plotTitle",
default = "Benchmark Results")
parser.add_argument("--time_unit",
choices = ["ns", "us", "ms"],
help = "Time unit for measured durations",
dest = "timeUnit",
default = "ns")
parser.add_argument("--x_label",
metavar = "X_LABEL",
dest = "xLabel",
help = "Label on the x axis")
parser.add_argument("--y_label",
metavar = "Y_LABEL",
dest = "yLabel",
help = "Lable on the y axis")
parser.add_argument("--x_value", "-x",
metavar = "X_VALUE",
dest = "xValue",
help = "Name of the counter that stores the x value",
required = True)
parser.add_argument("--y_value", "-y",
choices = ["real_time", "cpu_time"],
metavar = "y_VALUE",
dest = "yValue",
help = "Name of the y value that will be considered",
default = "real_time")
parser.add_argument("--x_tick_begin",
metavar = "VALUE",
help = "Set the begin of the x ticks manually",
dest = "xTickBegin",
default = 0)
parser.add_argument("--x_tick_end",
metavar = "VALUE",
help = "Set the end of the x ticks manually",
dest = "xTickEnd",
default = -1)
parser.add_argument("--x_tick_step",
metavar = "VALUE",
help = "Set the steps of the x ticks manually",
dest = "xTickStep",
default = "auto")
parser.add_argument("--benchmark_description", "-d",
metavar = "DESC",
nargs='*',
help = "Description of benchmarks",
dest = "benchmarkDescription",
required = True)
parser.add_argument("--x_size",
metavar = "VALUE",
help = "The horizontal size of the produced plot in inches",
dest = "xSize")
parser.add_argument("--y_size",
metavar = "VALUE",
help = "The vertical size of the produced plot in inches",
dest = "ySize")
parser.add_argument("--dpi",
type=int,
metavar = "VALUE",
help = "DPI of the produced plot",
dest = "dpi",
default = None)
args = parser.parse_args()
configuration = PlottingConfiguration(args)
data = parseJSON(configuration)
plot(data, configuration)
if __name__ == "__main__":
main()
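# Example invocation (an illustrative sketch only; the script name, input file,
# counter name, and benchmark descriptions below are placeholders, not values
# taken from the original project):
#
#   python visualizer.py \
#       --input_file results.json \
#       --output_file plot.png \
#       --x_value data_size \
#       --benchmark_description "variant A" "variant B" \
#       --title "Benchmark Results" \
#       --x_label "elements" --y_label "time"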
|
py | 7df708e70369272e08b462b4dfac771bc9bb6c16 | import stravalib
from stravalib.client import Client
from configparser import ConfigParser
import json
import os
config = ConfigParser()  # SafeConfigParser is a deprecated alias of ConfigParser
config.read('config.yaml')
client = Client()
client.access_token = config.get('strava', 'Bearer')
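# Note: configparser reads INI-style files, so despite the .yaml filename the
# config file is assumed to look roughly like the sketch below (section and key
# names come from the config.get() call above; the token value is a placeholder):
#
#   [strava]
#   Bearer = <your-strava-access-token>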
def update_strava_archive():
''' update local archive
-- strava has api limits so better to grab once and store
'''
try:
with open('outputfile','r') as fin:
running_data = json.load(fin)
except FileNotFoundError:
running_data = {}
activities = client.get_activities(after = "2012-01-01T00:00:00Z")
activity_data = []
for activity in activities:
if(f'{activity.id}' not in running_data): # saved as string
print(f'{activity.id} ---')
activity_stream = client.get_activity_streams(activity.id, types=['latlng','distance'])
running_data[activity.id] = activity_stream['latlng'].data
with open('outputfile','w') as fout:
json.dump(running_data, fout, indent=0)
if __name__ == "__name__":
update_strava_archive()
|
py | 7df709c4ef289ccf2a7296cf0322aac9509f93f7 | '''
scans
=====
The following methods allow for interaction into the Tenable.io
:devportal:`scans <scans>` API endpoints.
Methods available on ``tio.scans``:
.. rst-class:: hide-signature
.. autoclass:: ScansAPI
.. automethod:: attachment
.. automethod:: configure
.. automethod:: copy
.. automethod:: create
.. automethod:: delete
.. automethod:: delete_history
.. automethod:: details
.. automethod:: export
.. automethod:: history
.. automethod:: host_details
.. automethod:: import_scan
.. automethod:: info
.. automethod:: launch
.. automethod:: list
.. automethod:: pause
.. automethod:: plugin_output
.. automethod:: results
.. automethod:: resume
.. automethod:: schedule
.. automethod:: set_read_status
.. automethod:: status
.. automethod:: stop
.. automethod:: timezones
'''
from .base import TIOEndpoint, TIOIterator
from tenable.utils import dict_merge, policy_settings
from tenable.errors import UnexpectedValueError, FileDownloadError
from datetime import datetime, timedelta
from io import BytesIO
import time
class ScanHistoryIterator(TIOIterator):
'''
The scan history iterator provides a scalable way to work through scan history
result sets of any size. The iterator will walk through each page of data,
returning one record at a time. If it reaches the end of a page of
records, then it will request the next page of information and then continue
to return records from the next page (and the next, and the next) until the
counter reaches the total number of records that the API has reported.
Attributes:
count (int): The current number of records that have been returned
page (list):
The current page of data being walked through. pages will be
cycled through as the iterator requests more information from the
API.
page_count (int): The number of records returned from the current page.
total (int):
The total number of records that exist for the current request.
'''
pass
class ScansAPI(TIOEndpoint):
def _block_while_running(self, scan_id, sleeper=5):
'''
A simple function to block while the scan_id specified is still in a
running state.
'''
running = True
while running:
status = self.results(scan_id)['info']['status']
if status[-2:].lower() == 'ed':
running = False
if running:
time.sleep(sleeper)
def _create_scan_document(self, kw):
'''
Takes the key-worded arguments and will provide a scan settings document
based on the values inputted.
Args:
kw (dict): The keyword dict passed from the user
Returns:
:obj:`dict`:
The resulting scan document based on the kw provided.
'''
scan = {
'settings': dict(),
}
# If a template is specified, then we will pull the listing of available
# templates and set the policy UUID to match the template name given.
if 'template' in kw:
templates = self._api.policies.templates()
scan['uuid'] = templates[self._check(
'template', kw['template'], str,
default='basic',
choices=list(templates.keys())
)]
del(kw['template'])
# If a policy UUID is sent, then we will set the scan template UUID to
# be the UUID that was specified.
if 'policy' in kw:
policies = self._api.policies.list()
match = False
# Here we are going to iterate over each policy in the list, looking
# to see if we see a match in either the name or the id. If we do
# find a match, then we will use the first one that matches, pull
# the editor config, and then use the policy id and scan policy
# template uuid.
for item in policies:
if kw['policy'] in [item['name'], item['id']] and not match:
policy_tmpl = self._api.editor.details('scan/policy', item['id'])
scan['uuid'] = policy_tmpl['uuid']
scan['settings']['policy_id'] = item['id']
match = True
# if no match was discovered, then raise an invalid warning.
if not match:
raise UnexpectedValueError('policy setting is invalid.')
del(kw['policy'])
# if the scanner attribute was set, then we will attempt to figure out
# what scanner to use.
if 'scanner' in kw:
scanners = self._api.scanners.allowed_scanners()
# We will want to attempt to enumerate the scanner list and if
# we see a name match, replace the scanner name with the UUID
# of the scanner instead.
for item in scanners:
if item['name'] == kw['scanner']:
kw['scanner'] = item['id']
# we will always want to attempt to use the UUID first as it's
# the cheapest check that we can run.
scan['settings']['scanner_id'] = self._check(
'scanner', kw['scanner'], 'scanner-uuid',
choices=[s['id'] for s in scanners])
del(kw['scanner'])
# If the targets parameter is specified, then we will need to convert
# the list of targets to a comma-delimited string and then set the
# text_targets parameter with the result.
if 'targets' in kw:
scan['settings']['text_targets'] = ','.join(self._check(
'targets', kw['targets'], list))
del(kw['targets'])
# For credentials, we will simply push the dictionary as-is into the
# the credentials.add sub-document.
if 'credentials' in kw:
scan['credentials'] = {'add': dict()}
scan['credentials']['add'] = self._check(
'credentials', kw['credentials'], dict)
del(kw['credentials'])
# Just like with credentials, we will push the dictionary as-is into the
# correct sub-document of the scan definition.
if 'compliance' in kw:
scan['audits'] = self._check('compliance', kw['compliance'], dict)
del(kw['compliance'])
if 'plugins' in kw:
scan['plugins'] = self._check('plugins', kw['plugins'], dict)
del(kw['plugins'])
# any other remaining keyword arguments will be passed into the settings
# sub-document. The bulk of the data should go here...
scan['settings'] = dict_merge(scan['settings'], kw)
return scan
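# A rough sketch of how the keyword arguments map into a scan document
# (illustrative values only; the template UUID shown is a placeholder, not a
# real Tenable.io template identifier):
#
#   kw = {'template': 'basic', 'targets': ['192.0.2.1']}
#   -> {'uuid': '<basic-template-uuid>',
#       'settings': {'text_targets': '192.0.2.1'}}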
def attachment(self, scan_id, attachment_id, key, fobj=None):
'''
Retrieve an attachment associated to a scan.
:devportal:`scans: attachments <scans-attachments>`
Args:
scan_id (int): The unique identifier for the scan.
attachment_id (int): The unique identifier for the attachment.
key (str): The attachment access token.
fobj (FileObject, optional): a file-like object you wish for the
attachment to be written to. If none is specified, a BytesIO
object will be returned with the contents of the attachment.
Returns:
:obj:`FileObject`:
A file-like object with the attachment written into it.
Examples:
>>> with open('example.file', 'wb') as fobj:
... tio.scans.attachment(1, 1, 'abc', fobj)
'''
if not fobj:
# if no file-like object is specified, then assign a BytesIO object
# to the variable.
fobj = BytesIO()
# Make the HTTP call and stream the data into the file object.
resp = self._api.get('scans/{}/attachments/{}'.format(
scan_id,
attachment_id
), params={'key': self._check('key', key, str)}, stream=True)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
resp.close()
# Return the file object to the caller.
return fobj
def configure(self, id, **kw):
'''
Overwrite the parameters specified on top of the existing scan record.
:devportal:`scans: configure <scans-configure>`
Args:
id (int): The unique identifier for the scan.
template (str, optional):
The scan policy template to use. If no template is specified
then the default of `basic` will be used.
policy (int, optional):
The id or title of the scan policy to use (if not using one of
the pre-defined templates). Specifying a policy id will
override the template parameter.
targets (list, optional):
If defined, then a list of targets can be specified and will
be formatted to an appropriate text_target attribute.
credentials (dict, optional):
A list of credentials to use.
compliance (dict, optional):
A list of compliance audits to use.
scanner (str, optional):
Define the scanner or scanner group uuid or name.
**kw (dict, optional):
The various parameters that can be passed to the scan creation
API. Examples would be `name`, `email`, `scanner_id`, etc. For
more detailed information, please refer to the API documentation
linked above. Further, any keyword arguments passed that are
not explicitly documented will be automatically appended to the
settings document. There is no need to pass settings directly.
Returns:
:obj:`dict`:
The scan resource record.
Examples:
>>> tio.scans.configure(1, name='New Scan Name')
'''
# We will get the current scan record, generate the new parameters in
# the correct format, and then merge them together to create the new
# scan record that we will be pushing to the API.
current = self.details(id)
updated = self._create_scan_document(kw)
scan = dict_merge(current, updated)
# Performing the actual call to the API with the updated scan record.
return self._api.put('scans/{}'.format(id),
json=scan).json()
def copy(self, scan_id, folder_id=None, name=None):
'''
Duplicates a scan and returns the details of the copy.
:devportal:`scans: copy <scans-copy>`
Args:
scan_id (int): The unique identifier for the scan.
folder_id (int, optional): The unique identifier for the folder.
name (str, optional): The name for the copied scan.
Returns:
:obj:`dict`:
The scan resource record for the copied scan.
Examples:
>>> new_scan = tio.scans.copy(1, 'New Scan Name')
'''
# Construct the request payload.
payload = dict()
if folder_id:
payload['folder_id'] = self._check('folder_id', folder_id, int)
if name:
payload['name'] = self._check('name', name, str)
# make the call and return the resulting JSON document to the caller.
return self._api.post('scans/{}/copy'.format(scan_id),
json=payload).json()
def create(self, **kw):
'''
Create a new scan.
:devportal:`scans: create <scans-create>`
Args:
name (str): The name of the scan to create.
template (str, optional):
The scan policy template to use. If no template is specified
then the default of `basic` will be used.
policy (int, optional):
The id or title of the scan policy to use (if not using one of
the pre-defined templates). Specifying a policy id will
override the template parameter.
targets (list, optional):
If defined, then a list of targets can be specified and will
be formatted to an appropriate text_target attribute.
credentials (dict, optional):
A list of credentials to use.
compliance (dict, optional):
A list of compliance audits to use.
scanner (str, optional):
Define the scanner or scanner group uuid or name.
**kw (dict, optional):
The various parameters that can be passed to the scan creation
API. Examples would be `name`, `email`, `scanner_id`, etc. For
more detailed information, please refer to the API documentation
linked above. Further, any keyword arguments passed that are
not explicitly documented will be automatically appended to the
settings document. There is no need to pass settings directly.
Returns:
:obj:`dict`:
The scan resource record of the newly created scan.
Examples:
Create an un-credentialed basic scan:
>>> scan = tio.scans.create(
... name='Example Scan',
... targets=['127.0.0.1'])
Creating a scan with a set of managed credentials:
>>> scan = tio.scans.create(
... name='Example Managed Cred Scan',
... targets=['127.0.0.1'],
... credentials={'Host': {'SSH': [{'id': 'CREDENTIAL-UUID'}]}}
Creating a scan with a set of embedded credentials:
>>> scan = tio.scans.create(
... name='Example Embedded Cred Scan',
... targets=['127.0.0.1'],
... credentials={'Host': {'Windows': [{
... 'domain': '',
... 'username': 'Administrator',
... 'password': 'sekretsquirrel',
... 'auth_method': 'Password'
... }]}}
... )
For further information on credentials, what settings to use, etc,
refer to
`this doc <https://developer.tenable.com/docs/determine-settings-for-credential-type>`_
on the developer portal.
'''
if 'template' not in kw:
kw['template'] = 'basic'
scan = self._create_scan_document(kw)
# Run the API call and return the result to the caller.
return self._api.post('scans', json=scan).json()['scan']
def delete(self, scan_id):
'''
Remove a scan.
:devportal:`scans: delete <scans-delete>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
Returns:
:obj:`None`:
The scan was successfully deleted.
Examples:
>>> tio.scans.delete(1)
'''
self._api.delete('scans/{}'.format(scan_id))
def history(self, id, limit=None, offset=None, pages=None, sort=None):
'''
Get the scan history of a given scan from Tenable.io.
:devportal:`scans: history <scans-history>`
Args:
id (int or uuid):
The unique identifier for the scan.
limit (int, optional):
The number of records to retrieve. Default is 50
offset (int, optional):
The starting record to retrieve. Default is 0.
sort (tuple, optional):
A tuple of tuples identifying the field and sort order of
the field.
Returns:
:obj:`ScanHistoryIterator`:
An iterator that handles the page management of the requested
records.
Examples:
>>> for history in tio.scans.history(1):
... pprint(history)
'''
query = dict()
if sort and self._check('sort', sort, tuple):
query['sort'] = ','.join(['{}:{}'.format(
self._check('sort_field', i[0], str),
self._check('sort_direction', i[1], str, choices=['asc', 'desc'])
) for i in sort])
return ScanHistoryIterator(self._api,
_limit=limit if limit else 50,
_offset=offset if offset else 0,
_pages_total=pages,
_query=query,
_path='scans/{}/history'.format(id),
_resource='history'
)
def delete_history(self, scan_id, history_id):
'''
Remove an instance of a scan from a scan history.
:devportal:`scans: delete-history <scans-delete-history>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
history_id (int or uuid): The unique identifier for the instance of the scan.
Returns:
:obj:`None`:
Scan history successfully deleted.
Examples:
>>> tio.scans.delete_history(1, 1)
'''
self._api.delete('scans/{}/history/{}'.format(
scan_id,
history_id))
def details(self, scan_id):
'''
Calls the editor API and parses the scan config details to return a
document that closely matches what the API expects to be POSTed or PUTed
via the create and configure methods. The compliance audits and
credentials are populated into the 'current' sub-document for the
relevant resources.
.. important::
Please note that the details method is reverse-engineered from the
responses from the editor API, and while we are reasonably sure that
the response should align almost exactly to what the API expects to
be pushed to it, this method by very nature of what it's doing isn't
guaranteed to always work.
.. note::
If you're looking for the results of the most recent scan, and what
matches to the ``GET /scans/{id}`` call, then take a look at the
results method.
Args:
scan_id (int or uuid): The unique identifier for the scan.
Returns:
:obj:`dict`:
The scan configuration resource.
Examples:
>>> scan = tio.scans.details(1)
>>> pprint(scan)
'''
return self._api.editor.details('scan', scan_id)
def results(self, scan_id, history_id=None, history_uuid=None):
'''
Return the scan results from either the latest scan or a specific scan
instance in the history.
:devportal:`scans: details <scans-details>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
history_id (int, optional):
The unique identifier for the instance of the scan.
history_uuid (uuid, optional):
The UUID for the instance of the scan.
Returns:
:obj:`dict`:
The scan result dictionary.
Examples:
Retrieve the latest results:
>>> results = tio.scans.results(1)
Retrieve a specific instance of the result set:
>>> results = tio.scans.results(1, 1)
'''
params = dict()
if history_id:
params['history_id'] = self._check('history_id', history_id, int)
if history_uuid:
params['history_uuid'] = self._check(
'history_uuid', history_uuid, 'scanner-uuid')
return self._api.get('scans/{}'.format(
scan_id), params=params).json()
def export(self, scan_id, *filters, **kw):
'''
Export the scan report.
:devportal:`scans: export <scans-export-request>`
Args:
scan_id (int or uuid): The unique identifier of the scan.
*filters (tuple, optional):
A list of tuples detailing the filters that wish to be applied
the response data. Each tuple is constructed as
('filter', 'operator', 'value') and would look like the
following example: `('plugin.id', 'eq', '19506')`. For a
complete list of the available filters and options, please
refer to the API documentation linked above.
history_id (int, optional):
The unique identifier for the instance of the scan.
history_uuid (uuid, optional):
The UUID for the instance of the scan.
format (str, optional):
What format would you like the resulting data to be in. The
default would be nessus output. Available options are `nessus`,
`csv`, `html`, `pdf`, `db`. Default is `nessus`.
password (str, optional):
If the export format is `db`, then what is the password used to
encrypt the NessusDB file. This is a required parameter for
NessusDB exports.
chapters (list, optional):
A list of the chapters to write for the report. The chapters
list is only required for PDF and HTML exports. Available
chapters are `vuln_hosts_summary`, `vuln_by_host`,
`compliance_exec`, `remediations`, `vuln_by_plugin`, and
`compliance`. List order will denote output order. Default is
`vuln_by_host`.
filter_type (str, optional):
Are the filters exclusive (this AND this AND this) or inclusive
(this OR this OR this). Valid values are `and` and `or`. The
default setting is `and`.
scan_type (str, optional):
This parameter is required only when using the API with
Web Application Scanning. Available option is 'web-app'.
fobj (FileObject, optional):
The file-like object to be returned with the exported data. If
no object is specified, a BytesIO object is returned with the
data. While this is an optional parameter, it is highly
recommended to use this parameter as exported files can be quite
large, and BytesIO objects are stored in memory, not on disk.
Returns:
:obj:`FileObject`:
The file-like object of the requested export.
Examples:
Export the full report of the latest instance of the scan:
>>> with open('example.nessus', 'wb') as reportobj:
... tio.scans.export(1, fobj=reportobj)
Export a specific instance of the scan:
>>> with open('example.nessus', 'wb') as reportobj:
... tio.scans.export(1, history_id=1, fobj=reportobj)
'''
# initiate the payload and parameters dictionaries. We are also
# checking to see if the filters were passed as a keyword argument
# instead of as an argument list. As this seems to be a common
# issue, we should be supporting this methodology.
filters = self._check('filters',
kw.get('filters', filters), (list, tuple))
payload = self._parse_filters(filters,
self._api.filters.scan_filters(), rtype='sjson')
params = dict()
dl_params = dict()
if 'history_id' in kw:
params['history_id'] = self._check(
'history_id', kw['history_id'], int)
if kw.get('history_uuid'):
params['history_uuid'] = self._check(
'history_uuid', kw['history_uuid'], 'scanner-uuid')
# Enable exporting of Web Application scans.
if 'scan_type' in kw:
dl_params['type'] = params['type'] = self._check(
'type', kw['scan_type'], str, choices=['web-app'])
if 'password' in kw:
payload['password'] = self._check('password', kw['password'], str)
payload['format'] = self._check('format',
kw['format'] if 'format' in kw else None,
str, choices=['nessus', 'html', 'pdf', 'csv', 'db'],
default='nessus')
# The chapters are sent to us in a list, and we need to collapse that
# down to a semicolon-delimited string, which is what the API expects.
payload['chapters'] = ';'.join(
self._check('chapters',
kw['chapters'] if 'chapters' in kw else None,
list,
choices=['vuln_hosts_summary', 'vuln_by_host', 'vuln_by_plugin',
'compliance_exec', 'compliance', 'remediations'],
default=['vuln_by_host']))
if 'filter_type' in kw:
payload['filter.search_type'] = self._check(
'filter_type', kw['filter_type'], str, choices=['and', 'or'])
# Now we need to set the FileObject. If one was passed to us, then lets
# just use that, otherwise we will need to instantiate a BytesIO object
# to push the data into.
if 'fobj' in kw:
fobj = kw['fobj']
else:
fobj = BytesIO()
# The first thing that we need to do is make the request and get the
# File id for the job.
fid = self._api.post('scans/{}/export'.format(scan_id),
params=params, json=payload).json()['file']
self._api._log.debug('Initiated scan export {}'.format(fid))
# Next we will wait for the status of the export request to become
# ready.
status = self._wait_for_download(
'scans/{}/export/{}/status'.format(scan_id, fid),
'scans', scan_id, fid, params=dl_params)
# Now that the status has reported back as "ready", we can actually
# download the file.
resp = self._api.get('scans/{}/export/{}/download'.format(
scan_id, fid), params=dl_params, stream=True)
# Lets stream the file into the file-like object...
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
resp.close()
# Lastly lets return the FileObject to the caller.
return fobj
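# An additional illustrative call (not part of the original docstring) showing a
# filtered CSV export; the filter tuple reuses the ('plugin.id', 'eq', '19506')
# example documented above and 'csv' is one of the documented format choices:
#
#   >>> with open('plugin_19506.csv', 'wb') as fobj:
#   ...     tio.scans.export(1, ('plugin.id', 'eq', '19506'),
#   ...                      format='csv', fobj=fobj)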
def host_details(self, scan_id, host_id, history_id=None, history_uuid=None):
'''
Retrieve the host details from a specific scan.
:devportal:`scans: host-details <scans-host-details>`
Args:
scan_id (int): The unique identifier for the scan.
host_id (int): The unique identifier for the host within the scan.
history_id (int, optional):
The unique identifier for the instance of the scan.
history_uuid (str, optional):
The unique identifier for the scan instance.
Returns:
:obj:`dict`:
The information related to the host requested.
Examples:
>>> host = tio.scans.host_details(1, 1)
'''
params = dict()
if history_id:
params['history_id'] = self._check('history_id', history_id, int)
if history_uuid:
params['history_uuid'] = self._check(
'history_uuid', history_uuid, 'scanner-uuid')
return self._api.get('scans/{}/hosts/{}'.format(
scan_id,
self._check('host_id', host_id, int)),
params=params).json()
def import_scan(self, fobj, folder_id=None, password=None, aggregate=None):
'''
Import a scan report into Tenable.io.
:devportal:`scans: import <scans-import>`
Args:
fobj (FileObject): The File-like object of the scan to import.
folder_id (int, optional):
The unique identifier for the folder to place the scan into.
password (str, optional):
The password needed to decrypt the file. This is only necessary
for NessusDB files uploaded.
aggregate (bool, optional):
should the Nessus report be aggregated into the aggregate
results? The default is True.
Returns:
:obj:`dict`:
The scan resource record for the imported scan.
Examples:
Import a .nessusv2 report:
>>> with open('example.nessus', 'rb') as reportobj:
... tio.scans.import(reportobj)
Import a NessusDB report.
>>> with open('example.db', 'rb') as reportobj:
... tio.scans.import(reportobj, password='sekret')
'''
# First lets verify that the folder_id and password are typed correctly
# before initiating any uploads.
payload = dict()
if folder_id:
payload['folder_id'] = self._check('folder_id', folder_id, int)
if password:
payload['password'] = self._check('password', password, str)
if aggregate is None:
aggregate = True
# Upload the file to the Tenable.io and store the resulting filename in
# the payload.
payload['file'] = self._api.files.upload(fobj)
# make the call to Tenable.io to import and then return the result to
# the caller.
return self._api.post('scans/import', json=payload, params={
'include_aggregate': int(aggregate)}).json()
def launch(self, scan_id, targets=None):
'''
Launches a scan.
:devportal:`scans: launch <scans-launch>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
targets (list, optional):
A list of targets to be scanned instead of the default targets
in the scan.
Response:
:obj:`str`:
The uuid of the scan instance (history).
Examples:
Launch the scan with the configured targets:
>>> tio.scans.launch(1)
Launch the scan with some custom targets:
>>> tio.scans.launch(1, targets=['127.0.0.1'])
'''
payload = dict()
if targets:
payload['alt_targets'] = self._check('targets', targets, list)
return self._api.post('scans/{}/launch'.format(
scan_id),
json=payload).json()['scan_uuid']
def list(self, folder_id=None, last_modified=None):
'''
Retrieve the list of configured scans.
:devportal:`scans: list <scans-list>`
Args:
folder_id (int, optional): Only return scans within this folder.
last_modified (datetime, optional):
Only return scans that have been modified since the time
specified.
Returns:
:obj:`list`:
A list containing the list of scan resource records.
Examples:
>>> for scan in tio.scans.list():
... pprint(scan)
'''
params = dict()
if folder_id:
params['folder_id'] = self._check('folder_id', folder_id, int)
if last_modified:
# for the last_modified datetime attribute, we will want to convert
# that into a timestamp integer before passing it to the API.
params['last_modification_date'] = int(time.mktime(self._check(
'last_modified', last_modified, datetime).timetuple()))
return self._api.get('scans', params=params).json()['scans']
def pause(self, scan_id, block=False):
'''
Pauses a running scan.
:devportal:`scans: pause <scans-pause>`
Args:
scan_id (int or uuid): The unique identifier for the scan to pause.
block (bool, optional):
Block until the scan is actually paused. Default is False.
Returns:
:obj:`None`:
The scan was successfully requested to be paused.
Examples:
>>> tio.scans.pause(1)
'''
self._api.post('scans/{}/pause'.format(scan_id), json={})
if block:
self._block_while_running(scan_id)
def plugin_output(self, scan_id, host_id, plugin_id, history_id=None, history_uuid=None):
'''
Retrieve the plugin output for a specific instance of a vulnerability
on a host.
:devportal:`scans: plugin-output <scans-plugin-output>`
Args:
scan_id (int or uuid): The unique identifier of the scan.
host_id (int): The unique identifier of the scanned host.
plugin_id (int): The plugin id.
history_id (int, optional):
The unique identifier of the scan instance.
Returns:
:obj:`dict`:
The plugin resource record for that plugin on that host.
Examples:
>>> output = tio.scans.plugin_output(1, 1, 1)
>>> pprint(output)
'''
params = dict()
if history_id:
params['history_id'] = self._check('history_id', history_id, int)
if history_uuid:
params['history_uuid'] = self._check('history_uuid', history_uuid, 'uuid')
return self._api.get('scans/{}/hosts/{}/plugins/{}'.format(
scan_id,
self._check('host_id', host_id, int),
self._check('plugin_id', plugin_id, int)), params=params).json()
def set_read_status(self, scan_id, read_status):
'''
Sets the read status of the scan. This is generally used to toggle the
unread status of the scan within the UI.
:devportal:`scans: read-status <scans-read-status>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
read_status (bool):
Is the scan in a read or unread state? True would denote read,
whereas False is unread.
Returns:
:obj:`None`:
The status of the scan was updated.
Examples:
Set a scan to unread:
>>> tio.scans.set_read_status(1, False)
'''
self._api.put('scans/{}/status'.format(scan_id), json={
'read': self._check('read_status', read_status, bool)
})
def resume(self, scan_id):
'''
Resume a paused scan.
:devportal:`scans: resume <scans-resume>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
Returns:
:obj:`None`:
The scan was successfully requested to resume.
Examples:
>>> tio.scans.resume(1)
'''
self._api.post('scans/{}/resume'.format(scan_id))
def schedule(self, scan_id, enabled):
'''
Enables or disables the scan schedule.
:devportal:`scans: schedule <scans-schedule>`
Args:
scan_id (int): The unique identifier for the scan.
enabled (bool): Enables or Disables the scan scheduling.
Returns:
:obj:`dict`:
The schedule resource record for the scan.
Examples:
Enable a scan schedule:
>>> tio.scans.schedule(1, True)
'''
return self._api.put('scans/{}/schedule'.format(scan_id), json={
'enabled': self._check('enabled', enabled, bool)}).json()
def stop(self, scan_id, block=False):
'''
Stop a running scan.
:devportal:`scans: stop <scans-stop>`
Args:
scan_id (int): The unique identifier for the scan.
block (bool, optional):
Block until the scan is actually stopped. Default is False.
Returns:
:obj:`None`:
The scan was successfully requested to stop.
Examples:
Stop the scan asynchronously:
>>> tio.scans.stop(1)
Stop the scan and wait for the scan to stop:
>>> tio.scans.stop(1, True)
'''
self._api.post('scans/{}/stop'.format(scan_id))
if block:
self._block_while_running(scan_id)
def status(self, scan_id):
'''
Get the status of the latest instance of the scan.
:devportal:`scans: get-latest-status <scans-get-latest-status>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
Returns:
:obj:`str`:
The current status of the last instance.
Examples:
>>> tio.scans.status(1)
u'completed'
'''
return self._api.get('scans/{}/latest-status'.format(scan_id)).json()['status']
def timezones(self):
'''
Retrieves the list of timezones.
:devportal:`scans: timezones <scans-timezones>`
Returns:
:obj:`list`:
List of allowed timezone strings accepted by Tenable.io
Examples:
>>> for item in tio.scans.timezones():
... pprint(item)
'''
resp = self._api.get('scans/timezones').json()['timezones']
return [i['value'] for i in resp]
def info(self, scan_id, history_uuid):
'''
Retrieves information about the status of the specified instance
of the scan.
:devportal:`scan: get-scan-history <scans-history-by-scan-id>`
Args:
scan_id (int or uuid): The unique identifier for the scan.
history_uuid (str): The unique identifier for the scan instance.
Returns:
:obj:`dict`:
The metadata about the scan instance specified.
Examples:
>>> info = tio.scans.info(1, 'BA0ED610-C27B-4096-A8F4-3189279AFFE7')
'''
return self._api.get('scans/{}/history/{}'.format(
scan_id,
self._check('history_uuid', history_uuid, 'scanner-uuid'))).json()
|
py | 7df709d5b7628f6c2d09517262be3b54c199d477 | """Simultaneous unit, coverage, and timing testing for python 3 modules."""
# standard library
import argparse
import ast
import importlib
import inspect
import json
import math
import os
import re
import sys
import time
import unittest
class CodeTracer(ast.NodeTransformer):
"""Traces, compiles, and executes an abstract syntax tree."""
__INJECT_NAME = '__code_tracer__'
@staticmethod
def from_source_file(filename):
"""Create a CodeTracer for the given source file."""
# read the file, parse the AST, and return a tracer
with open(filename) as f:
src = f.read()
tree = ast.parse(src)
return CodeTracer(tree, filename)
def __init__(self, tree, filename):
# a list of all statements in the injected module
self.nodes = []
self.original_tree = tree
self.filename = filename
def run(self):
"""Trace, compile, and execute the AST, and return global variables."""
# inject code tracing calls into the AST
tree = self.visit(self.original_tree)
ast.fix_missing_locations(tree)
# execute the new AST, and keep track of global variables it creates
global_vars = {CodeTracer.__INJECT_NAME: self}
exec(compile(tree, self.filename, 'exec'), global_vars)
# return the global variables
return global_vars
def get_coverage(self):
"""
Return code coverage as a list of execution counts and other metadata for
each statement.
Each item in the list is a dict containing the following keys:
- executions: the number of times the statement was executed
- line: the line number of the statement (1-indexed)
- column: the column number of the statement (0-indexed)
- is_string: a boolean indicating whether the statement was a string
The list is sorted by line number.
"""
# function to determine whether a given node is a string (e.g. a docstring)
def is_string(node):
return isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)
# iterate over all nodes
coverage = []
for node_info in self.nodes:
node = node_info['node']
# coverage result for the current node
coverage.append({
'executions': node_info['counter'],
'time': node_info['time'],
'line': node.lineno,
'column': node.col_offset,
'is_string': is_string(node),
})
# return sorted coverage results
return sorted(coverage, key=lambda row: row['line'])
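# Illustrative shape of a single coverage entry returned above (the field names
# match the dict built in this method; the values are made up for the sketch):
#
#   {'executions': 3, 'time': 0.0012, 'line': 17, 'column': 4, 'is_string': False}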
def execute_node1(self, node_id):
"""Increment the execution counter, and start timing the given node."""
self.nodes[node_id]['counter'] += 1
self.nodes[node_id]['time'] -= time.time()
def execute_node2(self, node_id):
"""Stop timing the given node."""
self.nodes[node_id]['time'] += time.time()
def generic_visit(self, node):
"""
Visit an AST node and add tracing if it's a statement.
This method shouldn't be called directly. It is called by the super class
when the `run` method of this class is called.
"""
# let the super class visit this node first
super().generic_visit(node)
# only trace statements
if not isinstance(node, ast.stmt):
return node
# a unique identifier and initial data for this node
node_id = len(self.nodes)
self.nodes.append({
'node': node,
'counter': 0,
'time': 0,
})
# tracing is done by calling "execute_node" of this class
func1 = ast.Attribute(
value=ast.Name(id=CodeTracer.__INJECT_NAME, ctx=ast.Load()),
attr='execute_node1',
ctx=ast.Load()
)
func2 = ast.Attribute(
value=ast.Name(id=CodeTracer.__INJECT_NAME, ctx=ast.Load()),
attr='execute_node2',
ctx=ast.Load()
)
# the argument to the tracing function is the unique node identifier
args = [ast.Num(n=node_id)]
# the tracer will be executed whenever the statement is executed
tracer1 = ast.Expr(value=ast.Call(func=func1, args=args, keywords=[]))
tracer2 = ast.Expr(value=ast.Call(func=func2, args=args, keywords=[]))
# spoof location information for the generated node
ast.copy_location(tracer1, node)
ast.copy_location(tracer2, node)
# inject tracers in a try-finally construct around this node
wrapper = ast.Try(body=[node], handlers=[], orelse=[], finalbody=[tracer2])
return [tracer1, wrapper]
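# Conceptually, each traced statement `stmt` is rewritten into something like the
# pseudo-source below (the real transformation operates on AST nodes, and
# __code_tracer__ is the tracer instance injected into the module's globals):
#
#   __code_tracer__.execute_node1(<node_id>)
#   try:
#       stmt
#   finally:
#       __code_tracer__.execute_node2(<node_id>)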
class TestResult(unittest.TextTestResult):
"""An implementation of python's unittest.TestResult class."""
ERROR = -2
FAIL = -1
SKIP = 0
PASS = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# keep a list of passed tests
self.successes = []
# record the result of all tests
self.results = {}
def __set_result(self, test, skip, error, fail, tb):
"""
Set the result of the test.
The result is one of these integers: PASS, SKIP, FAIL, or ERROR.
"""
# derive a friendly name
match = re.match('^(\\S+)\\s+\\(\\S+?\\.(\\S+)\\)$', str(test))
if match is None:
raise Exception('unrecognized test name: "%s"' % test)
name = '%s.%s' % (match.group(2), match.group(1))
# set (or update) the result
if skip:
self.results[name] = TestResult.SKIP
elif error:
self.results[name] = TestResult.ERROR
elif fail:
self.results[name] = TestResult.FAIL
else:
# don't overwrite an earlier result (e.g. of a failed subtest)
if self.results.get(name, None) is None:
self.results[name] = TestResult.PASS
def addError(self, test, err):
super().addError(test, err)
self.__set_result(test, False, True, False, err[-1])
def addFailure(self, test, err):
super().addFailure(test, err)
self.__set_result(test, False, False, True, err[-1])
def addSuccess(self, test):
super().addSuccess(test)
self.successes.append(test)
self.__set_result(test, False, False, False, None)
def addSkip(self, test, reason):
super().addSkip(test, reason)
self.__set_result(test, True, False, False, None)
def addExpectedFailure(self, test, err):
super().addExpectedFailure(test, err)
self.__set_result(test, False, False, False, err[-1])
def addUnexpectedSuccess(self, test):
super().addUnexpectedSuccess(test)
self.__set_result(test, False, False, True, None)
def addSubTest(self, test, subtest, outcome):
super().addSubTest(test, subtest, outcome)
# a failed or errored subtest fails or errors the whole test
fail = outcome is not None
tb = outcome[-1] if fail else None
self.__set_result(test, False, False, fail, tb)
class Styler:
"""Helper class for producing stylized terminal output."""
green, gray, red = 32, 37, 31
def __init__(self, json_only=False, use_colors=False, show_source=False):
self.__json_only = json_only
self.__use_colors = use_colors
self.__show_source = show_source
def colorize(self, txt, color):
"""Color the given string."""
if self.__use_colors:
return '\x1b[0;%d;40m%s\x1b[0m' % (color, txt)
else:
return txt
def emit(self, txt, is_source=False):
"""Print the given string, conditional on export settings."""
if not self.__json_only and (not is_source or self.__show_source):
print(txt)
def run_tests(filename, output=sys.stdout):
"""Run all tests in the given file and return unit and coverage resuls."""
# get the module name from the filename
path, ext = filename[:-3], filename[-3:]
if ext != '.py':
raise Exception('not a *.py file: ' + str(filename))
module_name = path.replace(os.path.sep, '.')
# needed when the file is in a subdirectory
sys.path.append(os.getcwd())
# import the module and determine the test target
module = importlib.import_module(module_name)
target_module = getattr(module, '__test_target__', None)
if target_module is None:
message = (
'Warning: '
'%s missing attribute __test_target__. '
'Coverage will not be tracked.'
)
print(message % module_name, file=output)
target_file = None
else:
target_file = target_module.replace('.', os.path.sep) + '.py'
if target_file:
# trace execution while loading the target file
tracer = CodeTracer.from_source_file(target_file)
global_vars = tracer.run()
# make the target's globals available to the test module
for key in global_vars:
if key[:2] != '__':
setattr(module, key, global_vars[key])
# load and run unit tests
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
runner = unittest.TextTestRunner(
stream=output,
verbosity=2,
resultclass=TestResult
)
unit_info = runner.run(tests)
if target_file:
coverage_results = tracer.get_coverage()
else:
coverage_results = None
# return unit and coverage results
return {
'unit': unit_info.results,
'coverage': coverage_results,
'target_module': target_module,
'target_file': target_file,
}
def find_tests(location, regex, terminal):
"""Find files containing unit tests."""
if not os.path.exists(location):
return []
elif os.path.isdir(location):
pattern = re.compile(regex)
file_set = set()
for dir_, dirs, files in os.walk(location):
for f in files:
if pattern.match(f):
file_set.add(os.path.join(dir_, f))
if terminal:
break
return sorted(file_set)
else:
return [location]
def analyze_results(results, styler=None):
"""
Extract a useful set of information from the results of a single unit test.
"""
if styler is None:
styler = Styler(json_only=True)
# unit results
export = {
'target_file': results['target_file'],
'target_module': results['target_module'],
'unit': {
'tests': {},
'summary': {},
},
'coverage': {
'lines': [],
'hit_counts': {},
'summary': {},
},
}
styler.emit('=' * 70)
styler.emit('Test results for:')
styler.emit(' %s (%s)' % (results['target_module'], results['target_file']))
styler.emit('Unit:')
test_bins = {-2: 0, -1: 0, 0: 0, 1: 0}
for name in sorted(results['unit'].keys()):
result = results['unit'][name]
test_bins[result] += 1
txt, color = {
-2: ('error', Styler.red),
-1: ('fail', Styler.red),
0: ('skip', Styler.gray),
1: ('pass', Styler.green),
}[result]
export['unit']['tests'][name] = txt
styler.emit(' %s: %s' % (name, styler.colorize(txt, color)))
export['unit']['summary'] = {
'total': len(results['unit']),
'error': test_bins[-2],
'fail': test_bins[-1],
'skip': test_bins[0],
'pass': test_bins[1],
}
def fmt(num, goodness):
if goodness > 0 and num > 0:
color = Styler.green
elif goodness < 0 and num > 0:
color = Styler.red
else:
color = Styler.gray
return styler.colorize(str(num), color)
styler.emit(' error: %s' % fmt(test_bins[-2], -1))
styler.emit(' fail: %s' % fmt(test_bins[-1], -1))
styler.emit(' skip: %s' % fmt(test_bins[0], 0))
styler.emit(' pass: %s' % fmt(test_bins[1], 1))
if not results['target_file']:
# coverage was not computed, return test outcomes only
return export
# coverage results
styler.emit('Coverage:')
def print_line(line, txt, hits, time, required):
export['coverage']['lines'].append({
'line': line,
'hits': hits,
'time': time,
'required': required,
})
def format_duration(d):
if d < 1e-3:
# less than a millisecond, hide to reduce noise
return ''
elif d < 10:
# millisecond precision for times up to 10 seconds
return '%.0f ms' % (d * 1e3)
else:
return '%.0f sec' % d
if required:
args = (
'%dx' % hits,
format_duration(time / max(hits, 1)),
format_duration(time),
)
cov = '%-10s %-10s %-10s' % args
else:
cov = ''
if not required:
color = Styler.gray
elif hits > 0:
color = Styler.green
else:
color = Styler.red
txt = styler.colorize('%-80s' % txt, color)
styler.emit(' %4d %s %s' % (line, txt, cov), is_source=True)
if required and time < 0:
raise Exception('time travel detected')
with open(results['target_file']) as f:
src = [(i, line) for (i, line) in enumerate(f.readlines())]
hit_bins = {0: 0}
for row in results['coverage']:
while len(src) > 0 and src[0][0] < row['line'] - 1:
line, hits, time = src[0][0] + 1, 0, 0
txt, src = src[0][1][:-1], src[1:]
print_line(line, txt, hits, time, False)
line, hits, time = row['line'], row['executions'], row['time']
txt, src = src[0][1][:-1], src[1:]
required = not row['is_string']
print_line(line, txt, hits, time, required)
if required:
if hits not in hit_bins:
hit_bins[hits] = 1
else:
hit_bins[hits] += 1
while len(src) > 0:
line, hits, time = src[0][0] + 1, 0, 0
txt, src = src[0][1][:-1], src[1:]
print_line(line, txt, hits, time, False)
for hits in sorted(hit_bins.keys()):
num = hit_bins[hits]
if hits == 0 and num > 0:
color = Styler.red
elif hits > 0 and num > 0:
color = Styler.green
else:
color = Styler.gray
num_str = styler.colorize(str(num), color)
export['coverage']['hit_counts'][hits] = num
styler.emit(' %dx: %s' % (hits, num_str))
total_lines = sum(hit_bins.values())
lines_hit = total_lines - hit_bins[0]
export['coverage']['summary'] = {
'total_lines': total_lines,
'hit_lines': lines_hit,
'missed_lines': (total_lines - lines_hit),
'percent': lines_hit / max(total_lines, 1),
}
styler.emit(' overall: %d%%' % math.floor(100 * lines_hit / max(total_lines, 1)))
# return results
return export
def run_test_sets(location, pattern, terminal, show_json, color, full):
"""
Run all test sets and print results to standard output.
location (str):
the path in which to search for unit tests
pattern (str):
regular expression for matching unit test filenames
terminal (bool):
whether the search should end with the given location (non-recursive)
show_json (bool):
whether to show JSON or human-readable output
color (bool):
whether human-readable output should be colorized
full (bool):
whether human-readable test target source code should be shown
"""
# run unit and coverage tests
styler = Styler(
json_only=show_json,
use_colors=color,
show_source=full
)
test_files = find_tests(location, pattern, terminal)
if not test_files:
raise Exception('no tests found')
tests_by_outcome = {
'pass': [],
'skip': [],
'fail': [],
'error': [],
}
def all_pass_or_skip():
no_fail = not tests_by_outcome['fail']
no_error = not tests_by_outcome['error']
return no_fail and no_error
if show_json:
# suppress other output
all_results = []
with open(os.devnull, 'w') as output:
for filename in test_files:
test_outcomes = run_tests(filename, output)
test_results = analyze_results(test_outcomes, styler)
all_results.append(test_results)
for test_name, test_outcome in test_results['unit']['tests'].items():
tests_by_outcome[test_outcome].append(test_name)
print(json.dumps(all_results))
else:
# use default output
num_tests = 0
total_lines = hit_lines = 0
for filename in test_files:
test_results = analyze_results(run_tests(filename), styler)
if len(test_results['unit']) > 0:
unit_stats = test_results['unit']['summary']
coverage_stats = test_results['coverage']['summary']
for test_name, test_outcome in test_results['unit']['tests'].items():
tests_by_outcome[test_outcome].append(test_name)
num_tests += unit_stats['total']
if coverage_stats:
total_lines += coverage_stats['total_lines']
hit_lines += coverage_stats['hit_lines']
if total_lines == 0:
coverage = 0
else:
coverage = hit_lines / total_lines
percent = math.floor(coverage * 100)
if total_lines:
args = (percent, hit_lines, total_lines)
coverage_str = ' %d%% (%d/%d) coverage.' % args
else:
coverage_str = ' [coverage unavailable]'
all_ok = all_pass_or_skip()
if color:
if all_ok:
icon = '✔ '
else:
icon = '✘ '
else:
icon = ''
if all_ok:
num_pass = len(tests_by_outcome['pass'])
if tests_by_outcome['skip']:
skip_str = ' (%s skipped)' % len(tests_by_outcome['skip'])
else:
skip_str = ''
args = (icon, num_pass, skip_str, coverage_str)
result = '%sAll %d tests passed!%s%s' % args
txt = styler.colorize(result, Styler.green)
else:
result = '%sSome tests did not pass.%s' % (icon, coverage_str)
txt = styler.colorize(result, Styler.red)
def print_status(label, outcome):
if not tests_by_outcome[outcome]:
return
print('%s:' % label)
for name in tests_by_outcome[outcome]:
print(' - %s' % name)
print_status('skipped', 'skip')
print_status('failed', 'fail')
print_status('broken', 'error')
styler.emit(txt)
return all_pass_or_skip()
def get_argument_parser():
"""Set up command line arguments and usage."""
parser = argparse.ArgumentParser()
parser.add_argument(
'location',
type=str,
help='file or directory containing unit tests'
)
parser.add_argument(
'--pattern',
'-p',
default='^(test_.*|.*_test)\\.py$',
type=str,
help='filename regex for test discovery'
)
parser.add_argument(
'--terminal',
'-t',
default=False,
action='store_true',
help='do not search for tests recursively'
)
parser.add_argument(
'--json',
'--j',
default=False,
action='store_true',
help='print results in JSON format'
)
parser.add_argument(
'--color',
'-c',
default=False,
action='store_true',
help='colorize results'
)
parser.add_argument(
'--full',
'--f',
default=False,
action='store_true',
help='show coverage for each line'
)
parser.add_argument(
'--use-exit-code',
default=False,
action='store_true',
help='use exit code to indicate non-passing tests'
)
return parser
def main():
"""Run this script from the command line."""
args = get_argument_parser().parse_args()
all_pass = run_test_sets(
args.location,
args.pattern,
args.terminal,
args.json,
args.color,
args.full)
if args.use_exit_code and not all_pass:
sys.exit(1)
if __name__ == '__main__':
main()
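# Example invocations (a sketch; the test paths are placeholders, the flags are
# the ones defined in get_argument_parser above):
#
#   python <this_script>.py tests/ --color --full
#   python <this_script>.py tests/unit_test.py --json --use-exit-code
#
# The first form searches tests/ recursively using the default filename pattern;
# the second runs a single file and emits machine-readable JSON.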
|
py | 7df70a8f41c5f8e8da761666cb99db9010a5c6ef | import time
import dataset
import imagenet
import coco
from dataclasses import dataclass
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_pytorch":
(imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
"in_dtypes": "float32",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
"in_dtypes": "uint8",
},
"ssd-mobilenet-pytorch-native": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "pytorch",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
"in_dtypes": "float32",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
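# Illustrative sketch of how a profile is typically combined with the defaults
# (the merge itself happens in the caller, e.g. via dict.update(); this snippet
# is not code from this module):
#
#   opts = dict(SUPPORTED_PROFILES["defaults"])
#   opts.update(SUPPORTED_PROFILES["resnet50-tf"])
#   # opts now has backend="tensorflow", dataset="imagenet", cache=0 and
#   # max-batchsize=32, plus the resnet50-specific inputs/outputs/model-name.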
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
else:
raise ValueError("unknown backend: " + backend)
return backend
def get_profile_and_model_path(profile_name: str):
if profile_name == "resnet50-tf":
return SUPPORTED_PROFILES[profile_name], "/models/resnet50_v1.pb"
elif profile_name == "ssd-mobilenet-tf":
return SUPPORTED_PROFILES[profile_name], "/models/ssd_mobilenet_v1_coco_2018_01_28.pb"
elif profile_name == "ssd-resnet34-tf":
return SUPPORTED_PROFILES[profile_name], "/models/resnet34_tf.22.1.pb"
elif profile_name == "resnet50-onnxruntime":
return SUPPORTED_PROFILES[profile_name], "/models/resnet50_v1.onnx"
elif profile_name == "ssd-mobilenet-onnxruntime":
return SUPPORTED_PROFILES[profile_name], "/models/updated_ssd_mobilenet_v1_coco_2018_01_28.onnx"
elif profile_name == "ssd-resnet34-onnxruntime":
return SUPPORTED_PROFILES[profile_name], "/models/updated_resnet34-ssd1200.onnx"
elif profile_name == "resnet50-pytorch":
profile = SUPPORTED_PROFILES["resnet50-onnxruntime"]
profile['backend'] = "pytorch"
return profile, "/models/resnet50_v1.onnx"
elif profile_name == "ssd-mobilenet-pytorch":
return SUPPORTED_PROFILES[profile_name], "/models/updated_ssd_mobilenet_v1_coco_2018_01_28.onnx"
elif profile_name == "ssd-mobilenet-pytorch-native":
return SUPPORTED_PROFILES[profile_name], "/models/ssd_mobilenet_v1.pytorch"
elif profile_name == "ssd-resnet34-pytorch":
return SUPPORTED_PROFILES[profile_name], "/models/resnet34-ssd1200.pytorch"
else:
raise Exception("Unsupported profile: {}".format(profile_name))
def get_img_format(backend_name: str):
if backend_name == "onnxruntime":
return "NCHW"
elif backend_name == "tensorflow":
return "NHWC"
elif backend_name == "pytorch":
return "NCHW"
else:
raise Exception("Unsupported backend: {}".format(backend_name))
@dataclass
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time() |
py | 7df70b14fcec21339d69e2259ab70fe699f94e52 | import traceback
from django.shortcuts import HttpResponse
def process(request, **kwargs):
app = kwargs.pop('app', None)
fun = kwargs.pop('function', None)
index = kwargs.pop('id', None)
mode = kwargs.pop('mode', None)
if app == 'api':
app = 'ApiManager'
try:
app = __import__("%s.views" % app)
view = getattr(app, 'views')
fun = getattr(view, fun)
# Call the resolved function in views.py and capture its return value
if mode:
result = fun(request, mode, index)
else:
result = fun(request, index)
except TypeError:
result = fun(request)
except (ImportError, AttributeError):
# If the import fails, return a custom 404-style error (the traceback is shown here)
return HttpResponse(traceback.format_exc())
return result
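# A hypothetical URL pattern that would route into this dispatcher (the actual
# pattern lives in the project's urls.py, which is not shown here; the capture
# group names mirror the kwargs popped above):
#
#   url(r'^(?P<app>\w+)/(?P<function>\w+)/(?P<id>\d+)?/?$', process)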
|
py | 7df70b51d2a7f03ea49fad202076099789f44cb1 | '''
Created on Nov 20, 2012
@author: vinnie
'''
from utils import *
def in1d_running(q, A):
'''
j where q[k] in A for 0 <= k <= j
This is the maximum index j where q[0:j] is in A
'''
j = 0
while j < len(q) and q[j] in A:
j += 1
return j
def s_A(Q, A):
'''
s(A) = {(i,j) | q[i,k] in A for 0 <= k <= j}
The set of all coordinates where Q[i,0:k] is in A for 0 <= k <= j,
where j is defined by the ind1d_running function above
'''
return [(i, k) for i in A for k in range(in1d_running(Q[i], A))]
def P(Q, A, m):
'''
Given the single hashing scheme defined by matrix Q,
compute the probability that the first |A| slots are occupied by the
slots in A
'''
if len(A) == 0:
return 0
elif len(A) == 1:
return 1.0 / m
else:
return (1.0 / m) * sum([P(Q, tuple(a for a in A if a != Q[i][j]), m)
for (i, j) in s_A(Q, A)])
def P_map(Q):
'''
Compute P(A) for each n-combination in [0,1,2...m) for 0 <= n < m
Also compute P( [0,1,2...m] ). Only one combination is needed, this should
always be equal to 1.0
'''
m = len(Q)
m_range = range(m)
p = {A: P(Q, A, m) for A in generate_A(m_range)}
return p
def delta_prime(Q):
'''
The average number of spaces probed for each insertion by the time
the table is full. This is the best measure for the efficiency of a
single hashing scheme
'''
m = len(Q)
m_range = [row[0] for row in Q]
set_A = generate_A(m_range)
return (1.0 / (m ** 2)) * sum(P(Q, A, m) * len(s_A(Q, A)) for A in set_A)
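# Illustrative usage (an arbitrary 3x3 example, not a recommended scheme, and it
# assumes generate_A from utils enumerates slot subsets as the docstrings above
# describe):
#
#   Q = [[0, 1, 2],
#        [1, 2, 0],
#        [2, 0, 1]]
#   delta_prime(Q)  # average probes per insertion for this scheme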
def d_prime(Q, n):
'''
The average number of probes needed to insert the nth element
into a table with single hashing scheme Q
'''
m = len(Q)
m_range = [row[0] for row in Q]
assert n <= m
set_A = [A for A in generate_A(m_range) if len(A) == n - 1]
return (1.0 / m) * sum(P(Q, A, m) * len(s_A(Q, A)) for A in set_A)
def search_random(m, N):
from operator import itemgetter
import matplotlib.pyplot as plt
import random
random.seed(1234)
score_Q = [(delta_prime(Q), Q) for Q in [random_Q(m) for _ in range(N)]]
min_score, min_Q = min(score_Q, key=itemgetter(0))
max_score, max_Q = max(score_Q, key=itemgetter(0))
print('Best score:', min_score, min_Q)
print('Worst score:', max_score, max_Q)
plt.hist(list(zip(*score_Q))[0], bins=100, density=True)  # 'normed' was removed from matplotlib; 'density' is the equivalent
plt.xlabel('Probes per insertion')
plt.ylabel('Density')
plt.savefig('m%d_scores.png' % m)
return
if __name__ == '__main__':
search_random(5, 10000)
|
py | 7df70bcafa6576dd57c1a3a8f4619d7375a56526 |
from cas import views as cas_views
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='index.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='index.html'), name='about'),
url(r'^contact/sent/$', TemplateView.as_view(template_name='contact_form/contact_form_sent.html'),
name='contact_form_sent'),
url(r'^invalid-request$', TemplateView.as_view(template_name='invalid_request.html'),
name='invalid_request'),
url(r'^accounts/password/reset/$', auth_views.password_reset,
{'template_name': 'accounts/password_reset_form.html',
'password_reset_form': PasswordResetForm}, name='password_reset'),
url(r'^accounts/password/reset/done/$', auth_views.password_reset_done,
{'template_name': 'accounts/password_reset_done.html'}, name='password_reset_done'),
url(r'^accounts/password/reset/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='password_reset_confirm'),
url(r'^accounts/password/reset/complete/$', auth_views.password_reset_complete, name='password_reset_complete'),
# FIXME: ideally this should be set up dynamically by iterating through each
# ExperimentMetadata instance and using their namespace (e.g., replace all
# instances of forestry with ExperimentMetadata.namespace)
# url(r'^forestry/', include('vcweb.experiment.forestry.urls', namespace='forestry', app_name='forestry')),
# url(r'^bound/', include('vcweb.experiment.bound.urls', namespace='bound', app_name='bound')),
# url(r'^lighterprints/', include('vcweb.experiment.lighterprints.urls', namespace='lighterprints',
# app_name='lighterprints')),
# url(r'^broker/', include('vcweb.experiment.broker.urls', namespace='broker', app_name='broker')),
url(r'^admin/', include(admin.site.urls)),
url(r'^cas/login', cas_views.login, name='cas_login'),
url(r'^cas/logout', cas_views.logout, name='cas_logout'),
url(r'^cas/error', TemplateView.as_view(template_name='cas_access_forbidden.html'), name='cas_error'),
# subject pool urls
url(r'^subject-pool/', include('vcweb.core.subjectpool.urls',
app_name='subjectpool', namespace='subjectpool')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
]
def experiment_urls():
for experiment in settings.VCWEB_EXPERIMENTS:
experiment_name = experiment.rpartition('.')[2]
# include all experiment urls.py under the experiment name's namespace
yield url(r'^' + experiment_name + '/',
include(experiment + '.urls', namespace=experiment_name, app_name=experiment_name))
urlpatterns += experiment_urls()
# core urls catches everything else
urlpatterns.append(
url(r'', include('vcweb.core.urls', namespace='core', app_name='core')))
if settings.DEBUG:
import debug_toolbar
urlpatterns += staticfiles_urlpatterns()
urlpatterns += [
url(r'^500/$', TemplateView.as_view(template_name='500.html')),
url(r'^404/$', TemplateView.as_view(template_name='404.html')),
url(r'^403/$', TemplateView.as_view(template_name='403.html')),
]
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
py | 7df70bd55f27096cd30cbfb721907df1a26a6fd3 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class AppsV1beta1DeploymentRollback(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str',
'rollback_to': 'AppsV1beta1RollbackConfig',
'updated_annotations': 'dict(str, str)'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name',
'rollback_to': 'rollbackTo',
'updated_annotations': 'updatedAnnotations'
}
def __init__(self, api_version=None, kind=None, name=None, rollback_to=None, updated_annotations=None):
"""
AppsV1beta1DeploymentRollback - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._name = None
self._rollback_to = None
self._updated_annotations = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.name = name
self.rollback_to = rollback_to
if updated_annotations is not None:
self.updated_annotations = updated_annotations
@property
def api_version(self):
"""
Gets the api_version of this AppsV1beta1DeploymentRollback.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this AppsV1beta1DeploymentRollback.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this AppsV1beta1DeploymentRollback.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this AppsV1beta1DeploymentRollback.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this AppsV1beta1DeploymentRollback.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this AppsV1beta1DeploymentRollback.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this AppsV1beta1DeploymentRollback.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this AppsV1beta1DeploymentRollback.
:type: str
"""
self._kind = kind
@property
def name(self):
"""
Gets the name of this AppsV1beta1DeploymentRollback.
Required: This must match the Name of a deployment.
:return: The name of this AppsV1beta1DeploymentRollback.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this AppsV1beta1DeploymentRollback.
Required: This must match the Name of a deployment.
:param name: The name of this AppsV1beta1DeploymentRollback.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def rollback_to(self):
"""
Gets the rollback_to of this AppsV1beta1DeploymentRollback.
The config of this deployment rollback.
:return: The rollback_to of this AppsV1beta1DeploymentRollback.
:rtype: AppsV1beta1RollbackConfig
"""
return self._rollback_to
@rollback_to.setter
def rollback_to(self, rollback_to):
"""
Sets the rollback_to of this AppsV1beta1DeploymentRollback.
The config of this deployment rollback.
:param rollback_to: The rollback_to of this AppsV1beta1DeploymentRollback.
:type: AppsV1beta1RollbackConfig
"""
if rollback_to is None:
raise ValueError("Invalid value for `rollback_to`, must not be `None`")
self._rollback_to = rollback_to
@property
def updated_annotations(self):
"""
Gets the updated_annotations of this AppsV1beta1DeploymentRollback.
The annotations to be updated to a deployment
:return: The updated_annotations of this AppsV1beta1DeploymentRollback.
:rtype: dict(str, str)
"""
return self._updated_annotations
@updated_annotations.setter
def updated_annotations(self, updated_annotations):
"""
Sets the updated_annotations of this AppsV1beta1DeploymentRollback.
The annotations to be updated to a deployment
:param updated_annotations: The updated_annotations of this AppsV1beta1DeploymentRollback.
:type: dict(str, str)
"""
self._updated_annotations = updated_annotations
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, AppsV1beta1DeploymentRollback):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | 7df70c7d45e32c2ac4dc07da9cfd0f63e6e75f48 | """The test for the sensibo entity."""
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
import pytest
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
)
from homeassistant.components.number.const import (
ATTR_VALUE,
DOMAIN as NUMBER_DOMAIN,
SERVICE_SET_VALUE,
)
from homeassistant.components.sensibo.const import SENSIBO_ERRORS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.util import dt
from .response import DATA_FROM_API
from tests.common import async_fire_time_changed
async def test_entity(hass: HomeAssistant, load_int: ConfigEntry) -> None:
"""Test the Sensibo climate."""
state1 = hass.states.get("climate.hallway")
assert state1
dr_reg = dr.async_get(hass)
dr_entries = dr.async_entries_for_config_entry(dr_reg, load_int.entry_id)
dr_entry: dr.DeviceEntry
for dr_entry in dr_entries:
if dr_entry.name == "Hallway":
assert dr_entry.identifiers == {("sensibo", "ABC999111")}
device_id = dr_entry.id
er_reg = er.async_get(hass)
er_entries = er.async_entries_for_device(
er_reg, device_id, include_disabled_entities=True
)
er_entry: er.RegistryEntry
for er_entry in er_entries:
if er_entry.name == "Hallway":
assert er_entry.unique_id == "Hallway"
@pytest.mark.parametrize("p_error", SENSIBO_ERRORS)
async def test_entity_send_command(
hass: HomeAssistant, p_error: Exception, load_int: ConfigEntry
) -> None:
"""Test the Sensibo send command with error."""
state = hass.states.get("climate.hallway")
assert state
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "low"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
side_effect=p_error,
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "low"
async def test_entity_send_command_calibration(
hass: HomeAssistant, load_int: ConfigEntry
) -> None:
"""Test the Sensibo send command for calibration."""
registry = er.async_get(hass)
registry.async_update_entity(
"number.hallway_temperature_calibration", disabled_by=None
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=DATA_FROM_API,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state = hass.states.get("number.hallway_temperature_calibration")
assert state.state == "0.1"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_calibration",
return_value={"status": "success"},
):
await hass.services.async_call(
NUMBER_DOMAIN,
SERVICE_SET_VALUE,
{ATTR_ENTITY_ID: state.entity_id, ATTR_VALUE: 0.2},
blocking=True,
)
state = hass.states.get("number.hallway_temperature_calibration")
assert state.state == "0.2"
|
py | 7df70e959af52d2b290a09344b02a69ac8543fc6 | from datetime import datetime, timedelta
import unittest
from app import app, db
from app.models import User, Post
class UserModelCase(unittest.TestCase):
def setUp(self):
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_password_hashing(self):
u = User(username='susan')
u.set_password('cat')
self.assertFalse(u.check_password('dog'))
self.assertTrue(u.check_password('cat'))
def test_avatar(self):
u = User(username='john', email='[email protected]')
self.assertEqual(u.avatar(128), ('https://www.gravatar.com/avatar/'
'd4c74594d841139328695756648b6bd6'
'?d=identicon&s=128'))
def test_follow(self):
u1 = User(username='john', email='[email protected]')
u2 = User(username='susan', email='[email protected]')
db.session.add(u1)
db.session.add(u2)
db.session.commit()
self.assertEqual(u1.followed.all(), [])
self.assertEqual(u1.followers.all(), [])
u1.follow(u2)
db.session.commit()
self.assertTrue(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 1)
self.assertEqual(u1.followed.first().username, 'susan')
self.assertEqual(u2.followers.count(), 1)
self.assertEqual(u2.followers.first().username, 'john')
u1.unfollow(u2)
db.session.commit()
self.assertFalse(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 0)
self.assertEqual(u2.followers.count(), 0)
def test_follow_posts(self):
# create four users
u1 = User(username='john', email='[email protected]')
u2 = User(username='susan', email='[email protected]')
u3 = User(username='mary', email='[email protected]')
u4 = User(username='david', email='[email protected]')
db.session.add_all([u1, u2, u3, u4])
# create four posts
now = datetime.utcnow()
p1 = Post(body="post from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post from mary", author=u3,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post from david", author=u4,
timestamp=now + timedelta(seconds=2))
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
# setup the followers
u1.follow(u2) # john follows susan
u1.follow(u4) # john follows david
u2.follow(u3) # susan follows mary
u3.follow(u4) # mary follows david
db.session.commit()
# check the followed posts of each user
f1 = u1.followed_posts().all()
f2 = u2.followed_posts().all()
f3 = u3.followed_posts().all()
f4 = u4.followed_posts().all()
self.assertEqual(f1, [p2, p4, p1])
self.assertEqual(f2, [p2, p3])
self.assertEqual(f3, [p3, p4])
self.assertEqual(f4, [p4])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | 7df70eb16b544b5a96fc87097dd681ed41ef3413 | import typing
import dask.array as da
import numpy as np
from typing_extensions import Literal
from sgkit.distance import metrics
from sgkit.typing import ArrayLike
MetricTypes = Literal["euclidean", "correlation"]
def pairwise_distance(
x: ArrayLike,
metric: MetricTypes = "euclidean",
split_every: typing.Optional[int] = None,
) -> da.array:
"""Calculates the pairwise distance between all pairs of row vectors in the
given two dimensional array x.
To illustrate the algorithm consider the following (4, 5) two dimensional array:
[e.00, e.01, e.02, e.03, e.04]
[e.10, e.11, e.12, e.13, e.14]
[e.20, e.21, e.22, e.23, e.24]
[e.30, e.31, e.32, e.33, e.34]
The rows of the above matrix are the set of vectors. Now let's label all
the vectors as v0, v1, v2, v3.
The result will be a two dimensional symmetric matrix which will contain
the distance between all pairs. Since there are 4 vectors, calculating the
distance between each vector and every other vector, will result in 16
distances and the resultant array will be of size (4, 4) as follows:
[v0.v0, v0.v1, v0.v2, v0.v3]
[v1.v0, v1.v1, v1.v2, v1.v3]
[v2.v0, v2.v1, v2.v2, v2.v3]
[v3.v0, v3.v1, v3.v2, v3.v3]
The (i, j) position in the resulting array (matrix) denotes the distance
between vi and vj vectors.
Negative and nan values are considered as missing values. They are ignored
for all distance metric calculations.
Parameters
----------
x
[array-like, shape: (M, N)]
An array like two dimensional matrix. The rows are the
vectors used for comparison, i.e. for pairwise distance.
metric
The distance metric to use. The distance function can be
'euclidean' or 'correlation'.
split_every
Determines the depth of the recursive aggregation in the reduction
        step. This argument is directly passed to the call to ``dask.reduction``
function in the reduce step of the map reduce.
Omit to let dask heuristically decide a good default. A default can
also be set globally with the split_every key in dask.config.
Returns
-------
[array-like, shape: (M, M)]
A two dimensional distance matrix, which will be symmetric. The dimension
will be (M, M). The (i, j) position in the resulting array
(matrix) denotes the distance between ith and jth row vectors
in the input array.
Examples
--------
>>> from sgkit.distance.api import pairwise_distance
>>> import dask.array as da
>>> x = da.array([[6, 4, 1,], [4, 5, 2], [9, 7, 3]]).rechunk(2, 2)
>>> pairwise_distance(x, metric='euclidean').compute()
array([[0. , 2.44948974, 4.69041576],
[2.44948974, 0. , 5.47722558],
[4.69041576, 5.47722558, 0. ]])
>>> import numpy as np
>>> x = np.array([[6, 4, 1,], [4, 5, 2], [9, 7, 3]])
>>> pairwise_distance(x, metric='euclidean').compute()
array([[0. , 2.44948974, 4.69041576],
[2.44948974, 0. , 5.47722558],
[4.69041576, 5.47722558, 0. ]])
>>> x = np.array([[6, 4, 1,], [4, 5, 2], [9, 7, 3]])
>>> pairwise_distance(x, metric='correlation').compute()
array([[-4.44089210e-16, 2.62956526e-01, 2.82353505e-03],
[ 2.62956526e-01, 0.00000000e+00, 2.14285714e-01],
[ 2.82353505e-03, 2.14285714e-01, 0.00000000e+00]])
"""
try:
metric_map_func = getattr(metrics, f"{metric}_map")
metric_reduce_func = getattr(metrics, f"{metric}_reduce")
n_map_param = metrics.N_MAP_PARAM[metric]
except AttributeError:
raise NotImplementedError(f"Given metric: {metric} is not implemented.")
x = da.asarray(x)
if x.ndim != 2:
raise ValueError(f"2-dimensional array expected, got '{x.ndim}'")
    # setting this variable outside of _pairwise to avoid its recreation
    # in every iteration, which would otherwise significantly increase dask
    # graph serialisation/deserialisation time
metric_param = np.empty(n_map_param, dtype=x.dtype)
def _pairwise(f: ArrayLike, g: ArrayLike) -> ArrayLike:
result: ArrayLike = metric_map_func(f[:, None, :], g, metric_param)
# Adding a new axis to help combine chunks along this axis in the
# reduction step (see the _aggregate and _combine functions below).
return result[..., np.newaxis]
# concatenate in blockwise leads to high memory footprints, so instead
# we perform blockwise without contraction followed by reduction.
# More about this issue: https://github.com/dask/dask/issues/6874
out = da.blockwise(
_pairwise,
"ijk",
x,
"ik",
x,
"jk",
dtype=x.dtype,
concatenate=False,
)
def _aggregate(x_chunk: ArrayLike, **_: typing.Any) -> ArrayLike:
"""Last function to be executed when resolving the dask graph,
producing the final output. It is always invoked, even when the reduced
Array counts a single chunk along the reduced axes."""
x_chunk = x_chunk.reshape(x_chunk.shape[:-2] + (-1, n_map_param))
result: ArrayLike = metric_reduce_func(x_chunk)
return result
def _chunk(x_chunk: ArrayLike, **_: typing.Any) -> ArrayLike:
return x_chunk
def _combine(x_chunk: ArrayLike, **_: typing.Any) -> ArrayLike:
"""Function used for intermediate recursive aggregation (see
split_every argument to ``da.reduction below``). If the
reduction can be performed in less than 3 steps, it will
not be invoked at all."""
# reduce chunks by summing along the -2 axis
x_chunk_reshaped = x_chunk.reshape(x_chunk.shape[:-2] + (-1, n_map_param))
return x_chunk_reshaped.sum(axis=-2)[..., np.newaxis]
r = da.reduction(
out,
chunk=_chunk,
combine=_combine,
aggregate=_aggregate,
axis=-1,
dtype=x.dtype,
meta=np.ndarray((0, 0), dtype=x.dtype),
split_every=split_every,
name="pairwise",
)
t = da.triu(r)
return t + t.T
|
py | 7df70eb92a0832db62e8e4a856d32450609fac8c | from django.shortcuts import render
from .models import Image, Location, Category
# Create your views here.
def home(request):
images=Image.get_images()
locations=Location.objects.all()
categories=Category.objects.all()
return render(request, 'home.html', {"images":images, "locations":locations, "categories":categories})
def search_by_category(request):
if 'image_category' in request.GET and request.GET["image_category"]:
search_term=request.GET.get("image_category")
searched_images=Image.search_by_category(search_term)
return render(request, 'search.html', {"images":searched_images})
else:
message="No search term added"
return render(request, 'search.html', {"message":message})
def photos_by_location(request, location_id):
images=Image.filter_by_location(location_id)
location=Location.objects.get(id=location_id)
return render(request, 'locations.html', {"images":images, "location":location}) |
py | 7df70f49bd27ff60ef68b5df473b5f0062da10f9 | from typing import Any, Dict, List, Mapping
import numpy as np
import torch
from catalyst.core import _Runner
from catalyst.rl2 import (
AlgorithmSpec, EnvironmentSpec, RLExperiment, RLState, utils
)
# RLRunner has only one stage (?) - endless training
# each Epoch we recalculate training loader based on current Replay buffer
# then -> typical training on loader with selected algorithm
#
class RLRunner(_Runner):
experiment: RLExperiment
state: RLState
def _fetch_rollouts(self):
pass
def _prepare_for_stage(self, stage: str):
super()._prepare_for_stage(stage=stage)
self.algorithm: AlgorithmSpec = \
self.experiment.get_algorithm(stage=stage)
self.environment: EnvironmentSpec = \
self.experiment.get_environment(stage=stage)
def _prepare_for_epoch(self, stage: str, epoch: int):
super()._prepare_for_epoch(stage=stage, epoch=epoch)
# @TODO: remove this trick
utils.set_global_seed(self.experiment.initial_seed + epoch)
loaders = self.experiment.get_loaders(stage=stage, epoch=epoch)
self.loaders = loaders
def _run_batch_train_step(self, batch: Mapping[str, Any]):
# todo: should implement different training steps
# for different algorithms
metrics: Dict = self.algorithm.train_on_batch(
batch,
actor_update=(self.state.step % self.state.actor_grad_period == 0),
critic_update=(
self.state.step % self.state.critic_grad_period == 0
),
) or {}
metrics_ = self._update_target_weights(self.state.step) or {}
metrics.update(**metrics_)
self.state.metric_manager.add_batch_value(metrics_dict=metrics)
def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]:
# todo: should implement different policy -> action
# for different use-cases: discrete, continuous action spaces
pass
@torch.no_grad()
def inference(
self,
sampler_ids: List[int],
run_ids: List[int],
states: np.ndarray,
rewards: np.ndarray,
):
# looks like production-ready thing
# @TODO: make a microservice from this method
batch = None
actions = self.predict_batch(batch)
return actions
|
py | 7df70f542aad64b30b9e6f29ed00a3298e9d363a | """udemy_django_rest_tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('user.urls')),
path('api/recipe/', include('recipe.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
py | 7df71142c55895545d0948902d619d088386197d | """
dacashd JSONRPC interface
"""
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import config
import base58
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from masternode import Masternode
from decimal import Decimal
import time
class DACashDaemon():
def __init__(self, **kwargs):
host = kwargs.get('host', '127.0.0.1')
user = kwargs.get('user')
password = kwargs.get('password')
port = kwargs.get('port')
self.creds = (user, password, host, port)
# memoize calls to some dacashd methods
self.governance_info = None
self.gobject_votes = {}
@property
def rpc_connection(self):
return AuthServiceProxy("http://{0}:{1}@{2}:{3}".format(*self.creds))
@classmethod
def from_dacash_conf(self, dacash_dot_conf):
from dacash_config import DACashConfig
config_text = DACashConfig.slurp_config_file(dacash_dot_conf)
creds = DACashConfig.get_rpc_creds(config_text, config.network)
creds[u'host'] = config.rpc_host
return self(**creds)
def rpc_command(self, *params):
return self.rpc_connection.__getattr__(params[0])(*params[1:])
# common RPC convenience methods
def get_masternodes(self):
mnlist = self.rpc_command('masternodelist', 'full')
return [Masternode(k, v) for (k, v) in mnlist.items()]
def get_current_masternode_vin(self):
from dacashlib import parse_masternode_status_vin
my_vin = None
try:
status = self.rpc_command('masternode', 'status')
mn_outpoint = status.get('outpoint') or status.get('vin')
my_vin = parse_masternode_status_vin(mn_outpoint)
except JSONRPCException as e:
pass
return my_vin
def governance_quorum(self):
# TODO: expensive call, so memoize this
total_masternodes = self.rpc_command('masternode', 'count', 'enabled')
min_quorum = self.govinfo['governanceminquorum']
# the minimum quorum is calculated based on the number of masternodes
quorum = max(min_quorum, (total_masternodes // 10))
return quorum
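    # Added note for illustration: if governanceminquorum were 10, a network of
    # 250 enabled masternodes would yield quorum = max(10, 250 // 10) = 25,
    # while a small 40-node testnet would still need the floor of 10 votes.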
@property
def govinfo(self):
if (not self.governance_info):
self.governance_info = self.rpc_command('getgovernanceinfo')
return self.governance_info
# governance info convenience methods
def superblockcycle(self):
return self.govinfo['superblockcycle']
def last_superblock_height(self):
height = self.rpc_command('getblockcount')
cycle = self.superblockcycle()
return cycle * (height // cycle)
def next_superblock_height(self):
return self.last_superblock_height() + self.superblockcycle()
def is_masternode(self):
return not (self.get_current_masternode_vin() is None)
def is_synced(self):
mnsync_status = self.rpc_command('mnsync', 'status')
synced = (mnsync_status['IsBlockchainSynced'] and
mnsync_status['IsMasternodeListSynced'] and
mnsync_status['IsWinnersListSynced'] and
mnsync_status['IsSynced'] and
not mnsync_status['IsFailed'])
return synced
def current_block_hash(self):
height = self.rpc_command('getblockcount')
block_hash = self.rpc_command('getblockhash', height)
return block_hash
def get_superblock_budget_allocation(self, height=None):
if height is None:
height = self.rpc_command('getblockcount')
return Decimal(self.rpc_command('getsuperblockbudget', height))
def next_superblock_max_budget(self):
cycle = self.superblockcycle()
current_block_height = self.rpc_command('getblockcount')
last_superblock_height = (current_block_height // cycle) * cycle
next_superblock_height = last_superblock_height + cycle
last_allocation = self.get_superblock_budget_allocation(last_superblock_height)
next_allocation = self.get_superblock_budget_allocation(next_superblock_height)
next_superblock_max_budget = next_allocation
return next_superblock_max_budget
# "my" votes refers to the current running masternode
# memoized on a per-run, per-object_hash basis
def get_my_gobject_votes(self, object_hash):
import dacashlib
if not self.gobject_votes.get(object_hash):
my_vin = self.get_current_masternode_vin()
# if we can't get MN vin from output of `masternode status`,
# return an empty list
if not my_vin:
return []
(txid, vout_index) = my_vin.split('-')
cmd = ['gobject', 'getcurrentvotes', object_hash, txid, vout_index]
raw_votes = self.rpc_command(*cmd)
self.gobject_votes[object_hash] = dacashlib.parse_raw_votes(raw_votes)
return self.gobject_votes[object_hash]
def is_govobj_maturity_phase(self):
# 3-day period for govobj maturity
maturity_phase_delta = 1662 # ~(60*24*3)/2.6
if config.network == 'testnet':
maturity_phase_delta = 24 # testnet
event_block_height = self.next_superblock_height()
maturity_phase_start_block = event_block_height - maturity_phase_delta
current_height = self.rpc_command('getblockcount')
event_block_height = self.next_superblock_height()
# print "current_height = %d" % current_height
# print "event_block_height = %d" % event_block_height
# print "maturity_phase_delta = %d" % maturity_phase_delta
# print "maturity_phase_start_block = %d" % maturity_phase_start_block
return (current_height >= maturity_phase_start_block)
def we_are_the_winner(self):
import dacashlib
# find the elected MN vin for superblock creation...
current_block_hash = self.current_block_hash()
mn_list = self.get_masternodes()
winner = dacashlib.elect_mn(block_hash=current_block_hash, mnlist=mn_list)
my_vin = self.get_current_masternode_vin()
# print "current_block_hash: [%s]" % current_block_hash
# print "MN election winner: [%s]" % winner
# print "current masternode VIN: [%s]" % my_vin
return (winner == my_vin)
def estimate_block_time(self, height):
import dacashlib
"""
Called by block_height_to_epoch if block height is in the future.
Call `block_height_to_epoch` instead of this method.
        DO NOT CALL DIRECTLY if you don't want an "Oh Noes." exception.
"""
current_block_height = self.rpc_command('getblockcount')
diff = height - current_block_height
if (diff < 0):
raise Exception("Oh Noes.")
future_seconds = dacashlib.blocks_to_seconds(diff)
estimated_epoch = int(time.time() + future_seconds)
return estimated_epoch
def block_height_to_epoch(self, height):
"""
Get the epoch for a given block height, or estimate it if the block hasn't
been mined yet. Call this method instead of `estimate_block_time`.
"""
epoch = -1
try:
bhash = self.rpc_command('getblockhash', height)
block = self.rpc_command('getblock', bhash)
epoch = block['time']
except JSONRPCException as e:
if e.message == 'Block height out of range':
epoch = self.estimate_block_time(height)
else:
print("error: %s" % e)
raise e
return epoch
@property
def has_sentinel_ping(self):
getinfo = self.rpc_command('getinfo')
return (getinfo['protocolversion'] >= config.min_dacashd_proto_version_with_sentinel_ping)
def ping(self):
self.rpc_command('sentinelping', config.sentinel_version)
|
py | 7df7119a95dcb2b6c2793195e377a35e554afcfc | #!/usr/bin/python
import sys, json, re, logging, argparse
from pysam import Samfile
from pbsuite.utils.summarizeAssembly import getStats
def expandAlign(alignment):
"""
Takes a pysam Alignment and creates
(reference, query) alignments
For example:
query = ATCGC-GT
reference = AT-GCGGA
Where C inserted, G deleted, A->T Sub
"""
seq = alignment.seq
cigar = expandCigar(alignment.cigar)
mdTag = None
for i in alignment.tags:
if i[0] == "MD":
mdTag = expandMd(i[1])
if mdTag is None:# and alignment.target:
logging.debug(("Mapped read %s doesn't have MD tag. Mismatches will be 0") \
% (alignment.qname))
mdTag = "-" * len(cigar)
qPos = 0
tPos = 0
tSeq = []
qSeq = []
#Expanding query seq and filling in target seq
try:
for i in cigar:
if i == 0:
qSeq.append(seq[qPos])
if mdTag[tPos] != '-':
tSeq.append(mdTag[tPos])
else:
tSeq.append(seq[qPos])
qPos += 1
tPos += 1
elif i == 1:
qSeq.append(seq[qPos])
tSeq.append('-')
qPos += 1
elif i == 2:
qSeq.append('-')
tSeq.append(mdTag[tPos])
tPos += 1
except IndexError:
return None, None
return (qSeq,tSeq)
def expandCigar(cigar):
"""
Turns the abbreviated cigar into the full array
0 = M
1 = I
2 = D
"""
ret = []
for t,s in cigar:
ret.extend([t]*s)
return ret
def expandMd(md):
"""
Turns abbreviated MD into a full array
"""
ret = []
for i in re.findall("\d+|\^?[ATCGN]+", md):
if i.startswith('^'):
ret.extend(list(i[1:]))
elif i[0] in ["A","T","C","G","N"]:
ret.extend(list(i))
else:
ret.extend(['-']*int(i))
return ret
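#Added illustration: how an abbreviated MD tag unrolls. Matched positions become
#'-', while mismatched and deleted reference bases keep their letters, e.g.
#   expandMd("10A5^AC6") -> ['-']*10 + ['A'] + ['-']*5 + ['A', 'C'] + ['-']*6
#(24 reference positions in total)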
def counter(query, reference):
mat = ins = dels = sub = 0
for q,r in zip(query, reference):
if q == '-':
dels += 1
elif r == '-':
ins += 1
elif q != r:
sub += 1
else:
mat += 1
acc = mat/float(mat+ins+dels+sub)
tot = len(filter(lambda x: x != '-', query))
return acc, tot, ins, dels, sub
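#Added illustration using the alignment from the expandAlign docstring:
#   query     = "ATCGC-GT"
#   reference = "AT-GCGGA"
#   counter(query, reference) -> (0.625, 7, 1, 1, 1)
#i.e. 5 matches, 1 insertion, 1 deletion and 1 substitution over 8 columns,
#with 7 non-gap query bases counted.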
if __name__ == '__main__':
if len(sys.argv) == 1:
sam = Samfile("-", "r")
elif len(sys.argv) == 2:
try:
sam = Samfile(sys.argv[1])
except ValueError:
sys.stderr.write("%s may not be a valid sam/bam file\n" % (sys.argv[1]))
exit(0)
else:
sys.stderr.write("Expected 1 argument - the sam/bam file\n")
exit(0)
readLengths = []
accuracy = 0
insertions = 0
deletions = 0
subs = 0
soft = 0
tot = 0.0
cnt = 0.0
unmapped = 0
for align in sam:
if align.is_unmapped:
unmapped += 1
continue
query, refer = expandAlign(align)
if query is None:
continue#does this happen?
readLengths.append(len(align.seq))
cnt += 1
a,t,i,d,s = counter(query, refer)
sc = 0
        # pysam cigar tuples are (operation, length); operation 4 is a soft clip
        if align.cigar[0][0] == 4:
            sc += align.cigar[0][1]
        if align.cigar[-1][0] == 4:
            sc += align.cigar[-1][1]
accuracy += a
tot += t
insertions += i
deletions += d
subs += s
soft += sc
errCnt = float(insertions + deletions + subs)
stats = getStats(readLengths)
space = str(max([len(str(x)) for x in stats.values()])+2)
report = ("#Seqs | {numSeqs:%d,}\n"
"Min | {min:%d,}\n"
"1st Qu.| {FstQu:%d,}\n" + \
"Median | {median:%d,}\n" + \
"Mean | {mean:%d,}\n" + \
"3rd Qu.| {TrdQu:%d,}\n" + \
"Max | {max:%d,}\n" + \
"Total | {totalLength:%d,}\n" + \
"n50 | {n50:%d,}\n" + \
"n90 | {n90:%d,}\n" + \
"n95 | {n95:%d,}\n").replace("%d", str(space))
#print stats
print "Read Stats"#, json.dumps(getStats(readLengths), indent=4)
print report.format(**stats)
print "Bases Counted %d" % tot
print "Average Accuracy %.2f" % (accuracy/cnt)
print "Total Unmapped %d" % (unmapped)
print "Percent Unmapped %.2f" % (unmapped/cnt)
print
print "Total Insertions %d" % insertions
print "Average Insertions per Read %.2f" % (insertions/cnt)
print "Percentage of errors Insertions %.2f" % (insertions/errCnt)
print
print "Total Deletions %d" % deletions
print "Average Deletions per Read %.2f" % (deletions/cnt)
print "Percentage of errors Deletions %.2f" % (deletions/errCnt)
print
print "Total Substitutions %d" % subs
print "Average Substitutions per Read %.2f" % (subs/cnt)
print "Percentage of errors Substitutions %.2f" % (subs/errCnt)
print
print "Total SoftClipped %d" % soft
print "Average SoftClipped per Read %.2f" % (soft/cnt)
|
py | 7df711bbd4db6f1343762eba9ad4fb6052b301e7 | """General-purpose test script for image-to-image translation.
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for '--num_test' images and save results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test a CycleGAN model (both sides):
python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Test a CycleGAN model (one side only):
python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
The option '--model test' is used for generating CycleGAN results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test a pix2pix model:
python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/test_options.py for more test options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import torch
if __name__ == '__main__':
opt = TestOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.save_to_video = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
# create a website
web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
if opt.load_iter > 0: # load_iter is 0 by default
web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
print('creating web directory', web_dir)
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
# test with eval mode. This only affects layers like batchnorm and dropout.
# For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode.
# For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
if opt.eval:
model.eval()
if opt.save_to_video:
import numpy as np
from PIL import Image, ImageDraw
import cv2
import util, html
videodims = (768, 256)
# fourcc =cv2.VideoWriter_fourcc(*"mp4v")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter("out/test.avi", fourcc, 10.0, videodims)
img = Image.new('RGB', videodims, color='darkred')
# draw stuff that goes on every frame here
# for i in range(0, 60 * 60):
for i, data in enumerate(dataset):
if i >= opt.num_test: # only apply our model to opt.num_test images.
break
model.set_input(data) # unpack data from data loader
model.test() # run inference
visuals = model.get_current_visuals() # get image results
img_path = model.get_image_paths() # get image paths
if opt.save_to_video:
from util.util import tensor2im
combines_image = torch.cat(
(dict(visuals.items())['real_A'], dict(visuals.items())['fake_B'], dict(visuals.items())['real_B']), -1)
imtemp = tensor2im(combines_image)
# draw frame specific stuff here.
# video.write(cv2.cvtColor(np.array(imtemp), cv2.COLOR_RGB2BGR))
video.write(cv2.cvtColor(imtemp, cv2.COLOR_RGB2BGR))
if i % 5 == 0: # save images to an HTML file
print('processing (%04d)-th image... %s' % (i, img_path))
save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
webpage.save() # save the HTML
video.release() |
py | 7df712e7be23be3094213f549789fe08269c4fe5 | import os
import yaml
import logging
import collections
def init_logging():
log = logging.getLogger(__name__)
return log
log = init_logging()
class YamlConfig(collections.MutableMapping):
"""
A configuration object populated by parsing a yaml file, with optional
default values.
Note that modifying the _defaults attribute of an instance can potentially
    yield confusing results; if you need to modify defaults, use the class
variable or create a subclass.
"""
_defaults = dict()
def __init__(self, yaml_path=None):
self.yaml_path = yaml_path
if self.yaml_path:
self.load()
else:
self._conf = dict()
def load(self, conf=None):
if conf:
if isinstance(conf, dict):
self._conf = conf
else:
self._conf = yaml.safe_load(conf)
return
if os.path.exists(self.yaml_path):
with open(self.yaml_path) as f:
self._conf = yaml.safe_load(f)
else:
log.debug("%s not found", self.yaml_path)
self._conf = dict()
def update(self, in_dict):
"""
Update an existing configuration using dict.update()
:param in_dict: The dict to use to update
"""
self._conf.update(in_dict)
@classmethod
def from_dict(cls, in_dict):
"""
Build a config object from a dict.
:param in_dict: The dict to use
:returns: The config object
"""
conf_obj = cls()
conf_obj._conf = in_dict
return conf_obj
def to_dict(self):
"""
:returns: A shallow copy of the configuration as a dict
"""
return dict(self._conf)
@classmethod
def from_str(cls, in_str):
"""
Build a config object from a string or yaml stream.
:param in_str: The stream or string
:returns: The config object
"""
conf_obj = cls()
conf_obj._conf = yaml.safe_load(in_str)
return conf_obj
def to_str(self):
"""
:returns: str(self)
"""
return str(self)
def get(self, key, default=None):
return self._conf.get(key, default)
def __str__(self):
return yaml.safe_dump(self._conf, default_flow_style=False).strip()
def __repr__(self):
return self.__str__()
def __getitem__(self, name):
return self.__getattr__(name)
def __getattr__(self, name):
return self._conf.get(name, self._defaults.get(name))
def __contains__(self, name):
return self._conf.__contains__(name)
def __setattr__(self, name, value):
        if name.endswith('_conf') or name == 'yaml_path':
object.__setattr__(self, name, value)
else:
self._conf[name] = value
def __delattr__(self, name):
del self._conf[name]
def __len__(self):
return self._conf.__len__()
def __iter__(self):
return self._conf.__iter__()
def __setitem__(self, name, value):
self._conf.__setitem__(name, value)
def __delitem__(self, name):
self._conf.__delitem__(name)
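# Added usage sketch (not part of the original module): YamlConfig behaves like
# a dict with attribute access and per-class defaults, e.g.
#
#   cfg = YamlConfig.from_dict({'archive_base': '/tmp/archive'})
#   cfg.archive_base       -> '/tmp/archive'
#   cfg.missing_key        -> None   (falls back to _defaults, then None)
#   'archive_base' in cfg  -> True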
class TeuthologyConfig(YamlConfig):
"""
This class is intended to unify teuthology's many configuration files and
objects. Currently it serves as a convenient interface to
~/.teuthology.yaml and nothing else.
"""
yaml_path = os.path.join(os.path.expanduser('~/.teuthology.yaml'))
_defaults = {
'archive_base': '/home/teuthworker/archive',
'archive_upload': None,
'archive_upload_key': None,
'archive_upload_url': None,
'automated_scheduling': False,
'reserve_machines': 5,
'ceph_git_base_url': 'https://github.com/ceph/',
'ceph_git_url': None,
'ceph_qa_suite_git_url': None,
'ceph_cm_ansible_git_url': None,
'use_conserver': False,
'conserver_master': 'conserver.front.sepia.ceph.com',
'conserver_port': 3109,
'gitbuilder_host': 'gitbuilder.ceph.com',
'githelper_base_url': 'http://git.ceph.com:8080',
'check_package_signatures': True,
'lab_domain': 'front.sepia.ceph.com',
'lock_server': 'http://paddles.front.sepia.ceph.com/',
'max_job_time': 259200, # 3 days
'nsupdate_url': 'http://nsupdate.front.sepia.ceph.com/update',
'results_server': 'http://paddles.front.sepia.ceph.com/',
'results_ui_server': 'http://pulpito.ceph.com/',
'results_sending_email': 'teuthology',
'results_timeout': 43200,
'src_base_path': os.path.expanduser('~/src'),
'verify_host_keys': True,
'watchdog_interval': 120,
'kojihub_url': 'http://koji.fedoraproject.org/kojihub',
'kojiroot_url': 'http://kojipkgs.fedoraproject.org/packages',
'koji_task_url': 'https://kojipkgs.fedoraproject.org/work/',
'baseurl_template': 'http://{host}/{proj}-{pkg_type}-{dist}-{arch}-{flavor}/{uri}',
'use_shaman': False,
'shaman_host': 'shaman.ceph.com',
'teuthology_path': None,
'suite_verify_ceph_hash': True,
'suite_allow_missing_packages': False,
'openstack': {
'clone': 'git clone http://github.com/ceph/teuthology',
'user-data': 'teuthology/openstack/openstack-{os_type}-{os_version}-user-data.txt',
'ip': '1.1.1.1',
'machine': {
'disk': 20,
'ram': 8000,
'cpus': 1,
},
'volumes': {
'count': 0,
'size': 1,
},
},
}
def __init__(self, yaml_path=None):
super(TeuthologyConfig, self).__init__(yaml_path or self.yaml_path)
def get_ceph_cm_ansible_git_url(self):
return (self.ceph_cm_ansible_git_url or
self.ceph_git_base_url + 'ceph-cm-ansible.git')
def get_ceph_qa_suite_git_url(self):
return (self.ceph_qa_suite_git_url or
self.get_ceph_git_url())
def get_ceph_git_url(self):
return (self.ceph_git_url or
self.ceph_git_base_url + 'ceph-ci.git')
class JobConfig(YamlConfig):
pass
class FakeNamespace(YamlConfig):
"""
This class is meant to behave like a argparse Namespace
We'll use this as a stop-gap as we refactor commands but allow the tasks
to still be passed a single namespace object for the time being.
"""
def __init__(self, config_dict=None):
if not config_dict:
config_dict = dict()
self._conf = self._clean_config(config_dict)
set_config_attr(self)
def _clean_config(self, config_dict):
"""
Makes sure that the keys of config_dict are able to be used. For
example the "--" prefix of a docopt dict isn't valid and won't populate
correctly.
"""
result = dict()
for key, value in config_dict.items():
new_key = key
if new_key.startswith("--"):
new_key = new_key[2:]
elif new_key.startswith("<") and new_key.endswith(">"):
new_key = new_key[1:-1]
if "-" in new_key:
new_key = new_key.replace("-", "_")
result[new_key] = value
return result
def __getattr__(self, name):
"""
We need to modify this for FakeNamespace so that getattr() will
work correctly on a FakeNamespace instance.
"""
if name in self._conf:
return self._conf[name]
elif name in self._defaults:
return self._defaults[name]
raise AttributeError(name)
def __setattr__(self, name, value):
if name == 'teuthology_config':
object.__setattr__(self, name, value)
else:
super(FakeNamespace, self).__setattr__(name, value)
def __repr__(self):
return repr(self._conf)
def __str__(self):
return str(self._conf)
def set_config_attr(obj):
"""
Set obj.teuthology_config, mimicking the old behavior of misc.read_config
"""
obj.teuthology_config = config
def _get_config_path():
system_config_path = '/etc/teuthology.yaml'
if not os.path.exists(TeuthologyConfig.yaml_path) and \
os.path.exists(system_config_path):
return system_config_path
return TeuthologyConfig.yaml_path
config = TeuthologyConfig(yaml_path=_get_config_path())
|
py | 7df71588569967b65629cdfa66b2c0bd177e5f54 | import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="autocolorscale", parent_name="layout.coloraxis", **kwargs
):
super(AutocolorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs
)
|
py | 7df716afb057f4b6b0f17903e371695505a2e630 | from classes import *
if __name__ == '__main__':
# create all the beds
bed_1 = Bed("bed 1", "single")
bed_2 = Bed("bed 2", "single")
bed_3 = Bed("bed 3", "single")
bed_4 = Bed("bed 4", "single")
bed_5 = Bed("bed 5", "single")
# Create all the rooms
room_1 = Room("room 1", [bed_1])
room_2 = Room("room 2", [bed_2])
room_3 = Room("room 3", [bed_3])
room_4 = Room("room 4", [bed_4])
room_5 = Room("room 5", [bed_5])
# Create the house
the_house = House(1050)
# Add rooms to the house
the_house.add_room(room_1)
the_house.add_room(room_2)
the_house.add_room(room_3)
the_house.add_room(room_4)
the_house.add_room(room_5)
# Create the people and their bids
Tim = Person("Tim")
Tom = Person("Tom")
Schubert = Person("Schubert")
Jack = Person("Jack")
Watson = Person("Watt")
# Add bids to a person
Tim.add_bids({
"room 1": 20,
"room 2": 20,
"room 3": 40,
"room 4": 20,
"room 5": 20
})
Tom.add_bids({
"room 1": 20,
"room 2": 20,
"room 3": 40,
"room 4": 20,
"room 5": 20
})
Schubert.add_bids({
"room 1": -20,
"room 2": 20,
"room 3": 40,
"room 4": 20,
"room 5": 20
})
Jack.add_bids({
"room 1": 20,
"room 2": 20,
"room 3": 40,
"room 4": 20,
"room 5": 20
})
Watson.add_bids({
"room 1": 20,
"room 2": 20,
"room 3": 40,
"room 4": 80,
"room 5": 20
})
test = Calculator(price_by="total average", bid_value="extra")
# Add people to the calculator
test.add_people([Tim, Tom, Jack, Schubert, Watson])
# Add house to calculator
test.add_house(the_house)
# Calculate prices
test.calculate()
test.view_results()
|
py | 7df717d1cc0c28cc9f02e1fc115da6d0c80cfefc | from typing import Optional
from pyshexc.parser.ShExDocParser import ShExDocParser
from pyshexc.parser.ShExDocVisitor import ShExDocVisitor
from pyshexc.parser_impl.parser_context import ParserContext
from pyshexc.parser_impl.shex_annotations_and_semacts_parser import ShexAnnotationAndSemactsParser
from pyshexc.parser_impl.shex_shape_expression_parser import ShexShapeExpressionParser
from ShExJSG.ShExJ import ShapeExternal, IRIREF, ShapeDecl
class ShexDocParser(ShExDocVisitor):
""" parser for sheExDoc production """
def __init__(self, default_base: Optional[str]=None):
ShExDocVisitor.__init__(self)
self.context = ParserContext()
self.context.base = IRIREF(default_base) if default_base else None
def visitShExDoc(self, ctx: ShExDocParser.ShExDocContext):
""" shExDoc: directive* ((notStartAction | startActions) statement*)? EOF """
super().visitShExDoc(ctx)
def visitBaseDecl(self, ctx: ShExDocParser.BaseDeclContext):
""" baseDecl: KW_BASE IRIREF """
self.context.base = ''
self.context.base = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext):
""" prefixDecl: KW_PREFIX PNAME_NS IRIREF """
iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF())
prefix = ctx.PNAME_NS().getText()
if iri not in self.context.ld_prefixes:
self.context.prefixes[prefix] = iri.val
def visitImportDecl(self, ctx: ShExDocParser.ImportDeclContext):
""" importDecl : KW_IMPORT IRIREF """
if self.context.schema.imports is None:
self.context.schema.imports = [self.context.iriref_to_shexj_iriref(ctx.IRIREF())]
else:
self.context.schema.imports.append(self.context.iriref_to_shexj_iriref(ctx.IRIREF()))
def visitStart(self, ctx: ShExDocParser.StartContext):
""" start: KW_START '=' shapeExpression """
shexpr = ShexShapeExpressionParser(self.context, None)
shexpr.visit(ctx.shapeExpression())
self.context.schema.start = shexpr.expr
def visitShapeExprDecl(self, ctx: ShExDocParser.ShapeExprDeclContext):
""" shapeExprDecl: KW_ABSTRACT? shapeExprLabel restrictions* (shapeExpression | KW_EXTERNAL) ;"""
label = self.context.shapeexprlabel_to_IRI(ctx.shapeExprLabel())
if ctx.KW_EXTERNAL():
shape = ShapeExternal()
else:
shexpr = ShexShapeExpressionParser(self.context)
shexpr.visit(ctx.shapeExpression())
shape = shexpr.expr
if ctx.KW_ABSTRACT() or ctx.restrictions():
shape = ShapeDecl(shapeExpr=shape)
if ctx.KW_ABSTRACT():
shape.abstract = True
if ctx.restrictions():
shape.restricts = [self.context.shapeexprlabel_to_IRI(r.shapeExprLabel()) for r in ctx.restrictions()]
if label:
shape.id = label
if self.context.schema.shapes is None:
self.context.schema.shapes = [shape]
else:
self.context.schema.shapes.append(shape)
def visitStartActions(self, ctx: ShExDocParser.StartActionsContext):
""" startActions: semanticAction+ ; """
startacts = []
for cd in ctx.semanticAction():
cdparser = ShexAnnotationAndSemactsParser(self.context)
cdparser.visit(cd)
startacts += cdparser.semacts
self.context.schema.startActs = startacts
|
py | 7df718833c541067ea2005817c7ed1b051b31fad | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.auth_role import AuthRole # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestAuthRole(unittest.TestCase):
"""AuthRole unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAuthRole(self):
"""Test AuthRole"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_2.models.auth_role.AuthRole() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 7df718a7948038b43a68f8db908ae396806b365d | """Sciter handlers sample (Go examples port)."""
import sciter
class RootEventHandler(sciter.EventHandler):
def __init__(self, el, frame):
super().__init__(element=el)
self.parent = frame
pass
def on_event(self, source, target, code, phase, reason):
he = sciter.Element(source)
#print("-> event:", code, phase, he)
pass
@sciter.script("mcall")
def method_call(self, *args):
#
        # `root.mcall()` (see handlers.htm) calls a behavior method of the root DOM element (the native equivalent is `Element.call_method()`),
        # so we need to attach a "behavior" to that element to catch and handle such calls.
        # It can also be handled in script in several ways:
# * `behavior` - Element subclassing with full control
# * `aspect` - provides partial handling by attaching a single function to the dom element
# * manually attaching function to Element via code like `root.mcall = function(args..) {};`
#
print("->mcall args:", "\t".join(map(str, args)))
# explicit null for example, in other cases you can return any python object like None or True
return sciter.Value.null()
pass
class Frame(sciter.Window):
def __init__(self):
super().__init__(ismain=True, uni_theme=False, debug=False)
self.set_dispatch_options(enable=True, require_attribute=False)
pass
def test_call(self):
# test sciter call
v = self.call_function('gFunc', "kkk", 555)
print("sciter call successfully:", v)
# test method call
root = self.get_root()
v = root.call_method('mfn', "method call", 10300)
print("method call successfully:", v)
# test function call
v = root.call_function('gFunc', "function call", 10300)
print("function call successfully:", v)
pass
# Functions called from script:
#@sciter.script - optional attribute here because of self.set_dispatch_options()
def kkk(self):
print("kkk called!")
def fn(*args):
print("%d: %s" % ( len(args), ",".join(map(str, args)) ))
return "native functor called"
rv = {}
rv['num'] = 1000
rv['str'] = "a string"
rv['f'] = fn
return rv
@sciter.script
def sumall(self, *args):
sum = 0
for v in args:
sum += v
return sum
@sciter.script("gprintln")
def gprint(self, *args):
print("->", " ".join(map(str, args)))
pass
def on_load_data(self, nm):
print("loading", nm.uri)
pass
def on_data_loaded(self, nm):
print("loaded ", nm.uri)
pass
def on_event(self, source, target, code, phase, reason):
# events from html controls (behaviors)
he = sciter.Element(source)
#print(".. event:", code, phase)
# TODO: following statement looks ugly.
# Guess it wasn't a nice idea to split event mask to separate code and phase values
# Or we may pack all event arguments to single object (dict) to eliminate such parameters bloat
#
if code == sciter.event.BEHAVIOR_EVENTS.BUTTON_CLICK and phase == sciter.event.PHASE_MASK.SINKING and he.test('#native'):
print("native button clicked!")
return True
pass
pass
if __name__ == "__main__":
print("Sciter version:", sciter.version(as_str=True))
# create window
frame = Frame()
# enable debug only for this window
frame.setup_debug()
# load file
frame.load_file("examples/handlers.htm")
#frame.load_html(b"""<html><body><button id='native'>Click</button></body></html>""")
# install additional handler
ev2 = RootEventHandler(frame.get_root(), frame)
frame.test_call()
frame.run_app()
|
py | 7df718c75b0d62eb2761da3de2381f1755e8043e | """
Utilities for handling JWST file/data formats.
Requires https://github.com/spacetelescope/jwst
"""
import numpy as np
def hdu_to_imagemodel(in_hdu):
"""
Workaround for initializing a `jwst.datamodels.ImageModel` from a
normal FITS ImageHDU that could contain HST header keywords and
unexpected WCS definition.
TBD
Parameters
----------
in_hdu : `astropy.io.fits.ImageHDU`
Returns
-------
img : `jwst.datamodels.ImageModel`
"""
from astropy.io.fits import ImageHDU, HDUList
from astropy.coordinates import ICRS
from jwst.datamodels import util
import gwcs
hdu = ImageHDU(data=in_hdu.data, header=in_hdu.header)
new_header = strip_telescope_header(hdu.header)
hdu.header = new_header
# Initialize data model
img = util.open(HDUList([hdu]))
# Initialize GWCS
tform = gwcs.wcs.utils.make_fitswcs_transform(new_header)
hwcs = gwcs.WCS(forward_transform=tform, output_frame=ICRS())#gwcs.CelestialFrame())
sh = hdu.data.shape
hwcs.bounding_box = ((-0.5, sh[0]-0.5), (-0.5, sh[1]-0.5))
# Put gWCS in meta, where blot/drizzle expect to find it
img.meta.wcs = hwcs
return img
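# Illustrative usage sketch: assumes an HST-style exposure with a 'SCI'
# extension; the filename below is hypothetical.
def _example_hdu_to_imagemodel():
    """Minimal sketch of wrapping a FITS ImageHDU into a `jwst` ImageModel."""
    from astropy.io import fits
    hdul = fits.open('example_flt.fits')  # hypothetical file
    return hdu_to_imagemodel(hdul['SCI'])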
def xxx(header):
"""
"""
ra, dec = 53.18118642, -27.79096316
hdu = utils.make_wcsheader(ra=ra, dec=dec, size=10, pixscale=0.06, get_hdu=True)
out = grizli.jwst.hdu_to_imagemodel(hdu)
from jwst.datamodels import ModelContainer, DrizProductModel
product = DrizProductModel(out.data.shape)
product.meta.wcs = out.meta.wcs
from jwst.resample import gwcs_blot, gwcs_drizzle
driz = gwcs_drizzle.GWCSDrizzle(product)#, outwcs=out.meta.wcs)
driz.add_image(blot_data, wcs_model.meta.wcs, xmax=out.data.shape[1], ymax=out.data.shape[0])
from jwst.resample import resample_utils
from drizzle import util
input_wcs = wcs_model.meta.wcs
output_wcs = out.meta.wcs
fillval = 'INDEF'
insci = blot_data
inwht = None
xmin = xmax = ymin = ymax = 0
uniqid = 1
outsci = driz.outsci*1
outwht = driz.outwht*1
outcon = driz.outcon*1
in_units = 'cps'
from jwst.resample import resample
groups = ModelContainer([wcs_model])
sampler = resample.ResampleData(groups, output=driz)
def change_header_pointing(header, ra_ref=0., dec_ref=0., pa_v3=0.):
"""
Update a FITS header for a new pointing (center + roll).
Parameters
----------
header : `~astropy.io.fits.Header`
Parent header (must contain `V2_REF`, `V3_REF` keywords).
ra_ref, dec_ref : float
        Pointing center, in decimal degrees, at the reference position defined
        by the `V2_REF` / `V3_REF` keywords.
pa_v3 : float
Position angle of the telescope V3 axis, degrees.
.. warning::
        Does not update the PC keywords based on `pa_v3`; those would have to
        be recomputed from the new `gwcs`.
"""
from jwst.lib.set_telescope_pointing import compute_local_roll
v2_ref = header['V2_REF']
v3_ref = header['V3_REF']
# Strip units, if any
args = []
for v in (pa_v3, ra_ref, dec_ref, v2_ref, v3_ref):
if hasattr(v, 'value'):
args.append(v.value)
else:
args.append(v)
roll_ref = compute_local_roll(*tuple(args))
new_header = header.copy()
    new_header['PA_V3'] = args[0]
new_header['CRVAL1'] = new_header['RA_REF'] = args[1]
new_header['CRVAL2'] = new_header['DEC_REF'] = args[2]
new_header['ROLL_REF'] = roll_ref
return new_header
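# Illustrative usage sketch: assumes the header carries the required
# `V2_REF`/`V3_REF` keywords; the filename and pointing values are hypothetical.
def _example_change_header_pointing():
    """Minimal sketch of re-pointing a JWST exposure header."""
    from astropy.io import fits
    header = fits.getheader('example_jwst_cal.fits', ext=1)  # hypothetical file
    return change_header_pointing(header, ra_ref=53.18, dec_ref=-27.79, pa_v3=45.0)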
def img_with_wcs(input):
"""
Open a JWST exposure and apply the distortion model.
Parameters
----------
    input : str, `~astropy.io.fits.HDUList`, or datamodel
Anything `jwst.datamodels.util.open` can accept for initialization.
Returns
-------
with_wcs : `jwst.datamodels.ImageModel`
Image model with full `~gwcs` in `with_wcs.meta.wcs`.
"""
from jwst.datamodels import util
from jwst.assign_wcs import AssignWcsStep
# from jwst.stpipe import crds_client
# from jwst.assign_wcs import assign_wcs
# HDUList -> jwst.datamodels.ImageModel
img = util.open(input)
# print("{}".format(img))
# for item in img.meta:
# print("{}: {} ({})".format(item, img.meta[item], type(img.meta[item])))
    # AssignWcs to populate img.meta.wcsinfo
step = AssignWcsStep()
with_wcs = step.process(img)
## Above should be more robust to get all of the necessary ref files
#dist_file = crds_client.get_reference_file(img, 'distortion')
#reference_files = {'distortion': dist_file}
#with_wcs = assign_wcs.load_wcs(img, reference_files=reference_files)
return with_wcs
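# Illustrative usage sketch: any input accepted by `jwst.datamodels.util.open`
# should work; the filename is hypothetical and CRDS access is needed for the
# distortion reference file.
def _example_img_with_wcs():
    """Minimal sketch of attaching the full gWCS to a JWST exposure."""
    with_wcs = img_with_wcs('example_jwst_rate.fits')  # hypothetical file
    print(with_wcs.meta.wcs)  # gWCS pipeline, including the distortion model
    return with_wcs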
def strip_telescope_header(header, simplify_wcs=True):
"""
Strip non-JWST keywords that confuse `jwst.datamodels.util.open`.
Parameters
----------
header : `~astropy.io.fits.Header`
Input FITS header.
"""
import astropy.wcs as pywcs
new_header = header.copy()
if 'TELESCOP' in new_header:
if new_header['TELESCOP'] != 'JWST':
keys = ['TELESCOP', 'FILTER', 'DETECTOR', 'INSTRUME']
for key in keys:
if key in header:
new_header.remove(key)
if simplify_wcs:
# Make simple WCS header
orig_wcs = pywcs.WCS(new_header)
new_header = orig_wcs.to_header()
new_header['EXTNAME'] = 'SCI'
new_header['RADESYS'] = 'ICRS'
new_header['CDELT1'] = -new_header['PC1_1']
new_header['CDELT2'] = new_header['PC2_2']
new_header['PC1_1'] = -1
new_header['PC2_2'] = 1
return new_header
def model_wcs_header(datamodel, get_sip=False, order=4, step=32):
"""
Make a header with approximate WCS for use in DS9.
Parameters
----------
datamodel : `jwst.datamodels.ImageModel`
        Image model with full `~gwcs` in `datamodel.meta.wcs`.
get_sip : bool
If True, fit a `astropy.modeling.models.SIP` distortion model to the
image WCS.
order : int
Order of the SIP polynomial model.
step : int
For fitting the SIP model, generate a grid of detector pixels every
`step` pixels in both axes for passing through
`datamodel.meta.wcs.forward_transform`.
Returns
-------
    header : `~astropy.io.fits.Header`
Header with simple WCS definition: CD rotation but no distortion.
"""
from astropy.io.fits import Header
from scipy.optimize import least_squares
sh = datamodel.data.shape
try:
pipe = datamodel.meta.wcs.pipeline[0][1]
if 'offset_2' in pipe.param_names:
# NIRISS WCS
c_x = pipe.offset_2.value
c_y = pipe.offset_3.value
else:
# Simple WCS
c_x = pipe.offset_0.value
c_y = pipe.offset_1.value
crpix = np.array([-c_x+1, -c_y+1])
except:
crpix = np.array(sh)/2.+0.5
crval = datamodel.meta.wcs.forward_transform(crpix[0], crpix[1])
cdx = datamodel.meta.wcs.forward_transform(crpix[0]+1, crpix[1])
cdy = datamodel.meta.wcs.forward_transform(crpix[0], crpix[1]+1)
header = Header()
header['RADESYS'] = 'ICRS'
header['CTYPE1'] = 'RA---TAN'
header['CTYPE2'] = 'DEC--TAN'
header['CUNIT1'] = header['CUNIT2'] = 'deg'
header['CRPIX1'] = crpix[0]
header['CRPIX2'] = crpix[1]
header['CRVAL1'] = crval[0]
header['CRVAL2'] = crval[1]
cosd = np.cos(crval[1]/180*np.pi)
header['CD1_1'] = (cdx[0]-crval[0])*cosd
header['CD1_2'] = (cdy[0]-crval[0])*cosd
header['CD2_1'] = cdx[1]-crval[1]
header['CD2_2'] = cdy[1]-crval[1]
cd = np.array([[header['CD1_1'], header['CD1_2']], [header['CD2_1'], header['CD2_2']]])
if not get_sip:
return header
#### Fit a SIP header to the gwcs transformed coordinates
v, u = np.meshgrid(np.arange(1,sh[0]+1,step), np.arange(1,sh[1]+1,step))
x, y = datamodel.meta.wcs.forward_transform(u, v)
y -= crval[1]
x = (x-crval[0])*np.cos(crval[1]/180*np.pi)
a_names = []
b_names = []
#order = 4
for i in range(order+1):
for j in range(order+1):
ext = '{0}_{1}'.format(i,j)
if (i+j) > order:
continue
if ext in ['0_0', '0_1','1_0']:
continue
a_names.append('A_'+ext)
b_names.append('B_'+ext)
p0 = np.zeros(4+len(a_names)+len(b_names))
p0[:4] += cd.flatten()
args = (u.flatten(), v.flatten(), x.flatten(), y.flatten(), crpix, a_names, b_names, cd, 0)
# Fit the SIP coeffs
fit = least_squares(_objective_sip, p0, jac='2-point', bounds=(-np.inf, np.inf), method='lm', ftol=1e-08, xtol=1e-08, gtol=1e-08, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=1000, verbose=0, args=args, kwargs={})
# Get the results
args = (u.flatten(), v.flatten(), x.flatten(), y.flatten(), crpix, a_names, b_names, cd, 1)
cd_fit, a_coeff, b_coeff = _objective_sip(fit.x, *args)
# Put in the header
for i in range(2):
for j in range(2):
header['CD{0}_{1}'.format(i+1, j+1)] = cd_fit[i,j]
header['CTYPE1'] = 'RA---TAN-SIP'
header['CTYPE2'] = 'DEC--TAN-SIP'
header['A_ORDER'] = order
for k in a_coeff:
header[k] = a_coeff[k]
header['B_ORDER'] = order
for k in b_coeff:
header[k] = b_coeff[k]
return header
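# Illustrative usage sketch: builds an approximate SIP header for DS9 from a
# datamodel produced by `img_with_wcs`; the filename is hypothetical.
def _example_model_wcs_header():
    """Minimal sketch of deriving a SIP-approximated FITS WCS from a datamodel."""
    datamodel = img_with_wcs('example_jwst_rate.fits')  # hypothetical file
    return model_wcs_header(datamodel, get_sip=True, order=4, step=64)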
def _objective_sip(params, u, v, x, y, crpix, a_names, b_names, cd, ret):
"""
Objective function for fitting SIP coefficients
"""
from astropy.modeling import models, fitting
#u, v, x, y, crpix, a_names, b_names, cd = data
cdx = params[0:4].reshape((2,2))
a_params = params[4:4+len(a_names)]
b_params = params[4+len(a_names):]
a_coeff = {}
for i in range(len(a_names)):
a_coeff[a_names[i]] = a_params[i]
b_coeff = {}
for i in range(len(b_names)):
b_coeff[b_names[i]] = b_params[i]
if ret == 1:
return cdx, a_coeff, b_coeff
sip = models.SIP(crpix=crpix, a_order=4, b_order=4, a_coeff=a_coeff, b_coeff=b_coeff)
fuv, guv = sip(u,v)
xo, yo = np.dot(cdx, np.array([u+fuv-crpix[0], v+guv-crpix[1]]))
#dr = np.sqrt((x-xo)**2+(y-yo)**2)*3600.
dr = np.append(x-xo, y-yo)*3600./0.065
#print(params, np.abs(dr).max())
return dr
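# Note: for pixel coordinates (u, v) the model evaluated above is
#   [xo, yo] = CD . [u + A(u, v) - CRPIX1, v + B(u, v) - CRPIX2],
# where A and B are the SIP polynomials built from `a_coeff`/`b_coeff`. The
# returned residual stacks (x - xo) and (y - yo), converted from degrees to
# pixels assuming a ~0.065 arcsec/pixel scale.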
|
py | 7df719130f27655cbd8a386c44e2e21b9d9e75ed | ###########################################################################
# Created by: Hang Zhang
# Email: [email protected]
# Copyright (c) 2017
###########################################################################
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import encoding
from .base import BaseNet
from .fcn import FCNHead
__all__ = ['EncNet', 'EncModule', 'get_encnet', 'get_encnet_resnet50_pcontext',
'get_encnet_resnet101_pcontext', 'get_encnet_resnet50_ade']
class EncNet(BaseNet):
def __init__(self, nclass, backbone, aux=True, se_loss=True, lateral=False,
norm_layer=nn.BatchNorm2d, **kwargs):
super(EncNet, self).__init__(nclass, backbone, aux, se_loss,
norm_layer=norm_layer, **kwargs)
self.head = EncHead(self.nclass, in_channels=2048, se_loss=se_loss,
lateral=lateral, norm_layer=norm_layer,
up_kwargs=self._up_kwargs)
if aux:
self.auxlayer = FCNHead(1024, nclass, norm_layer=norm_layer)
def forward(self, x):
imsize = x.size()[2:]
features = self.base_forward(x)
x = list(self.head(*features))
x[0] = F.upsample(x[0], imsize, **self._up_kwargs)
if self.aux:
auxout = self.auxlayer(features[2])
auxout = F.upsample(auxout, imsize, **self._up_kwargs)
x.append(auxout)
return tuple(x)
class EncModule(nn.Module):
def __init__(self, in_channels, nclass, ncodes=32, se_loss=True, norm_layer=None):
super(EncModule, self).__init__()
norm_layer = nn.BatchNorm1d if isinstance(norm_layer, nn.BatchNorm2d) else \
encoding.nn.BatchNorm1d
self.se_loss = se_loss
self.encoding = nn.Sequential(
nn.Conv2d(in_channels, in_channels, 1, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
encoding.nn.Encoding(D=in_channels, K=ncodes),
norm_layer(ncodes),
nn.ReLU(inplace=True),
encoding.nn.Mean(dim=1))
self.fc = nn.Sequential(
nn.Linear(in_channels, in_channels),
nn.Sigmoid())
if self.se_loss:
self.selayer = nn.Linear(in_channels, nclass)
def forward(self, x):
en = self.encoding(x)
b, c, _, _ = x.size()
gamma = self.fc(en)
y = gamma.view(b, c, 1, 1)
outputs = [F.relu_(x + x * y)]
if self.se_loss:
outputs.append(self.selayer(en))
return tuple(outputs)
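# Illustrative usage sketch: requires PyTorch plus the `torch-encoding` package;
# the tensor shapes below are arbitrary. With se_loss=True the module returns
# (re-weighted feature map, SE-loss logits).
def _example_encmodule():
    module = EncModule(512, nclass=21, ncodes=32, se_loss=True, norm_layer=nn.BatchNorm2d)
    feat = torch.randn(2, 512, 60, 60)
    out, se_logits = module(feat)
    return out.shape, se_logits.shape  # torch.Size([2, 512, 60, 60]), torch.Size([2, 21])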
class EncHead(nn.Module):
def __init__(self, out_channels, in_channels, se_loss=True, lateral=True,
norm_layer=None, up_kwargs=None):
super(EncHead, self).__init__()
self.se_loss = se_loss
self.lateral = lateral
self.up_kwargs = up_kwargs
self.conv5 = nn.Sequential(
nn.Conv2d(in_channels, 512, 3, padding=1, bias=False),
norm_layer(512),
nn.ReLU(inplace=True))
if lateral:
self.connect = nn.ModuleList([
nn.Sequential(
nn.Conv2d(512, 512, kernel_size=1, bias=False),
norm_layer(512),
nn.ReLU(inplace=True)),
nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=1, bias=False),
norm_layer(512),
nn.ReLU(inplace=True)),
])
self.fusion = nn.Sequential(
nn.Conv2d(3*512, 512, kernel_size=3, padding=1, bias=False),
norm_layer(512),
nn.ReLU(inplace=True))
self.encmodule = EncModule(512, out_channels, ncodes=32,
se_loss=se_loss, norm_layer=norm_layer)
self.conv6 = nn.Sequential(nn.Dropout2d(0.1, False),
nn.Conv2d(512, out_channels, 1))
def forward(self, *inputs):
feat = self.conv5(inputs[-1])
if self.lateral:
c2 = self.connect[0](inputs[1])
c3 = self.connect[1](inputs[2])
feat = self.fusion(torch.cat([feat, c2, c3], 1))
outs = list(self.encmodule(feat))
outs[0] = self.conv6(outs[0])
return tuple(outs)
def get_encnet(dataset='pascal_voc', backbone='resnet50', pretrained=False,
root='~/.encoding/models', **kwargs):
r"""EncNet model from the paper `"Context Encoding for Semantic Segmentation"
<https://arxiv.org/pdf/1803.08904.pdf>`_
Parameters
----------
dataset : str, default pascal_voc
The dataset that model pretrained on. (pascal_voc, ade20k)
backbone : str, default resnet50
The backbone network. (resnet50, 101, 152)
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_encnet(dataset='pascal_voc', backbone='resnet50', pretrained=False)
>>> print(model)
"""
acronyms = {
'pascal_voc': 'voc',
'ade20k': 'ade',
'pcontext': 'pcontext',
}
kwargs['lateral'] = True if dataset.lower() == 'pcontext' else False
# infer number of classes
from ..datasets import datasets, VOCSegmentation, VOCAugSegmentation, ADE20KSegmentation
model = EncNet(datasets[dataset.lower()].NUM_CLASS, backbone=backbone, root=root, **kwargs)
if pretrained:
from .model_store import get_model_file
model.load_state_dict(torch.load(
get_model_file('encnet_%s_%s'%(backbone, acronyms[dataset]), root=root)))
return model
def get_encnet_resnet50_pcontext(pretrained=False, root='~/.encoding/models', **kwargs):
r"""EncNet-PSP model from the paper `"Context Encoding for Semantic Segmentation"
<https://arxiv.org/pdf/1803.08904.pdf>`_
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_encnet_resnet50_pcontext(pretrained=True)
>>> print(model)
"""
return get_encnet('pcontext', 'resnet50', pretrained, root=root, aux=False, **kwargs)
def get_encnet_resnet101_pcontext(pretrained=False, root='~/.encoding/models', **kwargs):
r"""EncNet-PSP model from the paper `"Context Encoding for Semantic Segmentation"
<https://arxiv.org/pdf/1803.08904.pdf>`_
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_encnet_resnet101_pcontext(pretrained=True)
>>> print(model)
"""
return get_encnet('pcontext', 'resnet101', pretrained, root=root, aux=False, **kwargs)
def get_encnet_resnet50_ade(pretrained=False, root='~/.encoding/models', **kwargs):
r"""EncNet-PSP model from the paper `"Context Encoding for Semantic Segmentation"
<https://arxiv.org/pdf/1803.08904.pdf>`_
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Examples
--------
>>> model = get_encnet_resnet50_ade(pretrained=True)
>>> print(model)
"""
return get_encnet('ade20k', 'resnet50', pretrained, root=root, aux=True, **kwargs)
|
py | 7df71a59aa65a0d0e90a47ded2095f7ebfbb6ea9 | 15 uid=2057284
20 ctime=1295323748
20 atime=1295578340
24 SCHILY.dev=234881026
23 SCHILY.ino=29219952
18 SCHILY.nlink=1
|
py | 7df71c05752468dab206f6c6be62ddc82989b86c | import math
import subprocess
_interesting_modules = ('overrepresented sequences', 'per sequence quality scores')
def run_preprocessing(namespace):
''' Runs all the preprocessing steps - quality estimation, trimming, etc.
Based on the input parameters, performs the following steps:
1. Checks if the FastQC report is available.
1.a If it is, loads the existing data.
1.b If it is not, creates a FastQC report in the output directory.
2. Performs the quality and adapter trimming (if FastQC detected adapters and contaminant file available)
3. Estimates the sample evenness based on sequence overrepresentation in the sample.
Args:
namespace: Object (from configparser.parse_args) which contains all the command-line options
'''
# Preliminary data generation
generate_report = True
input_fname = namespace.fastq_file.split('/')[-1]
input_prefix = input_fname.split('.')[0]
# Determine if filtering is needed, and if it is, run it.
if namespace.fastqc_report:
generate_report = False
fastqc_fname = '%s/fastqc_data.txt' % namespace.fastqc_report
if generate_report:
cmd = 'fastqc -o %s %s --extract' % (namespace.output_dir, namespace.fastq_file)
subprocess.call(cmd, shell=True)
fastqc_fname = '%s/%s_fastqc/fastqc_data.txt' % (namespace.output_dir, input_prefix)
fastqc_modules = dict(iter(parse_fastqc(fastqc_fname)))
# Let's stipulate something about the sample evenness and 16s abundance
overrepresented = fastqc_modules['overrepresented sequences']
per_seq_qual = fastqc_modules['per sequence quality scores']
evenness, potential_16s = determine_sample_complexity(overrepresented)
# Temporary solution
# TODO
with open('%s/.preprocessing' % namespace.output_dir, 'w') as fout:
fout.write('evenness %s\npotential_16s %s\n' % (evenness, potential_16s))
with open('%s/per-sequence-quality-scores.tsv' % namespace.output_dir, 'w') as fout:
fout.write('quality count\n')
fout.write('\n'.join(map(lambda i: '%d %s' % (i[0], i[1]), per_seq_qual.items())))
# Filtering using trim-galore
filtering = True if overrepresented.seq_constructs else False
cmd = "trim_galore -q 15 --length 20 "
if filtering:
if not namespace.contaminants:
print 'ERROR: Cannot filter adapters, no contaminant file present. Will perform default cutadapt filtering.'
else:
contaminants = overrepresented.get_original_adapters(namespace.contaminants)
adapters = ' '.join(map(lambda seq: '-a %s' % seq, contaminants.values()))
cmd += adapters
cmd = '%s %s > %s/%s.fastq' % (cmd, namespace.fastq_file, namespace.output_dir, input_prefix)
subprocess.call(cmd, shell=True)
cmd = 'mv %s_trimmed.fq %s' % (input_prefix, namespace.output_dir)
subprocess.call(cmd, shell=True)
cmd = 'mv %s_trimming_report.txt %s' % (input_fname, namespace.output_dir)
subprocess.call(cmd, shell=True)
def determine_sample_complexity(overrepresented):
'''Guesses sample complexity based on overrepresented sequences.
It uses the information on number and percentage of overrepresented sequences
and tries to guess whether these is rRNA content present in the sample and
the level of the evenness of the sample:
Args:
overrepresented: :metagenomix.preprocessing.fastqc.Overrepresented
Returns:
tuple: (evenness, potential_16s), values of which can be low, medium and high.
'''
evenness = None
potential_16s = None
perc_data = overrepresented.dataset_specific.values()
# The sample is even and there is low probability of rRNA content.
if len(perc_data) == 0:
evenness = 'high'
potential_16s = 'low'
return evenness, potential_16s
# Ignore this for now, it is ashamedly silly:
N = len(perc_data)
max_perc = max(perc_data)
min_perc = min(perc_data)
if N > 10 and N < 20:
if max_perc > 5.:
potential_16s = 'high'
else:
potential_16s = 'medium'
elif N > 20:
potential_16s = 'high'
else:
potential_16s = 'low'
if max_perc > 0.5 and max_perc < 5.:
evenness = 'medium'
elif max_perc > 5.:
evenness = 'low'
else:
evenness = 'high'
return evenness, potential_16s
class Overrepresented(object):
'''Place-holder for overrepresented sequences module data.'''
__slots__ = {'seq_constructs', 'dataset_specific'}
def __init__(self, seq_constructs=set(), dataset_specific={}):
self.seq_constructs = seq_constructs
self.dataset_specific = dataset_specific
def get_original_adapters(self, contaminant_file):
contaminants = load_contaminants(contaminant_file)
return dict(map(lambda n: (n, contaminants[n]), self.seq_constructs))
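# Illustrative usage sketch: the sequences and percentages below are made up and
# only exercise the evenness/16S heuristic in determine_sample_complexity().
def _example_complexity_guess():
    over = Overrepresented(seq_constructs=set(['Illumina Single End Adapter 1']),
                           dataset_specific={'ACGTACGTACGT': 6.2, 'TTGACCATTGAC': 0.8})
    return determine_sample_complexity(over)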
def _parse_overrepresented_sequences(fin):
sequencing = set()
no_hit = {}
for l in fin:
l = l.strip()
if l == '>>END_MODULE':
return Overrepresented(sequencing, no_hit)
if l.startswith('#'):
continue
seq, count, perc, source = l.split('\t')
if source.lower() == 'no hit':
no_hit[seq] = float(perc)
else:
parenthesis = source.find('(')
source = source[: parenthesis - 1]
sequencing.add(source)
def _parse_per_sequence_quality_scores(fin):
qual_scores = {}
for l in fin:
l = l.strip()
if l == '>>END_MODULE':
return qual_scores
if l.startswith('#'):
continue
qual, count = l.split('\t')
qual_scores[int(qual)] = count
def load_contaminants(contaminant_file):
'''Loads contaminant sequences from a file.
Args:
contaminant_file: path to a file containing sequencing contaminants.
Returns:
dict(adapter_name, adapter_sequence)
'''
c = {}
with open(contaminant_file) as fin:
for l in fin:
l = l.strip()
if not l or l.startswith('#'):
continue
data = l.split('\t')
name, seq = data[0], data[-1]
c[name] = seq
return c
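# Note: the contaminant file is expected to be tab-separated, one adapter per
# line as "<name>\t...\t<sequence>", with '#' comment lines and blank lines
# ignored (the layout used by FastQC's contaminant_list.txt).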
def parse_fastqc(fqc_fname):
'''Parses the FastQC report file (fastqc_data.txt in the report directory)
Function is a generator.
It checks which of the modules are declared "interesting". For each of the
interesting modules, invokes a function _parse_<MODULE_NAME>, which returns
an object with the data contained in the module.
Overrepresented sequences supported for now.
Args:
fqc_fname: Path to the fastqc_data.txt file of the FastQC report.
Returns:
Yields tuple (module_name, object with module data)
'''
with open(fqc_fname ,'r') as fin:
for l in fin:
l = l.strip()
if l.startswith('#'):
continue
if l.startswith('>>') and l != '>>END_MODULE':
module_name, status = l[2:].lower().split('\t')
if module_name in _interesting_modules:
yield module_name, globals()['_parse_%s' % ('_'.join(module_name.split(' ')))](fin)
def main():
import sys
if len(sys.argv) < 2:
print 'Usage: python fasqc.py <FASTQC_DATA.TXT> [CONTAMINANTS_FILE]'
sys.exit(-1)
fqc_fname = sys.argv[1]
modules = {}
for mod_name, res in parse_fastqc(fqc_fname):
modules[mod_name] = res
print modules['overrepresented sequences'].dataset_specific
print modules['overrepresented sequences'].seq_constructs
if len(sys.argv) == 3:
cf = sys.argv[2]
print modules['overrepresented sequences'].get_original_adapters(cf)
if __name__ == '__main__':
main() |
py | 7df71c26c414291206f35c084ab45bf7283dca6c | import src.data_mgmt as dm
import src.model as m
import os
import pandas as pd
from src.utils import *
from datetime import datetime
feature_dict = {'NU_NOTIFIC' : 'Registry ID',
'DT_NOTIFIC' : 'Notification Date',
'SEM_NOT' : 'Week Post Sympt',
'DT_SIN_PRI' : 'Date First Sympt',
'SEM_PRI' : 'Test Location ID',
'SG_UF_NOT' : 'Test Location Federal',
'ID_REGIONA' : 'Test Location Region ID',
'CO_REGIONA' : 'Test Location Region',
'ID_MUNICIP' : 'Test Location MunicipalityID',
'CO_MUN_NOT' : 'Test Location Municipality',
'ID_UNIDADE' : 'Test Unit ID',
'CO_UNI_NOT' : 'Test Unit',
'CS_SEXO' : 'Gender',
'DT_NASC' : 'Birth Date',
'NU_IDADE_N' : 'Age',
'TP_IDADE' : 'Birth Date',
'COD_IDADE' : 'Age code',
'CS_GESTANT' : 'Gestational Age',
'CS_RACA' : 'Race',
'CS_ETINIA' : 'Indigenous',
'CS_ESCOL_N' : 'Schooling',
'ID_PAIS' : 'CountryID',
'CO_PAIS' : 'CountryName',
'SG_UF' : 'Residency ID',
'ID_RG_RESI' : 'Residency Region ID',
'CO_RG_RESI' : 'Residency Region',
'ID_MN_RESI' : 'Residency Municipality',
'CO_MUN_RES' : 'Residency Municipality ID',
'CS_ZONA' : 'Residency Type',
'SURTO_SG' : 'Acute Respiratory Distress Syndrome',
'NOSOCOMIAL' : 'Contracted At Hospital',
'AVE_SUINO' : 'Contact Birds Pigs',
'FEBRE' : 'Fever',
'TOSSE' : 'Cough',
'GARGANTA' : 'Throat',
'DISPNEIA' : 'Dyspnea',
'DESC_RESP' : 'Respiratory Discomfort',
'SATURACAO' : 'SpO2 less 95%',
'DIARREIA' : 'Diarrhea',
'VOMITO' : 'Vomiting',
'OUTRO_SIN' : 'Other Symptoms',
'OUTRO_DES' : 'Other Symptoms Description',
'PUERPERA' : 'Postpartum',
'CARDIOPATI' : 'Cardiovascular Disease',
'HEMATOLOGI' : 'Hematologic Disease',
'SIND_DOWN' : 'Down Syndrome',
'HEPATICA' : 'Liver Chronic Disease',
'ASMA' : 'Asthma',
'DIABETES' : 'Diabetes',
'NEUROLOGIC' : 'Neurological Disease',
'PNEUMOPATI' : 'Another Chronic Pneumopathy',
'IMUNODEPRE' : 'Immunosuppression',
'RENAL' : 'Renal Chronic Disease',
'OBESIDADE' : 'Obesity',
'OBES_IMC' : 'BMI',
'OUT_MORBI' : 'Other Risks',
'MORB_DESC' : 'Other Risks Desc',
'VACINA' : 'Flu Shot',
'DT_UT_DOSE' : 'Flu Shot Date',
'MAE_VAC' : 'Flu Shot Less 6 months',
'DT_VAC_MAE' : 'Flu Shot Date Less 6 Months',
'M_AMAMENTA' : 'Breast Feeds 6 Months',
'DT_DOSEUNI' : 'Date Vaccine Children',
'DT_1_DOSE' : 'Date Vaccine Children1',
'DT_2_DOSE' : 'Date Vaccine Children2',
'ANTIVIRAL' : 'Antiviral Use',
'TP_ANTIVIR' : 'Type Antiviral',
'OUT_ANTIV' : 'Type Antiviral Other',
'DT_ANTIVIR' : 'Antiviral Start Date',
'HOSPITAL' : 'Hospitalization',
'DT_INTERNA' : 'Date Hospitalization',
'SG_UF_INTE' : 'Hospital Region ID',
'ID_RG_INTE' : 'Hospital Region IBGE2',
'CO_RG_INTE' : 'Hospital Region IBGE',
'ID_MN_INTE' : 'Hopspital MunicpialityID',
'CO_MU_INTE' : 'Hopspital Municpiality',
'ID_UN_INTE' : 'Hospital ID',
'CO_UN_INTE' : 'Hospital ID',
'UTI' : 'ICU',
'DT_ENTUTI' : 'ICU start Date',
'DT_SAIDUTI' : 'ICU end Date',
'SUPORT_VEN' : 'Ventilator',
'RAIOX_RES' : 'Xray Thorax Result',
'RAIOX_OUT' : 'Xray Thorax Other',
'DT_RAIOX' : 'Xray Test Date',
'AMOSTRA' : 'Amostra',
'DT_COLETA' : 'Amostra Date',
'TP_AMOSTRA' : 'Amostra Type',
'OUT_AMOST' : 'Amostra Other',
'REQUI_GAL' : 'gal Sys Test',
'IF_RESUL' : 'Test Result',
'DT_IF' : 'Test Result Date',
'POS_IF_FLU' : 'Test Influenza',
'TP_FLU_IF' : 'Influenza Type',
'POS_IF_OUT' : 'Positive Others',
'IF_VSR' : 'Positive VSR',
'IF_PARA1' : 'Positive Influenza 1',
'IF_PARA2' : 'Positive Influenza 2',
'IF_PARA3' : 'Positive Influenza 3',
'IF_ADENO' : 'Positive Adenovirus',
'IF_OUTRO' : 'Positive Other',
'DS_IF_OUT' : 'Other Respiratory Virus',
'LAB_IF' : 'Test Lab',
'CO_LAB_IF' : 'Test Lab Other',
'PCR_RESUL' : 'Result PCR',
'DT_PCR' : 'Result PCR Date',
'POS_PCRFLU' : 'Result PCR Influeza',
'TP_FLU_PCR' : 'Result PCR Type Influeza',
'PCR_FLUASU' : 'Result PCR SubType Influeza',
'FLUASU_OUT' : 'Result PCR SubType Influeza_Other',
'PCR_FLUBLI' : 'Result PCR SubType Influeza_Other_spec',
'FLUBLI_OUT' : 'Result PCR SubType InfluezaB_Linage',
'POS_PCROUT' : 'Result PCR Other',
'PCR_VSR' : 'Result PCR VSR',
'PCR_PARA1' : 'Result PCR parainfluenza1',
'PCR_PARA2' : 'Result PCR parainfluenza2',
'PCR_PARA3' : 'Result PCR parainfluenza3',
'PCR_PARA4' : 'Result PCR parainfluenza4',
'PCR_ADENO' : 'Result PCR adenovirus',
'PCR_METAP' : 'Result PCR metapneumovirus',
'PCR_BOCA' : 'Result PCR bocavirus',
'PCR_RINO' : 'Result PCR rinovirus',
'PCR_OUTRO' : 'Result PCR other',
'DS_PCR_OUT' : 'Result PCR other name',
'LAB_PCR' : 'Lab PCR',
'CO_LAB_PCR' : 'LabP CR co',
'CLASSI_FIN' : 'Result Final',
'CLASSI_OUT' : 'Result Final other',
'CRITERIO' : 'Result Final confirmation',
'EVOLUCAO' : 'Evolution',
'DT_EVOLUCA' : 'Death Date',
'DT_ENCERRA' : 'Date Quarentine',
'OBSERVA' : 'Other Observations',
'DT_DIGITA' : 'Date Registry',
'HISTO_VGM' : 'HISTO_VGM',
'PAIS_VGM' : 'PAIS_VGM',
'CO_PS_VGM' : 'CO_PS_VGM',
'LO_PS_VGM' : 'LO_PS_VGM',
'DT_VGM' : 'DT_VGM',
'DT_RT_VGM' : 'DT_RT_VGM',
'PCR_SARS2' : 'Result PCR Covid',
'PAC_COCBO' : 'Occupation ID',
'PAC_DSCBO' : 'Occupation Des'
}
region_var = 'Test Location Federal'
numeric_cols = ['Age',
#'Gestational Age',
'BMI'
]
date_cols = [
'BirthDate',
'ICUstartDate',
'ICUendDate',
'AmostraDate',
'DeathDate',
'DateQuarentine',
]
categorical_cols = ['Gender',
'Race',
#'Indigenous',
'Schooling',
'Acute Respiratory Distress Syndrome',
'Contracted At Hospital',
#'Contact Birds Pigs',
'Fever',
'Cough',
'Throat',
'Dyspnea',
'Respiratory Discomfort',
'SpO2 less 95%',
'Diarrhea',
'Vomiting',
'Other Symptoms',
'Postpartum',
'Cardiovascular Disease',
'Hematologic Disease',
'Down Syndrome',
'Liver Chronic Disease',
'Asthma',
'Diabetes',
'Neurological Disease',
'Another Chronic Pneumopathy',
'Immunosuppression',
'Renal Chronic Disease',
'Obesity',
'Other Risks',
#'Flu Shot Less 6 months',
#'Breast Feeds 6 Months',
'Antiviral Use',
#'TypeAntiviral',
'Hospitalization',
'ICU',
'Ventilator',
'Xray Thorax Result',
#'Amostra',
'Result Final',
'Evolution',
#'Residency Region',
#'OccupationID',
# 'Hospital ID',
# 'Hospital Region ID',
#'Notification Date'
region_var
]
keep_cols = categorical_cols.copy()
keep_cols.extend(numeric_cols)
post_hosp = [ 'Hospitalization',
'Antiviral',
'ICU',
'Ventilator',
'Xray Torax Result',
'Amostra',
'Evolution'
]
post_death = [
'Hospitalization',
'Antiviral',
'ICU',
'Ventilator',
'Amostra',
'Evolution'
]
var_dictionary = {'typical':{'1.0':'', '2.0':' Nope '},
'Race':{'1.0':'White', '2.0':'Black', '3.0':'Yellow', '4.0':'Brown', '5.0':'Indigenous'},
'Schooling':{'0.0':'No Education', '1.0':'Elem 1-5', '2.0':'Elem 6-9', '3.0':'Medium 1-3', '4.0':'Superior', '5.0':'NA'},
'Xray Thorax Result':{'1.0':'Normal', '2.0':'Interstitial infiltrate', '3.0':'Consolidation', '4.0':'Mixed', '5.0':'Other','6.0':'Not done' },
'Ventilator':{'1.0':'Invasive', '2.0':'Non Invasive', '3.0':'No'},
'Evolution':{'1.0':'Recovered', '2.0':'Death'},
'Gender':{'M':'M', 'F':'F', 'I':'I'},
'Hospital':{'1.0':'Public', '2.0':'Private'},
'Region': {'DF' : 'Midwest', 'SP' : 'Southeast', 'SC' : 'South', 'RJ' : 'Southeast',
'PR' : 'South', 'RS' : 'South', 'ES' : 'Southeast', 'GO' : 'Midwest',
'MG' : 'Southeast', 'MS' : 'Midwest', 'MT' : 'Midwest', 'AP' : 'North',
'RR' : 'North', 'TO' : 'North', 'RO' : 'North', 'RN' : 'Northeast',
'CE' : 'Northeast', 'AM' : 'North', 'PE' : 'Northeast', 'SE' : 'Northeast',
'AC' : 'North', 'BA' : 'Northeast', 'PB' : 'Northeast', 'PA' : 'North',
'PI' : 'Northeast', 'MA' : 'Northeast', 'AL' : 'Northeast'}
}
#fname = 'data/INFLUD-08-06-2020.csv'
fname = 'data/INFLUD-30-06-2020.csv'
ts = datetime.now().strftime("%d-%b-%Y_%H-%M-%S")
outdir = 'results/' + ts
out_dir_p = outdir+'/PredictiveModels'
out_dir_da = outdir+'/DataAnalytics'
os.mkdir(outdir)
os.mkdir(out_dir_p)
os.mkdir(out_dir_da)
# Read Data
keep_cols.append('Date Registry')
df = dm.read_data(fname, feature_dict, keep_cols, sep=';')
df = dm.filter_positive_test(df)
df = dm.get_hosp_from_previous(df, fname='data/INFLUD-08-06-2020.csv',
feature_dict=feature_dict, keep_cols=keep_cols,
categorical_cols=categorical_cols)
'''
# Add public/private vari
df = dm.add_public_private_var(df, fname='data/ICU_beds.csv')
categorical_cols.append('Public Hospital')
keep_cols.append('Public Hospital')
df.drop(columns=['Hospital ID'], inplace=True )
categorical_cols.remove('Hospital ID')
keep_cols.remove('Hospital ID')
'''
# Add 5 regions
df = df.rename(columns={region_var:'Region'})
categorical_cols.append('Region')
keep_cols.append('Region')
categorical_cols.remove(region_var)
keep_cols.remove(region_var)
if 'Public Hospital' in categorical_cols:
    categorical_cols.remove('Public Hospital')
categorical_cols.append('Hospital')
# Add HDI
'''
fname = 'data/hdi.csv'
pp_df = pd.read_csv(fname)
pp_df.drop(columns=['public nonICU', 'private nonICU', 'public ICU', 'private ICU'], inplace=True)
df = df.merge(pp_df, how='left', on='Hospital ID')
del pp_df
df.rename(columns={'public hospital': 'Public Hospital'}, inplace=True)
df['Public Hospital'].loc[df['Public Hospital'] == 0] = 2.0
df['Public Hospital'].loc[df['Public Hospital'] == 1] = 1.0
df["Public Hospital"] = df["Public Hospital"].fillna(9.0)
'''
#TODO: CREATE CONGESTION METRIC
# Data Analytics
dm.create_basic_analytics(df, categorical_vars=categorical_cols, out_dir=out_dir_da)
# Preprocessing
#TODO: what to do with jobs?
#table = dm.get_categorical_stats(df[categorical_cols], plot=True, fname=out_dir_da+'/cat_stats')
df['Region'] = df['Region'].replace(var_dictionary['Region'])
df = df.drop(df[(df['Age'] > 50) & (df['Postpartum'] == 1)].index)
df = dm.set_mode_on_NA(df, numeric_cols)
df = dm.one_hot_encoding(df, categorical_features=categorical_cols)
print(len(list(df)))
df = dm.remove_features_containing(df, '9.0')
print(len(list(df)))
df = dm.var_to_categorical(df, var_name='Age', bins=[0,30,50,65,100])
#df = dm.var_to_categorical(df, var_name='BMI', bins=[18.5,25,30,40,100])
df.drop(columns=['BMI'], inplace=True)
df = dm.rename_features(df, var_dictionary, ignore_vars=['Age', 'Result Final', 'BMI', 'Gender', 'Region'])
df['Race Brown/Black'] = df['Race Brown'] + df['Race Black']
df.drop(columns=['Race Brown'], inplace=True)
df.drop(columns=['Race Black'], inplace=True)
rem_vars =['Xray Thorax Result Normal',
'Xray Thorax Result Interstitial infiltrate',
'Xray Thorax Result Mixed',
'Xray Thorax Result Other',
'Xray Thorax Result Not done',
# 'Obesity ',
'Result Final_5.0']
print(len(list(df)))
df.drop(columns=rem_vars, inplace=True)
df = dm.remove_features_containing(df, ' Nope ')
print(len(list(df)))
df = dm.remove_features_containing(df, 'NA')
print(len(list(df)))
df = df[(df['Evolution Death']==1) | (df['Evolution Recovered']==1)]
df = dm.remove_corr_features(df, print_=False, plot_=False)
print(len(list(df)))
#dm.create_basic_analytics(df, categorical_vars=list(df), out_dir=out_dir_da)
# Classification
def run_model(df, name, y, remove_vars=False, max_vars=100):
x_train, x_test, y_train, y_test = dm.create_training_and_test_sets(df, y=y, remove_vars=remove_vars, percentage_train=0.7)
selected_features, ktest_table = m.t_test(x_train, y_train, p=0.05)
ktest_table['y'] = name
x_train = x_train[selected_features]
x_test = x_test[selected_features]
print(len(list(x_train)))
selected_features = m.feature_elimination(x_train, y_train, out_dir=out_dir_p, name=name, max_vars=max_vars)
x_train = x_train[selected_features]
x_test = x_test[selected_features]
print(len(list(x_train)))
m.run_classification_models(x_train, x_test, y_train, y_test, name=name, out_dir=out_dir_p, max_steps=10000)
return ktest_table
# Run different classification models
#models = ['Hosp', 'Death', 'Death_hosp', 'ICU', 'Ventilator']#, 'XrayToraxResult']
models = [
'Death_0','Death_1', 'Ventilator','Ventilator_small',
'Ventilator_w_ICU', 'Ventilator_w_ICU_small',
'Death_0_small', 'Death_1_small',
# 'ICU', 'ICU_small',
]
ktest_g = pd.DataFrame(index=list(df))
for name in models:
df0 = df.copy()
df0 = df0[df0['Hospitalization '] == 1]
max_vars = 100
if name == 'Death_0' or name =='Death_0_small':
y = 'Evolution Death'
remove_vars = ['Antiviral', 'ICU', 'Ventilator', 'Evolution']
if 'small' in name:
max_vars = 10
elif name == 'Death_1' or name =='Death_1_small':
y = 'Evolution Death'
remove_vars = ['Evolution']
if 'small' in name:
max_vars = 10
elif name == 'ICU' or name=='ICU_small':
y = 'ICU '
remove_vars = ['ICU ', 'Ventilator', 'Evolution']
if 'small' in name:
max_vars = 10
elif name == 'Ventilator' or name=='Ventilator_small':
y = 'Ventilator Invasive'
remove_vars = ['ICU ', 'Ventilator', 'Evolution']
if 'small' in name:
max_vars = 10
elif name == 'Ventilator_w_ICU' or name=='Ventilator_w_ICU_small':
y = 'Ventilator Invasive'
remove_vars = ['Ventilator', 'Evolution']
if 'small' in name:
max_vars = 10
print('\n--------------------\n'+name+'\n--------------------\n')
ktest = run_model(df=df0, name=name, y=y, remove_vars=remove_vars, max_vars=max_vars)
#ktest_g = pd.concat([ktest_g, ktest], axis=1)
#ktest_g = ktest_g.round(decimals=2)
#ktest_g.to_latex(out_dir_da + '/pvTable.tex', column_format='lrrl|rrl|rrl|rrl|rrl' )
# Generate and save pdf report
shell('mv '+ out_dir_p + ' ' + 'results/report_template/media', printOut=False)
shell('mv '+ out_dir_da + ' ' + 'results/report_template/media', printOut=False)
os.chdir('results/report_template')
shell('pdflatex -interaction nonstopmode --jobname=report_'+ts+' main.tex', printOut=False)
shell('rm *.out *.log *.aux', printOut=False)
os.chdir('../..')
shell('mv results/report_template/report_'+ts+'.pdf results/' + ts, printOut=False)
shell('mv results/report_template/media/PredictiveModels ' + 'results/' + ts , printOut=False)
shell('mv results/report_template/media/DataAnalytics ' + 'results/' + ts , printOut=False) |
py | 7df71c2d99bdf2a53ba927ae9f8ed2c26f35e6af | cid = str(input('Digite o nome da cidade ')).strip()
print(cid[:5].upper() == 'SANTO')
|
py | 7df71c5337fc52bcf6388916644e57902be31f50 | from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from app.models import Base
from app.config import settings
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option(
"sqlalchemy.url", f'postgresql+psycopg2://{settings.database_username}:{settings.database_password}@{settings.database_hostname}:{settings.database_port}/{settings.database_name}')
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
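# Typical invocation: this env.py is driven by the Alembic CLI, e.g.
# `alembic revision --autogenerate -m "init"` to generate a migration from
# `Base.metadata`, then `alembic upgrade head` to apply it against the database
# configured via `settings`.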
|
py | 7df71cb1871dfea3f8f61aab65a37b78443d8661 | import os
import sys
import ai_flow as af
from ai_flow import ExampleSupportType, ModelType, ExampleMeta, ModelMeta, PythonObjectExecutor, BaseJobConfig
from flink_ai_flow import LocalFlinkJobConfig, FlinkPythonExecutor
from data_type import FloatDataType
from proxima_executor import BuildIndexExecutor, SearchExecutor, SearchExecutor3
from python_job_executor import TrainAutoEncoder, ReadCsvExample, MergePredictResult
from tianchi_executor import ReadTrainExample, StreamTableEnvCreator, ReadPredictExample, PredictAutoEncoder, \
SearchSink, WriteSecondResult, ReadOnlinePredictExample, FindHistory, OnlinePredictAutoEncoder, \
StreamTableEnvCreatorBuildIndex, PredictAutoEncoderWithTrain, WritePredictResult, ReadMergeExample
def get_project_path():
"""
Get the current project path.
"""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def collect_data_file():
"""
Collect the example data file.
"""
# Example data sets are under the following data set path.
data_set = '{}/data_set/'.format(os.environ['ENV_HOME'])
# First output result file is under the following output path.
output = '{}/codes/{}/output/'.format(os.environ['ENV_HOME'], os.environ['TASK_ID'])
predict_result_directory = '{}/codes/{}/'.format(os.environ['ENV_HOME'], os.environ['TASK_ID']) + 'predict_result'
merge_predict_result_path = '{}/codes/{}/'.format(os.environ['ENV_HOME'], os.environ['TASK_ID']) + 'merge_data.csv'
train_data_file = data_set + 'train_data.csv'
label_data_file = data_set + 'label_file.csv'
first_test_file = data_set + 'first_test_data.csv'
first_result_data_file = output + 'first_result.csv'
return train_data_file, predict_result_directory, merge_predict_result_path, first_test_file, first_result_data_file
def prepare_workflow(train_data_file: str, predict_result_directory: str,
merge_predict_result_path: str, first_test_data_file: str,
first_result_data_file: str):
"""
Prepare workflow: Example & Model Metadata registration.
"""
train_example_meta: ExampleMeta = af.register_example(name='train_data',
support_type=ExampleSupportType.EXAMPLE_BATCH,
data_type='pandas',
data_format='csv',
batch_uri=train_data_file)
predict_result_meta: ExampleMeta = af.register_example(name='predict_result',
support_type=ExampleSupportType.EXAMPLE_BATCH,
batch_uri=predict_result_directory)
merge_data_meta: ExampleMeta = af.register_example(name='merge_data',
support_type=ExampleSupportType.EXAMPLE_BATCH,
batch_uri=merge_predict_result_path)
first_test_example_meta: ExampleMeta = af.register_example(name='first_test_data',
support_type=ExampleSupportType.EXAMPLE_BATCH,
data_type='pandas',
data_format='csv',
batch_uri=first_test_data_file)
second_test_example_data: ExampleMeta = af.register_example(name='second_test_data',
support_type=ExampleSupportType.EXAMPLE_STREAM,
data_type='kafka',
data_format='csv',
stream_uri='localhost:9092')
first_result_example_meta: ExampleMeta = af.register_example(name='first_result_111',
support_type=ExampleSupportType.EXAMPLE_BATCH,
data_type='pandas',
data_format='csv',
batch_uri=first_result_data_file)
second_result_example_meta: ExampleMeta = af.register_example(name='second_result_111',
support_type=ExampleSupportType.EXAMPLE_STREAM,
data_type='kafka',
data_format='csv',
stream_uri='localhost:9092')
train_model_meta: ModelMeta = af.register_model(model_name='auto_encoder',
model_type=ModelType.SAVED_MODEL)
return train_example_meta, predict_result_meta, merge_data_meta, \
first_test_example_meta, second_test_example_data, \
first_result_example_meta, second_result_example_meta, train_model_meta
def run_workflow():
"""
Run the user-defined workflow definition.
"""
train_data_file, predict_result_directory, merge_predict_result_path, \
first_test_data_file, first_result_data_file = collect_data_file()
# Prepare workflow: Example & Model Metadata registration.
train_example_meta, predict_result_meta, merge_data_meta, first_test_example_meta, second_test_example_meta, \
first_result_example_meta, second_result_example_meta, train_model_meta = \
prepare_workflow(train_data_file=train_data_file,
predict_result_directory=predict_result_directory,
merge_predict_result_path=merge_predict_result_path,
first_test_data_file=first_test_data_file,
first_result_data_file=first_result_data_file)
# Save proxima indexes under the following index path.
index_path = '{}/codes/{}/'.format(os.environ['ENV_HOME'], os.environ['TASK_ID']) + 'test.index'
# Set Python job config to train model.
python_job_config_0 = BaseJobConfig(platform='local', engine='python', job_name='train')
python_job_config_1 = BaseJobConfig(platform='local', engine='python', job_name='start_cluster_serving')
python_job_config_2 = BaseJobConfig(platform='local', engine='python', job_name='merge_predict_result')
# Set Flink job config to predict with cluster serving
global_job_config_1 = LocalFlinkJobConfig()
global_job_config_1.local_mode = 'cluster'
global_job_config_1.flink_home = os.environ['FLINK_HOME']
global_job_config_1.job_name = 'cluster_serving'
global_job_config_1.set_table_env_create_func(StreamTableEnvCreatorBuildIndex())
# Set Flink job config to build index.
global_job_config_2 = LocalFlinkJobConfig()
global_job_config_2.local_mode = 'cluster'
global_job_config_2.flink_home = os.environ['FLINK_HOME']
global_job_config_2.job_name = 'build_index'
global_job_config_2.set_table_env_create_func(StreamTableEnvCreator())
    # Set Flink job config to find sick.
global_job_config_3 = LocalFlinkJobConfig()
global_job_config_3.local_mode = 'cluster'
global_job_config_3.flink_home = os.environ['FLINK_HOME']
global_job_config_3.job_name = 'find_sick'
global_job_config_3.set_table_env_create_func(StreamTableEnvCreator())
# Set Flink job config to online cluster.
global_job_config_4 = LocalFlinkJobConfig()
global_job_config_4.local_mode = 'cluster'
global_job_config_4.flink_home = os.environ['FLINK_HOME']
global_job_config_4.job_name = 'online_cluster'
global_job_config_4.set_table_env_create_func(StreamTableEnvCreator())
with af.config(python_job_config_0):
# Under first job config, we construct the first job, the job is going to train an auto_encoder model.
python_job_0_read_train_example = af.read_example(example_info=train_example_meta,
executor=PythonObjectExecutor(python_object=ReadCsvExample()))
python_job_0_train_model = af.train(input_data_list=[python_job_0_read_train_example],
executor=PythonObjectExecutor(python_object=TrainAutoEncoder()),
model_info=train_model_meta,
name='trainer_0')
with af.config(python_job_config_1):
python_job_1_cluster_serving_channel = af.cluster_serving(model_info=train_model_meta, parallelism=16)
with af.config(global_job_config_1):
flink_job_0_read_train_example = af.read_example(example_info=train_example_meta,
executor=FlinkPythonExecutor(python_object=ReadTrainExample()))
flink_job_0_predict_model = af.predict(input_data_list=[flink_job_0_read_train_example],
model_info=train_model_meta,
executor=FlinkPythonExecutor(
python_object=PredictAutoEncoderWithTrain()))
flink_job_0_write_predict_data = af.write_example(input_data=flink_job_0_predict_model,
example_info=predict_result_meta,
executor=FlinkPythonExecutor(
python_object=WritePredictResult()))
with af.config(python_job_config_2):
python_job_2_merge_train_data_file = af.user_define_operation(executor=PythonObjectExecutor(
python_object=MergePredictResult()))
with af.config(global_job_config_2):
flink_job_1_read_train_example = af.read_example(example_info=merge_data_meta,
executor=FlinkPythonExecutor(python_object=ReadMergeExample()))
flink_job_1_build_index_channel = af.transform([flink_job_1_read_train_example],
executor=FlinkPythonExecutor(
python_object=BuildIndexExecutor(index_path, FloatDataType(),
128)))
with af.config(global_job_config_3):
flink_job_2_read_history_example = af.read_example(example_info=first_test_example_meta,
executor=FlinkPythonExecutor(
python_object=ReadPredictExample()))
flink_job_2_predict_model = af.predict(input_data_list=[flink_job_2_read_history_example],
model_info=train_model_meta,
executor=FlinkPythonExecutor(python_object=PredictAutoEncoder()))
flink_job_2_transformed_data = af.transform([flink_job_2_predict_model],
executor=FlinkPythonExecutor(
python_object=SearchExecutor(index_path, FloatDataType(), 2)))
flink_job_2_read_train_example = af.read_example(example_info=train_example_meta,
executor=FlinkPythonExecutor(python_object=ReadTrainExample()))
flink_job_2_join_channel = af.transform(
input_data_list=[flink_job_2_transformed_data, flink_job_2_read_train_example],
executor=FlinkPythonExecutor(python_object=FindHistory()))
flink_job_2_write_result = af.write_example(input_data=flink_job_2_join_channel,
example_info=first_result_example_meta,
executor=FlinkPythonExecutor(python_object=SearchSink()))
with af.config(global_job_config_4):
flink_job_3_read_online_example = af.read_example(example_info=second_test_example_meta,
executor=FlinkPythonExecutor(
python_object=ReadOnlinePredictExample()))
flink_job_3_predict_model = af.predict(input_data_list=[flink_job_3_read_online_example],
model_info=train_model_meta,
executor=FlinkPythonExecutor(python_object=OnlinePredictAutoEncoder()))
flink_job_3_transformed_data = af.transform([flink_job_3_predict_model],
executor=FlinkPythonExecutor(
python_object=SearchExecutor3(index_path, FloatDataType(), 2)))
af.write_example(input_data=flink_job_3_transformed_data,
example_info=second_result_example_meta,
executor=FlinkPythonExecutor(python_object=WriteSecondResult()))
af.stop_before_control_dependency(python_job_1_cluster_serving_channel, python_job_0_train_model)
af.stop_before_control_dependency(flink_job_0_read_train_example, python_job_1_cluster_serving_channel)
af.stop_before_control_dependency(python_job_2_merge_train_data_file, flink_job_0_read_train_example)
af.stop_before_control_dependency(flink_job_1_build_index_channel, python_job_2_merge_train_data_file)
af.stop_before_control_dependency(flink_job_2_read_history_example, flink_job_1_build_index_channel)
af.stop_before_control_dependency(flink_job_3_read_online_example, flink_job_2_write_result)
workflow_id = af.run(get_project_path() + '/python_codes')
res = af.wait_workflow_execution_finished(workflow_id)
sys.exit(res)
if __name__ == '__main__':
af.set_project_config_file(get_project_path() + '/project.yaml')
run_workflow()
|
py | 7df71dbc32aad8324aa8c2be9a10218b7f1e1ab1 | """
Default logs and messages for the
:class:`~django_analyses.runner.queryset_runner.QuerySetRunner` class.
"""
class bcolors:
"""
ANSI escape sequences used for text formatting.
References
----------
* https://stackoverflow.com/a/287944/4416932
"""
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
#: Batch run start messsage.
BATCH_RUN_START = f"{bcolors.UNDERLINE}{bcolors.HEADER}{bcolors.BOLD}{{analysis_version}}{bcolors.ENDC}{bcolors.UNDERLINE}{bcolors.HEADER}: Batch Execution{bcolors.ENDC}"
#: Base queryset generation message.
BASE_QUERY_START = "Querying {model_name} instances..."
#: Base queryset generation result.
BASE_QUERY_END = "{n_instances} instances found."
#: Report querying default execution queryset.
DEFAULT_QUERYSET_QUERY = (
f"\n{bcolors.OKBLUE}🔎 Default execution queryset generation:{bcolors.ENDC}"
)
#: Report the successful creation of an asynchronous execution task.
EXECUTION_STARTED = f"\n{bcolors.OKGREEN}🚀Successfully started {{analysis_version}} execution over {{n_instances}} {{model_name}} instances🚀{bcolors.ENDC}"
#: Filter queryset start.
FILTER_QUERYSET_START = "Filtering queryset..."
#: Report filter result.
FILTER_QUERYSET_END = "{n_candidates} execution candidates found."
#: Reporting starting to generate input specifications for analysis execution.
INPUT_GENERATION = (
f"\n🔀 {bcolors.OKBLUE}Generating input specifications:{bcolors.ENDC}"
)
INPUT_GENERATION_FINISHED = "{n_inputs} input specifications prepared."
#: Report querying existing input instances.
INPUT_QUERY_START = "Querying existing runs..."
#: Report number of existing input instances.
INPUT_QUERY_END = "{n_existing} runs found."
INPUT_QUERYSET_VALIDATION = (
f"\n{bcolors.OKBLUE}🔎 Input queryset validation:{bcolors.ENDC}"
)
#: No pending instances were detected in the database.
NONE_PENDING = f"{bcolors.OKGREEN}Congratulations! No pending {{model_name}} instances were detected in the database 👏{bcolors.ENDC}"
#: No pending instances were detected in the provided queryset.
NONE_PENDING_IN_QUERYSET = f"{bcolors.OKGREEN}All {{n_instances}} provided {{model_name}} instances have been processed already 👑{bcolors.ENDC}"
#: No candidates in execution queryset.
NO_CANDIDATES = f"{bcolors.WARNING}No execution candidates detected in {{model_name}} queryset!{bcolors.ENDC}"
#: Report pending instances.
PENDING_FOUND = f"{{n_existing}} existing runs found.\n{bcolors.BOLD}{{n_pending}}{bcolors.ENDC} instances pending execution."
#: Report starting a queryset existing/pending split.
PENDING_QUERY_START = f"\n⚖ {bcolors.OKBLUE}Checking execution status for the {{queryset_description}} queryset:\n{bcolors.ENDC}Filtering existing runs...\n(large querysets might take a few moments to be evaluated)"
#: General input preprocessing failure message.
PREPROCESSING_FAILURE = f"{bcolors.WARNING}Failed to preprocess {{model_name}} #{{instance_id}}!{bcolors.ENDC}"
#: Report number of preprocessing failures encountered.
PREPROCESSING_FAILURE_REPORT = f"{bcolors.WARNING}{bcolors.BOLD}{{n_invalid}} of {{n_total}} {{model_name}} instances failed to be preprocessed for input generation.{bcolors.ENDC}"
# flake8: noqa: E501
|
py | 7df71e5d959a2048cde1c5c547245bf67c894e76 | from oarepo_nusl_rules import rule_registry
from oarepo_nusl_rules.register import RuleRegistry
def test_register_load():
instance1 = RuleRegistry.Instance()
RuleRegistry.Instance().load()
instance2 = RuleRegistry.Instance()
assert len(RuleRegistry.Instance().rules) > 0
assert instance1 is instance2
def test_register_instance():
instance1 = rule_registry
instance2 = RuleRegistry.Instance()
assert instance1 is instance2
|
py | 7df71e6211fbd6d95879affef9c5a7a6c9ee9740 | """This module contains the general information for BiosVfXPTPrefetch ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfXPTPrefetchConsts:
VP_XPTPREFETCH_DISABLED = "Disabled"
VP_XPTPREFETCH_ENABLED = "Enabled"
_VP_XPTPREFETCH_DISABLED = "disabled"
_VP_XPTPREFETCH_ENABLED = "enabled"
VP_XPTPREFETCH_PLATFORM_DEFAULT = "platform-default"
class BiosVfXPTPrefetch(ManagedObject):
"""This is BiosVfXPTPrefetch class."""
consts = BiosVfXPTPrefetchConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfXPTPrefetch", "biosVfXPTPrefetch", "xpt-prefetch", VersionMeta.Version311d, "InputOutput", 0x1f, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version311d, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version311d, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version311d, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_xpt_prefetch": MoPropertyMeta("vp_xpt_prefetch", "vpXPTPrefetch", "string", VersionMeta.Version311d, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled", "platform-default"], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpXPTPrefetch": "vp_xpt_prefetch",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_xpt_prefetch = None
ManagedObject.__init__(self, "BiosVfXPTPrefetch", parent_mo_or_dn, **kwargs)
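# Illustrative usage sketch: a minimal example of enabling XPT prefetch through
# imcsdk; the endpoint, credentials and parent DN below are placeholders.
def _example_enable_xpt_prefetch():
    from imcsdk.imchandle import ImcHandle
    handle = ImcHandle("192.0.2.10", "admin", "password")  # placeholder CIMC endpoint
    handle.login()
    mo = BiosVfXPTPrefetch(parent_mo_or_dn="sys/rack-unit-1/bios/bios-settings",
                           vp_xpt_prefetch=BiosVfXPTPrefetchConsts.VP_XPTPREFETCH_ENABLED)
    handle.set_mo(mo)
    handle.logout()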
|
py | 7df71e6fa2edc72eb26c38b7b9ddf805fc896e8a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from numbers import Integral
import numpy as np
from ... import opcodes as OperandDef
from ...core import TilesError
from ...serialize import KeyField, AnyField, Int32Field
from ...utils import check_chunks_unknown_shape
from ..core import Tensor, TENSOR_TYPE, TENSOR_CHUNK_TYPE, TensorOrder
from ..utils import broadcast_shape, unify_chunks
from ..operands import TensorHasInput, TensorOperandMixin
from ..datasource import tensor as astensor
from ..array_utils import as_same_device, device
from .ravel import ravel
class TensorRepeat(TensorHasInput, TensorOperandMixin):
_op_type_ = OperandDef.REPEAT
_input = KeyField('input')
_repeats = AnyField('repeats')
_axis = Int32Field('axis')
def __init__(self, axis=None, dtype=None, sparse=False, **kw):
super().__init__(_axis=axis, _dtype=dtype, _sparse=sparse, **kw)
@property
def repeats(self):
return self._repeats
@property
def axis(self):
return self._axis
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
if len(inputs) > 1:
self._repeats = self._inputs[1]
def __call__(self, a, repeats):
axis = self._axis
a = astensor(a)
if axis is None:
a = ravel(a)
ax = axis or 0
if not isinstance(repeats, Integral):
if not isinstance(repeats, Tensor):
repeats = np.asarray(repeats)
if repeats.size == 1:
repeats = int(repeats[0])
size = repeats * a.shape[axis or 0]
elif a.shape[ax] == 1:
size = repeats = int(repeats.sum())
else:
size = int(repeats.sum())
else:
size = np.nan
if not isinstance(repeats, Integral):
if repeats.ndim != 1:
raise ValueError('repeats should be 1-d tensor')
broadcast_shape(repeats.shape, a.shape[ax: ax + 1])
else:
size = a.shape[axis or 0] * repeats
shape = a.shape[:ax] + (size,) + a.shape[ax + 1:]
self.dtype = a.dtype
self.sparse = a.issparse()
inputs = [a]
if isinstance(repeats, Tensor):
inputs.append(repeats)
else:
self._repeats = repeats
return self.new_tensor(inputs, shape, order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
a = op.input
repeats = op.repeats
axis = op.axis
ax = axis or 0
out = op.outputs[0]
check_chunks_unknown_shape(op.inputs, TilesError)
if isinstance(repeats, TENSOR_TYPE):
a, repeats = unify_chunks(a, (repeats, (ax,)))
nsplit = a.nsplits[axis or 0]
if isinstance(repeats, Integral):
new_nsplit = []
for split in nsplit:
s = max(split // repeats, 1)
c = split // s
new_nsplit.extend([s] * c)
if split % s != 0:
new_nsplit.append(split % s)
a = a.rechunk({ax: new_nsplit})._inplace_tile()
out_chunks = []
ax_cum_count = np.cumsum((0,) + a.nsplits[ax])
is_repeats_ndarray = isinstance(repeats, np.ndarray)
for out_idx in itertools.product(*[range(len(s)) for s in a.nsplits]):
in_chunk = a.cix[out_idx]
ax_idx = out_idx[ax]
if is_repeats_ndarray:
start = ax_cum_count[ax_idx]
stop = ax_cum_count[ax_idx + 1]
rp = repeats[start: stop]
size = int(rp.sum())
elif not isinstance(repeats, Integral):
rp = repeats.cix[ax_idx, ]
size = np.nan
else:
rp = repeats
size = in_chunk.shape[ax] * rp
chunk_inputs = [in_chunk]
if isinstance(rp, TENSOR_CHUNK_TYPE):
chunk_inputs.append(rp)
chunk_shape = in_chunk.shape[:ax] + (size,) + in_chunk.shape[ax + 1:]
chunk_op = op.copy().reset_key()
if len(chunk_inputs) < 2:
# repeats is not chunk
chunk_op._repeats = rp
out_chunk = chunk_op.new_chunk(chunk_inputs, shape=chunk_shape,
index=out_idx, order=out.order)
out_chunks.append(out_chunk)
nsplits = [tuple(c.shape[i] for c in out_chunks
if all(idx == 0 for j, idx in enumerate(c.index) if j != i))
for i in range(len(out_chunks[0].shape))]
new_op = op.copy()
return new_op.new_tensors(op.inputs, out.shape, order=out.order,
chunks=out_chunks, nsplits=nsplits)
@classmethod
def execute(cls, ctx, op):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
a = inputs[0]
if len(inputs) > 1:
repeats = inputs[1]
else:
repeats = op.repeats
with device(device_id):
ctx[op.outputs[0].key] = xp.repeat(a, repeats=repeats, axis=op.axis)
def repeat(a, repeats, axis=None):
"""
Repeat elements of a tensor.
Parameters
----------
a : array_like
Input tensor.
repeats : int or tensor of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input tensor, and return a flat output tensor.
Returns
-------
repeated_tensor : Tensor
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile a tensor.
Examples
--------
>>> import mars.tensor as mt
>>> mt.repeat(3, 4).execute()
array([3, 3, 3, 3])
>>> x = mt.array([[1,2],[3,4]])
>>> mt.repeat(x, 2).execute()
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> mt.repeat(x, 3, axis=1).execute()
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> mt.repeat(x, [1, 2], axis=0).execute()
array([[1, 2],
[3, 4],
[3, 4]])
"""
op = TensorRepeat(axis=axis)
return op(a, repeats)
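# --- Hedged usage sketch (not part of the original module) ---
# The shape logic in ``TensorRepeat.__call__`` mirrors ``numpy.repeat``: an
# integer ``repeats`` multiplies the axis length, a 1-d ``repeats`` contributes
# its sum, and anything whose total cannot be known up front becomes ``np.nan``.
# A small NumPy-only check of the first two rules (no mars execution needed):
def _example_repeat_shape_rule():
    x = np.arange(6).reshape(2, 3)
    # integer repeats: output axis length is shape[axis] * repeats
    assert np.repeat(x, 2, axis=1).shape == (2, 3 * 2)
    # per-element repeats: output axis length is sum(repeats)
    assert np.repeat(x, [1, 2, 3], axis=1).shape == (2, 1 + 2 + 3)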
|
py | 7df71f4cd7eba07294c30ab033df3aeb831c54cb | from autoarray.plot.wrap.wrap_base import set_backend
set_backend()
import matplotlib.pyplot as plt
from matplotlib import patches as ptch
from matplotlib.collections import PatchCollection
import numpy as np
import itertools
from typing import List, Union, Optional, Tuple
from autoarray.plot.wrap import wrap_base as wb
from autoarray.plot.wrap.wrap_base import AbstractMatWrap
from autoarray.inversion.mappers.voronoi import MapperVoronoiNoInterp
from autoarray.inversion.mappers.voronoi import MapperVoronoi
from autoarray.inversion.mappers.delaunay import MapperDelaunay
from autoarray.inversion.mappers.mapper_util import triangle_area_from
from autoarray.structures.grids.two_d.grid_2d import Grid2D
from autoarray.structures.grids.two_d.grid_2d_irregular import Grid2DIrregular
from autoarray.structures.vectors.irregular import VectorYX2DIrregular
from autoarray import exc
class AbstractMatWrap2D(AbstractMatWrap):
"""
An abstract base class for wrapping matplotlib plotting methods which take as input and plot data structures. For
example, the `ArrayOverlay` object specifically plots `Array2D` data structures.
As full description of the matplotlib wrapping can be found in `mat_base.AbstractMatWrap`.
"""
@property
def config_folder(self):
return "mat_wrap_2d"
class ArrayOverlay(AbstractMatWrap2D):
"""
Overlays an `Array2D` data structure over a figure.
This object wraps the following Matplotlib method:
- plt.imshow: https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.imshow.html
    This uses the `Units` and coordinate system of the `Array2D` to overlay it on the coordinate system of the
figure that is plotted.
"""
def overlay_array(self, array, figure):
aspect = figure.aspect_from(shape_native=array.shape_native)
extent = array.extent_of_zoomed_array(buffer=0)
plt.imshow(X=array.native, aspect=aspect, extent=extent, **self.config_dict)
class GridScatter(AbstractMatWrap2D):
"""
Scatters an input set of grid points, for example (y,x) coordinates or data structures representing 2D (y,x)
    coordinates like a `Grid2D` or `Grid2DIrregular`. Lists of (y,x) coordinates are plotted with varying colors.
This object wraps the following Matplotlib methods:
- plt.scatter: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.scatter.html
There are a number of children of this method in the `mat_obj.py` module that plot specific sets of (y,x)
    points. Each of these objects uses its own config file and settings so that each has a unique appearance
on every figure:
- `OriginScatter`: plots the (y,x) coordinates of the origin of a data structure (e.g. as a black cross).
- `MaskScatter`: plots a mask over an image, using the `Mask2d` object's (y,x) `edge_grid_sub_1` property.
- `BorderScatter: plots a border over an image, using the `Mask2d` object's (y,x) `border_grid_sub_1` property.
- `PositionsScatter`: plots the (y,x) coordinates that are input in a plotter via the `positions` input.
- `IndexScatter`: plots specific (y,x) coordinates of a grid (or grids) via their 1d or 2d indexes.
- `PixelizationGridScatter`: plots the grid of a `Pixelization` object (see `autoarray.inversion`).
Parameters
----------
colors : [str]
The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a
list of colors can be specified which the plot cycles through.
"""
def scatter_grid(self, grid: Union[np.ndarray, Grid2D]):
"""
Plot an input grid of (y,x) coordinates using the matplotlib method `plt.scatter`.
Parameters
----------
grid : Grid2D
The grid of (y,x) coordinates that is plotted.
errors
The error on every point of the grid that is plotted.
"""
config_dict = self.config_dict
if len(config_dict["c"]) > 1:
config_dict["c"] = config_dict["c"][0]
try:
plt.scatter(y=grid[:, 0], x=grid[:, 1], **config_dict)
except (IndexError, TypeError):
return self.scatter_grid_list(grid_list=grid)
def scatter_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]]):
"""
Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.scatter`.
This method colors each grid in each entry of the list the same, so that the different grids are visible in
the plot.
Parameters
----------
grid_list
The list of grids of (y,x) coordinates that are plotted.
"""
if len(grid_list) == 0:
return
color = itertools.cycle(self.config_dict["c"])
config_dict = self.config_dict
config_dict.pop("c")
try:
for grid in grid_list:
plt.scatter(y=grid[:, 0], x=grid[:, 1], c=next(color), **config_dict)
except IndexError:
return None
def scatter_grid_colored(
self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str
):
"""
Plot an input grid of (y,x) coordinates using the matplotlib method `plt.scatter`.
The method colors the scattered grid according to an input ndarray of color values, using an input colormap.
Parameters
----------
grid : Grid2D
The grid of (y,x) coordinates that is plotted.
color_array : ndarray
The array of RGB color values used to color the grid.
cmap : str
The Matplotlib colormap used for the grid point coloring.
"""
config_dict = self.config_dict
config_dict.pop("c")
plt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap, **config_dict)
def scatter_grid_indexes(
self, grid: Union[np.ndarray, Grid2D], indexes: np.ndarray
):
"""
Plot specific points of an input grid of (y,x) coordinates, which are specified according to the 1D or 2D
indexes of the `Grid2D`.
This method allows us to color in points on grids that map between one another.
Parameters
----------
grid : Grid2D
The grid of (y,x) coordinates that is plotted.
indexes
The 1D indexes of the grid that are colored in when plotted.
"""
if not isinstance(grid, np.ndarray):
raise exc.PlottingException(
"The grid passed into scatter_grid_indexes is not a ndarray and thus its"
"1D indexes cannot be marked and plotted."
)
if len(grid.shape) != 2:
raise exc.PlottingException(
"The grid passed into scatter_grid_indexes is not 2D (e.g. a flattened 1D"
"grid) and thus its 1D indexes cannot be marked."
)
if isinstance(indexes, list):
if not any(isinstance(i, list) for i in indexes):
indexes = [indexes]
color = itertools.cycle(self.config_dict["c"])
config_dict = self.config_dict
config_dict.pop("c")
for index_list in indexes:
if all([isinstance(index, float) for index in index_list]) or all(
[isinstance(index, int) for index in index_list]
):
plt.scatter(
y=grid[index_list, 0],
x=grid[index_list, 1],
color=next(color),
**config_dict,
)
elif all([isinstance(index, tuple) for index in index_list]) or all(
[isinstance(index, list) for index in index_list]
):
ys, xs = map(list, zip(*index_list))
plt.scatter(
y=grid.native[ys, xs, 0],
x=grid.native[ys, xs, 1],
color=next(color),
**config_dict,
)
else:
raise exc.PlottingException(
"The indexes input into the grid_scatter_index method do not conform to a "
"useable type"
)
class GridPlot(AbstractMatWrap2D):
"""
Plots `Grid2D` data structure that are better visualized as solid lines, for example rectangular lines that are
plotted over an image and grids of (y,x) coordinates as lines (as opposed to a scatter of points
using the `GridScatter` object).
This object wraps the following Matplotlib methods:
- plt.plot: https://matplotlib.org/3.3.3/api/_as_gen/matplotlib.pyplot.plot.html
Parameters
----------
colors : [str]
The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a
list of colors can be specified which the plot cycles through.
"""
def plot_rectangular_grid_lines(
self, extent: Tuple[float, float, float, float], shape_native: Tuple[int, int]
):
"""
Plots a rectangular grid of lines on a plot, using the coordinate system of the figure.
The size and shape of the grid is specified by the `extent` and `shape_native` properties of a data structure
        which will provide the rectangular grid lines on a suitable coordinate system for the plot.
Parameters
----------
extent : (float, float, float, float)
            The extent of the rectangular grid, with format [xmin, xmax, ymin, ymax]
shape_native
The 2D shape of the mask the array is paired with.
"""
ys = np.linspace(extent[2], extent[3], shape_native[1] + 1)
xs = np.linspace(extent[0], extent[1], shape_native[0] + 1)
# grid lines
for x in xs:
plt.plot([x, x], [ys[0], ys[-1]], **self.config_dict)
for y in ys:
plt.plot([xs[0], xs[-1]], [y, y], **self.config_dict)
def plot_grid(self, grid: Union[np.ndarray, Grid2D]):
"""
Plot an input grid of (y,x) coordinates using the matplotlib method `plt.scatter`.
Parameters
----------
grid : Grid2D
The grid of (y,x) coordinates that is plotted.
"""
try:
plt.plot(grid[:, 1], grid[:, 0], **self.config_dict)
except (IndexError, TypeError):
return self.plot_grid_list(grid_list=grid)
def plot_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]]):
"""
        Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.plot`.
This method colors each grid in the list the same, so that the different grids are visible in the plot.
This provides an alternative to `GridScatter.scatter_grid_list` where the plotted grids appear as lines
instead of scattered points.
Parameters
----------
grid_list : Grid2DIrregular
The list of grids of (y,x) coordinates that are plotted.
"""
if len(grid_list) == 0:
return None
color = itertools.cycle(self.config_dict["c"])
config_dict = self.config_dict
config_dict.pop("c")
try:
for grid in grid_list:
plt.plot(grid[:, 1], grid[:, 0], c=next(color), **config_dict)
except IndexError:
return None
class GridErrorbar(AbstractMatWrap2D):
"""
Plots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D
(y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with
varying colors.
This object wraps the following Matplotlib methods:
- plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html
Parameters
----------
colors : [str]
The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a
list of colors can be specified which the plot cycles through.
"""
def errorbar_grid(
self,
grid: Union[np.ndarray, Grid2D],
y_errors: Optional[Union[np.ndarray, List]] = None,
x_errors: Optional[Union[np.ndarray, List]] = None,
):
"""
Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`.
The (y,x) coordinates are plotted as dots, with a line / cross for its errors.
Parameters
----------
grid : Grid2D
The grid of (y,x) coordinates that is plotted.
y_errors
The y values of the error on every point of the grid that is plotted (e.g. vertically).
x_errors
The x values of the error on every point of the grid that is plotted (e.g. horizontally).
"""
config_dict = self.config_dict
if len(config_dict["c"]) > 1:
config_dict["c"] = config_dict["c"][0]
try:
plt.errorbar(
y=grid[:, 0], x=grid[:, 1], yerr=y_errors, xerr=x_errors, **config_dict
)
except (IndexError, TypeError):
return self.errorbar_grid_list(grid_list=grid)
def errorbar_grid_list(
self,
grid_list: Union[List[Grid2D], List[Grid2DIrregular]],
y_errors: Optional[Union[np.ndarray, List]] = None,
x_errors: Optional[Union[np.ndarray, List]] = None,
):
"""
Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`.
The (y,x) coordinates are plotted as dots, with a line / cross for its errors.
This method colors each grid in each entry of the list the same, so that the different grids are visible in
the plot.
Parameters
----------
grid_list
The list of grids of (y,x) coordinates that are plotted.
"""
if len(grid_list) == 0:
return
color = itertools.cycle(self.config_dict["c"])
config_dict = self.config_dict
config_dict.pop("c")
try:
for grid in grid_list:
plt.errorbar(
y=grid[:, 0],
x=grid[:, 1],
yerr=np.asarray(y_errors),
xerr=np.asarray(x_errors),
c=next(color),
**config_dict,
)
except IndexError:
return None
def errorbar_grid_colored(
self,
grid: Union[np.ndarray, Grid2D],
color_array: np.ndarray,
cmap: str,
y_errors: Optional[Union[np.ndarray, List]] = None,
x_errors: Optional[Union[np.ndarray, List]] = None,
):
"""
Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`.
The method colors the errorbared grid according to an input ndarray of color values, using an input colormap.
Parameters
----------
grid : Grid2D
The grid of (y,x) coordinates that is plotted.
color_array : ndarray
The array of RGB color values used to color the grid.
cmap : str
The Matplotlib colormap used for the grid point coloring.
"""
config_dict = self.config_dict
config_dict.pop("c")
plt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap)
plt.errorbar(
y=grid[:, 0],
x=grid[:, 1],
yerr=np.asarray(y_errors),
xerr=np.asarray(x_errors),
zorder=0.0,
**self.config_dict,
)
class VectorYXQuiver(AbstractMatWrap2D):
"""
Plots a `VectorField` data structure. A vector field is a set of 2D vectors on a grid of 2d (y,x) coordinates.
    These are plotted as arrows representing the (y,x) components of each vector at each (y,x) coordinate of its
grid.
This object wraps the following Matplotlib method:
https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.quiver.html
"""
def quiver_vectors(self, vectors: VectorYX2DIrregular):
"""
Plot a vector field using the matplotlib method `plt.quiver` such that each vector appears as an arrow whose
direction depends on the y and x magnitudes of the vector.
Parameters
----------
vectors : VectorYX2DIrregular
The vector field that is plotted using `plt.quiver`.
"""
plt.quiver(
vectors.grid[:, 1],
vectors.grid[:, 0],
vectors[:, 1],
vectors[:, 0],
**self.config_dict,
)
class PatchOverlay(AbstractMatWrap2D):
"""
Adds patches to a plotted figure using matplotlib `patches` objects.
The coordinate system of each `Patch` uses that of the figure, which is typically set up using the plotted
    data structure. This makes it straightforward to add patches in specific locations.
    This object wraps the methods described below:
https://matplotlib.org/3.3.2/api/collections_api.html
"""
    def overlay_patches(self, patches: Union[ptch.Patch, List[ptch.Patch]]):
"""
Overlay a list of patches on a figure, for example an `Ellipse`.
Parameters
----------
patches : [Patch]
The patches that are laid over the figure.
"""
patch_collection = PatchCollection(patches=patches, **self.config_dict)
plt.gcf().gca().add_collection(patch_collection)
class VoronoiDrawer(AbstractMatWrap2D):
"""
Draws Voronoi pixels from a `MapperVoronoiNoInterp` object (see `inversions.mapper`). This includes both drawing
each Voronoi cell and coloring it according to a color value.
The mapper contains the grid of (y,x) coordinate where the centre of each Voronoi cell is plotted.
    This object wraps the methods described below:
https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.fill.html
"""
def draw_voronoi_pixels(
self,
mapper: MapperVoronoiNoInterp,
values: np.ndarray,
cmap: wb.Cmap,
colorbar: wb.Colorbar,
colorbar_tickparams: wb.ColorbarTickParams = None,
):
"""
Draws the Voronoi pixels of the input `mapper` using its `pixelization_grid` which contains the (y,x)
coordinate of the centre of every Voronoi cell. This uses the method `plt.fill`.
Parameters
----------
mapper : MapperVoronoiNoInterp
An object which contains the (y,x) grid of Voronoi cell centres.
values
An array used to compute the color values that every Voronoi cell is plotted using.
cmap : str
The colormap used to plot each Voronoi cell.
colorbar : Colorbar
The `Colorbar` object in `mat_base` used to set the colorbar of the figure the Voronoi mesh is plotted on.
"""
regions, vertices = self.voronoi_polygons(voronoi=mapper.voronoi)
if values is not None:
vmin = cmap.vmin_from(array=values)
vmax = cmap.vmax_from(array=values)
color_values = np.where(values > vmax, vmax, values)
color_values = np.where(values < vmin, vmin, color_values)
if vmax != vmin:
color_array = (color_values - vmin) / (vmax - vmin)
else:
color_array = np.ones(color_values.shape[0])
cmap = plt.get_cmap(cmap.config_dict["cmap"])
if colorbar is not None:
colorbar = colorbar.set_with_color_values(
cmap=cmap, color_values=color_values
)
if colorbar is not None and colorbar_tickparams is not None:
colorbar_tickparams.set(cb=colorbar)
else:
cmap = plt.get_cmap("Greys")
color_array = np.zeros(shape=mapper.pixels)
for region, index in zip(regions, range(mapper.pixels)):
polygon = vertices[region]
col = cmap(color_array[index])
plt.fill(*zip(*polygon), facecolor=col, zorder=-1, **self.config_dict)
def voronoi_polygons(self, voronoi, radius=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite regions.
Parameters
----------
voronoi : Voronoi
The input Voronoi diagram that is being plotted.
radius, optional
Distance to 'points at infinity'.
Returns
-------
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Grid2DIrregular for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
"""
if voronoi.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = voronoi.vertices.tolist()
center = voronoi.points.mean(axis=0)
if radius is None:
radius = voronoi.points.ptp().max() * 2
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(voronoi.ridge_points, voronoi.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(voronoi.point_region):
vertices = voronoi.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = voronoi.points[p2] - voronoi.points[p1] # tangent
t /= np.linalg.norm(t)
                n = np.array([-t[1], t[0]])  # normal (perpendicular to the tangent)
midpoint = voronoi.points[[p1, p2]].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = voronoi.vertices[v2] + direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# sort region counterclockwise
vs = np.asarray([new_vertices[v] for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
new_region = np.array(new_region)[np.argsort(angles)]
# finish
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
class DelaunayDrawer(AbstractMatWrap2D):
"""
    Draws the pixels of a Delaunay triangulation from a `MapperDelaunay` object (see `inversions.mapper`). The
    pixel values are interpolated onto a regular grid of points and colored according to those values.
    The mapper contains the grid of (y,x) coordinates at the vertices of the Delaunay triangulation.
    This object wraps the methods described below:
    https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.imshow.html
"""
def draw_delaunay_pixels(
self,
mapper: MapperDelaunay,
values: np.ndarray,
cmap: wb.Cmap,
colorbar: wb.Colorbar,
colorbar_tickparams: wb.ColorbarTickParams = None,
aspect=None,
):
"""
        Draws the Delaunay pixels of the input `mapper` using its `pixelization_grid`, which contains the (y,x)
        coordinates of the vertices of the Delaunay triangulation. The pixel values are interpolated onto a
        regular grid and plotted using the method `plt.imshow`.
        Parameters
        ----------
        mapper : MapperDelaunay
            An object which contains the (y,x) grid of Delaunay triangulation vertices.
        values
            An array used to compute the color values that every Delaunay pixel is plotted using.
        cmap : str
            The colormap used to plot each Delaunay pixel.
        colorbar : Colorbar
            The `Colorbar` object in `mat_base` used to set the colorbar of the figure the Delaunay mesh is plotted on.
"""
extent = mapper.source_pixelization_grid.extent
y_mean = 0.5 * (extent[2] + extent[3])
y_half_length = 0.5 * (extent[3] - extent[2])
x_mean = 0.5 * (extent[0] + extent[1])
x_half_length = 0.5 * (extent[1] - extent[0])
half_length = np.max([y_half_length, x_half_length])
y0 = y_mean - half_length
y1 = y_mean + half_length
x0 = x_mean - half_length
x1 = x_mean + half_length
nnn = 401
ys = np.linspace(y0, y1, nnn)
xs = np.linspace(x0, x1, nnn)
xs_grid, ys_grid = np.meshgrid(xs, ys)
xs_grid_1d = xs_grid.ravel()
ys_grid_1d = ys_grid.ravel()
if values is None:
return
interpolating_values = self.delaunay_interpolation_from(
delaunay=mapper.delaunay,
interpolating_yx=np.vstack((ys_grid_1d, xs_grid_1d)).T,
pixel_values=values,
)
vmin = cmap.vmin_from(array=values)
vmax = cmap.vmax_from(array=values)
color_values = np.where(values > vmax, vmax, values)
color_values = np.where(values < vmin, vmin, color_values)
cmap = plt.get_cmap(cmap.config_dict["cmap"])
if colorbar is not None:
colorbar = colorbar.set_with_color_values(
cmap=cmap, color_values=color_values
)
if colorbar is not None and colorbar_tickparams is not None:
colorbar_tickparams.set(cb=colorbar)
plt.imshow(
interpolating_values.reshape((nnn, nnn)),
cmap=cmap,
extent=[x0, x1, y0, y1],
origin="lower",
aspect=aspect,
)
# uncomment below if only plot triangle boundaries
# d_points, simplices = self.delaunay_triangles(mapper.delaunay)
# plt.triplot(d_points[:, 0], d_points[:, 1], simplices)
# plt.xlim([-0.6, 0.6])
# plt.ylim([-0.6, 0.6])
def delaunay_triangles(self, delaunay):
"""
        Returns the vertices and simplices of a Delaunay triangulation in a form suitable for plotting, e.g. via
        `plt.triplot`.
        Parameters
        ----------
        delaunay : Delaunay
            The input Delaunay triangulation that is being plotted.
        Returns
        -------
        points : ndarray
            The (x, y) coordinates of the triangulation's vertices.
        simplices : ndarray
            The indices of the vertices making up each triangle of the triangulation.
"""
xpts = delaunay.points[:, 1]
ypts = delaunay.points[:, 0]
return np.vstack((xpts, ypts)).T, delaunay.simplices
def delaunay_interpolation_from(self, delaunay, interpolating_yx, pixel_values):
simplex_index_for_interpolating_points = delaunay.find_simplex(interpolating_yx)
simplices = delaunay.simplices
pixel_points = delaunay.points
interpolating_values = np.zeros(len(interpolating_yx))
for i in range(len(interpolating_yx)):
simplex_index = simplex_index_for_interpolating_points[i]
interpolating_point = interpolating_yx[i]
if simplex_index == -1:
                closest_pixel_index = np.argmin(
                    np.sum((pixel_points - interpolating_point) ** 2.0, axis=1)
                )
                interpolating_values[i] = pixel_values[closest_pixel_index]
else:
triangle_points = pixel_points[simplices[simplex_index]]
triangle_values = pixel_values[simplices[simplex_index]]
term0 = triangle_area_from(
corner_0=triangle_points[1],
corner_1=triangle_points[2],
corner_2=interpolating_point,
)
term1 = triangle_area_from(
corner_0=triangle_points[0],
corner_1=triangle_points[2],
corner_2=interpolating_point,
)
term2 = triangle_area_from(
corner_0=triangle_points[0],
corner_1=triangle_points[1],
corner_2=interpolating_point,
)
norm = term0 + term1 + term2
weight_abc = np.array([term0, term1, term2]) / norm
interpolating_values[i] = np.sum(weight_abc * triangle_values)
return interpolating_values
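# --- Hedged sketch (not part of the original module) ---
# ``delaunay_interpolation_from`` above weights the three vertex values of the
# triangle containing each point by the areas of the opposite sub-triangles
# (barycentric coordinates), falling back to the nearest vertex outside the
# triangulation. A standalone check with scipy; it assumes scipy is available
# and that a DelaunayDrawer can be constructed with its default configuration.
def _example_delaunay_barycentric():
    from scipy.spatial import Delaunay

    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    values = np.array([0.0, 1.0, 2.0, 3.0])
    triangulation = Delaunay(points)
    centroid = points[triangulation.simplices[0]].mean(axis=0, keepdims=True)
    interpolated = DelaunayDrawer().delaunay_interpolation_from(
        delaunay=triangulation, interpolating_yx=centroid, pixel_values=values
    )
    # at a triangle centroid the barycentric weights are all 1/3
    assert np.isclose(interpolated[0], values[triangulation.simplices[0]].mean())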
class VoronoiNNDrawer(AbstractMatWrap2D):
"""
    Draws Voronoi pixels from a `MapperVoronoi` object (see `inversions.mapper`), using natural neighbor
    interpolation to color the values across each Voronoi cell.
    The mapper contains the grid of (y,x) coordinates where the centre of each Voronoi cell is plotted.
    This object wraps the methods described below:
    https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.pyplot.imshow.html
"""
def draw_voronoiNN_pixels(
self,
mapper: MapperVoronoi,
values: np.ndarray,
cmap: wb.Cmap,
colorbar: wb.Colorbar,
colorbar_tickparams: wb.ColorbarTickParams = None,
aspect=None,
):
"""
        Draws the Voronoi pixels of the input `mapper` using its `pixelization_grid`, which contains the (y,x)
        coordinate of the centre of every Voronoi cell. The values are interpolated onto a regular grid using
        natural neighbor interpolation and plotted using the method `plt.imshow`.
        Parameters
        ----------
        mapper : MapperVoronoi
            An object which contains the (y,x) grid of Voronoi cell centres.
        values
            An array used to compute the color values that every Voronoi cell is plotted using.
        cmap : str
            The colormap used to plot each Voronoi cell.
        colorbar : Colorbar
            The `Colorbar` object in `mat_base` used to set the colorbar of the figure the Voronoi mesh is plotted on.
"""
extent = mapper.source_pixelization_grid.extent
y_mean = 0.5 * (extent[2] + extent[3])
y_half_length = 0.5 * (extent[3] - extent[2])
x_mean = 0.5 * (extent[0] + extent[1])
x_half_length = 0.5 * (extent[1] - extent[0])
half_length = np.max([y_half_length, x_half_length])
y0 = y_mean - half_length
y1 = y_mean + half_length
x0 = x_mean - half_length
x1 = x_mean + half_length
nnn = 401
ys = np.linspace(y0, y1, nnn)
xs = np.linspace(x0, x1, nnn)
xs_grid, ys_grid = np.meshgrid(xs, ys)
xs_grid_1d = xs_grid.ravel()
ys_grid_1d = ys_grid.ravel()
if values is None:
return
interpolating_values = self.voronoiNN_interpolation_from(
voronoi=mapper.voronoi,
interpolating_yx=np.vstack((ys_grid_1d, xs_grid_1d)).T,
pixel_values=values,
)
vmin = cmap.vmin_from(array=values)
vmax = cmap.vmax_from(array=values)
color_values = np.where(values > vmax, vmax, values)
color_values = np.where(values < vmin, vmin, color_values)
cmap = plt.get_cmap(cmap.config_dict["cmap"])
if colorbar is not None:
colorbar = colorbar.set_with_color_values(
cmap=cmap, color_values=color_values
)
if colorbar is not None and colorbar_tickparams is not None:
colorbar_tickparams.set(cb=colorbar)
plt.imshow(
interpolating_values.reshape((nnn, nnn)),
cmap=cmap,
extent=[x0, x1, y0, y1],
origin="lower",
aspect=aspect,
)
# uncomment below if only plot triangle boundaries
# d_points, simplices = self.delaunay_triangles(mapper.delaunay)
# plt.triplot(d_points[:, 0], d_points[:, 1], simplices)
# plt.xlim([-0.6, 0.6])
# plt.ylim([-0.6, 0.6])
def voronoiNN_interpolation_from(self, voronoi, interpolating_yx, pixel_values):
try:
from autoarray.util.nn import nn_py
except ImportError as e:
raise ImportError(
"In order to use the VoronoiNN pixelization you must install the "
"Natural Neighbor Interpolation c package.\n\n"
""
"See: https://github.com/Jammy2211/PyAutoArray/tree/master/autoarray/util/nn"
) from e
pixel_points = voronoi.points
interpolating_values = nn_py.natural_interpolation(
pixel_points[:, 0],
pixel_points[:, 1],
pixel_values,
interpolating_yx[:, 1],
interpolating_yx[:, 0],
)
return interpolating_values
class OriginScatter(GridScatter):
"""
Plots the (y,x) coordinates of the origin of a data structure (e.g. as a black cross).
See `mat_structure.Scatter` for a description of how matplotlib is wrapped to make this plot.
"""
pass
class MaskScatter(GridScatter):
"""
Plots a mask over an image, using the `Mask2d` object's (y,x) `edge_grid_sub_1` property.
See `mat_structure.Scatter` for a description of how matplotlib is wrapped to make this plot.
"""
pass
class BorderScatter(GridScatter):
"""
Plots a border over an image, using the `Mask2d` object's (y,x) `border_grid_sub_1` property.
See `mat_structure.Scatter` for a description of how matplotlib is wrapped to make this plot.
"""
pass
class PositionsScatter(GridScatter):
"""
Plots the (y,x) coordinates that are input in a plotter via the `positions` input.
See `mat_structure.Scatter` for a description of how matplotlib is wrapped to make this plot.
"""
pass
class IndexScatter(GridScatter):
"""
Plots specific (y,x) coordinates of a grid (or grids) via their 1d or 2d indexes.
See `mat_structure.Scatter` for a description of how matplotlib is wrapped to make this plot.
"""
pass
class PixelizationGridScatter(GridScatter):
"""
Plots the grid of a `Pixelization` object (see `autoarray.inversion`).
See `mat_structure.Scatter` for a description of how matplotlib is wrapped to make this plot.
"""
pass
class ParallelOverscanPlot(GridPlot):
pass
class SerialPrescanPlot(GridPlot):
pass
class SerialOverscanPlot(GridPlot):
pass
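# --- Hedged usage sketch (not part of the original module) ---
# ``VoronoiDrawer.voronoi_polygons`` closes the open (infinite) regions of a
# scipy Voronoi diagram by adding far points along each infinite ridge. A
# minimal standalone check of that behaviour; it assumes scipy is available and
# that a VoronoiDrawer can be constructed with its default configuration.
def _example_voronoi_polygons():
    from scipy.spatial import Voronoi

    points = np.random.default_rng(seed=0).random((10, 2))
    regions, vertices = VoronoiDrawer().voronoi_polygons(voronoi=Voronoi(points))
    # every reconstructed region now refers only to finite vertices
    assert all(v >= 0 for region in regions for v in region)
    assert vertices.shape[1] == 2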
|
py | 7df7203f505654709e6468f8f8572067b8603b3d | import pathlib
from functools import cached_property
from typing import (
Dict, Iterable,
Optional, Pattern, Set, Tuple, Type)
import verboselogs # type:ignore
import aiohttp
import gidgethub.abc
import gidgethub.aiohttp
import abstracts
from aio.core.tasks import ConcurrentError
from aio.core.functional import async_property
from envoy.github.abstract import (
AGithubRelease, AGithubReleaseAssetsFetcher,
AGithubReleaseAssetsPusher, AGithubReleaseManager,
GithubReleaseError, ReleaseDict)
from envoy.github.release.assets import (
GithubReleaseAssetsFetcher, GithubReleaseAssetsPusher)
@abstracts.implementer(AGithubRelease)
class GithubRelease:
file_exts = {"deb", "changes", "rpm"}
def __init__(self, manager: AGithubReleaseManager, version: str):
self.manager = manager
self._version = version
@async_property(cache=True)
async def asset_names(self) -> Set[str]:
"""Set of the names of assets for this release version."""
return set(asset["name"] for asset in await self.assets)
@async_property(cache=True)
async def assets(self) -> Dict:
"""Assets dictionary as returned by Github Release API."""
try:
return await self.github.getitem(await self.assets_url)
except gidgethub.GitHubException as e:
raise GithubReleaseError(e)
@async_property(cache=True)
async def assets_url(self) -> str:
"""URL for retrieving this version's assets information from."""
return (await self.release)["assets_url"]
@async_property(cache=True)
async def delete_url(self) -> pathlib.PurePosixPath:
"""Github API-relative URL for deleting this release version."""
return self.releases_url.joinpath(str(await self.release_id))
@async_property
async def exists(self) -> bool:
return self.version_name in await self.release_names
@property
def fetcher(self) -> Type[AGithubReleaseAssetsFetcher]:
return GithubReleaseAssetsFetcher
@property
def github(self) -> gidgethub.abc.GitHubAPI:
return self.manager.github
@property
def log(self) -> verboselogs.VerboseLogger:
return self.manager.log
@property
def pusher(self) -> Type[AGithubReleaseAssetsPusher]:
return GithubReleaseAssetsPusher
@async_property(cache=True)
async def release(self) -> Dict:
"""Dictionary of release version information as returned by the Github
Release API."""
return await self.get()
@async_property(cache=True)
async def release_id(self) -> int:
"""The Github release ID for this version, required for some URLs."""
return (await self.release)["id"]
@async_property
async def release_names(self) -> Tuple[str, ...]:
"""Tuple of release tag names as returned by the Github Release API.
This is used to check whether the release exists already.
"""
return tuple(
release["tag_name"]
for release
in await self.manager.releases)
@property
def releases_url(self) -> pathlib.PurePosixPath:
return self.manager.releases_url
@property
def session(self) -> aiohttp.ClientSession:
return self.manager.session
@async_property(cache=True)
async def upload_url(self) -> str:
"""Upload URL for this release version."""
return (await self.release)["upload_url"].split("{")[0]
@property
def version(self) -> str:
return self._version
@property
def version_name(self) -> str:
return self.manager.format_version(self.version)
@cached_property
def version_url(self) -> pathlib.PurePosixPath:
"""Github API-relative URL to retrieve release version information
from."""
return self.releases_url.joinpath("tags", self.version_name)
async def create(
self,
assets: Optional[Iterable[pathlib.Path]] = None) -> ReleaseDict:
results = ReleaseDict()
if await self.exists:
self.fail(f"Release {self.version_name} already exists")
else:
self.log.notice(f"Creating release {self.version}")
try:
results["release"] = await self.github.post(
str(self.releases_url),
data=dict(tag_name=self.version_name))
except gidgethub.GitHubException as e:
raise GithubReleaseError(e)
self.log.success(f"Release created {self.version}")
if assets:
results.update(await self.push(assets))
return results
async def delete(self) -> None:
if not await self.exists:
raise GithubReleaseError(
f"Unable to delete version {self.version_name} as it does not "
"exist")
self.log.notice(f"Deleting release version: {self.version_name}")
try:
await self.github.delete(str(await self.delete_url))
except gidgethub.GitHubException as e:
raise GithubReleaseError(e)
self.log.success(f"Release version deleted: {self.version_name}")
async def fetch(
self,
path: pathlib.Path,
asset_types: Optional[Dict[str, Pattern[str]]] = None,
append: Optional[bool] = False) -> ReleaseDict:
self.log.notice(
"Downloading assets for release version: "
f"{self.version_name} -> {path}")
response = ReleaseDict(assets=[], errors=[])
fetcher = self.fetcher(self, path, asset_types, append=append)
async for result in fetcher:
if result.get("error"):
response["errors"].append(result)
continue
response["assets"].append(result)
self.log.info(
f"Asset saved: {result['name']} -> {result['outfile']}")
if not response["errors"]:
self.log.success(
"Assets downloaded for release version: "
f"{self.version_name} -> {path}")
return response
def fail(self, message: str) -> str:
return self.manager.fail(message)
async def get(self) -> Dict:
try:
return await self.github.getitem(str(self.version_url))
except gidgethub.GitHubException as e:
raise GithubReleaseError(e)
async def push(
self,
artefacts: Iterable[pathlib.Path]) -> ReleaseDict:
self.log.notice(f"Pushing assets for {self.version}")
response = ReleaseDict(assets=[], errors=[])
try:
for path in artefacts:
async for result in self.pusher(self, path):
if result.get("error"):
response["errors"].append(result)
continue
response["assets"].append(result)
self.log.info(f"Release file uploaded {result['name']}")
except ConcurrentError as e:
raise e.args[0]
if not response["errors"]:
self.log.success(f"Assets uploaded: {self.version}")
return response
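# --- Hedged usage sketch (not part of the original module) ---
# A GithubRelease is always driven through an AGithubReleaseManager, which
# supplies the gidgethub client, the release URLs and the logger. The coroutine
# below only sketches the intended call pattern; it is never called here, and a
# concrete manager must come from the surrounding envoy release tooling.
async def _example_publish(
        manager: AGithubReleaseManager,
        version: str,
        assets: Iterable[pathlib.Path]) -> ReleaseDict:
    release = GithubRelease(manager, version)
    if await release.exists:
        # the tag already exists: just push the additional assets
        return await release.push(assets)
    # otherwise create the release and upload the assets in one call
    return await release.create(assets=assets)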
|
py | 7df72122b6db3e8743d9f8bd644f95a82793657b | import os
import torch
from torch.hub import load_state_dict_from_url
from torchvision.datasets.folder import default_loader
from pathlib import Path
from torchvision import transforms
from .alexnet_gn import *
from .resnet import *
url_root = "https://visionlab-pretrainedmodels.s3.amazonaws.com"
def build_alexnet_model(weights_url, config):
model = alexnet_gn(out_dim=config['out_dim'], l2norm=config['l2norm'])
model.config = config
if weights_url is not None:
print(f"=> loading checkpoint: {Path(weights_url).name}")
checkpoint = load_state_dict_from_url(weights_url, model_dir=None, map_location=torch.device('cpu'))
state_dict = {str.replace(k,'module.',''): v for k,v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
print("=> state loaded.")
# used for test stimuli (for which we don't want to crop out edges)
transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=config['mean'], std=config['std'])
])
# standard resize and center crop for validation
model.val_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=config['mean'], std=config['std'])
])
return model, transform
def ipcl1():
model_name = 'ipcl_alpha_alexnet_gn_u128_stack'
filename = '06_instance_imagenet_AlexNet_n5_lr03_pct40_t07_div1000_e100_bs128_bm20_gn_stack_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 1,
"type": "ipcl",
"details": "primary model",
"aug": "Set 1",
"top1_knn": 38.4,
"top1_linear": 39.5,
"out_dim": 128,
"l2norm": True,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl2():
model_name = 'ipcl_alpha_alexnet_gn_u128_rep2'
filename = '06_instance_imagenet_AlexNet_n5_lr03_pct40_t07_div1000_e100_bs128_bm20_gn_rep2_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 2,
"type": "ipcl",
"details": "variation: new code base",
"aug": "Set 1",
"top1_knn": 38.4,
"top1_linear": 39.7,
"out_dim": 128,
"l2norm": True,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl3():
model_name = 'ipcl_alpha_alexnet_gn_u128_redux'
filename = 'alexnet_gn_dim128_unsupervised_redux_checkpoint_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 3,
"type": "ipcl",
"details": "variation: one cycle lr & momentum (73 epochs)",
"aug": "Set 1",
"top1_knn": 35.4,
"top1_linear": 35.7,
"out_dim": 128,
"l2norm": True,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl4():
model_name = 'ipcl_alpha_alexnet_gn_u128_ranger'
filename = 'alexnet_gn_dim128_unsupervised_ranger_checkpoint_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 4,
"type": "ipcl",
"details": "variation: explore ranger (82 epochs)",
"aug": "Set 1",
"top1_knn": 37.5,
"top1_linear": 32.2,
"out_dim": 128,
"l2norm": True,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl5():
model_name = 'ipcl_alpha_alexnet_gn_u128_transforms'
filename = 'alexnet_gn_dim128_unsupervised_transforms_checkpoint_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 5,
"type": "ipcl",
"details": "variation: custom transforms (82 epochs)",
"aug": "Set 1",
"top1_knn": 36.9,
"top1_linear": 38.5,
"out_dim": 128,
"l2norm": True,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl6():
model_name = 'ipcl_alexnet_gn_u128_imagenet'
filename = 'alexnet_gn_u128_imagenet_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 6,
"type": "ipcl",
"details": "ImageNet baseline with new augmentations",
"aug": "Set 2",
"top1_knn": 35.1,
"top1_linear": None,
"out_dim": 128,
"l2norm": True,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl7():
model_name = 'ipcl_alexnet_gn_u128_openimagesv6'
filename = 'alexnet_gn_u128_openimagesv6_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 7,
"type": "ipcl",
"details": "train on independent object dataset, OpenImagesV6",
"aug": "Set 2",
"top1_knn": 33.3,
"top1_linear": None,
"out_dim": 128,
"l2norm": True,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl8():
model_name = 'ipcl_alexnet_gn_u128_places2'
filename = 'alexnet_gn_u128_places2_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 8,
"type": "ipcl",
"details": "train on scene dataset, Places2",
"aug": "Set 2",
"top1_knn": 30.9,
"top1_linear": None,
"out_dim": 128,
"l2norm": True,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl9():
model_name = 'ipcl_alexnet_gn_u128_vggface2'
filename = 'alexnet_gn_u128_vggface2_lr001_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 9,
"type": "ipcl",
"details": "train on face dataset, VggFace2",
"aug": "Set 2",
"top1_knn": 12.4,
"top1_linear": None,
"out_dim": 128,
"l2norm": True,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl10():
model_name = 'ipcl_alexnet_gn_u128_FacesPlacesObjects1281167'
filename = 'alexnet_gn_u128_FacesPlacesObjects1281167_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 10,
"type": "ipcl",
"details": "train on faces-places-objects-1x-ImageNet",
"aug": "Set 2",
"top1_knn": 31.6,
"top1_linear": None,
"out_dim": 128,
"l2norm": True,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl11():
model_name = 'ipcl_alexnet_gn_u128_FacesPlacesObjects1281167x3'
filename = 'alexnet_gn_u128_FacesPlacesObjects1281167x3_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 11,
"type": "ipcl",
"details": "train on faces-places-objects-3x-ImageNet",
"aug": "Set 2",
"top1_knn": 33.9,
"top1_linear": None,
"out_dim": 128,
"l2norm": True,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl12():
model_name = 'ipcl_alpha_alexnet_gn_s1000_imagenet_wus_aug'
filename = 'alexnet_gn_s1000_imagenet_wus_aug_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 12,
"type": "category supervised",
"details": "trained with 5 augmentations per image to match IPCL",
"aug": "Set 1",
"top1_knn": 58.8,
"top1_linear": 55.7,
"out_dim": 1000,
"l2norm": False,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl13():
model_name = 'wusnet_alexnet_gn_s1000'
filename = 'alexnet_gn_supervised_final.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "wusnet", filename)
config = {
"ref#": 13,
"type": "category supervised",
"details": "trained with single augmentation per image",
"aug": "Set 1",
"top1_knn": 55.5,
"top1_linear": 54.5,
"out_dim": 1000,
"l2norm": False,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl14():
model_name = 'ipcl_alexnet_gn_s1000_imagenet'
filename = 'alexnet_gn_s1000_imagenet_base_aug3_blur0_rot0_w0_d0_std02_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 14,
"type": "category supervised",
"details": "ImageNet baseline with new augmentations",
"aug": "Set 2",
"top1_knn": 56.0,
"top1_linear": None,
"out_dim": 1000,
"l2norm": False,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl15():
model_name = 'ipcl_alexnet_gn_s1000_imagenet_rep1'
filename = 'alexnet_gn_s1000_imagenet_base_aug3_blur0_rot0_w0_d0_std02_rep1_final_weights_only.pth.tar'
weights_url = os.path.join(url_root, "project_instancenet", "ipcl", filename)
config = {
"ref#": 15,
"type": "category supervised",
"details": "primary model",
"aug": "Set 2",
"top1_knn": 56.0,
"top1_linear": None,
"out_dim": 1000,
"l2norm": False,
"mean": [0.5, 0.5, 0.5],
"std": [0.2, 0.2, 0.2]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
def ipcl16():
model_name = 'ipcl_alpha_alexnet_gn_u128_random'
filename = ''
weights_url = None
config = {
"ref#": 16,
"type": "untrained",
"details": "untrained model with random weights and biases",
"aug": "-",
"top1_knn": 3.5,
"top1_linear": 7.2,
"out_dim": 128,
"l2norm": True,
"mean": [0.485, 0.456, 0.406],
"std": [0.229, 0.224, 0.225]
}
print(config)
model, transform = build_alexnet_model(weights_url, config)
return model, transform
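# --- Hedged usage sketch (not part of the original module) ---
# Every ipclN() builder returns a (model, transform) pair, where the transform
# matches the preprocessing the checkpoint expects. A minimal inference sketch;
# it assumes Pillow is installed and that `image_path` points to an RGB image.
# Note that the first call downloads the checkpoint weights.
def _example_inference(image_path):
    from PIL import Image

    model, transform = ipcl1()
    model.eval()
    batch = transform(Image.open(image_path).convert("RGB")).unsqueeze(0)
    with torch.no_grad():
        embedding = model(batch)  # per the config above: a 128-d, L2-normalised embedding
    return embedding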
|
py | 7df722121fe6b2021f2ca33e9628ab4676db6c62 | #!/usr/bin/python
from __future__ import division
import argparse
import datetime, sys, os
import urllib2, json
import ttbindec, defs
p = argparse.ArgumentParser()
p.add_argument('ttbin', nargs='*', type=argparse.FileType('rb'), default=sys.stdin, help='.ttbin files (default stdin)')
p.add_argument('-g', '--geolocate', action='store_true', help='Geolocate using Google Maps api')
p.add_argument('-r', '--rename', action='store_true', help='Rename files to YYYY-MM-DDTHH:MM:SS_Activity_Duration.ttbin')
args = p.parse_args()
for ttbin in args.ttbin:
if len(args.ttbin)>1:
indent=" "
print "\n%s:" % ttbin.name
else:
indent=""
data = ttbin.read()
tag, fh, rls, offset = ttbindec.read_header(data)
fw = '%d.%d.%d' % tuple(fh.firmware_version)
product = "%d (%s)" % (fh.product_id, {1001:"Runner",1002:"MultiSport"}.get(fh.product_id,"Unknown"))
start_time = datetime.datetime.fromtimestamp(fh.start_time-fh.local_time_offset)
activity, laps, distance, duration, end_time = 'UNKNOWN', 1, None, 'UNKNOWN', None
fp = None
while offset<len(data):
tag, rec, offset = ttbindec.read_record(data, offset)
if isinstance(rec, defs.FILE_STATUS_RECORD):
try:
activity = defs.C_ACTIVITY(rec.activity).name.title()
except ValueError:
activity = None
end_time = datetime.datetime.fromtimestamp(rec.timestamp-fh.local_time_offset)
elif isinstance(rec, defs.FILE_LAP_RECORD):
laps += 1
elif isinstance(rec, defs.FILE_SUMMARY_RECORD):
distance = rec.distance
duration = "%02d:%02d:%02d" % (rec.duration//3600, (rec.duration//60)%60, rec.duration%60)
elif fp is None and isinstance(rec, defs.FILE_GPS_RECORD):
lat, long = "%08.d"%rec.latitude, "%08.d"%rec.longitude
fp = "%s.%s,%s.%s" % (lat[:-7],lat[-7:],long[:-7],long[-7:])
print indent+"Device: %s, firmware v%s" % (product, fw)
print indent+"Activity: %s" % activity
print indent+"Start time: %s" % start_time.isoformat()
print indent+"End time: %s" % end_time.isoformat()
print indent+"Duration: %s" % duration
if fp:
print indent+"Start location: %s" % fp
if args.geolocate:
try:
j = json.load(urllib2.urlopen("http://maps.googleapis.com/maps/api/geocode/json?latlng=%s" % fp))
addr = j['results'][0]['formatted_address']
print indent+" %s" % addr
except Exception:
pass
if distance:
print indent+"Distance: %gm, %d laps" % (distance, laps)
if args.rename and ttbin is not sys.stdin:
ttbin.close()
newfn = '%s_%s_%s.ttbin' % (start_time.isoformat(), activity, duration)
newpath = os.path.join(os.path.dirname(ttbin.name), newfn)
if os.path.exists(newpath):
if not os.path.samefile(ttbin.name, newpath):
print>>sys.stderr, "File exists, not renaming: %s" % newpath
else:
os.rename(ttbin.name, newpath)
print indent+"Renamed to %s" % newfn
|
py | 7df722c15f501106c8b215f16456dc4cc03ea838 | # sslutil.py - SSL handling for mercurial
#
# Copyright 2005, 2006, 2007, 2008 Matt Mackall <[email protected]>
# Copyright 2006, 2007 Alexis S. L. Carvalho <[email protected]>
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os, sys
from mercurial import util
from mercurial.i18n import _
_canloaddefaultcerts = False
try:
# avoid using deprecated/broken FakeSocket in python 2.6
import ssl
CERT_REQUIRED = ssl.CERT_REQUIRED
try:
ssl_context = ssl.SSLContext
_canloaddefaultcerts = util.safehasattr(ssl_context,
'load_default_certs')
def ssl_wrap_socket(sock, keyfile, certfile, cert_reqs=ssl.CERT_NONE,
ca_certs=None, serverhostname=None):
# Allow any version of SSL starting with TLSv1 and
# up. Note that specifying TLSv1 here prohibits use of
# newer standards (like TLSv1_2), so this is the right way
# to do this. Note that in the future it'd be better to
# support using ssl.create_default_context(), which sets
# up a bunch of things in smart ways (strong ciphers,
# protocol versions, etc) and is upgraded by Python
# maintainers for us, but that breaks too many things to
# do it in a hurry.
            sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            # explicitly disable SSLv2 and SSLv3 by setting the OP_NO_* option bits
            sslcontext.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
if certfile is not None:
sslcontext.load_cert_chain(certfile, keyfile)
sslcontext.verify_mode = cert_reqs
if ca_certs is not None:
sslcontext.load_verify_locations(cafile=ca_certs)
elif _canloaddefaultcerts:
sslcontext.load_default_certs()
sslsocket = sslcontext.wrap_socket(sock,
server_hostname=serverhostname)
# check if wrap_socket failed silently because socket had been
# closed
# - see http://bugs.python.org/issue13721
if not sslsocket.cipher():
raise util.Abort(_('ssl connection failed'))
return sslsocket
except AttributeError:
def ssl_wrap_socket(sock, keyfile, certfile, cert_reqs=ssl.CERT_NONE,
ca_certs=None, serverhostname=None):
sslsocket = ssl.wrap_socket(sock, keyfile, certfile,
cert_reqs=cert_reqs, ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_TLSv1)
# check if wrap_socket failed silently because socket had been
# closed
# - see http://bugs.python.org/issue13721
if not sslsocket.cipher():
raise util.Abort(_('ssl connection failed'))
return sslsocket
except ImportError:
CERT_REQUIRED = 2
import socket, httplib
def ssl_wrap_socket(sock, keyfile, certfile, cert_reqs=CERT_REQUIRED,
ca_certs=None, serverhostname=None):
if not util.safehasattr(socket, 'ssl'):
raise util.Abort(_('Python SSL support not found'))
if ca_certs:
raise util.Abort(_(
'certificate checking requires Python 2.6'))
ssl = socket.ssl(sock, keyfile, certfile)
return httplib.FakeSocket(sock, ssl)
def _verifycert(cert, hostname):
'''Verify that cert (in socket.getpeercert() format) matches hostname.
CRLs is not handled.
Returns error message if any problems are found and None on success.
'''
if not cert:
return _('no certificate received')
dnsname = hostname.lower()
def matchdnsname(certname):
return (certname == dnsname or
'.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1])
san = cert.get('subjectAltName', [])
if san:
certnames = [value.lower() for key, value in san if key == 'DNS']
for name in certnames:
if matchdnsname(name):
return None
if certnames:
return _('certificate is for %s') % ', '.join(certnames)
# subject is only checked when subjectAltName is empty
for s in cert.get('subject', []):
key, value = s[0]
if key == 'commonName':
try:
# 'subject' entries are unicode
certname = value.lower().encode('ascii')
except UnicodeEncodeError:
return _('IDN in certificate not supported')
if matchdnsname(certname):
return None
return _('certificate is for %s') % certname
return _('no commonName or subjectAltName found in certificate')
# CERT_REQUIRED means fetch the cert from the server all the time AND
# validate it against the CA store provided in web.cacerts.
#
# We COMPLETELY ignore CERT_REQUIRED on Python <= 2.5, as it's totally
# busted on those versions.
def _plainapplepython():
"""return true if this seems to be a pure Apple Python that
* is unfrozen and presumably has the whole mercurial module in the file
system
* presumably is an Apple Python that uses Apple OpenSSL which has patches
for using system certificate store CAs in addition to the provided
cacerts file
"""
if sys.platform != 'darwin' or util.mainfrozen() or not sys.executable:
return False
exe = os.path.realpath(sys.executable).lower()
return (exe.startswith('/usr/bin/python') or
exe.startswith('/system/library/frameworks/python.framework/'))
def _defaultcacerts():
"""return path to CA certificates; None for system's store; ! to disable"""
if _plainapplepython():
dummycert = os.path.join(os.path.dirname(__file__), 'dummycert.pem')
if os.path.exists(dummycert):
return dummycert
if _canloaddefaultcerts:
return None
return '!'
def sslkwargs(ui, host):
kws = {}
hostfingerprint = ui.config('hostfingerprints', host)
if hostfingerprint:
return kws
cacerts = ui.config('web', 'cacerts')
if cacerts == '!':
pass
elif cacerts:
cacerts = util.expandpath(cacerts)
if not os.path.exists(cacerts):
raise util.Abort(_('could not find web.cacerts: %s') % cacerts)
else:
cacerts = _defaultcacerts()
if cacerts and cacerts != '!':
ui.debug('using %s to enable OS X system CA\n' % cacerts)
ui.setconfig('web', 'cacerts', cacerts, 'defaultcacerts')
if cacerts != '!':
kws.update({'ca_certs': cacerts,
'cert_reqs': CERT_REQUIRED,
})
return kws
class validator(object):
def __init__(self, ui, host):
self.ui = ui
self.host = host
def __call__(self, sock, strict=False):
host = self.host
cacerts = self.ui.config('web', 'cacerts')
hostfingerprint = self.ui.config('hostfingerprints', host)
if not getattr(sock, 'getpeercert', False): # python 2.5 ?
if hostfingerprint:
raise util.Abort(_("host fingerprint for %s can't be "
"verified (Python too old)") % host)
if strict:
raise util.Abort(_("certificate for %s can't be verified "
"(Python too old)") % host)
if self.ui.configbool('ui', 'reportoldssl', True):
self.ui.warn(_("warning: certificate for %s can't be verified "
"(Python too old)\n") % host)
return
if not sock.cipher(): # work around http://bugs.python.org/issue13721
raise util.Abort(_('%s ssl connection error') % host)
try:
peercert = sock.getpeercert(True)
peercert2 = sock.getpeercert()
except AttributeError:
raise util.Abort(_('%s ssl connection error') % host)
if not peercert:
raise util.Abort(_('%s certificate error: '
'no certificate received') % host)
peerfingerprint = util.sha1(peercert).hexdigest()
nicefingerprint = ":".join([peerfingerprint[x:x + 2]
for x in xrange(0, len(peerfingerprint), 2)])
if hostfingerprint:
if peerfingerprint.lower() != \
hostfingerprint.replace(':', '').lower():
raise util.Abort(_('certificate for %s has unexpected '
'fingerprint %s') % (host, nicefingerprint),
hint=_('check hostfingerprint configuration'))
self.ui.debug('%s certificate matched fingerprint %s\n' %
(host, nicefingerprint))
elif cacerts != '!':
msg = _verifycert(peercert2, host)
if msg:
raise util.Abort(_('%s certificate error: %s') % (host, msg),
hint=_('configure hostfingerprint %s or use '
'--insecure to connect insecurely') %
nicefingerprint)
self.ui.debug('%s certificate successfully verified\n' % host)
elif strict:
raise util.Abort(_('%s certificate with fingerprint %s not '
'verified') % (host, nicefingerprint),
hint=_('check hostfingerprints or web.cacerts '
'config setting'))
else:
self.ui.warn(_('warning: %s certificate with fingerprint %s not '
'verified (check hostfingerprints or web.cacerts '
'config setting)\n') %
(host, nicefingerprint))
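# --- Hedged usage sketch (not part of the original module) ---
# _verifycert operates on the dictionary shape produced by
# socket.getpeercert(); a self-contained check against a hand-built cert:
def _example_verifycert():
    cert = {'subjectAltName': [('DNS', 'example.com'), ('DNS', '*.example.org')]}
    assert _verifycert(cert, 'example.com') is None          # exact match
    assert _verifycert(cert, 'www.example.org') is None      # wildcard match
    assert _verifycert(cert, 'evil.com') is not None         # mismatch reported
    assert _verifycert({}, 'example.com') is not None        # no certificate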
|
py | 7df7230e48d1f111ce7ccc2d74c7a621a4f246b4 | import numpy as np
import getopt, sys
import torch
import torch.nn as nn
from option.default_option import TrainOptions
from model_search import Network
from genotypes import *
from visualize import *
"""
Loads a model checkpoint specified by command-line arguments, with an option to visualize its architecture,
and displays the checkpoint's:
- epoch
- training loss
- training accuracy
- validation loss
- validation accuracy
Command Line Arguments:
- experiment name: --experiment=
- epoch number: --epoch=
- visualize the model as .png in /visualizations (bool): --visualize=
"""
opt = TrainOptions()
def initialize():
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
opt = TrainOptions()
criterion = nn.CrossEntropyLoss().to(device)
model = Network(opt.init_channels, 10, opt.layers, criterion)
model.to(device)
optimizer_model = torch.optim.SGD(model.parameters(), lr=opt.learning_rate, momentum=opt.momentum, weight_decay=opt.weight_decay)
optimizer_arch = torch.optim.Adam(model.arch_parameters(), lr=opt.arch_learning_rate, betas=opt.arch_betas, weight_decay=opt.arch_weight_decay)
return device, opt, criterion, model, optimizer_model, optimizer_arch
def load_checkpoint(LOAD_EPOCH, experiment):
"""
Loads model checkpoint metadata saved in /experiments at a particular epoch
"""
checkpoint = torch.load('experiments/' + experiment + '/weights_epoch_' + LOAD_EPOCH + '.pt', map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer_model.load_state_dict(checkpoint['optimizer_model_state_dict'])
optimizer_arch.load_state_dict(checkpoint['optimizer_arch_state_dict'])
epoch = checkpoint['epoch']
train_loss = checkpoint['train_loss']
train_acc_top1 = checkpoint['train_acc_top1']
train_acc_top5 = checkpoint['train_acc_top5']
valid_loss = checkpoint['valid_loss']
valid_acc_top1 = checkpoint['valid_acc_top1']
valid_acc_top5 = checkpoint['valid_acc_top5']
model_state_dict = checkpoint['model_state_dict']
arch_alphas = checkpoint['arch_alphas']
return epoch, train_loss, train_acc_top1, train_acc_top5, valid_loss, valid_acc_top1, valid_acc_top5, model_state_dict, arch_alphas
if __name__ == '__main__':
opt_list, _ = getopt.getopt(sys.argv[1:], 'x', ['experiment=', 'epoch=', 'visualize='])
experiment, LOAD_EPOCH, visualize = opt_list[0][1], opt_list[1][1], opt_list[2][1] == 'True'
device, opt, criterion, model, optimizer_model, optimizer_arch = initialize()
epoch, train_loss, train_acc_top1, train_acc_top5, valid_loss, valid_acc_top1, valid_acc_top5, model_state_dict, arch_alphas = load_checkpoint(LOAD_EPOCH, experiment)
if visualize:
temperature = opt.initial_temp * np.exp(opt.anneal_rate * epoch)
alpha_normal, alpha_reduce = arch_alphas[0], arch_alphas[1]
m_normal = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
torch.tensor([temperature]), torch.tensor(alpha_normal))
m_reduce = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
torch.tensor([temperature]) , torch.tensor(alpha_reduce))
alpha_normal = m_normal.sample().cpu().numpy()
alpha_reduce = m_reduce.sample().cpu().numpy()
ex = genotype(alpha_normal, alpha_reduce)
plot(ex.normal, './visualizations/' + experiment + '/normal_epoch_' + str(epoch))
plot(ex.reduce, './visualizations/' + experiment + '/reduction_epoch_' + str(epoch))
print("Saved visualization to normal.png and reduction.png")
print('SNAS status')
print('epoch:', epoch)
print('train_loss:', train_loss)
print('train_acc_top1:', train_acc_top1)
print('train_acc_top5:', train_acc_top5)
print('valid_loss:', valid_loss)
print('valid_acc_top1:', valid_acc_top1)
print('valid_acc_top5:', valid_acc_top5) |
py | 7df7234d1e43f3707f033ca969e3c3e502bac890 | #!/usr/bin/env python3
import json
import os
# import pandas as pd
from amd_plot_roofline_hierarchical_jg import roofline
# cloned from:
# https://github.com/Techercise/AMD-Instruction-Roofline-using-rocProf-Metrics
# -> blob/main/amd_postprocess_instruction.py
# -----------------------------------------------------------------------------
# https://github.com/Techercise/AMD-Instruction-Roofline-using-rocProf-Metrics
# https://arxiv.org/pdf/2110.08221.pdf
# -> Matthew Leinhauser <[email protected]>
# -> Sunita Chandrasekaran <[email protected]>
# "Metrics and Design of an Instruction Roofline Model for AMD GPUs"
# FP64_Gflops/s:
# P100=4761 (sm60, 56sms*64c/sm*1.33GHz, 16GB),
# V100=7833 (sm70, 80sms*64c/sm*1.53GHz, 16GB),
# A100=9746 (sm80, 108sms*64c/sm*1.41GHz),
# Mi100=11500
# -----------------------------------------------------------------------------
# {{{ read_json_data
def read_json_data(infile, kernel):
f = open(infile)
d = json.load(f)
f.close()
dd = d['runs'][0]['testcases']
labels = []
fetchsize = []
sq_insts_valu = []
writesize = []
sq_insts_salu = []
ns = []
for job_n in range(len(dd)):
# print(f'job_n={job_n}')
for metric_job_n in range(len(dd[job_n]['perfvars'])):
json_metric_name = dd[job_n]["perfvars"][metric_job_n]["name"]
json_metric_value = dd[job_n]["perfvars"][metric_job_n]["value"]
if json_metric_name == 'n_cubeside':
labels.append(f'-n{json_metric_value}')
elif json_metric_name == f'{kernel}_FetchSize':
fetchsize.append(json_metric_value)
elif json_metric_name == f'{kernel}_SQ_INSTS_VALU':
sq_insts_valu.append(json_metric_value)
elif json_metric_name == f'{kernel}_WriteSize':
writesize.append(json_metric_value)
elif json_metric_name == f'{kernel}_SQ_INSTS_SALU':
sq_insts_salu.append(json_metric_value)
elif json_metric_name == f'{kernel}_ns':
ns.append(json_metric_value)
# print(labels)
# print(fetchsize)
# print(sq_insts_valu)
# print(writesize)
# print(sq_insts_salu)
# print(ns)
return (labels, fetchsize, sq_insts_valu, writesize, sq_insts_salu, ns)
# }}}
# {{{ compute roofs, Intensity and GIPS:
# 1ns=10^-9s / 1us=10^-6s
def calc_Instructions(sq_insts_valu, sq_insts_salu):
"""
dfmetric['Instructions'] = \
(dfmetric['SQ_INSTS_VALU'] * 4) + dfmetric['SQ_INSTS_SALU']
"""
# TODO: return [sq_insts_salu[ii] + sq_insts_valu[ii] * 4 for ...]
res = []
for ii in range(len(sq_insts_valu)):
res.append(sq_insts_salu[ii] + sq_insts_valu[ii] * 4)
return res
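# e.g. calc_Instructions([10], [3]) -> [43]   (10 VALU instructions * 4 + 3 SALU)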
def calc_Time(ns):
"""
dfmetric['Time'] = dfmetric['time'] * pow(10, -6) # = from usec to sec
nsec * 10^-9 = sec # = from nsec to sec
"""
return [ii * pow(10, -9) for ii in ns]
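# e.g. calc_Time([2461.174]) -> [2.461174e-06]   (nanoseconds -> seconds)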
def calc_Instructions_Intensity(inst, fetchsize, writesize, time):
"""
dfmetric['Instruction Intensity HBM'] = \
(dfmetric['Instructions'] / 64) /
((dfmetric['Fetch_Size'] + dfmetric['Write_Size']) * dfmetric['Time'])
"""
res = []
for ii in range(len(inst)):
res.append((inst[ii] / 64) /
((fetchsize[ii] + writesize[ii]) * time[ii]))
return res
def calc_GIPS(inst, time):
"""
dfmetric['GIPS'] = (dfmetric['Instructions'] / 64) /
(pow(10, 9) * dfmetric['Time'])
"""
res = []
wavefront = 64
for ii in range(len(inst)):
res.append((inst[ii] / wavefront) / (pow(10, 9) * time[ii]))
return res
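# e.g. calc_GIPS([6.4e9], [0.1]) -> [1.0]   (6.4e9/64 = 1e8 wavefront instructions in 0.1 s = 1 GIPS)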
# }}}
# {{{ plot:
def plot_roofline(jsonfile, gips, Instruction_Intensity_HBM, labels, flag,
kernel):
l2_ai = [0 for z in gips]
l1_ai = [0 for z in gips]
roofline(jsonfile, gips, Instruction_Intensity_HBM, l2_ai, l1_ai, labels,
flag, kernel)
# }}}
if __name__ == '__main__':
jsonfile = 'res_amd_mi100/reframe.json'
# jsonfile = 'eff.json'
kernels = [
'computeMomentumAndEnergyIAD',
'findNeighborsKernel',
'computeIAD',
'density',
]
kernel = kernels[0]
# kernel = kernels[1]
# kernel = kernels[2]
# kernel = kernels[3]
lumi = True # False
if lumi:
# {{{ lumi:
for kernel in kernels:
            labels, fetchsize, sq_insts_valu, writesize, sq_insts_salu, ns = \
                read_json_data(jsonfile, kernel)
time = calc_Time(ns)
# print(f'ns={ns}')
# print(f'time={time}')
inst = calc_Instructions(sq_insts_valu, sq_insts_salu)
            Instruction_Intensity_HBM = \
                calc_Instructions_Intensity(inst, fetchsize, writesize, time)
gips = calc_GIPS(inst, time)
flag = 'HBM'
print(f'# kernel={kernel}')
# print(ns)
# print(time)
# print(inst)
print(f'# jsonfile={jsonfile}')
print(f'Instruction_Intensity_HBM={Instruction_Intensity_HBM}')
print(f'gips={gips}')
print(f'labels={labels}')
print(f'flag={flag}')
plot_roofline(jsonfile, gips, Instruction_Intensity_HBM, labels,
flag, kernel)
os.rename('generic_mi100_roofline.png',
f'{kernel}_mi100_roofline.png')
# }}}
debug = False
if debug:
# {{{ debug:
# {{{ xls ok:fig7 TWEAC_simulations/mi100_tweac_cc_inst_output.csv
# -> gips=[4.993347263108402]
# -> Instruction_Intensity_HBM=[0.40753481867458635]
# metrics,SQ_INSTS_SALU,inst,7430073024
# metrics,SQ_INSTS_VALU,inst,17764624449
# metrics,FetchSize,bytes,11460394000
# metrics,WriteSize,bytes,792172000
# metrics,time,us,245603.571 <----- us ?
sq_insts_salu = [7430073024]
sq_insts_valu = [17764624449]
fetchsize = [11460394000]
writesize = [792172000]
ns = [245603.571]
# }}}
# {{{ ok:fig6 LWFA_simulations/mi100_lw_cc_inst_output.csv
# Instruction_Intensity_HBM=[1.862501575963134]
# gips=[2.855576241257221]
sq_insts_salu = [30791040]
sq_insts_valu = [104751360]
fetchsize = [1124711000]
writesize = [408483000]
ns = [2461.174]
# }}}
# {{{ ?: LWFA_simulations/mi100_lw_pp_inst_output.csv
# Instruction_Intensity_HBM=[2.520477925887229]
# gips=[4.709127371397627]
# sq_insts_salu = [28203840]
# sq_insts_valu = [309335040]
# fetchsize = [1229821000]
# writesize = [638526000]
# ns = [4199.106]
# }}}
time = calc_Time(ns)
inst = calc_Instructions(sq_insts_valu, sq_insts_salu)
gips = calc_GIPS(inst, time)
        Instruction_Intensity_HBM = \
            calc_Instructions_Intensity(inst, fetchsize, writesize, time)
labels = ['x']
flag = 'HBM'
print(f'Instruction_Intensity_HBM={Instruction_Intensity_HBM}')
print(f'gips={gips}')
# }}}
|
py | 7df726885ace3945c2a10a94bd9b4c7a20a82135 | class Node:
def __init__(self, name):
self.name = name
self.children = set()
self.parents = set()
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
# parse the nodes
nodes = {}
with open(r'C:\Users\jeremymill\Documents\AoC2018\7\input.txt') as f:
for line in f:
line = line.split(' ')
nodename = line[1]
child = line[7]
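        # input lines look like "Step C must be finished before step A can begin.",
        # so token 1 is the prerequisite step and token 7 is the step that depends on it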
#if the nodename doesn't exist, create it
if nodename not in nodes:
nodes[nodename] = Node(nodename)
nodes[nodename].add_child(child)
#if the nodename does exist, add a child to it
else:
nodes[nodename].add_child(child)
#if the child isn't a node, create it and add a parent
if child not in nodes:
nodes[child] = Node(child)
nodes[child].add_parent(nodename)
#if the child is a node, just add a parent
else:
nodes[child].add_parent(nodename)
# find the node with no parent
roots = []
for nodename, node in nodes.items():
if len(node.parents) == 0:
roots.append(nodename)
roots = sorted(roots)
print(roots)
#start with node C
eligible_children = []
eliminated_children = [roots[0]]
for child in nodes[roots[0]].children:
print('root child: ', child)
eligible_children.append(child)
for root in roots[1:]:
eligible_children.append(root)
#result variable
result = roots[0]
print("e-children: ", eligible_children)
while len(eligible_children) > 0:
#sort alphabetically
eligible_children = sorted(eligible_children)
#make sure the next lowest eligible has eliminated parents
count = 0
childzero = eligible_children[0]
while True:
curr_count = count
for parent in nodes[eligible_children[count]].parents:
if parent not in eliminated_children:
count += 1
break
if curr_count == count:
break
#add the lowest eligible to the result
result += eligible_children[count]
eliminated_children.append(eligible_children[count])
#add the children of the lowest to eligible children
for child in nodes[eligible_children[count]].children:
if child not in eliminated_children:
eligible_children.append(child)
#remove the child
eligible_children.remove(eligible_children[count])
eligible_children = set(eligible_children)
print("e-children: ", eligible_children)
print(result)
|
py | 7df726a4d5a8bb1071e443bdf6a45dcd6685b3a1 | #! /usr/bin/env python
"""This module contains code to make, locate, and parse the appropriate segment
PSF library files to use for a given simulation. These files are made using
the WebbPSF psf_grid() method, and are turned into photutils GriddedPSFModel
objects using webbpsf.utils.to_griddedpsfmodel.
Author
------
- Lauren Chambers
Use
---
This module can be imported and called as such:
::
from mirage.psf.segment_psfs import get_gridded_segment_psf_library_list
lib = get_gridded_segment_psf_library_list(instrument, detector, filter,
out_dir, pupilname="CLEAR")
"""
import logging
import os
import time
from astropy.io import fits
import numpy as np
import pysiaf
import webbpsf
from webbpsf.gridded_library import CreatePSFLibrary
from webbpsf.utils import to_griddedpsfmodel
import multiprocessing
import functools
from mirage.logging import logging_functions
from mirage.psf.psf_selection import get_library_file
from mirage.utils.constants import LOG_CONFIG_FILENAME, STANDARD_LOGFILE_NAME
classdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
log_config_file = os.path.join(classdir, 'logging', LOG_CONFIG_FILENAME)
logging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME)
def _generate_psfs_for_one_segment(inst, ote, segment_tilts, out_dir, boresight, lib, detectors, filters, fov_pixels, nlambda, overwrite, i):
"""
Helper function for parallelized segment PSF calculations
For use with multiprocessing.Pool, the iterable argument must be in the last position
See doc string of generate_segment_psfs for input parameter definitions.
"""
logger = logging.getLogger('mirage.psf.segment_psfs._generate_psfs_for_one_segment')
i_segment = i + 1
segname = webbpsf.webbpsf_core.segname(i_segment)
logger.info('GENERATING SEGMENT {} DATA'.format(segname))
det_filt_match = False
for det in sorted(detectors):
for filt in list(filters):
if inst.name.lower() == 'nircam':
# Make sure the detectors and filters match for NIRCam LW/SW
# i.e. ignore SW filters if we're on LW, and vice versa
if (det in lib.nrca_short_detectors and filt not in lib.nrca_short_filters) \
or (det in lib.nrca_long_detectors and filt not in lib.nrca_long_filters):
continue
det_filt_match = True
# Define the filter and detector
inst.filter = filt
inst.detector = det
# Restrict the pupil to the current segment
pupil = webbpsf.webbpsf_core.one_segment_pupil(i_segment)
ote.amplitude = pupil[0].data
inst.pupil = ote
# Determine normalization factor - what fraction of total pupil is in this one segment?
full_pupil = fits.getdata(os.path.join(webbpsf.utils.get_webbpsf_data_path(), 'jwst_pupil_RevW_npix1024.fits.gz'))
pupil_fraction_for_this_segment = pupil[0].data.sum() / full_pupil.sum()
# Generate the PSF grid
# NOTE: we are choosing a polychromatic simulation here to better represent the
# complexity of simulating unstacked PSFs. See the WebbPSF website for more details.
grid = inst.psf_grid(num_psfs=1, save=False, all_detectors=False,
use_detsampled_psf=True, fov_pixels=fov_pixels,
oversample=1, overwrite=overwrite, add_distortion=False,
nlambda=nlambda, verbose=False)
# Apply correct normalization factor for the fraction of light in that segment.
# WebbPSF is outputting PSFs normalized to 1 by default even for the individual segments.
grid.data *= pupil_fraction_for_this_segment
# Remove and add header keywords about segment
del grid.meta["grid_xypos"]
del grid.meta["oversampling"]
grid.meta['SEGID'] = (i_segment, 'ID of the mirror segment')
grid.meta['SEGNAME'] = (segname, 'Name of the mirror segment')
grid.meta['XTILT'] = (round(segment_tilts[i, 0], 2), 'X tilt of the segment in micro radians')
grid.meta['YTILT'] = (round(segment_tilts[i, 1], 2), 'Y tilt of the segment in micro radians')
grid.meta['SMPISTON'] = (ote.segment_state[18][4], 'Secondary mirror piston (defocus) in microns')
grid.meta['SMXTILT'] = (ote.segment_state[18][0], 'Secondary mirror X Tilt in microradians')
grid.meta['SMYTILT'] = (ote.segment_state[18][1], 'Secondary mirror Y Tilt in microradians')
grid.meta['SMXTRANS'] = (ote.segment_state[18][2], 'Secondary mirror X Translation in microns')
grid.meta['SMYTRANS'] = (ote.segment_state[18][3], 'Secondary mirror Y Translation in microns')
grid.meta['FRACAREA'] = (pupil_fraction_for_this_segment, "Fractional area of OTE primary for this segment")
if boresight is not None:
grid.meta['BSOFF_V2'] = (boresight[0], 'Telescope boresight offset in V2 in arcminutes')
grid.meta['BSOFF_V3'] = (boresight[1], 'Telescope boresight offset in V3 in arcminutes')
# Write out file
filename = '{}_{}_{}_fovp{}_samp1_npsf1_seg{:02d}.fits'.format(inst.name.lower(), det.lower(), filt.lower(),
fov_pixels, i_segment)
filepath = os.path.join(out_dir, filename)
primaryhdu = fits.PrimaryHDU(grid.data)
tuples = [(a, b, c) for (a, (b, c)) in sorted(grid.meta.items())]
primaryhdu.header.extend(tuples)
hdu = fits.HDUList(primaryhdu)
hdu.writeto(filepath, overwrite=overwrite)
logger.info('Saved gridded library file to {}'.format(filepath))
if inst.name.lower()=='nircam' and det_filt_match == False:
raise ValueError('No matching filters and detectors given - all '
'filters are longwave but detectors are shortwave, '
'or vice versa.')
def generate_segment_psfs(ote, segment_tilts, out_dir, filters=['F212N', 'F480M'],
detectors='all', fov_pixels=1024, boresight=None, overwrite=False,
segment=None, jitter=None, nlambda=10, instrument='NIRCam', inst_options=None):
"""Generate NIRCam PSF libraries for all 18 mirror segments given a perturbed OTE
mirror state. Saves each PSF library as a FITS file named in the following format:
nircam_{filter}_fovp{fov size}_samp1_npsf1_seg{segment number}.fits
OR
fgs_{filter}_fovp{fov size}_samp1_npsf1_seg{segment number}.fits
Parameters
----------
ote : webbpsf.opds.OTE_Linear_Model_WSS object
WebbPSF OTE object describing perturbed OTE state with tip and tilt removed
segment_tilts : numpy.ndarray
List of X and Y tilts for each mirror segment, in microradians
out_dir : str
Directory in which to save FITS files
filters : str or list, optional
Which filters to generate PSF libraries for. Default is ['F212N', 'F480M']
(the two NIRCam filters used for most commissioning activities).
Note, this parameter is ignored for FGS sims.
detectors : str or list, optional
Which detectors to generate PSF libraries for. Default is 'all', which will generate all
detectors for whichever of NIRCam or FGS is specified by the instrument parameter.
fov_pixels : int, optional
Size of the PSF to generate, in pixels. Default is 1024.
boresight: list, optional
Telescope boresight offset in V2/V3 in arcminutes. This offset is added on top of the individual
segment tip/tilt values.
overwrite : bool, optional
True/False boolean to overwrite the output file if it already
        exists. Default is False.
segment : int or list
The mirror segment number or list of numbers for which to generate PSF libraries
jitter : float
Jitter value to use in the call to webbpsf when generating PSF library. If None
(default) the nominal jitter (7mas radial) is used.
nlambda : int
Number of wavelengths to use for polychromatic PSF calculations.
instrument : string
Which instrument to generate segment PSFs for. Should be either "NIRCam" or "FGS".
inst_options : dict
Optional; additional options to set on the NIRCam or FGS class instance used in this function.
Any items in this dict will be added into the .options dict prior to the PSF calculations.
"""
logger = logging.getLogger('mirage.psf.segment_psfs.generate_segment_psfs')
# Create webbpsf NIRCam instance
inst = webbpsf.Instrument(instrument)
# Create dummy CreatePSFLibrary instance to get lists of filter and detectors
lib = CreatePSFLibrary
# Define the filter list to loop through
if instrument.upper()=='FGS':
# FGS does not have an option for filters
filters = ['FGS']
else:
# NIRCam can have one or more named filters specified
if isinstance(filters, str):
filters = [filters]
elif not isinstance(filters, list):
raise TypeError('Please define filters as a string or list, not {}'.format(type(filters)))
# Define the detector list to loop through
if detectors == 'all':
detectors = inst.detector_list
elif isinstance(detectors, str):
detectors = [detectors]
elif not isinstance(detectors, list):
raise TypeError('Please define detectors as a string or list, not {}'.format(type(detectors)))
# Make sure segment is a list
segments = list(range(18))
if segment is not None:
if isinstance(segment, int):
segments = [segment]
elif isinstance(segment, list):
segments = segment
else:
raise ValueError("segment keyword must be either an integer or list of integers.")
# Allow for non-nominal jitter values
if jitter is not None:
if isinstance(jitter, float):
inst.options['jitter'] = 'gaussian'
inst.options['jitter_sigma'] = jitter
logger.info('Adding jitter: {} arcsec'.format(jitter))
elif isinstance(jitter, str):
allowed_strings = ['PCS=Coarse_Like_ITM', 'PCS=Coarse']
if jitter in allowed_strings:
inst.options['jitter'] = jitter
logger.info('Adding {} jitter'.format(jitter))
else:
logger.warning("Invalid jitter string. Must be one of: {}. Ignoring and using defaults.".format(allowed_strings))
else:
logger.warning("Wrong input to jitter, assuming defaults")
if inst_options is not None:
inst.options.update(inst_options)
# Set up multiprocessing pool
nproc = min(multiprocessing.cpu_count() // 2,18) # number of procs could be optimized further here. TBD.
# some parts of PSF calc are themselves parallelized so using
# fewer processes than number of cores is likely reasonable.
pool = multiprocessing.Pool(processes=nproc)
logger.info(f"Will perform parallelized calculation using {nproc} processes")
# Set up a function instance with most arguments fixed
calc_psfs_for_one_segment = functools.partial(_generate_psfs_for_one_segment, inst, ote, segment_tilts,
out_dir, boresight, lib, detectors,
filters, fov_pixels, nlambda, overwrite)
# Create PSF grids for all requested segments, detectors, and filters
pool_start_time = time.time()
results = pool.map(calc_psfs_for_one_segment, segments)
pool_stop_time = time.time()
logger.info('\n=========== Elapsed time (all segments): {} ============\n'.format(pool_stop_time - pool_start_time))
pool.close()
def get_gridded_segment_psf_library_list(instrument, detector, filtername,
library_path, pupilname="CLEAR"):
"""Find the filenames for the appropriate gridded segment PSF libraries and
read them into griddedPSFModel objects
Parameters
----------
instrument : str
Name of instrument the PSFs are from
detector : str
Name of the detector within ```instrument```
filtername : str
Name of filter used for PSF library creation
library_path : str
Path pointing to the location of the PSF library
pupilname : str, optional
Name of pupil wheel element used for PSF library creation. Default is "CLEAR".
Returns:
--------
libraries : list of photutils.griddedPSFModel
List of object containing segment PSF libraries
"""
logger = logging.getLogger('mirage.psf.segment_psfs.get_gridded_segment_psf_library_list')
library_list = get_segment_library_list(instrument, detector, filtername, library_path, pupil=pupilname)
logger.info("Segment PSFs will be generated using:")
for filename in library_list:
logger.info(os.path.basename(filename))
libraries = []
for filename in library_list:
with fits.open(filename) as hdulist:
# hdr = hdulist[0].header
# d = hdulist[0].data
#
# data = d[0][0]
# phdu = fits.PrimaryHDU(data, header=hdr)
# hdulist = fits.HDUList(phdu)
lib_model = to_griddedpsfmodel(hdulist)
libraries.append(lib_model)
return libraries
def get_segment_library_list(instrument, detector, filt,
library_path, pupil='CLEAR'):
"""Given an instrument and filter name along with the path of
the PSF library, find the appropriate 18 segment PSF library files.
Parameters
-----------
instrument : str
Name of instrument the PSFs are from
detector : str
Name of the detector within ```instrument```
filt : str
Name of filter used for PSF library creation
library_path : str
Path pointing to the location of the PSF library
pupil : str, optional
Name of pupil wheel element used for PSF library creation. Default is
'CLEAR'.
Returns
--------
library_list : list
List of the names of the segment PSF library files for the instrument
and filter name
"""
library_list = []
for seg_id in np.arange(1, 19):
segment_file = get_library_file(
instrument, detector, filt, pupil, '', 0, library_path,
segment_id=seg_id
)
library_list.append(segment_file)
return library_list
def get_segment_offset(segment_number, detector, library_list):
"""Convert vectors coordinates in the local segment control
coordinate system to NIRCam detector X and Y coordinates,
at least proportionally, in order to calculate the location
of the segment PSFs on the given detector.
Parameters
----------
    segment_number : int
Segment ID, i.e 3
detector : str
Name of NIRCam detector
library_list : list
List of the names of the segment PSF library files
Returns
-------
x_arcsec
The x offset of the segment PSF in arcsec
y_arcsec
The y offset of the segment PSF in arcsec
"""
# Verify that the segment number in the header matches the index
seg_index = int(segment_number) - 1
header = fits.getheader(library_list[seg_index])
assert int(header['SEGID']) == int(segment_number), \
"Uh-oh. The segment ID of the library does not match the requested " \
"segment. The library_list was not assembled correctly."
xtilt = header['XTILT']
ytilt = header['YTILT']
segment = header['SEGNAME'][:2]
sm_piston = header.get('SMPISTON',0)
# SM piston has, as one of its effects, adding tilt onto each segment,
# along with higher order WFE such as defocus. We model here the effect
# of SM piston onto the x and y offsets.
# Coefficients determined based on WAS influence function matrix, as
# derived from segment control geometries.
if segment.startswith('A'):
xtilt += sm_piston * 0.010502
elif segment.startswith('B'):
xtilt += sm_piston * -0.020093
elif segment.startswith('C'):
ytilt += sm_piston * 0.017761
# Next we work out the individual offsets from segment-level tilts
control_xaxis_rotations = {
'A1': 180, 'A2': 120, 'A3': 60, 'A4': 0, 'A5': -60,
'A6': -120, 'B1': 0, 'C1': 60, 'B2': -60, 'C2': 0,
'B3': -120, 'C3': -60, 'B4': -180, 'C4': -120,
'B5': -240, 'C5': -180, 'B6': -300, 'C6': -240
}
x_rot = control_xaxis_rotations[segment] # degrees
x_rot_rad = x_rot * np.pi / 180 # radians
# Note that y is defined as the x component and x is defined as the y component.
# This is because "xtilt" moves the PSF in the y direction, and vice versa.
tilt_onto_y = (xtilt * np.cos(x_rot_rad)) - (ytilt * np.sin(x_rot_rad))
tilt_onto_x = (xtilt * np.sin(x_rot_rad)) + (ytilt * np.cos(x_rot_rad))
umrad_to_arcsec = 1e-6 * (180./np.pi) * 3600
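    # i.e. 1 microradian ~= 0.206265 arcsec (206265 arcsec per radian)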
x_arcsec = 2 * umrad_to_arcsec * tilt_onto_x
y_arcsec = 2 * umrad_to_arcsec * tilt_onto_y
# Secondary mirror tilts and translations also shift the apparent location of each PSF,
# often referred to as "changing the boresight".
# Coefficients for this are worked out by Randal Telfer in
# "JWST Secondary Mirror Influence Functions", doc #JWST-PRES-043631
# Values here are taken from Rev C of that document. They are given in units of NIRCam SW pixels per micro-unit of SM pose.
# We include just the first order terms, neglecting the small higher order terms
sm_xtilt = header.get('SMXTILT', 0)
sm_ytilt = header.get('SMYTILT', 0)
sm_xtrans = header.get('SMXTRANS', 0)
sm_ytrans = header.get('SMYTRANS', 0)
nrc_pixelscale = 0.0311 # arcsec/pixel
x_boresight_offset = ( 1.27777*sm_ytilt - 0.71732*sm_xtrans) * nrc_pixelscale
y_boresight_offset = (-1.27363*sm_xtilt - 0.71571*sm_ytrans) * nrc_pixelscale
x_arcsec += x_boresight_offset
y_arcsec += y_boresight_offset
# Optionally, for more recent versions of webbpsf, the FITS header may simply contain the
# Hexike tilt coefficient that we want to use. If so, use that instead of all of the above!
# This method is superior, because it more correctly (and more simply) book-keeps the cross terms
# between different OTE pose terms into optical tip and tilt. In particular, this is needed for
# accurate modeling of radial translation corrections when using incoherent PSF calculations.
if f'S{segment_number:02d}XTILT' in header:
hexike_to_arcsec = 206265/webbpsf.constants.JWST_SEGMENT_RADIUS
# recall that Hexike tilt _around the X axis_ produces an offset _into Y_, and vice versa.
x_arcsec = header[f'S{segment_number:02d}YTILT'] * hexike_to_arcsec
# also recall coord flip of Y axis from OTE L.O.M in entrance pupil to exit pupil
y_arcsec = -header[f'S{segment_number:02d}XTILT'] * hexike_to_arcsec
# Optionally, arbitrary boresight offset may also be present in the FITS header metadata.
# If so, include that in the PSF too. Be careful about coordinate sign for the V2 axis!
try:
x_arcsec -= header['BSOFF_V2']*60 # BS offset values in header are in arcminutes
y_arcsec += header['BSOFF_V3']*60 #
except:
pass
return x_arcsec, y_arcsec
|
py | 7df726b4f6bb69f049b0850b686bf1942af07cdd | import math
import analizer.libs.MathFunctions as mt
list_errors_tg = list()
def acos(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
valor = ""
if column[i] >= -1 and 1 >= column[i]:
valor = math.acos(column[i])
else:
valor = "Error de dominio"
list_errors_tg.append("Error: 22003: la entrada esta fuera del dominio")
result.append(valor)
else:
result.append(column[i])
i += 1
return result
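# e.g. (assuming mt.convert_num_col leaves numeric entries unchanged)
#   acos([1, 2]) -> [0.0, "Error de dominio"], and the out-of-domain error is logged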
def acosd(column):
return mt.degrees(acos(column))
def asin(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
valor = ""
if column[i] >= -1 and 1 >= column[i]:
valor = math.asin(column[i])
else:
valor = "Error de dominio"
list_errors_tg.append("Error: 22003: la entrada esta fuera del dominio")
result.append(valor)
else:
result.append(column[i])
i += 1
return result
def asind(column):
return mt.degrees(asin(column))
def atan(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.atan(column[i]))
else:
result.append(column[i])
i += 1
return result
def atand(column):
return mt.degrees(atan(column))
def atan2(column1, column2):
i = 0
column1 = mt.convert_num_col(column1)
column2 = mt.convert_num_col(column2)
result = list()
while i < len(column1):
if (isinstance(column1[i], int) or isinstance(column1[i], float)) and (
isinstance(column2[i], int) or isinstance(column2[i], float)
):
result.append(math.atan2(column1[i], column2[i]))
else:
result.append(column1[i])
i += 1
return result
def atan2d(column1, column2):
return mt.degrees(atan2(column1, column2))
def cos(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.cos(column[i]))
else:
result.append(column[i])
i += 1
return result
def cosd(column):
return mt.degrees(cos(column))
def cot(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
if column[i] % math.pi != 0:
result.append((math.cos(column[i]) / math.sin(column[i])))
else:
result.append("Error de dominio")
list_errors_tg.append("Error: 22003: la entrada esta fuera del dominio")
else:
result.append(column[i])
i += 1
return result
def cotd(column):
return mt.degrees(cot(column))
def sin(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.sin(column[i]))
else:
result.append(column[i])
i += 1
return result
def sind(column):
return mt.degrees(sin(column))
def tan(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
if (column[i] - (math.pi / 2)) % (math.pi) != 0:
result.append(math.tan(column[i]))
else:
result.append("Error de dominio")
list_errors_tg.append("Error: 22003: la entrada esta fuera del dominio")
else:
result.append(column[i])
i += 1
return result
def tand(column):
return mt.degrees(tan(column))
def sinh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.sinh(column[i]))
else:
result.append(column[i])
i += 1
return result
def cosh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.cosh(column[i]))
else:
result.append(column[i])
i += 1
return result
def tanh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.tanh(column[i]))
else:
result.append(column[i])
i += 1
return result
def asinh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
result.append(math.asinh(column[i]))
else:
result.append(column[i])
i += 1
return result
def acosh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
if column[i] >= 1:
result.append(math.acosh(column[i]))
else:
result.append("Error de dominio")
list_errors_tg.append("Error: 22003: la entrada esta fuera del dominio")
else:
result.append(column[i])
i += 1
return result
def atanh(column):
i = 0
column = mt.convert_num_col(column)
result = list()
while i < len(column):
if isinstance(column[i], int) or isinstance(column[i], float):
if column[i] < 1 and column[i] > -1:
result.append(math.atanh(column[i]))
else:
result.append("Error de dominio")
list_errors_tg.append("Error: 22003: la entrada esta fuera del dominio")
else:
result.append(column[i])
i += 1
return result
|
py | 7df727cd0ba5eb481a9e2568ffdd063bfce90314 | from IPython.core.magic import Magics, line_magic, magics_class # type: ignore
from IPython.core.magic_arguments import (argument, magic_arguments, # type: ignore
parse_argstring) # type: ignore
from .main import find_dotenv, load_dotenv
@magics_class
class IPythonDotEnv(Magics):
@magic_arguments()
@argument(
'-o', '--override', action='store_true',
help="Indicate to override existing variables"
)
@argument(
'-v', '--verbose', action='store_true',
help="Indicate function calls to be verbose"
)
@argument('dotenv_path', nargs='?', type=str, default='.env',
help='Search in increasingly higher folders for the `dotenv_path`')
@line_magic
def dotenv(self, line):
args = parse_argstring(self.dotenv, line)
# Locate the .env file
dotenv_path = args.dotenv_path
try:
dotenv_path = find_dotenv(dotenv_path, True, True)
except IOError:
print("cannot find .env file")
return
# Load the .env file
load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
def load_ipython_extension(ipython):
"""Register the %dotenv magic."""
ipython.register_magics(IPythonDotEnv)
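# Typical use in an IPython session (assuming the extension is exposed as `dotenv`):
#   %load_ext dotenv
#   %dotenv -o -v relative/or/parent/.env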
|
py | 7df728509c11c705a768febf0c2e33a75e44066b | """
Use lldb Python SBValue.WatchPointee() API to create a watchpoint for write of '*g_char_ptr'.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SetWatchlocationAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Our simple source filename.
self.source = 'main.cpp'
# Find the line number to break inside main().
self.line = line_number(
self.source, '// Set break point at this line.')
# This is for verifying that watch location works.
self.violating_func = "do_bad_thing_with_location"
def test_watch_location(self):
"""Exercise SBValue.WatchPointee() API to set a watchpoint."""
self.build()
exe = self.getBuildArtifact("a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint on main.c.
breakpoint = target.BreakpointCreateByLocation(self.source, self.line)
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() == 1,
VALID_BREAKPOINT)
# Now launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
# We should be stopped due to the breakpoint. Get frame #0.
process = target.GetProcess()
self.assertEqual(process.GetState(), lldb.eStateStopped,
PROCESS_STOPPED)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
frame0 = thread.GetFrameAtIndex(0)
value = frame0.FindValue('g_char_ptr',
lldb.eValueTypeVariableGlobal)
pointee = value.CreateValueFromAddress(
"pointee",
value.GetValueAsUnsigned(0),
value.GetType().GetPointeeType())
# Watch for write to *g_char_ptr.
error = lldb.SBError()
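        # WatchPointee's positional arguments are assumed here to be
        # (resolve_location, read, write, error), i.e. watch writes but not reads.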
watchpoint = value.WatchPointee(True, False, True, error)
self.assertTrue(value and watchpoint,
"Successfully found the pointer and set a watchpoint")
self.DebugSBValue(value)
self.DebugSBValue(pointee)
# Hide stdout if not running with '-t' option.
if not self.TraceOn():
self.HideStdout()
print(watchpoint)
# Continue. Expect the program to stop due to the variable being
# written to.
process.Continue()
if (self.TraceOn()):
lldbutil.print_stacktraces(process)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonWatchpoint)
self.assertTrue(thread, "The thread stopped due to watchpoint")
self.DebugSBValue(value)
self.DebugSBValue(pointee)
self.expect(
lldbutil.print_stacktrace(
thread,
string_buffer=True),
exe=False,
substrs=[
self.violating_func])
# This finishes our test.
|
py | 7df729762df99035f9a9631be48809e2c22a5d1a | import cv2
class Filter(object):
'''invert colors'''
def __init__(self, params):
self.params = params
def apply(self, img):
return cv2.bitwise_not(img)
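# Example usage (hypothetical image path):
#   inverted = Filter(params={}).apply(cv2.imread('frame.png'))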
|
py | 7df72ae28e80a3c12a8f1a36276d88389c58eecd | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'ca_cert_path': 'str',
'control_plane': 'IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfigurationControlPlane',
'discovery': 'IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfigurationDiscovery',
'kind': 'str',
'node_registration': 'IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecInitConfigurationNodeRegistration'
}
attribute_map = {
'api_version': 'apiVersion',
'ca_cert_path': 'caCertPath',
'control_plane': 'controlPlane',
'discovery': 'discovery',
'kind': 'kind',
'node_registration': 'nodeRegistration'
}
def __init__(self, api_version=None, ca_cert_path=None, control_plane=None, discovery=None, kind=None, node_registration=None, local_vars_configuration=None): # noqa: E501
"""IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._ca_cert_path = None
self._control_plane = None
self._discovery = None
self._kind = None
self._node_registration = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if ca_cert_path is not None:
self.ca_cert_path = ca_cert_path
if control_plane is not None:
self.control_plane = control_plane
if discovery is not None:
self.discovery = discovery
if kind is not None:
self.kind = kind
if node_registration is not None:
self.node_registration = node_registration
@property
def api_version(self):
"""Gets the api_version of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def ca_cert_path(self):
"""Gets the ca_cert_path of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
        CACertPath is the path to the SSL certificate authority used to secure communications between node and control-plane. Defaults to \"/etc/kubernetes/pki/ca.crt\". TODO: revisit when there is defaulting from k/k  # noqa: E501
:return: The ca_cert_path of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:rtype: str
"""
return self._ca_cert_path
@ca_cert_path.setter
def ca_cert_path(self, ca_cert_path):
"""Sets the ca_cert_path of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration.
        CACertPath is the path to the SSL certificate authority used to secure communications between node and control-plane. Defaults to \"/etc/kubernetes/pki/ca.crt\". TODO: revisit when there is defaulting from k/k  # noqa: E501
:param ca_cert_path: The ca_cert_path of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:type: str
"""
self._ca_cert_path = ca_cert_path
@property
def control_plane(self):
"""Gets the control_plane of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:return: The control_plane of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:rtype: IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfigurationControlPlane
"""
return self._control_plane
@control_plane.setter
def control_plane(self, control_plane):
"""Sets the control_plane of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration.
:param control_plane: The control_plane of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:type: IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfigurationControlPlane
"""
self._control_plane = control_plane
@property
def discovery(self):
"""Gets the discovery of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:return: The discovery of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:rtype: IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfigurationDiscovery
"""
return self._discovery
@discovery.setter
def discovery(self, discovery):
"""Sets the discovery of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration.
:param discovery: The discovery of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:type: IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfigurationDiscovery
"""
self._discovery = discovery
@property
def kind(self):
"""Gets the kind of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:type: str
"""
self._kind = kind
@property
def node_registration(self):
"""Gets the node_registration of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:return: The node_registration of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:rtype: IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecInitConfigurationNodeRegistration
"""
return self._node_registration
@node_registration.setter
def node_registration(self, node_registration):
"""Sets the node_registration of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration.
:param node_registration: The node_registration of this IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration. # noqa: E501
:type: IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecInitConfigurationNodeRegistration
"""
self._node_registration = node_registration
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoXK8sClusterBootstrapV1alpha4KubeadmConfigSpecJoinConfiguration):
return True
return self.to_dict() != other.to_dict()
|
py | 7df72bab1d9cf1127c46fcec7f83270a88839613 | from output.models.ms_data.complex_type.ct_h011_xsd.ct_h011 import (
FooType,
MyType,
Root,
)
__all__ = [
"FooType",
"MyType",
"Root",
]
|
py | 7df72c460c682961c2394cf861f47a3418b5bd0c | '''
Defines general utility functions
'''
from models.architectures import ALLOWABLE_TYPES as ALLOWABLE_MODEL_TYPES
import os
#############################
# Dynamically set variables
#############################
class RuntimeDeterminedEnviromentVars( object ):
'''
Example use:
inputs = { 'num_samples_epoch': 100 }
cfg = { 'batch_size': 5, 'epoch_steps': [ '<LOAD_DYNAMIC>', 'steps_per_epoch' ] }
for key, value in cfg.items():
        if isinstance( value, list ) and len( value ) == 2 and value[0] == '<LOAD_DYNAMIC>':
RuntimeDeterminedEnviromentVars.register( cfg, key, value[1] )
RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
RuntimeDeterminedEnviromentVars.populate_registered_variables()
print( cfg ) # epoch_steps = 20
'''
registered_variables = []
is_loaded = False
# These are initialized in load_dynamic_variables
    steps_per_epoch = '' # An int that contains the number of steps the network will take per epoch
@classmethod
def load_dynamic_variables( cls, inputs, cfg ):
'''
Args:
inputs: a dict from train.py
cfg: a dict from a config.py
'''
cls.steps_per_epoch = inputs[ 'num_samples_epoch' ] // cfg[ 'batch_size' ]
cls.is_loaded = True
@classmethod
def register( cls, dict_containing_field_to_populate, field_name, attr_name ):
cls.registered_variables.append( [dict_containing_field_to_populate, field_name, attr_name] )
@classmethod
def register_dict( cls, dict_to_register ):
'''
Registers any fields in the dict that should be dynamically loaded.
Such fields should have value: [ '<LOAD_DYNAMIC>', attr_name ]
'''
for key, value in dict_to_register.items():
if isinstance( value, list ) and len( value ) == 2 and value[0] == '<LOAD_DYNAMIC>':
cls.register( dict_to_register, key, value[1] )
elif isinstance( value, dict ):
cls.register_dict( value )
@classmethod
def populate_registered_variables( cls ):
for dict_containing_field_to_populate, field_name, attr_name in cls.registered_variables:
dict_containing_field_to_populate[field_name] = getattr( cls, attr_name )
print( "\t{0}={1}".format( field_name, getattr( cls, attr_name ) ) )
###########################
# Utility functions
###########################
def validate_config( cfg ):
'''
Catches many general cfg errors.
'''
if cfg[ 'model_type' ] not in ALLOWABLE_MODEL_TYPES:
raise ValueError( "'model_type' in config.py must be one of {0}".format( ALLOWABLE_MODEL_TYPES ))
    if cfg[ 'model_type' ] != 'empty' and 'optimizer' not in cfg:
        raise ValueError( "an 'optimizer' must be specified" )
if 'optimizer' in cfg and 'optimizer_kwargs' not in cfg:
raise ValueError( "The arguments for the optimizer {0} must be given, named, in 'optimizer_kwargs'".format( cfg[ 'optimizer' ] ))
def load_config( cfg_dir, nopause=False ):
'''
Raises:
        ImportError if 'config.py' doesn't exist in cfg_dir
'''
if not os.path.isfile( os.path.join( cfg_dir, 'config.py' ) ):
raise ImportError( 'config.py not found in {0}'.format( cfg_dir ) )
import sys
try:
del sys.modules[ 'config' ]
except:
pass
sys.path.insert( 0, cfg_dir )
import config as loading_config
# cleanup
# print([ v for v in sys.modules if "config" in v])
# return
cfg = loading_config.get_cfg( nopause )
try:
del sys.modules[ 'config' ]
except:
pass
sys.path.remove(cfg_dir)
return cfg
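# Example (hypothetical directory): cfg = load_config('/path/to/experiment_dir', nopause=True)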
def update_keys(old_dict, key_starts_with, new_dict):
for k, v in new_dict.items():
if k.startswith(key_starts_with):
old_dict[k] = v
return old_dict |
py | 7df72d45b423697ee559f8caec84002751226ef7 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import numpy as np
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops.dataset_ops import Dataset
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_distributed
from tensorflow.python.keras.engine import training_eager
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils.generic_utils import slice_arrays
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.models.Model', 'keras.Model')
class Model(Network):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
# Create a cache for iterator get_next op.
self._iterator_get_next = weakref.WeakKeyDictionary()
# Create a cache for dataset - uninitialized iterators
self._dataset_iterator_cache = weakref.WeakKeyDictionary()
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
def _set_sample_weight_attributes(self, sample_weight_mode,
skip_target_weighing_indices):
"""Sets sample weight related attributes on the model."""
sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(
self.output_names, sample_weight_mode, skip_target_weighing_indices)
self.sample_weights = sample_weights
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = [
sample_weight_modes[i]
for i in range(len(self.outputs))
if i not in skip_target_weighing_indices
]
self._feed_sample_weights = [
sample_weights[i]
for i in range(len(sample_weights))
if i not in skip_target_weighing_indices
]
def _cache_output_metric_attributes(self, metrics, weighted_metrics):
"""Caches metric name and function attributes for every model output."""
output_shapes = [
None if output is None else output.get_shape().as_list()
for output in self.outputs
]
self._per_output_metrics = training_utils.collect_per_output_metric_info(
metrics, self.output_names, output_shapes, self.loss_functions)
self._per_output_weighted_metrics = \
training_utils.collect_per_output_metric_info(
weighted_metrics, self.output_names, output_shapes,
self.loss_functions, self.sample_weights)
def _add_unique_metric_name(self, metric_name, output_index):
"""Makes the metric name unique and adds it to the model's metric name list.
If there are multiple outputs for which the metrics are calculated, the
metric names have to be made unique by appending an integer.
Arguments:
metric_name: Metric name that corresponds to the metric specified by the
user. For example: 'acc'.
output_index: The index of the model output for which the metric name is
being added.
Returns:
string, name of the model's unique metric name
"""
if len(self.output_names) > 1:
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
def _init_metric_attributes(self):
"""Initialized model metric attributes."""
self.metrics_names = ['loss']
self.metrics_tensors = []
self.metrics_updates = []
self.stateful_metric_names = []
self.stateful_metric_functions = []
def _set_per_output_metric_attributes(self, metrics_dict, output_index):
"""Sets the metric attributes on the model for the given output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
output_index: The index of the model output for which the metric
attributes are added.
"""
for metric_name, metric_fn in metrics_dict.items():
metric_name = self._add_unique_metric_name(metric_name, output_index)
# Keep track of metric name.
self.metrics_names.append(metric_name)
# Keep track of stateful metric attributes (name and metric function).
if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful:
self.stateful_metric_names.append(metric_name)
self.stateful_metric_functions.append(metric_fn)
def _set_metric_attributes(self, outputs, skip_target_indices=None):
"""Sets the metric attributes on the model for all the model outputs."""
skip_target_indices = skip_target_indices or []
for i in range(len(outputs)):
if i in skip_target_indices:
continue
self._set_per_output_metric_attributes(self._per_output_metrics[i], i)
self._set_per_output_metric_attributes(
self._per_output_weighted_metrics[i], i)
def _handle_per_output_metrics(self,
metrics_dict,
y_true,
y_pred,
mask,
weights=None):
"""Calls metric functions for a single output.
Arguments:
metrics_dict: A dict with metric names as keys and metric fns as values.
y_true: Target output.
y_pred: Predicted output.
mask: Computed mask value for the current output.
weights: Weights to be applied on the current output.
Returns:
A list of metric result tensors.
"""
metric_results = []
for metric_name, metric_fn in metrics_dict.items():
with K.name_scope(metric_name):
if isinstance(metric_fn, metrics_module.Metric):
# Call the stateful metric function.
if mask is not None:
mask = math_ops.cast(mask, y_pred.dtype)
# Update weights with mask.
if weights is None:
weights = mask
else:
# Update shape of weights if possible before adding mask.
# Update dimensions of weights to match with mask if possible.
mask, _, weights = metrics_module.squeeze_or_expand_dimensions(
mask, None, weights)
try:
# Broadcast weights if possible.
weights = weights_broadcast_ops.broadcast_weights(weights, mask)
except ValueError:
pass
# TODO(psv): Handle case when mask and weight shapes are not
# compatible.
weights *= mask
metric_result = metric_fn(y_true, y_pred, weights)
else:
# Call the stateless metric function.
weighted_metric_fn = training_utils.weighted_masked_objective(
metric_fn)
metric_result = weighted_metric_fn(
y_true, y_pred, weights=weights, mask=mask)
if not context.executing_eagerly():
# Keep track of metric result tensor.
self.metrics_tensors.append(metric_result)
metric_results.append(metric_result)
is_stateful = isinstance(metric_fn,
base_layer.Layer) and metric_fn.stateful
if is_stateful and not context.executing_eagerly():
# Keep track of updates created by stateful metrics.
self.metrics_updates += metric_fn.updates
return metric_results
def _handle_metrics(self,
outputs,
skip_target_indices=None,
targets=None,
sample_weights=None,
masks=None):
"""Handles calling metric functions.
Arguments:
outputs: List of outputs (predictions).
skip_target_indices: Optional. List of target ids to skip.
targets: List of targets.
sample_weights: Optional list of sample weight arrays.
masks: List of computed output mask values.
Returns:
A list of metric result tensors.
"""
skip_target_indices = skip_target_indices or []
metric_results = []
with K.name_scope('metrics'):
for i in range(len(outputs)):
if i in skip_target_indices:
continue
output = outputs[i] if outputs else None
target = targets[i] if targets else None
output_mask = masks[i] if masks else None
metric_results.extend(
self._handle_per_output_metrics(self._per_output_metrics[i], target,
output, output_mask))
metric_results.extend(
self._handle_per_output_metrics(
self._per_output_weighted_metrics[i],
target,
output,
output_mask,
weights=sample_weights[i]))
return metric_results
@checkpointable.no_automatic_dependency_tracking
def compile(self,
optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
distribute=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/api_docs/python/tf/keras/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/api_docs/python/tf/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
to the model's outputs. If a dict, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
distribute: The DistributionStrategy instance that we want to use to
distribute the training of the model.
**kwargs: These arguments are passed to `tf.Session.run`.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
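Example (a sketch only; the layer sizes, output names and optimizer below are
hypothetical):
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(8,))
out_a = tf.keras.layers.Dense(1, name='out_a')(inputs)
out_b = tf.keras.layers.Dense(3, activation='softmax', name='out_b')(inputs)
model = tf.keras.Model(inputs=inputs, outputs=[out_a, out_b])
# Per-output losses and metrics are passed as dicts keyed by output name.
model.compile(optimizer=tf.train.RMSPropOptimizer(0.01),
              loss={'out_a': 'mse', 'out_b': 'categorical_crossentropy'},
              loss_weights={'out_a': 1., 'out_b': 0.2},
              metrics={'out_b': 'accuracy'})
```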
"""
# Validate that arguments passed by the user to `compile` are supported by
# DistributionStrategy.
if distribute and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise NotImplementedError('Only TF native optimizers are supported with '
'DistributionStrategy.')
if distribute and context.executing_eagerly():
raise NotImplementedError('DistributionStrategy is not supported in '
'Eager mode.')
if distribute and sample_weight_mode:
raise NotImplementedError('sample_weight_mode is not supported with '
'DistributionStrategy.')
if distribute and weighted_metrics:
raise NotImplementedError('weighted_metrics is not supported with '
'DistributionStrategy.')
if distribute and target_tensors:
raise ValueError('target_tensors is not supported with '
'DistributionStrategy.')
loss = loss or {}
if context.executing_eagerly() and not isinstance(
optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):
raise ValueError('Only TF native optimizers are supported in Eager mode.')
self.optimizer = optimizers.get(optimizer)
# We've disabled automatic dependency tracking for this method, but do want
# to add a checkpoint dependency on the optimizer if it's checkpointable.
if isinstance(self.optimizer, checkpointable.CheckpointableBase):
self._track_checkpointable(
self.optimizer, name='optimizer', overwrite=True)
self.loss = loss
self.metrics = metrics or []
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self.weighted_metrics = weighted_metrics
if context.executing_eagerly() and target_tensors is not None:
raise ValueError('target_tensors is not supported in Eager mode.')
self.target_tensors = target_tensors
# Set DistributionStrategy specific parameters.
self._distribution_strategy = distribute
# Reset the value of grouped_model
self._grouped_model = None
if self._distribution_strategy is not None:
distributed_training_utils.configure_and_create_session(
self._distribution_strategy)
if not self.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
logging.warning(
'Output "' + name +
'" missing from loss dictionary. We assume '
'this was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to "' + name + '".')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' + str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [training_utils.weighted_masked_objective(fn)
for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
if not context.executing_eagerly():
masks = [getattr(x, '_keras_mask', None) for x in self.outputs]
if not isinstance(masks, list):
masks = [masks]
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError(
'Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError(
'When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' + str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) + ' - expected a list or dict.')
self.loss_weights_list = loss_weights_list
# Initialize model metric attributes.
self._init_metric_attributes()
# Initialization for Eager mode execution.
if context.executing_eagerly():
# Prepare sample weights.
self._set_sample_weight_attributes(sample_weight_mode,
skip_target_weighing_indices)
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
if target_tensors is not None:
raise ValueError('target_tensors are not currently supported in Eager '
'mode.')
self.total_loss = None
for i in range(len(self.outputs)):
if len(self.outputs) > 1:
self.metrics_names.append(self.output_names[i] + '_loss')
# Set metric attributes on model.
self._set_metric_attributes(
self.outputs,
skip_target_indices=skip_target_indices,
)
self.targets = []
for i in range(len(self.outputs)):
self._feed_output_names.append(self.output_names[i])
self._collected_trainable_weights = self.trainable_weights
return
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors not in (None, []):
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' + str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError(
'Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' + str(self.output_names))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
else:
raise TypeError('Expected `target_tensors` to be '
'a list or dict, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = K.int_shape(self.outputs[i])
name = self.output_names[i]
if target_tensors not in (None, []):
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
self._set_sample_weight_attributes(sample_weight_mode,
skip_target_weighing_indices)
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = self.sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# Set metric attributes on model.
self._set_metric_attributes(
self.outputs,
skip_target_indices=skip_target_indices,
)
# Invoke metric functions for all the outputs.
self._handle_metrics(
self.outputs,
masks=masks,
targets=self.targets,
skip_target_indices=skip_target_indices,
sample_weights=self.sample_weights)
# If using distribution strategy and stateful_metrics, raise an error
# since we currently don't support stateful metrics.
if self._distribution_strategy is not None and self.stateful_metric_names:
raise NotImplementedError('Stateful metrics are not supported with '
'DistributionStrategy.')
# Prepare gradient updates and state updates.
self.total_loss = total_loss
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
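For example, toggling `model.trainable` to freeze or unfreeze layers and then
calling `fit` without an intervening `model.compile(...)` is the typical way
this inconsistency arises; compiling again refreshes
`_collected_trainable_weights`.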
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.warning(
UserWarning(
'Discrepancy between trainable weights and collected trainable'
' weights. Did you set `model.trainable` without calling'
' `model.compile` afterwards?'))
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
# Stateful metrics updates
updates += self.metrics_updates
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs, [self.total_loss] + self.metrics_tensors,
updates=self.state_updates + self.metrics_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _get_iterator_get_next_tensors(self, iterator):
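"""Returns the `get_next` op for `iterator`, creating and caching it on
first use (at most one op per iterator)."""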
get_next_op = self._iterator_get_next.get(iterator, None)
if get_next_op is None:
get_next_op = iterator.get_next()
self._iterator_get_next[iterator] = get_next_op
return get_next_op
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0):
"""Runs validation checks on input and target data passed by the user.
This is called when using DistributionStrategy to train, evaluate or serve
the model.
Args:
x: Input data. A numpy array or `tf.data` dataset.
y: Target data. A numpy array or None if x is a `tf.data` dataset.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check for validity of `steps` and
False, otherwise.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
Returns:
Iterator for reading the dataset `x`.
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if sample_weight is not None and sample_weight.all():
raise NotImplementedError('`sample_weight` is currently not supported '
'when using DistributionStrategy.')
if class_weight:
raise NotImplementedError('`class_weight` is currently not supported '
'when using DistributionStrategy.')
# Validates `steps` argument right at the beginning since we use it to
# construct the dataset object.
# TODO(anjalisridhar): This may not be a valid error since we now accept
# numpy array inputs. We still want to assert that we have a populated steps
# parameter.
if check_steps:
if steps is None:
raise ValueError('When using DistributionStrategy, '
'you should specify the `{steps_name}` argument.'
.format(steps_name=steps_name))
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
x_shape = first_x_value.shape
x_dtype = first_x_value.dtype
if batch_size is None:
batch_size = x_shape[0] // steps
if y is not None:
first_y_value = nest.flatten(y)[0]
x = Dataset.from_generator(lambda x=x, y=y: six.moves.zip(x, y),
output_types=(x_dtype, first_y_value.dtype),
output_shapes=(x_shape[1:],
first_y_value.shape[1:]))
# TODO(anjalisridhar): What should the buffer size be?
x = x.shuffle(10000)
x = x.repeat()
x = x.batch(batch_size)
y = None
else:
# This case is for the predict call where the dataset only contains
# inputs and no targets, i.e. it does not return a tuple.
# TODO(anjalisridhar): Raise an error if we are not able to process
# all the predict samples. This can happen if the number of batches is
# not evenly divisible by the number of worker devices.
x = Dataset.from_generator(lambda x=x: x,
output_types=x_dtype,
output_shapes=x_shape[1:])
x = x.repeat()
x = x.batch(batch_size)
# TODO(anjalisridhar): Can we use the iterator and getnext op cache?
# We require users to pass Datasets since we distribute the dataset across
# multiple devices.
assert isinstance(x, dataset_ops.Dataset)
# TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a
# function which returns a Dataset. Currently distribute_dataset() only
# accepts a function that returns a Dataset. Once we add support for being
# able to clone a Dataset on multiple workers we can remove this lambda.
result = self._distribution_strategy.distribute_dataset(lambda: x)
iterator = result.make_initializable_iterator()
K.get_session().run(iterator.initializer)
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
return iterator
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check for validity of `steps` and
False, otherwise. For example, when we are standardizing one batch of
data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
value is not required and we should not check for its validity in these
cases.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
Returns:
A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict
or not), target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if self._distribution_strategy:
iterator = self._distribution_standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=check_steps,
steps_name=steps_name,
steps=steps,
validation_split=validation_split)
return iterator, None, None
if isinstance(x, dataset_ops.Dataset):
if context.executing_eagerly():
x = x.make_one_shot_iterator()
else:
if x in self._dataset_iterator_cache:
x = self._dataset_iterator_cache[x]
else:
iterator = x.make_initializable_iterator()
self._dataset_iterator_cache[x] = iterator
x = iterator
K.get_session().run(x.initializer)
# Validates `steps` argument based on x's type.
if check_steps:
training_utils.check_steps_argument(x, steps, steps_name)
is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)
is_x_iterator = isinstance(x, iterator_ops.Iterator)
# Validate user inputs when data is given as a dataset or dataset iterator.
if is_x_iterator or is_x_eager_iterator:
training_utils.validate_iterator_input(x, y, sample_weight,
validation_split)
# For eager iterators, when we have to process multiple batches of samples,
# we will standardize the data when we actually loop over iterator and get
# the batches. For now, we just return the iterator as is.
if is_x_eager_iterator and steps is not None:
return x, y, sample_weight
# If input data is a dataset iterator in graph mode or if it is an eager
# iterator and only one batch of samples is required, we fetch the data
# tensors from the iterator and then standardize them.
if is_x_iterator or is_x_eager_iterator:
try:
if is_x_iterator:
next_element = self._get_iterator_get_next_tensors(x)
else:
next_element = x.get_next()
except errors.OutOfRangeError:
raise RuntimeError('Your dataset iterator ran out of data. '
'Make sure that your dataset can generate the '
'required number of samples.')
if (not isinstance(next_element, (list, tuple)) or
len(next_element) not in [2, 3]):
raise ValueError(
'Please provide model inputs as a list or tuple of 2 or 3 '
'elements: (input, target) or (input, target, sample_weights). '
'Received %s' % next_element)
if len(next_element) == 2:
x, y = next_element
else:
x, y, sample_weight = next_element
x, y, sample_weights = self._standardize_weights(x, y, sample_weight,
class_weight, batch_size)
return x, y, sample_weights
def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,
batch_size=None,):
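"""Builds and compiles the model on the fly if needed, then standardizes
`x`, `y` and the sample weights to lists of arrays (see
`_standardize_user_data` for the public-facing contract)."""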
# TODO(sourabhbajaj): Split input validation from weight standardization.
if sample_weight is not None and class_weight is not None:
logging.warning(
'Received both a `sample_weight` and `class_weight` argument. '
'The `class_weight` argument will be ignored.')
# First, we build/compile the model on the fly if necessary.
all_inputs = []
is_build_called = False
is_compile_called = False
dict_inputs = False
if not self.inputs:
# We need to use `x` to set the model inputs.
# We type-check that `x` and `y` are either single arrays
# or lists of arrays.
if isinstance(x, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs += list(x)
elif isinstance(x, dict):
dict_inputs = True
keys = sorted(x.keys())
all_inputs = [x[k] for k in keys]
else:
if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):
raise ValueError('Please provide as model inputs either a single '
'array or a list of arrays. You passed: x=' + str(x))
all_inputs.append(x)
# Build the model using the retrieved inputs (value or symbolic).
# If values, then in symbolic-mode placeholders will be created
# to match the value shapes.
if not self.inputs:
is_build_called = True
self._set_inputs(x)
else:
dict_inputs = isinstance(self.inputs, dict)
if y is not None:
if not self.optimizer:
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
if not self._is_compiled:
# On-the-fly compilation of the model.
# We need to use `y` to set the model targets.
if isinstance(y, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
tensor_util.is_tensor(v) for v in y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
all_inputs += list(y)
elif isinstance(y, dict):
raise ValueError('Please do not pass a dictionary as model targets.')
else:
if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):
raise ValueError('Please provide as model targets either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
all_inputs.append(y)
# Typecheck that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if any(tensor_util.is_tensor(v) for v in all_inputs):
if not all(tensor_util.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(x) + '; y=' + str(y))
if context.executing_eagerly():
target_tensors = None
else:
# Handle target tensors if any passed.
if not isinstance(y, (list, tuple)):
y = [y]
target_tensors = [v for v in y if tensor_util.is_tensor(v)]
is_compile_called = True
self.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors)
# In graph mode, if we had just set inputs and targets as symbolic tensors
# by invoking build and compile on the model respectively, we do not have to
# feed anything to the model. Model already has input and target data as
# part of the graph.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
if (not context.executing_eagerly() and is_build_called and
is_compile_called and
any(tensor_util.is_tensor(v) for v in all_inputs)):
return [], [], []
# What follows is input validation and standardization to list format,
# in the case where all inputs are value arrays.
if context.executing_eagerly():
# In eager mode, do not do shape validation
# since the network has no input nodes (placeholders) to be fed.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
x = training_utils.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
if y is not None:
if not self._is_graph_network:
feed_output_names = self._feed_output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO(fchollet): consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
else:
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._feed_sample_weight_modes
feed_output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
if K.image_data_format() == 'channels_first':
feed_output_shapes.append(
(output_shape[0], 1) + output_shape[2:])
else:
feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
# or if it not in the `losses` module, then
# it is a user-defined loss and we make no assumptions
# about it.
feed_output_shapes.append(None)
else:
feed_output_shapes.append(output_shape)
# Standardize the outputs.
y = training_utils.standardize_input_data(
y,
feed_output_names,
feed_output_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
if not self._distribution_strategy:
training_utils.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not context.executing_eagerly():
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
else:
y = []
sample_weights = []
if self.stateful and batch_size:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
# If dictionary inputs were provided, we return a dictionary as well.
if dict_inputs:
x = dict(zip(feed_input_names, x))
return x, y, sample_weights
@checkpointable.no_automatic_dependency_tracking
def _set_inputs(self, inputs, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, or data tensors.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data: we create placeholders matching the shape of the Numpy
arrays. We expect Numpy data to be fed for these placeholders
when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If dict inputs are passed to a Sequential Model where the
first layer isn't FeatureLayer.
"""
call_convention = getattr(
self,
'_call_convention',
base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
if call_convention not in (
base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT,
base_layer.CallConvention.SINGLE_POSITIONAL_ARGUMENT):
raise NotImplementedError(
'Subclassed Models without "inputs" (or single positional arguments) '
'in their call() signatures do not yet support shape inference. File '
'a feature request if this limitation bothers you.')
if self.__class__.__name__ == 'Sequential':
if tensor_util.is_tensor(inputs):
input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
self.build(input_shape=input_shape)
elif isinstance(inputs, dict):
# We assert that the first layer is a FeatureLayer.
if not training_utils.is_feature_layer(self.layers[0]):
raise ValueError('Passing a dictionary input to a Sequential Model '
'which does not have FeatureLayer as the first layer '
'is an error')
input_shape = (None,)
self.build(input_shape=input_shape)
else:
input_shape = (None,) + inputs.shape[1:]
self.build(input_shape=input_shape)
if context.executing_eagerly():
self._eager_set_inputs(inputs)
else:
self._symbolic_set_inputs(inputs, training=training)
@checkpointable.no_automatic_dependency_tracking
def _eager_set_inputs(self, inputs):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
We assume the number and ndim of outputs
do not change over different calls.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
Raises:
ValueError: If the model's inputs are already set.
"""
assert context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of model inputs/outputs as DeferredTensors,
# to keep track of number of inputs and outputs and their ndim.
model_inputs = training_utils.ModelInputs(inputs)
dummy_input_values = model_inputs.get_input_values()
dummy_output_values = self.call(dummy_input_values)
self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.input_names = model_inputs.get_input_names()
dummy_output_values = nest.flatten(dummy_output_values)
self.outputs = [
base_layer.DeferredTensor(shape=(None
for _ in v.shape), dtype=v.dtype)
for v in dummy_output_values
]
self.output_names = [
'output_%d' % (i + 1) for i in range(len(dummy_output_values))]
self.built = True
@checkpointable.no_automatic_dependency_tracking
def _symbolic_set_inputs(self, inputs, outputs=None, training=None):
"""Set model's inputs and output specs based.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Argument `x` (input data) passed by the user upon first model use.
outputs: None, a data tensor, or a list of data tensors. If None, the
outputs will be determined by invoking self.call(), otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If the model's inputs are already set.
"""
assert not context.executing_eagerly()
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
model_inputs = training_utils.ModelInputs(inputs)
dummy_input_values = model_inputs.get_symbolic_inputs()
self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.input_names = model_inputs.get_input_names()
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
for k, v in model_inputs.as_dict():
if K.is_placeholder(v):
self._feed_inputs.append(v)
self._feed_input_names.append(k)
self._feed_input_shapes.append(K.int_shape(v))
if outputs is None:
# Obtain symbolic outputs by calling the model.
if self._expects_training_arg:
outputs = self.call(dummy_input_values, training=training)
else:
outputs = self.call(dummy_input_values)
outputs = nest.flatten(outputs)
self.outputs = outputs
self.output_names = [
'output_%d' % (i + 1) for i in range(len(self.outputs))]
self.built = True
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, dataset
iterator, generator, or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/api_docs/python/tf/keras/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, dataset iterator, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset or a dataset iterator
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, dataset iterator, generator, or
`keras.utils.Sequence` instance, instead provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
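Example (a minimal sketch; the model, shapes and random data are illustrative):
```python
import numpy as np
import tensorflow as tf
inputs = tf.keras.Input(shape=(16,))
outputs = tf.keras.layers.Dense(1)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=tf.train.GradientDescentOptimizer(0.1), loss='mse')
x = np.random.random((100, 16)).astype('float32')
y = np.random.random((100, 1)).astype('float32')
# Hold out the last 20% of the arrays as validation data.
history = model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
print(history.history['loss'], history.history['val_loss'])
```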
"""
# TODO(fchollet): this method may be creating reference cycles, which would
# lead to accumulating garbage in memory when called in a loop. Investigate.
if data_utils.is_generator_or_sequence(x):
training_utils.check_generator_arguments(y, sample_weight)
return self.fit_generator(
x,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
# Validate and standardize user data.
if self._distribution_strategy:
distributed_training_utils.validate_callbacks(callbacks)
distributed_training_utils.validate_inputs(x, y)
first_x_value = nest.flatten(x)[0]
if not steps_per_epoch and isinstance(first_x_value, np.ndarray):
steps_per_epoch = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps_per_epoch',
steps=steps_per_epoch,
validation_split=validation_split)
# Prepare validation data.
if validation_data:
if (isinstance(validation_data, iterator_ops.Iterator) or
isinstance(validation_data, iterator_ops.EagerIterator) or
isinstance(validation_data, dataset_ops.Dataset)):
val_x = validation_data
val_y = None
val_sample_weight = None
elif len(validation_data) == 2:
val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence
else:
raise ValueError(
'When passing a `validation_data` argument, '
'it must contain either 2 items (x_val, y_val), '
'or 3 items (x_val, y_val, val_sample_weights), '
'or alternatively it could be a dataset or a '
'dataset iterator. '
'However we received `validation_data=%s`' % validation_data)
# Validate and standardize validation data.
if self._distribution_strategy:
distributed_training_utils.validate_inputs(val_x, val_y)
first_valx_value = nest.flatten(val_x)[0]
if not validation_steps and isinstance(first_valx_value, np.ndarray):
validation_steps = distributed_training_utils.get_input_batch_params(
first_valx_value, batch_size, self._distribution_strategy)
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weight,
batch_size=batch_size,
steps=validation_steps)
elif validation_split and 0. < validation_split < 1.:
if training_utils.has_symbolic_tensors(x):
raise ValueError('If your data is in the form of symbolic tensors, '
'you cannot use `validation_split`.')
if hasattr(x[0], 'shape'):
split_at = int(x[0].shape[0] * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))
sample_weights, val_sample_weights = (slice_arrays(
sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))
elif validation_steps:
val_x = []
val_y = []
val_sample_weights = []
else:
val_x = None
val_y = None
val_sample_weights = None
if context.executing_eagerly():
return training_eager.fit_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
class_weight=class_weight,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
elif self._distribution_strategy:
return training_distributed.fit_loop(
self, x,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_iterator=val_x,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
else:
return training_arrays.fit_loop(
self, x, y,
sample_weights=sample_weights,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_inputs=val_x,
val_targets=val_y,
val_sample_weights=val_sample_weights,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
- A generator or `keras.utils.Sequence` instance.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely).
If `x` is a dataset, dataset iterator, generator or
`keras.utils.Sequence` instance, `y` should not be specified (since
targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset or a dataset iterator, instead pass
sample weights as the third element of `x`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
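Example (sketch; the model and the random test data are illustrative):
```python
import numpy as np
import tensorflow as tf
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.train.GradientDescentOptimizer(0.1),
              loss='mse', metrics=['mae'])
x_test = np.random.random((20, 4)).astype('float32')
y_test = np.random.random((20, 1)).astype('float32')
# Returns [loss, mae]; see `model.metrics_names` for the display labels.
loss, mae = model.evaluate(x_test, y_test, batch_size=5)
```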
"""
if data_utils.is_generator_or_sequence(x):
training_utils.check_generator_arguments(y, sample_weight)
return self.evaluate_generator(
x,
steps=steps,
verbose=verbose,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
# Validate and standardize user data.
if self._distribution_strategy:
distributed_training_utils.validate_inputs(x, y)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray) and not steps:
steps = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
x, y, sample_weights = self._standardize_user_data(
x,
y,
sample_weight=sample_weight,
batch_size=batch_size,
check_steps=True,
steps_name='steps',
steps=steps)
if context.executing_eagerly():
return training_eager.test_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps)
elif self._distribution_strategy:
return training_distributed.test_loop(
self,
iterator=x,
verbose=verbose,
steps=steps)
else:
return training_arrays.test_loop(
self,
inputs=x,
targets=y,
sample_weights=sample_weights,
batch_size=batch_size,
verbose=verbose,
steps=steps)
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
Computation is done in batches.
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
- A generator or `keras.utils.Sequence` instance.
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset, dataset iterators,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Numpy array(s) of predictions.
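Example:
A hedged sketch (the small model and random inputs are assumptions used
only to make the snippet self-contained):
```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(3, input_shape=(5,))
])
x = np.random.rand(32, 5)
# Predictions are returned as a Numpy array of shape (32, 3).
preds = model.predict(x, batch_size=8)
print(preds.shape)
```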
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
if data_utils.is_generator_or_sequence(x):
return self.predict_generator(
x,
steps=steps,
verbose=verbose,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if self._distribution_strategy:
# Turn off prefetching since this is currently not deterministic. Once
# b/112498930 is fixed we can turn it back on.
# `_prefetch_on_device` is currently a property of only
# `MirroredStrategy`.
if hasattr(self._distribution_strategy, '_prefetch_on_device'):
self._distribution_strategy._prefetch_on_device = False # pylint: disable=protected-access
distributed_training_utils.validate_inputs(x, None)
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray) and not steps:
steps = distributed_training_utils.get_input_batch_params(
first_x_value, batch_size, self._distribution_strategy)
# Validate and standardize user data.
# TODO(anjalisridhar): We don't pass batch_size here for some reason. This
# means that we end up calculating it twice which we should avoid.
x, _, _ = self._standardize_user_data(
x, check_steps=True, steps_name='steps', steps=steps)
if context.executing_eagerly():
return training_eager.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
elif self._distribution_strategy:
results = training_distributed.predict_loop(
self, x, verbose=verbose, steps=steps)
# Turn prefetching back on since we turned it off previously.
if hasattr(self._distribution_strategy, '_prefetch_on_device'):
self._distribution_strategy._prefetch_on_device = True # pylint: disable=protected-access
return results
else:
return training_arrays.predict_loop(
self, x, batch_size=batch_size, verbose=verbose, steps=steps)
def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
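Example:
A hedged sketch (the model and the single random batch are assumptions,
shown only to illustrate the calling convention):
```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
x_batch = np.random.rand(16, 4)
y_batch = np.random.rand(16, 1)
# With a single output and no metrics this returns a scalar loss;
# `test_on_batch` mirrors the same calling convention.
loss = model.train_on_batch(x_batch, y_batch)
```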
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if self._distribution_strategy:
raise NotImplementedError('`train_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight)
if context.executing_eagerly():
outputs = training_eager.train_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [1]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset or a dataset iterator.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset or a
dataset iterator, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset or a dataset iterator.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
if self._distribution_strategy:
raise NotImplementedError('`test_on_batch` is not supported for models '
'compiled with DistributionStrategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight)
if context.executing_eagerly():
outputs = training_eager.test_on_batch(
self, x, y, sample_weights=sample_weights)
else:
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = x + y + sample_weights + [0]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset or a dataset iterator.
Returns:
Numpy array(s) of predictions.
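Example:
A hedged sketch (the model and the batch are illustrative assumptions):
```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
x_batch = np.random.rand(8, 3)
# A single forward pass over the batch; no batching or shuffling is applied.
preds = model.predict_on_batch(x_batch)
```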
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_on_batch` is not supported for '
'models compiled with DistributionStrategy.')
# Validate and standardize user data.
inputs, _, _ = self._standardize_user_data(x)
if context.executing_eagerly():
if (isinstance(x, iterator_ops.EagerIterator) or
(isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())):
inputs = training_utils.cast_if_floating_dtype(inputs)
else:
inputs = [
ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs
]
return self(inputs) # pylint: disable=not-callable
if not context.executing_eagerly():
if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + [0]
else:
ins = inputs
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
Arguments:
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`)
object in order to avoid duplicate data
when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single batch.
Therefore, all arrays in this tuple must have the same length (equal
to the size of this batch). Different batches may have different
sizes.
For example, the last batch of the epoch is commonly smaller than the
others, if the size of the dataset is not divisible by the batch size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer, total number of iterations on the data.
verbose: Verbosity mode, 0, 1, or 2.
callbacks: List of callbacks to be called during training.
validation_data: This can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Dictionary mapping class indices to a weight
for the class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
Returns:
A `History` object.
Example:
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
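A `keras.utils.Sequence`-based sketch (the toy `RandomBatches` class and
the model below are assumptions, shown because `Sequence` is the
recommended input when `use_multiprocessing=True`):
```python
import numpy as np
import tensorflow as tf

class RandomBatches(tf.keras.utils.Sequence):

    def __init__(self, batch_size=16, num_batches=10):
        self.batch_size = batch_size
        self.num_batches = num_batches

    def __len__(self):
        return self.num_batches

    def __getitem__(self, idx):
        x = np.random.rand(self.batch_size, 4)
        y = np.random.rand(self.batch_size, 1)
        return x, y

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
# steps_per_epoch defaults to len(RandomBatches()) for Sequence inputs.
model.fit_generator(RandomBatches(), epochs=2)
```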
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`fit_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`fit_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
Arguments:
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of `keras.utils.Sequence`
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: maximum size for the generator queue
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: Verbosity mode, 0 or 1.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
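Example:
A hedged sketch (the infinite generator and the model are assumptions
used to keep the snippet self-contained):
```python
import numpy as np
import tensorflow as tf

def batch_generator(batch_size=16):
    while True:
        x = np.random.rand(batch_size, 4)
        y = np.random.rand(batch_size, 1)
        yield x, y

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
# `steps` is required for plain generators (unlike `Sequence` inputs).
loss = model.evaluate_generator(batch_generator(), steps=5)
```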
Raises:
    ValueError: In case of invalid arguments, or in case the generator
        yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`evaluate_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`evaluate_generator` is not yet enabled for '
'unbuilt Model subclasses')
return training_generator.evaluate_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def predict_generator(self,
generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
Arguments:
generator: Generator yielding batches of input samples
or an instance of `keras.utils.Sequence` object in order to
avoid duplicate data when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
verbose: verbosity mode, 0 or 1.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case the generator yields data in an invalid format.
"""
if self._distribution_strategy:
raise NotImplementedError('`predict_generator` is not supported for '
'models compiled with DistributionStrategy.')
if not self.built and not self._is_graph_network:
raise NotImplementedError(
'`predict_generator` is not yet enabled for unbuilt Model subclasses')
return training_generator.predict_generator(
self,
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
def _get_callback_model(self):
"""Returns the Callback Model for this Model."""
if hasattr(self, '_replicated_model') and self._replicated_model:
# When using training_distributed, we set the callback model
# to an instance of the `DistributedModel` that we create in
# the `compile` call. The `DistributedModel` is initialized
# with the first replicated model. We need to set the callback
# model to a DistributedModel to allow us to override saving
# and loading weights when we checkpoint the model during training.
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
def _make_callback_model(self):
first_replicated_model = self._distribution_strategy.unwrap(
self._grouped_model)[0]
# We initialize the callback model with the first replicated model.
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
class DistributedCallbackModel(Model):
"""Model that is used for callbacks with DistributionStrategy."""
def __init__(self, model):
super(DistributedCallbackModel, self).__init__()
# TODO(anjalisridhar): Right now the only attributes set are the layer and
# weights. We may need to set additional attributes as needed since we have
# not called compile on this model.
def set_original_model(self, orig_model):
self._original_model = orig_model
def save_weights(self, filepath, overwrite=True, save_format=None):
self._replicated_model.save_weights(filepath, overwrite=overwrite,
save_format=save_format)
def save(self, filepath, overwrite=True, include_optimizer=True):
# save weights from the distributed model to the original model
distributed_model_weights = self.get_weights()
self._original_model.set_weights(distributed_model_weights)
# TODO(anjalisridhar): Do we need to save the original model here?
# Saving the first replicated model works as well.
self._original_model.save(filepath, overwrite=True, include_optimizer=False)
def load_weights(self, filepath, by_name=False):
self._original_model.load_weights(filepath, by_name=False)
# Copy the weights from the original model to each of the replicated models.
orig_model_weights = self._original_model.get_weights()
distributed_training_utils.set_weights(
self._original_model._distribution_strategy, self, # pylint: disable=protected-access
orig_model_weights)
def __getattr__(self, item):
# Whitelisted attributes of the model that can be accessed by the user
# during a callback.
if item not in ['_setattr_tracking']:
logging.warning('You are accessing attribute ' + item + ' of the '
'DistributedCallbackModel that may not have been set '
'correctly.')
|
py | 7df72d958e57cd7b2ce3cec6f07fe8aa4fadde6d | from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect, render
from requests.exceptions import ConnectionError, HTTPError
from .forms import EmailSubscriptionForm
def home(request):
# Include email signup form
context = {
'form': EmailSubscriptionForm(),
}
return render(request, 'landingpage/home.html', context)
def email_signup(request):
template = 'landingpage/email_signup_confirm.html'
# process the form data if it has been POSTed to this view
if request.method == 'POST':
# populate form instance with post data
form = EmailSubscriptionForm(request.POST)
# The posted form data is valid, try to subscribe the email to Mailchimp
if form.is_valid():
try:
form.signup_email(request)
# cannot connect to MailChimp
except (HTTPError, ConnectionError) as error:
message = "MailChimp Error: {}."
messages.error(request, message.format(error))
# missing MAILCHIMP_LIST_ID or API Key
except ImproperlyConfigured as error:
messages.error(request, error)
# redirect home if there was no exception
else:
return redirect('home')
# if it was any other request, just display the empty form
else:
form = EmailSubscriptionForm()
return render(request, template, {'form': form})
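# A hedged wiring sketch (this would normally live in the app's urls.py, not in
# this views module; the URL patterns and the `landingpage` module path are
# assumptions inferred from the template names and `redirect('home')` above):
#
#     from django.urls import path
#     from landingpage import views
#
#     urlpatterns = [
#         path('', views.home, name='home'),
#         path('signup/', views.email_signup, name='email_signup'),
#     ]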
|
py | 7df72fd6b37f9d329ee39e2082e0fff9fe47eb2b | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2013 Bartosz Janda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import UIView
class UIControlSyntheticProvider(UIView.UIViewSyntheticProvider):
"""
Class representing UIControl.
"""
def __init__(self, value_obj, internal_dict):
super(UIControlSyntheticProvider, self).__init__(value_obj, internal_dict)
self.type_name = "UIControl"
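# A hedged registration sketch (not part of the original provider): LLDB data
# formatter scripts commonly expose an __lldb_init_module hook that runs when
# the file is loaded with `command script import`. The dotted path below
# assumes this file is importable as a module named `UIControl`.
def __lldb_init_module(debugger, internal_dict):
    # Attach the synthetic children provider defined above to UIControl values.
    debugger.HandleCommand(
        "type synthetic add -l UIControl.UIControlSyntheticProvider UIControl")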
|
py | 7df73089386543a5e3a0a8b7bcc5df32177ac323 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the CFR algorithm on Kuhn Poker."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 100, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 10, "How often to print the exploitability")
def main(_):
#game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
game = pyspiel.load_game(FLAGS.game)
cfr_solver = cfr.CFRSolver(game)
for i in range(FLAGS.iterations):
cfr_solver.evaluate_and_update_policy()
if i % FLAGS.print_freq == 0:
conv = exploitability.exploitability(game, cfr_solver.average_policy())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
|
py | 7df7318d6fb9e5a00906b504fc6e677b6334a63c | """Create synthetic datasets for surrogate training."""
# %%
from os import mkdir, getcwd
import numpy as np
import pandas as pd
from optproblems import wfg, dtlz, zdt
from pyDOE import lhs
from tqdm import tqdm
mean = 0.5
std_dev = 0.1
noise_mean = 0
noise_std = 0.05
num_obj = 2
num_var_zdt = {"ZDT1": 30, "ZDT2": 30, "ZDT3": 30, "ZDT4": 10, "ZDT6": 10}
problems = {
"WFG1": wfg.WFG1,
"WFG2": wfg.WFG2,
"WFG3": wfg.WFG3,
"WFG4": wfg.WFG4,
"WFG5": wfg.WFG5,
"WFG6": wfg.WFG6,
"WFG7": wfg.WFG7,
"WFG8": wfg.WFG8,
"WFG9": wfg.WFG9,
"ZDT1": zdt.ZDT1,
"ZDT2": zdt.ZDT2,
"ZDT3": zdt.ZDT3,
"ZDT4": zdt.ZDT4,
"ZDT6": zdt.ZDT6,
"DTLZ1": dtlz.DTLZ1,
"DTLZ2": dtlz.DTLZ2,
"DTLZ3": dtlz.DTLZ3,
"DTLZ4": dtlz.DTLZ4,
"DTLZ5": dtlz.DTLZ5,
"DTLZ6": dtlz.DTLZ6,
"DTLZ7": dtlz.DTLZ7,
}
def generatedata(
*,
problemname: str,
num_var: int,
num_samples: int,
distribution: str,
noise: bool,
missing_data: bool,
save_folder: str,
):
"""Generate random dataset from known benchmark problems or engineering problems.
Parameters
----------
problemname : str
Name of the problem
num_var : int
number of decision variables
num_samples : int
number of samples
distribution : str
Normal or uniform distribution
noise : bool
Presence or absence of noise in data
missing_data : bool
Presence or absence of missing chunks of data
save_folder : str
Path to the save folder
"""
if "DTLZ" in problemname:
generateDTLZ(
problemname,
num_var,
num_samples,
distribution,
noise,
missing_data,
save_folder,
)
return
elif "WFG" in problemname:
generateWFG(
problemname,
num_var,
num_samples,
distribution,
noise,
missing_data,
save_folder,
)
return
elif "ZDT" in problemname:
generateZDT(
problemname,
num_var,
num_samples,
distribution,
noise,
missing_data,
save_folder,
)
return
else:
print("Error with Problem name")
return
def generate_var_0_1(
problemname: str,
num_var: int,
num_samples: int,
distribution: str,
noise: bool,
missing_data: bool,
save_folder: str,
):
filename = (
save_folder
+ "/"
+ problemname
+ "_"
+ str(num_var)
+ "_"
+ str(num_samples)
+ "_"
+ distribution
)
if distribution == "uniform":
var = lhs(num_var, num_samples)
elif distribution == "normal":
means = [mean] * num_var
cov = np.eye(num_var) * np.square(std_dev)
var = np.random.multivariate_normal(means, cov, num_samples)
if noise:
noise_means = [noise_mean] * num_var
noise_cov = np.eye(num_var) * np.square(noise_std)
noise_var = np.random.multivariate_normal(noise_means, noise_cov, num_samples)
filename = filename + "_noisy"
var = var + noise_var
# To keep values between 0 and 1
var[var > 1] = 1
var[var < 0] = 0
return (var, filename)
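# A minimal standalone sketch of the sampling-plus-clipping scheme used above
# (it reuses the module-level mean/std constants; the helper itself is not part
# of the original generation pipeline).
def _example_normal_samples_with_noise(num_var=3, num_samples=5):
    means = [mean] * num_var
    cov = np.eye(num_var) * np.square(std_dev)
    samples = np.random.multivariate_normal(means, cov, num_samples)
    # Additive Gaussian noise, then clip back into the [0, 1] decision space.
    samples = samples + np.random.normal(noise_mean, noise_std, samples.shape)
    return np.clip(samples, 0, 1)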
def generateDTLZ(
problemname: str,
num_var: int,
num_samples: int,
distribution: str,
noise: bool,
missing_data: bool,
save_folder: str,
):
"""Generate and save DTLZ datasets as csv.
Parameters
----------
problemname : str
Name of the problem
num_var : int
number of variables
num_samples : int
Number of samples
distribution : str
Uniform or normal distribution
noise : bool
Presence or absence of noise
missing_data : bool
Presence or absence of missing data
save_folder : str
Path to the folder to save csv files
"""
objective = problems[problemname](num_obj, num_var)
var_names = ["x{0}".format(x) for x in range(num_var)]
obj_names = ["f1", "f2"]
if distribution in ["uniform", "normal"]:
var, filename = generate_var_0_1(
problemname,
num_var,
num_samples,
distribution,
noise,
missing_data,
save_folder,
)
elif distribution == "optimal":
x_first_m_1 = np.random.random((num_samples, num_obj - 1))
if problemname == "DTLZ6" or problemname == "DTLZ7":
x_last_k = np.zeros((num_samples, num_var - num_obj + 1))
else:
x_last_k = np.zeros((num_samples, num_var - num_obj + 1)) + 0.5
var = np.hstack((x_first_m_1, x_last_k))
filename = (
save_folder
+ "/"
+ problemname
+ "_"
+ str(num_var)
+ "_"
+ str(num_samples)
+ "_"
+ distribution
)
obj = [objective(x) for x in var]
data = np.hstack((var, obj))
data = pd.DataFrame(data, columns=var_names + obj_names)
filename = filename + ".csv"
data.to_csv(filename, index=False)
return
def generateWFG(
problemname: str,
num_var: int,
num_samples: int,
distribution: str,
noise: bool,
missing_data: bool,
save_folder: str,
):
"""Generate and save WFG datasets as csv.
Parameters
----------
problemname : str
Name of the problem
num_var : int
number of variables
num_samples : int
Number of samples
distribution : str
Uniform or normal distribution
noise : bool
Presence or absence of noise
missing_data : bool
Presence or absence of missing data
save_folder : str
Path to the folder to save csv files
"""
objective = problems[problemname](num_obj, num_var, k=4)
var_names = ["x{0}".format(x) for x in range(num_var)]
obj_names = ["f1", "f2"]
if distribution in ["uniform", "normal"]:
var, filename = generate_var_0_1(
problemname,
num_var,
num_samples,
distribution,
noise,
missing_data,
save_folder,
)
elif distribution == "optimal":
solns = objective.get_optimal_solutions(max_number=num_samples)
var = np.asarray([soln.phenome for soln in solns])
filename = (
save_folder
+ "/"
+ problemname
+ "_"
+ str(num_var)
+ "_"
+ str(num_samples)
+ "_"
+ distribution
)
obj = [objective(x) for x in var]
data = np.hstack((var, obj))
data = pd.DataFrame(data, columns=var_names + obj_names)
filename = filename + ".csv"
data.to_csv(filename, index=False)
return
def generateZDT(
problemname: str,
num_var: int,
num_samples: int,
distribution: str,
noise: bool,
missing_data: bool,
save_folder: str,
):
"""Generate and save ZDT datasets as csv.
Parameters
----------
problemname : str
Name of the problem
num_var : int
number of variables
num_samples : int
Number of samples
distribution : str
Uniform or normal distribution
noise : bool
Presence or absence of noise
missing_data : bool
Presence or absence of missing data
save_folder : str
Path to the folder to save csv files
"""
objective = problems[problemname]()
num_var = num_var_zdt[problemname]
var_names = ["x{0}".format(x) for x in range(num_var)]
obj_names = ["f1", "f2"]
if distribution in ["uniform", "normal"]:
var, filename = generate_var_0_1(
problemname,
num_var,
num_samples,
distribution,
noise,
missing_data,
save_folder,
)
elif distribution == "optimal":
var = np.zeros((num_samples, num_var - 1))
var_x1 = np.linspace(0, 1, num_samples).reshape(-1, 1)
var = np.hstack((var_x1, var))
filename = (
save_folder
+ "/"
+ problemname
+ "_"
+ str(num_var)
+ "_"
+ str(num_samples)
+ "_"
+ distribution
)
obj = [objective(x) for x in var]
data = np.hstack((var, obj))
data = pd.DataFrame(data, columns=var_names + obj_names)
filename = filename + ".csv"
data.to_csv(filename, index=False)
return
def create_datasets(
folder: str = None,
num_vars: list = None,
num_samples: list = None,
distribution: list = None,
missing_data: bool = False,
noise: bool = False,
):
"""Automatically create datasets and save them as csv files in ./datasets_benchmark
"""
if folder is None:
folder = "datasets_benchmark_train"
mkdir(folder)
folder = getcwd() + "/" + folder
if num_vars is None:
num_vars = [6, 10, 16, 20, 30]
if num_samples is None:
num_samples = [
100,
150,
200,
250,
300,
350,
400,
500,
600,
700,
800,
900,
1000,
1200,
1500,
2000,
]
if distribution is None:
distribution = ["uniform", "normal"]
for problem in tqdm(problems):
for num_var in num_vars:
for samples in num_samples:
for dist in distribution:
generatedata(
problemname=problem,
num_var=num_var,
num_samples=samples,
distribution=dist,
noise=noise,
missing_data=missing_data,
save_folder=folder,
)
if __name__ == "__main__":
# Training files
# create_datasets()
# Testing files
create_datasets(
folder="datasets_benchmark_test", num_samples=[10000], distribution=["uniform"]
)
|
py | 7df731a04731cea99944b11364c2e81b5e04b73d | import os
from aioweb.conf import settings
from aioweb.util import package_path
def setup(router):
router.root('site#index')
router.get('site#test')
router.static('/static/', [
os.path.join(package_path('aioweb'), 'assets'),
os.path.join(settings.BASE_DIR, 'app/assets'),
])
|
py | 7df731e5579584900689bf82b3d7fd9981dd8003 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=R0913, C0302
"""
This module contains various GCP Cloud DLP operators
which allow you to perform basic operations using
Cloud DLP.
"""
from typing import Dict, Optional, Sequence, Tuple, Union
from google.api_core.exceptions import AlreadyExists, NotFound
from google.api_core.retry import Retry
from google.cloud.dlp_v2.types import (
ByteContentItem, ContentItem, DeidentifyConfig, DeidentifyTemplate, FieldMask, InspectConfig,
InspectJobConfig, InspectTemplate, JobTrigger, RedactImageRequest, RiskAnalysisJobConfig,
StoredInfoTypeConfig,
)
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.dlp import CloudDLPHook
from airflow.utils.decorators import apply_defaults
class CloudDLPCancelDLPJobOperator(BaseOperator):
"""
Starts asynchronous cancellation on a long-running DlpJob.
:param dlp_job_id: ID of the DLP job resource to be cancelled.
:type dlp_job_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default project_id
from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = ("dlp_job_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
dlp_job_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
hook.cancel_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
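# A hedged DAG-level usage sketch (this would live in a DAG file rather than in
# the provider module; the dag_id, task_id and job id below are illustrative
# assumptions):
#
#     from datetime import datetime
#     from airflow import DAG
#
#     with DAG("example_dlp_cancel", start_date=datetime(2020, 1, 1),
#              schedule_interval=None) as dag:
#         cancel_job = CloudDLPCancelDLPJobOperator(
#             task_id="cancel_dlp_job",
#             dlp_job_id="i-1234567890",
#         )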
class CloudDLPCreateDeidentifyTemplateOperator(BaseOperator):
"""
Creates a DeidentifyTemplate for re-using frequently used configuration for
de-identifying content, images, and storage.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param deidentify_template: (Optional) The DeidentifyTemplate to create.
:type deidentify_template: dict or google.cloud.dlp_v2.types.DeidentifyTemplate
:param template_id: (Optional) The template ID.
:type template_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.DeidentifyTemplate
"""
template_fields = (
"organization_id",
"project_id",
"deidentify_template",
"template_id",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
deidentify_template: Optional[Union[Dict, DeidentifyTemplate]] = None,
template_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.deidentify_template = deidentify_template
self.template_id = template_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
template = hook.create_deidentify_template(
organization_id=self.organization_id,
project_id=self.project_id,
deidentify_template=self.deidentify_template,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
template = hook.get_deidentify_template(
organization_id=self.organization_id,
project_id=self.project_id,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(template)
class CloudDLPCreateDLPJobOperator(BaseOperator):
"""
Creates a new job to inspect storage or calculate risk metrics.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param inspect_job: (Optional) The configuration for the inspect job.
:type inspect_job: dict or google.cloud.dlp_v2.types.InspectJobConfig
:param risk_job: (Optional) The configuration for the risk job.
:type risk_job: dict or google.cloud.dlp_v2.types.RiskAnalysisJobConfig
:param job_id: (Optional) The job ID.
:type job_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param wait_until_finished: (Optional) If true, it will keep polling the job state
until it is set to DONE.
:type wait_until_finished: bool
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.DlpJob
"""
template_fields = ("project_id", "inspect_job", "risk_job", "job_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
inspect_job: Optional[Union[Dict, InspectJobConfig]] = None,
risk_job: Optional[Union[Dict, RiskAnalysisJobConfig]] = None,
job_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
wait_until_finished: bool = True,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_job = inspect_job
self.risk_job = risk_job
self.job_id = job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.wait_until_finished = wait_until_finished
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
job = hook.create_dlp_job(
project_id=self.project_id,
inspect_job=self.inspect_job,
risk_job=self.risk_job,
job_id=self.job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
wait_until_finished=self.wait_until_finished,
)
except AlreadyExists:
job = hook.get_dlp_job(
project_id=self.project_id,
dlp_job_id=self.job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(job)
class CloudDLPCreateInspectTemplateOperator(BaseOperator):
"""
Creates an InspectTemplate for re-using frequently used configuration for
inspecting content, images, and storage.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param inspect_template: (Optional) The InspectTemplate to create.
:type inspect_template: dict or google.cloud.dlp_v2.types.InspectTemplate
:param template_id: (Optional) The template ID.
:type template_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.InspectTemplate
"""
template_fields = (
"organization_id",
"project_id",
"inspect_template",
"template_id",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
inspect_template: Optional[Union[Dict, InspectTemplate]] = None,
template_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.inspect_template = inspect_template
self.template_id = template_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
template = hook.create_inspect_template(
organization_id=self.organization_id,
project_id=self.project_id,
inspect_template=self.inspect_template,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
template = hook.get_inspect_template(
organization_id=self.organization_id,
project_id=self.project_id,
template_id=self.template_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(template)
class CloudDLPCreateJobTriggerOperator(BaseOperator):
"""
Creates a job trigger to run DLP actions such as scanning storage for sensitive
information on a set schedule.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param job_trigger: (Optional) The JobTrigger to create.
:type job_trigger: dict or google.cloud.dlp_v2.types.JobTrigger
:param trigger_id: (Optional) The JobTrigger ID.
:type trigger_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.JobTrigger
"""
template_fields = ("project_id", "job_trigger", "trigger_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
job_trigger: Optional[Union[Dict, JobTrigger]] = None,
trigger_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.job_trigger = job_trigger
self.trigger_id = trigger_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
trigger = hook.create_job_trigger(
project_id=self.project_id,
job_trigger=self.job_trigger,
trigger_id=self.trigger_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
trigger = hook.get_job_trigger(
project_id=self.project_id,
job_trigger_id=self.trigger_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(trigger)
class CloudDLPCreateStoredInfoTypeOperator(BaseOperator):
"""
Creates a pre-built stored infoType to be used for inspection.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param config: (Optional) The config for the StoredInfoType.
:type config: dict or google.cloud.dlp_v2.types.StoredInfoTypeConfig
:param stored_info_type_id: (Optional) The StoredInfoType ID.
:type stored_info_type_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.StoredInfoType
"""
template_fields = (
"organization_id",
"project_id",
"config",
"stored_info_type_id",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
config: Optional[Union[Dict, StoredInfoTypeConfig]] = None,
stored_info_type_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.config = config
self.stored_info_type_id = stored_info_type_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
info = hook.create_stored_info_type(
organization_id=self.organization_id,
project_id=self.project_id,
config=self.config,
stored_info_type_id=self.stored_info_type_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
info = hook.get_stored_info_type(
organization_id=self.organization_id,
project_id=self.project_id,
stored_info_type_id=self.stored_info_type_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(info)
class CloudDLPDeidentifyContentOperator(BaseOperator):
"""
De-identifies potentially sensitive info from a ContentItem. This method has limits
on input size and output size.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param deidentify_config: (Optional) Configuration for the de-identification of the
content item. Items specified here will override the template referenced by the
deidentify_template_name argument.
:type deidentify_config: dict or google.cloud.dlp_v2.types.DeidentifyConfig
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:type inspect_config: dict or google.cloud.dlp_v2.types.InspectConfig
:param item: (Optional) The item to de-identify. Will be treated as text.
:type item: dict or google.cloud.dlp_v2.types.ContentItem
:param inspect_template_name: (Optional) Optional template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:type inspect_template_name: str
:param deidentify_template_name: (Optional) Optional template to use. Any
configuration directly specified in deidentify_config will override those set
in the template.
:type deidentify_template_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.DeidentifyContentResponse
"""
template_fields = (
"project_id",
"deidentify_config",
"inspect_config",
"item",
"inspect_template_name",
"deidentify_template_name",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
deidentify_config: Optional[Union[Dict, DeidentifyConfig]] = None,
inspect_config: Optional[Union[Dict, InspectConfig]] = None,
item: Optional[Union[Dict, ContentItem]] = None,
inspect_template_name: Optional[str] = None,
deidentify_template_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.deidentify_config = deidentify_config
self.inspect_config = inspect_config
self.item = item
self.inspect_template_name = inspect_template_name
self.deidentify_template_name = deidentify_template_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
response = hook.deidentify_content(
project_id=self.project_id,
deidentify_config=self.deidentify_config,
inspect_config=self.inspect_config,
item=self.item,
inspect_template_name=self.inspect_template_name,
deidentify_template_name=self.deidentify_template_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(response)
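# A hedged sketch of the payloads this operator forwards to
# CloudDLPHook.deidentify_content. The EMAIL_ADDRESS info type, the
# replace-with-info-type transformation and the free-text item are illustrative
# assumptions, not values required by the operator.
def _example_deidentify_content_kwargs():
    inspect_config = {"info_types": [{"name": "EMAIL_ADDRESS"}]}
    deidentify_config = {
        "info_type_transformations": {
            "transformations": [
                {"primitive_transformation": {"replace_with_info_type_config": {}}}
            ]
        }
    }
    item = {"value": "Please reach me at jane.doe@example.com"}
    return {
        "deidentify_config": deidentify_config,
        "inspect_config": inspect_config,
        "item": item,
    }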
class CloudDLPDeleteDeidentifyTemplateOperator(BaseOperator):
"""
Deletes a DeidentifyTemplate.
:param template_id: The ID of deidentify template to be deleted.
:type template_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = ("template_id", "organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
template_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
hook.delete_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.error("Template %s not found.", self.template_id)
class CloudDLPDeleteDLPJobOperator(BaseOperator):
"""
Deletes a long-running DlpJob. This method indicates that the client is no longer
interested in the DlpJob result. The job will be cancelled if possible.
:param dlp_job_id: The ID of the DLP job resource to be cancelled.
:type dlp_job_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = ("dlp_job_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
dlp_job_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
hook.delete_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.error("Job %s id not found.", self.dlp_job_id)
class CloudDLPDeleteInspectTemplateOperator(BaseOperator):
"""
Deletes an InspectTemplate.
:param template_id: The ID of the inspect template to be deleted.
:type template_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = ("template_id", "organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
template_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
hook.delete_inspect_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.error("Template %s not found", self.template_id)
class CloudDLPDeleteJobTriggerOperator(BaseOperator):
"""
Deletes a job trigger.
:param job_trigger_id: The ID of the DLP job trigger to be deleted.
:type job_trigger_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = ("job_trigger_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
job_trigger_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
hook.delete_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.error("Trigger %s not found", self.job_trigger_id)
class CloudDLPDeleteStoredInfoTypeOperator(BaseOperator):
"""
Deletes a stored infoType.
:param stored_info_type_id: The ID of the stored info type to be deleted.
:type stored_info_type_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
"""
template_fields = (
"stored_info_type_id",
"organization_id",
"project_id",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
stored_info_type_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.stored_info_type_id = stored_info_type_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
try:
hook.delete_stored_info_type(
stored_info_type_id=self.stored_info_type_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.error("Stored info %s not found", self.stored_info_type_id)
class CloudDLPGetDeidentifyTemplateOperator(BaseOperator):
"""
Gets a DeidentifyTemplate.
:param template_id: The ID of deidentify template to be read.
:type template_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.DeidentifyTemplate
"""
template_fields = ("template_id", "organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
template_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
template = hook.get_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(template)
class CloudDLPGetDLPJobOperator(BaseOperator):
"""
Gets the latest state of a long-running DlpJob.
:param dlp_job_id: The ID of the DLP job resource to be read.
:type dlp_job_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.DlpJob
"""
template_fields = ("dlp_job_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
dlp_job_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.dlp_job_id = dlp_job_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
job = hook.get_dlp_job(
dlp_job_id=self.dlp_job_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(job)
class CloudDLPGetInspectTemplateOperator(BaseOperator):
"""
Gets an InspectTemplate.
:param template_id: The ID of inspect template to be read.
:type template_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.InspectTemplate
"""
template_fields = ("template_id", "organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
template_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
template = hook.get_inspect_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(template)
class CloudDLPGetDLPJobTriggerOperator(BaseOperator):
"""
Gets a job trigger.
:param job_trigger_id: The ID of the DLP job trigger to be read.
:type job_trigger_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.JobTrigger
"""
template_fields = ("job_trigger_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
job_trigger_id: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
trigger = hook.get_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(trigger)
class CloudDLPGetStoredInfoTypeOperator(BaseOperator):
"""
Gets a stored infoType.
:param stored_info_type_id: The ID of the stored info type to be read.
:type stored_info_type_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.StoredInfoType
"""
template_fields = (
"stored_info_type_id",
"organization_id",
"project_id",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
stored_info_type_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.stored_info_type_id = stored_info_type_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
info = hook.get_stored_info_type(
stored_info_type_id=self.stored_info_type_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(info)
class CloudDLPInspectContentOperator(BaseOperator):
"""
Finds potentially sensitive info in content. This method has limits on
input size, processing time, and output size.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:type inspect_config: dict or google.cloud.dlp_v2.types.InspectConfig
    :param item: (Optional) The item to inspect. Will be treated as text.
:type item: dict or google.cloud.dlp_v2.types.ContentItem
    :param inspect_template_name: (Optional) Template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:type inspect_template_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.dlp_v2.types.InspectContentResponse
"""
template_fields = (
"project_id",
"inspect_config",
"item",
"inspect_template_name",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
inspect_config: Optional[Union[Dict, InspectConfig]] = None,
item: Optional[Union[Dict, ContentItem]] = None,
inspect_template_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_config = inspect_config
self.item = item
self.inspect_template_name = inspect_template_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
response = hook.inspect_content(
project_id=self.project_id,
inspect_config=self.inspect_config,
item=self.item,
inspect_template_name=self.inspect_template_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(response)
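# A hypothetical usage sketch (editor addition, not part of the original module) showing how
# CloudDLPInspectContentOperator might be configured. The task_id, project id, sample text and
# info type below are illustrative assumptions, not values taken from this file.
def _example_inspect_content_task():
    return CloudDLPInspectContentOperator(
        task_id="dlp_inspect_content",
        project_id="my-gcp-project",  # hypothetical project id
        item={"value": "My phone number is (206) 555-0123"},
        inspect_config={"info_types": [{"name": "PHONE_NUMBER"}]},
    )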
class CloudDLPListDeidentifyTemplatesOperator(BaseOperator):
"""
Lists DeidentifyTemplates.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
    :param order_by: (Optional) Comma-separated list of fields to order by,
followed by asc or desc postfix.
:type order_by: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: list[google.cloud.dlp_v2.types.DeidentifyTemplate]
"""
template_fields = ("organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
page_size: Optional[int] = None,
order_by: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
        templates = hook.list_deidentify_templates(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
        return [MessageToDict(t) for t in templates]
class CloudDLPListDLPJobsOperator(BaseOperator):
"""
Lists DlpJobs that match the specified filter in the request.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param results_filter: (Optional) Filter used to specify a subset of results.
:type results_filter: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param job_type: (Optional) The type of job.
:type job_type: str
    :param order_by: (Optional) Comma-separated list of fields to order by,
followed by asc or desc postfix.
:type order_by: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: list[google.cloud.dlp_v2.types.DlpJob]
"""
template_fields = ("project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
results_filter: Optional[str] = None,
page_size: Optional[int] = None,
job_type: Optional[str] = None,
order_by: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.results_filter = results_filter
self.page_size = page_size
self.job_type = job_type
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
        jobs = hook.list_dlp_jobs(
project_id=self.project_id,
results_filter=self.results_filter,
page_size=self.page_size,
job_type=self.job_type,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
        return [MessageToDict(j) for j in jobs]
class CloudDLPListInfoTypesOperator(BaseOperator):
"""
Returns a list of the sensitive information types that the DLP API supports.
    :param language_code: (Optional) BCP-47 language code for localized infoType
friendly names. If omitted, or if localized strings are not available, en-US
strings will be returned.
:type language_code: str
:param results_filter: (Optional) Filter used to specify a subset of results.
:type results_filter: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.dlp_v2.types.ListInfoTypesResponse
"""
template_fields = ("language_code", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
language_code: Optional[str] = None,
results_filter: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.language_code = language_code
self.results_filter = results_filter
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
response = hook.list_info_types(
language_code=self.language_code,
results_filter=self.results_filter,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(response)
class CloudDLPListInspectTemplatesOperator(BaseOperator):
"""
Lists InspectTemplates.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
    :param order_by: (Optional) Comma-separated list of fields to order by,
followed by asc or desc postfix.
:type order_by: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: list[google.cloud.dlp_v2.types.InspectTemplate]
"""
template_fields = ("organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
page_size: Optional[int] = None,
order_by: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
templates = hook.list_inspect_templates(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [MessageToDict(t) for t in templates]
class CloudDLPListJobTriggersOperator(BaseOperator):
"""
Lists job triggers.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
    :param order_by: (Optional) Comma-separated list of fields to order by,
followed by asc or desc postfix.
:type order_by: str
:param results_filter: (Optional) Filter used to specify a subset of results.
:type results_filter: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: list[google.cloud.dlp_v2.types.JobTrigger]
"""
template_fields = ("project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
page_size: Optional[int] = None,
order_by: Optional[str] = None,
results_filter: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.results_filter = results_filter
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
jobs = hook.list_job_triggers(
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
results_filter=self.results_filter,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [MessageToDict(j) for j in jobs]
class CloudDLPListStoredInfoTypesOperator(BaseOperator):
"""
Lists stored infoTypes.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
    :param order_by: (Optional) Comma-separated list of fields to order by,
followed by asc or desc postfix.
:type order_by: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: list[google.cloud.dlp_v2.types.StoredInfoType]
"""
template_fields = ("organization_id", "project_id", "gcp_conn_id")
@apply_defaults
def __init__(
self, *,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
page_size: Optional[int] = None,
order_by: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.organization_id = organization_id
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
infos = hook.list_stored_info_types(
organization_id=self.organization_id,
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [MessageToDict(i) for i in infos]
class CloudDLPRedactImageOperator(BaseOperator):
"""
Redacts potentially sensitive info from an image. This method has limits on
input size, processing time, and output size.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param inspect_config: (Optional) Configuration for the inspector. Items specified
here will override the template referenced by the inspect_template_name argument.
:type inspect_config: dict or google.cloud.dlp_v2.types.InspectConfig
:param image_redaction_configs: (Optional) The configuration for specifying what
content to redact from images.
:type image_redaction_configs: list[dict] or
list[google.cloud.dlp_v2.types.RedactImageRequest.ImageRedactionConfig]
:param include_findings: (Optional) Whether the response should include findings
along with the redacted image.
:type include_findings: bool
:param byte_item: (Optional) The content must be PNG, JPEG, SVG or BMP.
:type byte_item: dict or google.cloud.dlp_v2.types.ByteContentItem
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.RedactImageResponse
"""
template_fields = (
"project_id",
"inspect_config",
"image_redaction_configs",
"include_findings",
"byte_item",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
inspect_config: Optional[Union[Dict, InspectConfig]] = None,
image_redaction_configs: Optional[Union[Dict, RedactImageRequest.ImageRedactionConfig]] = None,
include_findings: Optional[bool] = None,
byte_item: Optional[Union[Dict, ByteContentItem]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_config = inspect_config
self.image_redaction_configs = image_redaction_configs
self.include_findings = include_findings
self.byte_item = byte_item
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
response = hook.redact_image(
project_id=self.project_id,
inspect_config=self.inspect_config,
image_redaction_configs=self.image_redaction_configs,
include_findings=self.include_findings,
byte_item=self.byte_item,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(response)
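# A hypothetical usage sketch (editor addition) for CloudDLPRedactImageOperator. The image path,
# project id and info type are illustrative assumptions; byte_item pairs the raw image bytes with
# their type, and image_redaction_configs names the findings to redact.
def _example_redact_image_task():
    with open("/tmp/input.png", "rb") as image_file:  # hypothetical image path
        image_bytes = image_file.read()
    return CloudDLPRedactImageOperator(
        task_id="dlp_redact_image",
        project_id="my-gcp-project",  # hypothetical project id
        byte_item={"type": "IMAGE_PNG", "data": image_bytes},
        image_redaction_configs=[{"info_type": {"name": "EMAIL_ADDRESS"}}],
    )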
class CloudDLPReidentifyContentOperator(BaseOperator):
"""
Re-identifies content that has been de-identified.
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param reidentify_config: (Optional) Configuration for the re-identification of
the content item.
:type reidentify_config: dict or google.cloud.dlp_v2.types.DeidentifyConfig
:param inspect_config: (Optional) Configuration for the inspector.
:type inspect_config: dict or google.cloud.dlp_v2.types.InspectConfig
:param item: (Optional) The item to re-identify. Will be treated as text.
:type item: dict or google.cloud.dlp_v2.types.ContentItem
    :param inspect_template_name: (Optional) Template to use. Any configuration
directly specified in inspect_config will override those set in the template.
:type inspect_template_name: str
    :param reidentify_template_name: (Optional) Template to use. References an
instance of DeidentifyTemplate. Any configuration directly specified in
reidentify_config or inspect_config will override those set in the template.
:type reidentify_template_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.ReidentifyContentResponse
"""
template_fields = (
"project_id",
"reidentify_config",
"inspect_config",
"item",
"inspect_template_name",
"reidentify_template_name",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
project_id: Optional[str] = None,
reidentify_config: Optional[Union[Dict, DeidentifyConfig]] = None,
inspect_config: Optional[Union[Dict, InspectConfig]] = None,
item: Optional[Union[Dict, ContentItem]] = None,
inspect_template_name: Optional[str] = None,
reidentify_template_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.reidentify_config = reidentify_config
self.inspect_config = inspect_config
self.item = item
self.inspect_template_name = inspect_template_name
self.reidentify_template_name = reidentify_template_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
response = hook.reidentify_content(
project_id=self.project_id,
reidentify_config=self.reidentify_config,
inspect_config=self.inspect_config,
item=self.item,
inspect_template_name=self.inspect_template_name,
reidentify_template_name=self.reidentify_template_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(response)
class CloudDLPUpdateDeidentifyTemplateOperator(BaseOperator):
"""
Updates the DeidentifyTemplate.
:param template_id: The ID of deidentify template to be updated.
:type template_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param deidentify_template: New DeidentifyTemplate value.
:type deidentify_template: dict or google.cloud.dlp_v2.types.DeidentifyTemplate
:param update_mask: Mask to control which fields get updated.
:type update_mask: dict or google.cloud.dlp_v2.types.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.DeidentifyTemplate
"""
template_fields = (
"template_id",
"organization_id",
"project_id",
"deidentify_template",
"update_mask",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
template_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
deidentify_template: Optional[Union[Dict, DeidentifyTemplate]] = None,
update_mask: Optional[Union[Dict, FieldMask]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.deidentify_template = deidentify_template
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
template = hook.update_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
deidentify_template=self.deidentify_template,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(template)
class CloudDLPUpdateInspectTemplateOperator(BaseOperator):
"""
Updates the InspectTemplate.
:param template_id: The ID of the inspect template to be updated.
:type template_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param inspect_template: New InspectTemplate value.
:type inspect_template: dict or google.cloud.dlp_v2.types.InspectTemplate
:param update_mask: Mask to control which fields get updated.
:type update_mask: dict or google.cloud.dlp_v2.types.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.InspectTemplate
"""
template_fields = (
"template_id",
"organization_id",
"project_id",
"inspect_template",
"update_mask",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
template_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
inspect_template: Optional[Union[Dict, InspectTemplate]] = None,
update_mask: Optional[Union[Dict, FieldMask]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.inspect_template = inspect_template
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
template = hook.update_inspect_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
inspect_template=self.inspect_template,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(template)
class CloudDLPUpdateJobTriggerOperator(BaseOperator):
"""
Updates a job trigger.
:param job_trigger_id: The ID of the DLP job trigger to be updated.
:type job_trigger_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the GCP connection is used.
:type project_id: str
:param job_trigger: New JobTrigger value.
:type job_trigger: dict or google.cloud.dlp_v2.types.JobTrigger
:param update_mask: Mask to control which fields get updated.
:type update_mask: dict or google.cloud.dlp_v2.types.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
    :rtype: google.cloud.dlp_v2.types.JobTrigger
"""
template_fields = (
"job_trigger_id",
"project_id",
"job_trigger",
"update_mask",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
        job_trigger_id: str,
project_id: Optional[str] = None,
        job_trigger: Optional[Union[Dict, JobTrigger]] = None,
update_mask: Optional[Union[Dict, FieldMask]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.job_trigger_id = job_trigger_id
self.project_id = project_id
self.job_trigger = job_trigger
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
trigger = hook.update_job_trigger(
job_trigger_id=self.job_trigger_id,
project_id=self.project_id,
job_trigger=self.job_trigger,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(trigger)
class CloudDLPUpdateStoredInfoTypeOperator(BaseOperator):
"""
Updates the stored infoType by creating a new version.
:param stored_info_type_id: The ID of the stored info type to be updated.
:type stored_info_type_id: str
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:type organization_id: str
:param project_id: (Optional) Google Cloud Platform project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:type project_id: str
:param config: Updated configuration for the storedInfoType. If not provided, a new
version of the storedInfoType will be created with the existing configuration.
:type config: dict or google.cloud.dlp_v2.types.StoredInfoTypeConfig
:param update_mask: Mask to control which fields get updated.
:type update_mask: dict or google.cloud.dlp_v2.types.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: sequence[tuple[str, str]]
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:rtype: google.cloud.dlp_v2.types.StoredInfoType
"""
template_fields = (
"stored_info_type_id",
"organization_id",
"project_id",
"config",
"update_mask",
"gcp_conn_id",
)
@apply_defaults
def __init__(
self, *,
        stored_info_type_id: str,
organization_id: Optional[str] = None,
project_id: Optional[str] = None,
config: Optional[Union[Dict, StoredInfoTypeConfig]] = None,
update_mask: Optional[Union[Dict, FieldMask]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id: str = "google_cloud_default",
**kwargs
) -> None:
super().__init__(**kwargs)
self.stored_info_type_id = stored_info_type_id
self.organization_id = organization_id
self.project_id = project_id
self.config = config
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
def execute(self, context):
hook = CloudDLPHook(gcp_conn_id=self.gcp_conn_id)
info = hook.update_stored_info_type(
stored_info_type_id=self.stored_info_type_id,
organization_id=self.organization_id,
project_id=self.project_id,
config=self.config,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return MessageToDict(info)
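# A hypothetical end-to-end sketch (editor addition, not part of the original module) wiring two
# of the operators above into a minimal DAG. The dag_id, start date, project id and sample content
# are illustrative assumptions.
def _example_dlp_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_dlp_usage",  # hypothetical DAG id
        start_date=datetime(2021, 1, 1),
        schedule_interval=None,
        catchup=False,
    ) as dag:
        inspect = CloudDLPInspectContentOperator(
            task_id="inspect_content",
            project_id="my-gcp-project",  # hypothetical project id
            item={"value": "john.doe@example.com"},
            inspect_config={"info_types": [{"name": "EMAIL_ADDRESS"}]},
        )
        list_jobs = CloudDLPListDLPJobsOperator(
            task_id="list_dlp_jobs",
            project_id="my-gcp-project",
        )
        inspect >> list_jobs  # inspect the sample content before listing DLP jobs
    return dag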
|
py | 7df7325dc066bd5034e5e2abde290fb04cef7905 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import yaml
from assertpy import assert_that
from marshmallow.validate import ValidationError
from pcluster.constants import MAX_NUMBER_OF_COMPUTE_RESOURCES, MAX_NUMBER_OF_QUEUES
from pcluster.schemas.cluster_schema import (
ClusterSchema,
HeadNodeIamSchema,
ImageSchema,
QueueIamSchema,
SchedulingSchema,
SharedStorageSchema,
)
from tests.pcluster.aws.dummy_aws_api import mock_aws_api
from tests.pcluster.utils import load_cluster_model_from_yaml
def _check_cluster_schema(config_file_name):
# Load cluster model from Yaml file
input_yaml, cluster = load_cluster_model_from_yaml(config_file_name)
# Re-create Yaml file from model and compare content
cluster_schema = ClusterSchema(cluster_name="clustername")
cluster_schema.context = {"delete_defaults_when_dump": True}
output_json = cluster_schema.dump(cluster)
assert_that(json.dumps(input_yaml, sort_keys=True)).is_equal_to(json.dumps(output_json, sort_keys=True))
# Print output yaml
output_yaml = yaml.dump(output_json)
print(output_yaml)
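# For orientation, a minimal cluster config of the kind the round-trip check above loads might
# look like the hypothetical sketch below (the real slurm.required.yaml fixture may differ, and
# the OS, subnet and instance values are illustrative assumptions):
#
#   Image:
#     Os: alinux2
#   HeadNode:
#     InstanceType: t2.micro
#     Networking:
#       SubnetId: subnet-12345678
#   Scheduling:
#     Scheduler: slurm
#     SlurmQueues:
#       - Name: queue1
#         Networking:
#           SubnetIds:
#             - subnet-12345678
#         ComputeResources:
#           - Name: compute_resource1
#             InstanceType: c5.xlarge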
@pytest.mark.parametrize("config_file_name", ["slurm.required.yaml", "slurm.full.yaml"])
def test_cluster_schema_slurm(mocker, test_datadir, config_file_name):
mock_aws_api(mocker)
_check_cluster_schema(config_file_name)
@pytest.mark.parametrize("config_file_name", ["awsbatch.simple.yaml", "awsbatch.full.yaml"])
def test_cluster_schema_awsbatch(test_datadir, config_file_name):
_check_cluster_schema(config_file_name)
@pytest.mark.parametrize(
"os, custom_ami, failure_message",
[
(None, None, "Missing data for required field"),
("ubuntu1804", "ami-12345678", None),
("centos7", None, None),
],
)
def test_image_schema(os, custom_ami, failure_message):
image_schema = {}
if os:
image_schema["Os"] = os
if custom_ami:
image_schema["CustomAmi"] = custom_ami
if failure_message:
with pytest.raises(ValidationError, match=failure_message):
ImageSchema().load(image_schema)
else:
image = ImageSchema().load(image_schema)
assert_that(image.os).is_equal_to(os)
assert_that(image.custom_ami).is_equal_to(custom_ami)
@pytest.mark.parametrize(
"instance_role, instance_profile, additional_iam_policies, s3_access, failure_message",
[
(None, None, "arn:aws:iam::aws:policy/AdministratorAccess", True, False),
(
"arn:aws:iam::aws:role/CustomHeadNodeRole",
None,
"arn:aws:iam::aws:policy/AdministratorAccess",
False,
"InstanceProfile, InstanceRole or AdditionalIamPolicies can not be configured together.",
),
(
"arn:aws:iam::aws:role/CustomHeadNodeRole",
None,
None,
True,
"S3Access can not be configured when InstanceRole is set.",
),
(
None,
"arn:aws:iam::aws:instance-profile/CustomNodeInstanceProfile",
None,
True,
"S3Access can not be configured when InstanceProfile is set.",
),
(
"arn:aws:iam::aws:role/CustomHeadNodeRole",
"arn:aws:iam::aws:instance-profile/CustomNodeInstanceProfile",
None,
False,
"InstanceProfile, InstanceRole or AdditionalIamPolicies can not be configured together.",
),
(None, "arn:aws:iam::aws:instance-profile/CustomNodeInstanceProfile", None, False, False),
("arn:aws:iam::aws:role/CustomHeadNodeRole", None, None, False, False),
],
)
def test_iam_schema(instance_role, instance_profile, additional_iam_policies, s3_access, failure_message):
iam_dict = dict()
if instance_role:
iam_dict["InstanceRole"] = instance_role
if instance_profile:
iam_dict["InstanceProfile"] = instance_profile
if additional_iam_policies:
iam_dict["AdditionalIamPolicies"] = [{"Policy": additional_iam_policies}]
if s3_access:
iam_dict["S3Access"] = [{"BucketName": "dummy-bucket-name"}]
if failure_message:
with pytest.raises(
ValidationError,
match=failure_message,
):
HeadNodeIamSchema().load(iam_dict)
with pytest.raises(
ValidationError,
match=failure_message,
):
QueueIamSchema().load(iam_dict)
else:
iam = HeadNodeIamSchema().load(iam_dict)
assert_that(iam.instance_role).is_equal_to(instance_role)
assert_that(iam.instance_profile).is_equal_to(instance_profile)
iam = QueueIamSchema().load(iam_dict)
assert_that(iam.instance_role).is_equal_to(instance_role)
assert_that(iam.instance_profile).is_equal_to(instance_profile)
DUMMY_AWSBATCH_QUEUE = {
"Name": "queue1",
"Networking": {"SubnetIds": ["subnet-12345678"]},
"ComputeResources": [{"Name": "compute_resource1", "InstanceTypes": ["c5.xlarge"]}],
}
def dummy_slurm_queue(name="queue1", number_of_compute_resource=1):
slurm_queue = {
"Name": name,
"Networking": {"SubnetIds": ["subnet-12345678"]},
"ComputeResources": [],
}
for index in range(number_of_compute_resource):
slurm_queue["ComputeResources"].append(
dummy_slurm_compute_resource(f"compute_resource{index}", f"c{index}.xlarge")
)
return slurm_queue
def dummy_slurm_queue_list(queue_num):
return [dummy_slurm_queue(f"queue{index}") for index in range(queue_num)]
def dummy_slurm_compute_resource(name, instance_type):
return {"Name": name, "InstanceType": instance_type}
@pytest.mark.parametrize(
"config_dict, failure_message",
[
# failures
({"Scheduler": "awsbatch"}, "AwsBatchQueues section must be specified"),
({"Scheduler": "slurm"}, "SlurmQueues section must be specified"),
(
{"Scheduler": "slurm", "AwsBatchQueues": [DUMMY_AWSBATCH_QUEUE]},
"Queues section is not appropriate to the Scheduler",
),
(
{"Scheduler": "awsbatch", "SlurmQueues": [dummy_slurm_queue()]},
"Queues section is not appropriate to the Scheduler",
),
(
{"Scheduler": "slurm", "SlurmQueues": [dummy_slurm_queue()], "AwsBatchQueues": [DUMMY_AWSBATCH_QUEUE]},
"Queues section is not appropriate to the Scheduler",
),
(
{"Scheduler": "slurm", "SlurmSettings": {}, "AwsBatchSettings": {}},
"Multiple .*Settings sections cannot be specified in the Scheduling section",
),
# success
({"Scheduler": "slurm", "SlurmQueues": [dummy_slurm_queue()]}, None),
(
{
"Scheduler": "slurm",
"SlurmQueues": [
dummy_slurm_queue(),
{
"Name": "queue2",
"Networking": {"SubnetIds": ["subnet-12345678"]},
"ComputeResources": [
{"Name": "compute_resource3", "InstanceType": "c5.2xlarge", "MaxCount": 5},
{"Name": "compute_resource4", "InstanceType": "c4.2xlarge"},
],
},
],
},
None,
),
(
{
"Scheduler": "slurm",
"SlurmQueues": [
dummy_slurm_queue(),
{
"Name": "queue2",
"Networking": {"SubnetIds": ["subnet-00000000"]},
"ComputeResources": [
{"Name": "compute_resource3", "InstanceType": "c5.2xlarge", "MaxCount": 5},
{"Name": "compute_resource4", "InstanceType": "c4.2xlarge"},
],
},
],
},
"The SubnetIds used for all of the queues should be the same",
),
( # maximum slurm queue length
{
"Scheduler": "slurm",
"SlurmQueues": dummpy_slurm_queue_list(10),
},
None,
),
( # beyond maximum slurm queue length
{
"Scheduler": "slurm",
"SlurmQueues": dummpy_slurm_queue_list(11),
},
f"Queue.*Longer than maximum length {MAX_NUMBER_OF_QUEUES}",
),
        ( # maximum number of compute resources per queue
{
"Scheduler": "slurm",
"SlurmQueues": [dummy_slurm_queue("queue1", number_of_compute_resource=5)],
},
None,
),
        ( # beyond maximum number of compute resources per queue
{
"Scheduler": "slurm",
"SlurmQueues": [dummy_slurm_queue("queue1", number_of_compute_resource=6)],
},
f"ComputeResources.*Longer than maximum length {MAX_NUMBER_OF_COMPUTE_RESOURCES}",
),
],
)
def test_scheduling_schema(mocker, config_dict, failure_message):
mock_aws_api(mocker)
if failure_message:
with pytest.raises(ValidationError, match=failure_message):
SchedulingSchema().load(config_dict)
else:
SchedulingSchema().load(config_dict)
@pytest.mark.parametrize(
"config_dict, failure_message",
[
# failures
({"StorageType": "Ebs"}, "Missing data for required field."),
({"StorageType": "Ebs", "MountDir": "mount/tmp"}, "Missing data for required field."),
({"StorageType": "Ebs", "Name": "name"}, "Missing data for required field."),
({"StorageType": "Efs", "Name": "name"}, "Missing data for required field."),
(
{
"StorageType": "Ebs",
"Name": "name",
"MountDir": "mount/tmp",
"FsxLustreSettings": {"CopyTagsToBackups": True},
},
"SharedStorage > .*Settings section is not appropriate to the",
),
(
{"StorageType": "Efs", "Name": "name", "MountDir": "mount/tmp", "EbsSettings": {"Encrypted": True}},
"SharedStorage > .*Settings section is not appropriate to the",
),
(
{"StorageType": "FsxLustre", "Name": "name", "MountDir": "mount/tmp", "EfsSettings": {"Encrypted": True}},
"SharedStorage > .*Settings section is not appropriate to the",
),
(
{
"StorageType": "Efs",
"Name": "name",
"MountDir": "mount/tmp",
"EbsSettings": {"Encrypted": True},
"EfsSettings": {"Encrypted": True},
},
"Multiple .*Settings sections cannot be specified in the SharedStorage items",
),
# success
(
{
"StorageType": "FsxLustre",
"Name": "name",
"MountDir": "mount/tmp",
"FsxLustreSettings": {"CopyTagsToBackups": True},
},
None,
),
({"StorageType": "Efs", "Name": "name", "MountDir": "mount/tmp", "EfsSettings": {"Encrypted": True}}, None),
({"StorageType": "Ebs", "Name": "name", "MountDir": "mount/tmp", "EbsSettings": {"Encrypted": True}}, None),
],
)
def test_shared_storage_schema(mocker, config_dict, failure_message):
mock_aws_api(mocker)
if failure_message:
with pytest.raises(ValidationError, match=failure_message):
SharedStorageSchema().load(config_dict)
else:
SharedStorageSchema().load(config_dict)
@pytest.mark.parametrize(
"scheduler, install_intel_packages_enabled, failure_message",
[
("slurm", True, None),
("slurm", False, None),
("awsbatch", True, "use of the IntelSoftware configuration is not supported when using awsbatch"),
("awsbatch", False, None),
],
)
def test_scheduler_constraints_for_intel_packages(
mocker, test_datadir, scheduler, install_intel_packages_enabled, failure_message
):
mock_aws_api(mocker)
config_file_name = f"{scheduler}.{'enabled' if install_intel_packages_enabled else 'disabled'}.yaml"
if failure_message:
with pytest.raises(
ValidationError,
match=failure_message,
):
load_cluster_model_from_yaml(config_file_name, test_datadir)
else:
_, cluster = load_cluster_model_from_yaml(config_file_name, test_datadir)
assert_that(cluster.scheduling.scheduler).is_equal_to(scheduler)
assert_that(cluster.additional_packages.intel_software.intel_hpc_platform).is_equal_to(
install_intel_packages_enabled
)
|
py | 7df7338e99334d5f9af467adbfe3a2a4b24ae78b | # --------------------------
# UFSC - CTC - INE - INE5603
# Ship Exercise
# --------------------------
# Class that manages a ship's containers.
#
from view.menu import Menu
class PainelGerenciaContainer:
def gerencie(self, navio):
opcoes = {
0: 'Voltar',
1: 'Carregar Container',
2: 'Descarregar Container',
3: 'Verificar se Carrega Container',
4: 'Procurar Containers por Peso'
}
menu = Menu('Gerenciando Containers', opcoes)
voltar = False
while not voltar:
opcao = menu.pergunte()
if opcao == 0:
voltar = True
else:
if opcao == 1:
self._carregue(navio)
elif opcao == 2:
self._descarregue(navio)
elif opcao == 3:
self._verifique(navio)
elif opcao == 4:
self._procure_por_peso(navio)
def _carregue(self, navio):
print('** Carregar Container **')
codigo = input('Código do Container: ')
refrigerado = 'S' == input('Refrigerado? [s/n]').upper()
peso_mercadoria = int(input('Peso da mercadoria: '))
if navio.carregue(codigo, refrigerado, peso_mercadoria):
print('Carregou!')
else:
print('Não foi possível carregar.')
input('Tecle ENTER para continuar')
def _descarregue(self, navio):
print('** Descarregar Container **')
codigo = input('Código do Container a ser descarregado: ')
container = navio.descarregue(codigo)
if container is None:
print('Não há container com código {}'.format(codigo))
else:
print('Descarregado container {} cujo peso é {}'.format(codigo, container.peso()))
input('Tecle ENTER para continuar')
def _verifique(self, navio):
print('** Verificar se Carrega Container **')
codigo = input('Código do Container a ser verificado: ')
if navio.carregando(codigo):
print('Sim, está carregando este container.')
else:
print('Não, não carrega este container.')
input('Tecle ENTER para continuar')
def _procure_por_peso(self, navio):
print('** Procurar Containers Mais Leves Que **')
peso = int(input('Qual o peso limite desejado? '))
containers = navio.containers_abaixo_de_peso(peso)
if len(containers) == 0:
print('Não há nenhum container cujo peso esteja abaixo de {}'.format(peso))
else:
print('Há {} container(s) cujo peso está abaixo de {}'.format(len(containers), peso))
for container in containers:
print(container)
input('Tecle ENTER para continuar')
|
py | 7df73434435dbe2dc01627b0ef5899eb2317d034 | # coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TABLE BERT utility functions."""
from tapas.models.bert import modeling
from tapas.utils import attention_utils
import tensorflow.compat.v1 as tf
_AttentionMode = attention_utils.RestrictAttentionMode
def create_model(
features,
mode,
bert_config,
restrict_attention_mode=_AttentionMode.FULL,
restrict_attention_bucket_size=0,
restrict_attention_header_size=None,
restrict_attention_row_heads_ratio=0.5,
restrict_attention_sort_after_projection=True,
token_weights=None,
disabled_features=None,
disable_position_embeddings=False,
reset_position_index_per_cell=False,
proj_value_length=None,
):
"""Creates a TABLE BERT model."""
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
token_type_features = [
"segment_ids", "column_ids", "row_ids", "prev_label_ids", "column_ranks",
"inv_column_ranks", "numeric_relations"
]
token_type_ids = []
for key in token_type_features:
if disabled_features is not None and key in disabled_features:
token_type_ids.append(tf.zeros_like(features[key]))
else:
token_type_ids.append(features[key])
attention_mask = None
custom_attention_layer = None
num_row_heads = int(bert_config.num_attention_heads *
restrict_attention_row_heads_ratio)
num_column_heads = bert_config.num_attention_heads - num_row_heads
if restrict_attention_mode == _AttentionMode.HEADWISE_SAME_COLUMN_OR_ROW:
attention_mask = attention_utils.compute_headwise_sparse_attention_mask(
num_row_heads=num_row_heads,
num_column_heads=num_column_heads,
bucket_size=restrict_attention_bucket_size,
header_size=restrict_attention_header_size,
**features)
elif restrict_attention_mode == _AttentionMode.SAME_COLUMN_OR_ROW:
attention_mask = attention_utils.compute_sparse_attention_mask(**features)
elif restrict_attention_mode == _AttentionMode.HEADWISE_EFFICIENT:
custom_attention_layer = attention_utils.create_bucketed_attention_layer(
input_mask=features["input_mask"],
input_header=tf.math.equal(features["segment_ids"], 0),
bucket_size=restrict_attention_bucket_size,
header_size=restrict_attention_header_size,
sort_after_projection=restrict_attention_sort_after_projection,
token_type_ids=[(num_row_heads, True, features["row_ids"]),
(num_column_heads, False, features["column_ids"])])
elif restrict_attention_mode == _AttentionMode.FULL:
pass
else:
raise ValueError(f"Unknown attention mode: {restrict_attention_mode}")
return modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=features["input_ids"],
input_mask=features["input_mask"],
attention_mask=attention_mask,
custom_attention_layer=custom_attention_layer,
token_weights=token_weights,
token_type_ids=token_type_ids,
use_position_embeddings=not disable_position_embeddings,
reset_position_index_per_cell=reset_position_index_per_cell,
proj_value_length=proj_value_length,
)
|
py | 7df73447566991a673cb31f09fcd3248cef954fd | # labplus mPython library
# MIT license; Copyright (c) 2018 labplus
# V1.0 Zhang KaiHua([email protected])
# mPython built-in peripheral drivers
# history:
# V1.1 add OLED draw function, add buzz.freq(). by tangliufeng
# V1.2 add servo/ui class, by tangliufeng
from machine import I2C, PWM, Pin, ADC, TouchPad
from ssd1106 import SSD1106_I2C
import esp, math, time, network
import ustruct, array
from neopixel import NeoPixel
# from esp import dht_readinto
from time import sleep_ms, sleep_us, sleep
import framebuf
import calibrate_img
from micropython import schedule,const
i2c = I2C(0, scl=Pin(Pin.P19), sda=Pin(Pin.P20), freq=400000)
class Font(object):
def __init__(self, font_address=0x400000):
self.font_address = font_address
buffer = bytearray(18)
esp.flash_read(self.font_address, buffer)
self.header, \
self.height, \
self.width, \
self.baseline, \
self.x_height, \
self.Y_height, \
self.first_char,\
self.last_char = ustruct.unpack('4sHHHHHHH', buffer)
self.first_char_info_address = self.font_address + 18
def GetCharacterData(self, c):
uni = ord(c)
# if uni not in range(self.first_char, self.last_char):
# return None
if (uni < self.first_char or uni > self.last_char):
return None
char_info_address = self.first_char_info_address + \
(uni - self.first_char) * 6
buffer = bytearray(6)
esp.flash_read(char_info_address, buffer)
ptr_char_data, len = ustruct.unpack('IH', buffer)
if (ptr_char_data) == 0 or (len == 0):
return None
buffer = bytearray(len)
esp.flash_read(ptr_char_data + self.font_address, buffer)
return buffer
class TextMode():
normal = 1
rev = 2
trans = 3
xor = 4
class OLED(SSD1106_I2C):
""" 128x64 oled display """
def __init__(self):
super().__init__(128, 64, i2c)
self.f = Font()
if self.f is None:
raise Exception('font load failed')
def DispChar(self, s, x, y, mode=TextMode.normal, auto_return=False):
row = 0
str_width = 0
if self.f is None:
return
for c in s:
data = self.f.GetCharacterData(c)
if data is None:
if auto_return is True:
x = x + self.f.width
else:
x = x + self.width
continue
width, bytes_per_line = ustruct.unpack('HH', data[:4])
# print('character [%d]: width = %d, bytes_per_line = %d' % (ord(c)
# , width, bytes_per_line))
if auto_return is True:
if x > self.width - width:
str_width += self.width - x
x = 0
row += 1
y += self.f.height
if y > (self.height - self.f.height)+0:
y, row = 0, 0
for h in range(0, self.f.height):
w = 0
i = 0
while w < width:
mask = data[4 + h * bytes_per_line + i]
if (width - w) >= 8:
n = 8
else:
n = width - w
py = y + h
page = py >> 3
bit = 0x80 >> (py % 8)
for p in range(0, n):
px = x + w + p
c = 0
if (mask & 0x80) != 0:
if mode == TextMode.normal or \
mode == TextMode.trans:
c = 1
if mode == TextMode.rev:
c = 0
if mode == TextMode.xor:
c = self.buffer[page * (self.width if auto_return is True else 128) + px] & bit
if c != 0:
c = 0
else:
c = 1
super().pixel(px, py, c)
else:
if mode == TextMode.normal:
c = 0
super().pixel(px, py, c)
if mode == TextMode.rev:
c = 1
super().pixel(px, py, c)
mask = mask << 1
w = w + 8
i = i + 1
x = x + width + 1
str_width += width + 1
return (str_width-1,(x-1, y))
def DispChar_font(self, font, s, x, y, invert=False):
"""
Custom font display. Based on https://github.com/peterhinch/micropython-font-to-py
:param font: a font module generated from a `ttf` or `otf` file with the font_to_py.py script.
"""
screen_width = self.width
screen_height = self.height
text_row = x
text_col = y
text_length = 0
if font.hmap():
font_map = framebuf.MONO_HMSB if font.reverse() else framebuf.MONO_HLSB
else:
raise ValueError('Font must be horizontally mapped.')
for c in s:
glyph, char_height, char_width = font.get_ch(c)
buf = bytearray(glyph)
if invert:
for i, v in enumerate(buf):
buf[i] = 0xFF & ~ v
fbc = framebuf.FrameBuffer(buf, char_width, char_height, font_map)
if text_row + char_width > screen_width - 1:
text_length += screen_width-text_row
text_row = 0
text_col += char_height
if text_col + char_height > screen_height + 2:
text_col = 0
super().blit(fbc, text_row, text_col)
text_row = text_row + char_width+1
text_length += char_width+1
return (text_length-1, (text_row-1, text_col))
# display
if 60 in i2c.scan():
oled = OLED()
display = oled
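# A hedged usage sketch (not from the original source): `myfont` is a placeholder for a
# font module generated by font_to_py.py; kept as comments so importing this driver has
# no side effects.
# import myfont
# oled.DispChar('mPython', 0, 0)
# oled.DispChar_font(myfont, 'Hello', 0, 32)
# oled.show()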
class Accelerometer():
"""MSA300"""
# Range and resolustion
RANGE_2G = const(0)
RANGE_4G = const(1)
RANGE_8G = const(2)
RANGE_16G = const(3)
RES_14_BIT = const(0)
RES_12_BIT = const(1)
RES_10_BIT = const(2)
# Event
TILT_LEFT = const(0)
TILT_RIGHT = const(1)
TILT_UP = const(2)
TILT_DOWN = const(3)
FACE_UP = const(4)
FACE_DOWN = const(5)
SINGLE_CLICK = const(6)
DOUBLE_CLICK = const(7)
FREEFALL = const(8)
def __init__(self):
self.addr = 38
self.i2c = i2c
self.set_resolution(Accelerometer.RES_10_BIT)
self.set_range(Accelerometer.RANGE_2G)
self._writeReg(0x12, 0x03) # polarity of y,z axis,
self._writeReg(0x11, 0) # set power mode = normal
# interrupt
self._writeReg(0x16, 0x70) # int enabled: Orient | S_TAP | D_TAP
self._writeReg(0x17, 0x08) # int enabled: Freefall
self._writeReg(0x19, 0x71) # int1 map to: Orient, S_TAP, D_TAP, Freefall
self._writeReg(0x20, 0x02) # int1 active level = 0, output = OD
self._writeReg(0x21, 0x0C) # int tempoary latched 25ms
# freefall:
# single mode: |acc_x| < Threshold && |acc_y| < Threshold && |acc_z| < Threshold, at least time > Duration
# sum mode: |acc_x| + |acc_y| + |acc_z| < Threshold, at least time > Duration
self._writeReg(0x22, 20) # Freefall Duration:(n+1)*2ms, range from 2ms to 512ms
self._writeReg(0x23, 48) # Freefall Threshold: n*7.81mg
self._writeReg(0x24, 0x01) # Freefall mode = 0-singlemode;hysteresis = n*125mg
# tap:
self._writeReg(0x2A, 0x06) # Tap duration:quit = 30ms, shock=50ms, time window for secent shock=500ms
self._writeReg(0x2B, 0x0A) # Tap threshold = 10*[62.5mg@2G | 125mg@4G | 250mg@8G | 500mg@16g]
# Orient
self._writeReg(0x2C, 0x18) # Orient hysteresis= 1*62.5mg;
# block mode = 10 z_axis blocking or slope in any axis > 0.2g;
# orient mode = 00-symetrical
self._writeReg(0x2D, 8) # Z-axis block
# int pin irq register
self.int = Pin(37, Pin.IN)
self.int.irq(trigger=Pin.IRQ_FALLING, handler=self.irq)
# event handler
self.event_tilt_up = None
self.event_tilt_down = None
self.event_tilt_left = None
self.event_tilt_right = None
self.event_face_up = None
self.event_face_down = None
self.event_single_click = None
self.event_double_click = None
self.event_freefall = None
def irq(self, arg):
reg_int = self._readReg(0x09)[0]
reg_orent = self._readReg(0x0C)[0]
# orient_int
if (reg_int & 0x40):
if ((reg_orent & 0x30) == 0x00 and self.event_tilt_left is not None):
schedule(self.event_tilt_left, self.TILT_LEFT)
if ((reg_orent & 0x30) == 0x10 and self.event_tilt_right is not None):
schedule(self.event_tilt_right, self.TILT_RIGHT)
if ((reg_orent & 0x30) == 0x20 and self.event_tilt_up is not None):
schedule(self.event_tilt_up, self.TILT_UP)
if ((reg_orent & 0x30) == 0x30 and self.event_tilt_down is not None):
schedule(self.event_tilt_down, self.TILT_DOWN)
if ((reg_orent & 0x40) == 0x00 and self.event_face_up):
schedule(self.event_face_up, self.FACE_UP)
if ((reg_orent & 0x40) == 0x40 and self.event_face_down):
schedule(self.event_face_down, self.FACE_DOWN)
# single tap
if (reg_int & 0x20):
if (self.event_single_click is not None):
schedule(self.event_single_click, self.SINGLE_CLICK)
# double tap
if (reg_int & 0x10):
if (self.event_double_click is not None):
schedule(self.event_double_click, self.DOUBLE_CLICK)
# freefall
if (reg_int & 0x01):
if (self.event_freefall is not None):
schedule(self.event_freefall, self.FREEFALL)
# print("acc sensor interrupt, because 0x%2x, orient = 0x%2x" % (reg_int, reg_orent))
def _readReg(self, reg, nbytes=1):
return self.i2c.readfrom_mem(self.addr, reg, nbytes)
def _writeReg(self, reg, value):
self.i2c.writeto_mem(self.addr, reg, value.to_bytes(1, 'little'))
def set_resolution(self, resolution):
format = self._readReg(0x0f, 1)
format = format[0] & ~0xC
format |= (resolution << 2)
self._writeReg(0x0f, format)
def set_range(self, range):
self.range = range
format = self._readReg(0x0f, 1)
format = format[0] & ~0x3
format |= range
self._writeReg(0x0f, format)
def set_offset(self, x=None, y=None, z=None):
for i in (x, y, z):
if i is not None:
if i < -1 or i > 1:
raise ValueError("out of range,only offset 1 gravity")
if x is not None:
self._writeReg(0x39, int(round(x/0.0039)))
elif y is not None:
self._writeReg(0x38, int(round(y/0.0039)))
elif z is not None:
self._writeReg(0x3A, int(round(z/0.0039)))
def get_x(self):
retry = 0
if (retry < 5):
try:
buf = self._readReg(0x02, 2)
x = ustruct.unpack('h', buf)[0]
return x / 4 / 4096 * 2**self.range
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def get_y(self):
retry = 0
if (retry < 5):
try:
buf = self._readReg(0x04, 2)
y = ustruct.unpack('h', buf)[0]
return y / 4 / 4096 * 2**self.range
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def get_z(self):
retry = 0
if (retry < 5):
try:
buf = self._readReg(0x06, 2)
z = ustruct.unpack('h', buf)[0]
return z / 4 / 4096 * 2**self.range
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def roll_pitch_angle(self):
x, y, z = self.get_x(), self.get_y(), -self.get_z()
# vector normalize
mag = math.sqrt(x ** 2 + y ** 2+z ** 2)
x /= mag
y /= mag
z /= mag
roll = math.degrees(-math.asin(y))
pitch = math.degrees(math.atan2(x, z))
return roll, pitch
# 3 axis accelerometer
accelerometer = Accelerometer()
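# A hedged usage sketch (not from the original source): event callbacks receive the
# event constant; `on_click` is a placeholder name.
# def on_click(event):
#     print('single click, event code:', event)
# accelerometer.event_single_click = on_click
# print(accelerometer.get_x(), accelerometer.get_y(), accelerometer.get_z())  # in g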
class Magnetic(object):
""" MMC5983MA driver """
def __init__(self):
self.addr = 48
self.i2c = i2c
# Raw sensor data; multiply by 0.25 to convert to mGS
self.raw_x = 0.0
self.raw_y = 0.0
self.raw_z = 0.0
# Calibrated offsets, based on the raw data
self.cali_offset_x = 0.0
self.cali_offset_y = 0.0
self.cali_offset_z = 0.0
# Tare offsets (like the tare function of an electronic scale), based on the raw data.
self.peeling_x = 0.0
self.peeling_y = 0.0
self.peeling_z = 0.0
self.is_peeling = 0
self.i2c.writeto(self.addr, b'\x09\x20\xbd\x00', True)
# self.i2c.writeto(self.addr, b'\x09\x21', True)
def _set_offset(self):
self.i2c.writeto(self.addr, b'\x09\x08', True) #set
self.i2c.writeto(self.addr, b'\x09\x01', True)
while True:
self.i2c.writeto(self.addr, b'\x08', False)
buf = self.i2c.readfrom(self.addr, 1)
status = ustruct.unpack('B', buf)[0]
if(status & 0x01):
break
self.i2c.writeto(self.addr, b'\x00', False)
buf = self.i2c.readfrom(self.addr, 6)
data = ustruct.unpack('>3H', buf)
self.i2c.writeto(self.addr, b'\x09\x10', True) #reset
self.i2c.writeto(self.addr, b'\x09\x01', True)
while True:
self.i2c.writeto(self.addr, b'\x08', False)
buf = self.i2c.readfrom(self.addr, 1)
status = ustruct.unpack('B', buf)[0]
if(status & 0x01):
break
self.i2c.writeto(self.addr, b'\x00', False)
buf = self.i2c.readfrom(self.addr, 6)
data1 = ustruct.unpack('>3H', buf)
self.x_offset = (data[0] + data1[0])/2
self.y_offset = (data[1] + data1[1])/2
self.z_offset = (data[2] + data1[2])/2
# print(self.x_offset)
# print(self.y_offset)
# print(self.z_offset)
def _get_raw(self):
retry = 0
if (retry < 5):
try:
self.i2c.writeto(self.addr, b'\x09\x08', True) #set
self.i2c.writeto(self.addr, b'\x09\x01', True)
while True:
self.i2c.writeto(self.addr, b'\x08', False)
buf = self.i2c.readfrom(self.addr, 1)
status = ustruct.unpack('B', buf)[0]
if(status & 0x01):
break
self.i2c.writeto(self.addr, b'\x00', False)
buf = self.i2c.readfrom(self.addr, 6)
data = ustruct.unpack('>3H', buf)
self.i2c.writeto(self.addr, b'\x09\x10', True) #reset
self.i2c.writeto(self.addr, b'\x09\x01', True)
while True:
self.i2c.writeto(self.addr, b'\x08', False)
buf = self.i2c.readfrom(self.addr, 1)
status = ustruct.unpack('B', buf)[0]
if(status & 0x01):
break
self.i2c.writeto(self.addr, b'\x00', False)
buf = self.i2c.readfrom(self.addr, 6)
data1 = ustruct.unpack('>3H', buf)
self.raw_x = -((data[0] - data1[0])/2)
self.raw_y = -((data[1] - data1[1])/2)
self.raw_z = -((data[2] - data1[2])/2)
# print(str(self.x) + " " + str(self.y) + " " + str(self.z))
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def peeling(self):
self._get_raw()
self.peeling_x = self.raw_x
self.peeling_y = self.raw_y
self.peeling_z = self.raw_z
self.is_peeling = 1
def clear_peeling(self):
self.peeling_x = 0.0
self.peeling_y = 0.0
self.peeling_z = 0.0
self.is_peeling = 0
def get_x(self):
self._get_raw()
return self.raw_x * 0.25
def get_y(self):
self._get_raw()
return self.raw_y * 0.25
def get_z(self):
self._get_raw()
return self.raw_z * 0.25
def get_field_strength(self):
self._get_raw()
if self.is_peeling == 1:
return (math.sqrt((self.raw_x - self.peeling_x)*(self.raw_x - self.peeling_x) + (self.raw_y - self.peeling_y)*(self.raw_y - self.peeling_y) + (self.raw_z - self.peeling_z)*(self.raw_z - self.peeling_z)))*0.25
return (math.sqrt(self.raw_x * self.raw_x + self.raw_y * self.raw_y + self.raw_z * self.raw_z))*0.25
def calibrate(self):
oled.fill(0)
oled.DispChar("步骤1:", 0,0,1)
oled.DispChar("如图",0,26,1)
oled.DispChar("转几周",0,43,1)
oled.bitmap(64,0,calibrate_img.rotate,64,64,1)
oled.show()
self._get_raw()
min_x = max_x = self.raw_x
min_y = max_y = self.raw_y
min_z = max_z = self.raw_z
ticks_start = time.ticks_ms()
while (time.ticks_diff(time.ticks_ms(), ticks_start) < 15000) :
self._get_raw()
min_x = min(self.raw_x, min_x)
min_y = min(self.raw_y, min_y)
max_x = max(self.raw_x, max_x)
max_y = max(self.raw_y, max_y)
time.sleep_ms(100)
self.cali_offset_x = (max_x + min_x) / 2
self.cali_offset_y = (max_y + min_y) / 2
print('cali_offset_x: ' + str(self.cali_offset_x) + ' cali_offset_y: ' + str(self.cali_offset_y))
oled.fill(0)
oled.DispChar("步骤2:", 85,0,1)
oled.DispChar("如图",85,26,1)
oled.DispChar("转几周",85,43,1)
oled.bitmap(0,0,calibrate_img.rotate1,64,64,1)
oled.show()
ticks_start = time.ticks_ms()
while (time.ticks_diff(time.ticks_ms(), ticks_start) < 15000) :
self._get_raw()
min_z = min(self.raw_z, min_z)
# min_y = min(self.raw_y, min_y)
max_z = max(self.raw_z, max_z)
# max_y = max(self.raw_y, max_y)
time.sleep_ms(100)
self.cali_offset_z = (max_z + min_z) / 2
# self.cali_offset_y = (max_y + min_y) / 2
print('cali_offset_z: ' + str(self.cali_offset_z))
# print('cali_offset_y: ' + str(self.cali_offset_y))
oled.fill(0)
oled.DispChar("校准完成", 40,24,1)
oled.show()
oled.fill(0)
def get_heading(self):
self._get_raw()
# if (accelerometer):
# # use accelerometer get inclination
# x = accelerometer.get_x()
# y = accelerometer.get_y()
# z = accelerometer.get_z()
# phi = math.atan2(x, -z)
# theta = math.atan2(y, (x*math.sin(phi) - z*math.cos(phi)))
# sinPhi = math.sin(phi)
# cosPhi = math.cos(phi)
# sinTheta = math.sin(theta)
# cosTheta = math.cos(theta)
# heading = (math.atan2(x*cosTheta + y*sinTheta*sinPhi + z*sinTheta*cosPhi, z*sinPhi - y*cosPhi)) * (180 / 3.14159265) + 180
# return heading
temp_x = self.raw_x - self.cali_offset_x
temp_y = self.raw_y - self.cali_offset_y
temp_z = self.raw_z - self.cali_offset_z
heading = math.atan2(temp_y, -temp_x) * (180 / 3.14159265) + 180
return heading
def _get_temperature(self):
retry = 0
if (retry < 5):
try:
self.i2c.writeto(self.addr, b'\x09\x02', True)
while True:
self.i2c.writeto(self.addr, b'\x08', False)
buf = self.i2c.readfrom(self.addr, 1)
status = ustruct.unpack('B', buf)[0]
if(status & 0x02):
break
self.i2c.writeto(self.addr, b'\x07', False)
buf = self.i2c.readfrom(self.addr, 1)
temp = (ustruct.unpack('B', buf)[0])*0.8 -75
# print(data)
return temp
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def _get_id(self):
retry = 0
if (retry < 5):
try:
self.i2c.writeto(self.addr, bytearray([0x2f]), False)
buf = self.i2c.readfrom(self.addr, 1, True)
print(buf)
id = ustruct.unpack('B', buf)[0]
return id
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
# Magnetic
if 48 in i2c.scan():
magnetic = Magnetic()
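# A hedged usage sketch (assumes the MMC5983MA was detected on the I2C bus):
# calibrate once, then read the heading and field strength.
# magnetic.calibrate()
# print(magnetic.get_heading())         # degrees, 0..360
# print(magnetic.get_field_strength())  # mGS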
class BME280(object):
def __init__(self):
self.addr = 119
# The “ctrl_hum” register sets the humidity data acquisition options of the device
# 0x01 = [2:0]oversampling ×1
i2c.writeto(self.addr, b'\xF2\x01')
# The “ctrl_meas” register sets the pressure and temperature data acquisition options of the device.
# The register needs to be written after changing “ctrl_hum” for the changes to become effective.
# 0x27 = [7:5]Pressure oversampling ×1 | [4:2]Temperature oversampling ×4 | [1:0]Normal mode
i2c.writeto(self.addr, b'\xF4\x27')
# The “config” register sets the rate, filter and interface options of the device. Writes to the “config”
# register in normal mode may be ignored. In sleep mode writes are not ignored.
i2c.writeto(self.addr, b'\xF5\x00')
i2c.writeto(self.addr, b'\x88', False)
bytes = i2c.readfrom(self.addr, 6)
self.dig_T = ustruct.unpack('Hhh', bytes)
i2c.writeto(self.addr, b'\x8E', False)
bytes = i2c.readfrom(self.addr, 18)
self.dig_P = ustruct.unpack('Hhhhhhhhh', bytes)
i2c.writeto(self.addr, b'\xA1', False)
self.dig_H = array.array('h', [0, 0, 0, 0, 0, 0])
self.dig_H[0] = i2c.readfrom(self.addr, 1)[0]
i2c.writeto(self.addr, b'\xE1', False)
buff = i2c.readfrom(self.addr, 7)
self.dig_H[1] = ustruct.unpack('h', buff[0:2])[0]
self.dig_H[2] = buff[2]
self.dig_H[3] = (buff[3] << 4) | (buff[4] & 0x0F)
self.dig_H[4] = (buff[5] << 4) | (buff[4] >> 4 & 0x0F)
self.dig_H[5] = buff[6]
def temperature(self):
retry = 0
if (retry < 5):
try:
i2c.writeto(self.addr, b'\xFA', False)
buff = i2c.readfrom(self.addr, 3)
T = (((buff[0] << 8) | buff[1]) << 4) | (buff[2] >> 4 & 0x0F)
c1 = (T / 16384.0 - self.dig_T[0] / 1024.0) * self.dig_T[1]
c2 = ((T / 131072.0 - self.dig_T[0] / 8192.0) * (T / 131072.0 - self.dig_T[0] / 8192.0)) * self.dig_T[2]
self.tFine = c1 + c2
return self.tFine / 5120.0
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def pressure(self):
retry = 0
if (retry < 5):
try:
self.temperature()
i2c.writeto(self.addr, b'\xF7', False)
buff = i2c.readfrom(self.addr, 3)
P = (((buff[0] << 8) | buff[1]) << 4) | (buff[2] >> 4 & 0x0F)
c1 = self.tFine / 2.0 - 64000.0
c2 = c1 * c1 * self.dig_P[5] / 32768.0
c2 = c2 + c1 * self.dig_P[4] * 2.0
c2 = c2 / 4.0 + self.dig_P[3] * 65536.0
c1 = (self.dig_P[2] * c1 * c1 / 524288.0 + self.dig_P[1] * c1) / 524288.0
c1 = (1.0 + c1 / 32768.0) * self.dig_P[0]
if c1 == 0.0:
return 0
p = 1048576.0 - P
p = (p - c2 / 4096.0) * 6250.0 / c1
c1 = self.dig_P[8] * p * p / 2147483648.0
c2 = p * self.dig_P[7] / 32768.0
p = p + (c1 + c2 + self.dig_P[6]) / 16.0
return p
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
def humidity(self):
retry = 0
if (retry < 5):
try:
self.temperature()
i2c.writeto(self.addr, b'\xFD', False)
buff = i2c.readfrom(self.addr, 2)
H = buff[0] << 8 | buff[1]
h = self.tFine - 76800.0
h = (H - (self.dig_H[3] * 64.0 + self.dig_H[4] / 16384.0 * h)) * \
(self.dig_H[1] / 65536.0 * (1.0 + self.dig_H[5] / 67108864.0 * h * \
(1.0 + self.dig_H[2] / 67108864.0 * h)))
h = h * (1.0 - self.dig_H[0] * h / 524288.0)
if h > 100.0:
return 100.0
elif h < 0.0:
return 0.0
else:
return h
except:
retry = retry + 1
else:
raise Exception("i2c read/write error!")
# bm280
if 119 in i2c.scan():
bme280 = BME280()
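# A hedged usage sketch (assumes the BME280 was detected at I2C address 119):
# print(bme280.temperature())  # degrees Celsius
# print(bme280.pressure())     # Pa
# print(bme280.humidity())     # % relative humidity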
class PinMode(object):
IN = 1
OUT = 2
PWM = 3
ANALOG = 4
OUT_DRAIN = 5
pins_remap_esp32 = (33, 32, 35, 34, 39, 0, 16, 17, 26, 25, 36, 2, -1, 18, 19, 21, 5, -1, -1, 22, 23, -1, -1, 27, 14, 12,
13, 15, 4)
class MPythonPin():
def __init__(self, pin, mode=PinMode.IN, pull=None):
if mode not in [PinMode.IN, PinMode.OUT, PinMode.PWM, PinMode.ANALOG, PinMode.OUT_DRAIN]:
raise TypeError("mode must be 'IN, OUT, PWM, ANALOG,OUT_DRAIN'")
if pin == 4:
raise TypeError("P4 is used for light sensor")
if pin == 10:
raise TypeError("P10 is used for sound sensor")
try:
self.id = pins_remap_esp32[pin]
except IndexError:
raise IndexError("Out of Pin range")
if mode == PinMode.IN:
# if pin in [3]:
# raise TypeError('IN not supported on P%d' % pin)
self.Pin = Pin(self.id, Pin.IN, pull)
if mode == PinMode.OUT:
if pin in [2, 3]:
raise TypeError('OUT not supported on P%d' % pin)
self.Pin = Pin(self.id, Pin.OUT, pull)
if mode == PinMode.OUT_DRAIN:
if pin in [2, 3]:
raise TypeError('OUT_DRAIN not supported on P%d' % pin)
self.Pin = Pin(self.id, Pin.OPEN_DRAIN, pull)
if mode == PinMode.PWM:
if pin not in [0, 1, 5, 6, 7, 8, 9, 11, 13, 14, 15, 16, 19, 20, 23, 24, 25, 26, 27, 28]:
raise TypeError('PWM not supported on P%d' % pin)
self.pwm = PWM(Pin(self.id), duty=0)
if mode == PinMode.ANALOG:
if pin not in [0, 1, 2, 3, 4, 10]:
raise TypeError('ANALOG not supported on P%d' % pin)
self.adc = ADC(Pin(self.id))
self.adc.atten(ADC.ATTN_11DB)
self.mode = mode
def irq(self, handler=None, trigger=Pin.IRQ_RISING):
if not self.mode == PinMode.IN:
raise TypeError('the pin is not in IN mode')
return self.Pin.irq(handler, trigger)
def read_digital(self):
if not self.mode == PinMode.IN:
raise TypeError('the pin is not in IN mode')
return self.Pin.value()
def write_digital(self, value):
if self.mode not in [PinMode.OUT, PinMode.OUT_DRAIN]:
raise TypeError('the pin is not in OUT or OUT_DRAIN mode')
self.Pin.value(value)
def read_analog(self):
if not self.mode == PinMode.ANALOG:
raise TypeError('the pin is not in ANALOG mode')
return self.adc.read()
def write_analog(self, duty, freq=1000):
if not self.mode == PinMode.PWM:
raise TypeError('the pin is not in PWM mode')
self.pwm.freq(freq)
self.pwm.duty(duty)
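# A hedged usage sketch (pin numbers are board labels remapped through pins_remap_esp32):
# p16 = MPythonPin(16, PinMode.OUT)
# p16.write_digital(1)
# p0 = MPythonPin(0, PinMode.ANALOG)
# print(p0.read_analog())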
'''
# to be test
class LightSensor(ADC):
def __init__(self):
super().__init__(Pin(pins_remap_esp32[4]))
# super().atten(ADC.ATTN_11DB)
def value(self):
# lux * k * Rc = N * 3.9/ 4096
# k = 0.0011mA/Lux
# lux = N * 3.9/ 4096 / Rc / k
return super().read() * 1.1 / 4095 / 6.81 / 0.011
'''
class wifi:
def __init__(self):
self.sta = network.WLAN(network.STA_IF)
self.ap = network.WLAN(network.AP_IF)
def connectWiFi(self, ssid, passwd, timeout=10):
if self.sta.isconnected():
self.sta.disconnect()
self.sta.active(True)
list = self.sta.scan()
for i, wifi_info in enumerate(list):
try:
if wifi_info[0].decode() == ssid:
self.sta.connect(ssid, passwd)
wifi_dbm = wifi_info[3]
break
except UnicodeError:
self.sta.connect(ssid, passwd)
wifi_dbm = '?'
break
if i == len(list) - 1:
raise OSError("SSID invalid / failed to scan this wifi")
start = time.time()
print("Connection WiFi", end="")
while (self.sta.ifconfig()[0] == '0.0.0.0'):
if time.ticks_diff(time.time(), start) > timeout:
print("")
raise OSError("Timeout!,check your wifi password and keep your network unblocked")
print(".", end="")
time.sleep_ms(500)
print("")
print('WiFi(%s,%sdBm) Connection Successful, Config:%s' % (ssid, str(wifi_dbm), str(self.sta.ifconfig())))
def disconnectWiFi(self):
if self.sta.isconnected():
self.sta.disconnect()
self.sta.active(False)
print('disconnect WiFi...')
def enable_APWiFi(self, essid, password=b'',channel=10):
self.ap.active(True)
if password:
authmode=4
else:
authmode=0
self.ap.config(essid=essid,password=password,authmode=authmode, channel=channel)
def disable_APWiFi(self):
self.ap.active(False)
print('disable AP WiFi...')
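# A hedged usage sketch (SSID and password below are placeholders):
# w = wifi()
# w.connectWiFi('my-ssid', 'my-password')
# w.disconnectWiFi()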
# 3 rgb leds
rgb = NeoPixel(Pin(17, Pin.OUT), 3, 3, 1, brightness=0.3)
rgb.write()
# light sensor
light = ADC(Pin(39))
light.atten(light.ATTN_11DB)
# sound sensor
sound = ADC(Pin(36))
sound.atten(sound.ATTN_11DB)
# buttons
class Button:
def __init__(self, pin_num, reverse=False):
self.__reverse = reverse
(self.__press_level, self.__release_level) = (0, 1) if not self.__reverse else (1, 0)
self.__pin = Pin(pin_num, Pin.IN, pull=Pin.PULL_UP)
self.__pin.irq(trigger=Pin.IRQ_FALLING | Pin.IRQ_RISING, handler=self.__irq_handler)
# self.__user_irq = None
self.event_pressed = None
self.event_released = None
self.__pressed_count = 0
self.__was_pressed = False
# print("level: pressed is {}, released is {}." .format(self.__press_level, self.__release_level))
def __irq_handler(self, pin):
irq_falling = True if pin.value() == self.__press_level else False
# debounce
time.sleep_ms(10)
if self.__pin.value() == (self.__press_level if irq_falling else self.__release_level):
# new event handler
# pressed event
if irq_falling:
if self.event_pressed is not None:
schedule(self.event_pressed, self.__pin)
# key status
self.__was_pressed = True
if (self.__pressed_count < 100):
self.__pressed_count = self.__pressed_count + 1
# release event
else:
if self.event_released is not None:
schedule(self.event_released, self.__pin)
def is_pressed(self):
if self.__pin.value() == self.__press_level:
return True
else:
return False
def was_pressed(self):
r = self.__was_pressed
self.__was_pressed = False
return r
def get_presses(self):
r = self.__pressed_count
self.__pressed_count = 0
return r
def value(self):
return self.__pin.value()
def irq(self, *args, **kws):
self.__pin.irq(*args, **kws)
class Touch:
def __init__(self, pin):
self.__touch_pad = TouchPad(pin)
self.__touch_pad.irq(self.__irq_handler)
self.event_pressed = None
self.event_released = None
self.__pressed_count = 0
self.__was_pressed = False
self.__value = 0
def __irq_handler(self, value):
# when pressed
if value == 1:
if self.event_pressed is not None:
self.event_pressed(value)
self.__was_pressed = True
self.__value = 1
if (self.__pressed_count < 100):
self.__pressed_count = self.__pressed_count + 1
# when released
else:
self.__value = 0
if self.event_released is not None:
self.event_released(value)
def config(self, threshold):
self.__touch_pad.config(threshold)
def is_pressed(self):
if self.__value:
return True
else:
return False
def was_pressed(self):
r = self.__was_pressed
self.__was_pressed = False
return r
def get_presses(self):
r = self.__pressed_count
self.__pressed_count = 0
return r
def read(self):
return self.__touch_pad.read()
# button_a = Pin(0, Pin.IN, Pin.PULL_UP)
# button_b = Pin(2, Pin.IN, Pin.PULL_UP)
button_a = Button(0)
button_b = Button(2)
# touchpad
touchpad_p = touchPad_P = Touch(Pin(27))
touchpad_y = touchPad_Y = Touch(Pin(14))
touchpad_t = touchPad_T = Touch(Pin(12))
touchpad_h = touchPad_H = Touch(Pin(13))
touchpad_o = touchPad_O = Touch(Pin(15))
touchpad_n = touchPad_N = Touch(Pin(4))
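# A hedged usage sketch (handler names are placeholders): a Button pressed callback
# receives the Pin object, and touchpads can be polled with is_pressed()/read().
# def on_a_pressed(pin):
#     print('button A pressed')
# button_a.event_pressed = on_a_pressed
# if touchpad_p.is_pressed():
#     print('touchpad P pressed')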
from gui import *
def numberMap(inputNum, bMin, bMax, cMin, cMax):
outputNum = 0
outputNum = ((cMax - cMin) / (bMax - bMin)) * (inputNum - bMin) + cMin
return outputNum
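# A hedged example of the linear mapping above: a 10-bit ADC reading scaled to 0..100.
# numberMap(512, 0, 1023, 0, 100)  # -> about 50.05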
|
py | 7df7347b194cc6d1c4d8d2726a2271745689c067 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
class CheddarError(Exception):
def __init__(self, message=None, http_body=None, http_status=None):
super(CheddarError, self).__init__(message)
self.http_body = http_body
self.http_status = http_status
class ConfigurationError(CheddarError):
pass
class AuthenticationError(CheddarError):
pass
class APIError(CheddarError):
pass
class APIConnectionError(APIError):
pass
class MessageIntegrityError(CheddarError):
pass
|
py | 7df73490145238c63d540d903dd50c1101a5a231 | from .base import BaseAPI, CRUDAPI, check_token
__all__ = [
'ConfigAPI',
'StoreAPI',
'WebhookAPI',
]
# TODO: build account
class ConfigAPI(BaseAPI):
"""Config API."""
@check_token
def __call__(self, **kw):
r"""
Return account config.
:param \**kw: additional keyword arguments are passed to requests.
:raises: :class:`recommendpy.exceptions.RecommendAPIError`.
:return: result of response.json().
"""
return self._client.send(
'get', self.get_path(), **kw
)
@check_token
def update(
self, default_store, default_currency, default_price_list,
**kw
):
r"""
Update config.
:param default_store: default store (required).
:param default_currency: default currency (required).
:param default_price_list: default price list (required).
:param \**kw: additional keyword arguments are passed to requests.
:raises: :class:`recommendpy.exceptions.RecommendAPIError`.
:return: result of response.json().
"""
data = {
'default_store': default_store,
'default_currency': default_currency,
'default_price_list': default_price_list,
}
return self._client.send(
'put', self.get_path(), data, **kw
)
class StoreAPI(CRUDAPI):
"""Store API."""
pass
class WebhookAPI(CRUDAPI):
"""WebhookAPI API."""
def update(self, identifier, **kw):
raise NotImplementedError()
|
py | 7df736444aaded22807bfd494a9909c42159acc0 | """
UK-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class UKPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _(u'Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(UKPostcodeField, self).clean(value)
if value == u'':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.default_error_messages['invalid'])
return postcode
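# A hedged illustration of clean() (not from the original source): the value is
# uppercased and the separating space is inserted before the incode if missing.
# UKPostcodeField().clean(u'sw1a1aa')  # -> u'SW1A 1AA'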
class UKCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
from uk_regions import UK_REGION_CHOICES
super(UKCountySelect, self).__init__(attrs, choices=UK_REGION_CHOICES)
class UKNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
from uk_regions import UK_NATIONS_CHOICES
super(UKNationSelect, self).__init__(attrs, choices=UK_NATIONS_CHOICES)
|
py | 7df736a2935129faf3279c2c247ebdcc6442b026 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.solvers.python.ops import lanczos
from tensorflow.contrib.solvers.python.ops import util
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LanczosBidiagTest(tf.test.TestCase):
pass # Filled in below.
def _get_lanczos_tests(dtype_, use_static_shape_, shape_, orthogonalize_,
steps_):
def test_lanczos_bidiag(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
tol = 1e-12 if dtype_ == np.float64 else 1e-5
with self.test_session() as sess:
if use_static_shape_:
a = tf.constant(a_np)
else:
a = tf.placeholder(dtype_)
operator = util.create_operator(a)
lbd = lanczos.lanczos_bidiag(
operator, steps_, orthogonalize=orthogonalize_)
# The computed factorization should satisfy the equations
# A * V = U * B
# A' * U[:, :-1] = V * B[:-1, :]'
av = tf.batch_matmul(a, lbd.v)
ub = lanczos.bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
atu = tf.batch_matmul(a, lbd.u[:, :-1], adj_x=True)
vbt = lanczos.bidiag_matmul(lbd.v, lbd.alpha, lbd.beta, adjoint_b=True)
if use_static_shape_:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt])
else:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt],
feed_dict={a: a_np})
self.assertAllClose(av_val, ub_val, atol=tol, rtol=tol)
self.assertAllClose(atu_val, vbt_val, atol=tol, rtol=tol)
return [test_lanczos_bidiag]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for shape in [[4, 4], [7, 4], [5, 8]]:
for orthogonalize in True, False:
for steps in range(1, min(shape) + 1):
for use_static_shape in True, False:
arg_string = "%s_%s_%s_%s_staticshape_%s" % (
dtype.__name__, "_".join(map(str, shape)), orthogonalize, steps,
use_static_shape)
for test_fn in _get_lanczos_tests(dtype, use_static_shape, shape,
orthogonalize, steps):
name = "_".join(["Lanczos", test_fn.__name__, arg_string])
_add_test(LanczosBidiagTest, name, test_fn)
tf.test.main()
|