:vartype client_request_id: str
:param errors: The error details.
:type errors:
list[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationEligibilityResultsErrorInfo]
"""
_validation = {
'client_request_id': {'readonly': True},
}
_attribute_map = {
'client_request_id': {'key': 'clientRequestId', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ReplicationEligibilityResultsErrorInfo]'},
}
def __init__(
self,
*,
errors: Optional[List["ReplicationEligibilityResultsErrorInfo"]] = None,
**kwargs
):
super(ReplicationEligibilityResultsProperties, self).__init__(**kwargs)
self.client_request_id = None
self.errors = errors
class ReplicationGroupDetails(ConfigurationSettings):
"""Replication group details. This will be used in case of San.
All required parameters must be populated in order to send to Azure.
    :param instance_type: Required. Gets the class type. Overridden in derived classes. Constant
     filled by server.
:type instance_type: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ReplicationGroupDetails, self).__init__(**kwargs)
self.instance_type = 'ReplicationGroupDetails' # type: str
class ReplicationProtectedItem(Resource):
"""Replication protected item.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar type: Resource Type.
:vartype type: str
:param location: Resource Location.
:type location: str
:param properties: The custom data.
:type properties:
~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItemProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ReplicationProtectedItemProperties'},
}
def __init__(
self,
*,
location: Optional[str] = None,
properties: Optional["ReplicationProtectedItemProperties"] = None,
**kwargs
):
super(ReplicationProtectedItem, self).__init__(location=location, **kwargs)
self.properties = properties
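# Illustrative construction (hedged sketch, not taken from this file): the generated models use
# keyword-only constructors, so a protected item wrapper would be built roughly as
#     item = ReplicationProtectedItem(
#         location="westus",
#         properties=ReplicationProtectedItemProperties(friendly_name="vm01"),
#     )
# where "westus" and "vm01" are placeholder values.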
class ReplicationProtectedItemCollection(msrest.serialization.Model):
"""Replication protected item collection.
:param value: The Replication protected item details.
:type value: list[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:param next_link: The value of next link.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ReplicationProtectedItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ReplicationProtectedItem"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ReplicationProtectedItemCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ReplicationProtectedItemProperties(msrest.serialization.Model):
"""Replication protected item custom data details.
:param friendly_name: The name.
:type friendly_name: str
    :param protected_item_type: The type of protected item.
:type protected_item_type: str
:param protectable_item_id: The protected item ARM Id.
:type protectable_item_id: str
:param recovery_services_provider_id: The recovery provider ARM Id.
:type recovery_services_provider_id: str
:param primary_fabric_friendly_name: The friendly name of the primary fabric.
:type primary_fabric_friendly_name: str
:param primary_fabric_provider: The fabric provider of the primary fabric.
:type primary_fabric_provider: str
:param recovery_fabric_friendly_name: The friendly name of recovery fabric.
:type recovery_fabric_friendly_name: str
    :param recovery_fabric_id: The ARM Id of recovery fabric.
:type recovery_fabric_id: str
    :param primary_protection_container_friendly_name: The friendly name of the primary protection container.
    :type primary_protection_container_friendly_name: str
    :param recovery_protection_container_friendly_name: The friendly name of the recovery protection container.
    :type recovery_protection_container_friendly_name: str
:param protection_state: The protection status.
:type protection_state: str
:param protection_state_description: The protection state description.
:type protection_state_description: str
:param active_location: The Current active location of the PE.
:type active_location: str
:param test_failover_state: The Test failover state.
:type test_failover_state: str
:param test_failover_state_description: The Test failover state description.
:type test_failover_state_description: str
:param allowed_operations: The allowed operations on the Replication protected item.
:type allowed_operations: list[str]
:param replication_health: The consolidated protection health for the VM taking any issues with
SRS as well as all the replication units associated with the VM's replication group into
account. This is a string representation of the ProtectionHealth enumeration.
:type replication_health: str
:param failover_health: The consolidated failover health for the VM.
:type failover_health: str
:param health_errors: List of health errors.
:type health_errors: list[~azure.mgmt.recoveryservicessiterecovery.models.HealthError]
:param policy_id: The ID of Policy governing this PE.
:type policy_id: str
:param policy_friendly_name: The name of Policy governing this PE.
:type policy_friendly_name: str
:param last_successful_failover_time: The Last successful failover time.
:type last_successful_failover_time: ~datetime.datetime
:param last_successful_test_failover_time: The Last successful test failover time.
:type last_successful_test_failover_time: ~datetime.datetime
:param current_scenario: The current scenario.
:type current_scenario: ~azure.mgmt.recoveryservicessiterecovery.models.CurrentScenarioDetails
:param failover_recovery_point_id: The recovery point ARM Id to which the Vm was failed over.
:type failover_recovery_point_id: str
:param provider_specific_details: The Replication provider custom settings.
:type provider_specific_details:
~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProviderSpecificSettings
:param recovery_container_id: The recovery container Id.
:type recovery_container_id: str
:param event_correlation_id: The correlation Id for events associated with this protected item.
:type event_correlation_id: str
"""
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'protected_item_type': {'key': 'protectedItemType', 'type': 'str'},
'protectable_item_id': {'key': 'protectableItemId', 'type': 'str'},
'recovery_services_provider_id': {'key': 'recoveryServicesProviderId', 'type': 'str'},
'primary_fabric_friendly_name': {'key': 'primaryFabricFriendlyName', 'type': 'str'},
'primary_fabric_provider': {'key': 'primaryFabricProvider', 'type': 'str'},
'recovery_fabric_friendly_name': {'key': 'recoveryFabricFriendlyName', 'type': 'str'},
'recovery_fabric_id': {'key': 'recoveryFabricId', 'type': 'str'},
'primary_protection_container_friendly_name': {'key': 'primaryProtectionContainerFriendlyName', 'type': 'str'},
'recovery_protection_container_friendly_name': {'key': 'recoveryProtectionContainerFriendlyName', 'type': 'str'},
'protection_state': {'key': 'protectionState', 'type': 'str'},
'protection_state_description': {'key': 'protectionStateDescription', 'type': 'str'},
'active_location': {'key': 'activeLocation', 'type': 'str'},
'test_failover_state': {'key': 'testFailoverState', 'type': 'str'},
'test_failover_state_description': {'key': 'testFailoverStateDescription', 'type': 'str'},
'allowed_operations': {'key': 'allowedOperations', 'type': '[str]'},
'replication_health': {'key': 'replicationHealth', 'type': 'str'},
'failover_health': {'key': 'failoverHealth', 'type': 'str'},
'health_errors': {'key': 'healthErrors', 'type': '[HealthError]'},
'policy_id': {'key': 'policyId', 'type': 'str'},
'policy_friendly_name': {'key': 'policyFriendlyName', 'type': 'str'},
'last_successful_failover_time': {'key': 'lastSuccessfulFailoverTime', 'type': 'iso-8601'},
'last_successful_test_failover_time': {'key': 'lastSuccessfulTestFailoverTime', 'type': 'iso-8601'},
'current_scenario': {'key': 'currentScenario', 'type': 'CurrentScenarioDetails'},
'failover_recovery_point_id': {'key': 'failoverRecoveryPointId', 'type': 'str'},
'provider_specific_details': {'key': 'providerSpecificDetails', 'type': 'ReplicationProviderSpecificSettings'},
'recovery_container_id': {'key': 'recoveryContainerId', 'type': 'str'},
'event_correlation_id': {'key': 'eventCorrelationId', 'type': 'str'},
}
def __init__(
self,
*,
friendly_name: Optional[str] = None,
protected_item_type: Optional[str] = None,
protectable_item_id: Optional[str] = None,
recovery_services_provider_id: Optional[str] = None,
primary_fabric_friendly_name: Optional[str] = None,
primary_fabric_provider: Optional[str] = None,
recovery_fabric_friendly_name: Optional[str] = None,
recovery_fabric_id: Optional[str] = None,
primary_protection_container_friendly_name: Optional[str] = None,
recovery_protection_container_friendly_name: Optional[str] = None,
protection_state: Optional[str] = None,
protection_state_description: Optional[str] = None,
active_location: Optional[str] = None,
test_failover_state: Optional[str] = None,
test_failover_state_description: Optional[str] = None,
allowed_operations: Optional[List[str]] = None,
replication_health: Optional[str] = None,
failover_health: Optional[str] = None,
health_errors: Optional[List["HealthError"]] = None,
policy_id: Optional[str] = None,
policy_friendly_name: Optional[str] = None,
last_successful_failover_time: Optional[datetime.datetime] = None,
last_successful_test_failover_time: Optional[datetime.datetime] = None,
current_scenario: Optional["CurrentScenarioDetails"] = None,
failover_recovery_point_id: Optional[str] = None,
provider_specific_details: Optional["ReplicationProviderSpecificSettings"] = None,
recovery_container_id: Optional[str] = None,
event_correlation_id: Optional[str] = None,
**kwargs
):
super(ReplicationProtectedItemProperties, self).__init__(**kwargs)
self.friendly_name = friendly_name
self.protected_item_type = protected_item_type
self.protectable_item_id = protectable_item_id
self.recovery_services_provider_id = recovery_services_provider_id
self.primary_fabric_friendly_name = primary_fabric_friendly_name
self.primary_fabric_provider = primary_fabric_provider
self.recovery_fabric_friendly_name = recovery_fabric_friendly_name
self.recovery_fabric_id = recovery_fabric_id
self.primary_protection_container_friendly_name = primary_protection_container_friendly_name
self.recovery_protection_container_friendly_name = recovery_protection_container_friendly_name
self.protection_state = protection_state
self.protection_state_description = protection_state_description
self.active_location = active_location
self.test_failover_state = test_failover_state
self.test_failover_state_description = test_failover_state_description
self.allowed_operations = allowed_operations
self.replication_health = replication_health
self.failover_health = failover_health
self.health_errors = health_errors
self.policy_id = policy_id
self.policy_friendly_name = policy_friendly_name
self.last_successful_failover_time = last_successful_failover_time
self.last_successful_test_failover_time = last_successful_test_failover_time
self.current_scenario = current_scenario
self.failover_recovery_point_id = failover_recovery_point_id
self.provider_specific_details = provider_specific_details
self.recovery_container_id = recovery_container_id
self.event_correlation_id = event_correlation_id
class ReplicationProtectionIntent(Resource):
"""Replication protection intent.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar type: Resource Type.
:vartype type: str
:param location: Resource Location.
:type location: str
:param properties: The custom data.
:type properties:
~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectionIntentProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ReplicationProtectionIntentProperties'},
}
def __init__(
self,
*,
location: Optional[str] = None,
properties: Optional["ReplicationProtectionIntentProperties"] = None,
**kwargs
):
super(ReplicationProtectionIntent, self).__init__(location=location, **kwargs)
self.properties = properties
class ReplicationProtectionIntentCollection(msrest.serialization.Model):
"""Replication protection intent objects collection.
:param value: The Replication protection intent details.
:type value: list[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectionIntent]
:param next_link: The value of next link.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ReplicationProtectionIntent]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ReplicationProtectionIntent"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ReplicationProtectionIntentCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ReplicationProtectionIntentProperties(msrest.serialization.Model):
"""Replication protection intent custom data details.
Variables are only populated by the server, and will be ignored when sending a request.
:param friendly_name: The name.
:type friendly_name: str
:ivar job_id: The job Id.
:vartype job_id: str
:ivar job_state: The job state.
:vartype job_state: str
:ivar is_active: A value indicating whether the intent object is active.
:vartype is_active: bool
:ivar creation_time_utc: The creation time in UTC.
:vartype creation_time_utc: str
:param provider_specific_details: The Replication provider custom settings.
:type provider_specific_details:
~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectionIntentProviderSpecificSettings
"""
_validation = {
'job_id': {'readonly': True},
'job_state': {'readonly': True},
'is_active': {'readonly': True},
'creation_time_utc': {'readonly': True},
}
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
        'job_id': {'key': 'jobId', 'type': 'str'},
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 30 20:39:24 2018
@author: <NAME>
"""
import pandas as pd
import time
import datetime
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import numpy as np
begin = time.time()
# Create list of metrics for further use
metrics = ['PF','Margin','FGM','FGA',
'FGM3','FGA3','FGM2','FGA2','Ast','OR','DR','TR',
'FTA','FTM','Blk','Foul']
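# Abbreviations (as used in the column renames below): PF = points for, Margin = scoring margin,
# FGM/FGA = field goals made/attempted (3 = threes, 2 = twos), Ast = assists,
# OR/DR/TR = offensive/defensive/total rebounds, FTA/FTM = free throws attempted/made,
# Blk = blocks, Foul = personal fouls.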
###############################################################################
###############################################################################
# Define opponentadjust UDF (for ease of opponent-adjusting metrics)
###############################################################################
###############################################################################
def opponentadjust(OAmetric):
global irsg, iteams
# Figure out the prefix, core metric, for use later
if OAmetric[:2] == 'Tm':
prefix = OAmetric[:2]
otherprefix = 'Opp'
coremetric = OAmetric[2:]
if OAmetric[:3] == 'Opp':
prefix = OAmetric[:3]
otherprefix = 'Tm'
coremetric = OAmetric[3:]
# print (coremetric + prefix)
# From iteams put average PF into opponent side of irsg
# Example, Opp_AvgPF_Against, Opp_AvgPA_Against
# If I am OAing TmPFper40 (my offense proficiency), I want to get OppPFper40 for my opponent
# So, for a TmName, get their OppPFper40, aka their PAper40
tempiteams = iteams[['TmName',otherprefix+coremetric]]
# Rename my opponent's metric to say it's *their* average <insert metric>
# Rename to OppAvg_OppScoreper40 (it's my opponent's average opponents (me) score per 40)
tempiteams = tempiteams.rename(columns = {otherprefix+coremetric:'OppAvg_'+otherprefix+coremetric})
# Merge in this info into irsg, for the opponent in irsg
irsg = pd.merge(irsg,tempiteams,left_on='OppName',right_on='TmName',how='left',suffixes=('','_y'))
del irsg['TmName_y']
# In irsg, determine for that game how the Tm did vs Opp_Avg's
# Example, GameOppAdj_TmPFper40 = TmPFper40 - OppAvg_OppPFper40
irsg['GameOppAdj_'+OAmetric] = irsg[OAmetric] - irsg['OppAvg_'+otherprefix+coremetric]
# switch it for when you start with an opponent
if prefix == 'Opp':
irsg['GameOppAdj_'+OAmetric] = irsg['GameOppAdj_'+OAmetric] * -1
# In iteamstemp, sum the opponent-adjusted metric and get a new average
# Example, sum(GameOppAdj_TmPFper40) gets you the TOTAL OA_PFper40
iteamstemp = irsg.groupby(['TmName'])['GameOppAdj_'+OAmetric].sum().reset_index()
# bring that value back into iteams
iteams = pd.merge(iteams,iteamstemp,left_on='TmName',right_on='TmName',how='left',suffixes=('','_y'))
iteams = iteams.rename(columns = {'GameOppAdj_'+OAmetric:'OA_'+OAmetric})
iteams['OA_'+OAmetric] = iteams['OA_'+OAmetric] / iteams['GameMins'] * 40
# del iteams['TmName_y']
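# Example usage (illustrative sketch): once irsg and iteams have been built for a given date,
# each per-40 metric would typically be opponent-adjusted from both sides, e.g.
#     for metric in metrics:
#         opponentadjust('Tm' + metric + 'per40')
#         opponentadjust('Opp' + metric + 'per40')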
###############################################################################
###############################################################################
# Read in raw game data, clean up, manipulate for further analysis
###############################################################################
###############################################################################
def creatersg():
# Set start time
rsgbegin = time.time()
global metrics
# Read in regular season games (rsg), seasons, teams
rsg = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Desktop/DataFiles/PrelimData2018/RegularSeasonDetailedResults_Prelim2018.csv')
seasons = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Desktop/DataFiles/Seasons.csv')
teams = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Desktop/DataFiles/Teams.csv')
# Merge in day 0 to rsg, add days to day 0 to get date of game
rsg = pd.merge(rsg,seasons[['Season','DayZero']],on='Season')
rsg['DayZero'] = pd.to_datetime(rsg['DayZero'],format='%m/%d/%Y')
rsg['DayNum'] = pd.to_timedelta(rsg['DayNum'],unit='d')
rsg['GameDate'] = rsg['DayZero'] + rsg['DayNum']
del rsg['DayNum'], rsg['DayZero']
# Duplicate rsg for renaming
lrsg = rsg.copy()
# Rename columns for rsg
rsg = rsg.rename(columns = {'WTeamID':'TmID','WScore':'TmPF','LTeamID':'OppID','LScore':'OppPF','WLoc':'TmLoc'})
rsg = rsg.rename(columns = {'WFGM':'TmFGM','WFGA':'TmFGA','WFGM3':'TmFGM3','WFGA3':'TmFGA3','WFTM':'TmFTM','WFTA':'TmFTA'})
rsg = rsg.rename(columns = {'WOR':'TmOR','WDR':'TmDR','WAst':'TmAst','WTo':'TmTO','WFTM':'TmFTM','WFTA':'TmFTA'})
rsg = rsg.rename(columns = {'WTO':'TmTO','WStl':'TmStl','WBlk':'TmBlk','WPF':'TmFoul'})
rsg = rsg.rename(columns = {'LFGM':'OppFGM','LFGA':'OppFGA','LFGM3':'OppFGM3','LFGA3':'OppFGA3','LFTM':'OppFTM','LFTA':'OppFTA'})
rsg = rsg.rename(columns = {'LOR':'OppOR','LDR':'OppDR','LAst':'OppAst','LTo':'OppTO','LFTM':'OppFTM','LFTA':'OppFTA'})
rsg = rsg.rename(columns = {'LTO':'OppTO','LStl':'OppStl','LBlk':'OppBlk','LPF':'OppFoul'})
rsg['TmWin'] = 1
# Rename columns for lrsg
lrsg = lrsg.rename(columns = {'WTeamID':'OppID','WScore':'OppPF','LTeamID':'TmID','LScore':'TmPF'})
lrsg = lrsg.rename(columns = {'WFGM':'OppFGM','WFGA':'OppFGA','WFGM3':'OppFGM3','WFGA3':'OppFGA3','WFTM':'OppFTM','WFTA':'OppFTA'})
lrsg = lrsg.rename(columns = {'WOR':'OppOR','WDR':'OppDR','WAst':'OppAst','WTo':'OppTO','WFTM':'OppFTM','WFTA':'OppFTA'})
lrsg = lrsg.rename(columns = {'WTO':'OppTO','WStl':'OppStl','WBlk':'OppBlk','WPF':'OppFoul'})
lrsg = lrsg.rename(columns = {'LFGM':'TmFGM','LFGA':'TmFGA','LFGM3':'TmFGM3','LFGA3':'TmFGA3','LFTM':'TmFTM','LFTA':'TmFTA'})
lrsg = lrsg.rename(columns = {'LOR':'TmOR','LDR':'TmDR','LAst':'TmAst','LTo':'TmTO','LFTM':'TmFTM','LFTA':'TmFTA'})
lrsg = lrsg.rename(columns = {'LTO':'TmTO','LStl':'TmStl','LBlk':'TmBlk','LPF':'TmFoul'})
lrsg['TmWin'] = 0
# Put in loser locations
lrsg.loc[(lrsg['WLoc'] == 'H'),'TmLoc'] = 'A'
lrsg.loc[(lrsg['WLoc'] == 'A'),'TmLoc'] = 'H'
lrsg.loc[(lrsg['WLoc'] == 'N'),'TmLoc'] = 'N'
del lrsg['WLoc']
# Append lrsg to rsg, delete lrsg,
rsg = rsg.append(lrsg)
del lrsg
# Bring in team names for both Tm and Opp
rsg = pd.merge(rsg,teams[['TeamID','TeamName']],left_on='TmID',right_on='TeamID')
del rsg['TeamID']
rsg = rsg.rename(columns = {'TeamName':'TmName'})
rsg = pd.merge(rsg,teams[['TeamID','TeamName']],left_on='OppID',right_on='TeamID')
del rsg['TeamID']
rsg = rsg.rename(columns = {'TeamName':'OppName'})
# Add countable field for number of games
rsg['TmGame'] = 1
# Add field for number of minutes
rsg['GameMins'] = 40 + rsg['NumOT']*5
# Add field for Total Rebounds
rsg['TmTR'] = rsg['TmOR'] + rsg['TmDR']
rsg['OppTR'] = rsg['OppOR'] + rsg['OppDR']
# Count number of FGA2/FGM2
rsg['TmFGM2'] = rsg['TmFGM'] - rsg['TmFGM3']
rsg['TmFGA2'] = rsg['TmFGA'] - rsg['TmFGA3']
rsg['OppFGM2'] = rsg['OppFGM'] - rsg['OppFGM3']
rsg['OppFGA2'] = rsg['OppFGA'] - rsg['OppFGA3']
# Calculate game margin
rsg['TmMargin'] = rsg['TmPF'] - rsg['OppPF']
rsg['OppMargin'] = -rsg['TmMargin']
# Add in per-40 stats to rsg
for x in {'Opp','Tm'}:
for column in metrics:
rsg[x + column + 'per40'] = rsg[x + column] / rsg['GameMins'] * 40
del column, x
# Total Possessions (tbd)
# rsg['TmPoss'] = rsg['TmFGA'] + rsg['TmFGA3'] TBD
# Benchmark time
rsgtime = time.time()-rsgbegin
if rsgtime < 60:
print('Create RSG Time: ' + str(round((rsgtime),2)) + ' sec')
else:
print('Create RSG Time: ' + str(round((rsgtime)/60,2)) + ' min')
# return the rsg dataframe as the output
return rsg
###############################################################################
###############################################################################
# Read in raw vegas data, clean up, manipulate for further analysis
###############################################################################
###############################################################################
def createvegas(rsg):
# Set start time
vegasbegin = time.time()
# Read in raw vegas analysis data
vegas = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Google Drive/HistoricalNCAAData/VegasAnalysisFull.csv')
# De-dupe vegas data (raw data has a row for both sides of the line)
vegasfaves = vegas.loc[(vegas['TeamLineVegas'] < 0)]
vegaspushes = vegas.loc[(vegas['TeamLineVegas'] == 0) & (vegas['Team'] < vegas['Opponent'])]
vegas = vegasfaves.append(vegaspushes)
del vegasfaves, vegaspushes
# Only pull necessary columns
vegas = vegas[['Date','Team','Opponent','TeamLineVegas']]
# Rename columns
vegas = vegas.rename(columns = {'Date':'GameDate','Team':'Tm1','Opponent':'Tm2','TeamLineVegas':'Tm1LineVegas'})
# Change GameDate column to Datetime type
vegas['GameDate'] = pd.to_datetime(vegas['GameDate'],format='%Y/%m/%d')
# Get season by adding 24 weeks to game date and pulling year
vegas['Season'] = vegas['GameDate'] + datetime.timedelta(weeks=24)
vegas['Season'] = vegas['Season'].dt.year
# Get game results into vegas
vegas = pd.merge(vegas,rsg[['GameDate','TmName','OppName','TmWin','TmMargin']],left_on=['GameDate','Tm1','Tm2'],
right_on=['GameDate','TmName','OppName'])
# Delete merged-in names
del vegas['TmName'], vegas['OppName']
# Rename columns
vegas = vegas.rename(columns = {'TmMargin':'Tm1Margin','TmWin':'Tm1Win'})
# Check margin vs vegas to create TmWinVegas
vegas['Tm1WinVegas'] = ""
vegas.loc[(vegas['Tm1LineVegas'] > -1 * vegas['Tm1Margin']),'Tm1WinVegas'] = 1
vegas.loc[(vegas['Tm1LineVegas'] < -1 * vegas['Tm1Margin']),'Tm1WinVegas'] = -1
vegas.loc[(vegas['Tm1LineVegas'] == -1 * vegas['Tm1Margin']),'Tm1WinVegas'] = 0
vegas['Tm1WinVegas'] = pd.to_numeric(vegas['Tm1WinVegas'])
# Benchmark time
vegastime = time.time()-vegasbegin
if vegastime < 60:
print('Create Vegas Time: ' + str(round((vegastime),2)) + ' sec')
else:
print('Create Vegas Time: ' + str(round((vegastime)/60,2)) + ' min')
# Output vegas DF
return vegas
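# Worked example of the Tm1WinVegas logic above (illustrative numbers): if Tm1 is favored by 3.5
# (Tm1LineVegas = -3.5) and wins by 5 (Tm1Margin = 5), then -1 * Tm1Margin = -5 and -3.5 > -5,
# so Tm1WinVegas = 1 (Tm1 covered the spread); losing by 5 instead gives -3.5 < 5, so
# Tm1WinVegas = -1.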
###############################################################################
###############################################################################
# Create vegasdates dataframe
###############################################################################
###############################################################################
def createvegasdates(vegas,rsg,size):
# Set start time
vegasdatesbegin = time.time()
    # Pull out each unique game date/Season combo, count # of games
    vegasdates = vegas[['Season','GameDate','Tm1']].groupby(['Season','GameDate']).agg('count').reset_index()
    vegasdates = vegasdates.rename(columns = {'Tm1':'GameCount'})
    # Trim vegasdates to not include games where there is no game data to use in calculations
    vegasdates = vegasdates.loc[(vegasdates['GameDate'] <= max(rsg['GameDate']))]
    # Handle logic based on size input: the full frame is built above, then trimmed for testing
    if size == 'small':
        # Create small vegasdates for testing
        vegasdates = vegasdates.loc[:7]
# Benchmark time
vegasdatestime = time.time()-vegasdatesbegin
if vegasdatestime < 60:
print('Create Vegasdates Time: ' + str(round((vegasdatestime),2)) + ' sec')
else:
print('Create Vegasdates Time: ' + str(round((vegasdatestime)/60,2)) + ' min')
    # return vegasdates dataframe
return vegasdates
###############################################################################
###############################################################################
# Calculate cumulative stats for a season up to a date
###############################################################################
###############################################################################
def createvegasout(how):
# Set start time
vegasoutbegin = time.time()
if how == 'read-in':
# Read CSV
vegasout = pd.read_csv(filepath_or_buffer = '/Users/Ryan/Desktop/DataFiles/PrelimData2018/vegasout.csv')
if how == 'new':
# Create rsg dataframe
rsg = creatersg()
# Create vegas dataframe
vegas = createvegas(rsg)
# Create vegasdates dataframe
vegasdates = createvegasdates(vegas,rsg,size='small')
global metrics
# Create vegasout dataframe for the output of the for loop
vegasout = pd.DataFrame()
# Create summable fields
summables = ['GameMins','TmWin','TmGame']
for x in {'Opp','Tm'}:
for column in metrics:
summables.append(x + column)
del column, x
###############################
## Begin For Loop
###############################
# Loop through the data we have vegas info for
for row in vegasdates.itertuples():
# Set included season & the day of the game
inclseason = row[1]
dayofgame = row[2]
print ('Season: ' + str(inclseason) + '; Date: ' + str(dayofgame))
# Limit the game data to that season, & before the game date (i.e., all games before the current game)
irsg = rsg.loc[(rsg['Season'] == inclseason) & (rsg['GameDate'] < dayofgame)]
# Sum the summable fields
iteams = irsg.groupby(['TmID','TmName'])[summables].sum().reset_index()
for x in {'Opp','Tm'}:
for column in metrics:
iteams[x + column + 'per40'] = iteams[x + column] / iteams['GameMins'] * 40
del column, x
# put Season & GameDate into iteams so we know what to merge back to the vegas data on
iteams['Season'] = inclseason
iteams['GameDate'] = dayofgame
# Calculate Win Pct
# cstatgen/src/egglib/test/t_egglib_binding.py
import os
from .. import egglib_binding
import example_files
########################################################################
def test_ABC():
"""
Test ABC
"""
print "## Testing egglib_binding.ABC"
f = open("ABC_input1.txt", 'w')
f.write("""4.95194298907 0.0153325269453 # 1.15981742471 0.516166666667 10.9333333333
1.98635510461 0.491933633245 # 31.9733452219 21.6196666667 55.0666666667
3.29351499573 0.228908428875 # 20.6259423098 11.9238333333 52.2666666667
4.7437088623 0.305096623189 # 27.2713826892 13.6665 57.8666666667
2.64284692458 0.198574028268 # 15.8299405265 9.7905 48.0
1.02024068344 0.441248261522 # 17.6793791227 12.2138333333 48.6666666667
1.66583032231 0.375171886971 # 22.193263154 15.79 48.1333333333
2.47682701097 0.41098071521 # 30.7821813803 20.7763333333 56.4
2.23190710444 0.264390633197 # 19.2780463838 12.9171666667 51.3333333333
4.90752660309 0.105732060591 # 10.0621998198 5.34483333333 41.4666666667
3.08032053071 0.323599198806 # 28.8700499503 16.6708333333 57.7333333333
2.60467202517 0.0267422062183 # 2.41367410008 1.45183333333 18.0
4.79410305881 0.466240648083 # 44.1357549729 24.1845 65.0666666667
2.26126178743 0.215717101296 # 15.0149336875 10.5583333333 45.7333333333
2.85407209632 0.00567788064123 # 0.532889087031 0.4105 7.6
2.35756332194 0.488754120473 # 35.9229937493 22.2838333333 58.0
2.59873167477 0.132524627148 # 10.814513825 7.52066666667 41.8666666667
3.78029735352 0.25989812506 # 23.4471198294 13.7135 55.0666666667
3.80525557452 0.17916015372 # 16.0493654447 9.7595 50.4
3.55586791557 0.0442993587781 # 3.54214510791 2.16066666667 25.3333333333
4.91906847941 0.282067576661 # 27.7102325256 14.4543333333 59.2
3.02938569678 0.492301567372 # 42.1922771261 26.7441666667 61.4666666667
1.3042908075 0.393186747802 # 20.7199815604 13.2196666667 49.0666666667
0.231909071363 0.121422489456 # 1.00308534029 0.930666666667 10.4
1.85256018463 0.0905626391996 # 5.79908712357 4.07433333333 30.8
0.144764703453 0.0479666941369 # 0.188078501305 0.214 2.26666666667
0.320654364195 0.287756119316 # 3.94964852741 2.71366666667 27.7333333333
3.03275337793 0.498803120198 # 41.2205382027 25.9251666667 63.0666666667
4.58480058627 0.342914241632 # 30.0298673751 17.6878333333 56.5333333333
0.938088339711 0.296943758093 # 13.6983841784 10.2643333333 44.0
2.2423060314 0.401898522407 # 30.9702598816 19.4815 59.7333333333
0.921005398924 0.0577682828909 # 2.35098126631 1.88416666667 18.1333333333
1.27348469316 0.40813404689 # 18.6511180461 13.26 48.1333333333
2.13387569874 0.153659566536 # 11.5668278303 7.80933333333 44.1333333333
1.15057063398 0.371128085872 # 15.4537835239 11.9005 47.4666666667
1.46765056341 0.414746734908 # 23.54115908 16.6858333333 51.2
1.10394729051 0.339684734299 # 16.9584115343 12.3315 48.1333333333
1.3378381372 0.352703818653 # 17.0211043681 12.6275 46.4
1.19431686816 0.429554816402 # 20.5319030591 15.128 48.8
2.87289011439 0.292179557542 # 25.8294475126 16.4733333333 55.8666666667
3.49125361273 0.2136503791 # 17.0211043681 10.8833333333 49.7333333333
4.79254622735 0.217694384159 # 21.5036419826 11.2396666667 54.9333333333
4.76891534596 0.0150428341611 # 1.47328159356 0.806166666667 12.9333333333
3.52170597349 0.470239589002 # 41.7847737066 25.7671666667 62.1333333333
0.308579938981 0.278337291578 # 3.79291644299 2.729 25.8666666667
4.38703040908 0.318761162533 # 29.3715926205 16.1536666667 58.9333333333
4.49931825435 0.0233353606266 # 1.7867457624 1.02666666667 17.2
4.9014891786 0.0306204612331 # 2.5704061845 1.23883333333 18.8
3.98263248294 0.160991598118 # 14.9522408538 8.81716666667 50.1333333333
1.25708893341 0.0387669318678 # 2.19424918189 1.672 20.0
0.326615341063 0.49514820845 # 7.5231400522 6.3805 32.5333333333
4.11922609252 0.442668784601 # 41.0951525352 22.262 62.2666666667
2.6656577415 0.083868534029 # 6.30062979372 3.8895 36.1333333333
0.860622853681 0.0606174872015 # 1.9748242637 1.19616666667 18.4
3.89690292911 0.17427862282 # 15.3597442733 8.55183333333 49.6
2.92461510803 0.462439136844 # 36.9260790896 22.6243333333 60.6666666667
3.66404780628 0.234430737897 # 20.3438245578 11.8148333333 50.8
3.09959322746 0.407777141718 # 36.6126149207 20.7783333333 59.8666666667
3.1998186802 0.465569957742 # 36.9260790896 21.8086666667 62.1333333333
2.84967534561 0.112841294662 # 9.18450014707 5.998 34.6666666667
2.41147988297 0.349742554348 # 28.0236966945 16.9838333333 55.6
1.6202851128 0.494180147502 # 26.9579185204 17.835 54.2666666667
4.3025792422 0.378200913378 # 35.9543401662 21.4943333333 61.7333333333
1.98380508978 0.162098133698 # 10.3756639887 7.3515 42.4
4.62229723369 0.425949845897 # 39.6845637754 20.4628333333 63.7333333333
3.12889919064 0.276346489403 # 23.1650020774 14.1796666667 55.0666666667
0.440733340009 0.0701937662012 # 1.28520309225 0.883333333333 14.6666666667
0.212828834754 0.0280329192247 # 0.282117751958 0.177666666667 3.46666666667
4.86019594803 0.30882644587 # 27.8042717763 15.7166666667 61.8666666667
2.43751472685 0.29437756619 # 23.008269993 14.4385 52.1333333333
2.25464292428 0.0582855932469 # 3.85560927676 2.669 30.1333333333
4.45739883972 0.3354820328 # 29.4342854543 17.2266666667 58.8
2.57383701655 0.369238080742 # 30.0925602088 18.9933333333 55.6
3.77521195494 0.0471847466015 # 4.13772702871 2.14433333333 25.0666666667
4.46980910917 0.490549870633 # 44.4492191418 24.568 64.0
4.43335090973 0.329294370615 # 29.1521677023 15.635 62.0
2.19577682136 0.258246134555 # 17.0211043681 11.3206666667 48.6666666667
1.29140264848 0.497711071806 # 25.0457870905 18.0733333333 54.1333333333
0.654334969958 0.456926256532 # 14.0431947641 11.2698333333 42.2666666667
2.20217444417 0.317910889447 # 23.1650020774 15.9455 51.7333333333
4.41777837164 0.102383714864 # 8.65161106004 4.62416666667 38.0
2.46004227502 0.116104208769 # 9.34123223149 6.2765 39.7333333333
4.68816725743 0.42553313794 # 41.1891917858 22.8011666667 62.4
1.85612205818 0.0326126692392 # 2.53905976762 1.7745 21.3333333333
4.04609030198 0.0828302134053 # 7.64852571974 4.01383333333 33.8666666667
1.04244523031 0.31085060383 # 12.444527503 8.199 43.7333333333
0.533529751109 0.178973234014 # 4.79600178328 4.04433333333 28.0
4.27995103668 0.176800938253 # 16.3001367798 8.50933333333 54.5333333333
""")
f.close()
f = open("ABC_input2.txt", 'w')
f.write("""0.0153325269453 # 1.15981742471 0.516166666667 10.9333333333
0.491933633245 # 31.9733452219 21.6196666667 55.0666666667
0.228908428875 # 20.6259423098 11.9238333333 52.2666666667
0.305096623189 # 27.2713826892 13.6665 57.8666666667
0.198574028268 # 15.8299405265 9.7905 48.0
0.441248261522 # 17.6793791227 12.2138333333 48.6666666667
0.375171886971 # 22.193263154 15.79 48.1333333333
0.41098071521 # 30.7821813803 20.7763333333 56.4
0.264390633197 # 19.2780463838 12.9171666667 51.3333333333
0.105732060591 # 10.0621998198 5.34483333333 41.4666666667
0.323599198806 # 28.8700499503 16.6708333333 57.7333333333
0.0267422062183 # 2.41367410008 1.45183333333 18.0
0.466240648083 # 44.1357549729 24.1845 65.0666666667
0.215717101296 # 15.0149336875 10.5583333333 45.7333333333
0.00567788064123 # 0.532889087031 0.4105 7.6
0.488754120473 # 35.9229937493 22.2838333333 58.0
0.132524627148 # 10.814513825 7.52066666667 41.8666666667
0.25989812506 # 23.4471198294 13.7135 55.0666666667
0.17916015372 # 16.0493654447 9.7595 50.4
0.0442993587781 # 3.54214510791 2.16066666667 25.3333333333
0.282067576661 # 27.7102325256 14.4543333333 59.2
0.492301567372 # 42.1922771261 26.7441666667 61.4666666667
0.393186747802 # 20.7199815604 13.2196666667 49.0666666667
0.121422489456 # 1.00308534029 0.930666666667 10.4
0.0905626391996 # 5.79908712357 4.07433333333 30.8
0.0479666941369 # 0.188078501305 0.214 2.26666666667
0.287756119316 # 3.94964852741 2.71366666667 27.7333333333
0.498803120198 # 41.2205382027 25.9251666667 63.0666666667
0.00759146872179 # 0.0 0.0 0.0
""")
f.close()
try:
ABC = egglib_binding.ABC()
ABC.number_of_statistics(3)
ABC.add_fname('ABC_input1.txt', 2)
ABC.add_fname('ABC_input2.txt', 1)
ABC.get_threshold(0.5)
ABC.sd(0)
ABC.sd(1)
ABC.sd(2)
ABC.threshold()
ABC.obs(0, 12.0)
ABC.obs(0, 6.2)
ABC.obs(0, 31.9)
ABC.rejection("ABC_output1.txt", False)
ABC.rejection("ABC_output2.txt", True)
del ABC
ABC = egglib_binding.ABC()
ABC.number_of_statistics(3)
ABC.add_fname('ABC_input1.txt', 2)
ABC.get_threshold(0.4)
ABC.number_of_samples()
ABC.sd(0)
ABC.sd(1)
ABC.sd(2)
ABC.threshold()
ABC.obs(0, 0.1)
ABC.obs(1, 6.2)
ABC.obs(2, 31.9)
ABC.rejection("ABC_output3.txt", False)
ABC.rejection("ABC_output4.txt", True)
ABC.regression("ABC_output3.txt", "ABC_output5.txt", ABC.NONE, "param1\tparam2")
ABC.regression("ABC_output3.txt", "ABC_output6.txt", ABC.LOG, "param1\tparam2")
ABC.regression("ABC_output3.txt", "ABC_output7.txt", ABC.TAN)
del ABC
finally:
if os.path.isfile('ABC_input1.txt'): os.remove("ABC_input1.txt")
if os.path.isfile('ABC_input2.txt'): os.remove("ABC_input2.txt")
if os.path.isfile('ABC_output1.txt'): os.remove("ABC_output1.txt")
if os.path.isfile('ABC_output2.txt'): os.remove("ABC_output2.txt")
if os.path.isfile('ABC_output3.txt'): os.remove("ABC_output3.txt")
if os.path.isfile('ABC_output4.txt'): os.remove("ABC_output4.txt")
if os.path.isfile('ABC_output5.txt'): os.remove("ABC_output5.txt")
if os.path.isfile('ABC_output6.txt'): os.remove("ABC_output6.txt")
if os.path.isfile('ABC_output7.txt'): os.remove("ABC_output7.txt")
########################################################################
def test_Align():
"""
Test Align
"""
print "## Testing egglib_binding.Align"
align = egglib_binding.Align()
align = egglib_binding.Align(10, 100)
assert align.append("name", "G"*100)==11
assert align.removePosition(50)==99
assert align.removePosition(0)==98
assert align.removePosition(97)==97
assert align.remove(10)==10
assert align.remove(0)==9
assert align.remove(5)==8
align.sequence(1, "A"*97)
assert align.ls()==97
assert align.ls(0)==97
assert align.ls(1)==97
assert align.ls(7)==97
assert align.character(7, 96)=='?'
assert align.get(7, 96)=='?'
align.set(0, 0, "N")
for i in range(6): align.set(i, 7, "0")
for i in range(6,8): align.set(i, 7, "1")
align.binSwitch(7)
assert ''.join([align.get(i, 7) for i in range(8)]) == '11111100'
align2 = align.vslice(0, 4)
align.clear()
assert align.ns()==0
assert align.ls()==0
assert align2.numberOfSequences()==8
assert align2.numberOfSites()==4
align2.populationLabel(4)
align2.sitePosition(3)
align2.name(4, "flahiheup")
assert align2.name(4) == "flahiheup"
assert align2.find("flahiheup")==4
assert align2.find("flahi")==-1
assert align2.find("flahi", False)==4
align2.group(1, 747)
assert align2.group(1)==747
cont = align2.hslice(2,5)
assert cont.ns()==3
assert cont.ls(0)==align2.ls()
########################################################################
def test_BppDiversity():
"""
Test BppDiversity
"""
print "## Testing egglib_binding.BppDiversity"
align = egglib_binding.Align()
align.append("name1", "AAGAAAAAAAAAAAAAAAAAA")
align.append("name2", "AAGAAAAAAAAAAAAA-AAAA")
align.append("name3", "AAAACAAAAAAAAAAA-AAAA")
align.append("name4", "AAAACAAAAAGAAAAA-AAAA")
align.append("name5", "AAAACAAAAAGAAAAAAAAAA")
align.append("name6", "AAGACAATAAGAAAAAAAAAG", 999)
bpp = egglib_binding.BppDiversity()
bpp.load(align)
assert bpp.hasOutgroup()==True
assert bpp.S() == 3
bpp.Sinf()
bpp.Ssin()
bpp.eta()
bpp.Sext()
bpp.He()
bpp.He2()
bpp.tW()
bpp.T83()
bpp.K()
bpp.H()
bpp.Ti()
bpp.Tv()
bpp.TiTv()
bpp.load(align, 4)
bpp.nstop()
bpp.ncodon1mut()
bpp.nsyn()
assert bpp.tWS() != 0
bpp.tWNS()
bpp.PiS()
bpp.PiNS()
bpp.Ssites()
bpp.NSsites()
bpp.SS()
bpp.SNS()
a, b, c, d = bpp.MK()
assert bpp.NI() != 0
bpp.D()
bpp.Deta()
assert bpp.Dfl() != 0
bpp.Dflstar()
bpp.F()
bpp.Fstar()
bpp.rhoH()
########################################################################
def test_Consensus():
"""
Test Consensus
"""
print "## Testing egglib_binding.Consensus"
align = egglib_binding.Align()
align.append("name1_a", "AAAAAAAAAAAAAA")
align.append("name1_b", "AAAAAAAAAAAAAA")
align.append("name2_c", "AAACAAAAAAAAAA")
align.append("name2_d", "AAACAAATAAAAAA")
align.append("name3#e", "AAAAAAAAAAAAAA")
align.append("name3#f", "AAAAAAANAAA?AA")
consensus = egglib_binding.Consensus()
consensus.check_sequences(align)
align.set(5, 11, '?')
assert align.ns()==6
consensus.setMissing("#")
consensus.setDisagreement("?")
align2 = consensus.consensus(align, '_', True)
assert align2.ns() == 4
a, b, c, d = consensus.firstSequenceNames()
a, b, c, d = consensus.secondSequenceNames()
a, b, c, d = consensus.roots()
consensus.consistentPositions()
consensus.complementaryPositions()
consensus.uninformativePositions()
consensus.ambiguousPositions()
consensus.atLeastPartiallyResolvedAmbiguities()
consensus.inconsistentPositions()
align3 = consensus.consensus(align, '#', True)
assert align3.ns() == 5
########################################################################
def test_Container():
"""
Test Container
"""
print "## Testing egglib_binding.Container"
cont = egglib_binding.Container()
cont.append('name1', 'AAAAAAAAAAAA')
cont.append('name2', 'AAAAAAAAAAAAAAAAAAAAAA')
cont.append('name3', 'AAAAAAAAAAAACCCG')
cont.append('name4', 'AAAAAAAAAAAAGCCCAAAAGGGGGCC')
cont.remove(2)
cont.name(0)
cont.name(1, 'name2bis')
cont.sequence(2)
cont.sequence(2, 'AAAAAAAAAAAACCCG')
cont.appendSequence(2, 'AAAAAAAAAAAACCCG')
cont.set(2,31, 'U')
assert cont.get(0,4) == 'A'
assert cont.group(1) == 0
cont.group(1,743)
cont2 = cont.hslice(1,3)
assert cont2.ns() == 2
assert cont2.ls(0) == 22
assert cont2.ls(1) == 32
assert cont2.group(0) == 743
assert cont.isEqual() == False
cont.equalize('N')
assert cont.isEqual() == True
assert cont.get(0,30) == 'N'
assert cont.find('name2') == -1
assert cont.find('name2', False) == 1
cont.clear()
########################################################################
def test_coalesce():
"""
Test coalescence module classes and Random
"""
print "## Testing egglib_binding coalescence module and egglib_binding.Random"
r = egglib_binding.Random()
r = egglib_binding.Random(1445.12, 0.1485)
assert r.seed1()==1445.12
assert r.seed2()==0.1485
r.erand(4.)
for i in range(100000):
if r.irand(120)>=120: raise AssertionError, 'too large irand!'
r.prand(0.5)
r.nrand()
r.grand(0.163)
r.uniform()
r = egglib_binding.Random()
ps = egglib_binding.ParamSet()
ps.numberOfPopulations()
ps.addPopulation(0)
ps.addPopulation(0)
ps.pairwiseMigrationRate(0, 1, 0.5)
assert ps.pairwiseMigrationRate(0, 1) == 0.5
ps.migrationRate(1.2)
ps.populationSize(1, 2)
assert ps.populationSize(1)==2
ps.growthRate(2, 1.8)
assert ps.growthRate(2) == 1.8
ps.numberOfSegments(1000)
assert ps.numberOfSegments()==1000
ps.recombinationRate(4.)
assert ps.recombinationRate() == 4
ps.selfingRate(0.96)
assert ps.selfingRate()==0.96
ps.singles(0, 20)
ps.doubles(1, 20)
ps.singles(2, 8)
assert ps.singles(0)==20
assert ps.doubles(0)==0
assert ps.singles(1)==0
assert ps.doubles(1)==20
assert ps.singles(2)==8
assert ps.doubles(2)==0
assert ps.numberOfSamples()==68
e1 = egglib_binding.PopulationFusion(0.1, 0, 1)
e2 = egglib_binding.AllMigrationRateChange(0.3, 0.4)
e3 = egglib_binding.PopulationSplit(0.2, 2, 0.8)
e4 = egglib_binding.AllPopulationSizeChange(0.4, 1)
e5 = egglib_binding.Bottleneck(0.5, 0.8)
e6 = egglib_binding.GrowthRateChange(0.6, 0)
e7 = egglib_binding.SelfingRateChange(0.7, 0.01)
e8 = egglib_binding.SingleMigrationRateChange(0.8, 0, 2, 1.8)
e9 = egglib_binding.PopulationBottleneck(0.9, 0, 0.05)
e10 = egglib_binding.PopulationGrowthRateChange(1.0, 0, 5)
e11 = egglib_binding.SinglePopulationSizeChange(1.1, 2, 4)
ps.addChange(e1)
ps.addChange(e2)
ps.addChange(e3)
ps.addChange(e4)
ps.addChange(e5)
ps.addChange(e6)
ps.addChange(e7)
ps.addChange(e8)
ps.addChange(e9)
ps.addChange(e10)
ps.addChange(e11)
## we suppose that change array methods are called by Controller
# we will also skip special methods of Controller to apply events
# the same for Current, Edge, EdgePool, Mutation, Population
controller = egglib_binding.Controller(ps, r)
x = ps.numberOfSamples()
while x>1:
x = controller.step()
controller.reset()
x = ps.numberOfSamples()
while x>1:
x = controller.step()
arg = controller.getArg()
arg.time()
arg.ageUltimateMRCA()
for i in range(1000):
arg.ageMRCA(i)
arg.newick(i)
# some methods skipped again
m = egglib_binding.Mutator()
m.fixedNumberOfMutations(12)
    assert m.fixedNumberOfMutations() == 12
"""
==========
Engine
==========
Engine runs the simulation.
"""
import os
import logging as log
import pprint
from typing import (
Any, Dict, Optional, Union, Tuple, Callable)
import math
import datetime
import time as clock
import uuid
from vivarium.composites.toys import Proton, Electron, Sine, PoQo, ToyDivider
from vivarium.core.store import hierarchy_depth, Store, generate_state
from vivarium.core.emitter import get_emitter
from vivarium.core.process import (
Process,
ParallelProcess,
)
from vivarium.core.serialize import serialize_value
from vivarium.core.composer import Composer
from vivarium.library.topology import (
delete_in,
assoc_path,
inverse_topology
)
from vivarium.library.units import units
from vivarium.core.types import (
HierarchyPath, Topology, Schema, State, Update, Processes)
pretty = pprint.PrettyPrinter(indent=2, sort_dicts=False)
def pp(x: Any) -> None:
"""Print ``x`` in a pretty format."""
pretty.pprint(x)
def pf(x: Any) -> str:
"""Format ``x`` for display."""
return pretty.pformat(x)
log.basicConfig(level=os.environ.get("LOGLEVEL", log.WARNING))
def starts_with(
a_list: HierarchyPath,
sub: HierarchyPath,
) -> bool:
"""Check whether one path is a prefix of another.
Args:
a_list: Path to look for prefix in.
sub: Prefix.
Returns:
True if ``sub`` is a prefix of ``a_list``; False otherwise.
"""
return len(sub) <= len(a_list) and all(
a_list[i] == el
for i, el in enumerate(sub))
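# For example, starts_with(('a', 'b', 'c'), ('a', 'b')) is True while
# starts_with(('a', 'b'), ('a', 'c')) is False.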
def invert_topology(
update: Update,
args: Tuple[HierarchyPath, Topology],
) -> State:
"""Wrapper function around ``inverse_topology``.
Wraps :py:func:`vivarium.library.topology.inverse_topology`.
Updates are produced relative to the process that produced them. To
transform them such that they are relative to the root of the
simulation hierarchy, this function "inverts" a topology.
Args:
update: The update.
args: Tuple of the path to which the update is relative and the
topology.
Returns:
The update, relative to the root of ``path``.
"""
path, topology = args
return inverse_topology(path[:-1], update, topology)
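# Illustrative sketch (assumed store layout, not taken from this module): a process mounted at
# path ('agents', '0', 'growth') with topology {'mass': ('globals', 'mass')} that returns the
# update {'mass': 1.0} would be re-rooted by invert_topology to
# {'agents': {'0': {'globals': {'mass': 1.0}}}}.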
def timestamp(dt: Optional[Any] = None) -> str:
"""Get a timestamp of the form ``YYYYMMDD.HHMMSS``.
Args:
dt: Datetime object to generate timestamp from. If not
specified, the current time will be used.
Returns:
Timestamp.
"""
if not dt:
dt = datetime.datetime.now()
return "%04d%02d%02d.%02d%02d%02d" % (
dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second)
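# For example, timestamp(datetime.datetime(2021, 3, 7, 15, 30, 45)) returns '20210307.153045'.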
def invoke_process(
process: Process,
interval: float,
states: State,
) -> Update:
"""Compute a process's next update.
Call the process's
:py:meth:`vivarium.core.process.Process.next_update` function with
``interval`` and ``states``.
"""
return process.next_update(interval, states)
class Defer:
def __init__(
self,
defer: Any,
f: Callable,
args: Tuple,
) -> None:
"""Allows for delayed application of a function to an update.
The object simply holds the provided arguments until it's time
for the computation to be performed. Then, the function is
called.
Args:
defer: An object with a ``.get()`` method whose output will
be passed to the function. For example, the object could
be an :py:class:`InvokeProcess` object whose ``.get()``
method will return the process update.
            f: The function. For example,
:py:func:`invert_topology` to transform the returned
update.
args: Passed as the second argument to the function.
"""
self.defer = defer
self.f = f
self.args = args
def get(self) -> Update:
"""Perform the deferred computation.
Returns:
The result of calling the function.
"""
return self.f(
self.defer.get(),
self.args)
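# Illustrative sketch of the deferral pattern (hypothetical values): given
#     deferred = Defer(InvokeProcess(process, 1.0, states), invert_topology, (path, topology))
# nothing extra is computed until deferred.get(), which applies invert_topology to the wrapped
# process update.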
class EmptyDefer(Defer):
def __init__(self) -> None:
function = lambda update, arg: update
args = ()
super().__init__(None, function, args)
def get(self) -> Update:
return {}
class InvokeProcess:
def __init__(
self,
process: Process,
interval: float,
states: State,
) -> None:
"""A wrapper object that computes an update.
This class holds the update of a process that is not running in
parallel. When instantiated, it immediately computes the
process's next update.
Args:
process: The process that will calculate the update.
interval: The timestep for the update.
states: The simulation state to pass to the process's
``next_update`` function.
"""
self.process = process
self.interval = interval
self.states = states
self.update = invoke_process(
self.process,
self.interval,
self.states)
def get(self) -> Update:
"""Return the computed update.
This method is analogous to the ``.get()`` method in
:py:class:`vivarium.core.process.ParallelProcess` so that
parallel and non-parallel updates can be intermixed in the
simulation engine.
"""
return self.update
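# Illustrative usage (sketch): wrapping a non-parallel process computes its update immediately,
# and .get() simply hands it back:
#     update = InvokeProcess(process, 1.0, states).get()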
class Engine:
def __init__(
self,
processes: Optional[Processes] = None,
topology: Optional[Topology] = None,
store: Optional[Store] = None,
initial_state: Optional[State] = None,
experiment_id: Optional[str] = None,
experiment_name: Optional[str] = None,
description: str = '',
emitter: Union[str, dict] = 'timeseries',
emit_topology: bool = True,
emit_processes: bool = False,
emit_config: bool = False,
invoke: Optional[Any] = None,
emit_step: float = 1,
display_info: bool = True,
progress_bar: bool = False,
) -> None:
"""Defines simulations
Arguments:
* **processes** (:py:class:`dict`): A dictionary that
maps :term:`process` names to process objects. You will
usually get this from the ``processes`` attribute of the
dictionary from :py:meth:`vivarium.core.composer.Composer.generate`.
* **topology** (:py:class:`dict`): A dictionary that
maps process names to sub-dictionaries. These sub-dictionaries
map the process's port names to tuples that specify a path through
the :term:`tree` from the :term:`compartment` root to the
:term:`store` that will be passed to the process for that port.
* **store**: A pre-loaded Store. This is an alternative to
passing in processes and topology dict, which can not be
loaded at the same time.
* **initial_state** (:py:class:`dict`): By default an
empty dictionary, this is the initial state of the simulation.
* **experiment_id** (:py:class:`uuid.UUID` or :py:class:`str`):
A unique identifier for the experiment. A UUID will be generated
if none is provided.
* **description** (:py:class:`str`): A description of the
experiment. A blank string by default.
* **emitter** (:py:class:`dict`): An emitter configuration
which must conform to the specification in the documentation
for :py:func:`vivarium.core.emitter.get_emitter`. The experiment
ID will be added to the dictionary you provide as the value for
the key ``experiment_id``.
* **display_info** (:py:class:`bool`): prints experiment info
* **progress_bar** (:py:class:`bool`): shows a progress bar
* **emit_config** (:py:class:`bool`): If True, this will emit
the serialized processes, topology, and initial state.
"""
self.experiment_id = experiment_id or str(uuid.uuid1())
self.initial_state = initial_state or {}
self.emit_step = emit_step
# get the processes, topology, and store
if processes and topology and not store:
self.processes = processes
self.topology = topology
# initialize the store
self.state: Store = generate_state(
self.processes,
self.topology,
self.initial_state)
elif store:
self.state = store
# get processes and topology from the store
self.processes = self.state.get_processes()
self.topology = self.state.get_topology()
else:
raise Exception(
'load either store or (processes and topology) into Engine')
# display settings
self.experiment_name = experiment_name or self.experiment_id
self.description = description
self.display_info = display_info
self.progress_bar = progress_bar
self.time_created = timestamp()
if self.display_info:
self.print_display()
# parallel settings
self.invoke = invoke or InvokeProcess
self.parallel: Dict[HierarchyPath, ParallelProcess] = {}
# get a mapping of all paths to processes
self.process_paths: Dict[HierarchyPath, Process] = {}
self.deriver_paths: Dict[HierarchyPath, Process] = {}
self._find_process_paths(self.processes)
# emitter settings
emitter_config = emitter
if isinstance(emitter_config, str):
emitter_config = {'type': emitter_config}
else:
emitter_config = dict(emitter_config)
emitter_config['experiment_id'] = self.experiment_id
self.emitter = get_emitter(emitter_config)
self.emit_topology = emit_topology
self.emit_processes = emit_processes
self.emit_config = emit_config
# initialize global time
self.experiment_time = 0.0
# run the derivers
self.send_updates([])
# run the emitter
self.emit_configuration()
self.emit_data()
# logging information
log.info('experiment %s', str(self.experiment_id))
log.info('\nPROCESSES:')
log.info(pf(self.processes))
log.info('\nTOPOLOGY:')
log.info(pf(self.topology))
def _add_process_path(
self,
process: Process,
path: HierarchyPath
) -> None:
if process.is_deriver():
self.deriver_paths[path] = process
else:
self.process_paths[path] = process
def _find_process_paths(
self,
processes: Processes
) -> None:
tree = hierarchy_depth(processes)
for path, process in tree.items():
self._add_process_path(process, path)
def emit_configuration(self) -> None:
"""Emit experiment configuration."""
data: Dict[str, Any] = {
'time_created': self.time_created,
'experiment_id': self.experiment_id,
'name': self.experiment_name,
'description': self.description,
'topology': self.topology
if self.emit_topology else None,
'processes': serialize_value(self.processes)
if self.emit_processes else None,
'state': serialize_value(self.state.get_config())
if self.emit_config else None,
}
emit_config: Dict[str, Any] = {
'table': 'configuration',
'data': data}
self.emitter.emit(emit_config)
def emit_data(self) -> None:
"""Emit the current simulation state.
Only variables with ``_emit=True`` are emitted.
"""
data = self.state.emit_data()
data.update({
'time': self.experiment_time})
emit_config = {
'table': 'history',
'data': serialize_value(data)}
self.emitter.emit(emit_config)
def invoke_process(
self,
process: Process,
path: HierarchyPath,
interval: float,
states: State,
) -> Any:
"""Trigger computation of a process's update.
To allow processes to run in parallel, this function only
triggers update computation. When the function exits,
computation may not be complete.
Args:
process: The process.
path: The path at which the process resides. This is used to
track parallel processes in ``self.parallel``.
interval: The timestep for which to compute the update.
states: The simulation state to pass to
:py:meth:`vivarium.core.process.Process.next_update`.
Returns:
The deferred simulation update, for example a
:py:class:`vivarium.core.process.ParallelProcess` or an
:py:class:`InvokeProcess` object.
"""
if process.parallel:
# add parallel process if it doesn't exist
if path not in self.parallel:
self.parallel[path] = ParallelProcess(process)
# trigger the computation of the parallel process
self.parallel[path].update(interval, states)
return self.parallel[path]
import sys
import unittest
import copy
import numpy as np
from scipy.linalg import block_diag
import pyinduct as pi
import pyinduct.hyperbolic.feedforward as hff
import pyinduct.parabolic as parabolic
import pyinduct.simulation as sim
from pyinduct.tests import show_plots
import pyqtgraph as pg
class SimpleInput(sim.SimulationInput):
"""
the simplest input we can imagine
"""
def __init__(self):
super().__init__("SimpleInput")
def _calc_output(self, **kwargs):
return 0
class MonotonousInput(sim.SimulationInput):
"""
an input that ramps up
"""
def __init__(self):
super().__init__("MonotonousInput")
def _calc_output(self, **kwargs):
t = kwargs["time"]
extra_data = np.sin(t)
if np.isclose(t % 2, 0):
extra_data = np.nan
return dict(output=kwargs["time"], extra_data=extra_data)
class CorrectInput(sim.SimulationInput):
"""
a diligent input
"""
def __init__(self, output, limits=(0, 1), der_order=0):
super().__init__(self)
self.out = np.ones(der_order + 1) * output
self.t_min, self.t_max = limits
def _calc_output(self, **kwargs):
if "time" not in kwargs:
raise ValueError("mandatory key not found!")
if "weights" not in kwargs:
raise ValueError("mandatory key not found!")
if "weight_lbl" not in kwargs:
raise ValueError("mandatory key not found!")
return dict(output=self.out)
class AlternatingInput(sim.SimulationInput):
"""
a simple alternating input, composed of smooth transitions
"""
def _calc_output(self, **kwargs):
t = kwargs["time"] % 2
if t < 1:
res = self.tr_up(t)
else:
res = self.tr_down(t)
return dict(output=res - .5)
def __init__(self):
super().__init__(self)
self.tr_up = pi.SmoothTransition(states=(0, 1),
interval=(0, 1),
method="poly")
self.tr_down = pi.SmoothTransition(states=(1, 0),
interval=(1, 2),
method="poly")
class SimulationInputTest(unittest.TestCase):
def setUp(self):
pass
def test_abstract_funcs(self):
# raise type error since abstract method is not implemented
self.assertRaises(TypeError, sim.SimulationInput)
# method implemented, should work
u = SimpleInput()
def test_call_arguments(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = CorrectInput(output=1, limits=(0, 1))
ic = np.zeros((2, 1))
ss = sim.StateSpace({1: a}, {0: {1: b}}, input_handle=u)
# if caller provides correct kwargs no exception should be raised
res = sim.simulate_state_space(ss, ic, pi.Domain((0, 1), num=10))
def test_storage(self):
a = np.eye(2, 2)
b = np.array([[0], [1]])
u = MonotonousInput()
ic = np.zeros((2, 1))
ss = sim.StateSpace(a, b, input_handle=u)
# run simulation to fill the internal storage
domain = pi.Domain((0, 10), num=11)
bigger_domain = pi.Domain((-1, 11), num=13)
res = sim.simulate_state_space(ss, ic, domain)
# don't return any entries that aren't there
self.assertRaises(KeyError, u.get_results, domain, "Unknown Entry")
# default key is "output"
ed = u.get_results(domain)
ed_explicit = u.get_results(domain, result_key="output")
self.assertTrue(np.array_equal(ed, ed_explicit))
# return an np.ndarray as default
self.assertIsInstance(ed, np.ndarray)
# return EvalData if corresponding flag is set
self.assertIsInstance(u.get_results(domain, as_eval_data=True),
pi.EvalData)
# if data has to be extrapolated, just repeat the last values
res = u.get_results(bigger_domain)
self.assertEqual(res[0], res[1])
self.assertEqual(res[-2], res[-1])
# nan values in the data storage should be ignored
res = u.get_results(bigger_domain, result_key="extra_data")
# storage contains values
self.assertTrue(u._time_storage)
self.assertTrue(u._value_storage)
# clear it
u.clear_cache()
# storage should be empty
self.assertFalse(u._time_storage)
self.assertFalse(u._value_storage)
# double clearing should work
u.clear_cache()
class CanonicalFormTest(unittest.TestCase):
def setUp(self):
self.cf = sim.CanonicalForm()
self.u = SimpleInput()
def test_add_to(self):
a = np.eye(5)
self.cf.add_to(dict(name="E", order=0, exponent=1), a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], a))
self.cf.add_to(dict(name="E", order=0, exponent=1), 5 * a)
self.assertTrue(np.array_equal(self.cf.matrices["E"][0][1], 6 * a))
b = np.eye(10)
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), b)
self.cf.add_to(dict(name="E", order=2, exponent=1), b)
self.assertTrue(np.array_equal(self.cf.matrices["E"][2][1], b))
f = np.atleast_2d(np.array(range(5))).T
self.assertRaises(ValueError,
self.cf.add_to,
dict(name="E", order=0, exponent=1), f)
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], f))
# try to add something with derivative or exponent to f: value should
# end up in f
self.cf.add_to(dict(name="f"), f)
self.assertTrue(np.array_equal(self.cf.matrices["f"], 2 * f))
c = np.atleast_2d(np.array(range(5))).T
# that one should be easy
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1], c))
        # here G01 has to be expanded
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c))))
        # here G01 has to be expanded again
self.cf.add_to(dict(name="G", order=0, exponent=1), c, column=3)
self.assertTrue(np.array_equal(self.cf.matrices["G"][0][1],
np.hstack((c, c, np.zeros_like(c), c))))
# input derivatives can occur
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=0)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1], c))
# expansion should still work
self.cf.add_to(dict(name="G", order=1, exponent=1), c, column=1)
self.assertTrue(np.array_equal(self.cf.matrices["G"][1][1],
np.hstack((c, c))))
class ParseTest(unittest.TestCase):
def setUp(self):
# scalars
self.scalars = pi.Scalars(np.vstack(list(range(3))))
# callbacks
self.u = pi.ConstantTrajectory(7)
u1 = CorrectInput(output=1)
u2 = CorrectInput(output=2)
self.u_vec = pi.SimulationInputVector([u1, u2])
self.u_dt = CorrectInput(output=1, der_order=1)
u1_dt = CorrectInput(output=1, der_order=1)
u2_dt = CorrectInput(output=2, der_order=1)
self.u_vec_dt = pi.SimulationInputVector([u1_dt, u2_dt])
# inputs
self.input = pi.Input(self.u)
self.vec_input_1 = pi.Input(self.u_vec, index=0)
self.vec_input_2 = pi.Input(self.u_vec, index=1)
self.input_dt = pi.Input(self.u_dt, order=1)
self.vec_input_dt_1 = pi.Input(self.u_vec_dt, index=0, order=1)
self.vec_input_dt_2 = pi.Input(self.u_vec_dt, index=1, order=1)
# scale function
def heavyside(z):
if z < 0.5:
return 0
elif z == 0.5:
return .5
else:
return 1
base = pi.Base(pi.Function(heavyside))
pi.register_base("heavyside_base", base)
# distributed base
nodes = pi.Domain((0, 1), num=3)
self.distributed_base = pi.LagrangeFirstOrder.cure_interval(nodes)
pi.register_base("distributed_base", self.distributed_base)
fractions = [pi.ComposedFunctionVector(f, s) for f, s in
zip(self.distributed_base, nodes)]
self.composed_base = pi.Base(fractions)
pi.register_base("composed_base", self.composed_base)
# lumped base
self.lumped_base = pi.Base([pi.ConstantFunction(1)])
pi.register_base("lumped_base", self.lumped_base)
# Test Functions
self.test_funcs = pi.TestFunction("distributed_base")
self.test_funcs_at0 = self.test_funcs(0)
self.test_funcs_at1 = self.test_funcs(1)
self.test_funcs_dz = self.test_funcs.derive(1)
self.test_funcs_dz_at1 = self.test_funcs_dz(1)
self.comp_test_funcs = pi.TestFunction("composed_base")
self.comp_test_funcs_at0 = self.comp_test_funcs(0)
self.comp_test_funcs_at1 = self.comp_test_funcs(1)
self.comp_test_funcs_dz = self.comp_test_funcs.derive(1)
self.comp_test_funcs_dz_at1 = self.comp_test_funcs_dz(1)
# Scalar Functions
self.scalar_func = pi.ScalarFunction("heavyside_base")
# Distributed / Field Variables
self.field_var = pi.FieldVariable("distributed_base")
self.field_var_at1 = self.field_var(1)
self.field_var_dz = self.field_var.derive(spat_order=1)
self.field_var_dz_at1 = self.field_var_dz(1)
self.field_var_ddt = self.field_var.derive(temp_order=2)
self.field_var_ddt_at0 = self.field_var_ddt(0)
self.field_var_ddt_at1 = self.field_var_ddt(1)
self.comp_field_var = pi.FieldVariable("composed_base")
self.comp_field_var_at1 = self.comp_field_var(1)
self.comp_field_var_dz = self.comp_field_var.derive(spat_order=1)
self.odd_weight_field_var = pi.FieldVariable(
"distributed_base", weight_label="special_weights")
# Field variable 2
self.lumped_var = pi.FieldVariable("lumped_base")
# ---------------------------------------------------------------------
# Construction of Equation Terms
# ---------------------------------------------------------------------
# inputs
self.input_term1 = pi.ScalarTerm(pi.Product(self.test_funcs_at1,
self.input))
self.input_term1_swapped = pi.ScalarTerm(pi.Product(self.input,
self.test_funcs_at1)
)
self.input_term2 = pi.ScalarTerm(pi.Product(self.test_funcs_dz_at1,
self.input))
self.input_term3 = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input),
limits=(0, 1))
self.input_term3_swapped = pi.IntegralTerm(pi.Product(self.input,
self.test_funcs),
limits=(0, 1))
self.input_term3_scaled = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, 1))
self.input_term3_scaled_first_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(0, .5))
self.input_term3_scaled_second_half = pi.IntegralTerm(
pi.Product(pi.Product(self.scalar_func, self.test_funcs),
self.input),
limits=(.5, 1))
self.input_term_dt = pi.IntegralTerm(pi.Product(self.test_funcs,
self.input_dt),
limits=(0, 1))
self.input_term_vectorial1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_1))
self.input_term_vectorial2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_2))
self.input_term_vectorial_dt1 = pi.ScalarTerm(
pi.Product(self.test_funcs_at0, self.vec_input_dt_1))
self.input_term_vectorial_dt2 = pi.ScalarTerm(
pi.Product(self.test_funcs_at1, self.vec_input_dt_2))
# pure test function terms
self.func_term = pi.ScalarTerm(self.test_funcs_at1)
self.func_term_int = pi.IntegralTerm(pi.Product(self.test_funcs,
self.test_funcs),
limits=(0, 1))
self.comp_func_term = pi.ScalarTerm(self.comp_test_funcs_at1)
self.comp_func_term_int = pi.IntegralTerm(
pi.Product(self.comp_test_funcs, self.comp_test_funcs),
limits=(0, 1))
# pure field variable terms
self.field_term_at1 = pi.ScalarTerm(self.field_var_at1)
self.field_term_dz_at1 = pi.ScalarTerm(self.field_var_dz_at1)
self.field_term_ddt_at1 = pi.ScalarTerm(self.field_var_ddt_at1)
self.field_int = pi.IntegralTerm(self.field_var, limits=(0, 1))
self.field_int_half = pi.IntegralTerm(self.field_var, limits=(0, .5))
self.field_dz_int = pi.IntegralTerm(self.field_var_dz, (0, 1))
self.field_ddt_int = pi.IntegralTerm(self.field_var_ddt, (0, 1))
self.comp_field_term_at1 = pi.ScalarTerm(self.comp_field_var_at1)
self.comp_field_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
self.comp_field_dz_int = pi.IntegralTerm(self.comp_field_var,
limits=(0, 1))
# products
self.prod_term_fs_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.scalars))
self.prod_int_fs = pi.IntegralTerm(pi.Product(self.field_var,
self.scalars),
(0, 1))
self.prod_int_f_f = pi.IntegralTerm(pi.Product(self.field_var,
self.test_funcs),
(0, 1))
self.prod_int_f_f_swapped = pi.IntegralTerm(pi.Product(self.test_funcs,
self.field_var),
(0, 1))
self.prod_int_f_at1_f = pi.IntegralTerm(
pi.Product(self.field_var_at1, self.test_funcs), (0, 1))
self.prod_int_f_f_at1 = pi.IntegralTerm(
pi.Product(self.field_var, self.test_funcs_at1), (0, 1))
self.prod_term_f_at1_f_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_at1))
self.prod_int_fddt_f = pi.IntegralTerm(
pi.Product(self.field_var_ddt, self.test_funcs), (0, 1))
self.prod_term_fddt_at0_f_at0 = pi.ScalarTerm(
pi.Product(self.field_var_ddt_at0, self.test_funcs_at0))
self.prod_term_f_at1_dphi_at1 = pi.ScalarTerm(
pi.Product(self.field_var_at1, self.test_funcs_dz_at1))
self.temp_int = pi.IntegralTerm(pi.Product(self.field_var_ddt,
self.test_funcs),
limits=(0, 1))
self.spat_int = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs_dz),
limits=(0, 1))
self.spat_int_asymmetric = pi.IntegralTerm(pi.Product(self.field_var_dz,
self.test_funcs),
limits=(0, 1))
self.prod_term_tf_at0_lv_at0 = pi.ScalarTerm(
pi.Product(self.test_funcs(0), self.lumped_var(0)))
self.prod_term_tf_at0_lv_at0_swapped = pi.ScalarTerm(
pi.Product(self.lumped_var(0), self.test_funcs(0)))
self.prod_int_sf_fv = pi.IntegralTerm(pi.Product(self.scalar_func,
self.field_var),
limits=(0, 1))
self.prod_int_sf_fv_swapped = pi.IntegralTerm(
pi.Product(self.field_var, self.scalar_func),
limits=(0, 1))
self.alternating_weights_term = pi.IntegralTerm(
self.odd_weight_field_var,
limits=(0, 1))
def test_Input_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term2, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[0], [-2], [2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_swapped, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.25], [.5], [.25]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[.0], [.25], [.25]]))
terms_fh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_first_half, name="test"),
finalize=False).get_static_terms()
        self.assertFalse(np.iscomplexobj(terms_fh["G"][0][1]))
np.testing.assert_array_almost_equal(terms_fh["G"][0][1],
np.array([[.0], [.0], [.0]]))
terms_sh = sim.parse_weak_formulation(
sim.WeakFormulation(self.input_term3_scaled_second_half, name="test"),
finalize=False).get_static_terms()
        self.assertFalse(np.iscomplexobj(terms_sh["G"][0][1]))
np.testing.assert_array_almost_equal(terms_sh["G"][0][1],
np.array([[.0], [.25], [.25]]))
# vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial1, self.input_term_vectorial2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][0][1]))
np.testing.assert_array_almost_equal(terms["G"][0][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
# time derivatives of inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
self.input_term_dt,
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[.25], [.5], [.25]]))
# time derivative of vectorial inputs
terms = sim.parse_weak_formulation(sim.WeakFormulation(
[self.input_term_vectorial_dt1, self.input_term_vectorial_dt2],
name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["G"][1][1]))
np.testing.assert_array_almost_equal(terms["G"][1][1],
np.array([[1, 0],
[0, 0],
[0, 1]]))
def test_TestFunction_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0], [0], [1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[1 / 6],
[1 / 3],
[1 / 6]]))
if 0:
# composed
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.comp_func_term, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[0, 0],
[0, .5],
[1, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.comp_func_term_int, name="test"),
finalize=False).get_static_terms()
self.assertFalse(np.iscomplexobj(terms["f"]))
np.testing.assert_array_almost_equal(terms["f"],
np.array([[1 / 6 + 0],
[1 / 3 + .25],
[1 / 6 + 1]]))
def test_FieldVariable_term(self):
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_ddt_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][2][1]))
np.testing.assert_array_almost_equal(terms["E"][2][1],
np.array([[0, 0, 1]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_term_dz_at1, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[0, -2, 2]]))
terms = sim.parse_weak_formulation(
sim.WeakFormulation(self.field_int, name="test"),
finalize=False).get_dynamic_terms()["distributed_base"]
self.assertFalse(np.iscomplexobj(terms["E"][0][1]))
np.testing.assert_array_almost_equal(terms["E"][0][1],
np.array([[.25, .5, .25]]))
terms = | |
# ----------------------------------------------------------------------
# Repository: ucd-cws/amaptor
import os
import logging
log = logging.getLogger("amaptor.layer")
import arcpy
from amaptor.version_check import PRO, ARCMAP, mapping, mp
from amaptor.errors import NotSupportedError, EmptyFieldError, LayerNotFoundError
from amaptor.functions import get_workspace_type, get_workspace_factory_of_dataset
from amaptor.constants import _BLANK_FEATURE_LAYER, _BLANK_RASTER_LAYER
class Layer(object):
"""
This object corresponds to arcpy Layers - it theoretically supports the full range of API calls for Layer objects
but with a major caveat that only some of that has been specifically written for amaptor. The remaining calls
    get passed straight through to the underlying Layer object, and this behavior is subject to change as more of the
object is officially supported. When using amaptor Layers (and the rest of amaptor) take note of the version
you are using so that if the API changes (it will), you can continue to run your code. We'll try to make sensible
evolutions that help with things and harm as little prior code as possible.
This object is new and not as well-tested. Existing amaptor functions should now return amaptor.Layer objects,
but the ability to work with either amaptor layers or ArcGIS native layers is preserved in many cases throughout
code, both for backwards compatibility and for future convenience, where you might want to
"""
def __init__(self, layer_object_or_file, name=None, map_object=None, template_layer=None):
"""
Create a Layer object by providing an ArcGIS layer instance, an ArcGIS layer file, or a data source.
:param layer_object_or_file: an actual instance of a layer object, a valid data source (feature class, raster, etc) or a layer file path (layer file paths work best in Pro, which supports multiple layers in a single file - for cross platform usage, open the layer file and get the Layer object you need, then make an amaptor layer with that)
:param name: used when loading from a file in Pro to select the layer of interest
:param map_object: the map this layer belongs to - optional but used when updating symbology in ArcMap - not necessary if you plan to use map.add_layer or map.insert_layer before updating symbology
:param template_layer: This is used in Pro when constructing a layer from a data source - it will start automatically
with this layer's properties, symbology, etc. In future versions, we hope to have it autodetect the most appropriate
template layer that comes with amaptor, but for now, this is an option so that you can get the right properties
immediately.
"""
self.init = False # we'll set to True when done with init - provides a flag when creating a new layer from scratch in Pro, that we're loading a blank layer
self.layer_object = None
if PRO and isinstance(layer_object_or_file, arcpy._mp.Layer):
self.layer_object = layer_object_or_file
elif ARCMAP and isinstance(layer_object_or_file, mapping.Layer):
self.layer_object = layer_object_or_file
elif PRO: # otherwise, assume it's a path and run the import for each.
if layer_object_or_file.endswith(".lyr") or layer_object_or_file.endswith(".lyrx"):
layer_file = mp.LayerFile(layer_object_or_file)
for layer in layer_file.listLayers(): # gets the specified layer from the layer file OR the last one
self.layer_object = layer
if name and layer.name == name:
break
else: # handle the case of providing a data source of some sort - TODO: Needs to do more checking and raise appropriate exceptions (instead of raising ArcGIS' exceptions)
# In Pro this is complicated - we can't initialize Layers directly, so we'll use a template for the appropriate data type, then modify it with our information
desc = arcpy.Describe(layer_object_or_file)
if not template_layer:
if desc.dataType in ("FeatureClass", "ShapeFile"):
layer_file = _BLANK_FEATURE_LAYER
elif desc.dataType in ("RasterDataset", "RasterBand"):
layer_file = _BLANK_RASTER_LAYER
else:
raise NotSupportedError(
"This type of dataset isn't supported for initialization in amaptor via ArcGIS Pro")
else:
layer_file = template_layer
avail_layer = arcpy.mp.LayerFile(layer_file)
arcgis_template_layer = avail_layer.listLayers()[0]
if arcgis_template_layer is None:
raise LayerNotFoundError("No layer available for copying from layer file")
elif not arcgis_template_layer.supports("DATASOURCE"):
raise NotSupportedError("Provided layer file doesn't support accessing or setting the data source")
self.layer_object = arcgis_template_layer # set the layer object to the template
self._set_data_source(layer_object_or_file) # now set the data source to be the actual source data - self.data_source does the annoying magic behind this in Pro
self.name = desc.name # set the name to the dataset name, as would be typical - just a simple default
del desc
else:
self.layer_object = mapping.Layer(layer_object_or_file)
self.map = map_object
@property
def name(self):
return self.layer_object.name
@name.setter
def name(self, value):
self.layer_object.name = value
@property
def data_source(self):
if not self.layer_object.supports("DATASOURCE"):
raise NotSupportedError("Provided layer doesn't support accessing or setting the data source")
return self.layer_object.dataSource
@data_source.setter
def data_source(self, new_source):
self._set_data_source(new_source)
def _set_data_source(self, new_source):
if not self.layer_object.supports("DATASOURCE"):
raise NotSupportedError("Provided layer file doesn't support accessing or setting the data source")
desc = arcpy.Describe(new_source)
if desc.extension and desc.extension != "": # get the name with extension for replacing the data source
name = "{}.{}".format(desc.baseName, desc.extension)
else:
name = desc.baseName
if PRO:
old_connection_properties = self.layer_object.connectionProperties
new_factory_type = get_workspace_factory_of_dataset(new_source)
self.layer_object.updateConnectionProperties(
old_connection_properties,
{
'dataset': desc.name,
'connection_info': {'database': desc.path},
'workspace_factory': new_factory_type
}
)
else:
self.layer_object.replaceDataSource(desc.path, get_workspace_type(new_source), name)
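    # A minimal, hypothetical sketch of how the data-source handling above is
    # typically used (the paths below are made up for illustration):
    #
    #     lyr = Layer(r"C:\data\parcels.shp")
    #     lyr.data_source = r"C:\data\parcels_v2.shp"  # re-points the layer via _set_data_source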
@property
def symbology(self):
"""
Access symbology properties. If running from ArcMap and layer symbologyType is "OTHER" raises NotSupportedError
to flag that the symbology is unreadable and unreturnable. No equivalent check in Pro.
:return:
"""
if ARCMAP and self.layer_object.symbologyType == "OTHER":
raise NotSupportedError("Unsupported symbology type in ArcMap")
return self.layer_object.symbology
@symbology.setter
def symbology(self, symbology):
"""
Updates layer symbology based on copying from:
1) amaptor Layer object
2) arcpy.mapping/arcpy.mp Layer objects
3) A path to a layer file - if you pass in a string, it will be assumed to be a path to a layer file and symbology will be loaded from the file
4) symbology object. Symbology objects only exist in Pro, so take care when using them for cross-platform support.
raises NotSupportedError if the provided object cannot be copied from. If you wish to copy symbology from a Layer
file, open it and retrieve the appropriate Layer object and pass it here.
In ArcMap, it *may* require that the current layer and the symbology object (of whatever form) share the same
type of renderer (for example, on a raster, that they both use a classified renderer or both use a stretched
renderer, etc).
IMPORTANT: If you are setting symbology using this method in ArcMap, you MUST attach an amaptor.Map instance
representing the Data Frame that this layer is within as this_layer.map *before* doing any symbology operations.
amaptor functions handle this by default when finding layers and inserting them, but if you are creating
your own amaptor layer objects and haven't yet inserted it into a map, you'll need to set the `map` attribute
```
my_layer = amaptor.Layer("my_layer_name", template_layer="some_layer_file.lyr")
my_layer.map = my_map # set the map attribute so it knows what data frame to use. Should be an amaptor.Map object, not an actual data frame.
my_layer.symbology = symbol_layer # copies symbology from symbol_layer to my_layer
```
The step `my_layer.map` isn't necessary in the instance that you use map.add_layer or map.insert_layer before updating symbology
:param symbology: Symbology can be a symbology object or a layer to copy it from, or a path to a layer file on disk
:return:
"""
if PRO:
if isinstance(symbology, arcpy._mp.Symbology):
new_symbology = symbology
elif isinstance(symbology, arcpy._mp.Layer) or isinstance(symbology, Layer): # if it's an amaptor layer, and we're Pro, copy it from there
new_symbology = symbology.symbology
elif type(symbology) == str:
if not os.path.exists(symbology):
raise RuntimeError("Provided symbology was a string, but is not a valid file path. Please provide a valid file path, layer object, or symbology object")
new_symbology = arcpy.mp.LayerFile(symbology).symbology
else:
raise NotSupportedError("Cannot retrieve symbology from the object provided. Accepted types are amaptor.Layer, arcpy.mp.Symbology, and arcpy.mp.Layer. You provided {}".format(type(symbology)))
self.layer_object.symbology = new_symbology
#self.layer_object.symbology.updateRenderer(new_symbology.renderer.type) # only used in 2.0+
#self.layer_object.symbology.updateColorizer(new_symbology.colorizer.type)
else: # if ArcMap, we need to do some workaround
from amaptor.classes.map import Map # if we put this at the top, we get a circular import - need it to run at runtime for checking - this should be refactored, but not immediately sure how since these classes are mostly appropriately isolated, but bidrectionally reference each other
if self.map is None or not isinstance(self.map, Map):
raise EmptyFieldError("map", "Layer is not attached to an amaptor.Map instance - cannot change symbology. See documentation.")
if isinstance(symbology, Layer):
source_data = symbology.layer_object
elif isinstance(symbology, arcpy.mapping.Layer):
source_data = symbology
elif type(symbology) in (str, unicode):
if not os.path.exists(symbology):
raise RuntimeError("Provided symbology was a string, but is not a valid file path. Please provide a valid file path or layer object")
source_data = arcpy.mapping.Layer(symbology)
else:
raise NotSupportedError("Cannot retrieve symbology from the object provided. Accepted types are amaptor.Layer and arcpy.mapping.Layer. You provided {}".format(type(symbology)))
if self.layer_object.symbologyType != source_data.symbologyType:
log.warning("Trying to apply symbology with a renderer of type {} to a layer with renderer of type {} - this"
"may fail in ArcMap".format(source_data.symbologyType, self.layer_object.symbologyType))
arcpy.mapping.UpdateLayer(data_frame=self.map.map_object,
update_layer=self.layer_object,
source_layer=source_data,
symbology_only=True)
    def __getattr__(self, key):
"""
Helps this to be a standin where layers were used before because it will behave as expected for attributes
of arcpy layers.
:return:
"""
return getattr(self.layer_object, key)
def __setter__(self, key, value):
"""
Helps this to be a standin where layers were used | |
import re
from typing import Dict, Union, Iterable, Any
from ._BaseClasses import DOMNode
POSSIBLE_TAG_CHILD = Union[str, int, float, DOMNode]
def maketag(name: str) -> type:
"""
Creates a new class for a tag with the specified name.
    The class can be used like those associated with standard HTML tags (see the sketch below).
"""
tags: Dict[str, type]
"""
Dictionary mapping tag names to the matching class.
If new classes are added by function maketag(), they are also added to this dictionary.
"""
def attribute_mapper(attr: str) -> str:
"""
For all the classes defined in the module, this function is called to transform the keyword arguments into HTML tag attributes.
    By default, the function replaces underscores (_) by hyphens (-); this implementation instead rewrites names of the form ``v_<name>_<arg>`` to ``v-<name>:<arg>`` (for example, ``v_on_click`` becomes ``v-on:click``) and leaves any other name unchanged.
"""
return re.sub("^v_(.*)_(.*)$", r"v-\1:\2", attr)
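# Illustrative behaviour of the mapper above (the attribute names are made up):
#
#     attribute_mapper("v_on_click")   -> "v-on:click"
#     attribute_mapper("v_bind_href")  -> "v-bind:href"
#     attribute_mapper("data_target")  -> "data_target"   (no v_..._... pattern, unchanged)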
# html element classes
class B(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <b> tag """
class COLGROUP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <colgroup> tag """
class DEL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <del> tag """
class DT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dt> tag """
class INS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <ins> tag """
class PICTURE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <picture> tag """
class VIDEO(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <video> tag """
class ABBR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <abbr> tag """
class TH(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <th> tag """
class BUTTON(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <button> tag """
class SCRIPT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <script> tag """
class HEAD(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <head> tag """
class SECTION(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <section> tag """
class VAR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <var> tag """
class APPLET(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <applet> tag """
class TABLE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <table> tag """
class KEYGEN(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <keygen> tag """
class RUBY(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <ruby> tag """
class OBJECT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <object> tag """
class IMG(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <img> tag """
class DIV(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <div> tag """
class ISINDEX(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <isindex> tag """
class TBODY(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <tbody> tag """
class MENU(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <menu> tag """
class DFN(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <dfn> tag """
class FIELDSET(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <fieldset> tag """
class LABEL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <label> tag """
class COL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <col> tag """
class TEXTAREA(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <textarea> tag """
class CANVAS(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <canvas> tag """
class FONT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <font> tag """
class ACRONYM(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <acronym> tag """
class BDI(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <bdi> tag """
class AREA(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <area> tag """
class INPUT(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <input> tag """
class DATALIST(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <datalist> tag """
class CITE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <cite> tag """
class ASIDE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <aside> tag """
class U(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <u> tag """
class OL(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <ol> tag """
class CENTER(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <center> tag """
class SUB(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <sub> tag """
class LEGEND(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <legend> tag """
class NOFRAMES(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <noframes> tag """
class Q(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <q> tag """
class BASE(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <base> tag """
class H3(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <h3> tag """
class BR(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <br> tag """
class RP(DOMNode):
# noinspection PyDefaultArgument
def __init__(
self,
content: Union[ POSSIBLE_TAG_CHILD, Iterable[POSSIBLE_TAG_CHILD] ],
attributes: Dict[str, Any] = dict(),
style: Dict[str, Any] = dict(),
Class: str = ''
):
""" Represents the <rp> tag """
# ----------------------------------------------------------------------
bias or input "
f"node ({src_node.id}->{dest_node.id}). Nodes of this type "
f"don't process input.")
weight = (np.random.uniform(*self.config.new_weight_interval)
if weight is None else weight)
connection = ne.neat.ConnectionGene(cid=cid,
from_node=src_node,
to_node=dest_node,
weight=weight)
connection.enabled = enabled
self.connections.append(connection)
src_node.out_connections.append(connection)
dest_node.in_connections.append(connection)
if src_node.id not in self._existing_connections_dict:
self._existing_connections_dict[src_node.id] = {}
self._existing_connections_dict[src_node.id][dest_node.id] = connection
def add_random_connection(self,
id_handler: "ne.neat.id_handler.IdHandler",
) -> Optional[Tuple["ne.neat.genes.NodeGene", "ne.neat.genes.NodeGene"]]:
""" Adds a new connection between two random nodes in the genome.
This is an implementation of the `add connection mutation`, described in
the original NEAT paper :cite:`stanley:ec02`.
Args:
id_handler (IdHandler): ID handler that will be used to assign an ID
to the new connection. The handler's internal cache of existing
connections will be updated accordingly.
Returns:
A tuple containing the source node and the destination node of the
connection, if a new connection was successfully created. `None`, if
there is no space in the genome for a new connection.
"""
all_src_nodes = self.nodes()
np.random.shuffle(all_src_nodes)
all_dest_nodes = [n for n in all_src_nodes
if (n.type != ne.neat.NodeGene.Type.BIAS
and n.type != ne.neat.NodeGene.Type.INPUT)]
np.random.shuffle(all_dest_nodes)
for src_node in all_src_nodes:
for dest_node in all_dest_nodes:
if src_node != dest_node or self.config.allow_self_connections:
if not self.connection_exists(src_node.id, dest_node.id):
cid = id_handler.next_connection_id(src_node.id,
dest_node.id)
self.add_connection(cid, src_node, dest_node)
return src_node, dest_node
return None
def enable_random_connection(self) -> None:
""" Randomly activates a disabled connection gene. """
disabled = [c for c in self.connections if not c.enabled]
if len(disabled) > 0:
connection = np.random.choice(disabled)
connection.enabled = True
def add_random_hidden_node(self,
id_handler: "ne.neat.id_handler.IdHandler",
) -> Optional["ne.neat.genes.NodeGene"]:
""" Adds a new hidden node to the genome in a random position.
This method implements the `add node mutation` procedure described in
the original NEAT paper:
"An existing connection is split and the new node placed where the old
connection used to be. The old connection is disabled and two new
connections are added to the genome. The new connection leading into the
new node receives a weight of 1, and the new connection leading out
receives the same weight as the old connection." - :cite:`stanley:ec02`
Only currently enabled connections are considered eligible to "host" the
new hidden node.
Args:
id_handler (IdHandler): ID handler that will be used to assign an ID
to the new hidden node. The handler's internal cache of existing
nodes and connections will be updated accordingly.
Returns:
The new hidden node, if it was successfully created. `None` if it
wasn't possible to find a connection to "host" the new node. This
usually happens when the ID handler hasn't been reset in a while.
"""
eligible_connections = [c for c in self.connections if c.enabled]
if not eligible_connections:
return None
np.random.shuffle(eligible_connections)
for original_connection in eligible_connections:
src_node = original_connection.from_node
dest_node = original_connection.to_node
hid = id_handler.next_hidden_node_id(src_node.id, dest_node.id)
if (self.connection_exists(src_node.id, hid)
or self.connection_exists(hid, dest_node.id)):
# might happen if the id handler cache hasn't been reset yet
continue
original_connection.enabled = False
new_node = ne.neat.NodeGene(
node_id=hid,
node_type=ne.neat.NodeGene.Type.HIDDEN,
activation_func=self._hidden_activation,
initial_activation=self.config.initial_node_activation
)
self.hidden_nodes.append(new_node)
# adding connections
cid = id_handler.next_connection_id(src_node.id, new_node.id)
self.add_connection(cid,
src_node, new_node,
weight=1)
cid = id_handler.next_connection_id(new_node.id, dest_node.id)
self.add_connection(cid,
new_node, dest_node,
weight=original_connection.weight)
return new_node
return None
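    # Hypothetical usage of the mutation above (``genome`` and ``id_handler``
    # are assumed to exist and be properly configured):
    #
    #     new_node = genome.add_random_hidden_node(id_handler)
    #     if new_node is not None:
    #         # the hosting connection was disabled; two new connections were
    #         # added (weight 1 into the node, the old weight out of it)
    #         print("added hidden node", new_node.id)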
def mutate_weights(self) -> None:
""" Randomly mutates the weights of the genome's connections.
Each connection gene in the genome has a chance to be perturbed, reset
or to remain unchanged.
"""
for connection in self.connections:
if ne.utils.chance(self.config.weight_reset_chance):
                # resetting the connection: assign a new random weight
connection.weight = np.random.uniform(
*self.config.new_weight_interval)
else:
                # perturbing the connection's current weight
p = np.random.uniform(low=-self.config.weight_perturbation_pc,
high=self.config.weight_perturbation_pc)
d = connection.weight * p
connection.weight += d
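    # Worked example of the perturbation branch above (made-up numbers): with
    # weight_perturbation_pc = 0.1 and a current weight of 2.0, p is drawn
    # from uniform(-0.1, 0.1), so d = 2.0 * p changes the weight by at most
    # +/- 0.2.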
def simple_copy(self) -> "NeatGenome":
""" Makes a simple copy of the genome.
Wraps a call to this class' constructor.
Returns:
A copy of the genome without any of its connections (including the
ones between input and output nodes) and hidden nodes.
"""
return NeatGenome(num_inputs=len(self.input_nodes),
num_outputs=len(self.output_nodes),
config=self.config,
initial_connections=False)
def __copy_aux(self, random_weights: bool) -> "NeatGenome":
""" Auxiliary function used for deep copying the genome, with or without
random weights.
"""
new_genome = self.simple_copy()
copied_nodes = {n.id: n for n in new_genome.nodes()}
# creating required nodes
for node in self.hidden_nodes:
new_node = node.simple_copy()
copied_nodes[node.id] = new_node
new_genome.hidden_nodes.append(new_node)
# adding connections
for c in self.connections:
try:
new_genome.add_connection(
cid=c.id,
src_node=copied_nodes[c.from_node.id],
dest_node=copied_nodes[c.to_node.id],
enabled=c.enabled,
weight=c.weight if not random_weights else None)
except ConnectionExistsError as e:
cons = [f"[{con.id}] {con.from_node.id}->{con.to_node.id} "
f"({con.enabled})" for con in self.connections]
raise ConnectionExistsError(
"Connection exists error when duplicating parent.\n"
f"Source node's connections: {cons}") from e
return new_genome
def random_copy(self) -> "NeatGenome":
""" Makes a deep copy of the genome, but with random weights.
Returns:
A deep copy of the genome with the same topology of the original
genome, but random connections weights.
"""
return self.__copy_aux(random_weights=True)
def deep_copy(self) -> "NeatGenome":
""" Makes an exact/deep copy of the genome.
All the nodes and connections (including their weights) of the parent
genome are copied to the new genome.
Returns:
An exact/deep copy of the genome.
"""
return self.__copy_aux(random_weights=False)
def process_node(self, n: "ne.neat.genes.NodeGene") -> float:
""" Recursively processes the activation of the given node.
Unless it's a bias or input node (that have a fixed output), a node must
process the input it receives from other nodes in order to produce an
activation. This is done recursively: if `n` receives input from a node
`m` that haven't had its activation calculated yet, the activation of
`m` will be calculated recursively before the activation of `n` is
computed. Recurrences are solved by using the previous activation of the
"problematic" node.
Let :math:`w_i` be the weight of the :math:`i^{\\text{th}}` connection
that has `n` as destination node. Let :math:`a_i` be the current cached
output of the source node of :math:`c_i`. Let :math:`\\sigma` be the
activation function of `n`. The activation (output) `a` of `n` is
computed as follows:
:math:`a = \\sigma (\\sum \\limits_{i} w_i \\cdot a_i)`
Args:
n (NodeGene): The node to be processed.
Returns:
The activation value (output) of the node.
"""
# checking if the node needs to be activated
if (n.type != ne.neat.NodeGene.Type.INPUT
and n.type != ne.neat.NodeGene.Type.BIAS
and not self._activated_nodes[n.id]):
# activating the node
# the current node (n) is immediately marked as activated; this is
            # needed due to recurrence: if, during the recursive calls, some
# node m depends on the activation of n, the old activation of n
# is used.
self._activated_nodes[n.id] = True
zsum = 0.0
for connection in n.in_connections:
if connection.enabled:
src_node, weight = connection.from_node, connection.weight
zsum += weight * self.process_node(src_node)
n.activate(zsum)
return n.activation
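    # Worked example of the formula in the docstring above (numbers are made
    # up): if n has two enabled incoming connections with weights 0.5 and
    # -1.0 whose source nodes currently output 2.0 and 1.0, then
    # zsum = 0.5 * 2.0 + (-1.0) * 1.0 = 0.0 and n's activation is sigma(0.0),
    # where sigma is n's activation function.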
def process(self, x: Sequence[float]) -> np.ndarray:
""" Feeds the given input to the neural network.
In this implementation, there is no distinction between a genome and
the neural network it encodes. The genome will emulate a neural network
(its phenotype) in order to process the given input. The encoded network
is a Graph Neural Networks (GNN).
Note:
The processing is done recursively, starting from the output nodes
(top-down approach). Because of that, nodes not connected to at
least one of the network's output nodes won't be processed.
Args:
x (Sequence[float]): A sequence object (like a list or numpy array)
containing the inputs to be fed to the neural network input
nodes. It represents a single training sample. The value in the
index `i` of `X` will be fed to the :math:`i^{th}` input node
of the neural network.
Returns:
A numpy array containing the outputs of the network's output nodes.
The index `i` contains the activation value of the :math:`i^{th}`
output node of the network.
Raises:
InvalidInputError: If the number of elements in `X` doesn't match
the number of input nodes in the network.
"""
if len(x) != len(self.input_nodes):
raise ne.InvalidInputError(
"The input size must match the number of input nodes in the "
f"network! Expected input of length {len(self.input_nodes)} "
f"but got {len(x)}."
)
# preparing input nodes
for in_node, value in zip(self.input_nodes, x):
in_node.activate(value)
# resetting activated nodes dict
self._activated_nodes = {
n.id: False
for n in self.output_nodes + self.hidden_nodes
}
# processing nodes in a top-down manner (starts from the output nodes)
# nodes not connected to at least | |
# ----------------------------------------------------------------------
from __future__ import print_function
from cloudmesh.config.cm_config import cm_config_server
from cloudmesh_base.util import banner
import os
import sys
from cloudmesh_base.util import path_expand
from cloudmesh.shell.Shell import Shell
# BUG: replace with Shell
import sh
# need to get rid of fabric later
from fabric.api import task, local, settings, hide
import subprocess
import json
import hostlist
from cloudmesh_base.locations import config_file
from cloudmesh_base.ConfigDict import ConfigDict
from cloudmesh_base.logger import LOGGER
import time
from pprint import pprint
# ----------------------------------------------------------------------
# SETTING UP A LOGGER
# ----------------------------------------------------------------------
log = LOGGER(__file__)
def isyes(value):
check = str(value).lower()
if check in ['true', 'false', 'y', 'n', 'yes', 'no']:
return check in ['true', 'y', 'yes']
else:
print("parameter not in", ['true', 'false', 'y', 'n', 'yes', 'no'])
print("found", value, check)
sys.exit()
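# Illustrative behaviour of isyes (values made up): isyes("Y") and
# isyes("true") return True, isyes("no") returns False, and any other value
# prints a message and terminates the program via sys.exit().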
'''
class cloudmesh_server(object):
def __init__(self):
self.env = {
"type": "server",
}
def info(self):
pass
def status(self):
pass
def start(self):
"""starts a server"""
pass
def stop(self):
pass
def __str__(self):
self.info()
class mongo_server(cloudmesh_server):
def __init__(self):
self.env = {
"type": "server",
}
def info(self):
pass
def status(self):
pass
def start(self):
"""starts a server"""
pass
def stop(self):
pass
class rabbitmq_server(cloudmesh_server):
def __init__(self):
self.env = {
"type": "server",
}
def info(self):
pass
def status(self):
pass
def start(self):
"""starts a server"""
pass
def stop(self):
pass
class celery_server(cloudmesh_server):
def __init__(self):
self.env = {
"type": "server",
}
def info(self):
pass
def status(self):
pass
def start(self):
"""starts a server"""
pass
def stop(self):
pass
'''
class cloudmesh_server(object):
def _ps(self):
return sh.ps("-ax", _tty_out=False)
# def __init__(self):
# pass
'''
self.server_env = {
"name": "server"
}
self.rabbit_env = {
'rabbitmq_server': "sudo rabbitmq-server",
'rabbitmqctl': "sudo rabbitmqctl",
'detached': ""
}
try:
import cloudmesh
except Exception, e:
print ("ERROR: could not find package\n\n cloudmesh\n")
print ("please run first\n")
print (" ./install cloudmesh\n")
banner()
print (e)
banner()
# import cloudmesh_web
# self.server_env['location'] = os.path.dirname(cloudmesh_web.__file__)
self.server_env['location'] = "./cloudmesh_web"
celery_config = ConfigDict(
filename=config_file("/cloudmesh_celery.yaml"),
kind="worker")
self.workers = celery_config.get("cloudmesh.workers")
for worker in self.workers:
self.workers[worker]["hostlist"] = hostlist.expand_hostlist(
"{0}[1-{1}]".format(self.workers[worker]["id"],
self.workers[worker]["count"]))
print(json.dumps(self.workers, indent=4))
self.celery_cmd = sh.which("celery")
# print ("CCCC", self.celery_cmd)
# sys.exit()
'''
def info(self):
#
# getting the basic mongo info
#
banner("mongo info")
d = {}
d['mongo'] = self._info_mongo()
print("Mongo pid", d['mongo']['pid'])
print("Mongo port", d['mongo']['port'])
d['celery'] = self._info_celery()
pprint(d)
def start(self):
"""starts in dir webgui the program server.py and displays a browser on the
given port and link
"""
self._start_web_server()
self._start_mongo()
# banner("KILL THE SERVER", debug=debug)
# kill(debug=debug)
# mongo.start()
# execute_command("START MONGO",
# "fab mongo.start",
# debug)
# queue.start()
# execute_command("START RABITMQ",
# "fab queue.start", debug)
# queue.flower_server()
# execute_command("START FLOWER",
# "fab queue.flower_server",
# debug)
pass
def stop(self):
self._stop_web_server()
self._stop_mongo()
def status(self):
pass
# ######################################################################
# WEB SERVER
# ######################################################################
def _start_web_server(self):
# from cloudmesh_web import server as cloudmesh_web_server_start
banner("start the web server")
os.system("cd cloudmesh_web; python server.py &")
time.sleep(4)
def _stop_web_server(self):
# stop web server
banner("stop the web server")
try:
result = sh.fgrep(
sh.fgrep(self._ps(), "python {name}.py".format(**self.server_env)),
"-v", "fgrep"
).split("\n")[:-1]
print(result)
for line in result:
                if line != '':
pid = line.split(" ")[0]
print(line)
print("PID", pid)
print("KILL")
try:
sh.kill("-9", str(pid))
                    except Exception as e:
print("ERROR")
print(e)
        except Exception as e:
print("INFO: cloudmesh web server not running")
# ######################################################################
# MONGO SERVER
# ######################################################################
def _info_mongo(self):
config = cm_config_server().get("cloudmesh.server.mongo")
path = path_expand(config["path"])
port = config["port"]
# print (config)
# print(port, path)
# d = {
# 'pid': None,
# 'port': None,
# 'path': None,
# 'command': None
# }
# try:
# lines = sh.grep(
# sh.grep(self._ps(), "mongod"), "log").split("\n")[:-1]
# if lines != ['']:
# (pid) = lines[0].lstrip().split(" ")[0]
# d = {'pid': pid,
# 'port': port,
# 'path': path,
# 'command': lines}
# except:
# pass
# return d
# need to get rid of fabric local later
with settings(warn_only=True):
with hide('output', 'running', 'warnings'):
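                # the "[m]" in the pattern keeps grep from matching its own
                # process entry in the ps output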
lines = local(
"ps -ax |grep '[m]ongod.*port {0}'".format(port), capture=True) \
.split("\n")
if lines != ['']:
pid = lines[0].split(" ")[0]
d = {'pid': pid,
'port': port,
'path': path,
'command': lines}
else:
d = {'pid': "mongodb not active",
'port': None,
'path': None,
'command': None}
return d
def _start_mongo(self):
"""
start the mongodb service in the location as specified in
cloudmesh_server.yaml
"""
banner("Starting mongod")
config = cm_config_server().get("cloudmesh.server.mongo")
path = path_expand(config["path"])
port = config["port"]
# pprint(config)
# print(path)
# print(port)
banner("creating dir")
if not os.path.exists(path):
print("Creating mongodb directory in {0}".format(path))
sh.mkdir("-p", path)
banner("check")
# lines = str(sh.grep(sh.ps("-ax"), "mongod", "*port {0}".format(port)))
# need to get rid of fabric local later
with settings(warn_only=True):
with hide('output', 'running', 'warnings'):
lines = local(
"ps -ax |grep '[m]ongod.*port {0}'".format(port), capture=True) \
.split("\n")
print("search result:")
# print(type(lines))
print(lines)
if lines != ['']:
pid = lines[0].split(" ")[0]
print("NO ACTION: mongo already running in pid "
"{0} for port {1}".format(pid, port))
return
print("ACTION: Starting mongod")
print()
print("NOTE: the preparation of mongo may take a few minutes")
print(" please do not interrupt this program.")
print()
print(" Please be patient!")
print()
'''
Shell.mongod("--auth",
"--bind_ip", "127.0.0.1"
"--fork",
"--dbpath", path,
"--logpath", "{0}/mongodb.log".format(path),
"--port", port,
_bg=True)
'''
# need to get rid of fabric local later
local(
'mongod --auth --bind_ip 127.0.0.1 '
'--fork --dbpath {0} '
'--logpath {0}/mongodb.log '
'--port {1}'.format(path, port))
def _stop_mongo(self):
"""starts in dir webgui the program server.py and displays a
browser on the given port and link """
try:
sh.killall("-15", "mongod")
except:
print("INFO: cloudmesh mongo server not running")
# ######################################################################
# CELERY SERVER
# ######################################################################
def _info_celery(self):
d = {}
try:
lines = sh.grep(
sh.grep(self._ps(), "celery"), "worker").split("\n")[:-1]
for line in lines:
(pid, command) = line.lstrip().split(" ", 1)
d[pid] = line
except:
pass
return d
def _celery_command(self, command, app, workers, queue, concurrency=None):
"""execute the celery command on the application and workers
specified"""
worker_str = " ".join(workers)
parameter_string = "celery multi {0} {1} -A {2} -l info -Q {3}".format(
command, worker_str, app, queue)
# directories for log and pid file
celery_dir = path_expand("~/.cloudmesh/celery")
sh.mkdir("-p", celery_dir)
parameter_string += " --pidfile=\"{0}/%n.pid\" ".format(celery_dir)
parameter_string += " --logfile=\"{0}/%n.log\" ".format(celery_dir)
if concurrency is not None:
parameter_string += " --concurrency={0}".format(concurrency)
print(parameter_string)
proc = subprocess.Popen(parameter_string,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout_value, stderr_value = proc.communicate()
# print (stdout_value)
# print (stderr_value)
def _start_celery(self):
# mq.start()
for worker in self.workers:
print(worker)
print(json.dumps(self.workers[worker]))
concurrency = None
if "concurrency" in self.workers[worker]:
concurrency = self.workers[worker]["concurrency"]
self._celery_command(
"start",
self.workers[worker]["app"],
self.workers[worker]["hostlist"],
self.workers[worker]["queue"],
concurrency=concurrency)
def _stop_celery(self):
processes = self._info_celery()
print(processes.keys())
for pid in processes:
try:
sh.kill("-9", str(pid))
except:
print(pid, " process already deleted")
# ######################################################################
# RABBITMQ SERVER
# ######################################################################
class rabbitmq_server(object):
def __init__(self):
self.rabbit_env = {
'rabbitmq_server': "sudo rabbitmq-server",
'rabbitmqctl': "sudo rabbitmqctl",
'detached': ""
}
self._set_rabbitmq_env()
def _set_rabbitmq_env(self):
location = path_expand("~/.cloudmesh/rabbitm")
if sys.platform == "darwin":
sh.mkdir("-p", location)
self.rabbit_env["RABBITMQ_MNESIA_BASE"] = location
self.rabbit_env["RABBITMQ_LOG_BASE"] = location
os.environ["RABBITMQ_MNESIA_BASE"] = location
os.environ["RABBITMQ_LOG_BASE"] = location
self.rabbit_env["rabbitmq_server"] = \
"/usr/local/opt/rabbitmq/sbin/rabbitmq-server"
self.rabbit_env["rabbitmqctl"] = \
"/usr/local/opt/rabbitmq/sbin/rabbitmqctl"
elif sys.platform == "linux2":
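            # "linux2" is the value sys.platform reports on Python 2; Python 3
            # reports "linux", so this branch would not match there.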
sh.mkdir("-p", location)
self.rabbit_env["RABBITMQ_MNESIA_BASE"] = location
self.rabbit_env["RABBITMQ_LOG_BASE"] = location
os.environ["RABBITMQ_MNESIA_BASE"] = location
os.environ["RABBITMQ_LOG_BASE"] = location
self.rabbit_env["rabbitmq_server"] = "/usr/sbin/rabbitmq-server"
self.rabbit_env["rabbitmqctl"] = "/usr/sbin/rabbitmqctl"
else:
print("WARNING: cloudmesh rabbitmq user install not supported, "
"using system install")
def info(self):
"""print the status of rabbitmq"""
s = os.popen("sudo {rabbitmqctl} status".format
(**self.rabbit_env)).read()
# s = Shell.sudo("{rabbitmqctl}".format(**self.rabbit_env), "status")
def list_queues(parameters):
"""list all queues available in rabbitmq"""
self.rabbit_env['parameters'] = parameters
r = os.popen("{rabbitmqctl} list_queues {parameters}"
.format(**self.rabbit_env)).read()
return r
l = ["name", "memory", "consumers", "messages",
"messages_ready", "messages_unacknowledged"]
r = list_queues(" ".join(l)).split("\n")[1].split("\t")
d = zip(l, r)
return s, d
def start(self, detached=None):
"""start the rabbit mq server"""
if detached is None:
self.rabbit_env['detached'] = "-detached"
os.popen("{rabbitmq_server} {detached}".format(**self.rabbit_env))
def stop(self):
"""stop the rabbit mq server"""
os.popen("{rabbitmqctl} stop".format(**self.rabbit_env))
'''
queue.start()
# execute_command("START RABITMQ",
# "fab queue.start", debug)
queue.flower_server()
# execute_command("START FLOWER",
# "fab queue.flower_server",
# debug)
def _queue_start(view=None):
"""start the celery server
:param: if view is set to any value start also rabit and attach
to it so we can see the log
"""
# pprint (fabric.state.output)
with settings(warn_only=True):
stop()
time.sleep(2)
mq.start()
time.sleep(2)
for worker in workers:
concurrency = None
if "concurrency" in workers[worker]:
concurrency = workers[worker]["concurrency"]
# print worker, ": ", str(workers[worker])
celery_command("start", workers[worker]["app"],
workers[worker]["hostlist"], workers[
worker]["queue"],
concurrency=concurrency)
if view is None:
time.sleep(2)
print
# local("celery worker --app={0} -l info".format(app))
# local("celery worker -l info".format(app))
def _mq_start(self):
set_rabbitmq_env()
if detached is None:
rabbit_env['detached'] = "-detached"
# log.info (rabbit_env)
local("{rabbitmq_server} {detached}".format(**rabbit_env))
def _mq_stop():
"""stop the rabbit mq server"""
local("{rabbitmqctl} stop".format(**rabbit_env))
def _set_rabbitmq_env():
location = path_expand("~/.cloudmesh/rabbitm")
import numpy as np
from Code.JsonChecker import JsonChecker
class EndResults:
"""
In this class, the intervals of growth of the bacteria are calculated using different formulas/equations that are
specifically called in the class.
There are a total of 4 different formulas/equations to calculate the growth of a bacterium.
Parameters
----------
bact_name: String
The name of a bacteria
temp_input: Float
The user input for the temperature
    ph_input: Float
        The user input of the pH
    aw: Float
        The user input of the water activity (aw)
end_time: Float
The user input of end time
type_graph: int
The user choice for the equation / formulas
Attributes
----------
bact_name: String
Store the name of a bacteria
temp_input: Float
Store the user input for the temperature
    ph_input: Float
        Store the user input of the pH
    aw: Float
        Store the user input of the water activity (aw)
end_time: Float
Store the user input of end time
type_graph: int
Store the user choice for the equation / formulas
"""
def __init__(self, bact_name: str, temp_input: float, ph_input: float, aw:float, end_time: float, type_graph: int):
super().__init__(bact_name, temp_input, ph_input, aw, end_time, type_graph)
self.bact_naam = bact_name
self.temp_input = temp_input
self.ph_input = ph_input
self.aw = aw
self.end_time = end_time
self.type_graph = type_graph
self.__new__()
def __new__(cls, bact_naam: str, temp_input: float, ph_input: float, aw:float, end_time: float, type_graph: int) -> list:
"""
        Factory constructor: it takes the same parameters as __init__, but returns
        the computed answer directly instead of an instance.
        Based on the type of equation the user chooses, the matching calculation is called.
Raises
--------
ValueError
            when the check of the pH and the temperature returns None
Return
--------
list
A list of the intervals of growth of the bacteria, that would be used by the plot of the graph
"""
temp_check = JsonChecker(bact_naam, temp_input, ph_input, "temp", temp_input)
temp_check_terug = temp_check.values_check()
ph_check = JsonChecker(bact_naam, temp_input, ph_input, "ph", ph_input)
ph_check_terug = ph_check.values_check()
aw_check = JsonChecker(bact_naam, temp_input, ph_input, "aw", aw)
aw_check_terug = aw_check.values_check()
antwoord = 0
try:
            if temp_check_terug is not None and ph_check_terug is not None and aw_check_terug is not None:
if type_graph == 1:
antwoord = cls.logistic(cls,bact_naam, end_time, ph_input, temp_input)
if type_graph == 2:
antwoord = cls.logstic_curve(cls, bact_naam, end_time, ph_input, temp_input)
if type_graph == 3:
antwoord = cls.log_growth(cls, bact_naam, end_time, ph_input, temp_input)
if type_graph == 4:
antwoord= cls.temp_growth_rate(cls, bact_naam, ph_input, end_time, temp_check_terug)
return antwoord
except ValueError as e:
print("incorrect type of value was entered", e)
    def new_growth_rate(self, bact_name: str, pH: float, temperature: float) -> float:
"""
        Here the growth rate is calculated for a given temperature and pH value,
        using the CTPM equation:
        μ_max(T, pH) = CTPM(T, pH) = μ_opt * t(T) * p(pH)
        t(T) = (T - Tmax)(T - Tmin)**2 / ((Topt - Tmin) * [(Topt - Tmin)(T - Topt) - (Topt - Tmax)(Topt + Tmin - 2T)])
        p(pH) = (pH - pHmin)(pH - pHmax) / ((pH - pHmin)(pH - pHmax) - (pH - pHopt)**2)
Return
--------
Float
A float of the new growth rate
"""
temp_check = JsonChecker(bact_name, temperature, pH, "temp", temperature)
temp_waardes= temp_check.read_value_json()
ph_check = JsonChecker(bact_name, temperature, pH, "ph", pH)
pH_waardes = ph_check.read_value_json()
groeisFcator = JsonChecker(bact_name, temperature, pH, "gr", None)
groeisFcator_is = groeisFcator.read_value_json()
        # max growth rate(T, pH) = CTPM(T, pH) = optimum growth rate * t(T) * p(pH)
        # temperature term t(T)
        # denominator, part 1
tt = ((temp_waardes[1] - temp_waardes[0]) * (temperature - temp_waardes[1]) - (temp_waardes[1] - temp_waardes[2])
* (temp_waardes[1] + temp_waardes[0] - 2 * temperature))
        # denominator, part 2 (the complete denominator)
tt2 = ((temp_waardes[1] - temp_waardes[0]) * tt)
        # numerator divided by the denominator: the complete t(T) term
tt3 = ((temperature - temp_waardes[2]) * (temperature - temp_waardes[0]) ** 2 / tt2)
        # pH term p(pH)
        # the denominator
phh = ((pH - pH_waardes[0]) * (pH - pH_waardes[2]) - (pH - pH_waardes[1]) ** 2)
        # numerator divided by the denominator: the complete p(pH) term
phh2 = ((pH - pH_waardes[0]) * (pH - pH_waardes[2]) / phh)
        # the new growth rate
newgroeiFactor = groeisFcator_is[0] * tt3 * phh2
return newgroeiFactor
def log_growth(self, bact_name: str, time: float, pH: float, temperature: float) -> list:
"""
        This function calculates the growth of the bacteria on the basis of 4 phases:
        lag phase, logarithmic phase, stationary phase and death phase.
        Using this equation: ln N - ln N0 = μ * (t - t0)
where:
μ stands for the growth rate per h^-1
N stands for the number of CFU / ml at time t
N0 stands for the initial number of CFU / ml at time t0
t stands for time
Parameters
----------
bact_naam: String
The name of a bacteria
time: Float
The user input of end time
pH: Float
The user input of the PH
temperature: Float
The user input for the temperature
Return
--------
List
A list of the intervals of growth of the bacteria, that would be used by the plot of the graph
"""
ant_lijst, lijstDeath = [], []
beperkendeFactor = JsonChecker(bact_name, temperature, pH, "br", None)
beperkendeFactor_is = beperkendeFactor.read_value_json()
lnN0_ = JsonChecker(bact_name, temperature, pH, "bw", None)
lnN0 = lnN0_.read_value_json()
ant_lijst.append(lnN0[0])
        # compute the specific growth rate
newgroeiFactor = EndResults.new_growth_rate(self, bact_name, pH, temperature)
for t in range(0, int(time)+1):
lnN = (newgroeiFactor * t) + lnN0[0]
if lnN < beperkendeFactor_is[0]:
ant_lijst.append(lnN)
else:
ant_lijst.append(beperkendeFactor_is[0])
lijstDeath.append(ant_lijst[-1])
if ant_lijst[-1] == beperkendeFactor_is[0]:
while lijstDeath[-1] >= lnN0[0]:
antwoord= lijstDeath[-1] - (newgroeiFactor*len(lijstDeath))
if antwoord >= lnN0[0]:
lijstDeath.append(antwoord)
else:
lijstDeath.append(lnN0[0])
break
for item in lijstDeath:
ant_lijst.append(item)
return ant_lijst
def logistic(self, bact_name: str, time: float, pH: float, temperature: float) -> list:
"""
This formula calculates the growth of the bacteria based on the logistics formula.
Here the growth is calculated until reaching the limiting factor.
Using the formula:
y(t) = limiting factor/ (1+ initial value* exp^(-growth rate *t ))
Return
--------
List
A list of the intervals of growth of the bacteria, that would be used by the plot of the graph
"""
ant_lijst = []
groeisFcator = EndResults.new_growth_rate(self, bact_name, pH, temperature)
beperkendeFactor = JsonChecker(bact_name, temperature, pH, "br", None)
beperkendeFactor_is = beperkendeFactor.read_value_json()
begingValue_is = JsonChecker(bact_name, None, None, "bw", None)
begingValue = begingValue_is.read_value_json()
for t in range(0, int(time)):
ant = (beperkendeFactor_is[0] / (1 + begingValue[0] * np.exp(-groeisFcator * t)))
if ant <= beperkendeFactor_is[0]:
ant_lijst.append(ant)
return ant_lijst
def logstic_curve(self, bact_name: str, time: float, pH: float, temperature: float)-> list:
"""
This formula use the logistic formula to calculate the growth until the limiting factor.
Then it would calculate the death phase of the bacteria.
Return
--------
List
A list of the intervals of growth of the bacteria, that would be used by the plot of the graph
"""
        groei_lijst = EndResults.logistic(self, bact_name, time, pH, temperature)
        groeisFcator = EndResults.new_growth_rate(self, bact_name, pH, temperature)
        beperkendeFactor = JsonChecker(bact_name, temperature, pH, "br", None)
        beperkendeFactor_is = beperkendeFactor.read_value_json()
        lijstDeath = []
        lijstDeath.append(groei_lijst[-1])
        while lijstDeath[-1] > groei_lijst[0]:
            # as long as the death value is greater than or equal to the initial growth value
            antwoord = lijstDeath[-1] - (groeisFcator * len(lijstDeath))
            if beperkendeFactor_is[0] >= antwoord >= groei_lijst[0]:
                # keep the value while it stays between the initial value and the limiting factor
                lijstDeath.append(antwoord)
            else:
                lijstDeath.append(groei_lijst[0])
                break
        for item in lijstDeath:
            groei_lijst.append(item)
        return groei_lijst
def temp_growth_rate(self, bact_name: str, pH: float, end_time: float, temp_check:list, ) -> list:
"""
This formula calculates the growth factor per temperature difference. the temperature rises one grade up every
hour, until the max temperature is reached.
The temperatures are shown on the x-axis and the growth factors on the y-axis.
Using the function : new_growth_rate
Return
------
list
A list with growth factors that were calculated in the algorithm
"""
beginRange, eindRange = 0, 0
list = []
tijd_lijst = []
if temp_check is not None:
if len(temp_check) == 3:
beginRange = temp_check[0]
eindRange = temp_check[2] + 1
elif len(temp_check) == 2:
beginRange = temp_check[0]
eindRange = temp_check[1] + 1
elif len(temp_check) == 1:
beginRange = temp_check[0]
eindRange = temp_check[0] + 1
begingValue_is = JsonChecker(bact_name, None, None, "bw", None)
begingValue = begingValue_is.read_value_json()
beperkingsFactor_is = JsonChecker(bact_name, None, None, "br", None)
beperkingsFactor = beperkingsFactor_is.read_value_json()
groei_lijst = []
for time in range(0, int(100)+1):
tijd_lijst.append(time)
for temp in range(int(beginRange), int(eindRange)+1):
if list:
if list[-1] <= beperkingsFactor[0]:
if tijd_lijst:
groeisFactor | |
m.c556 = Constraint(expr=m.x556*(10.749094 + m.x2039) - m.x2994 == 0)
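# Each constraint in this block enforces a bilinear relation of the form
# x_i * (constant + x_j) - x_k == 0 on the Pyomo model m.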
m.c557 = Constraint(expr=m.x557*(6.95367819652136 + m.x2091) - m.x2995 == 0)
m.c558 = Constraint(expr=m.x558*(68.611061605179 + m.x2092) - m.x2996 == 0)
m.c559 = Constraint(expr=m.x559*(149.982358690318 + m.x2093) - m.x2997 == 0)
m.c560 = Constraint(expr=m.x560*(175.844560388705 + m.x2094) - m.x2998 == 0)
m.c561 = Constraint(expr=m.x561*(10.1522671595645 + m.x2095) - m.x2999 == 0)
m.c562 = Constraint(expr=m.x562*(121.104830353398 + m.x2096) - m.x3000 == 0)
m.c563 = Constraint(expr=m.x563*(8.00892516581441 + m.x2097) - m.x3001 == 0)
m.c564 = Constraint(expr=m.x564*(97.3173040663597 + m.x2098) - m.x3002 == 0)
m.c565 = Constraint(expr=m.x565*(58.8520674314227 + m.x2099) - m.x3003 == 0)
m.c566 = Constraint(expr=m.x566*(89.3791992560023 + m.x2100) - m.x3004 == 0)
m.c567 = Constraint(expr=m.x567*(177.636006160939 + m.x2101) - m.x3005 == 0)
m.c568 = Constraint(expr=m.x568*(373.780859160202 + m.x2102) - m.x3006 == 0)
m.c569 = Constraint(expr=m.x569*(158.856788 + m.x2107) - m.x3007 == 0)
m.c570 = Constraint(expr=m.x570*(85.0133724208126 + m.x1997) - m.x3008 == 0)
m.c571 = Constraint(expr=m.x571*(108.052970594387 + m.x1998) - m.x3009 == 0)
m.c572 = Constraint(expr=m.x572*(9.3711459385556 + m.x1999) - m.x3010 == 0)
m.c573 = Constraint(expr=m.x573*(10.69589 + m.x2000) - m.x3011 == 0)
m.c574 = Constraint(expr=m.x574*(17.932791400734 + m.x2001) - m.x3012 == 0)
m.c575 = Constraint(expr=m.x575*(88.805712423724 + m.x2002) - m.x3013 == 0)
m.c576 = Constraint(expr=m.x576*(67.92235 + m.x2003) - m.x3014 == 0)
m.c577 = Constraint(expr=m.x577*(75.509965 + m.x2005) - m.x3015 == 0)
m.c578 = Constraint(expr=m.x578*(68.860513 + m.x2006) - m.x3016 == 0)
m.c579 = Constraint(expr=m.x579*(29.0537838075122 + m.x2019) - m.x3017 == 0)
m.c580 = Constraint(expr=m.x580*(158.856788 + m.x2107) - m.x3018 == 0)
m.c581 = Constraint(expr=m.x581*(85.0133724208126 + m.x1997) - m.x3019 == 0)
m.c582 = Constraint(expr=m.x582*(108.052970594387 + m.x1998) - m.x3020 == 0)
m.c583 = Constraint(expr=m.x583*(9.3711459385556 + m.x1999) - m.x3021 == 0)
m.c584 = Constraint(expr=m.x584*(10.69589 + m.x2000) - m.x3022 == 0)
m.c585 = Constraint(expr=m.x585*(17.932791400734 + m.x2001) - m.x3023 == 0)
m.c586 = Constraint(expr=m.x586*(88.805712423724 + m.x2002) - m.x3024 == 0)
m.c587 = Constraint(expr=m.x587*(17.52572 + m.x2004) - m.x3025 == 0)
m.c588 = Constraint(expr=m.x588*(75.509965 + m.x2005) - m.x3026 == 0)
m.c589 = Constraint(expr=m.x589*(215.1945909789 + m.x2007) - m.x3027 == 0)
m.c590 = Constraint(expr=m.x590*(17.9818975244236 + m.x2008) - m.x3028 == 0)
m.c591 = Constraint(expr=m.x591*(82.3846155412095 + m.x2009) - m.x3029 == 0)
m.c592 = Constraint(expr=m.x592*(15.77529785051 + m.x2010) - m.x3030 == 0)
m.c593 = Constraint(expr=m.x593*(20.585074453376 + m.x2011) - m.x3031 == 0)
m.c594 = Constraint(expr=m.x594*(17.73824148824 + m.x2012) - m.x3032 == 0)
m.c595 = Constraint(expr=m.x595*(9.7831921864888 + m.x2013) - m.x3033 == 0)
m.c596 = Constraint(expr=m.x596*(58.3304919073372 + m.x2014) - m.x3034 == 0)
m.c597 = Constraint(expr=m.x597*(70.841638270004 + m.x2015) - m.x3035 == 0)
m.c598 = Constraint(expr=m.x598*(12.908328297966 + m.x2017) - m.x3036 == 0)
m.c599 = Constraint(expr=m.x599*(25.5807469993058 + m.x2018) - m.x3037 == 0)
m.c600 = Constraint(expr=m.x600*(29.0537838075122 + m.x2019) - m.x3038 == 0)
m.c601 = Constraint(expr=m.x601*(11.179067059 + m.x2020) - m.x3039 == 0)
m.c602 = Constraint(expr=m.x602*(16.47769975 + m.x2021) - m.x3040 == 0)
m.c603 = Constraint(expr=m.x603*(10.8297732214437 + m.x2022) - m.x3041 == 0)
m.c604 = Constraint(expr=m.x604*(29.39924999665 + m.x2023) - m.x3042 == 0)
m.c605 = Constraint(expr=m.x605*(9.34536262823 + m.x2024) - m.x3043 == 0)
m.c606 = Constraint(expr=m.x606*(17.3365643030813 + m.x2025) - m.x3044 == 0)
m.c607 = Constraint(expr=m.x607*(48.547749096 + m.x2026) - m.x3045 == 0)
m.c608 = Constraint(expr=m.x608*(149.23057111 + m.x2027) - m.x3046 == 0)
m.c609 = Constraint(expr=m.x609*(27.47191645805 + m.x2028) - m.x3047 == 0)
m.c610 = Constraint(expr=m.x610*(40.593786 + m.x2029) - m.x3048 == 0)
m.c611 = Constraint(expr=m.x611*(277.48319 + m.x2030) - m.x3049 == 0)
m.c612 = Constraint(expr=m.x612*(254.79773 + m.x2031) - m.x3050 == 0)
m.c613 = Constraint(expr=m.x613*(117.202966 + m.x2032) - m.x3051 == 0)
m.c614 = Constraint(expr=m.x614*(20.035404 + m.x2033) - m.x3052 == 0)
m.c615 = Constraint(expr=m.x615*(32.373595 + m.x2034) - m.x3053 == 0)
m.c616 = Constraint(expr=m.x616*(46.195028 + m.x2035) - m.x3054 == 0)
m.c617 = Constraint(expr=m.x617*(118.743516912 + m.x2036) - m.x3055 == 0)
m.c618 = Constraint(expr=m.x618*(10.749094 + m.x2039) - m.x3056 == 0)
m.c619 = Constraint(expr=m.x619*(6.95367819652136 + m.x2091) - m.x3057 == 0)
m.c620 = Constraint(expr=m.x620*(68.611061605179 + m.x2092) - m.x3058 == 0)
m.c621 = Constraint(expr=m.x621*(149.982358690318 + m.x2093) - m.x3059 == 0)
m.c622 = Constraint(expr=m.x622*(175.844560388705 + m.x2094) - m.x3060 == 0)
m.c623 = Constraint(expr=m.x623*(10.1522671595645 + m.x2095) - m.x3061 == 0)
m.c624 = Constraint(expr=m.x624*(121.104830353398 + m.x2096) - m.x3062 == 0)
m.c625 = Constraint(expr=m.x625*(8.00892516581441 + m.x2097) - m.x3063 == 0)
m.c626 = Constraint(expr=m.x626*(97.3173040663597 + m.x2098) - m.x3064 == 0)
m.c627 = Constraint(expr=m.x627*(58.8520674314227 + m.x2099) - m.x3065 == 0)
m.c628 = Constraint(expr=m.x628*(89.3791992560023 + m.x2100) - m.x3066 == 0)
m.c629 = Constraint(expr=m.x629*(177.636006160939 + m.x2101) - m.x3067 == 0)
m.c630 = Constraint(expr=m.x630*(373.780859160202 + m.x2102) - m.x3068 == 0)
m.c631 = Constraint(expr=m.x631*(158.856788 + m.x2107) - m.x3069 == 0)
m.c632 = Constraint(expr=m.x632*(67.92235 + m.x2003) - m.x3070 == 0)
m.c633 = Constraint(expr=m.x633*(215.1945909789 + m.x2007) - m.x3071 == 0)
m.c634 = Constraint(expr=m.x634*(82.3846155412095 + m.x2009) - m.x3072 == 0)
m.c635 = Constraint(expr=m.x635*(25.5807469993058 + m.x2018) - m.x3073 == 0)
m.c636 = Constraint(expr=m.x636*(29.0537838075122 + m.x2019) - m.x3074 == 0)
m.c637 = Constraint(expr=m.x637*(10.8297732214437 + m.x2022) - m.x3075 == 0)
m.c638 = Constraint(expr=m.x638*(29.39924999665 + m.x2023) - m.x3076 == 0)
m.c639 = Constraint(expr=m.x639*(9.34536262823 + m.x2024) - m.x3077 == 0)
m.c640 = Constraint(expr=m.x640*(17.3365643030813 + m.x2025) - m.x3078 == 0)
m.c641 = Constraint(expr=m.x641*(48.547749096 + m.x2026) - m.x3079 == 0)
m.c642 = Constraint(expr=m.x642*(149.23057111 + m.x2027) - m.x3080 == 0)
m.c643 = Constraint(expr=m.x643*(27.47191645805 + m.x2028) - m.x3081 == 0)
m.c644 = Constraint(expr=m.x644*(118.743516912 + m.x2036) - m.x3082 == 0)
m.c645 = Constraint(expr=m.x645*(22.880176696 + m.x2038) - m.x3083 == 0)
m.c646 = Constraint(expr=m.x646*(6.95367819652136 + m.x2091) - m.x3084 == 0)
m.c647 = Constraint(expr=m.x647*(68.611061605179 + m.x2092) - m.x3085 == 0)
m.c648 = Constraint(expr=m.x648*(149.982358690318 + m.x2093) - m.x3086 == 0)
m.c649 = Constraint(expr=m.x649*(175.844560388705 + m.x2094) - m.x3087 == 0)
m.c650 = Constraint(expr=m.x650*(10.1522671595645 + m.x2095) - m.x3088 == 0)
m.c651 = Constraint(expr=m.x651*(121.104830353398 + m.x2096) - m.x3089 == 0)
m.c652 = Constraint(expr=m.x652*(8.00892516581441 + m.x2097) - m.x3090 == 0)
m.c653 = Constraint(expr=m.x653*(97.3173040663597 + m.x2098) - m.x3091 == 0)
m.c654 = Constraint(expr=m.x654*(58.8520674314227 + m.x2099) - m.x3092 == 0)
m.c655 = Constraint(expr=m.x655*(89.3791992560023 + m.x2100) - m.x3093 == 0)
m.c656 = Constraint(expr=m.x656*(177.636006160939 + m.x2101) - m.x3094 == 0)
m.c657 = Constraint(expr=m.x657*(373.780859160202 + m.x2102) - m.x3095 == 0)
m.c658 = Constraint(expr=m.x658*(158.856788 + m.x2107) - m.x3096 == 0)
m.c659 = Constraint(expr=m.x659*(17.932791400734 + m.x2001) - m.x3097 == 0)
m.c660 = Constraint(expr=m.x660*(67.92235 + m.x2003) - m.x3098 == 0)
m.c661 = Constraint(expr=m.x661*(17.52572 + m.x2004) - m.x3099 == 0)
m.c662 = Constraint(expr=m.x662*(75.509965 + m.x2005) - m.x3100 == 0)
m.c663 = Constraint(expr=m.x663*(68.860513 + m.x2006) - m.x3101 == 0)
m.c664 = Constraint(expr=m.x664*(215.1945909789 + m.x2007) - m.x3102 == 0)
m.c665 = Constraint(expr=m.x665*(17.9818975244236 + m.x2008) - m.x3103 == 0)
m.c666 = Constraint(expr=m.x666*(82.3846155412095 + m.x2009) - m.x3104 == 0)
m.c667 = Constraint(expr=m.x667*(15.77529785051 + m.x2010) - m.x3105 == 0)
m.c668 = Constraint(expr=m.x668*(20.585074453376 + m.x2011) - m.x3106 == 0)
m.c669 = Constraint(expr=m.x669*(17.73824148824 + m.x2012) - m.x3107 == 0)
m.c670 = Constraint(expr=m.x670*(9.7831921864888 + m.x2013) - m.x3108 == 0)
m.c671 = Constraint(expr=m.x671*(58.3304919073372 + m.x2014) - m.x3109 == 0)
m.c672 = Constraint(expr=m.x672*(70.841638270004 + m.x2015) - m.x3110 == 0)
m.c673 = Constraint(expr=m.x673*(2.457537796 + m.x2016) - m.x3111 == 0)
m.c674 = Constraint(expr=m.x674*(12.908328297966 + m.x2017) - m.x3112 == 0)
m.c675 = Constraint(expr=m.x675*(25.5807469993058 + m.x2018) - m.x3113 == 0)
m.c676 = Constraint(expr=m.x676*(29.0537838075122 + m.x2019) - m.x3114 == 0)
m.c677 = Constraint(expr=m.x677*(11.179067059 + m.x2020) - m.x3115 == 0)
m.c678 = Constraint(expr=m.x678*(16.47769975 + m.x2021) - m.x3116 == 0)
m.c679 = Constraint(expr=m.x679*(10.8297732214437 + m.x2022) - m.x3117 == 0)
m.c680 = Constraint(expr=m.x680*(29.39924999665 + m.x2023) - m.x3118 == 0)
m.c681 = Constraint(expr=m.x681*(9.34536262823 + m.x2024) - m.x3119 == 0)
m.c682 = Constraint(expr=m.x682*(17.3365643030813 + m.x2025) - m.x3120 == 0)
m.c683 = Constraint(expr=m.x683*(48.547749096 + m.x2026) - m.x3121 == 0)
m.c684 = Constraint(expr=m.x684*(149.23057111 + m.x2027) - m.x3122 == 0)
m.c685 = Constraint(expr=m.x685*(27.47191645805 + m.x2028) - m.x3123 == 0)
m.c686 = Constraint(expr=m.x686*(40.593786 + m.x2029) - m.x3124 == 0)
m.c687 = Constraint(expr=m.x687*(277.48319 + m.x2030) - m.x3125 == 0)
m.c688 = Constraint(expr=m.x688*(254.79773 + m.x2031) - m.x3126 == 0)
m.c689 = Constraint(expr=m.x689*(32.373595 + m.x2034) - m.x3127 == 0)
m.c690 = Constraint(expr=m.x690*(118.743516912 + m.x2036) - m.x3128 == 0)
m.c691 = Constraint(expr=m.x691*(54.5829056 + m.x2037) - m.x3129 == 0)
m.c692 = Constraint(expr=m.x692*(68.611061605179 + m.x2092) - m.x3130 == 0)
m.c693 = Constraint(expr=m.x693*(149.982358690318 + m.x2093) - m.x3131 == 0)
m.c694 = Constraint(expr=m.x694*(175.844560388705 + m.x2094) - m.x3132 == 0)
m.c695 = Constraint(expr=m.x695*(10.1522671595645 + m.x2095) - m.x3133 == 0)
m.c696 = Constraint(expr=m.x696*(121.104830353398 + m.x2096) - m.x3134 == 0)
m.c697 = Constraint(expr=m.x697*(8.00892516581441 + m.x2097) - m.x3135 == 0)
m.c698 = Constraint(expr=m.x698*(97.3173040663597 + m.x2098) - m.x3136 == 0)
m.c699 = Constraint(expr=m.x699*(58.8520674314227 + m.x2099) - m.x3137 == 0)
m.c700 = Constraint(expr=m.x700*(89.3791992560023 + m.x2100) - m.x3138 == 0)
m.c701 = Constraint(expr=m.x701*(177.636006160939 + m.x2101) - m.x3139 == 0)
m.c702 = Constraint(expr=m.x702*(373.780859160202 + m.x2102) - m.x3140 == 0)
m.c703 = Constraint(expr=m.x703*(158.856788 + m.x2107) - m.x3141 == 0)
m.c704 = Constraint(expr=m.x704*(245.77006 + m.x2108) - m.x3142 == 0)
m.c705 = Constraint(expr=m.x705*(85.0133724208126 + m.x1997) - m.x3143 == 0)
m.c706 = Constraint(expr=m.x706*(108.052970594387 + m.x1998) - m.x3144 == 0)
m.c707 = Constraint(expr=m.x707*(9.3711459385556 + m.x1999) - m.x3145 == 0)
m.c708 = Constraint(expr=m.x708*(10.69589 + m.x2000) - m.x3146 == 0)
m.c709 = Constraint(expr=m.x709*(17.932791400734 + m.x2001) - m.x3147 == 0)
m.c710 = Constraint(expr=m.x710*(88.805712423724 + m.x2002) - m.x3148 == 0)
m.c711 = Constraint(expr=m.x711*(67.92235 + m.x2003) - m.x3149 == 0)
m.c712 = Constraint(expr=m.x712*(17.52572 + m.x2004) - m.x3150 == 0)
m.c713 = Constraint(expr=m.x713*(75.509965 + m.x2005) - m.x3151 == 0)
m.c714 = Constraint(expr=m.x714*(68.860513 + m.x2006) - m.x3152 == 0)
m.c715 = Constraint(expr=m.x715*(215.1945909789 + m.x2007) - m.x3153 == 0)
m.c716 = | |
timeout_ctype, count_ctype, voltage_measurements_ctype, current_measurements_ctype, in_compliance_ctype, None if actual_count_ctype is None else (ctypes.pointer(actual_count_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return voltage_measurements_array, current_measurements_array, [bool(in_compliance_ctype[i]) for i in range(count_ctype.value)]
@ivi_synchronized
def _get_attribute_vi_boolean(self, attribute_id):
r'''_get_attribute_vi_boolean
| Queries the value of a ViBoolean property.
| You can use this method to get the values of device-specific
properties and inherent IVI properties.
Tip:
This method can be called on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset,
and then call this method on the result.
Example: :py:meth:`my_session.channels[ ... ]._get_attribute_vi_boolean`
To call the method on all channels, you can call it directly on the :py:class:`nidcpower.Session`.
Example: :py:meth:`my_session._get_attribute_vi_boolean`
Args:
attribute_id (int): Specifies the ID of a property. From the method panel window, you
can use this control as follows.
- In the method panel window, click on the control or press **Enter**
or the spacebar to display a dialog box containing hierarchical list
of the available properties. Help text is shown for each property.
Select a property by double-clicking on it or by selecting it and
then pressing **Enter**.
- A ring control at the top of the dialog box allows you to see all IVI
properties or only the properties of type ViBoolean. If you choose to
see all IVI properties, the data types appear to the right of the
property names in the list box. Properties with data types other
than ViBoolean are dim. If you select a property data type that is
dim, LabWindows/CVI transfers you to the method panel for the
corresponding method that is consistent with the data type.
- If you want to enter a variable name, press **Ctrl**\ +\ **T** to
change this ring control to a manual input box. If the property in
this ring control has named constants as valid values, you can view
the constants by moving to the value control and pressing **Enter**.
Returns:
attribute_value (bool): Returns the current value of the property. Passes the address of a
ViBoolean variable.
If the property currently showing in the property ring control has
constants as valid values, you can view a list of the constants by
pressing **Enter** on this control. Select a value by double-clicking on
it or by selecting it and then pressing **Enter**.
'''
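        # Marshal the Python arguments into ctypes/VISA types, call the underlying
        # C driver function, check the returned status code, and convert the output
        # parameter back to a native Python bool.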
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
attribute_id_ctype = _visatype.ViAttr(attribute_id) # case S150
attribute_value_ctype = _visatype.ViBoolean() # case S220
error_code = self._library.niDCPower_GetAttributeViBoolean(vi_ctype, channel_name_ctype, attribute_id_ctype, None if attribute_value_ctype is None else (ctypes.pointer(attribute_value_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return bool(attribute_value_ctype.value)
@ivi_synchronized
def _get_attribute_vi_int32(self, attribute_id):
r'''_get_attribute_vi_int32
| Queries the value of a ViInt32 property.
| You can use this method to get the values of device-specific
properties and inherent IVI properties.
Tip:
This method can be called on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset,
and then call this method on the result.
Example: :py:meth:`my_session.channels[ ... ]._get_attribute_vi_int32`
To call the method on all channels, you can call it directly on the :py:class:`nidcpower.Session`.
Example: :py:meth:`my_session._get_attribute_vi_int32`
Args:
attribute_id (int): Specifies the ID of a property. From the method panel window, you
can use this control as follows.
- In the method panel window, click on the control or press **Enter**
or the spacebar to display a dialog box containing hierarchical list
of the available properties. Help text is shown for each property.
Select a property by double-clicking on it or by selecting it and
then pressing **Enter**.
- A ring control at the top of the dialog box allows you to see all IVI
properties or only the properties of type ViInt32. If you choose to
see all IVI properties, the data types appear to the right of the
property names in the list box. Properties with data types other
than ViInt32 are dim. If you select a property data type that is
dim, LabWindows/CVI transfers you to the method panel for the
corresponding method that is consistent with the data type.
- If you want to enter a variable name, press **Ctrl**\ +\ **T** to
change this ring control to a manual input box. If the property in
this ring control has named constants as valid values, you can view
the constants by moving to the value control and pressing **Enter**.
Returns:
attribute_value (int): Returns the current value of the property. Passes the address of a
ViInt32 variable.
If the property currently showing in the property ring control has
constants as valid values, you can view a list of the constants by
pressing **Enter** on this control. Select a value by double-clicking on
it or by selecting it and then pressing **Enter**.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
attribute_id_ctype = _visatype.ViAttr(attribute_id) # case S150
attribute_value_ctype = _visatype.ViInt32() # case S220
error_code = self._library.niDCPower_GetAttributeViInt32(vi_ctype, channel_name_ctype, attribute_id_ctype, None if attribute_value_ctype is None else (ctypes.pointer(attribute_value_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(attribute_value_ctype.value)
@ivi_synchronized
def _get_attribute_vi_int64(self, attribute_id):
r'''_get_attribute_vi_int64
| Queries the value of a ViInt64 property.
| You can use this method to get the values of device-specific
properties and inherent IVI properties.
Tip:
This method can be called on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset,
and then call this method on the result.
Example: :py:meth:`my_session.channels[ ... ]._get_attribute_vi_int64`
To call the method on all channels, you can call it directly on the :py:class:`nidcpower.Session`.
Example: :py:meth:`my_session._get_attribute_vi_int64`
Args:
attribute_id (int): Specifies the ID of a property. From the method panel window, you
can use this control as follows.
- In the method panel window, click on the control or press **Enter**
or the spacebar to display a dialog box containing hierarchical list
of the available properties. Help text is shown for each property.
Select a property by double-clicking on it or by selecting it and
then pressing **Enter**.
- A ring control at the top of the dialog box allows you to see all IVI
              properties or only the properties of type ViInt64. If you choose to
see all IVI properties, the data types appear to the right of the
property names in the list box. Properties with data types other
              than ViInt64 are dim. If you select a property data type that is
dim, LabWindows/CVI transfers you to the method panel for the
corresponding method that is consistent with the data type.
- If you want to enter a variable name, press **Ctrl**\ +\ **T** to
change this ring control to a manual input box. If the property in
this ring control has named constants as valid values, you can view
the constants by moving to the value control and pressing **Enter**.
Returns:
attribute_value (int): Returns the current value of the property. Passes the address of a
                ViInt64 variable.
If the property currently showing in the property ring control has
constants as valid values, you can view a list of the constants by
pressing **Enter** on this control. Select a value by double-clicking on
it or by selecting it and then pressing **Enter**.
'''
vi_ctype = _visatype.ViSession(self._vi) # case S110
channel_name_ctype = ctypes.create_string_buffer(self._repeated_capability.encode(self._encoding)) # case C010
attribute_id_ctype = _visatype.ViAttr(attribute_id) # case S150
attribute_value_ctype = _visatype.ViInt64() # case S220
error_code = self._library.niDCPower_GetAttributeViInt64(vi_ctype, channel_name_ctype, attribute_id_ctype, None if attribute_value_ctype is None else (ctypes.pointer(attribute_value_ctype)))
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return int(attribute_value_ctype.value)
@ivi_synchronized
def _get_attribute_vi_real64(self, attribute_id):
r'''_get_attribute_vi_real64
| Queries the value of a ViReal64 property.
| You can use this method to get the values of device-specific
properties and inherent IVI properties.
Tip:
This method can be called on specific channels within your :py:class:`nidcpower.Session` instance.
Use Python index notation on the repeated capabilities container channels to specify a subset,
and then call this method on the result.
Example: :py:meth:`my_session.channels[ ... | |
= None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-Cds(Cds-Cdd-Cd)S2s",
group =
"""
1 * Cd u0 {2,S} {4,D} {5,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {6,D}
4 Cd u0 {1,D}
5 S2s u0 {1,S}
6 C u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CdsCtSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 Ct u0 {1,S}
4 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CdsCbSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 Cb u0 {1,S}
4 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CddCsSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cs u0 {1,S}
4 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)CsSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cs u0 {1,S}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)CsSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cs u0 {1,S}
4 S2s u0 {1,S}
5 C u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CddCdsSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cd u0 {1,S}
4 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cd)S2s",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
6 C u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cds)S2s",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
6 Cd u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cdd)S2s",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
6 Cdd u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cdd-S2d)S2s",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 S2s u0 {1,S}
6 S2d u0 {3,D}
7 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)(Cds-Cdd-Cd)S2s",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 S2s u0 {1,S}
6 S2d u0 {3,D}
7 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)(Cds-Cd)S2s",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 C u0 {2,D}
6 C u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)(Cds-Cds)S2s",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 C u0 {2,D}
6 Cd u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)(Cds-Cdd)S2s",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cd u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 C u0 {2,D}
6 Cdd u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)(Cds-Cdd-S2d)S2s",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 S2s u0 {1,S}
6 C u0 {3,D}
7 S2d u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)(Cds-Cdd-Cd)S2s",
group =
"""
1 * Cd u0 {2,S} {3,D} {5,S}
2 Cd u0 {1,S} {4,D}
3 Cdd u0 {1,D} {6,D}
4 Cdd u0 {2,D} {7,D}
5 S2s u0 {1,S}
6 C u0 {3,D}
7 C u0 {4,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CddCtSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Ct u0 {1,S}
4 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)CtSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Ct u0 {1,S}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)CtSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Ct u0 {1,S}
4 S2s u0 {1,S}
5 C u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CddCbSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D}
3 Cb u0 {1,S}
4 S2s u0 {1,S}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)CbSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-Cd)CbSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 Cb u0 {1,S}
4 S2s u0 {1,S}
5 C u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-(Cdd-S2d)C=SSs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cdd u0 {1,D} {5,D}
3 CS u0 {1,S} {6,D}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
6 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-CdsC=SSs",
group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 CS u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 S2s u0 {1,S}
5 S2d u0 {2,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 200,
label = "Cds-CdCC",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 C u0 {1,D}
3 C u0 {1,S}
4 C u0 {1,S}
""",
thermo = u'Cds-CdsCsCs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 201,
label = "Cds-CdsCsCs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 Cs u0 {1,S}
4 Cs u0 {1,S}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.1,4.61,4.99,5.26,5.8,6.08,6.36],'cal/(mol*K)','+|-',[0.1,0.1,0.1,0.1,0.1,0.1,0.1]),
H298 = (10.34,'kcal/mol','+|-',0.24),
S298 = (-12.7,'cal/(mol*K)','+|-',0.12),
),
shortDesc = u"""Cd-CsCs BENSON""",
longDesc =
u"""
""",
)
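# Reading the group adjacency lists above: the leading integer is the atom index,
# "*" marks the central atom of the group, "u0" is the number of unpaired electrons,
# and "{2,D}"/"{3,S}" denote a double/single bond to the numbered atom. ThermoData
# entries list Cp values at the given temperatures plus H298 and S298 with their
# uncertainties.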
entry(
index = 202,
label = "Cds-CdsCdsCs",
group =
"""
1 * Cd u0 {2,D} {3,S} {4,S}
2 Cd u0 {1,D}
3 [Cd,CO] u0 {1,S}
4 Cs u0 {1,S}
""",
thermo = u'Cds-Cds(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 204,
label = "Cds-Cds(Cds-Cd)Cs",
group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 Cs u0 {1,S}
5 C u0 {2,D}
""",
thermo = u'Cds-Cds(Cds-Cds)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 205,
label = "Cds-Cds(Cds-Cds)Cs",
group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 Cs u0 {1,S}
5 Cd u0 {2,D}
""",
thermo = ThermoData(
Tdata = ([300,400,500,600,800,1000,1500],'K'),
Cpdata = ([4.4,5.37,5.93,6.18,6.5,6.62,6.72],'cal/(mol*K)','+|-',[0.1,0.1,0.1,0.1,0.1,0.1,0.1]),
H298 = (8.88,'kcal/mol','+|-',0.24),
S298 = (-14.6,'cal/(mol*K)','+|-',0.12),
),
shortDesc = u"""Cd-CdCs BENSON""",
longDesc =
u"""
""",
)
entry(
index = 206,
label = "Cds-Cds(Cds-Cdd)Cs",
group =
"""
1 * Cd u0 {2,S} {3,D} {4,S}
2 Cd u0 {1,S} {5,D}
3 Cd u0 {1,D}
4 Cs u0 {1,S}
5 Cdd u0 {2,D}
""",
thermo = u'Cds-Cds(Cds-Cdd-Cd)Cs',
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = -1,
label = "Cds-Cds(Cds-Cdd-S2d)Cs",
group =
"""
1 * Cd u0 {2,S} {4,D} {5,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {6,D}
4 Cd u0 {1,D}
5 Cs u0 {1,S}
6 S2d u0 {3,D}
""",
thermo = None,
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 208,
label = "Cds-Cds(Cds-Cdd-Cd)Cs",
group =
"""
1 * Cd u0 {2,S} {4,D} {5,S}
2 Cd u0 {1,S} {3,D}
3 Cdd u0 {2,D} {6,D}
4 Cd u0 {1,D}
5 Cs u0 | |
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(SuccessResponse, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['result_id'] = \
result_id
return self.get_result_v1_result_result_id_endpoint.call_with_http_info(**kwargs)
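    # Each endpoint in this generated client follows the same four-method pattern:
    # a synchronous call returning only the response data, a *_with_http_info
    # variant that also returns the HTTP status code and headers, and *_async
    # counterparts of both that return ApplyResult wrappers.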
def post_headline_and_summary_v1_headline_and_summary(
self,
payload,
**kwargs
) -> str:
"""POST request to create a 2-3 sentence summary from input text # noqa: E501
Endpoint for initiating a processing job to create a 2-3 sentence summary from input text. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
payload (Request):
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
str
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['payload'] = \
payload
return self.post_headline_and_summary_v1_headline_and_summary_endpoint.call_with_http_info(**kwargs)
def post_headline_and_summary_v1_headline_and_summary_with_http_info(
self,
payload,
**kwargs
) -> typing.Tuple[str, int, typing.MutableMapping]:
"""POST request to create a 2-3 sentence summary from input text # noqa: E501
Endpoint for initiating a processing job to create a 2-3 sentence summary from input text. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
payload (Request):
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
str
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['payload'] = \
payload
return self.post_headline_and_summary_v1_headline_and_summary_endpoint.call_with_http_info(**kwargs)
def post_headline_and_summary_v1_headline_and_summary_async(
self,
payload,
**kwargs
) -> "ApplyResult[str]":
"""POST request to create a 2-3 sentence summary from input text # noqa: E501
Endpoint for initiating a processing job to create a 2-3 sentence summary from input text. # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
payload (Request):
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[str]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['payload'] = \
payload
return self.post_headline_and_summary_v1_headline_and_summary_endpoint.call_with_http_info(**kwargs)
def post_headline_and_summary_v1_headline_and_summary_with_http_info_async(
self,
payload,
**kwargs
) -> "ApplyResult[typing.Tuple[str, int, typing.MutableMapping]]":
"""POST request to create a 2-3 sentence summary from input text # noqa: E501
Endpoint for initiating a processing job to create a 2-3 sentence summary from input text. # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
payload (Request):
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(str, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['payload'] = \
payload
return self.post_headline_and_summary_v1_headline_and_summary_endpoint.call_with_http_info(**kwargs)
def post_headline_v1_headline(
self,
payload,
**kwargs
) -> str:
"""POST request to create a headline from input text # noqa: E501
Endpoint for initiating a processing job to create a headline from input text. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
payload (Request):
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types | |
expectativa esta me matando','surpresa'),
('vou caminhar sempre na expectativa de encontrá-lo','surpresa'),
('você emudece minhas palavras','surpresa'),
('minhas palavras vão emudecer se não parar de me surpreender','surpresa'),
('a mulher e um efeito deslumbrante da natureza','surpresa'),
('estou deslumbrada com essas jóias','surpresa'),
('isso e romântico e deslumbrante','surpresa'),
('isso pode ser surpreendentemente deslumbrante','surpresa'),
('trabalho deslumbrante','surpresa'),
('essas pessoas são esplêndida','surpresa'),
('e esplendido como o ceu se encontra no momento','surpresa'),
('e um carro fantástico','surpresa'),
('um edifício realmente fantástico','surpresa'),
('por favor não me abandone','tristeza'),
('não quero ficar sozinha','tristeza'),
('não me deixe sozinha','tristeza'),
('estou abatida','tristeza'),
('ele esta todo abatido','tristeza'),
('tão triste suas palavras','tristeza'),
('seu amor não e mais meu','tristeza'),
('estou aborrecida','tristeza'),
('isso vai me aborrecer','tristeza'),
('estou com muita aflição','tristeza'),
('me aflige o modo como fala','tristeza'),
('estou em agonia com meu intimo','tristeza'),
('não quero fazer nada','tristeza'),
('me sinto ansiosa e tensa','tristeza'),
('não consigo parar de chorar','tristeza'),
('não consigo segurar as lagrimas','tristeza'),
('e muita dor perder um ente querido','tristeza'),
('estou realmente arrependida','tristeza'),
('acho que o carma volta, pois agora sou eu quem sofro','tristeza'),
('você não cumpriu suas promessas','tristeza'),
('me sinto amargurada','tristeza'),
('coitado esta tão triste','tristeza'),
('já e tarde de mais','tristeza'),
('nosso amor acabou','tristeza'),
('essa noite machuca só para mim','tristeza'),
('eu não estou mais no seu coração','tristeza'),
('você mudou comigo','tristeza'),
('quando eu penso em você realmente dói','tristeza'),
('como se fosse nada você vê minhas lagrimas','tristeza'),
('você disse cruelmente que não se arrependeu','tristeza'),
('eu nunca mais vou te ver','tristeza'),
('ela esta com depressão','tristeza'),
('a depressão aflige as pessoas','tristeza'),
('estar depressivo e muito ruim','tristeza'),
('estou derrotada e deprimida depois deste dia','tristeza'),
('e comovente te ver dessa maneira','tristeza'),
('e comovente ver o que os filhos do brasil passam','tristeza'),
('como me sinto culpada','tristeza'),
('estou abatida','tristeza'),
('a ansiedade tomou conta de mim','tristeza'),
('as pessoas não gostam do meu jeito','tristeza'),
('adeus passamos bons momentos juntos','tristeza'),
('sinto sua falta','tristeza'),
('ele não gostou da minha comida','tristeza'),
('estou sem dinheiro para a comida','tristeza'),
('queria que fosse o ultimo dia da minha vida','tristeza'),
('você está com vergonha de mim','tristeza'),
('ela não aceitou a minha proposta','tristeza'),
('era o meu ultimo centavo','tristeza'),
('reprovei de ano na faculdade','tristeza'),
('afinal você só sabe me desfazer','tristeza'),
('eu falhei em tudo nessa vida','tristeza'),
('eu fui muito humilhado','tristeza'),
('e uma história muito triste','tristeza'),
('ninguem acredita em mim','tristeza'),
('eu não sirvo para nada mesmo','tristeza'),
('droga, não faço nada direito','tristeza'),
('sofrimento em dobro na minha vida','tristeza'),
('fui demitida essa semana','tristeza'),
('as crianças sofrem ainda mais que os adultos','tristeza'),
('pra mim um dia e ruim, o outro e pior','tristeza'),
('de repente perdi o apetite','tristeza'),
('oh que dia infeliz','tristeza'),
('estamos afundados em contas','tristeza'),
('nem um milagre pode nos salvar','tristeza'),
('só me resta a esperança','tristeza'),
('pior que isso não pode ficar','tristeza'),
('meu salário e baixo','tristeza'),
('não passei no vestibular','tristeza'),
('ninguem se importa comigo','tristeza'),
('ninguem lembrou do meu aniversário','tristeza'),
('tenho tanto azar','tristeza'),
('o gosto da vingança e amargo','tristeza'),
('sou uma mulher amargurada depois de que você me deixou','tristeza'),
('estou desanimada com a vida','tristeza'),
('e um desanimo só coitadinha','tristeza'),
('a derrota e depressiva','tristeza'),
('discriminar e desumano','tristeza'),
('que desanimo','tristeza'),
('e uma desonra para o pais','tristeza'),
('a preocupação deveria nos levar a ação não a depressão','tristeza'),
('passamos ao desalento e a loucura','tristeza'),
('aquele que nunca viu a tristeza nunca reconhecerá a alegria','tristeza'),
('cuidado com a tristeza ela e um vicio','tristeza')]
# Dataset baseTest (30 %)
baseTest = [('não precisei pagar o ingresso','alegria'),
('se eu ajeitar tudo fica bem','alegria'),
('minha fortuna ultrapassa a sua','alegria'),
('sou muito afortunado','alegria'),
('e benefico para todos esta nova medida','alegria'),
('ficou lindo','alegria'),
('achei esse sapato muito simpático','alegria'),
('estou ansiosa pela sua chegada','alegria'),
('congratulações pelo seu aniversário','alegria'),
('delicadamente ele a colocou para dormir','alegria'),
('a musica e linda','alegria'),
('sem musica eu não vivo','alegria'),
('conclui uma tarefa muito difícil','alegria'),
('conclui minha graduação','alegria'),
('estou muito contente com tudo','alegria'),
('eu confio em você','alegria'),
('e um prazer conhecê-lo','alegria'),
('o coleguismo de vocês e animador','alegria'),
('estou aproveitando as ferias','alegria'),
('vamos aproveitar as ferias','alegria'),
('e muito divertido este jogo','alegria'),
('vamos ter muita diversão','alegria'),
('não achei que me divertiria tanto assim','alegria'),
('vou consentir o orçamento ao cliente','alegria'),
('com o consentimento dos meus pais podemos nos casar','alegria'),
('eu adorei este perfume','alegria'),
('sua bondade e cativante','alegria'),
('estou despreocupada','alegria'),
('não me preocupo com o que aconteceu','alegria'),
('me sinto completamente segura','alegria'),
('estimo muito o seu trabalho','alegria'),
('somos estimados por nossa família','alegria'),
('concretizamos nossa ideia','alegria'),
('nosso ideal foi alcançado','alegria'),
('estamos muito felizes juntos','alegria'),
('estou tão animada com os preparativos para o casamento','alegria'),
('você será muito amado meu filho','alegria'),
('os apaixonados são maravilhosos','alegria'),
('agradeço imensamente o seu apoio nestes dias','alegria'),
('esta comida me parece muito atraente','alegria'),
('você me completa','alegria'),
('poderemos completar o projeto hoje!','alegria'),
('estamos namorando','alegria'),
('estou namorando este vestido a um tempo','alegria'),
('pude comprar meu celular hoje','alegria'),
('e um deleite poder compartilhar minhas vitórias','alegria'),
('ela e um boa garota','alegria'),
('estivemos em um ótimo show','alegria'),
('o mundo e feio como o pecado','desgosto'),
('a coisa mais difícil de esconder e aquilo que não existe','desgosto'),
('você errou feio aquele gol','desgosto'),
('nunca vou me casar sou muito feia','desgosto'),
('os golpes da adversidade são terrivelmente amargos','desgosto'),
('os homem ficam terrivelmente chatos','desgosto'),
('abominavelmente convencido','desgosto'),
('terrivelmente irritado','desgosto'),
('as instituições publicas estão terrivelmente decadentes','desgosto'),
('a população viveu em isolamento por muito tempo','desgosto'),
('estou terrivelmente preocupada','desgosto'),
('o nacionalismo e uma doença infantil','desgosto'),
('se me es antipático a minha negação esta pronta','desgosto'),
('muitos documentários sobre esse casal antipático','desgosto'),
('sua beleza não desfaça sua antipatia','desgosto'),
('esta e uma experiência desagradável','desgosto'),
('desagradável estrago nos banheiros','desgosto'),
('o mais irritante no amor e que se trata de um crime que precisa de um cúmplice','desgosto'),
('a situação nos causa grande incomodo','desgosto'),
('estou preocupado com o incomodo na garganta','desgosto'),
('simplesmente não quero amolação da policia','desgosto'),
('você e uma criaturinha muito impertinente','desgosto'),
('o peso e a dor da vida','desgosto'),
('me arrependo amargamente de minhas ações','desgosto'),
('o destino e cruel e os homens não são dignos de compaixão','desgosto'),
('o ódio conduz ao isolamento cruel e ao desespero','desgosto'),
('encerrou com o massacre mais repudiável e asqueroso que se conhece','desgosto'),
('de mal gosto e asqueroso','desgosto'),
('tudo e inserto neste mundo hediondo','desgosto'),
('o crime de corrupção e um crime hediondo','desgosto'),
('o rio esta fetido e de cor escura','desgosto'),
('muito lixo no rio o deixa malcheiroso','desgosto'),
('existe uma laranja podre no grupo e já desconfiamos quem e','desgosto'),
('foi de repente estou machucado e me sentindo enjoado','desgosto'),
('eu fiquei enojado','desgosto'),
('daqui alguns meses vou embora deste pais que já estou nauseado','desgosto'),
('que abominável esse montro!','medo'),
('vamos alarmar a todos sobre a situação','medo'),
('estou amedrontada','medo'),
('estou com muito medo da noite','medo'),
('ele esta me ameaçando a dias','medo'),
('quanta angustia','medo'),
('estou angustiada','medo'),
('angustiadamente vou sair e casa','medo'),
('isso me deixa apavorada','medo'),
('você esta me apavorando','medo'),
('estou desconfiada de você','medo'),
('não confio em você','medo'),
('ate o cachorro está apavorado','medo'),
('estou assustado com as ações do meu colega','medo'),
('agora se sente humilhado, apavorado','medo'),
('assustou a população e provocou mortes','medo'),
('estou com dificuldades para respirar e muito assustado','medo'),
('os policiais se assustaram quando o carro capotou','medo'),
('o trabalhador e assombrado pelo temor do desemprego','medo'),
('este lugar e mal assombrado','medo'),
('estou assombrado pela crise financeira','medo'),
('mesmo aterrorizado lembro de você','medo'),
('aterrorizado e suando frio','medo'),
('um grupo de elefantes selvagens tem aterrorizado vilas','medo'),
('me sinto intimidada pela sua presença','medo'),
('tenho medo de ser advertida novamente','medo'),
('estou correndo o risco de ser advertido','medo'),
('estou correndo riscos de saúde','medo'),
('os riscos são reais','medo'),
('podemos perder muito dinheiro com essa investida','medo'),
('socorro, fui intimado a depor','medo'),
('fui notificado e estou com medo de perde a guarda da minha filha','medo'),
('estou angustiada com meus filhos na rua','medo'),
('e abominável o que fazem com os animais','medo'),
('foi terrível o tigre quase o matou','medo'),
('me advertiram sobre isso','medo'),
('ate que enfim, não agüentava mais te esperar','raiva'),
('eu quero meu dinheiro de volta agora!','raiva'),
('eu odeio a escola!','raiva'),
('vou fazer picadinho de você','raiva'),
('detesto trabalhar no verão','raiva'),
('quero minha comida, e quero agora!','raiva'),
('melhor você recolher minhas compras agora!','raiva'),
('quero descer agora sua maluca','raiva'),
('vou reclamar com o gerente!','raiva'),
('vai engolir o que disse!','raiva'),
('ele me ridiculariza diante
# Copyright 2012 <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from numpy import array, zeros, ones, hstack, delete, insert, arange
from randnums import FltRand, IntRand, FlipCoin
def SimpleChromo(x, nbases):
"""
SimpleChromo splits x into 'nbases' unequal parts
Input:
x -- a single number or a list whose size equals the number of genes
Output:
c -- the chromosome (numpy.ndarray)
Note:
If x is a list, ngenes = len(x) and then:
If ngenes > 1, each gene is split into 'nbases' and the following
chromosome structure is assumed:
c = [0, 1, 2, ... nbases-1, 0, 1, 2, ... nbases-1]
\___________________/ \___________________/
gene # 0 gene # 1
"""
if isinstance(x, float):
vals = FltRand(nbases)
sumv = sum(vals)
return x * vals / sumv
if isinstance(x, list): x = array(x)
ngenes = len(x)
c = zeros(nbases * ngenes)
for i, v in enumerate(x):
vals = FltRand(nbases)
sumv = sum(vals)
a = i * nbases
b = a + nbases
c[a:b] = v * vals / sumv
return c
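# Usage sketch (not part of the original module; assumes FltRand(nbases) returns
# an array of positive floats, as used above):
#   c = SimpleChromo([1.0, 2.0], 3)   # chromosome with 2 genes x 3 bases
#   # c[0:3] sums to 1.0 and c[3:6] sums to 2.0; the split itself is random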
def Fitness(Y):
"""
Fitness function: map objective function into [0, 1]
Y -- objective values
"""
ymin, ymax = min(Y), max(Y)
if abs(ymax - ymin) < 1e-14: return ones(len(Y))
return (ymax - Y) / (ymax - ymin)
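# Example (sketch): Fitness(array([3.0, 1.0, 2.0])) -> array([0. , 1. , 0.5]),
# i.e. the smallest objective value gets fitness 1 and the largest gets 0.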
def SortPop(C, Y, F):
"""
SortPop sort individuals by fitness (decreasing order)
C -- chromosomes/population
Y -- objective values
F -- fitness
"""
I = F.argsort()[::-1] # the [::-1] is a trick to reverse the sorting order
C = C[I] # sorted chromosomes
Y = Y[I] # sorted objective values
F = F[I] # sorted fitness
return C, Y, F
def Ranking(ninds, sp=1.2):
"""
Ranking computes fitness corresponding to a linear ranking
Input:
ninds -- number of individuals
sp -- selective pressure; must be inside [1, 2]
Output:
F -- ranked fitnesses
"""
if sp < 1.0 or sp > 2.0: sp = 1.2
F = zeros(ninds)
for i in range(ninds):
F[i] = 2.0 - sp + 2.0*(sp-1.0)*float(ninds-i-1)/float(ninds-1)
return F
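# Example (sketch): Ranking(4, sp=1.2) -> approximately [1.2, 1.067, 0.933, 0.8];
# fitness decreases linearly with rank and the values average to 1.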
def RouletteSelect(M, n, sample=None):
"""
RouletteSelect selects n individuals
Input:
M -- cumulated probabilities (from sorted population)
sample -- a list of random numbers
Output:
S -- selected individuals (indices)
"""
    if sample is None: sample = FltRand(n)
S = zeros(n, dtype=int) # selected individuals
for i, s in enumerate(sample):
for j, m in enumerate(M):
if m > s:
S[i] = j
break
return S
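# Example (sketch): RouletteSelect([0.5, 0.8, 1.0], 3, sample=[0.1, 0.6, 0.95])
# returns [0, 1, 2]: each sample value picks the first individual whose
# cumulated probability exceeds it.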
def SUSselect(M, n, pb=None):
"""
SUSselect performs the Stochastic-Universal-Sampling selection
It selects n individuals
Input:
M -- cumulated probabilities (from sorted population)
pb -- one random number corresponding to the first probability (pointer/position)
Output:
S -- selected individuals (indices)
"""
dp = 1.0 / float(n)
    if pb is None: pb = FltRand(1, 0.0, dp)
S = zeros(n, dtype=int) # selected individuals
for i in range(n):
j = 0
while pb > M[j]: j += 1
pb += dp
S[i] = j
return S
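# Example (sketch): SUSselect([0.5, 0.8, 1.0], 2, pb=0.25) places equally spaced
# pointers at 0.25 and 0.75 and returns [0, 1].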
def FilterPairs(S):
"""
FilterPairs generates 2 x ninds/2 lists from selected individuals
try to avoid repeated indices in pairs
"""
ninds = len(S)
    A = zeros(ninds//2, dtype=int)
    B = zeros(ninds//2, dtype=int)
    for i in range(ninds//2):
a, b = S[2*i], S[2*i+1]
if a == b:
for s in S:
if s != a:
b = s
break
A[i], B[i] = a, b
return A, B
def FltCrossover(A, B, pc=0.8):
"""
FltCrossover performs the crossover in a pair of individuals with float point numbers
Input:
A -- chromosome of parent
B -- chromosome of parent
pc -- probability of crossover
Output:
a -- chromosome of offspring
b -- chromosome of offspring
"""
if FlipCoin(pc):
nbases = len(A)
pos = IntRand(1, nbases-1)
a = hstack([A[:pos], B[pos:]])
b = hstack([B[:pos], A[pos:]])
else:
a, b = A.copy(), B.copy()
return a, b
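# Example (sketch), assuming FlipCoin(1.0) always returns True and the random
# cut position happens to be 2:
#   A = array([1., 2., 3., 4.]), B = array([5., 6., 7., 8.])
#   -> a = [1., 2., 7., 8.], b = [5., 6., 3., 4.]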
def FltMutation(c, pm=0.01, coef=1.1):
"""
FltMutation performs mutation in an individual with float point numbers
Input:
c -- chromosome
pm -- probability of mutation
coef -- coefficient to increase or decrease bases
Output:
c -- modified (or not) chromosome
"""
if FlipCoin(pm):
nbases = len(c)
bmax = max(c)
pos = IntRand(0, nbases)
if FlipCoin(0.5): c[pos] += bmax * coef
else: c[pos] -= bmax * coef
return c
def OrdCrossover(A, B, pc=0.8, method='OX1', cut1=None, cut2=None):
"""
OrdCrossover performs the crossover in a pair of individuals with integer numbers
that correspond to a ordered sequence, e.g. traveling salesman problem
Input:
A -- chromosome of parent
B -- chromosome of parent
pc -- probability of crossover
method -- OX1: order crossover # 1
cut1 -- position of first cut: use None for random value
cut2 -- position of second cut: use None for random value
Output:
a -- chromosome of offspring
b -- chromosome of offspring
"""
if FlipCoin(pc):
nbases = len(A)
        if cut1 is None: cut1 = IntRand(1, nbases-1)
        if cut2 is None: cut2 = IntRand(cut1+1, nbases)
if cut1==cut2: raise Exception('problem with cut1 and cut2')
a, b = zeros(nbases, dtype=int), zeros(nbases, dtype=int)
m, n = A[cut1 : cut2], B[cut1 : cut2]
a[cut1 : cut2] = m
b[cut1 : cut2] = n
#print '\ncut1 =', cut1, ', cut2 =', cut2
#print 'A =', A
#print 'B =', B
#print 'a =', a
#print 'b =', b
c = hstack([[v for v in B[cut2 : nbases] if not v in m],
[v for v in B[ : cut2 ] if not v in m]])
d = hstack([[v for v in A[cut2 : nbases] if not v in n],
[v for v in A[ : cut2 ] if not v in n]])
#from numpy import array
#print 'c =', array(c, dtype=int)
#print 'd =', array(d, dtype=int), '\n'
a[cut2:] = c[:nbases-cut2]
a[:cut1] = c[nbases-cut2:]
b[cut2:] = d[:nbases-cut2]
b[:cut1] = d[nbases-cut2:]
else:
a, b = A.copy(), B.copy()
return a, b
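# Worked example (sketch) with pc = 1, cut1 = 2, cut2 = 4:
#   A = array([1, 2, 3, 4, 5, 6]), B = array([6, 5, 4, 3, 2, 1])
#   -> a = [6, 5, 3, 4, 2, 1], b = [1, 2, 4, 3, 5, 6]
# Each child keeps its parent's cut slice in place and fills the remaining
# positions with the other parent's genes, taken in order starting after cut2.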
def OrdMutation(c, pm=0.01, method='DM', cut1=None, cut2=None, ins=None):
"""
OrdMutation performs the mutation in an individual with integer numbers
corresponding to a ordered sequence, e.g. traveling salesman problem
Input:
c -- chromosome
pm -- probability of mutation
method -- DM: displacement mutation
cut1 -- position of first cut: use None for random value
cut2 -- position of second cut: use None for random value
ins -- position in *cut* slice (v) after which the cut subtour (u) is inserted
Output:
c -- modified (or not) chromosome
"""
if FlipCoin(pm):
nbases = len(c)
        if cut1 is None: cut1 = IntRand(1, nbases-1)
        if cut2 is None: cut2 = IntRand(cut1+1, nbases)
if cut1==cut2: raise Exception('problem with cut1 and cut2')
# create copy of c
c = c.copy()
# new method
if True:
# lengths and insertion point
nc = len(c)
ncut = cut2 - cut1 # number of cut items
nrem = nc - ncut # number of remaining items
            if ins is None: ins = IntRand(0, nrem)
# auxiliary map: old => new index
o2n = arange(nc)
for i in range(nc):
if i < cut1: o2n[i] = i # index is unchanged
elif i < cut2: o2n[i] = -1 # mark cut items with -1
else: o2n[i] = i-ncut # shift items after cut to the left
k = 1 # increment for index of new cut item
for i in range(nc):
if o2n[i] > ins:
o2n[i] += ncut # shift right to accomodate cut items
if o2n[i] < 0:
o2n[i] = ins+k # put cut items after 'ins'
k += 1
# copy items to the right place
cc = c.copy()
for o, n in enumerate(o2n):
c[n] = cc[o]
# this method, using 'insert', apparently fails in some
# versions of numpy and windows
if False:
u = c[cut1 : cut2]
v = delete(c, range(cut1, cut2))
if ins==None: ins = IntRand(0, len(v))
#print 'u =', u
#print 'v =', v
#print 'cut1 =', cut1, ' cut2 =', cut2, ' ins =', ins
c = insert(v, ins+1, u)
return c
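# Worked example (sketch) with pm = 1, cut1 = 2, cut2 = 4, ins = 2 and c given
# as a numpy integer array:
#   c = [1, 2, 3, 4, 5, 6] -> [1, 2, 5, 3, 4, 6]
# The cut subtour [3, 4] is removed and re-inserted after position 'ins' of the
# remaining sequence [1, 2, 5, 6].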
# test
if __name__ == "__main__":
from numpy import cumsum
from pylab import show, plot
from testing import CheckVector
from output import
# coding: utf-8
import numpy as np
import argparse
import matplotlib.pyplot as plt
from time import sleep
import support
from mpl_toolkits.mplot3d import Axes3D
def rungekutta4d( func , argdict = None ,spoint = 0, epoint = 100, initial_value = 1, N = 1000 , numberofvalues = 2 ):
h = ( epoint - spoint ) / N
t = np.arange( spoint, epoint, h )
    # case of a single variable
if numberofvalues == 1:
xpoints = []
initial_value = np.array( initial_value ).reshape( -1, )
for xx0 in initial_value:
x = xx0
xpoint = []
for i, tt in enumerate(t):
xpoint.append( x )
                k1 = h * func( x, tt )
                k2 = h * ( func( x + k1 / 2, tt + h / 2 ) )
                k3 = h * ( func( x + k2 / 2, tt + h / 2 ) )
                k4 = h * ( func( x + k3, tt + h ) )
x += ( k1 + 2.0 * k2 + 2.0 * k3 + k4 ) / 6
xpoints.append( xpoint )
return t, xpoints
elif numberofvalues == 2:
xpoints = []
ypoints = []
if isinstance( initial_value, float ) or len( initial_value ) == 0:
initial_value = [[ 1, 1 ]]
for iv in initial_value:
xpoint = []
ypoint = []
x = iv[ 0 ]
y = iv[ 1 ]
for i, tt in enumerate( t ):
xpoint.append( x )
ypoint.append( y )
k1 = h * func( x, y, tt )[ 0 ]
l1 = h * func( x, y, tt )[ 1 ]
                k2 = h * func( x + k1 / 2, y + l1 / 2, tt + h / 2 )[ 0 ]
                l2 = h * func( x + k1 / 2, y + l1 / 2, tt + h / 2 )[ 1 ]
                k3 = h * func( x + k2 / 2, y + l2 / 2, tt + h / 2 )[ 0 ]
                l3 = h * func( x + k2 / 2, y + l2 / 2, tt + h / 2 )[ 1 ]
                k4 = h * func( x + k3, y + l3, tt + h )[ 0 ]
                l4 = h * func( x + k3, y + l3, tt + h )[ 1 ]
x += ( k1 + 2.0 * k2 + 2.0 * k3 + k4 ) / 6
y += ( l1 + 2.0 * l2 + 2.0 * l3 + l4 ) / 6
xpoints.append( xpoint )
ypoints.append( ypoint )
return t,xpoints, ypoints
elif numberofvalues == 3:
xpoints = []
ypoints = []
zpoints = []
if isinstance( initial_value, float ) or len( initial_value ) == 0 :
initial_value = [[ 1, 1, 1 ]]
for iv in initial_value:
if len(iv) != 3:
iv = [0.1, 0.1, 0.1]
xpoint = []
ypoint = []
zpoint = []
x = iv[ 0 ]
y = iv[ 1 ]
z = iv[ 2 ]
for i, tt in enumerate( t ):
xpoint.append( x )
ypoint.append( y )
zpoint.append( z )
k1 = h * func( x, y, z, tt )[ 0 ]
l1 = h * func( x, y, z ,tt )[ 1 ]
m1 = h * func( x, y, z ,tt )[ 2 ]
                k2 = h * func( x + k1 / 2, y + l1 / 2, z + m1 / 2, tt + h / 2 )[ 0 ]
                l2 = h * func( x + k1 / 2, y + l1 / 2, z + m1 / 2, tt + h / 2 )[ 1 ]
                m2 = h * func( x + k1 / 2, y + l1 / 2, z + m1 / 2, tt + h / 2 )[ 2 ]
                k3 = h * func( x + k2 / 2, y + l2 / 2, z + m2 / 2, tt + h / 2 )[ 0 ]
                l3 = h * func( x + k2 / 2, y + l2 / 2, z + m2 / 2, tt + h / 2 )[ 1 ]
                m3 = h * func( x + k2 / 2, y + l2 / 2, z + m2 / 2, tt + h / 2 )[ 2 ]
                k4 = h * func( x + k3, y + l3, z + m3, tt + h )[ 0 ]
                l4 = h * func( x + k3, y + l3, z + m3, tt + h )[ 1 ]
                m4 = h * func( x + k3, y + l3, z + m3, tt + h )[ 2 ]
x += ( k1 + 2.0 * k2 + 2.0 * k3 + k4 ) / 6
y += ( l1 + 2.0 * l2 + 2.0 * l3 + l4 ) / 6
z += ( m1 + 2.0 * m2 + 2.0 * m3 + m4 ) / 6
xpoints.append( xpoint )
ypoints.append( ypoint )
zpoints.append( zpoint )
return t, xpoints, ypoints, zpoints
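# Usage sketch (not part of the original module): a single linear ODE dx/dt = -x,
# whose derivative function takes (x, t) when numberofvalues == 1:
#   t, xpoints = rungekutta4d(lambda x, t: -x, spoint=0, epoint=5,
#                             initial_value=1.0, N=500, numberofvalues=1)
#   # xpoints[0] then approximates exp(-t) on the time grid t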
def float_to_list( f ):
if not isinstance( f, list ):
return [ f ]
return f
def reshape1n(x):
return np.array(x).reshape(-1,)
def get_d( points = None ):
l = np.log10( np.max( points ) ) - 1
dl = 10 ** int( l )
dl = round( dl, int( abs( l ) ) )
return dl
def graph_plot( t, xpoints, ypoints = None, zpoints = None, chapter = 0, function = None, graph_n = None, hlines = None, vlines = None, linelist = None, savefigOn = True, N = 1000, graph_label = None, pointplot = None, ntimegraphs = 1, n3dgraphs = 1, additionalplots = None ):
func_name = function.__class__.__name__
argdict = { key: val for key, val in function.__dict__.items() if str(key) != "t" and str(key) != "xpoints" and str(key) != "ypoints" and str(key) != "numberofvalues" }
print('chapter{}'.format( chapter))
print('function : {}'.format( function.__class__.__name__ ) )
print('function coefficient : {}'.format( argdict ) )
    # build the 2D plots
    if zpoints is None:
if ypoints:
xpoints = float_to_list( xpoints )
ypoints = float_to_list( ypoints )
            # plots of x and y against time
if ntimegraphs:
for i, (x, y) in enumerate( zip ( xpoints, ypoints )):
x = reshape1n( x )
y = reshape1n( y )
plt.title( 't-x,y x0 ={}, y0={}, argument : {}'.format( x[ 0 ],y[ 0 ] , argdict ), fontsize = 7 )
if i > ntimegraphs:
break
plt.plot( t, x.reshape( -1, ), label = 'x' )
plt.plot( t, y.reshape( -1, ), label = 'y' )
plt.title( func_name )
plt.grid()
plt.legend()
if savefigOn == True:
plt.savefig('./img/chapter{}/tgraph{}.jpg'.format( chapter, i ) )
plt.show()
else:
ypoints = float_to_list( xpoints )
xpoints = []
for i in range( len( ypoints ) ):
xpoints.append( float_to_list( t ) )
plt.scatter( [], [], label = 'coefficient : {}'.format( argdict ), color = 'k' )
for x, y in zip ( xpoints, ypoints ):
x = reshape1n(x)
y = reshape1n(y)
plt.plot( x, y, label = 'initial value (x, y) : ({}, {})'.format( x[ 0 ], y[ 0 ] ) )
dx = get_d( xpoints )
dy = get_d( ypoints )
y_max, y_min = ( np.max( ypoints ) - np.max( ypoints )/ 10 ) + dy , -dy
x_max, x_min = ( np.max( xpoints ) - np.max( xpoints )/ 10 ) + dx , -dx
plt.xlim( xmin = x_min, xmax = x_max )
plt.ylim( ymin = y_min, ymax = y_max )
if not hlines:
hlines = []
if not vlines:
vlines = []
plt.title( func_name, loc = 'center')
        if linelist:
p = np.arange( x_min, x_max, 0.01 )
for l in linelist:
if l[ 1 ] == 0:
vlines.append( l[ 0 ])
elif l[ 0 ] == 0:
hlines.append( l[ 1 ] )
else:
q = p * l[ 0 ] + l[ 1 ]
plt.plot( p, q, linestyle = '--')
if function.numberofvalues == 2 :
plt.xlabel( "x( t )" )
plt.ylabel( "y( t )" )
elif function.numberofvalues == 1:
plt.xlabel( " t " )
plt.ylabel(" x( t ) ")
plt.hlines( y = 0.0, xmin = x_min, xmax = x_max, colors = 'k', linewidths = 2)
plt.vlines( x = 0.0, ymin = y_min, ymax = y_max, colors = 'k', linewidths = 2)
if hlines:
plt.hlines( y = hlines, xmin = x_min, xmax = x_max,colors = 'lightpink', linewidths = 2, alpha = 0.8, label = 'hlines: {}'.format( hlines ) )
plt.yticks( list( np.arange( y_min, y_max, dy ) ) + hlines )
if vlines:
            plt.vlines( x = vlines, ymin = y_min, ymax = y_max, colors =
# run_benchmark.py
#!/usr/bin/env python3
import re
import os
import sys
import json
import fileinput
import subprocess
import time
import csv
import platform
import datetime
import configparser
import argparse
import collections
try:
import psutil
except ImportError:
found_psutil = False
print("Was not able to import psutil. Please install it via `pip3 install "
"psutil`.\nThe benchmark will run, but it won't be able to extract "
"CPU or memory metrics.\n", file=sys.stderr)
else:
found_psutil = True
def get_cmd_args():
# Define ArgumentParser and declare all needed command line arguments
parser = argparse.ArgumentParser(description='Execute nodegame benchmark '
'and write benchmark data to csv file.')
parser.add_argument('-c', '--config', type=str, required=True,
help='Benchmark configuration file in INI format '
'containing variables that likely do not change '
'between benchmarks.')
parser.add_argument('-n', '--num_conns', type=int, nargs='+',
help='Number of simultaneous connections to consider '
'for the benchmark, can be a list.')
parser.add_argument('-r', '--reliable', action='store_true',
help='Boolean flag to turn on reliable messaging.')
parser.add_argument('-nr', '--no_run', action='store_true',
help='Boolean flag to disable launching the game. '
'Will just process existing log files.')
parser.add_argument('-t', '--timeouts', type=int, nargs='+',
help='Timeouts to consider for the benchmark when '
'reliable messaging is used, can be a list.')
args = parser.parse_args()
# Manually check dependency between command line arguments
if not args.no_run:
if not args.num_conns:
print('Error: --num_conns needs to be specified when a benchmark '
'is run.', file=sys.stderr)
sys.exit(1)
if args.reliable and not args.timeouts:
print('Error: --timeouts needs to be specified when reliable '
'messaging is activated.', file=sys.stderr)
sys.exit(1)
# Make sure we have a default value for args.timeouts. This is important
# because we are iterating over it, even though the actual value does not
# matter
if not args.reliable:
args.timeouts = [4000]
return args
def expand_user_in_cfg(cfg):
""" Iterate over all options in both the 'Directories' and 'Files' sections
and expand the user variable"""
for dir_option in cfg.options('Directories'):
cfg.set('Directories', dir_option,
os.path.expanduser(cfg.get('Directories', dir_option)))
for file_option in cfg.options('Files'):
cfg.set('Files', file_option,
os.path.expanduser(cfg.get('Files', file_option)))
# Record the current Unix time in micro seconds.
# This is used to uniquely identify the benchmark.
BENCHMARK_TIME = int(time.time() * 10**6)
def get_benchmark_filename(folder, suffix, ext):
""" Utility function to create benchmark filenames with timestamp included.
"""
file_name = 'benchmark_{}_{}.{}'.format(BENCHMARK_TIME, suffix, ext)
return os.path.join(folder, file_name)
def write_launcher_settings(settings_file, settings):
with open(settings_file, 'w') as settings_fp:
settings_str = ",\n".join([" {}: {}".format(k, v)
for (k, v) in settings])
settings_fp.write("module.exports = {{\n{}\n}};\n"
.format(settings_str))
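# Sketch of the generated file, for a hypothetical settings list such as
# [('numPlayers', 4), ('reliable', 'true')] (names are illustrative only):
#   module.exports = {
#       numPlayers: 4,
#       reliable: true
#   };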
def write_timeout_to_cfg_files(cfg, reliable, timeout):
""" Writes the retry timeout and the reliable boolean flag to the client
and server var file. Note that even though timeout is written every time it
only takes effect if reliable == True.
"""
for mode in ['client', 'server']:
var_section = '{} Variables'.format(mode.capitalize())
var_file = '{}_var_file'.format(mode)
re_reliable = re.compile(r'({0})\s*=\s*(true|false)'.format(
cfg.get(var_section, 'rel_msg_var')))
re_retry = re.compile(r'({0})\s*=\s*\d+'.format(
cfg.get(var_section, 'rel_retry_var')))
# We iterate through the client variable file and modify it in-place.
# In this case everything written to stdout will be redirected to the
# file we opened, hence we need to print every line.
for line in fileinput.input(cfg.get('Files', var_file), inplace=True):
# Remove trailing whitespace
line = line.rstrip()
# If the current line matches the reliable regular expression, do
# the appropriate substitution. We convert reliable to lower case,
# because booleans are uppercase in python (e.g. True vs. true).
if re_reliable.search(line):
print(re_reliable.sub(r'\1 = ' + str(reliable).lower(), line))
# Else if it matches the retry variable regular expression, do
# another substitution.
elif re_retry.search(line):
print(re_retry.sub(r'\1 = ' + str(timeout), line))
# Else print the original line.
else:
print(line)
def sizeof_fmt(num, suffix='B'):
""" Utility function to convert byte amounts to human readable format.
Taken from http://stackoverflow.com/a/1094933/2528077 """
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def time_fmt(seconds):
""" Utilty function to convert duration to human readable format. Follows
default format of the Unix `time` command. """
return "{:.0f}m{:.3f}s".format(seconds // 60, seconds % 60)
def build_nodegame(cfg):
""" Routine to build nodegame, saves the build log into a separate file.
Warns if there was an error. """
build_log = get_benchmark_filename(cfg.get('Directories', 'log_dir'),
'build', 'log')
print('Build Log:\n{}\n'.format(build_log))
with open(build_log, 'a') as b_log:
retcode = subprocess.call(['node', 'bin/make.js', 'build-client',
'-a', '-o', 'nodegame-full'],
cwd=cfg.get('Directories', 'server_dir'),
stdout=b_log, stderr=b_log)
if retcode:
print("Warning: The nodegame build had a non-zero exit code.",
file=sys.stderr)
def run_launcher(cfg):
""" Executes `node launcher.js` from the right cwd and logs stdout and
stderr to the previously defined log folder.
"""
stdout_log = get_benchmark_filename(cfg.get('Directories', 'log_dir'),
'stdout', 'log')
stderr_log = get_benchmark_filename(cfg.get('Directories', 'log_dir'),
'stderr', 'log')
print('Logging stdout and stderr:\n{}\n{}'.format(stdout_log, stderr_log))
launcher_file = cfg.get('Files', 'launcher_file')
if not os.path.exists(launcher_file):
raise FileNotFoundError("$[Files] launcher_file = {} does not "
"exist.".format(launcher_file))
launcher_cwd = cfg.get('Directories', 'launcher_cwd')
if not os.path.exists(launcher_cwd):
raise FileNotFoundError("[Directories] launcher_cwd = {} does not "
"exist.".format(launcher_cwd))
with open(stdout_log, 'a') as f_out, open(stderr_log, 'a') as f_err:
proc = subprocess.Popen(['node', cfg.get('Files', 'launcher_file'),
cfg.get('General Settings', 'game')],
cwd=cfg.get('Directories', 'launcher_cwd'),
stdout=f_out, stderr=f_err)
return proc
def get_process_metrics(proc):
""" Extracts CPU times, memory infos and connection infos about a given
process started via Popen(). Also obtains the return code. """
p = psutil.Process(proc.pid)
max_cpu = [0, 0]
max_mem = [0, 0]
conns = []
while proc.poll() is None:
try:
cpu = list(p.cpu_times())
mem = list(p.memory_info())
conns = p.connections('all')
for child in p.children(recursive=True):
c_cpu = list(child.cpu_times())
c_mem = list(child.memory_info())
cpu[0] += c_cpu[0]
cpu[1] += c_cpu[1]
mem[0] += c_mem[0]
mem[1] += c_mem[1]
if max_cpu[0] < cpu[0]:
max_cpu = cpu
if max_mem[0] < mem[0]:
max_mem = mem
except (psutil.AccessDenied, psutil.NoSuchProcess):
pass
time.sleep(1)
retcode = proc.wait()
return retcode, max_cpu, max_mem, conns
def run_test(cfg):
""" Runs `npm test` from the correct cwd and returns the return code. """
return subprocess.call(['npm', 'test'],
cwd=cfg.get('Directories', 'test_cwd'))
def parse_server_msg_file(msg_file, is_reliable):
""" Parses the server message log file. Extract metrics about the total
number of messages and the break down according to type. In addition
computes the average delay of a message round-trip if reliable messaging is
enabled. """
# define a message counter and a timestamps dictionary for both client and
# server
msg_counter = collections.Counter()
timestamps = {'client': {}, 'server': {}}
# open the message file for reading
with open(msg_file) as messages:
for message in messages:
# increment total message counter
msg_counter['total'] += 1
# parse the resulting json strings
winston_msg = json.loads(message)
game_msg = winston_msg['GameMsg']
# increment corresponding target counter
msg_counter[game_msg['target']] += 1
# skip the rest if reliable messaging is not activated
if not is_reliable:
continue
# extract message id
msg_id = str(game_msg['id'])
# parse JavaScript Date.prototype.toISOString() into a Python
# datetime object
created = datetime.datetime.strptime(game_msg['created'],
'%Y-%m-%dT%H:%M:%S.%fZ')
timestamp = datetime.datetime.strptime(winston_msg['timestamp'],
'%Y-%m-%dT%H:%M:%S.%fZ')
# initialize timestamps
if msg_id not in timestamps['client']:
timestamps['client'][msg_id] = [0, 0]
if msg_id not in timestamps['server']:
timestamps['server'][msg_id] = [0, 0]
# different between ACK and normal messages for both client and
# server
if game_msg['target'] == 'ACK':
if game_msg['to'] == 'SERVER':
timestamps['server'][game_msg['text']][1] = timestamp
elif game_msg['from'] == 'ultimatum':
timestamps['client'][game_msg['text']][1] = timestamp
else:
if game_msg['to'] == 'SERVER':
timestamps['client'][msg_id][0] = created
elif game_msg['from'] == 'ultimatum':
timestamps['server'][msg_id][0] = timestamp
# simply return counter if no reliable messaging
if not is_reliable:
return msg_counter
# compute timedeltas for both client and server
client_server_times = [
v[1] - v[0] for v in timestamps['client'].values() if v[0] and v[1]
]
    server_client_times = [
        v[1] - v[0] for v in timestamps['server'].values() if v[0] and v[1]
    ]
if len(client_server_times) == 0:
print("Warning: Could not record time deltas for client -> server "
"messages.", file=sys.stderr)
avg_client_server_time = 0.0
else:
avg_client_server_time = sum(
client_server_times, datetime.timedelta(0)
).total_seconds() / len(client_server_times)
if len(server_client_times) == 0:
print("Warning: Could not record time deltas for server -> client "
"messages.", file=sys.stderr)
avg_server_client_time = 0.0
else:
avg_server_client_time = sum(
server_client_times, datetime.timedelta(0)
).total_seconds() / len(server_client_times)
print("The average delay to deliver a message was {:.0f} milliseconds."
.format(avg_server_client_time * 1000))
return msg_counter, avg_client_server_time, avg_server_client_time
def main():
args = get_cmd_args()
with open(args.config) as cfg_fp:
cfg = configparser.ConfigParser(interpolation=configparser.
ExtendedInterpolation())
# make the config options case sensitive
cfg.optionxform = str
cfg.read_file(cfg_fp)
expand_user_in_cfg(cfg)
# construct metrics.csv file name
if args.no_run:
csv_metrics_file = os.devnull
else:
csv_metrics_file = \
get_benchmark_filename(cfg.get('Directories', 'csv_dir'),
'metrics', 'csv')
# construct messages.csv file name
csv_msg_file = \
get_benchmark_filename(cfg.get('Directories', 'csv_dir'),
'messages', 'csv')
print('CSV files:\n{}\n{}\n'.format(csv_metrics_file, csv_msg_file))
# this defines the metrics we want to record
        metrics_names =
if not check_sparse_nnz and any(t.is_sparse for t in tupled_inputs if isinstance(t, torch.Tensor)):
return fail_test('gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.')
# Make sure that gradients are saved for at least one input
any_input_requiring_grad = False
for idx, inp in enumerate(tupled_inputs):
if is_tensor_like(inp) and inp.requires_grad:
if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128):
warnings.warn(
f'Input #{idx} requires gradient and '
'is not a double precision floating point or complex. '
'This check will likely fail if all the inputs are '
'not of double precision floating point or complex. ')
content = inp._values() if inp.is_sparse else inp
# TODO: To cover more problematic cases, replace stride = 0 check with
# "any overlap in memory" once we have a proper function to check it.
if content.layout is not torch._mkldnn: # type: ignore
if not all(st > 0 or sz <= 1 for st, sz in zip(content.stride(), content.size())):
raise RuntimeError(
f'The {idx}th input has a dimension with stride 0. gradcheck only '
'supports inputs that are non-overlapping to be able to '
'compute the numerical gradients correctly. You should call '
'.contiguous on the input before passing it to gradcheck.')
any_input_requiring_grad = True
inp.retain_grad()
if not any_input_requiring_grad:
raise ValueError(
'gradcheck expects at least one input tensor to require gradient, '
            'but none of them have requires_grad=True.')
return True
def check_outputs(outputs) -> None:
if any(t.is_sparse for t in outputs if isinstance(t, torch.Tensor)):
# it is easier to call to_dense() on the sparse output than
# to modify analytical jacobian
raise ValueError('Sparse output is not supported at gradcheck yet. '
'Please call to_dense() on the output of fn for gradcheck.')
if any(t.layout == torch._mkldnn for t in outputs if isinstance(t, torch.Tensor)): # type: ignore
raise ValueError('MKLDNN output is not supported at gradcheck yet. '
'Please call to_dense() on the output of fn for gradcheck.')
def check_no_differentiable_outputs(fail_test, func, inputs, func_out, eps) -> bool:
# When there are no differentiable outputs, numerical gradient for a function is
# expected to be zero.
for i, o in enumerate(func_out):
def fn(input):
return _as_tuple(func(*input))[i]
numerical = get_numerical_jacobian(fn, inputs, eps=eps)
for n in numerical:
if torch.ne(n, 0).sum() > 0:
return fail_test('Numerical gradient for function expected to be zero')
return True
FAILED_BATCHED_GRAD_MSG = """
gradcheck or gradgradcheck failed while testing batched gradient computation.
This could have been invoked in a number of ways (via a test that calls
gradcheck/gradgradcheck directly or via an autogenerated test).
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `check_batched_grad=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops.py), then modify the OpInfo for the test
to have `check_batched_grad=False` and/or `check_batched_gradgrad=False`.
- is common_method_invocations-based, then add your test to the denylist
EXCLUDE_BATCHED_GRAD_TESTS in test_autograd.py
If you're modifying an existing operator that supports batched grad computation,
or wish to make a new operator work with batched grad computation, please read
the following.
To compute batched grads (e.g., jacobians, hessians), we vmap over the backward
computation. The most common failure case is if there is a 'vmap-incompatible
operation' in the backward pass. Please see
NOTE: [How to write vmap-compatible backward formulas]
in the codebase for an explanation of how to fix this.
""".strip()
def get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp):
return f"""
For output {output_idx} and input {input_idx}:
{FAILED_BATCHED_GRAD_MSG}
Got:
{res}
Expected:
{exp}
""".strip()
def test_batched_grad(fail_test, input, output, output_idx) -> bool:
# NB: test_batched_grad compares two autograd.grad invocations with a single
# vmap(autograd.grad) invocation. It's not exactly a "gradcheck" in the
# sense that we're not comparing an analytical jacobian with a numeric one,
# but it is morally similar (we could have computed a full analytic jac
# via vmap, but that is potentially slow)
diff_input_list = list(iter_tensors(input, True))
grad = functools.partial(torch.autograd.grad, output, diff_input_list, retain_graph=True, allow_unused=True)
def vjp(v):
results = grad(v)
results = tuple(grad if grad is not None else
torch.zeros([], dtype=inp.dtype, device=inp.device).expand(inp.shape)
for grad, inp in zip(results, diff_input_list))
return results
grad_outputs = [torch.randn_like(output) for _ in range(2)]
expected = [vjp(gO) for gO in grad_outputs]
expected = [torch.stack(shards) for shards in zip(*expected)]
# Squash warnings since these are expected to happen in most cases
# NB: this doesn't work for CUDA tests: https://github.com/pytorch/pytorch/issues/50209
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="Batching rule not implemented")
warnings.filterwarnings("ignore", message="torch.vmap is an experimental prototype")
try:
result = vmap(vjp)(torch.stack(grad_outputs))
except RuntimeError as ex:
# It's OK that we're not raising the error at the correct callsite.
# That's because the callsite is always going to inside the Python
# autograd.grad instead of the C++ traceback of what line in the
# backward formula
return fail_test(
f'While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG}')
for input_idx, (res, exp) in enumerate(zip(result, expected)):
if torch.allclose(res, exp):
continue
return fail_test(get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp))
return True
def test_backward_mul_by_grad_output(fail_test, outputs, inputs, check_sparse_nnz) -> bool:
# Tests that backward is multiplied by grad_output
diff_input_list: List[torch.Tensor] = list(iter_tensors(inputs, True))
if not diff_input_list:
raise RuntimeError("no Tensors requiring grad found in input")
grads_input = torch.autograd.grad(outputs, diff_input_list,
[torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in outputs],
allow_unused=True)
for gi, di in zip(grads_input, diff_input_list):
if gi is None:
continue
if isinstance(gi, torch.Tensor) and gi.layout != torch.strided:
if gi.layout != di.layout:
return fail_test('grad is incorrect layout (' + str(gi.layout) + ' is not ' + str(di.layout) + ')')
if gi.layout == torch.sparse_coo:
if gi.sparse_dim() != di.sparse_dim():
return fail_test('grad is sparse tensor, but has incorrect sparse_dim')
if gi.dense_dim() != di.dense_dim():
return fail_test('grad is sparse tensor, but has incorrect dense_dim')
gi = gi.to_dense()
di = di.to_dense()
if check_sparse_nnz:
if not torch.allclose(gi, torch.zeros_like(gi)):
return fail_test('backward not multiplied by grad_output')
elif not gi.eq(0).all():
return fail_test('backward not multiplied by grad_output')
if gi.dtype != di.dtype or gi.device != di.device or gi.is_sparse != di.is_sparse:
return fail_test("grad is incorrect type")
if gi.size() != di.size():
return fail_test('grad is incorrect size')
return True
def test_undefined_grad(fail_test, func, outputs, inputs) -> bool:
diff_input_list: List[torch.Tensor] = list(iter_tensors(inputs, True))
if not diff_input_list:
raise RuntimeError("no Tensors requiring grad found in input")
def warn_bc_breaking():
warnings.warn((
'Backwards compatibility: New undefined gradient support checking '
'feature is enabled by default, but it may break existing callers '
'of this function. If this is true for you, you can call this '
'function with "check_undefined_grad=False" to disable the feature'))
def check_undefined_grad_support(output_to_check):
grads_output = [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output_to_check]
try:
grads_input = torch.autograd.grad(output_to_check, diff_input_list,
grads_output, allow_unused=True)
except RuntimeError:
warn_bc_breaking()
return fail_test((
'Expected backward function to handle undefined output grads. '
'Please look at "Notes about undefined output gradients" in '
'"tools/autograd/derivatives.yaml"'))
for gi, i in zip(grads_input, diff_input_list):
if (gi is not None) and (not gi.eq(0).all()):
warn_bc_breaking()
return fail_test((
'Expected all input grads to be undefined or zero when all output grads are undefined '
'or zero. Please look at "Notes about undefined output gradients" in '
'"tools/autograd/derivatives.yaml"'))
return True
# All backward functions must work properly if all output grads are undefined
outputs_to_check = [[
torch._C._functions.UndefinedGrad()(o) for o in _differentiable_outputs(func(*inputs))
# This check filters out Tensor-likes that aren't instances of Tensor.
if isinstance(o, torch.Tensor)
]]
# If there are multiple output grads, we should be able to undef one at a time without error
if len(outputs_to_check[0]) > 1:
for undef_grad_idx in range(len(outputs)):
output_to_check = _differentiable_outputs(func(*inputs))
outputs_to_check.append([
torch._C._functions.UndefinedGrad()(o) if idx == undef_grad_idx else o
for idx, o in enumerate(output_to_check)])
return all(check_undefined_grad_support(output) for output in outputs_to_check)
def _as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def _differentiable_outputs(x):
return tuple(o for o in _as_tuple(x) if o.requires_grad)
def get_notallclose_msg(analytical, numerical, output_idx, input_idx, error_str='') -> str:
return error_str + 'Jacobian mismatch for output %d with respect to input %d,\n' \
'numerical:%s\nanalytical:%s\n' % (output_idx, input_idx, numerical, analytical)
# Note [VarArg of Tensors]
# ~~~~~~~~~~~~~~~~~~~~~~~~
# 'func' accepts a vararg of tensors, which isn't expressable in the type system at the moment.
# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted,
# the '...' first argument of Callable can be replaced with VarArg(Tensor).
# For now, we permit any input.
"""
const_pass.py - AST pass that collects constants.
Immutable string constants like 'new Str("foo")' are moved to the top level of
the generated C++ program for efficiency.
"""
import json
from typing import overload, Union, Optional, Any, Dict, List
from mypy.visitor import ExpressionVisitor, StatementVisitor
from mypy.nodes import (
Expression, Statement, ExpressionStmt, StrExpr, ComparisonExpr, NameExpr,
MemberExpr, IntExpr)
from mypy.types import Type
from crash import catch_errors
from util import log
import format_strings
T = None # TODO: Make it type check?
class UnsupportedException(Exception):
pass
class Collect(ExpressionVisitor[T], StatementVisitor[None]):
def __init__(self,
types: Dict[Expression, Type],
const_lookup: Dict[Expression, str],
const_code: List[str]):
self.types = types
self.const_lookup = const_lookup
self.const_code = const_code
self.unique_id = 0
self.indent = 0
def out(self, msg, *args):
ind_str = self.indent * ' '
if args:
msg = msg % args
self.const_code.append(msg)
#
# COPIED from IRBuilder
#
@overload
def accept(self, node: Expression) -> T: ...
@overload
def accept(self, node: Statement) -> None: ...
def accept(self, node: Union[Statement, Expression]) -> Optional[T]:
with catch_errors(self.module_path, node.line):
if isinstance(node, Expression):
try:
res = node.accept(self)
#res = self.coerce(res, self.node_type(node), node.line)
# If we hit an error during compilation, we want to
# keep trying, so we can produce more error
# messages. Generate a temp of the right type to keep
# from causing more downstream trouble.
except UnsupportedException:
res = self.alloc_temp(self.node_type(node))
return res
else:
try:
node.accept(self)
except UnsupportedException:
pass
return None
def log(self, msg, *args):
if 0: # quiet
ind_str = self.indent * ' '
log(ind_str + msg, *args)
# Not in superclasses:
def visit_mypy_file(self, o: 'mypy.nodes.MypyFile') -> T:
# Skip some stdlib stuff. A lot of it is brought in by 'import
# typing'.
if o.fullname() in (
'__future__', 'sys', 'types', 'typing', 'abc', '_ast', 'ast',
'_weakrefset', 'collections', 'cStringIO', 're', 'builtins'):
            # These modules are special; their contents are currently all
# built-in primitives.
return
self.module_path = o.path
self.indent += 1
for node in o.defs:
# skip module docstring
if isinstance(node, ExpressionStmt) and isinstance(node.expr, StrExpr):
continue
self.accept(node)
self.indent -= 1
# LITERALS
def visit_int_expr(self, o: 'mypy.nodes.IntExpr') -> T:
self.log('IntExpr %d', o.value)
def visit_str_expr(self, o: 'mypy.nodes.StrExpr') -> T:
# - Need new Str() everywhere because "foo" doesn't match Str* :-(
id_ = 'str%d' % self.unique_id
raw_string = format_strings.DecodeMyPyString(o.value)
self.out('Str* %s = new Str(%s);', id_, json.dumps(raw_string))
self.unique_id += 1
self.const_lookup[o] = id_
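        # For example, a literal "foo" in the Python source is emitted as one
        # top-level C++ line, roughly: Str* str0 = new Str("foo");
        # and const_lookup maps this StrExpr node to the name 'str0'.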
def visit_bytes_expr(self, o: 'mypy.nodes.BytesExpr') -> T:
pass
def visit_unicode_expr(self, o: 'mypy.nodes.UnicodeExpr') -> T:
pass
def visit_float_expr(self, o: 'mypy.nodes.FloatExpr') -> T:
pass
def visit_complex_expr(self, o: 'mypy.nodes.ComplexExpr') -> T:
pass
# Expression
def visit_ellipsis(self, o: 'mypy.nodes.EllipsisExpr') -> T:
pass
def visit_star_expr(self, o: 'mypy.nodes.StarExpr') -> T:
pass
def visit_name_expr(self, o: 'mypy.nodes.NameExpr') -> T:
#self.log('NameExpr %s', o.name)
pass
def visit_member_expr(self, o: 'mypy.nodes.MemberExpr') -> T:
if o.expr:
self.accept(o.expr)
def visit_yield_from_expr(self, o: 'mypy.nodes.YieldFromExpr') -> T:
pass
def visit_yield_expr(self, o: 'mypy.nodes.YieldExpr') -> T:
pass
def visit_call_expr(self, o: 'mypy.nodes.CallExpr') -> T:
self.log('CallExpr')
self.accept(o.callee) # could be f() or obj.method()
self.indent += 1
for arg in o.args:
self.accept(arg)
# The type of each argument
#self.log(':: %s', self.types[arg])
self.indent -= 1
#self.log( 'args %s', o.args)
#self.log(' arg_kinds %s', o.arg_kinds)
#self.log(' arg_names %s', o.arg_names)
def visit_op_expr(self, o: 'mypy.nodes.OpExpr') -> T:
self.log('OpExpr')
self.indent += 1
self.accept(o.left)
self.accept(o.right)
self.indent -= 1
def visit_comparison_expr(self, o: 'mypy.nodes.ComparisonExpr') -> T:
self.log('ComparisonExpr')
self.log(' operators %s', o.operators)
self.indent += 1
for operand in o.operands:
self.indent += 1
self.accept(operand)
self.indent -= 1
self.indent -= 1
def visit_cast_expr(self, o: 'mypy.nodes.CastExpr') -> T:
pass
def visit_reveal_expr(self, o: 'mypy.nodes.RevealExpr') -> T:
pass
def visit_super_expr(self, o: 'mypy.nodes.SuperExpr') -> T:
pass
def visit_assignment_expr(self, o: 'mypy.nodes.AssignmentExpr') -> T:
pass
def visit_unary_expr(self, o: 'mypy.nodes.UnaryExpr') -> T:
# e.g. a[-1] or 'not x'
self.accept(o.expr)
def visit_list_expr(self, o: 'mypy.nodes.ListExpr') -> T:
# lists are MUTABLE, so we can't generate constants at the top level
# but we want to visit the string literals!
for item in o.items:
self.accept(item)
def visit_dict_expr(self, o: 'mypy.nodes.DictExpr') -> T:
pass
def visit_tuple_expr(self, o: 'mypy.nodes.TupleExpr') -> T:
for item in o.items:
self.accept(item)
def visit_set_expr(self, o: 'mypy.nodes.SetExpr') -> T:
pass
def visit_index_expr(self, o: 'mypy.nodes.IndexExpr') -> T:
self.accept(o.base)
self.accept(o.index)
def visit_type_application(self, o: 'mypy.nodes.TypeApplication') -> T:
pass
def visit_lambda_expr(self, o: 'mypy.nodes.LambdaExpr') -> T:
pass
def visit_list_comprehension(self, o: 'mypy.nodes.ListComprehension') -> T:
gen = o.generator # GeneratorExpr
left_expr = gen.left_expr
index_expr = gen.indices[0]
seq = gen.sequences[0]
cond = gen.condlists[0]
# We might use all of these, so collect constants.
self.accept(left_expr)
self.accept(index_expr)
self.accept(seq)
# Why does this cause a crash?
#self.accept(cond)
def visit_set_comprehension(self, o: 'mypy.nodes.SetComprehension') -> T:
pass
def visit_dictionary_comprehension(self, o: 'mypy.nodes.DictionaryComprehension') -> T:
pass
def visit_generator_expr(self, o: 'mypy.nodes.GeneratorExpr') -> T:
pass
def visit_slice_expr(self, o: 'mypy.nodes.SliceExpr') -> T:
if o.begin_index:
self.accept(o.begin_index)
if o.end_index:
self.accept(o.end_index)
if o.stride:
self.accept(o.stride)
def visit_conditional_expr(self, o: 'mypy.nodes.ConditionalExpr') -> T:
self.accept(o.cond)
self.accept(o.if_expr)
self.accept(o.else_expr)
def visit_backquote_expr(self, o: 'mypy.nodes.BackquoteExpr') -> T:
pass
def visit_type_var_expr(self, o: 'mypy.nodes.TypeVarExpr') -> T:
pass
def visit_type_alias_expr(self, o: 'mypy.nodes.TypeAliasExpr') -> T:
pass
def visit_namedtuple_expr(self, o: 'mypy.nodes.NamedTupleExpr') -> T:
pass
def visit_enum_call_expr(self, o: 'mypy.nodes.EnumCallExpr') -> T:
pass
def visit_typeddict_expr(self, o: 'mypy.nodes.TypedDictExpr') -> T:
pass
def visit_newtype_expr(self, o: 'mypy.nodes.NewTypeExpr') -> T:
pass
def visit__promote_expr(self, o: 'mypy.nodes.PromoteExpr') -> T:
pass
def visit_await_expr(self, o: 'mypy.nodes.AwaitExpr') -> T:
pass
def visit_temp_node(self, o: 'mypy.nodes.TempNode') -> T:
pass
def visit_assignment_stmt(self, o: 'mypy.nodes.AssignmentStmt') -> T:
# How does this get reached??
# Ah wtf, why is there no type on here!
# I thought we did parse_and_typecheck already?
if 1:
self.log('AssignmentStmt')
#self.log(' type %s', o.type)
#self.log(' unanalyzed_type %s', o.unanalyzed_type)
# NICE! Got the lvalue
for lval in o.lvalues:
try:
self.log(' lval %s :: %s', lval, self.types[lval])
except KeyError: # TODO: handle this
pass
self.accept(lval)
try:
r = self.types[o.rvalue]
except KeyError:
# This seems to only happen for Ellipsis, I guess in the abc module
#log(' NO TYPE FOR RVALUE: %s', o.rvalue)
pass
else:
#self.log(' %s :: %s', o.rvalue, r)
self.indent += 1
#self.log(' rvalue :: %s', r)
self.accept(o.rvalue)
self.indent -= 1
#self.log(' o.rvalue %s', o.rvalue)
def visit_for_stmt(self, o: 'mypy.nodes.ForStmt') -> T:
self.log('ForStmt')
#self.log(' index_type %s', o.index_type)
#self.log(' inferred_item_type %s', o.inferred_item_type)
#self.log(' inferred_iterator_type %s', o.inferred_iterator_type)
self.accept(o.index) # index var expression
self.accept(o.expr) # the thing being iterated over
self.accept(o.body)
if o.else_body:
raise AssertionError("can't translate for-else")
def visit_with_stmt(self, o: 'mypy.nodes.WithStmt') -> T:
self.accept(o.body)
def visit_del_stmt(self, o: 'mypy.nodes.DelStmt') -> T:
self.accept(o.expr)
def visit_func_def(self, o: 'mypy.nodes.FuncDef') -> T:
# got the type here, nice!
typ = o.type
self.log('FuncDef %s :: %s', o.name(), typ)
#self.log('%s', type(typ))
for t, name in zip(typ.arg_types, typ.arg_names):
self.log(' arg %s %s', t, name)
self.log(' ret %s', o.type.ret_type)
self.indent += 1
for arg in o.arguments:
# e.g. foo=''
if arg.initializer:
self.accept(arg.initializer)
# We can't use __str__ on these Argument objects? That seems like an
# oversight
#self.log('%r', arg)
self.log('Argument %s', arg.variable)
self.log(' type_annotation %s', arg.type_annotation)
# I think these are for default values
self.log(' initializer %s', arg.initializer)
self.log(' kind %s', arg.kind)
self.accept(o.body)
self.indent -= 1
def visit_overloaded_func_def(self, o: 'mypy.nodes.OverloadedFuncDef') -> T:
pass
def visit_class_def(self, o: 'mypy.nodes.ClassDef') -> T:
self.log('const_pass ClassDef %s', o.name)
for b in o.base_type_exprs:
self.log(' base_type_expr %s', b)
self.indent += 1
self.accept(o.defs)
self.indent -= 1
def visit_global_decl(self, o: 'mypy.nodes.GlobalDecl') -> T:
pass
def visit_nonlocal_decl(self, o: 'mypy.nodes.NonlocalDecl') -> T:
pass
def visit_decorator(self, o: 'mypy.nodes.Decorator') -> T:
pass
def visit_var(self, o: 'mypy.nodes.Var') -> T:
pass
# Module structure
def visit_import(self, o: 'mypy.nodes.Import') -> T:
pass
def visit_import_from(self, o: 'mypy.nodes.ImportFrom') -> T:
pass
def visit_import_all(self, o: 'mypy.nodes.ImportAll') -> T:
pass
# Statements
def visit_block(self, block: 'mypy.nodes.Block') -> T:
self.log('Block')
self.indent += 1
for stmt in block.body:
# Ignore things that look like docstrings
if isinstance(stmt, ExpressionStmt) and isinstance(stmt.expr, StrExpr):
continue
#log('-- %d', self.indent)
self.accept(stmt)
self.indent -= 1
def visit_expression_stmt(self, o: 'mypy.nodes.ExpressionStmt') -> T:
self.log('ExpressionStmt')
self.indent += 1
self.accept(o.expr)
self.indent -= 1
def visit_operator_assignment_stmt(self, o: 'mypy.nodes.OperatorAssignmentStmt') -> T:
self.log('OperatorAssignmentStmt')
def visit_while_stmt(self, o: 'mypy.nodes.WhileStmt') -> T:
self.log('WhileStmt')
self.accept(o.expr)
self.accept(o.body)
def visit_return_stmt(self, o: 'mypy.nodes.ReturnStmt') -> T:
self.log('ReturnStmt')
if o.expr:
self.accept(o.expr)
def visit_assert_stmt(self, o: 'mypy.nodes.AssertStmt') -> T:
pass
def visit_if_stmt(self, o: 'mypy.nodes.IfStmt') -> T:
# Copied from cppgen_pass.py
# Not sure why this wouldn't be true
assert len(o.expr) == 1, o.expr
# Omit anything that looks like if __name__ == ...
cond = o.expr[0]
if (isinstance(cond, ComparisonExpr) and
            isinstance(cond.operands[0], NameExpr) and
# ArnePlatteau/time_var_extr_index
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 11:22:11 2021
@author: arnep
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from methods_extremal_index import non_parametric_extremal_index, non_parametric_day_based
from methods_extremal_index import horizon_rolling_estimator, theta_sliding_blocks
from pathlib import Path
import pickle
from scipy.stats import invweibull
"""Simulator class"""
class theta_simulator():
"""
Object containing all necessary data for simulations of the performance of
extremal index estimators. The initialisation
creates the data on which the estimation will be done and decides the
criterion to compare performance. Then, by specifying an estimator with all
relevant parameters, the extremal index will be estimated for each sample.
The performance is then calculated based on the criterion function.
    Each object holds only one set of simulation model settings, but several
    estimators can be applied to the same data.
Initialisation parameters:
name: name of the object
n: the length of each simulation run
runs: number of simulation runs
func_gt: ground truth function for the extremal index (is the same for each run)
func_rand: function to generate random samples
func_crit: function which determines the criterion used for the comparison
of different options (standard MSE)
kwargs: other elements needed for one of the functions
"""
def __init__(self, name, n, runs, func_gt, func_rand, func_crit, **kwargs):
"""
initialise the object with data given, and make empty dicts for the
output from the simulations.
"""
self.__name__ = name
self.sim_df = func_rand(n, runs, **kwargs)
self.ground_truth = pd.DataFrame(func_gt(n, **kwargs))
self.res = pd.DataFrame()
self.nan = pd.DataFrame()
self.theta= dict()
self.avg_theta= dict()
self.gt_name = func_gt.__name__
self.rand_name = func_rand.__name__
self.crit_name = func_crit.__name__
self.func_crit = func_crit
self.est_kwargs = {}
self.est_func = {}
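        # Usage sketch (hypothetical helper names, not part of this module):
        #   sim = theta_simulator('demo', n=1000, runs=100, func_gt=my_gt,
        #                         func_rand=my_rand, func_crit=mean_squared_error)
        #   sim.simulate(non_parametric_extremal_index, name='np_estimator', ...)
        #   sim.plot_theta_gt('np_estimator')
        # where my_gt(n, **kwargs) returns the true extremal index path and
        # my_rand(n, runs, **kwargs) returns a DataFrame of simulated series.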
def append_sim_array(self, runs, func_rand, **kwargs):
"""
Add runs to the simulation array. Can be useful for doing a first simulation
on a few runs and then expanding it after EDA.
"""
#check to see if same data generating function is used
if func_rand.__name__ != self.rand_name:
print('warning: other function used than before')
#make array to append
append = func_rand(len(self.sim_df), runs, **kwargs)
#check the last index and rename the append array column names
last_idx = self.sim_df.columns.max()
append.columns = pd.RangeIndex(last_idx + 1,last_idx +1+ len(append.columns), step = 1 )
#merge the original simulation data with the new
self.sim_df = self.sim_df.merge(append, left_index=True, right_index=True)
def simulate(self, func_est, name =None , redo_from_scratch= False, **kwargs):
"""
Estimate the extremal index on each run. Can be interrupted and later
be continued.
func_est : estimator function used
name : name of the simulation
redo_from_scratch : if False, the estimation will continue from where
it was left. If True, it will be redone completely
kwargs : arguments for the estimator
"""
#set name of the simulation
if name is not None:
func_name = name
else:
func_name = func_est.__name__
#get the columns for which to estimate the extremal index, default all of them
sim_column = self.sim_df.columns
#if there is already a part for which it was estimated, these can be left out
if (func_name in self.est_func) and (not redo_from_scratch):
sim_column = self.sim_df.columns[~self.sim_df.columns.isin(self.theta[func_name])]
#in case the length of the simulation df doesn't match the results df,
#the results df is extended to match it.
if len(self.sim_df.columns) != len(self.res):
new_array = np.empty((len(sim_column), self.res.shape[1]))
new_array[:]= np.nan
new_df = pd.DataFrame(new_array, columns = self.res.columns)
self.res = self.res.append(new_df, ignore_index = True)
new_df = pd.DataFrame(new_array, columns = self.nan.columns)
self.nan = self.nan.append(new_df, ignore_index = True)
else:
#in case the length of the simulation df doesn't match the results df,
#the results df is extended to match it.
if len(self.sim_df.columns) != len(self.res):
new_array = np.empty((len(sim_column) - len(self.res), self.res.shape[1]))
new_array[:]= np.nan
new_df = pd.DataFrame(new_array, columns = self.res.columns)
self.res = self.res.append(new_df, ignore_index = True)
new_df = pd.DataFrame(new_array, columns = self.nan.columns)
self.nan = self.nan.append(new_df, ignore_index = True)
#add the name of the simulation to all result dataframes
self.res[func_name] = np.empty(len(sim_column))
self.nan[func_name] = np.empty(len(sim_column))
self.theta[func_name] = pd.DataFrame()
self.est_kwargs[func_name] = kwargs
self.est_func[func_name] = func_est
#simulation: go through all prespecified columns and estimate the extremal index
for col in sim_column:
self.theta[func_name][col] = func_est(self.sim_df[col],**kwargs)
#calculate the performance measures after simulation has been performed
self.calculate_criterion(func_name)
self.calc_stats()
self.avg_theta[func_name] = np.mean(self.theta[func_name], axis = 1)
def plot_theta_gt(self, name , color = 'grey', alpha = 0.3, fn = None,
save = False):
"""Plot of the sample average, the density and ground_truth."""
if save:
#set path.
Path('pictures/simulation/' + self.__name__).mkdir(parents=True, exist_ok=True)
#initialise plot
plt.figure(figsize=(10, 6), dpi=200)
#loop over all columns and plot them
for col in self.theta[name].columns:
plt.plot(self.theta[name][col], color = color, alpha = alpha)
#layout parameters
plt.grid(color = 'black')
plt.plot(self.ground_truth, color = 'darkred', label = 'Ground truth')
plt.plot(self.avg_theta[name], color = 'navy', label = 'Sample average')
plt.yticks(np.arange(0, 1.2, .2))
plt.legend()
plt.title(name)
#function to prevent problems with the filename (special characters)
if fn is None:
filename = 'pictures/simulation/' + self.__name__ + "/" + self.__name__ + ' ' + name + '.png'
filename = filename.replace(':', '')
filename = filename.replace('\\', '')
filename = filename.replace('$', '')
else:
filename = 'pictures/simulation/' + self.__name__ + "/" + self.__name__ + ' ' + fn + '.png'
filename = filename.replace(':', '')
filename = filename.replace('\\', '')
filename = filename.replace('$', '')
#save and plot
if save:
plt.savefig(filename)
print(filename)
plt.show()
def plot_gt(self, name, fn = None, save = False):
"""Plot of the ground_truth."""
if save:
#set path
Path('pictures/simulation/' + self.__name__).mkdir(parents=True, exist_ok=True)
#initialise plt and set the layout
plt.figure(figsize=(10, 6), dpi=200)
plt.grid(color = 'black')
plt.plot(self.ground_truth, color = 'darkred')
plt.yticks(np.arange(0, 1.2, .2))
plt.title("$theta$ for " + name)
#replace characters to prevent problems in the filename
name = name.replace(':', '')
name = name.replace('\\', '')
name = name.replace('$', '')
if fn is None:
filename = 'pictures/simulation/' + self.__name__ + "/" + self.__name__ + name +' ground_truth.png'
filename = filename.replace(':', '')
filename = filename.replace('\\', '')
filename = filename.replace('$', '')
else:
filename = 'pictures/simulation/' + self.__name__ + "/" + self.__name__ + fn +' ground_truth.png'
filename = filename.replace(':', '')
filename = filename.replace('\\', '')
filename = filename.replace('$', '')
#save and plot
if save:
plt.savefig(filename)
print(filename)
plt.show()
def plot_sim_example(self, name, fn = None, save = False):
if save:
#check path
Path('pictures/simulation/' + self.__name__).mkdir(parents=True, exist_ok=True)
#initialise picture and set layout things
plt.figure(figsize=(10, 6), dpi=200)
plt.grid(color = 'black')
plt.plot(self.sim_df[0])
plt.title(name + ': sample')
#replace characters to prevent problems in the filename
if fn is None:
filename ='pictures/simulation/' + self.__name__ + "/" + self.__name__ + name +' sim_example.png'
filename = filename.replace(':', '')
filename = filename.replace('\\', '')
filename = filename.replace('$', '')
else:
filename ='pictures/simulation/' + self.__name__ + "/" + self.__name__ + fn +' sim_example.png'
filename = filename.replace(':', '')
filename = filename.replace('\\', '')
filename = filename.replace('$', '')
#save and plot
if save:
plt.savefig(filename)
print(filename)
plt.show()
def calc_stats(self):
"""Description of basic statistics """
self.crit_stats = self.res.describe()
self.crit_stats.loc['Sample size', ]= len(self.sim_df)
def set_first_theta_nan(self, theta_name, number):
"""
Set the first `number` estimates to NaN. Used when the estimator is
undefined for an initial time period.
"""
self.theta[theta_name][:number] = np.nan
def calculate_criterion(self, theta_name):
"""
Calculate the criterion function.
"""
#get the columns and initialise result arrays
sim_column = self.sim_df.columns
res = np.empty(len(sim_column))
nnan = np.empty(len(sim_column))
#loop over each run and calculate the criterion. Also keep track of how
#many NaNs are in the sample
for col in sim_column:
res[col], nnan[col] = self.func_crit(self.ground_truth[0], self.theta[theta_name][col])
#add results and nans to the result dict
self.res[theta_name] = res
self.nan[theta_name] = nnan
def fill_calc(self):
"""
Run all estimations that have not been done yet. Used when the size of
the simulation array has increased, or when the process was
interrupted.
"""
#go through all estimators currently available
for est in self.est_func.keys():
print(self.est_func[est].__name__)
#simulate whatever hasn't been simulated yet.
self.simulate(self.est_func[est], est, **self.est_kwargs[est])
#calculate criterion and statistics
self.calculate_criterion(est)
self.calc_stats()
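#Illustrative usage sketch (not part of the original module). It assumes the
#caller supplies a ground-truth function, a sampling function and a criterion
#function with the signatures expected by theta_simulator, plus an estimator
#and its keyword arguments; the run counts below are placeholder values.
def example_theta_study(func_gt, func_rand, func_crit, func_est, **est_kwargs):
    """Run a small simulation study and return the criterion summary table."""
    #1000 observations per run, 50 independent runs
    sim = theta_simulator('example', 1000, 50, func_gt, func_rand, func_crit)
    #estimate the extremal index on every run with the chosen estimator
    sim.simulate(func_est, name=func_est.__name__, **est_kwargs)
    #simulate() already computed the criterion values and summary statistics
    return sim.crit_stats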
"""Functions to determine the ground truth and random values"""
def random_ARt(n, runs, dof, phi ):
"""returns an array of random samples, generated by an AR(1) model with
student-t distributed errors
PARAMETERS:
n : number of rows
| |
25*m.b863 <= 0)
m.c684 = Constraint(expr= m.x312 - 22*m.b864 <= 0)
m.c685 = Constraint(expr= m.x313 - 22*m.b865 <= 0)
m.c686 = Constraint(expr= m.x314 - 25*m.b866 <= 0)
m.c687 = Constraint(expr= m.x315 - 22*m.b867 <= 0)
m.c688 = Constraint(expr= m.x316 - 22*m.b868 <= 0)
m.c689 = Constraint(expr= m.x317 - 25*m.b869 <= 0)
m.c690 = Constraint(expr= m.x318 - 22*m.b870 <= 0)
m.c691 = Constraint(expr= m.x319 - 22*m.b871 <= 0)
m.c692 = Constraint(expr= m.x332 - 25*m.b872 <= 0)
m.c693 = Constraint(expr= m.x333 - 22*m.b873 <= 0)
m.c694 = Constraint(expr= m.x334 - 22*m.b874 <= 0)
m.c695 = Constraint(expr= m.x335 - 25*m.b875 <= 0)
m.c696 = Constraint(expr= m.x336 - 22*m.b876 <= 0)
m.c697 = Constraint(expr= m.x337 - 22*m.b877 <= 0)
m.c698 = Constraint(expr= m.x338 - 25*m.b878 <= 0)
m.c699 = Constraint(expr= m.x339 - 22*m.b879 <= 0)
m.c700 = Constraint(expr= m.x340 - 22*m.b880 <= 0)
m.c701 = Constraint(expr= m.x341 - 25*m.b881 <= 0)
m.c702 = Constraint(expr= m.x342 - 22*m.b882 <= 0)
m.c703 = Constraint(expr= m.x343 - 22*m.b883 <= 0)
m.c704 = Constraint(expr= m.x320 - 25*m.b884 <= 0)
m.c705 = Constraint(expr= m.x321 - 22*m.b885 <= 0)
m.c706 = Constraint(expr= m.x322 - 22*m.b886 <= 0)
m.c707 = Constraint(expr= m.x323 - 25*m.b887 <= 0)
m.c708 = Constraint(expr= m.x324 - 22*m.b888 <= 0)
m.c709 = Constraint(expr= m.x325 - 22*m.b889 <= 0)
m.c710 = Constraint(expr= m.x326 - 25*m.b890 <= 0)
m.c711 = Constraint(expr= m.x327 - 22*m.b891 <= 0)
m.c712 = Constraint(expr= m.x328 - 22*m.b892 <= 0)
m.c713 = Constraint(expr= m.x329 - 25*m.b893 <= 0)
m.c714 = Constraint(expr= m.x330 - 22*m.b894 <= 0)
m.c715 = Constraint(expr= m.x331 - 22*m.b895 <= 0)
m.c716 = Constraint(expr= m.x344 - 24*m.b884 <= 0)
m.c717 = Constraint(expr= m.x345 - 21*m.b885 <= 0)
m.c718 = Constraint(expr= m.x346 - 20*m.b886 <= 0)
m.c719 = Constraint(expr= m.x347 - 24*m.b887 <= 0)
m.c720 = Constraint(expr= m.x348 - 21*m.b888 <= 0)
m.c721 = Constraint(expr= m.x349 - 20*m.b889 <= 0)
m.c722 = Constraint(expr= m.x350 - 24*m.b890 <= 0)
m.c723 = Constraint(expr= m.x351 - 21*m.b891 <= 0)
m.c724 = Constraint(expr= m.x352 - 20*m.b892 <= 0)
m.c725 = Constraint(expr= m.x353 - 24*m.b893 <= 0)
m.c726 = Constraint(expr= m.x354 - 21*m.b894 <= 0)
m.c727 = Constraint(expr= m.x355 - 20*m.b895 <= 0)
m.c728 = Constraint(expr= m.x356 - 24*m.b896 <= 0)
m.c729 = Constraint(expr= m.x357 - 21*m.b897 <= 0)
m.c730 = Constraint(expr= m.x358 - 20*m.b898 <= 0)
m.c731 = Constraint(expr= m.x359 - 24*m.b899 <= 0)
m.c732 = Constraint(expr= m.x360 - 21*m.b900 <= 0)
m.c733 = Constraint(expr= m.x361 - 20*m.b901 <= 0)
m.c734 = Constraint(expr= m.x362 - 24*m.b902 <= 0)
m.c735 = Constraint(expr= m.x363 - 21*m.b903 <= 0)
m.c736 = Constraint(expr= m.x364 - 20*m.b904 <= 0)
m.c737 = Constraint(expr= m.x365 - 24*m.b905 <= 0)
m.c738 = Constraint(expr= m.x366 - 21*m.b906 <= 0)
m.c739 = Constraint(expr= m.x367 - 20*m.b907 <= 0)
m.c740 = Constraint(expr= m.x380 - 30*m.b896 <= 0)
m.c741 = Constraint(expr= m.x381 - 25*m.b897 <= 0)
m.c742 = Constraint(expr= m.x382 - 21*m.b898 <= 0)
m.c743 = Constraint(expr= m.x383 - 30*m.b899 <= 0)
m.c744 = Constraint(expr= m.x384 - 25*m.b900 <= 0)
m.c745 = Constraint(expr= m.x385 - 21*m.b901 <= 0)
m.c746 = Constraint(expr= m.x386 - 30*m.b902 <= 0)
m.c747 = Constraint(expr= m.x387 - 25*m.b903 <= 0)
m.c748 = Constraint(expr= m.x388 - 21*m.b904 <= 0)
m.c749 = Constraint(expr= m.x389 - 30*m.b905 <= 0)
m.c750 = Constraint(expr= m.x390 - 25*m.b906 <= 0)
m.c751 = Constraint(expr= m.x391 - 21*m.b907 <= 0)
m.c752 = Constraint(expr= m.x368 - 30*m.b908 <= 0)
m.c753 = Constraint(expr= m.x369 - 25*m.b909 <= 0)
m.c754 = Constraint(expr= m.x370 - 21*m.b910 <= 0)
m.c755 = Constraint(expr= m.x371 - 30*m.b911 <= 0)
m.c756 = Constraint(expr= m.x372 - 25*m.b912 <= 0)
m.c757 = Constraint(expr= m.x373 - 21*m.b913 <= 0)
m.c758 = Constraint(expr= m.x374 - 30*m.b914 <= 0)
m.c759 = Constraint(expr= m.x375 - 25*m.b915 <= 0)
m.c760 = Constraint(expr= m.x376 - 21*m.b916 <= 0)
m.c761 = Constraint(expr= m.x377 - 30*m.b917 <= 0)
m.c762 = Constraint(expr= m.x378 - 25*m.b918 <= 0)
m.c763 = Constraint(expr= m.x379 - 21*m.b919 <= 0)
m.c764 = Constraint(expr= m.x260 - 10*m.b824 <= 0)
m.c765 = Constraint(expr= m.x261 - 10*m.b825 <= 0)
m.c766 = Constraint(expr= m.x262 - 10*m.b826 <= 0)
m.c767 = Constraint(expr= m.x263 - 10*m.b827 <= 0)
m.c768 = Constraint(expr= m.x264 - 10*m.b828 <= 0)
m.c769 = Constraint(expr= m.x265 - 10*m.b829 <= 0)
m.c770 = Constraint(expr= m.x266 - 50*m.b830 <= 0)
m.c771 = Constraint(expr= m.x267 - 50*m.b831 <= 0)
m.c772 = Constraint(expr= m.x268 - 50*m.b832 <= 0)
m.c773 = Constraint(expr= m.x269 - 50*m.b833 <= 0)
m.c774 = Constraint(expr= m.x270 - 50*m.b834 <= 0)
m.c775 = Constraint(expr= m.x271 - 50*m.b835 <= 0)
m.c776 = Constraint(expr= m.x272 + m.x284 - 40*m.b836 <= 0)
m.c777 = Constraint(expr= m.x273 + m.x285 - 40*m.b837 <= 0)
m.c778 = Constraint(expr= m.x274 + m.x286 - 40*m.b838 <= 0)
m.c779 = Constraint(expr= m.x275 + m.x287 - 40*m.b839 <= 0)
m.c780 = Constraint(expr= m.x276 + m.x288 - 40*m.b840 <= 0)
m.c781 = Constraint(expr= m.x277 + m.x289 - 40*m.b841 <= 0)
m.c782 = Constraint(expr= m.x278 + m.x290 - 60*m.b842 <= 0)
m.c783 = Constraint(expr= m.x279 + m.x291 - 60*m.b843 <= 0)
m.c784 = Constraint(expr= m.x280 + m.x292 - 60*m.b844 <= 0)
m.c785 = Constraint(expr= m.x281 + m.x293 - 60*m.b845 <= 0)
m.c786 = Constraint(expr= m.x282 + m.x294 - 60*m.b846 <= 0)
m.c787 = Constraint(expr= m.x283 + m.x295 - 60*m.b847 <= 0)
m.c788 = Constraint(expr= m.x296 - 15*m.b848 <= 0)
m.c789 = Constraint(expr= m.x297 - 15*m.b849 <= 0)
m.c790 = Constraint(expr= m.x298 - 15*m.b850 <= 0)
m.c791 = Constraint(expr= m.x299 - 15*m.b851 <= 0)
m.c792 = Constraint(expr= m.x300 - 15*m.b852 <= 0)
m.c793 = Constraint(expr= m.x301 - 15*m.b853 <= 0)
m.c794 = Constraint(expr= m.x302 - 25*m.b854 <= 0)
m.c795 = Constraint(expr= m.x303 - 25*m.b855 <= 0)
m.c796 = Constraint(expr= m.x304 - 25*m.b856 <= 0)
m.c797 = Constraint(expr= m.x305 - 25*m.b857 <= 0)
m.c798 = Constraint(expr= m.x306 - 25*m.b858 <= 0)
m.c799 = Constraint(expr= m.x307 - 25*m.b859 <= 0)
m.c800 = Constraint(expr= m.x308 - 15*m.b860 <= 0)
m.c801 = Constraint(expr= m.x309 - 15*m.b861 <= 0)
m.c802 = Constraint(expr= m.x310 - 15*m.b862 <= 0)
m.c803 = Constraint(expr= m.x311 - 15*m.b863 <= 0)
m.c804 = Constraint(expr= m.x312 - 15*m.b864 <= 0)
m.c805 = Constraint(expr= m.x313 - 15*m.b865 <= 0)
m.c806 = Constraint(expr= m.x314 - 20*m.b866 <= 0)
m.c807 = Constraint(expr= m.x315 - 20*m.b867 <= 0)
m.c808 = Constraint(expr= m.x316 - 20*m.b868 <= 0)
m.c809 = Constraint(expr= m.x317 - 20*m.b869 <= 0)
m.c810 = Constraint(expr= m.x318 - 20*m.b870 <= 0)
m.c811 = Constraint(expr= m.x319 - 20*m.b871 <= 0)
m.c812 = Constraint(expr= m.x332 - 10*m.b872 <= 0)
m.c813 = Constraint(expr= m.x333 - 10*m.b873 <= 0)
m.c814 = Constraint(expr= m.x334 - 10*m.b874 <= 0)
m.c815 = Constraint(expr= m.x335 - 10*m.b875 <= 0)
m.c816 = Constraint(expr= m.x336 - 10*m.b876 <= 0)
m.c817 = Constraint(expr= m.x337 - 10*m.b877 <= 0)
m.c818 = Constraint(expr= m.x338 - 20*m.b878 <= 0)
m.c819 = Constraint(expr= m.x339 - 20*m.b879 <= 0)
m.c820 = Constraint(expr= m.x340 - 20*m.b880 <= 0)
m.c821 = Constraint(expr= m.x341 - 20*m.b881 <= 0)
m.c822 = Constraint(expr= m.x342 - 20*m.b882 <= 0)
m.c823 = Constraint(expr= m.x343 - 20*m.b883 <= 0)
m.c824 = Constraint(expr= m.x320 + m.x344 - 20*m.b884 <= 0)
m.c825 = Constraint(expr= m.x321 + m.x345 - 20*m.b885 <= 0)
m.c826 = Constraint(expr= m.x322 + m.x346 - 20*m.b886 <= 0)
m.c827 = Constraint(expr= m.x323 + m.x347 - 20*m.b887 <= 0)
m.c828 = Constraint(expr= m.x324 + m.x348 - 20*m.b888 <= 0)
m.c829 = Constraint(expr= m.x325 + m.x349 - 20*m.b889 <= 0)
m.c830 = Constraint(expr= m.x326 + m.x350 - 55*m.b890 <= 0)
m.c831 = Constraint(expr= m.x327 + m.x351 - 55*m.b891 <= 0)
m.c832 = Constraint(expr= m.x328 + m.x352 - 55*m.b892 <= 0)
m.c833 = Constraint(expr= m.x329 + m.x353 - 55*m.b893 <= 0)
m.c834 = Constraint(expr= m.x330 + m.x354 - 55*m.b894 <= 0)
m.c835 = Constraint(expr= m.x331 + m.x355 - 55*m.b895 <= 0)
m.c836 = Constraint(expr= m.x356 + m.x380 - 25*m.b896 <= 0)
m.c837 = Constraint(expr= m.x357 + m.x381 - 25*m.b897 <= 0)
m.c838 = Constraint(expr= m.x358 + m.x382 - 25*m.b898 <= 0)
m.c839 = Constraint(expr= m.x359 + m.x383 - 25*m.b899 <= 0)
m.c840 = Constraint(expr= m.x360 + m.x384 - 25*m.b900 <= 0)
m.c841 = Constraint(expr= m.x361 + m.x385 - 25*m.b901 <= 0)
m.c842 = Constraint(expr= m.x362 + m.x386 - 50*m.b902 <= 0)
m.c843 = Constraint(expr= m.x363 + m.x387 - 50*m.b903 <= 0)
m.c844 = Constraint(expr= m.x364 + m.x388 - 50*m.b904 <= 0)
m.c845 = Constraint(expr= m.x365 + m.x389 - 50*m.b905 <= 0)
m.c846 = Constraint(expr= m.x366 + m.x390 - 50*m.b906 <= 0)
m.c847 = Constraint(expr= m.x367 + m.x391 - 50*m.b907 <= 0)
m.c848 = Constraint(expr= m.x368 - 15*m.b908 <= 0)
m.c849 = Constraint(expr= m.x369 - 15*m.b909 <= 0)
m.c850 = Constraint(expr= m.x370 - 15*m.b910 <= 0)
m.c851 = Constraint(expr= m.x371 - 15*m.b911 <= 0)
m.c852 = Constraint(expr= m.x372 - 15*m.b912 <= 0)
m.c853 = Constraint(expr= m.x373 - 15*m.b913 <= 0)
m.c854 = Constraint(expr= m.x374 - 35*m.b914 <= 0)
m.c855 = Constraint(expr= m.x375 - 35*m.b915 <= 0)
m.c856 = | |
from cleverhans.attacks import FastGradientMethod
from cleverhans.attacks_tf import fgm
import tensorflow as tf
import cleverhans.utils as utils
import cleverhans.utils_tf as utils_tf
import collections
from cleverhans.model import Model, CallableModelWrapper
import numpy as np
import warnings
class MyFastGradientMethod(FastGradientMethod):
def __init__(self, model, back='tf', sess=None):
"""
Create a FastGradientMethod instance.
Note: the model parameter should be an instance of the
cleverhans.model.Model abstraction provided by CleverHans.
"""
super(FastGradientMethod, self).__init__(model, back, sess)
self.feedable_kwargs = {'eps': np.float32,
'y': np.float32,
'y_target': np.float32,
'clip_min': np.float32,
'clip_max': np.float32}
self.structural_kwargs = ['ord', 'features', 'pos_grad']
self.prob = {}
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'probs')
def printTensorValue(self, name, feed_dict):
tensor = tf.get_default_graph().get_tensor_by_name(name)
result = self.sess.run(tensor,feed_dict)
print(name+': '+str(result))
def getTensor(self, name, feed_dict):
tensor = tf.get_default_graph().get_tensor_by_name(name)
return self.sess.run(tensor,feed_dict)
def getProb(self,hashKey):
if hashKey not in self.prob:
x, new_kwargs, x_adv = self.graphs[hashKey]
self.prob[hashKey] = self.model.get_probs(x)
return self.prob[hashKey]
def generate_adv(self, x_val, maxiter, **kwargs):
"""
Go along the gradient until adversarial examples are found or the number of iterations have exceeded maxiter
:param x_val: A NumPy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A NumPy array holding the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
if self.sess is None:
raise ValueError("Cannot use `generate_np` when no `sess` was"
" provided")
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict((k, v) for k, v in list(kwargs.items())
if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = dict((k, v) for k, v in list(kwargs.items())
if k in self.feedable_kwargs)
if len(fixed) + len(feedable) < len(kwargs):
warnings.warn("Supplied extra keyword arguments that are not "
"used in the graph computation. They have been "
"ignored.")
if not all(isinstance(value, collections.Hashable)
for value in list(fixed.values())):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
raise ValueError('Non hashable params!')
else:
# create a unique key for this set of fixed paramaters
hash_key = tuple(sorted(fixed.items()))
if hash_key not in self.graphs or hash_key is None:
self.construct_graph(fixed, feedable, x_val, hash_key)
x, new_kwargs, x_adv = self.graphs[hash_key]
feed_dict = {}
for name in feedable:
feed_dict[new_kwargs[name]] = feedable[name]
preds = self.getProb(hash_key)
if 'y_target' in new_kwargs:
expected_labels = new_kwargs['y_target']
else:
expected_labels = None
if expected_labels is not None:
expected_labels = np.argmax(self.sess.run(expected_labels, feed_dict), axis=1)
feed_dict[x] = x_val
old_probs = self.sess.run(preds, feed_dict)
orig_labs = np.argmax(old_probs, axis=1)
new_labs_mult = orig_labs.copy()
adv_examples = x_val.copy()
last_adv = x_val
iter_count = 0
while iter_count < maxiter:
iter_count+=1
feed_dict[x] = adv_examples
new_x_vals = self.sess.run(x_adv, feed_dict)
feed_dict[x] = new_x_vals
new_probs = self.sess.run(preds, feed_dict)
delta = new_x_vals - adv_examples
#print(delta)
#print 'gradient: '
#print self.sess.run(grad, feed_dict)
#print 'New probaiblity: '
#print new_probs
new_labs = np.argmax(new_probs, axis=1)
if expected_labels is None:
I, = np.where(orig_labs == new_labs_mult)
else:
I, = np.where(expected_labels != new_labs_mult)
if I.size == 0:
break
if np.array_equal(last_adv,new_x_vals):
raise ValueError('Gradient 0. Something wrong or hit the corner case.')
# update labels
last_adv = new_x_vals
new_labs_mult[I] = new_labs[I]
adv_examples[I] = new_x_vals[I]
if iter_count >= maxiter:
print("Fail to find an adversarial example!")
return None
return adv_examples
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param y: (optional) A tensor with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
return myfgm(x, self.model.get_probs(x), y=labels, eps=self.eps,
ord=self.ord, features = self.features, clip_min=self.clip_min,
clip_max=self.clip_max,
targeted=(self.y_target is not None), pos_grad=self.pos_grad)
def parse_params(self, eps=0.3, ord=np.inf, features = None, y=None, y_target=None,
clip_min=None, clip_max=None, pos_grad = False, **kwargs):
"""
Take in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param y: (optional) A tensor with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param y_target: (optional) A tensor with the labels to target. Leave
y_target=None if y is also set. Labels should be
one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.ord = ord
self.features = features
self.y = y
self.y_target = y_target
self.clip_min = clip_min
self.clip_max = clip_max
self.pos_grad = pos_grad
if self.y is not None and self.y_target is not None:
raise ValueError("Must not set both y and y_target")
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, int(1), int(2)]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
return True
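# Illustrative usage sketch (not part of the original file). `model_fn` is
# assumed to be a cleverhans.model.Model (or a callable returning class
# probabilities) and `sess`, `x_batch` and `target_onehot` are objects the
# caller already has; every name and value below is a placeholder.
def run_targeted_attack(model_fn, sess, x_batch, target_onehot,
                        eps=0.05, maxiter=100):
    """Return adversarial examples for x_batch, or None if the attack fails."""
    attack = MyFastGradientMethod(model_fn, back='tf', sess=sess)
    # generate_adv() keeps stepping along the (optionally feature-masked)
    # gradient until the target class is reached or maxiter is exceeded
    return attack.generate_adv(x_batch, maxiter, eps=eps,
                               y_target=target_onehot,
                               clip_min=0.0, clip_max=1.0)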
def myfgm(x, preds, y=None, eps=0.3, ord=np.inf, features = None,
clip_min=None, clip_max=None,
targeted=False, pos_grad=False):
"""
TensorFlow implementation of the Fast Gradient Method.
:param x: the input placeholder
:param preds: the model's output tensor (the attack expects the
probabilities, i.e., the output of the softmax)
:param y: (optional) A placeholder for the model labels. If targeted
is true, then provide the target label. Otherwise, only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param eps: the epsilon (input variation parameter)
:param ord: (optional) Order of the norm (mimics NumPy).
Possible values: np.inf, 1 or 2.
:param clip_min: Minimum float value for adversarial example components
:param clip_max: Maximum float value for adversarial example components
:param targeted: Is the attack targeted or untargeted? Untargeted, the
default, will try to make the label incorrect. Targeted
will instead try to move in the direction of being more
like y.
:return: a tensor for the adversarial example
"""
if features is None:
features = np.ones(x.get_shape()[1:])
features = tf.constant(features, dtype=x.dtype, name = 'feature_clipping')
if y is None:
# Using model predictions as ground truth to avoid label leaking
preds_max = tf.reduce_max(preds, 1, keep_dims=True)
y = tf.to_float(tf.equal(preds, preds_max))
y = tf.stop_gradient(y)
y = y / tf.reduce_sum(y, 1, keep_dims=True)
# Compute loss
loss = utils_tf.model_loss(y, preds, mean=False)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, x, name = 'adv_gradient')
grad = tf.multiply(grad, features, name = 'feature_gradient')
if ord == np.inf:
# Take sign of gradient
normalized_grad = tf.sign(grad)
# The following line should not change the numerical results.
# It applies only because `normalized_grad` is the output of
# a `sign` op, which has zero derivative anyway.
# It should not be applied for the other norms, where the
# perturbation has a non-zero derivative.
normalized_grad = tf.stop_gradient(normalized_grad)
elif ord == 1:
#red_ind = list(xrange(1, len(x.get_shape())))
#normalized_grad = grad / tf.reduce_sum(tf.abs(grad),
#reduction_indices=red_ind,
#keep_dims=True)
grad_shape = tf.shape(grad, name='gradShape')
second_dim = tf.reduce_prod(grad_shape[1:], name='secDim')
#second_dim = 28 * 28
grad1 = tf.reshape(grad, [grad_shape[0], second_dim], | |
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_ldgc(
mesh,
degree=1,
is_multiplier_continuous=True
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
primal_family = "DQ" if use_quads else "DG"
V = FunctionSpace(mesh, primal_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
p_boundaries = Constant(0.0)
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
s = Constant(-1.0)
beta = Constant(32.0)
h = CellDiameter(mesh)
h_avg = avg(h)
# Classical term
a = dot(grad(p), grad(q)) * dx
L = f * q * dx
# Hybridization terms
a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# Boundary terms
# a += -dot(vel_projected, n) * v * ds # How to set this bc??
# a += (beta / h) * (p- p_boundaries) * q * ds # is this necessary?
L += s * dot(grad(q), n) * p_boundaries * ds
F = a - L
a_form = lhs(F)
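# Static condensation: with the bilinear form blocked as [[A00, A01], [A10, A11]]
# (primal unknowns first, trace multiplier second), the lines below form the
# Schur complement S = A11 - A10 * A00^{-1} * A01, so only the globally coupled
# system for the multiplier lambda_h is assembled and analysed.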
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_lsh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
solution = Function(W)
u, p, lambda_h = split(solution)
# u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
exact_solution, sigma_e, exact_trace = calculate_exact_solution_with_trace(
mesh,
pressure_family,
velocity_family,
degree + 3,
degree + 3
)
# Forcing function
f = div(-grad(exact_solution))
f = Function(V).interpolate(f)
# BCs
p_exact = Constant(0)
# bcs = DirichletBC(W.sub(2), exact_trace, "on_boundary")
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
# beta = beta_0 / h
beta = beta_0
beta_avg = beta_0 / h("+")
# Stabilizing parameter
# delta_0 = Constant(1)
# delta_1 = Constant(1)
# delta_2 = Constant(1)
# delta_3 = Constant(1)
# delta_4 = Constant(1)
# delta_5 = Constant(1)
delta = h * h
# delta = Constant(1)
LARGE_NUMBER = Constant(1e0)
# delta = 1 / h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = delta
delta_4 = delta #/ h
delta_5 = delta #/ h
# delta_5 = LARGE_NUMBER / h
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
v_hat = v + beta * (q - mu_h) * n
# Flux least-squares
# a = (
# (inner(u, v) - q * div(u) - p * div(v) + inner(grad(p), grad(q)))
# * delta_1
# * dx
# )
# # These terms below are unsymmetric
# a += delta_1("+") * jump(u_hat, n=n) * q("+") * dS
# a += delta_1 * dot(u_hat, n) * q * ds
# # a += delta_1 * dot(u, n) * q * ds
# # L = -delta_1 * dot(u_projected, n) * q * ds
# a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
# a += delta_1 * lambda_h * dot(v, n) * ds
# L = delta_1 * exact_solution * dot(v, n) * ds
# Flux Least-squares as in DG
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
# Classical mixed Darcy eq. first-order terms as stabilizing terms
a += delta_1 * (dot(u, v) - div(v) * p) * dx
a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
a += delta_1 * lambda_h * dot(v, n) * ds
# Mass balance least-square
a += delta_2 * div(u) * div(v) * dx
L = delta_2 * f * div(v) * dx
# Irrotational least-squares
a += delta_3 * inner(curl(u), curl(v)) * dx
# Hybridization terms
a += mu_h("+") * jump(u_hat, n=n) * dS
# a += mu_h * dot(u_hat, n) * ds
# L += mu_h * dot(sigma_e, n) * ds
a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# a += delta_4 * (exact_solution - lambda_h) * (q - mu_h) * ds
# Alternative primal
# a += delta_4("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
# a += delta_4 * (lambda_h - p) * (mu_h - q) * ds
# Flux
a += delta_5("+") * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds
# Alternative
# a += delta_5("+") * (dot(u_hat, n)("+") - dot(u, n)("+")) * (dot(v_hat, n)("+") - dot(v, n)("+")) * dS
# a += delta_5 * (dot(u_hat, n) - dot(u, n)) * (dot(v_hat, n) - dot(v, n)) * ds
# Weakly imposed BC from hybridization
# a += mu_h * (lambda_h - exact_trace) * ds
# a += mu_h * lambda_h * ds
# ###
# a += (
# delta_4 * (mu_h - q) * (lambda_h - exact_solution) * ds
# ) # maybe this is not a good way to impose BC, but this necessary
# a += (
# delta_4 * (q - mu_h) * (exact_solution - lambda_h) * ds
# ) # maybe this is not a good way to impose BC, but this necessary
# L += delta_1 * exact_solution * dot(v, n) * ds # study if this is a good BC imposition
F = a - L
params = {
"snes_type": "ksponly",
"mat_type": "matfree",
"pmat_type": "matfree",
"ksp_type": "preonly",
"pc_type": "python",
# Use the static condensation PC for hybridized problems
# and use a direct solve on the reduced system for lambda_h
"pc_python_type": "firedrake.SCPC",
"pc_sc_eliminate_fields": "0, 1",
"condensed_field": {
"ksp_type": "preonly",
"pc_type": "lu",
"pc_factor_mat_solver_type": "mumps",
},
}
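# In the options above, "pc_sc_eliminate_fields": "0, 1" condenses out the
# first two fields of W = U * V * T (the flux u and the pressure p)
# element-by-element, leaving the trace multiplier lambda_h as the only
# globally coupled unknown; the condensed trace system is factorised with MUMPS.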
problem = NonlinearVariationalProblem(F, solution, bcs=bcs)
# problem = NonlinearVariationalProblem(F, solution)
solver = NonlinearVariationalSolver(problem, solver_parameters=params)
solver.solve()
# Retrieving the solution
sigma_h, p_h, lambda_h = solution.split()
sigma_h.rename('Velocity', 'label')
p_h.rename('Pressure', 'label')
# Calculating L2-error for primal variable
p_error_L2 = errornorm(exact_solution, p_h, norm_type="L2")
# Calculating H1-error for primal variable
p_error_H1 = errornorm(exact_solution, p_h, norm_type="H1")
# Calculating L2-error for flux variable
sigma_error_L2 = errornorm(sigma_e, sigma_h, norm_type="L2")
# Calculating Hdiv-error for flux variable
sigma_error_Hdiv = errornorm(sigma_e, sigma_h, norm_type="Hdiv")
return p_error_L2, p_error_H1, sigma_error_L2, sigma_error_Hdiv
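# Illustrative helper (not in the original file): for errors measured on a
# sequence of refined meshes, the observed convergence rate between two
# consecutive levels is log(e_coarse / e_fine) / log(h_coarse / h_fine). It
# only assumes numpy, already imported as np elsewhere in this module.
def observed_convergence_rates(errors, mesh_sizes):
    """Return the convergence rates between consecutive refinement levels."""
    e = np.asarray(errors, dtype=float)
    h = np.asarray(mesh_sizes, dtype=float)
    return np.log(e[:-1] / e[1:]) / np.log(h[:-1] / h[1:])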
def compute_convergence_hp(
solver,
min_degree=1,
max_degree=4,
numel_xy=(2, 4, 8, 16, 32, 64, 128, 256),
quadrilateral=True,
name="",
**kwargs
):
computed_errors_dict = {
"Element": list(),
"Degree": list(),
"Cells": list(),
"Mesh size": list(),
"L2-error p": list(),
"H1-error p": list(),
"L2-error u": list(),
"Hdiv-error u": list(),
}
element_kind = "Quad" if quadrilateral else "Tri"
for degree in range(min_degree, max_degree):
p_errors_L2 = np.array([])
| |
from astropy.io import fits as apfts
from random import uniform as rdunf
from random import choice as rdchc
from random import random as rdrnd
from astropy import convolution as apcvl
from astropy.io import fits as apfts
from Fnc_Syn_Dir import *
from Fnc_Syn_Mth import *
from Fnc_Syn_Tbl import *
####Fnc_Syn_Syn####
def create_raw_image(n_pix_x,n_pix_y):
raw_image_zeros = np.zeros((n_pix_y,n_pix_x))
return raw_image_zeros
def add_stars2image(image_in,image_star,*args,**kwargs):
return image_in + image_star
def star_gen_prp(nx_sg,ny_sg,A_min_sg,A_max_sg,ofs_min_sg,ofs_max_sg,sigmax_min_sg,sigmax_max_sg,sigmay_min_sg,sigmay_max_sg,shape_sg,theta_sg,*args,**kwargs):
fixed_position_str = kwargs.get('fixed_position_str',False)
x_fxd_str = kwargs.get('x_fxd_str',(nx_sg / 2))
y_fxd_str = kwargs.get('y_fxd_str',(ny_sg / 2))
fixed_size_str = kwargs.get('fixed_size_str',False)
sx_fxd_str = kwargs.get('sx_fxd_str',10)
sy_fxd_str = kwargs.get('sy_fxd_str',10)
fixed_ampl_str = kwargs.get('fixed_ampl_str',False)
am_fxd_str = kwargs.get('am_fxd_str',1)
fixed_offset_str = kwargs.get('fixed_offset_str',False)
offset_fxd_str = kwargs.get('offset_fxd_str',0)
theta_cte_vl = kwargs.get('theta_cte_vl',0)
#position and angle
r = rdrnd() * nx_sg
theta_p = rdunf(0., (2. * np.pi))
# Compute position
if fixed_position_str == False:
x = (nx_sg / 2) + (r * np.cos(theta_p))
y = (ny_sg / 2) + (r * np.sin(theta_p))
elif fixed_position_str == True:
x = x_fxd_str
y = y_fxd_str
#Generate random parameters for the 2d gaussian
#Amplitude
if fixed_ampl_str == False:
am = rdunf(A_min_sg,A_max_sg)
elif fixed_ampl_str == True:
am = am_fxd_str
#Offset
if fixed_offset_str == False:
offset = rdunf(ofs_min_sg,ofs_max_sg)
elif fixed_offset_str == True:
offset = offset_fxd_str
#SIZE
if fixed_size_str == False:
sigma_x = rdunf(sigmax_min_sg,sigmax_max_sg)
sigma_y = rdunf(sigmay_min_sg,sigmay_max_sg)
elif fixed_size_str == True:
sigma_x = sx_fxd_str
sigma_y = sy_fxd_str
#SHAPE
if shape_sg == 'ellipse':
sigma_x = sigma_x
sigma_y = sigma_y
elif shape_sg == 'circular':
sigma_x = sigma_x
sigma_y = sigma_x
#ORIENTATION
if theta_sg =='random':
theta = rdunf(0., (2. * np.pi))
elif theta_sg =='value':
theta = np.deg2rad(theta_cte_vl)
return x,y,theta,am,offset,sigma_x,sigma_y
def create_star_image(nstrs_csi,nx_csi,ny_csi,A_min_csi,A_max_csi,ofs_min_csi,ofs_max_csi,sigmax_min_csi,sigmax_max_csi,sigmay_min_csi,sigmay_max_csi,shape_csi,theta_csi,*args,**kwargs):
fixed_position_str = kwargs.get('fixed_position_str',False)
x_fxd_str = kwargs.get('x_fxd_str',(nx_csi / 2))
y_fxd_str = kwargs.get('y_fxd_str',(ny_csi / 2))
fixed_size_str = kwargs.get('fixed_size_str',False)
sx_fxd_str = kwargs.get('sx_fxd_str' ,10)
sy_fxd_str = kwargs.get('sy_fxd_str' ,10)
fixed_ampl_str = kwargs.get('fixed_ampl_str',False)
am_fxd_str = kwargs.get('am_fxd_str' ,1)
fixed_offset_str = kwargs.get('fixed_offset_str',False)
offset_fxd_str = kwargs.get('offset_fxd_str' ,0)
wrt_otp_tbl_str = kwargs.get('wrt_otp_tbl_str',False)
theta_csi_vl = kwargs.get('theta_csi_vl',0)
star_0_cumulative = create_raw_image(nx,ny)
X = []
Y = []
T = []
A = []
AM = []
OS = []
SX = []
SY = []
TN = []
for j in range(nstrs_csi):
star_0 = create_raw_image(nx,ny)
star_outpout=star_gen_prp(
nx_sg = nx_csi ,ny_sg = ny_csi ,
A_min_sg = A_min_csi ,A_max_sg = A_max_csi ,
ofs_min_sg = ofs_min_csi ,ofs_max_sg = ofs_max_csi ,
sigmax_min_sg = sigmax_min_csi ,sigmax_max_sg = sigmax_max_csi,
sigmay_min_sg = sigmay_min_csi ,sigmay_max_sg = sigmay_max_csi,
shape_sg = shape_csi ,theta_sg = theta_csi , theta_cte_vl = theta_csi_vl,
fixed_position_str = fixed_position_str,x_fxd_str = x_fxd_str , y_fxd_str = y_fxd_str,
fixed_size_str = fixed_size_str ,sx_fxd_str = sx_fxd_str , sy_fxd_str = sy_fxd_str,
fixed_ampl_str = fixed_ampl_str ,am_fxd_str = am_fxd_str )
posx_g = star_outpout[0]
posy_g = star_outpout[1]
theta_g = star_outpout[2]
a_g = star_outpout[3]
ofs_g = 0
sigmax_g = star_outpout[5]
sigmay_g = star_outpout[6]
n_t_n_g = 0
count=0
while posx_g < 0 or posx_g > nx_csi or posy_g < 0 or posy_g > ny_csi:
star_outpout=star_gen_prp(
nx_sg = nx_csi ,ny_sg = ny_csi ,
A_min_sg = A_min_csi ,A_max_sg = A_max_csi ,
ofs_min_sg = ofs_min_csi ,ofs_max_sg = ofs_max_csi ,
sigmax_min_sg = sigmax_min_csi ,sigmax_max_sg = sigmax_max_csi,
sigmay_min_sg = sigmay_min_csi ,sigmay_max_sg = sigmay_max_csi,
shape_sg = shape_csi ,theta_sg = theta_csi , theta_cte_vl = theta_csi_vl,
fixed_position_str = fixed_position_str,x_fxd_str = x_fxd_str , y_fxd_str = y_fxd_str,
fixed_size_str = fixed_size_str ,sx_fxd_str = sx_fxd_str , sy_fxd_str = sy_fxd_str,
fixed_ampl_str = fixed_ampl_str ,am_fxd_str = am_fxd_str )
posx_g = star_outpout[0]
posy_g = star_outpout[1]
theta_g = star_outpout[2]
a_g = star_outpout[3]
ofs_g = 0
sigmax_g = star_outpout[5]
sigmay_g = star_outpout[6]
n_t_n_g = 0
count = count+1
else:
x = np.linspace(0, nx, nx)
y = np.linspace(0, ny, ny)
x, y = np.meshgrid(x, y)
#create data
star_array = func_2D_Gaussian_star((x, y), posx_g, posy_g, a_g, sigmax_g, sigmay_g, theta_g, ofs_g)
am_max_g = max(star_array)
star_image = (star_array.reshape(nx,ny))
star_0 = add_stars2image(star_0,star_image)
star_0_cumulative = add_stars2image(star_0,star_0_cumulative)
X.append(posx_g)
Y.append(posy_g)
T.append(theta_g)
A.append(a_g)
AM.append(am_max_g)
OS.append(ofs_g)
SX.append(sigmax_g)
SY.append(sigmay_g)
TN.append(n_t_n_g)
X = np.asarray(X[0])
Y = np.asarray(Y[0])
T = np.asarray(T[0])
A = np.asarray(A[0])
AM = np.asarray(AM[0])
OS = np.asarray(OS[0])
SX = np.asarray(SX[0])
SY = np.asarray(SY[0])
TN = np.asarray(TN[0])
if wrt_otp_tbl_str == True:
# Create Stars data
r = astropy.table.Table()
r['X '] = X
r['Y '] = Y
r['A '] = A
r['A_M']= AM
r['SX'] = SX
r['SY'] = SY
r['T '] = T
r['OS'] = OS
r['xN'] = TN
r.write('Objectlist_I_MC.csv', format='csv',overwrite=True)
r.write(output_table_obj, format='ascii.fixed_width_two_line',overwrite=True)
print 'Results containing stars data: ',output_table_obj
elif wrt_otp_tbl_str == False:
pass
return star_0_cumulative,X,Y,A,AM,SX,SY,T,OS,TN
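#Illustrative sketch (not part of the original module): composes the helpers
#above to place a single centred, circular Gaussian star on an empty image.
#The call to func_2D_Gaussian_star mirrors the one inside create_star_image;
#the pixel size, amplitude and width below are placeholder values.
def example_single_star_field(n_pix=64, amplitude=1.0, sigma=3.0):
    blank = create_raw_image(n_pix, n_pix)
    xg, yg = np.meshgrid(np.linspace(0, n_pix, n_pix), np.linspace(0, n_pix, n_pix))
    star_flat = func_2D_Gaussian_star((xg, yg), n_pix / 2., n_pix / 2.,
                                      amplitude, sigma, sigma, 0., 0.)
    return add_stars2image(blank, star_flat.reshape(n_pix, n_pix))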
def displ_fits(image,scale,colormap,*args,**kwargs):
image_data = apfts.getdata(image)
if scale =='norm':
plt.imshow(image_data, origin='lower',cmap=colormap)
elif scale =='log':
plt.imshow(image_data, origin='lower',cmap=colormap, norm=LogNorm()) #log scale
plt.colorbar()
plt.show()
def Create_Synthetic_Cube(x_cube_dim,y_cube_dim,z_cube_dim,krn_conv_noise,*args,**kwargs):
A_min_csi = kwargs.get('A_min_csi' ,A_min)
A_max_csi = kwargs.get('A_max_csi' ,A_max)
ofs_min_csi = kwargs.get('ofs_min_csi' ,ofs_min)
ofs_max_csi = kwargs.get('ofs_max_csi' ,ofs_max)
sigmax_min_csi = kwargs.get('sigmax_min_csi',sigmax_min)
sigmax_max_csi = kwargs.get('sigmax_max_csi',sigmax_max)
sigmay_min_csi = kwargs.get('sigmay_min_csi',sigmay_min)
sigmay_max_csi = kwargs.get('sigmay_max_csi',sigmay_max)
theta_csi = kwargs.get('theta_csi' ,Theta)
shape_csi = kwargs.get('shape_csi' ,starshape)
table_csi = kwargs.get('table_csi' ,tbl_strs_prp)
A_noise = kwargs.get('A_noise' ,1)
fixed_width_str = kwargs.get('fixed_width_str' ,True)
fixed_position_str = kwargs.get('fixed_position_str',True)
fixed_size_str = kwargs.get('fixed_size_str' ,True)
fixed_ampl_str = kwargs.get('fixed_ampl_str' ,True)
fixed_offset_str = kwargs.get('fixed_offset_str' ,True)
fixed_ampl_nse = kwargs.get('fixed_ampl_nse' ,True)
n_stars = kwargs.get('n_stars' ,1)
x_fxd_str = kwargs.get('x_fxd_str' ,(x_cube_dim / 2))
y_fxd_str = kwargs.get('y_fxd_str' ,(y_cube_dim / 2))
sx_fxd_str = kwargs.get('sx_fxd_str' ,5)
sy_fxd_str = kwargs.get('sy_fxd_str' ,5)
am_fxd_str = kwargs.get('am_fxd_str' ,1)
offset_fxd_str = kwargs.get('offset_fxd_str' ,0)
noise_fxd_str = kwargs.get('noise_fxd_str' ,1)
sgm_1d_str_fxd = kwargs.get('sgm_1d_str_fxd' ,1)
amp_star_gauss = kwargs.get('amp_star_gauss' ,True)
A_min_1dg_str = kwargs.get('A_min_1dg_str' ,1)
A_max_1dg_str = kwargs.get('A_max_1dg_str' ,1.1)
sigma_min_1dg_str = kwargs.get('sigma_min_1dg_str' ,fwhm2sigma(2))
sigma_max_1dg_str = kwargs.get('sigma_max_1dg_str' ,fwhm2sigma(3))
ofs_min_1dg_str = kwargs.get('ofs_min_1dg_str' ,0)
ofs_max_1dg_str = kwargs.get('ofs_max_1dg_str' ,0.1)
theta_vl_1dg_str = kwargs.get('theta_vl_1dg_str' ,0)
shape_str = kwargs.get('shape_str' ,'circle')
amp_nse_type = kwargs.get('amp_nse_type' ,'constant_u')
A_min_1dg_nse = kwargs.get('A_min_1dg_nse' ,1)
A_max_1dg_nse = kwargs.get('A_max_1dg_nse' ,1)
sigma_min_1dg_nse = kwargs.get('sigma_min_1dg_nse' ,1)
sigma_max_1dg_nse = kwargs.get('sigma_max_1dg_nse' ,1)
ofs_min_1dg_nse = kwargs.get('ofs_min_1dg_nse' ,0)
ofs_max_1dg_nse = kwargs.get('ofs_max_1dg_nse' ,0)
chn_wth_sze = kwargs.get('chn_wth_sze' ,channel_width)
amp_1d_nse_fxd = kwargs.get('amp_1d_nse_fxd' ,10)
cube_ofn_sfx = kwargs.get('cube_ofn_sfx' ,'')
write_step_fits = kwargs.get('write_step_fits' ,False)
nse_idp_all_slcs = kwargs.get('nse_idp_all_slcs' ,False)
dst_img_dir = kwargs.get('dst_img_dir' ,None)
img_stat_hst_f = []
hdu = []
if fixed_ampl_nse == False and amp_nse_type == 'gauss':
sp_axs = np.arange(-math.floor(z_cube_dim/2.),math.floor(z_cube_dim/2.)+1,1)
amp_1d_nse = rdunf(A_min_1dg_nse ,A_max_1dg_nse)
sgm_1d_nse = rdunf(sigma_min_1dg_nse/2,sigma_max_1dg_nse/2)
ofs_1d_nse = rdunf(ofs_min_1dg_nse ,ofs_max_1dg_nse)
gas_amp_nse = func_1D_Gaussian(sp_axs,ofs_1d_nse,amp_1d_nse,sgm_1d_nse)
amp_nse_2b_svd = max(gas_amp_nse)
elif fixed_ampl_nse == False and amp_nse_type == 'uniform':
sp_axs = np.arange(-math.floor(z_cube_dim/2.),math.floor(z_cube_dim/2.)+1,1)
uas_amp_nse = np.random.uniform(A_min_1dg_nse,A_max_1dg_nse,sp_axs.shape)
amp_nse_2b_svd = max(uas_amp_nse)
elif fixed_ampl_nse == False and amp_nse_type=='constant_u':
amp_nse_2b_svd = rdunf(A_min_1dg_nse,A_max_1dg_nse)
elif fixed_ampl_nse == True and (amp_nse_type =='constant' or amp_nse_type == 'constant_u'):
amp_nse_2b_svd = amp_1d_nse_fxd
elif fixed_ampl_nse == False:
pass
else:
print
print (colored('Conditions not well defined! (322-Syn-Syn.py)','yellow'))
print (fixed_ampl_nse)
print (amp_nse_type)
print
quit()
if fixed_ampl_str == True and amp_star_gauss==True and fixed_width_str==False:
sp_axs = np.arange(-math.floor(z_cube_dim/2.),math.floor(z_cube_dim/2.)+1,1)
amp_1d_str = rdunf(A_min_1dg_str ,A_max_1dg_str)
sgm_1d_str = rdunf(sigma_min_1dg_str,sigma_max_1dg_str)
ofs_1d_str = 0
gas_amp_str = func_1D_Gaussian(sp_axs,ofs_1d_str,amp_1d_str,sgm_1d_str)
amp_str_2b_svd = max(gas_amp_str)
elif fixed_ampl_str == True and amp_star_gauss==True and fixed_width_str==True:
sp_axs = np.arange(-math.floor(z_cube_dim/2.),math.floor(z_cube_dim/2.)+1,1)
amp_1d_str = rdunf(A_min_1dg_str ,A_max_1dg_str)
sgm_1d_str = sgm_1d_str_fxd
ofs_1d_str = 0
gas_amp_str = func_1D_Gaussian(sp_axs,ofs_1d_str,amp_1d_str,sgm_1d_str_fxd)
amp_str_2b_svd = max(gas_amp_str)
elif fixed_ampl_str == True and amp_star_gauss==False:
sp_axs = np.arange(-math.floor(z_cube_dim/2.),math.floor(z_cube_dim/2.)+1,1)
amp_1d_str = A_max_1dg_str
sgm_1d_str = sigma_max_1dg_nse
ofs_1d_str = 0
amp_2b_svd = amp_1d_str
gas_amp_str = amp_1d_str
amp_str_2b_svd = A_max_1dg_str
elif fixed_ampl_str == False:
pass
gas_amp_str = amp_nse_2b_svd * gas_amp_str
sgm_1d_str_nmb = sgm_1d_str
sgm_1d_str_vlc = sgm_1d_str * chn_wth_sze
sgm_1d_str_vfw = sigma2fwhm(sgm_1d_str_vlc)
raw_image_0 = create_raw_image(x_cube_dim,y_cube_dim)
if nse_idp_all_slcs == False:
# Add noise
noise = A_noise*np.random.normal(0, 1, raw_image_0.shape)
elif nse_idp_all_slcs == True:
pass
for i,freq in enumerate(range(z_cube_dim)):
#Create raw fits of size x_cube_dim y_cube_dim
raw_image=create_raw_image(x_cube_dim,y_cube_dim)
if nse_idp_all_slcs == False:
pass
elif nse_idp_all_slcs == True:
# Add noise
noise = A_noise*np.random.normal(0, 1, raw_image.shape)
# Convolve with a gaussian
g2d = astpy_conv_gaus_2dkernel(krn_conv_noise)#krn_conv_noise
noise_psf = apcvl.convolve(noise, g2d, boundary='extend')
#Noise Stats
noise_avg = np.mean(noise_psf)
noise_med = np.median(noise_psf)
noise_std = np.std(noise_psf)
noise_rms = noise_std#noise_med**2 + noise_std**2
#Normalize noise
imagenoise = raw_image+noise_psf
imagenoise_n = imagenoise/noise_rms
#Normalized Noise Stats
noise_avg = np.mean(imagenoise_n)
noise_med = np.median(imagenoise_n)
noise_std = np.std(imagenoise_n)
noise_rms = noise_std#noise_med**2 + noise_std**2
if fixed_ampl_str == True and amp_star_gauss==True:
am_fxd_str = gas_amp_str[i]
elif fixed_ampl_str == True and amp_star_gauss==False:
am_fxd_str = A_max_1dg_str
#Create stars
star_array = create_star_image(nstrs_csi= n_stars,
nx_csi = x_cube_dim ,ny_csi = y_cube_dim,
A_min_csi = A_min_csi ,A_max_csi = A_max_csi ,
ofs_min_csi = ofs_min_csi ,ofs_max_csi = ofs_max_csi,
sigmax_min_csi = sigmax_min_csi ,sigmax_max_csi = sigmax_max_csi,
sigmay_min_csi = sigmay_min_csi ,sigmay_max_csi = sigmay_max_csi,
theta_csi = theta_csi ,theta_vl_1dg_str=theta_vl_1dg_str,
shape_csi = shape_csi ,
table_csi = table_csi ,
fixed_position_str = fixed_position_str, x_fxd_str = x_fxd_str ,y_fxd_str = y_fxd_str ,
fixed_size_str = fixed_size_str , sx_fxd_str = sx_fxd_str ,sy_fxd_str = sy_fxd_str,
fixed_ampl_str = fixed_ampl_str , am_fxd_str = am_fxd_str ,
fixed_offset_str = fixed_offset_str , offset_fxd_str = offset_fxd_str )
#Stars + Norm noise
img_noise_psf_stars = star_array[0] + imagenoise_n
# Display Image
displ_image = False
if displ_image == True:
displ_fits(img_noise_psf_stars,'norm','viridis')
elif displ_image == False:
pass
img_stat_hst_f.append(img_noise_psf_stars)
if write_step_fits == True:
# Write out to FITS image
apfts.writeto(ifn_raw , raw_image , overwrite=True)#raw
apfts.writeto(ifn_noise , imagenoise , overwrite=True)#noise
apfts.writeto(ifn_noise_psf , noise_psf , overwrite=True)#convolved noise
apfts.writeto(ifn_noise_n , imagenoise_n , overwrite=True)#normalized
apfts.writeto(ifn_stars , star_array[0] , overwrite=True)#stars
apfts.writeto(ifn_noise_psf_stars, img_noise_psf_stars, overwrite=True)#Stars + Norm noise
print colored(ifn_raw,'yellow')
print colored(ifn_noise,'yellow')
print colored(ifn_noise_psf,'yellow')
print colored(ifn_noise_n,'yellow')
print colored(ifn_stars,'yellow')
print colored(ifn_noise_psf_stars,'yellow')
elif write_step_fits == False:
pass
hdu = apfts.PrimaryHDU(img_stat_hst_f)
hdul = apfts.HDUList([hdu])
if dst_img_dir != None:
SyntheticCube=dst_img_dir + 'SyntheticCube'+cube_ofn_sfx+'.fits'
if os.path.exists(dst_img_dir) == False:
print
print (colored(dst_img_dir,'yellow'))
print (colored('Directory does not exist!','yellow'))
print (colored('Creating it.','yellow'))
print
os.system('mkdir ' + dst_img_dir)
else:
pass
else:
SyntheticCube=img_dir_res + 'SyntheticCube'+cube_ofn_sfx+'.fits'
hdul.writeto(SyntheticCube,overwrite=True)
Header_Get_Add(SyntheticCube,'SIMPLE', 'T',header_comment='conforms to FITS standard ')
Header_Get_Add(SyntheticCube,'BITPIX', -32,header_comment='array data type ')
Header_Get_Add(SyntheticCube,'NAXIS' , 3,header_comment='number of array dimensions')
Header_Get_Add(SyntheticCube,'NAXIS1', 256)
Header_Get_Add(SyntheticCube,'NAXIS2', 256)
Header_Get_Add(SyntheticCube,'NAXIS3', 17)
Header_Get_Add(SyntheticCube,'BMAJ' ,1.250000000000E-03)
Header_Get_Add(SyntheticCube,'BMIN' ,1.250000000000E-03)
Header_Get_Add(SyntheticCube,'BPA' ,0.0000000)
Header_Get_Add(SyntheticCube,'BTYPE' ,'Intensity')
Header_Get_Add(SyntheticCube,'OBJECT' ,'HOT2_EI_CII_G09.v2.58')
Header_Get_Add(SyntheticCube,'BUNIT' ,'Jy' ,header_comment=' Brightness (pixel) unit')
Header_Get_Add(SyntheticCube,'ALTRVAL' ,-1.999999999950E+06 ,header_comment='Alternate frequency reference value')
Header_Get_Add(SyntheticCube,'ALTRPIX' ,1.000000000000E+00 ,header_comment='Alternate frequency reference pixel')
Header_Get_Add(SyntheticCube,'VELREF ' ,258 ,header_comment='1 LSR, 2 HEL, 3 OBS, +256 Radio COMMENT casacore non-standard usage: 4 LSD, 5 GEO, 6 SOU, 7 GAL')
Header_Get_Add(SyntheticCube,'TELESCOP','ALMA')
Header_Get_Add(SyntheticCube,'OBSERVER','hmendez')
Header_Get_Add(SyntheticCube,'TIMESYS' ,'UTC')
Header_Get_Add(SyntheticCube,'OBSRA' ,1.363858337500E+02)
Header_Get_Add(SyntheticCube,'OBSDEC' ,2.039406944444E+00)
Header_Get_Add(SyntheticCube,'DATE' ,'2017-07-06T21:54:46.399391' ,header_comment='Date FITS file was written')
Header_Get_Add(SyntheticCube,'ORIGIN' ,'CASA 4.7.2-REL (r39762)' )
Header_Get_Add(SyntheticCube,'WCSAXES' ,3)
Header_Get_Add(SyntheticCube,'CRPIX1' ,129.0)
Header_Get_Add(SyntheticCube,'CRPIX2' ,129.0)
Header_Get_Add(SyntheticCube,'CRPIX3' ,1.0)
Header_Get_Add(SyntheticCube,'CDELT1' , -0.0001388888888889)
Header_Get_Add(SyntheticCube,'CDELT2' ,0.0001388888888889)
Header_Get_Add(SyntheticCube,'CDELT3' ,249.99999999999)
Header_Get_Add(SyntheticCube,'CUNIT1' ,'deg')
Header_Get_Add(SyntheticCube,'CUNIT2' ,'deg')
Header_Get_Add(SyntheticCube,'CUNIT3' ,'km s-1')
Header_Get_Add(SyntheticCube,'CTYPE1' ,'RA---SIN')
Header_Get_Add(SyntheticCube,'CTYPE2' ,'DEC--SIN')
Header_Get_Add(SyntheticCube,'CTYPE3' ,'VRAD')
Header_Get_Add(SyntheticCube,'CRVAL1' ,136.38583375)
Header_Get_Add(SyntheticCube,'CRVAL2' ,2.039406944444)
Header_Get_Add(SyntheticCube,'CRVAL3' ,-1999.9999999531)
Header_Get_Add(SyntheticCube,'PV2_1' ,0.0)
Header_Get_Add(SyntheticCube,'PV2_2' ,0.0)
Header_Get_Add(SyntheticCube,'LONPOLE' ,180.0)
Header_Get_Add(SyntheticCube,'LATPOLE' ,2.039406944444)
Header_Get_Add(SyntheticCube,'RESTFRQ' ,restframe_frequency)
Header_Get_Add(SyntheticCube,'RADESYS' ,'FK5')
Header_Get_Add(SyntheticCube,'EQUINOX' ,2000.0)
Header_Get_Add(SyntheticCube,'SPECSYS' ,'BARYCENT')
Header_Get_Add(SyntheticCube,'OBSGEO-X',2225142.180269)
Header_Get_Add(SyntheticCube,'OBSGEO-Y',-5440307.370349)
Header_Get_Add(SyntheticCube,'OBSGEO-Z',-2481029.851874)
Cat_Ipt_Tbl = Table_Read(cat_tbl,tbl_format_ipt)
fits = Cat_Ipt_Tbl[2]
delta_nu = Cat_Ipt_Tbl[4]
z = Cat_Ipt_Tbl[8]
Lfir = Cat_Ipt_Tbl[11]
nu = Cat_Ipt_Tbl[13]
vel = Cat_Ipt_Tbl[14]
num_obj = len(Cat_Ipt_Tbl[0])
z_sample_avg = np.mean(z)
z_sample_med = np.median(z)
z_sample_1sl = np.nanpercentile(z, 15.9)
z_sample_1sh = np.nanpercentile(z, 84.1)
z_sample_2sl = np.nanpercentile(z, 2.30)
z_sample_2sh = np.nanpercentile(z, 97.7)
z_sample_3sl = np.nanpercentile(z, 0.20)
z_sample_3sh = np.nanpercentile(z, 99.8)
z_sample_p25 = np.nanpercentile(z, 25.0)
z_sample_p75 = np.nanpercentile(z, 75.0)
Header_Get_Add(SyntheticCube,'STK_NUM',cube_number ,header_comment='Redshift Average')
Header_Get_Add(SyntheticCube,'STZ_AVG',z_sample_avg ,header_comment='Redshift Average')
Header_Get_Add(SyntheticCube,'STZ_MED',z_sample_med ,header_comment='Redshift Median')
Header_Get_Add(SyntheticCube,'STZ_1SL',z_sample_1sl ,header_comment='Redshift 1 sgm lw lmt 15.9 pct')
Header_Get_Add(SyntheticCube,'STZ_1SH',z_sample_1sh ,header_comment='Redshift 1 sgm hg lmt 84.1 pct')
Header_Get_Add(SyntheticCube,'STZ_2SL',z_sample_2sl ,header_comment='Redshift 2 sgm lw lmt 2.30 pct')
Header_Get_Add(SyntheticCube,'STZ_2SH',z_sample_2sh ,header_comment='Redshift 2 sgm hg lmt 97.7 pct')
Header_Get_Add(SyntheticCube,'STZ_3SL',z_sample_3sl ,header_comment='Redshift 3 sgm lw lmt 0.20 pct')
Header_Get_Add(SyntheticCube,'STZ_3SH',z_sample_3sh ,header_comment='Redshift 3 sgm hg lmt 99.8 pct')
Header_Get_Add(SyntheticCube,'STZ_P25',z_sample_p25 ,header_comment='Redshift 25 pct')
Header_Get_Add(SyntheticCube,'STZ_P75',z_sample_p75 ,header_comment='Redshift 75 pct')
X_2D = (star_array[1])
Y_2D = (star_array[2])
A_2D = (star_array[3])
AM_2D = (star_array[4])
SX_2D = (star_array[5])
SY_2D = (star_array[6])
T_2D = (star_array[7])
OS_2D = (star_array[8])
TN_2D = (star_array[9])
hdul.close()
SyntheticCube_clean | |
#! /usr/bin/env python
# -*- coding: utf-8
import sys
import os
import cv2
import numpy as np
import time
import StringIO
from threading import Lock
from misc import WithTimer
from numpy_cache import FIFOLimitedArrayCache
from app_base import BaseApp
from core import CodependentThread
from image_misc import norm01, norm01c, norm0255, tile_images_normalize, ensure_float01, tile_images_make_tiles, ensure_uint255_and_resize_to_fit, caffe_load_image, get_tiles_height_width
from image_misc import FormattedString, cv2_typeset_text, to_255, is_masked
from caffe_proc_thread import CaffeProcThread
from caffe_vis_app_state import CaffeVisAppState
from jpg_vis_loading_thread import JPGVisLoadingThread
layer_renames = {
'pool1': 'p1',
'norm1': 'n1',
'pool2': 'p2',
'norm2': 'n2',
'pool5': 'p5',
}
def get_pp_layer_name(layer_name):
return layer_renames.get(layer_name, layer_name)
def read_label_file(filename):
ret = []
with open(filename, 'r') as ff:
for line in ff:
label = line.strip()
if len(label) > 0:
ret.append(label)
return ret
class CaffeVisApp(BaseApp):
'''App to visualize using caffe.'''
def __init__(self, settings, key_bindings):
super(CaffeVisApp, self).__init__(settings, key_bindings)
print 'Got settings', settings
self.settings = settings
self.bindings = key_bindings
sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
import caffe
try:
self._data_mean = np.load(settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = self._data_mean.shape[1] - self.settings.caffevis_data_hw[0]
excess_w = self._data_mean.shape[2] - self.settings.caffevis_data_hw[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(self.settings.caffevis_data_hw)
self._data_mean = self._data_mean[:, excess_h:(excess_h+self.settings.caffevis_data_hw[0]),
excess_w:(excess_w+self.settings.caffevis_data_hw[1])]
self._net_channel_swap = (2,1,0)
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
self._range_scale = 1.0 # not needed; image comes in [0,255]
#self.net.set_phase_test()
#if settings.caffevis_mode_gpu:
# self.net.set_mode_gpu()
# print 'CaffeVisApp mode: GPU'
#else:
# self.net.set_mode_cpu()
# print 'CaffeVisApp mode: CPU'
# caffe.set_phase_test() # TEST is default now
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'CaffeVisApp mode: GPU'
else:
caffe.set_mode_cpu()
print 'CaffeVisApp mode: CPU'
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
#image_dims = (227,227),
)
self.labels = None
if self.settings.caffevis_labels:
self.labels = read_label_file(self.settings.caffevis_labels)
self.proc_thread = None
self.jpgvis_thread = None
self.handled_frames = 0
if settings.caffevis_jpg_cache_size < 10*1024**2:
raise Exception('caffevis_jpg_cache_size must be at least 10MB for normal operation.')
self.img_cache = FIFOLimitedArrayCache(settings.caffevis_jpg_cache_size)
def start(self):
self.state = CaffeVisAppState(self.net, self.settings, self.bindings)
self.state.drawing_stale = True
self.layer_print_names = [get_pp_layer_name(nn) for nn in self.state._layers]
if self.proc_thread is None or not self.proc_thread.is_alive():
# Start thread if it's not already running
self.proc_thread = CaffeProcThread(self.net, self.state,
self.settings.caffevis_frame_wait_sleep,
self.settings.caffevis_pause_after_keys,
self.settings.caffevis_heartbeat_required)
self.proc_thread.start()
if self.jpgvis_thread is None or not self.jpgvis_thread.is_alive():
# Start thread if it's not already running
self.jpgvis_thread = JPGVisLoadingThread(self.settings, self.state, self.img_cache,
self.settings.caffevis_jpg_load_sleep,
self.settings.caffevis_heartbeat_required)
self.jpgvis_thread.start()
def get_heartbeats(self):
return [self.proc_thread.heartbeat, self.jpgvis_thread.heartbeat]
def quit(self):
print 'CaffeVisApp: trying to quit'
with self.state.lock:
self.state.quit = True
        if self.proc_thread is not None:
for ii in range(3):
self.proc_thread.join(1)
if not self.proc_thread.is_alive():
break
if self.proc_thread.is_alive():
raise Exception('CaffeVisApp: Could not join proc_thread; giving up.')
self.proc_thread = None
print 'CaffeVisApp: quitting.'
def _can_skip_all(self, panes):
return ('caffevis_layers' not in panes.keys())
def handle_input(self, input_image, mask, panes):
if self.debug_level > 1:
print 'handle_input: frame number', self.handled_frames, 'is', 'None' if input_image is None else 'Available'
self.handled_frames += 1
if self._can_skip_all(panes):
return
with self.state.lock:
if self.debug_level > 1:
print 'CaffeVisApp.handle_input: pushed frame'
self.state.next_frame = input_image
self.state.mask = mask
if self.debug_level > 1:
print 'CaffeVisApp.handle_input: caffe_net_state is:', self.state.caffe_net_state
def redraw_needed(self):
return self.state.redraw_needed()
def draw(self, panes):
print 'draw'
if self._can_skip_all(panes):
if self.debug_level > 1:
print 'CaffeVisApp.draw: skipping'
return False
with self.state.lock:
# Hold lock throughout drawing
do_draw = self.state.drawing_stale and self.state.caffe_net_state == 'free'
#print 'CaffeProcThread.draw: caffe_net_state is:', self.state.caffe_net_state
if do_draw:
self.state.caffe_net_state = 'draw'
if do_draw:
print 'CaffeVisApp.draw: drawing'
if self.debug_level > 1:
print 'CaffeVisApp.draw: drawing'
#if 'input' in panes:
# self._draw_input_pane(panes['input'])
if 'caffevis_control' in panes:
self._draw_control_pane(panes['caffevis_control'])
if 'caffevis_status' in panes:
self._draw_status_pane(panes['caffevis_status'])
layer_data_3D_highres = None
if 'caffevis_layers' in panes:
layer_data_3D_highres = self._draw_layer_pane(panes['caffevis_layers'])
if 'caffevis_aux' in panes:
self._draw_aux_pane(panes['caffevis_aux'], layer_data_3D_highres)
if 'caffevis_back' in panes:
# Draw back pane as normal
self._draw_back_pane(panes['caffevis_back'])
if self.state.layers_pane_zoom_mode == 2:
# ALSO draw back pane into layers pane
self._draw_back_pane(panes['caffevis_layers'])
if 'caffevis_jpgvis' in panes:
self._draw_jpgvis_pane(panes['caffevis_jpgvis'])
with self.state.lock:
self.state.drawing_stale = False
self.state.caffe_net_state = 'free'
return do_draw
def _OLDDEP_draw_control_pane(self, pane):
pane.data[:] = to_255(self.settings.window_background)
with self.state.lock:
layer_idx = self.state.layer_idx
face = getattr(cv2, self.settings.caffevis_control_face)
loc = self.settings.caffevis_control_loc[::-1] # Reverse to OpenCV c,r order
clr = to_255(self.settings.caffevis_control_clr)
clr_sel = to_255(self.settings.caffevis_control_clr_selected)
clr_high = to_255(self.settings.caffevis_control_clr_cursor)
fsize = self.settings.caffevis_control_fsize
thick = self.settings.caffevis_control_thick
thick_sel = self.settings.caffevis_control_thick_selected
thick_high = self.settings.caffevis_control_thick_cursor
st1 = ' '.join(self.layer_print_names[:layer_idx])
st3 = ' '.join(self.layer_print_names[layer_idx+1:])
st2 = ((' ' if len(st1) > 0 else '')
+ self.layer_print_names[layer_idx]
+ (' ' if len(st3) > 0 else ''))
st1 = ' ' + st1
cv2.putText(pane.data, st1, loc, face, fsize, clr, thick)
boxsize1, _ = cv2.getTextSize(st1, face, fsize, thick)
loc = (loc[0] + boxsize1[0], loc[1])
if self.state.cursor_area == 'top':
clr_this, thick_this = clr_high, thick_high
else:
clr_this, thick_this = clr_sel, thick_sel
cv2.putText(pane.data, st2, loc, face, fsize, clr_this, thick_this)
boxsize2, _ = cv2.getTextSize(st2, face, fsize, thick_this)
loc = (loc[0] + boxsize2[0], loc[1])
cv2.putText(pane.data, st3, loc, face, fsize, clr, thick)
#print 'st1', st1
#print 'st2', st2
#print 'st3', st3
def _draw_prob_labels_pane(self, pane):
'''Adds text label annotation atop the given pane.'''
if not self.labels or not self.state.show_label_predictions:
return
#pane.data[:] = to_255(self.settings.window_background)
defaults = {'face': getattr(cv2, self.settings.caffevis_class_face),
'fsize': self.settings.caffevis_class_fsize,
'clr': to_255(self.settings.caffevis_class_clr_0),
'thick': self.settings.caffevis_class_thick}
loc = self.settings.caffevis_class_loc[::-1] # Reverse to OpenCV c,r order
clr_0 = to_255(self.settings.caffevis_class_clr_0)
clr_1 = to_255(self.settings.caffevis_class_clr_1)
probs_flat = self.net.blobs['prob'].data.flatten()
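        # argsort is ascending, so slice the last five entries in reverse order to get
        # the indices of the five most probable classes, highest probability first.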
top_5 = probs_flat.argsort()[-1:-6:-1]
strings = []
pmax = probs_flat[top_5[0]]
for idx in top_5:
prob = probs_flat[idx]
text = '%.2f %s' % (prob, self.labels[idx])
fs = FormattedString(text, defaults)
#fs.clr = tuple([clr_1[ii]*prob/pmax + clr_0[ii]*(1-prob/pmax) for ii in range(3)])
fs.clr = tuple([clr_1[ii]*prob + clr_0[ii]*(1-prob) for ii in range(3)])
strings.append([fs]) # Line contains just fs
cv2_typeset_text(pane.data, strings, loc,
line_spacing = self.settings.caffevis_class_line_spacing)
def _draw_control_pane(self, pane):
pane.data[:] = to_255(self.settings.window_background)
with self.state.lock:
layer_idx = self.state.layer_idx
loc = self.settings.caffevis_control_loc[::-1] # Reverse to OpenCV c,r order
strings = []
defaults = {'face': getattr(cv2, self.settings.caffevis_control_face),
'fsize': self.settings.caffevis_control_fsize,
'clr': to_255(self.settings.caffevis_control_clr),
'thick': self.settings.caffevis_control_thick}
for ii in range(len(self.layer_print_names)):
fs = FormattedString(self.layer_print_names[ii], defaults)
this_layer = self.state._layers[ii]
if self.state.backprop_selection_frozen and this_layer == self.state.backprop_layer:
fs.clr = to_255(self.settings.caffevis_control_clr_bp)
fs.thick = self.settings.caffevis_control_thick_bp
if this_layer == self.state.layer:
if self.state.cursor_area == 'top':
fs.clr = to_255(self.settings.caffevis_control_clr_cursor)
fs.thick = self.settings.caffevis_control_thick_cursor
else:
if not (self.state.backprop_selection_frozen and this_layer == self.state.backprop_layer):
fs.clr = to_255(self.settings.caffevis_control_clr_selected)
fs.thick = self.settings.caffevis_control_thick_selected
strings.append(fs)
cv2_typeset_text(pane.data, strings, loc)
def _draw_status_pane(self, pane):
pane.data[:] = to_255(self.settings.window_background)
defaults = {'face': getattr(cv2, self.settings.caffevis_status_face),
'fsize': self.settings.caffevis_status_fsize,
'clr': to_255(self.settings.caffevis_status_clr),
'thick': self.settings.caffevis_status_thick}
loc = self.settings.caffevis_status_loc[::-1] # Reverse to OpenCV c,r order
status = StringIO.StringIO()
with self.state.lock:
print >>status, 'opt' if self.state.pattern_mode else ('back' if self.state.layers_show_back else 'fwd'),
print >>status, '%s_%d |' % (self.state.layer, self.state.selected_unit),
if not self.state.back_enabled:
print >>status, 'Back: off',
else:
print >>status, 'Back: %s' % ('deconv' if self.state.back_mode == 'deconv' else 'bprop'),
print >>status, '(from %s_%d, disp %s)' % (self.state.backprop_layer,
self.state.backprop_unit,
self.state.back_filt_mode),
print >>status, '|',
print >>status, 'Boost: %g/%g' % (self.state.layer_boost_indiv, self.state.layer_boost_gamma)
if self.state.extra_msg:
print >>status, '|', self.state.extra_msg
self.state.extra_msg = ''
strings = [FormattedString(line, defaults) for line in status.getvalue().split('\n')]
cv2_typeset_text(pane.data, strings, loc,
line_spacing = self.settings.caffevis_status_line_spacing)
def _draw_layer_pane(self, pane):
'''Returns the data shown in highres format, b01c order.'''
if self.state.layers_show_back:
layer_dat_3D = self.net.blobs[self.state.layer].diff[0]
else:
layer_dat_3D = self.net.blobs[self.state.layer].data[0]
# Promote FC layers with shape (n) to have shape (n,1,1)
if len(layer_dat_3D.shape) == 1:
layer_dat_3D = layer_dat_3D[:,np.newaxis,np.newaxis]
n_tiles = layer_dat_3D.shape[0]
tile_rows,tile_cols = get_tiles_height_width(n_tiles)
display_3D_highres = None
if self.state.pattern_mode:
# Show desired patterns loaded from disk
#available = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8', 'prob']
jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
'regularized_opt', self.state.layer, 'whole_layer.jpg')
# Get highres version
cache_before = str(self.img_cache)
display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None)
if display_3D_highres is None:
try:
with WithTimer('CaffeVisApp:load_sprite_image', quiet = self.debug_level < 1):
display_3D_highres = load_sprite_image(jpg_path, (tile_rows, tile_cols), n_sprites = n_tiles)
except IOError:
# File does not exist, so just display disabled.
pass
else:
self.img_cache.set((jpg_path, 'whole'), display_3D_highres)
cache_after = str(self.img_cache)
#print 'Cache was / is:\n %s\n %s' % (cache_before, cache_after)
if display_3D_highres is not None:
# Get lowres version, maybe. Assume we want at least one pixel for selection border.
row_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2)))
col_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2)))
ds = max(row_downsamp_factor, col_downsamp_factor)
if ds > 1:
#print 'Downsampling by', ds
display_3D = display_3D_highres[:,::ds,::ds,:]
else:
display_3D = display_3D_highres
else:
                display_3D =
# test/unit/test_ssl_certificate_api_v1.py
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timezone
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import responses
from ibm_cloud_networking_services.ssl_certificate_api_v1 import *
crn = 'testString'
zone_identifier = 'testString'
service = SslCertificateApiV1(
authenticator=NoAuthAuthenticator(),
crn=crn,
zone_identifier=zone_identifier
)
base_url = 'https://api.cis.cloud.ibm.com'
service.set_service_url(base_url)
##############################################################################
# Start of Service: SSLCertificate
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_certificates
#-----------------------------------------------------------------------------
class TestListCertificates():
#--------------------------------------------------------
# list_certificates()
#--------------------------------------------------------
@responses.activate
def test_list_certificates_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/ssl/certificate_packs'
mock_response = '{"result": [{"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "type": "dedicated", "hosts": ["example.com"], "certificates": [{"id": "436627", "hosts": ["example.com"], "status": "active"}], "primary_certificate": 0}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
x_correlation_id = 'testString'
# Invoke method
response = service.list_certificates(
x_correlation_id=x_correlation_id
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_list_certificates_required_params()
#--------------------------------------------------------
@responses.activate
def test_list_certificates_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/ssl/certificate_packs'
mock_response = '{"result": [{"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "type": "dedicated", "hosts": ["example.com"], "certificates": [{"id": "436627", "hosts": ["example.com"], "status": "active"}], "primary_certificate": 0}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_certificates()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for order_certificate
#-----------------------------------------------------------------------------
class TestOrderCertificate():
#--------------------------------------------------------
# order_certificate()
#--------------------------------------------------------
@responses.activate
def test_order_certificate_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/ssl/certificate_packs'
mock_response = '{"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "type": "dedicated", "hosts": ["example.com"], "certificates": [{"id": "436627", "hosts": ["example.com"], "status": "active"}], "primary_certificate": 0}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
type = 'dedicated'
hosts = ['example.com']
x_correlation_id = 'testString'
# Invoke method
response = service.order_certificate(
type=type,
hosts=hosts,
x_correlation_id=x_correlation_id
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['type'] == type
assert req_body['hosts'] == hosts
#--------------------------------------------------------
# test_order_certificate_required_params()
#--------------------------------------------------------
@responses.activate
def test_order_certificate_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/ssl/certificate_packs'
mock_response = '{"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "type": "dedicated", "hosts": ["example.com"], "certificates": [{"id": "436627", "hosts": ["example.com"], "status": "active"}], "primary_certificate": 0}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.order_certificate()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for delete_certificate
#-----------------------------------------------------------------------------
class TestDeleteCertificate():
#--------------------------------------------------------
# delete_certificate()
#--------------------------------------------------------
@responses.activate
def test_delete_certificate_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/ssl/certificate_packs/testString'
responses.add(responses.DELETE,
url,
status=200)
# Set up parameter values
cert_identifier = 'testString'
x_correlation_id = 'testString'
# Invoke method
response = service.delete_certificate(
cert_identifier,
x_correlation_id=x_correlation_id
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_delete_certificate_required_params()
#--------------------------------------------------------
@responses.activate
def test_delete_certificate_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/ssl/certificate_packs/testString'
responses.add(responses.DELETE,
url,
status=200)
# Set up parameter values
cert_identifier = 'testString'
# Invoke method
response = service.delete_certificate(
cert_identifier
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for get_ssl_setting
#-----------------------------------------------------------------------------
class TestGetSslSetting():
#--------------------------------------------------------
# get_ssl_setting()
#--------------------------------------------------------
@responses.activate
def test_get_ssl_setting_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/settings/ssl'
mock_response = '{"success": true, "result": {"id": "ssl", "value": "off", "editable": true, "modified_on": "2017-01-01T05:20:00.12345Z"}, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.get_ssl_setting()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_get_ssl_setting_required_params()
#--------------------------------------------------------
@responses.activate
def test_get_ssl_setting_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/settings/ssl'
mock_response = '{"success": true, "result": {"id": "ssl", "value": "off", "editable": true, "modified_on": "2017-01-01T05:20:00.12345Z"}, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.get_ssl_setting()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for change_ssl_setting
#-----------------------------------------------------------------------------
class TestChangeSslSetting():
#--------------------------------------------------------
# change_ssl_setting()
#--------------------------------------------------------
@responses.activate
def test_change_ssl_setting_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/settings/ssl'
mock_response = '{"success": true, "result": {"id": "ssl", "value": "off", "editable": true, "modified_on": "2017-01-01T05:20:00.12345Z"}, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
value = 'off'
# Invoke method
response = service.change_ssl_setting(
value=value,
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['value'] == value
#--------------------------------------------------------
# test_change_ssl_setting_required_params()
#--------------------------------------------------------
@responses.activate
def test_change_ssl_setting_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/settings/ssl'
mock_response = '{"success": true, "result": {"id": "ssl", "value": "off", "editable": true, "modified_on": "2017-01-01T05:20:00.12345Z"}, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.PATCH,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.change_ssl_setting()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for list_custom_certificates
#-----------------------------------------------------------------------------
class TestListCustomCertificates():
#--------------------------------------------------------
# list_custom_certificates()
#--------------------------------------------------------
@responses.activate
def test_list_custom_certificates_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/custom_certificates'
mock_response = '{"result": [{"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "hosts": ["example.com"], "issuer": "/Country=US/Organization=Lets Encrypt/CommonName=Lets Encrypt Authority X3", "signature": "SHA256WithRSA", "status": "active", "bundle_method": "bundle_method", "zone_id": "zone_id", "uploaded_on": "uploaded_on", "modified_on": "modified_on", "expires_on": "expires_on", "priority": 8}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_custom_certificates()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#--------------------------------------------------------
# test_list_custom_certificates_required_params()
#--------------------------------------------------------
@responses.activate
def test_list_custom_certificates_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/custom_certificates'
mock_response = '{"result": [{"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "hosts": ["example.com"], "issuer": "/Country=US/Organization=Lets Encrypt/CommonName=Lets Encrypt Authority X3", "signature": "SHA256WithRSA", "status": "active", "bundle_method": "bundle_method", "zone_id": "zone_id", "uploaded_on": "uploaded_on", "modified_on": "modified_on", "expires_on": "expires_on", "priority": 8}], "result_info": {"page": 1, "per_page": 2, "count": 1, "total_count": 200}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.list_custom_certificates()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for upload_custom_certificate
#-----------------------------------------------------------------------------
class TestUploadCustomCertificate():
#--------------------------------------------------------
# upload_custom_certificate()
#--------------------------------------------------------
@responses.activate
def test_upload_custom_certificate_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/custom_certificates'
mock_response = '{"result": {"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "hosts": ["example.com"], "issuer": "/Country=US/Organization=Lets Encrypt/CommonName=Lets Encrypt Authority X3", "signature": "SHA256WithRSA", "status": "active", "bundle_method": "bundle_method", "zone_id": "zone_id", "uploaded_on": "uploaded_on", "modified_on": "modified_on", "expires_on": "expires_on", "priority": 8}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Construct a dict representation of a CustomCertReqGeoRestrictions model
custom_cert_req_geo_restrictions_model = {
'label': 'us'
}
# Set up parameter values
certificate = 'testString'
private_key = 'testString'
bundle_method = 'ubiquitous'
geo_restrictions = custom_cert_req_geo_restrictions_model
# Invoke method
response = service.upload_custom_certificate(
certificate=certificate,
private_key=private_key,
bundle_method=bundle_method,
geo_restrictions=geo_restrictions,
)
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
# Validate body params
req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
assert req_body['certificate'] == certificate
assert req_body['private_key'] == private_key
assert req_body['bundle_method'] == bundle_method
assert req_body['geo_restrictions'] == geo_restrictions
#--------------------------------------------------------
# test_upload_custom_certificate_required_params()
#--------------------------------------------------------
@responses.activate
def test_upload_custom_certificate_required_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/custom_certificates'
mock_response = '{"result": {"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "hosts": ["example.com"], "issuer": "/Country=US/Organization=Lets Encrypt/CommonName=Lets Encrypt Authority X3", "signature": "SHA256WithRSA", "status": "active", "bundle_method": "bundle_method", "zone_id": "zone_id", "uploaded_on": "uploaded_on", "modified_on": "modified_on", "expires_on": "expires_on", "priority": 8}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.POST,
url,
body=mock_response,
content_type='application/json',
status=200)
# Invoke method
response = service.upload_custom_certificate()
# Check for correct operation
assert len(responses.calls) == 1
assert response.status_code == 200
#-----------------------------------------------------------------------------
# Test Class for get_custom_certificate
#-----------------------------------------------------------------------------
class TestGetCustomCertificate():
#--------------------------------------------------------
# get_custom_certificate()
#--------------------------------------------------------
@responses.activate
def test_get_custom_certificate_all_params(self):
# Set up mock
url = base_url + '/v1/testString/zones/testString/custom_certificates/testString'
mock_response = '{"result": {"id": "0f405ba2-8c18-49eb-a30b-28b85427780f", "hosts": ["example.com"], "issuer": "/Country=US/Organization=Lets Encrypt/CommonName=Lets Encrypt Authority X3", "signature": "SHA256WithRSA", "status": "active", "bundle_method": "bundle_method", "zone_id": "zone_id", "uploaded_on": "uploaded_on", "modified_on": "modified_on", "expires_on": "expires_on", "priority": 8}, "success": true, "errors": [["errors"]], "messages": [{"status": "OK"}]}'
responses.add(responses.GET,
url,
body=mock_response,
content_type='application/json',
status=200)
# Set up parameter values
custom_cert_id = 'testString'
# Invoke method
response = service.get_custom_certificate(
custom_cert_id
)
# Check for correct operation
nondimensionalized with :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
beadB : dict
Dictionary of Mie and multipole parameters for bead_B.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- charge (float) Charge of bead in [e], or nondimensionalized as :math:`q'=q/e`
- dipole (float) Dipole of bead in [Debye], or nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
- quadrupole (float) Quadrupole of bead in [Debye*Å], or nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
- ionization_energy (float) Ionization_energy of bead in [kcal/mol], or nondimensionalized as :math:`I'=I/(3k_{B}T)`
- polarizability (float) Polarizability of bead in [:math:`Å^3`] or nondimensionalized with :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
shape_factor_scale : bool, Optional, default=False
Scale energy parameter based on shape factor epsilon*Si*Sj
temperature : float, Optional, default=None
        Temperature in [K] for adding and removing dimensions; if the parameters are nondimensionalized, this value isn't used.
nondimensional : bool, Optional, default=False
Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
distance_opts : dict, Optional, default={}
Dictionary of keyword arguments for :func:`~mapsci.multipole_mie_combining_rules.calc_distance_array`
distance_array : numpy.ndarray, Optional, default=None
Array (or float) in either [Å] or nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`, whatever is consistent with 'bead_dict'. If None, 'distance_opts' are used to generate the array.
Returns
-------
output_dict : dict
Dictionary of:
- epsilon (float) Fit energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`. Calculated from fit lambda and van der Waals attraction parameter.
- kij (float) Binary interaction parameter for fit energy parameter, where :math:`\epsilon_{fit}=(1-k_{ij})\sqrt{\epsilon_i\epsilon_j}`
- sigma (float) Size parameter taken at mean, reported in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
        - lambdar (float) Fit repulsive exponent, obtained by solving :math:`C_{Mie}(\lambda_r,\lambda_a)=K/\epsilon_{fit}`
- lambdaa (float) Fit attractive exponent
- lambdaa_variance (float) Variance in attractive exponent during fitting process
- epsilon_saft (float) Energy parameter from SAFT method of scaling with geometric mean, scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
        - kij_saft (float) Binary interaction parameter for the SAFT prediction of the energy parameter, where :math:`\epsilon_{saft}=(1-k_{ij,saft})\sqrt{\epsilon_i\epsilon_j}`
- K (float) Equal to :math:`C_{Mie}\epsilon_{fit}`, used in fitting process. Used to calculate lambdar.
- K_variance (float) Variance in calculation of dummy variable K
"""
if not nondimensional:
if temperature is None:
            logger.error("Temperature must be provided when 'nondimensional' is False")
bead1 = dict_dimensions(beadA.copy(), temperature, dimensions=False)
bead2 = dict_dimensions(beadB.copy(), temperature, dimensions=False)
else:
bead1 = beadA.copy()
bead2 = beadB.copy()
# Set-up Mie parameters
beadAB = mie_combining_rules(bead1, bead2)
Cmie = prefactor(beadAB["lambdar"], beadAB["lambdaa"])
if shape_factor_scale:
        # Default the shape factors on the working copies (bead1/bead2 are what
        # log_mie_attractive reads below).
        if "Sk" not in bead1:
            bead1["Sk"] = 1.0
        if "Sk" not in bead2:
            bead2["Sk"] = 1.0
multipole_terms = calc_cross_multipole_terms(bead1, bead2, nondimensional=True)
# From curve fit
if distance_array is None:
r = calc_distance_array(beadAB, **distance_opts)
else:
r = distance_array
w_multipole, potential_terms = calc_cross_multipole_potential(r, multipole_terms, total_only=False, nondimensional=True)
# ___________ VDW parameter combining _______________
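    # Fitting strategy: the total multipole attraction is matched to the attractive
    # Mie form -w(r) ~ K*(sigma/r)**lambdaa (with K = Cmie*epsilon) by fitting
    # log(-w_multipole) as a line in log(sigma/r) via log_mie_attractive. epsilon is
    # then recovered from the fitted lambdaa (one-fluid combining rule), and lambdar
    # follows from solving K/epsilon = Cmie(lambdar, lambdaa) with brentq below.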
params, var_matrix = spo.curve_fit(lambda x, K, lambdaa: log_mie_attractive(
r, bead1, bead2, lambda_a=lambdaa, Kprefactor=K, shape_factor_scale=shape_factor_scale),
r,
np.log(-w_multipole),
p0=[beadAB["epsilon"] * Cmie, beadAB["lambdaa"]],
bounds=(0.0, np.inf))
K = params[0]
lambdaa_fit = params[1]
eps_fit = calc_epsilonij_from_lambda_aij(lambdaa_fit, bead1, bead2)
if K / eps_fit < 1.01:
        raise ValueError(
            "A suitable repulsive exponent cannot be calculated from the following cross-interaction parameters:\n    epsilon: {}, lambdaa: {}, K/epsilon: {} < 1.01\n Check the self-interaction parameters above. A common cause is a poorly fit polarizability, e.g. a partial charge assigned to a bead whose Mie potential was fit expecting the dipole to be the highest-order multipole."
            .format(float_dimensions(eps_fit, "epsilon", temperature), lambdaa_fit, K / eps_fit))
else:
try:
lambdar_fit = spo.brentq(lambda x: K / eps_fit - prefactor(x, lambdaa_fit), lambdaa_fit * 1.01, 1e+4, xtol=1e-12)
        except ValueError:
            raise ValueError(
                "Could not bracket a repulsive exponent between lambdaa*1.01 and 1e4 for the fitted K/epsilon; check the given parameters.")
# Save output
if not nondimensional:
tmp = beadAB["epsilon"] * np.sqrt(bead1["sigma"]**3 * bead2["sigma"]**3) / beadAB["sigma"]**3
beadAB["epsilon_saft"] = float_dimensions(tmp,"epsilon",temperature,dimensions=True)
beadAB["epsilon"] = float_dimensions(eps_fit,"epsilon",temperature,dimensions=True)
beadAB["K"] = float_dimensions(K,"epsilon",temperature,dimensions=True)
        beadAB["K_variance"] = float_dimensions(var_matrix[0][0],"epsilon",temperature,dimensions=True)  # pcov[0][0]: variance of K, the first fit parameter
beadAB["sigma"] = float_dimensions(beadAB["sigma"],"sigma",temperature,dimensions=True)
else:
beadAB["epsilon_saft"] = beadAB["epsilon"] * np.sqrt(
bead1["sigma"]**3 * bead2["sigma"]**3) / beadAB["sigma"]**3
beadAB["epsilon"] = eps_fit
beadAB["K"] = K
        beadAB["K_variance"] = var_matrix[0][0]  # pcov[0][0]: variance of K, the first fit parameter
beadAB["lambdaa"] = lambdaa_fit
    beadAB["lambdaa_variance"] = var_matrix[1][1]  # pcov[1][1]: variance of lambdaa, the second fit parameter
beadAB["lambdar"] = lambdar_fit
beadAB["kij_saft"] = 1 - beadAB["epsilon_saft"] / np.sqrt(bead1["epsilon"]*bead2["epsilon"])
beadAB["kij"] = 1 - beadAB["epsilon"] / np.sqrt(bead1["epsilon"]*bead2["epsilon"])
return beadAB
def log_mie_attractive(r, bead1, bead2, lambda_a=None, Kprefactor=None, epsilon=None, shape_factor_scale=False):
r"""
Calculate the log of the attractive term of the Mie potential. This linearizes the curve for the fitting process
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
r : numpy.ndarray
Array (or float) of nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
bead1 : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Nondimensionalized energy parameter, :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Nondimensionalized size parameter, :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
bead2 : dict
Dictionary of multipole parameters for bead_B. If provided, the mixed energy parameter is fit.
- epsilon (float) Nondimensionalized energy parameter, :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Nondimensionalized size parameter, :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
epsilon : float, Optional, default=None
The energy parameter for the Mie potential, if not specified the combining rule from `Lafitte 2013 <https://doi.org/10.1063/1.4819786>`_ is used
lambda_a : float, Optional, default=None
The cross interaction attractive exponent, if not specified the combining rule from `Lafitte 2013 <https://doi.org/10.1063/1.4819786>`_ is used
Kprefactor : float, Optional, default=None
Total prefactor of Mie potential equal to the energy parameters times the Mie prefactor, C. If not specified, the value using the combining rules from `Lafitte 2013 <https://doi.org/10.1063/1.4819786>`_ is used.
shape_factor_scale : bool, Optional, default=False
Scale energy parameter based on shape factor epsilon*Si*Sj
Returns
-------
log_potential : numpy.ndarray
The potential array for the given value of epsilon
"""
beadAB = mie_combining_rules(bead1, bead2)
sigma = beadAB["sigma"]
lambda_r = beadAB["lambdar"]
if epsilon is not None and lambda_a is not None:
# Assume lambdar follows normal combining rules
Kprefactor = epsilon * prefactor(lambda_r, lambda_a)
elif epsilon is not None and Kprefactor is not None:
raise ValueError("Specifying 'epsilon' and 'Kprefactor' is redundant.")
elif epsilon is not None:
# Assume both exponents follow normal combining rules
lambda_a = beadAB["lambdaa"]
Kprefactor = epsilon * prefactor(lambda_r, lambda_a)
elif lambda_a is not None and Kprefactor is None:
# Assume lambdar follows normal combining rules, epsilon can be derived from 1 fluid combining rule
epsilon = calc_epsilonij_from_lambda_aij(lambda_a, bead1, bead2)
Kprefactor = epsilon * prefactor(lambda_r, lambda_a)
elif lambda_a is None and Kprefactor is not None:
# Assume lambdaa follows normal combining rules
lambda_a = beadAB["lambdaa"]
if shape_factor_scale:
Kprefactor = Kprefactor * bead1["Sk"] * bead2["Sk"]
return np.log(Kprefactor) + lambda_a * np.log(sigma / r)
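# Minimal usage sketch for log_mie_attractive (illustrative only; the bead values are
# made-up nondimensionalized numbers, not fitted parameters):
#
#     bead = {"epsilon": 1.0, "sigma": 1.0, "lambdar": 12.0, "lambdaa": 6.0, "Sk": 1.0}
#     r = calc_distance_array(bead)
#     log_w = log_mie_attractive(r, bead, bead, lambda_a=6.0, Kprefactor=prefactor(12.0, 6.0))
#     # np.exp(log_w) equals prefactor(12.0, 6.0) * (sigma/r)**6 for these inputs.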
def calc_self_mie_from_multipole(bead_dict,
mie_vdw=None,
temperature=298,
lambda_r=12,
distance_opts={},
distance_array=None,
polarizability_opts={},
shape_factor_scale=False,
nondimensional=False):
r"""
Calculation of self-interaction parameters for the Mie potential from multipole moments.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
bead_dict : dict
Dictionary of Mie and multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- charge (float) Charge of bead in [e], or nondimensionalized as :math:`q'=q/e`
- dipole (float) Dipole of bead in [Debye], or nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
- quadrupole (float) Quadrupole of bead in [Debye*Å], or nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
- ionization_energy (float) Ionization_energy of bead in [kcal/mol], or nondimensionalized as :math:`I'=I/(3k_{B}T)`
- polarizability (float) Polarizability of bead in [:math:`Å^3`] or nondimensionalized with :math:`\alpha'=\alpha (4 \pi | |
``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def register_for_changed_offset_events(self):
"""Registers for notification of updated offset events.
``OffsetEventReceiver.changedOffsetEvents()`` is invoked when an
offset event is changed.
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def register_for_changed_offset_event(self, offset_event_id):
"""Registers for notification of an updated offset event.
``OffsetEventReceiver.changedOffsetEvents()`` is invoked when
the specified offset event is changed.
:param offset_event_id: the ``Id`` of the ``OffsetEventId`` to monitor
:type offset_event_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``offset_event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def register_for_deleted_offset_events(self):
"""Registers for notification of deleted offset events.
``OffsetEventReceiver.deletedOffsetEvents()`` is invoked when an
offset event is removed from this calendar.
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def register_for_deleted_offset_event(self, offset_event_id):
"""Registers for notification of a deleted offset event.
``OffsetEventReceiver.changedOffsetEvents()`` is invoked when
the specified offset event is removed from this calendar.
:param offset_event_id: the ``Id`` of the ``OffsetEvent`` to monitor
:type offset_event_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``offset_event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
class OffsetEventCalendarSession:
"""This session provides methods to retrieve ``OffsetEvent`` to ``Calendar`` mappings.
An ``OffsetEvent`` may appear in multiple ``Calendars``. Each
``Calendar`` may have its own authorizations governing who is
allowed to look at it.
This lookup session defines two views:
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete result set or is an error
condition
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def can_lookup_event_calendar_mappings(self):
"""Tests if this user can perform lookups of event/calendar mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if looking up mappings is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def use_comparative_calendar_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as
authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_plenary_calendar_view(self):
"""A complete view of the ``Event`` and ``Calendar`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def get_offset_event_ids_by_calendar(self, calendar_id):
"""Gets the list of ``OffsetEvent`` ``Ids`` associated with a ``Calendar``.
:param calendar_id: ``Id`` of the ``Calendar``
:type calendar_id: ``osid.id.Id``
:return: list of related offset event ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``calendar_id`` is not found
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
@abc.abstractmethod
def get_offset_events_by_calendar(self, calendar_id):
"""Gets the list of ``OffsetEvents`` associated with a ``Calendar``.
:param calendar_id: ``Id`` of the ``Calendar``
:type calendar_id: ``osid.id.Id``
:return: list of related offset events
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NotFound`` -- ``calendar_id`` is not found
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
def get_offset_event_ids_by_calendars(self, calendar_ids):
"""Gets the list of ``OffsetEvent Ids`` corresponding to a list of ``Calendars``.
:param calendar_ids: list of calendar ``Ids``
:type calendar_ids: ``osid.id.IdList``
:return: list of offset event ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``calendar_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
@abc.abstractmethod
def get_offset_events_by_calendars(self, calendar_ids):
"""Gets the list of ``OffsetEvents`` corresponding to a list of ``Calendars``.
:param calendar_ids: list of calendar ``Ids``
:type calendar_ids: ``osid.id.IdList``
:return: list of offset events
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NullArgument`` -- ``calendar_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
def get_calendar_ids_by_offset_event(self, offset_event_id):
"""Gets the list of ``Calendar`` ``Ids`` mapped to an ``OffsetEvent``.
:param offset_event_id: ``Id`` of an ``OffsetEvent``
:type offset_event_id: ``osid.id.Id``
:return: list of calendar ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``offset_event_id`` is not found
:raise: ``NullArgument`` -- ``offset_event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
@abc.abstractmethod
    def get_calendars_by_offset_event(self, offset_event_id):
        """Gets the list of ``Calendars`` mapped to an ``OffsetEvent``.
        :param offset_event_id: ``Id`` of an ``OffsetEvent``
        :type offset_event_id: ``osid.id.Id``
:return: list of calendars
:rtype: ``osid.calendaring.CalendarList``
:raise: ``NotFound`` -- ``offset_event_id`` is not found
:raise: ``NullArgument`` -- ``offset_event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.CalendarList
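# Illustrative consumer-side sketch for the lookup session above. ``session`` stands for
# some concrete provider implementation of OffsetEventCalendarSession, which this
# abstract module does not supply:
#
#     if session.can_lookup_event_calendar_mappings():
#         session.use_comparative_calendar_view()
#         offset_events = session.get_offset_events_by_calendar(calendar_id)
#         calendar_ids = session.get_calendar_ids_by_offset_event(offset_event_id)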
class OffsetEventCalendarAssignmentSession:
"""This session provides methods to re-assign ``OffsetEvents`` to ``Calendars``.
An ``OffsetEvent`` may map to multiple ``Calendars`` and removing
the last reference to an ``OffsetEvent`` is the equivalent of
deleting it. Each ``Calendar`` may have its own authorizations
governing who is allowed to operate on it.
Moving or adding a reference of an ``OffsetEvent`` to another
``Calendar`` is not a copy operation (eg: does not change its ``Id``
).
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def can_assign_offset_events(self):
"""Tests if this user can alter offset event/calendar mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
assignment operations to unauthorized users.
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def can_assign_offset_events_to_calendar(self, calendar_id):
"""Tests if this user can alter offset event/calendar mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
assignment operations to unauthorized users.
:param calendar_id: the ``Id`` of the ``Calendar``
:type calendar_id: ``osid.id.Id``
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_assignable_calendar_ids(self, calendar_id):
"""Gets a list of calendars including and under the given calendar node in which any offset event can be
assigned.
:param calendar_id: the ``Id`` of the ``Calendar``
:type calendar_id: ``osid.id.Id``
:return: list of assignable calendar ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``calendar_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
@abc.abstractmethod
def get_assignable_calendar_ids_for_offset_event(self, calendar_id, offset_event_id):
"""Gets a list of calendars including and under the given calendar node in which a specific offset event can be
assigned.
:param calendar_id: the ``Id`` of the ``Calendar``
:type calendar_id: ``osid.id.Id``
:param offset_event_id: the ``Id`` of the ``offset_event_id``
:type offset_event_id: ``osid.id.Id``
:return: list of assignable calendar ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``calendar_id`` or ``offset_event_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
@abc.abstractmethod
def assign_offset_event_to_calendar(self, offset_event_id, | |
import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_sph
import os
from timeit import default_timer as timer
import sys
from copy import deepcopy
# import matplotlib
# import matplotlib.pyplot as plt
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (20, 20, 20)
(dx, dy, dz) = (0.2, 0.2, 0.2)
# (Lx, Ly, Lz) = (30, 30, 30)
# (dx, dy, dz) = (0.2, 0.2, 0.2)
# (Lx, Ly, Lz) = (30, 30, 30)
# (dx, dy, dz) = (0.25, 0.25, 0.25)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
Ntheta = 50
Nk = np.ceil(NGridPoints_desired / Ntheta)
theta_max = np.pi
thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True)
# k_max = np.sqrt((np.pi / dx)**2 + (np.pi / dy)**2 + (np.pi / dz)**2)
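    # Spherical UV cutoff chosen so the k-space sphere has the same volume as the
    # Cartesian cube of side 2*pi/dx: (4*pi/3)*k_max**3 = (2*pi/dx)**3.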
k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
k_min = 1e-5
kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True)
if dk < k_min:
print('k ARRAY GENERATION ERROR')
kgrid = Grid.Grid("SPHERICAL_2D")
kgrid.initArray_premade('k', kArray)
kgrid.initArray_premade('th', thetaArray)
# tMax = 400; dt = 1
# tMax = 480; dt = 0.1
tMax = 5000; dt = 0.5
# tMax = 100; dt = 1
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
NGridPoints = kgrid.size()
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('dk: {0}'.format(dk))
print('NGridPoints: {0}'.format(NGridPoints))
# Basic parameters
expParams = pf_dynamic_sph.Zw_expParams()
# L_exp2th, M_exp2th, T_exp2th = pf_dynamic_sph.unitConv_exp2th(expParams['n0_BEC'], expParams['mB'])
L_exp2th, M_exp2th, T_exp2th = pf_dynamic_sph.unitConv_exp2th(expParams['n0_BEC_scale'], expParams['mB'])
n0 = expParams['n0_BEC'] / (L_exp2th**3) # should ~ 1
mB = expParams['mB'] * M_exp2th # should = 1
mI = expParams['mI'] * M_exp2th
aBB = expParams['aBB'] * L_exp2th
gBB = (4 * np.pi / mB) * aBB
sParams = [mI, mB, n0, gBB]
# Trap parameters
n0_TF = expParams['n0_TF'] / (L_exp2th**3)
n0_thermal = expParams['n0_thermal'] / (L_exp2th**3)
RTF_BEC_X = expParams['RTF_BEC_X'] * L_exp2th; RTF_BEC_Y = expParams['RTF_BEC_Y'] * L_exp2th; RTF_BEC_Z = expParams['RTF_BEC_Z'] * L_exp2th
RG_BEC_X = expParams['RG_BEC_X'] * L_exp2th; RG_BEC_Y = expParams['RG_BEC_Y'] * L_exp2th; RG_BEC_Z = expParams['RG_BEC_Z'] * L_exp2th
omega_BEC_osc = expParams['omega_BEC_osc'] / T_exp2th
omega_Imp_x = expParams['omega_Imp_x'] / T_exp2th
# Derived quantities
nu = pf_dynamic_sph.nu(mB, n0, gBB)
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
Fscale = 2 * np.pi * (nu / xi**2)
vI_init = expParams['vI_init'] * L_exp2th / T_exp2th
PI_init = mI * vI_init
tscale = xi / nu
To = 2 * np.pi / omega_BEC_osc
print(To, To / tscale)
print(80 * 1e-3 * T_exp2th, 1e3 * 5000 / T_exp2th)
print(1 / L_exp2th, expParams['RTF_BEC_X'], RTF_BEC_X / L_exp2th)
print(0.75 * RTF_BEC_X / xi, omega_BEC_osc * tscale)
print(RTF_BEC_X * (omega_BEC_osc / 8) / nu)
print(RTF_BEC_X * (omega_BEC_osc / 2) / nu)
print('c_BEC: {:.2E}'.format(nu * T_exp2th / L_exp2th))
print(mI * nu)
# ---- SET OSC PARAMS ----
x0 = round(pf_dynamic_sph.x_BEC_osc(0, omega_BEC_osc, RTF_BEC_X, 0.5), 1)
print('X0: {0}, Tosc: {1}'.format(x0, To))
oscParams_List = [{'X0': 0.0, 'P0': 0.4, 'a_osc': expParams['a_osc']}]
# oscParams_List = [{'X0': 0.75 * RTF_BEC_X, 'P0': 0.6, 'a_osc': 0.5}]
# oscParams_List = [{'X0': 0.0, 'P0': 0.1, 'a_osc': 0.5},
# {'X0': 0.0, 'P0': 0.6, 'a_osc': 0.5}]
# oscParams_List = [{'X0': 0.0, 'P0': 1.8, 'a_osc': 0.5},
# {'X0': 0.0, 'P0': 0.1, 'a_osc': 0.0},
# {'X0': 0.0, 'P0': 0.6, 'a_osc': 0.0},
# {'X0': 0.0, 'P0': 1.8, 'a_osc': 0.0}]
TTList = []
for oscParams in oscParams_List:
toggleDict = {'Location': 'cluster', 'Dynamics': 'real', 'Interaction': 'on', 'InitCS': 'steadystate', 'InitCS_datapath': '', 'Coupling': 'twophonon', 'Grid': 'spherical',
'F_ext': 'off', 'PosScat': 'off', 'BEC_density': 'off', 'BEC_density_osc': 'on', 'Imp_trap': 'on', 'CS_Dyn': 'on', 'Polaron_Potential': 'on'}
trapParams = {'n0_TF_BEC': n0_TF, 'RTF_BEC_X': RTF_BEC_X, 'RTF_BEC_Y': RTF_BEC_Y, 'RTF_BEC_Z': RTF_BEC_Z, 'n0_thermal_BEC': n0_thermal, 'RG_BEC_X': RG_BEC_X, 'RG_BEC_Y': RG_BEC_Y, 'RG_BEC_Z': RG_BEC_Z,
'omega_Imp_x': omega_Imp_x, 'omega_BEC_osc': omega_BEC_osc, 'X0': oscParams['X0'], 'P0': oscParams['P0'], 'a_osc': oscParams['a_osc']}
if trapParams['P0'] >= 1.1 * mI * nu:
toggleDict['InitCS'] = 'file'
if trapParams['a_osc'] == 0.0:
toggleDict['BEC_density_osc'] = 'off'
if toggleDict['BEC_density_osc'] == 'off':
trapParams['a_osc'] = 0.0
if toggleDict['Imp_trap'] == 'off':
trapParams['omega_Imp_x'] = 0.0
# ---- SET OUTPUT DATA FOLDER ----
if toggleDict['Location'] == 'personal':
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/ZwierleinExp_data/aBB_{:.3f}/NGridPoints_{:.2E}/BEC_osc'.format(aBB, NGridPoints_cart)
elif toggleDict['Location'] == 'cluster':
datapath = '/n/scratchlfs02/demler_lab/kis/ZwierleinExp_data/aBB_{:.3f}/NGridPoints_{:.2E}/BEC_osc'.format(aBB, NGridPoints_cart)
if toggleDict['PosScat'] == 'on':
innerdatapath = datapath + '/PosScat'
else:
innerdatapath = datapath + '/NegScat'
if toggleDict['BEC_density'] == 'off':
innerdatapath = innerdatapath + '/HomogBEC'
toggleDict['Polaron_Potential'] = 'off'
if toggleDict['Polaron_Potential'] == 'off':
innerdatapath = innerdatapath + '/NoPolPot'
else:
innerdatapath = innerdatapath + '/PolPot'
if toggleDict['CS_Dyn'] == 'off':
innerdatapath = innerdatapath + '_NoCSDyn'
else:
innerdatapath = innerdatapath + '_CSDyn'
innerdatapath = innerdatapath + '/fBEC={:d}_fImp={:d}_aosc={:.1f}_X0={:.1f}_P0={:.1f}'.format(int(np.ceil(expParams['omega_BEC_osc'] / (2 * np.pi))), int(np.ceil(expParams['omega_Imp_x'] / (2 * np.pi))), trapParams['a_osc'], trapParams['X0'], trapParams['P0'])
if toggleDict['InitCS'] == 'file':
toggleDict['InitCS_datapath'] = datapath + '/PolGS_spherical'
else:
toggleDict['InitCS_datapath'] = 'InitCS ERROR'
TTList.append((toggleDict, trapParams, innerdatapath))
# # Test of density for homogeneous case (n0), center of inhomogenous BEC with experimental params (n_center), and the density away from the center of the BEC for an inhomogeneous BEC with very wide harmonic trap in the direction of motion
# n_center = pf_dynamic_sph.n_BEC(oscParams['X0'], 0, 0, n0_TF, n0_thermal, trapParams['RTF_BEC_X'], trapParams['RTF_BEC_Y'], trapParams['RTF_BEC_Z'], trapParams['RG_BEC_X'], trapParams['RG_BEC_Y'], trapParams['RG_BEC_Z']) # ASSUMING PARTICLE IS IN CENTER OF TRAP IN Y AND Z DIRECTIONS
# wideRTF = 1e4 * trapParams['RTF_BEC_X']
# wideRG = 1e4 * trapParams['RG_BEC_X']
# widePos = oscParams['X0'] + 1e2 * trapParams['RTF_BEC_X']
# n_center_wide = pf_dynamic_sph.n_BEC(widePos, 0, 0, n0_TF, n0_thermal, wideRTF, trapParams['RTF_BEC_Y'], trapParams['RTF_BEC_Z'], wideRG, trapParams['RG_BEC_Y'], trapParams['RG_BEC_Z']) # ASSUMING PARTICLE IS IN CENTER OF TRAP IN Y AND Z DIRECTIONS
# print(n0, n_center, n_center_wide) # turns out that a homogeneous BEC is a good approx if the furthest position our impurity gets away from the center of the BEC is 1-2 orders of magnitude smaller than the radius of the TF profile (set by RTF & RG)
# # # ---- CREATE EXTERNAL DATA FOLDERS ----
# if os.path.isdir(datapath) is False:
# os.mkdir(datapath)
# os.mkdir(datapath + '/BEC_osc')
# # ---- CREATE OUTPUT DATA FOLDERS ----
# for tup in TTList:
# (toggleDict, trapParams, innerdatapath) = tup
# if os.path.isdir(innerdatapath) is False:
# os.mkdir(innerdatapath)
# # ---- SINGLE FUNCTION RUN ----
# runstart = timer()
# (toggleDict, trapParams, innerdatapath0, innerdatapath) = TTList[0]
# aIBi = -1.3
# dP = 0.5 * mI * nu
# F = 0.1 * Fscale
# filepath = innerdatapath + '/aIBi_{:.2f}_dP_{:.2f}mIc_F_{:.2f}.nc'.format(aIBi, dP / (mI * nu), F)
# if toggleDict['F_ext'] == 'off':
# dP = 0; F = 0; filepath = innerdatapath + '/aIBi_{:.2f}.nc'.format(aIBi)
# print('mI: {:.2f}, mB:{:.1f}, aBB: {:.3f}, aIBi: {:.2f}, n0: {:.1f}'.format(mI, mB, aBB, aIBi, n0))
# # print('TF: {0}'.format(dP / F))
# cParams = {'aIBi': aIBi}
# fParams = {'dP_ext': dP, 'Fext_mag': F}
# ds = pf_dynamic_sph.LDA_quenchDynamics_DataGeneration(cParams, gParams, sParams, fParams, trapParams, toggleDict)
# Obs_ds = ds[['Pph', 'Nph', 'P', 'X']]; Obs_ds.attrs = ds.attrs; Obs_ds.to_netcdf(filepath)
# end = timer()
# print('Time: {:.2f}'.format(end - runstart))
# ---- SET CPARAMS (RANGE OVER MULTIPLE aIBi) ----
a0_exp = 5.29e-11 # Bohr radius (m)
# aIBi_Vals = np.concatenate((np.array([-150, -140, -130, -120, -110]), np.linspace(-100, -1, 199))); aIBi_Vals = np.concatenate((aIBi_Vals, np.array([-0.25])))
# aIB_exp = ((1 / aIBi_Vals) / L_exp2th) / a0_exp
# print(aIB_exp)
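# Convert the experimental impurity-boson scattering lengths (specified above in units of the
# Bohr radius and turned into meters via a0_exp) into inverse scattering lengths in theory
# units using the experiment-to-theory length conversion L_exp2th.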
aIBexp_Vals = np.concatenate((np.array([-12000, -8000, -7000, -6000, -5000]), np.linspace(-4000, -2000, 20, endpoint=False), np.linspace(-2000, -70, 175, endpoint=False), np.linspace(-70, -20, 5))) * a0_exp
aIBi_Vals = 1 / (aIBexp_Vals * L_exp2th)
if toggleDict['PosScat'] == 'on':
aIBi_Vals = -1 * aIBi_Vals
# print(aIBi_Vals)
metaList = []
for tup in TTList:
(toggleDict, trapParams, innerdatapath) = tup
for aIBi in aIBi_Vals:
metaList.append((toggleDict, trapParams, innerdatapath, aIBi))
# # missInds = [5, 23, 28, 42, 128] # negative scattering length (no pol pot, CS dyn on)
# # missInds = [1, 2, 174, 175, 176, 177, 178, 179, 180] # positive scattering length (no pol pot, CS dyn on)
# # missInds = [98, 99, 100, 101, 102, 103, 104] # negative scattering length (homog, no pol pot, CS dyn on)
from time import time
import cv2
import numpy as np
from scene import Scene
from light import Light
from camera import Camera
from game_object import GameObject
def triangle_area(v0, v1, v2):
"""
| v01[0] v01[1] |
| v02[0] v02[1] | = v01[0]*v02[1] - v01[1]*v02[0]
"""
return (v1[0]-v0[0])*(v2[1]-v0[1]) - (v1[1]-v0[1])*(v2[0]-v0[0])
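# Note: this is the z-component of (v1 - v0) x (v2 - v0), i.e. twice the signed triangle area.
# Example: triangle_area((0, 0), (1, 0), (0, 1)) == 1 (positive for counter-clockwise winding).
# Only the sign (backface culling) and the ratios (barycentric weights) are used below, so the
# factor of two cancels out.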
def convert_map_for_vis(m, ignore=np.inf):
m = np.array(m)
m[m == ignore] = 1
m_min = np.min(m[m != 1])
m_max = np.max(m[m != 1])
m[m != 1] = (m[m != 1] - m_min) / (m_max - m_min) * 0.8
return m
def geometric_transform(scene):
world_to_camera = scene.camera.world_to_camera
world_to_light = scene.light.world_to_light
near_clip = scene.camera.near_clip
far_clip = scene.camera.far_clip
# for light.shadow_map_param
sxmin, sxmax = np.inf, -np.inf
symin, symax = np.inf, -np.inf
for obj in scene.objects:
for name, mesh in obj.mesh.mesh.items():
mesh_format = mesh['format'] # V: vertex, N: normal, T: texture
if mesh_format == 'V3F':
step = 3
elif mesh_format == 'N3F_V3F':
step = 6
elif mesh_format == 'T2F_V3F':
step = 5
elif mesh_format == 'T2F_N3F_V3F':
step = 8
else:
assert False, 'invalid mesh_format'
vertices = mesh['vertices']
for i in range(0, len(vertices), step*3):
# triangle vertex coordinates and scaling
v0 = np.array([[*vertices[i+1*step-3:i+1*step], 1]]).T * obj.scale
v1 = np.array([[*vertices[i+2*step-3:i+2*step], 1]]).T * obj.scale
v2 = np.array([[*vertices[i+3*step-3:i+3*step], 1]]).T * obj.scale
v0[3, 0] = 1
v1[3, 0] = 1
v2[3, 0] = 1
if False and 'N3F' in mesh_format:
                    # triangle vertex normal vectors (note: this branch is disabled by the 'False' guard above, so flat per-face normals are always recomputed in the else branch)
n0 = np.array([vertices[i+1*step-6:i+1*step-3]]).T
n1 = np.array([vertices[i+2*step-6:i+2*step-3]]).T
n2 = np.array([vertices[i+3*step-6:i+3*step-3]]).T
else:
# if the model does not provide normal vectors, generate normal from triangle vertices
n = np.cross((v1-v0)[:3, 0], (v2-v0)[:3, 0])
n = np.expand_dims(n / np.linalg.norm(n), 1)
n0 = n
n1 = n
n2 = n
if 'T2F' in mesh_format:
# triangle vertex texture coordinates
t0 = vertices[i+0*step:i+0*step+2]
t1 = vertices[i+1*step:i+1*step+2]
t2 = vertices[i+2*step:i+2*step+2]
else:
t0 = np.array([0, 0])
t1 = np.array([0, 0])
t2 = np.array([0, 0])
# cv: (vertex camera coordinate)
# = (world to camera) x (object transform) x (vertex object coordinate)
cv0 = (world_to_camera @ obj.transform @ v0).squeeze()[:3]
cv1 = (world_to_camera @ obj.transform @ v1).squeeze()[:3]
cv2 = (world_to_camera @ obj.transform @ v2).squeeze()[:3]
# cn: (normal camera coordinate)
# = (world to camera rotation) x (object transform rotation) x (normal object coordinate)
cn0 = (world_to_camera[:3, :3] @ obj.transform[:3, :3] @ n0).squeeze()
cn1 = (world_to_camera[:3, :3] @ obj.transform[:3, :3] @ n1).squeeze()
cn2 = (world_to_camera[:3, :3] @ obj.transform[:3, :3] @ n2).squeeze()
# lv: (vertex light coordinate)
# = (world to light) x (object transform) x (vertex object coordinate)
lv0 = (world_to_light @ obj.transform @ v0).squeeze()[:3]
lv1 = (world_to_light @ obj.transform @ v1).squeeze()[:3]
lv2 = (world_to_light @ obj.transform @ v2).squeeze()[:3]
                # update shadow map min/max values
sxmin = min(sxmin, lv0[0], lv1[0], lv2[0])
sxmax = max(sxmax, lv0[0], lv1[0], lv2[0])
symin = min(symin, lv0[1], lv1[1], lv2[1])
symax = max(symax, lv0[1], lv1[1], lv2[1])
# if triangle faces light source, add it to light_vertices list
if triangle_area(lv0, lv1, lv2) > 0:
mesh['light_vertices'].extend([*lv0, *lv1, *lv2])
# near/far frustum clipping
# if min(-cv0[2], -cv1[2], -cv2[2]) > far_clip or max(-cv0[2], -cv1[2], -cv2[2]) < near_clip:
# continue
# camera coordinate backface culling
# if triangle_area(cv0, cv1, cv2) > 0:
# mesh['cam_vertices'].extend([*t0, *cn0, *cv0, *t1, *cn1, *cv1, *t2, *cn2, *cv2])
mesh['cam_vertices'].extend([*t0, *cn0, *cv0, *t1, *cn1, *cv1, *t2, *cn2, *cv2])
mesh['cam_vertices'] = np.array(mesh['cam_vertices'])
mesh['light_vertices'] = np.array(mesh['light_vertices'])
# expand shadow map range by 10%
sscale = scene.light.shadow_map_dim / (max(sxmax-sxmin, symax-symin) * 1.2)
sxoff = sxmin - (sxmax-sxmin)*0.1
syoff = symin - (symax-symin)*0.1
scene.light.shadow_map_param = [sxoff, sscale, syoff, sscale]
def shadow_mapping(scene, shadow_map):
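    # The shadow_map passed in is assumed to be pre-filled with a large sentinel depth
    # (e.g. np.inf): each texel keeps the minimum light-space depth seen so far via min().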
light = scene.light
shadow_map_dim = shadow_map.shape[0]
assert shadow_map.shape[0] == shadow_map.shape[1] == light.shadow_map_dim
for obj in scene.objects:
for name, mesh in obj.mesh.mesh.items():
light_vertices = mesh['light_vertices']
for i in range(0, len(light_vertices), 9):
# sv: (vertex shadow map coordinate)
# = (shadow map projection matrix) x (vertex light coordinate)
sv0 = light.project(light_vertices[i+0:i+3])
sv1 = light.project(light_vertices[i+3:i+6])
sv2 = light.project(light_vertices[i+6:i+9])
# shadow map coordinate backface culling
area = triangle_area(sv0, sv1, sv2)
if area <= 0:
continue
# triangle bounding box
symax = min(shadow_map_dim, int(max(sv0[1], sv1[1], sv2[1])))
symin = max(0, int(min(sv0[1], sv1[1], sv2[1])))
sxmax = min(shadow_map_dim, int(max(sv0[0], sv1[0], sv2[0])))
sxmin = max(0, int(min(sv0[0], sv1[0], sv2[0])))
# update shadow map
for sy in range(symin, symax+1):
for sx in range(sxmin, sxmax+1):
w0 = triangle_area(sv1, sv2, (sx, sy))
w1 = triangle_area(sv2, sv0, (sx, sy))
w2 = triangle_area(sv0, sv1, (sx, sy))
if (w0 >= 0) and (w1 >= 0) and (w2 >= 0):
# interpolate and update shadow map z-value
sz = -(w0*sv0[2] + w1*sv1[2] + w2*sv2[2]) / area
shadow_map[sy, sx] = min(shadow_map[sy, sx], sz)
def rasterization(scene, depth_buffer, raster_buffer):
camera = scene.camera
light = scene.light
height, width = depth_buffer.shape
# normalized light direction vector
lnorm = camera.world_to_camera[:3, :3] @ light.norm
print('transformed lnorm:', lnorm)
# ambient light (range: 0.0~1.0)
lambt = light.ambient
for obj_id, obj in enumerate(scene.objects):
for name, mesh in obj.mesh.mesh.items():
vertices = mesh['cam_vertices']
step, ti0, ti1, ni0, ni1, vi0, vi1 = 8, 0, 2, 2, 5, 5, 8
for i in range(0, len(vertices), step*3):
# cv: vertex camera coordinate
cv0 = vertices[i+0*step+vi0:i+0*step+vi1]
cv1 = vertices[i+1*step+vi0:i+1*step+vi1]
cv2 = vertices[i+2*step+vi0:i+2*step+vi1]
# fv: (vertex frame coordinate)
# = (frame projection matrix) x (vertex camera coordinate)
# NOTE: frame y is inverted
fv0 = camera.project(cv0)
fv1 = camera.project(cv1)
fv2 = camera.project(cv2)
# t: triangle vertex texture coordinates
t0 = vertices[i+0*step+ti0:i+0*step+ti1]
t1 = vertices[i+1*step+ti0:i+1*step+ti1]
t2 = vertices[i+2*step+ti0:i+2*step+ti1]
area = triangle_area(fv2, fv1, fv0)
if area <= 0:
continue
cn0 = vertices[i+0*step+ni0:i+0*step+ni1]
cn1 = vertices[i+1*step+ni0:i+1*step+ni1]
cn2 = vertices[i+2*step+ni0:i+2*step+ni1]
fymax = int(np.round(min(height-1, max(fv0[1], fv1[1], fv2[1]))))
fymin = int(np.round(max(0, min(fv0[1], fv1[1], fv2[1]))))
fxmax = int(np.round(min(width-1, max(fv0[0], fv1[0], fv2[0]))))
fxmin = int(np.round(max(0, min(fv0[0], fv1[0], fv2[0]))))
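                # per-vertex diffuse (Lambert) shading: ambient floor lambt plus the
                # n.l term scaled into the remaining (1 - lambt) range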
l0 = lambt + np.dot(cn0, lnorm) * (1 - lambt)
l1 = lambt + np.dot(cn1, lnorm) * (1 - lambt)
l2 = lambt + np.dot(cn2, lnorm) * (1 - lambt)
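                # perspective-correct interpolation: 1/z is affine across the screen-space
                # triangle, so attributes are weighted by 1/z per vertex and divided by the
                # interpolated 1/z per pixel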
fz0_1 = 1 / fv0[2]
fz1_1 = 1 / fv1[2]
fz2_1 = 1 / fv2[2]
for fy in range(fymin, fymax+1):
for fx in range(fxmin, fxmax+1):
w0 = triangle_area((fx, fy), fv2, fv1) / area
w1 = triangle_area((fx, fy), fv0, fv2) / area
w2 = triangle_area((fx, fy), fv1, fv0) / area
if (w0 >= 0) and (w1 >= 0) and (w2 >= 0):
fz_1 = w0*fz0_1 + w1*fz1_1 + w2*fz2_1
fz = 1 / fz_1
if fz < depth_buffer[fy, fx]:
tex = (w0*t0*fz0_1 + w1*t1*fz1_1 + w2*t2*fz2_1) * fz
lum = (w0*l0*fz0_1 + w1*l1*fz1_1 + w2*l2*fz2_1) * fz
raster_buffer[0, fy, fx] = obj_id
raster_buffer[1, fy, fx] = tex[0]
raster_buffer[2, fy, fx] = tex[1]
raster_buffer[3, fy, fx] = lum
depth_buffer[fy, fx] = fz
def shadow_raster(scene, shadow_buffer, depth_buffer, shadow_map):
camera = scene.camera
height, width = shadow_buffer.shape
assert shadow_map.shape[0] == shadow_map.shape[1]
shadow_map_dim = shadow_map.shape[0]
light = scene.light
lnorm = camera.world_to_camera[:3, :3] @ light.norm
camera_to_light = light.world_to_light @ camera.camera_to_world
shadow_map_bias = light.shadow_map_bias
for obj in scene.objects:
for name, mesh in obj.mesh.mesh.items():
vertices = mesh['cam_vertices']
step, ti0, ti1, ni0, ni1, vi0, vi1 = 8, 0, 2, 2, 5, 5, 8
light_vertices = mesh['light_vertices']
for i in range(0, len(vertices), step*3):
cv0 = np.array([[*vertices[i+0*step+vi0:i+0*step+vi1], 1]]).T
cv1 = np.array([[*vertices[i+1*step+vi0:i+1*step+vi1], 1]]).T
cv2 = np.array([[*vertices[i+2*step+vi0:i+2*step+vi1], 1]]).T
# lv0 = light_vertices[]
# lv1 =
# lv2 =
cnorm = vertices[i+0*step+ni0:i+0*step+ni1]
# frame vertices, y-coord is inverted
fv0 = camera.project(cv0)
fv1 = camera.project(cv1)
fv2 = camera.project(cv2)
area = -triangle_area(fv0, fv1, fv2)
if area <= 0:
continue
fz0_1 = 1 / fv0[2]
fz1_1 = 1 / fv1[2]
fz2_1 = 1 / fv2[2]
fymax = int(np.round(min(height-1, max(fv0[1], fv1[1], fv2[1]))))
fymin = int(np.round(max(0, min(fv0[1], fv1[1], fv2[1]))))
fxmax = int(np.round(min(width-1, max(fv0[0], fv1[0], fv2[0]))))
fxmin = int(np.round(max(0, min(fv0[0], fv1[0], fv2[0]))))
sv0 = light.project((camera_to_light @ cv0).squeeze()[:3])
sv1 = light.project((camera_to_light @ cv1).squeeze()[:3])
sv2 = light.project((camera_to_light @ cv2).squeeze()[:3])
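                # slope-scaled depth bias: the more grazing the light direction is to the
                # surface (dot(n, l) -> 0), the larger the bias needed to avoid
                # self-shadowing ("shadow acne"); tan(arccos(.)) grows accordingly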
bias = shadow_map_bias * np.tan(np.arccos(np.dot(cnorm, lnorm)))
                bias = np.clip(bias, 0, shadow_map_bias)  # np.clip is not in-place; keep the clamped value
for fy in range(fymin, fymax+1):
for fx in range(fxmin, fxmax+1):
w0 = triangle_area((fx, fy), fv2, fv1) / area
w1 = triangle_area((fx, fy), fv0, fv2) / area
w2 = triangle_area((fx, fy), fv1, fv0) / area
if (w0 >= 0) and (w1 >= 0) and (w2 >= 0):
fz_1 = w0*fz0_1 + w1*fz1_1 + w2*fz2_1
fz = 1 / fz_1
                            if fz < depth_buffer[fy,
import numpy as np
import time
import math
# auxiliary function for cleaning the workspace
def clear_all():
gl = globals().copy()
for var in gl:
if var[0] == '_': continue
if 'func' in str(globals()[var]): continue
if 'module' in str(globals()[var]): continue
del globals()[var]
# Type: ndarray
# - represents array of any dimensionality
# - 0-based indexing
# - all elements have to be of same type
# (they are stored directly in the array, not through pointers)
# Initialization from data
clear_all()
A = np.array([[1, 2], [3, 4]]) # 2D array of size 2x2 (integer)
B = np.array([[1, 2], [3, 4.5]]) # 2D array of size 2x2 (float)
C = np.array([1, 2]) # 1D vector of size 2 (integer)
D = np.array([[1, 2]]) # 2D row vector 1x2 (integer)
E = np.array([[1], [2]]) # 2D column vector 2x1 (integer)
F = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # 3D array 2x2x2 (integer)
# Type ndarray can store non-numeric types
clear_all()
strings = np.array(['abc', 'pqr', 'xyzuvw']) # 1D vector of strings
mixed1 = np.array([3, True, 'xyz']) # type conversion: 1D vector of strings
mixed2 = np.array([3, True, False]) # type conversion: 1D vector of integers
lists = np.array([[1, 2], [3]]) # 1D array of python lists (ragged input; numpy >= 1.24 requires an explicit dtype=object here)
# Type: matrix
# - inherits from ndarray type
# - only 2D arrays
# - DEPRECATED
# Special initialization
clear_all()
A = np.empty((4, 2)) # uninitialized 4x2 array (float)
B = np.zeros((2, 3)) # 2x3 zeros array (float)
C = np.ones((3, 2, 1)) # 3x2x1 ones array (float)
D = np.identity(3) # 3x3 identity array (float)
# Random arrays
m = 5
n = 3
U1 = np.random.rand(m, n) # m-by-n array, uniform distribution [0,1)
U2 = np.random.uniform(100, 200,
(m, n)) # m-by-n array, uniform distribution [100,200)
R1 = np.random.randn(m, n) # m-by-n array, normal distribution N(0,1)
R2 = np.random.normal(10, 2,
(m, n)) # m-by-n array, normal distribution N(10,2)
# Accessing elements
# - ndarray type is mutable,
# - whenever possible, operations produce views of existing data,
# - copying must be often explicitly specified,
# - potential performance boost.
clear_all()
A = np.array([[1, 2], [3, 4], [5, 6]]) # 2D array of size 3x2 (integer)
a = A[0, 0] # get an element
A[0, 1] = 8 # set an element
A[:, 0] = 5 # set a column
A[1, :] = 10, 20 # set a row
A[2, :] = [30, 40] # set a row
B = A # create a view
B[0, 0] = 0 # modifies A
C = A.copy() # create a copy
C[0, 0] = 111 # does not modify A
rowA_1D = A[1, :] # get row view by an index (1D vector)
rowA_1D[0] = -100 # modifies A and B
rowA_2D = A[1:2, :] # get row view by slice operator (2D vector)
rowA_2D[0, 1] = -200 # modifies A, B, and rowA_1D
subA = A[1:, :] # get submatrix view
subA[1, 1] = 1 # modifies A and its views
E = A[0, :].copy() # row copy (1D)
F = A[0:1, :].copy() # row copy (2D)
E[0] = 1234 # does not modify A
F[0, 0] = 4321 # does not modify A
# Q: When does indexing produce a copy instead of a view?
# A: When numpy cannot describe the result with a constant stride
#    (e.g. fancy indexing with an arbitrary list of indices).
clear_all()
V = np.asarray(range(1, 10)) # initialize from a range object
V1 = V[0:9:2] # get view
V1[0] = 100 # modifies V
V2 = V[9:None:-2] # get view (using None includes 0'th index)
V2[0] = 100 # modifies V
V3 = V[[1, 5, 3]] # get copy (unable to determine the stride)
V3[0] = 200 # does not modify V
V4 = V[[0, 4, 8]] # get copy (unable to determine the stride)
V4[0] = 200 # does not modify V
Q = np.asarray(range(0, 100)).reshape(10, 10)
subQ = Q[10:None:-3, 0:10:2] # get view (known strides for rows and columns)
subQ[1, 2] = 10000 # modifies Q
rowQ = Q[[1], :] # get row copy (unable to determine stride)
rowQ[0, 1] = -300 # does not modify Q
# Reshaping
# - whenever possible reshape function returns a view on the array
clear_all()
V = np.array([[1, 2]]) # 2D row vector 1x2 (integer)
V2 = np.reshape(V, (2, 1)) # 1x2->2x1 conversion - view creation
V2[0, 0] = 100 # modifies V
V3 = np.reshape(V, 2) # 2D->1D conversion - view creation
V3[1] = 200 # modifies V
A = np.array([[1, 2], [3, 4]]) # 2D matrix 2x2
B = np.reshape(A, (-1, 1)) # 2x2->4x1 conversion, 4 set automatically
B[0] = 100 # modifies A
C = np.reshape(A, 4) # 2x2-> x4 vector conversion
D = np.reshape(A, 5) # 2x2-> x5 vector conversion - exception
A = np.asarray(range(0, 9)).reshape(3, 3)
flatA = np.reshape(A, -1) # 3x3->9x vector conversion - view creation
flatA[0] = 100 # modifies A
At = np.transpose(A) # creates a transposition as view
At[0, 0] = -100 # modifies A
flatAt = np.reshape(At, -1) # 3x3->9x vector conversion as copy
flatAt[0] = 100 # does not modify At and A
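# A quick way to verify view-vs-copy behaviour (np.shares_memory is available in numpy >= 1.11):
print(np.shares_memory(flatA, A)) # True - reshape returned a view
print(np.shares_memory(flatAt, At)) # False - reshape had to copy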
# views - access times
n_reps = 10
m = 10000
A = np.random.uniform(0, 1, (m, m))
B = A.copy()
C = A.copy()
D = A.copy()
# vertical sums as row vectors
vsumA = np.zeros((1, m))
vsumB = np.zeros((1, m))
vsumC = np.zeros((1, m))
t = time.perf_counter()
for r in range(n_reps):
for i in range(m):
vsumA = vsumA + A[i, :] # access directly
print("w/o view:", (time.perf_counter() - t) / n_reps)
t = time.perf_counter()
for r in range(n_reps):
for i in range(m):
row = B[i, :] # create view
vsumB = vsumB + row
print("w/ view:", (time.perf_counter() - t) / n_reps)
t = time.perf_counter()
for r in range(n_reps):
for i in range(m):
row = C[[i], :] # create copy
vsumC = vsumC + row
print("Row copy:", (time.perf_counter() - t) / n_reps)
t = time.perf_counter()
for r in range(n_reps):
vsumD = np.sum(D, axis=0) # sum the columns (1D vector as a result)
print("Library call:", (time.perf_counter() - t) / n_reps)
# verify correctness
(vsumA == vsumB).all()
(vsumA == vsumC).all()
# operations
clear_all()
A = np.array([[1, 2], [3, 4]]) # 2x2 integer array
B = A + A # sum elements
C = -2.5 * A # multiply matrix by scalar
D = np.array([[10, -10], [-10, 10]])
E = A * D # multiply corresponding elements (not a matrix multiplication!)
F = np.dot(A, D) # matrix multiplication
FF = A @ D # matrix multiplication - new operator in python 3.5
F = np.array([[1, -1]]) # row vector (2D)
G = A * F # multiply columns per F elements
print(G) # [[1, -2],[3, -4]]
Q = A * np.array([1, -1]) # the same, but with 1D vector
H = A * F.transpose() # multiply rows per F elements
print(H) # [[1, 2],[-3, -4]]
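# Broadcasting rule (informal): shapes are aligned from the trailing dimension; a dimension
# of size 1 (or a missing leading dimension) is stretched to match the other operand.
# Here F has shape (1, 2) and broadcasts across rows, F.transpose() has shape (2, 1)
# and broadcasts across columns.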
clear_all()
A = np.random.rand(2, 10)
B = np.random.rand(10, 2)
C = np.dot(A, B) # 2x10 by 10x2 matrix multiplication
V = np.array([[1], [2]])
U = np.dot(B, V) # 10x2 by 2x1 matrix-vector multiplication
# solving linear system AX = B
clear_all()
A = np.random.rand(10, 10)
X = np.random.rand(10, 1)
B = np.dot(A, X)
A_inv = np.linalg.inv(A) # matrix inverse
Xsolved = np.dot(A_inv, B) # solve the system
print(X == Xsolved) # array comparison (logical array as a result)
print(abs(X - Xsolved)) # inspect the residuals
print(np.isclose(X, Xsolved)) # comparison with tolerance
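# Note: the explicit inverse above is for illustration; np.linalg.solve is the preferred way
# to solve AX = B (faster and numerically more stable):
Xsolved2 = np.linalg.solve(A, B)
print(np.isclose(X, Xsolved2).all()) # expected True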
# Math functions
clear_all()
n = 1000000
X = np.random.rand(n)
sinX = np.sin(X)
# more complicated functions
f_scalar = lambda x: math.log(math.sin(x) * math.sqrt(x / 2) + 1)
f_vector = lambda x: np.log(np.sin(x) * np.sqrt(x / 2) + 1)
Y1 = np.empty(n)
# for-based variant
n_reps = 1
t = time.perf_counter()
for r in range(n_reps):
for i in range(n):
Y1[i] = f_scalar(X[i])
print("For-based:", (time.perf_counter() - t) / n_reps)
# vectorized variant
n_reps = 10
t = time.perf_counter()
for r in range(n_reps):
Y2 = f_vector(X)
print("Vectorized:", (time.perf_counter() - t) / n_reps)
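# The vectorized variant is typically 1-2 orders of magnitude faster because the element-wise
# loop runs in numpy's compiled code instead of the Python interpreter
# (the exact speedup is machine dependent).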
# Concatenations, diagonals
clear_all()
A = np.array([[1, 2]])
B = np.array([[3, 4]])
C = np.array([[9, 8, 7, 6]])
AB = np.hstack((A, B)) # horizontal concatenation
ABC = np.vstack((AB, C)) # vertical concatenation
D = np.array([[1, 2], [3, 4]]) # 2x2 matrix
E = np.diag(D) # reduce dimensionality: matrix 2D -> vector 1D
F = np.diag(E) # extend dimensionality: vector 1D -> matrix 2D
G = np.diag(ABC) # reduction: get diagonal of square submatrix
H = [[1, 4]] # 2D row vector
I = np.diag(H) # reduction: get diagonal of square submatrix
np.fill_diagonal(A, [-1, -2]) # set diagonal
# logical indexing
A = np.array([[1, 2, 3], [3, 4, 3], [3, 2, 5]])
B = A[A[:, 1] == 2, :] # select rows containing 2 as a second element
C = A[A[:, 1] == 2] # second dimension can be omitted
D = A[:, A[1, :] == 3] # select columns containing 3 as a second element
# Access test
# Generate rotation matrix for given vector
clear_all()
m = | |
np.arange(0, n_j, 1), np.arange(0, n_z, 1))
# query points
xi = (di, dj, dz)
# multidimensional interpolation
output = interpolate.interpn(points, normalized_blurred_grid, xi, method='linear')
return output
# =============================================================
# class: synthetic_image_generate
# creates synthetic images for different purposes
# =============================================================
class synthetic_image_generate:
def __init__(self, width, height, name="synthetic_image"):
self.name = name
self.width = width
self.height = height
def create_lens_shading_correction_images(self, dark_current=0, flat_max=65535, flat_min=0, clip_range=[0, 65535]):
# Objective: creates two images:
# dark_current_image and flat_field_image
dark_current_image = dark_current * np.ones((self.height, self.width), dtype=np.float32)
flat_field_image = np.empty((self.height, self.width), dtype=np.float32)
center_pixel_pos = [self.height/2, self.width/2]
max_distance = distance_euclid(center_pixel_pos, [self.height, self.width])
for i in range(0, self.height):
for j in range(0, self.width):
flat_field_image[i, j] = (max_distance - distance_euclid(center_pixel_pos, [i, j])) / max_distance
flat_field_image[i, j] = flat_min + flat_field_image[i, j] * (flat_max - flat_min)
dark_current_image = np.clip(dark_current_image, clip_range[0], clip_range[1])
flat_field_image = np.clip(flat_field_image, clip_range[0], clip_range[1])
return dark_current_image, flat_field_image
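    # Minimal usage sketch (values are illustrative, not calibration data):
    #   gen = synthetic_image_generate(width=640, height=480)
    #   dark, flat = gen.create_lens_shading_correction_images(dark_current=64,
    #                                                          flat_max=65535, flat_min=10000)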
def create_zone_plate_image(self):
pass
def create_color_gradient_image(self):
pass
def create_random_noise_image(self, mean=0, standard_deviation=1, seed=0):
# Creates normally distributed noisy image
np.random.seed(seed)
return np.random.normal(mean, standard_deviation, (self.height, self.width))
def create_noisy_image(self, data, mean=0, standard_deviation=1, seed=0, clip_range=[0, 65535]):
# Adds normally distributed noise to the data
return np.clip(data + self.create_random_noise_image(mean, standard_deviation, seed), clip_range[0], clip_range[1])
# =============================================================
# class: create_filter
# creates different filters, generally 2D filters
# =============================================================
class create_filter:
def __init__(self, name="filter"):
self.name = name
def gaussian(self, kernel_size, sigma):
        # compute the half-extent of the kernel, i.e. how far the sampling grid reaches on each side of the center;
        # remember that kernel_size[0] is the width of the kernel
        # and kernel_size[1] is the height of the kernel
temp = np.floor(np.float32(kernel_size) / 2.)
# create the grid
# example: if kernel_size = [5, 3], then:
# x: array([[-2., -1., 0., 1., 2.],
# [-2., -1., 0., 1., 2.],
# [-2., -1., 0., 1., 2.]])
# y: array([[-1., -1., -1., -1., -1.],
# [ 0., 0., 0., 0., 0.],
# [ 1., 1., 1., 1., 1.]])
x, y = np.meshgrid(np.linspace(-temp[0], temp[0], kernel_size[0]),\
np.linspace(-temp[1], temp[1], kernel_size[1]))
# Gaussian equation
temp = np.exp( -(x**2 + y**2) / (2. * sigma**2) )
# make kernel sum equal to 1
return temp / np.sum(temp)
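    # Example (illustrative): gaussian([5, 3], 1.0) returns a 3x5 kernel (height x width)
    # whose elements sum to 1 by construction.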
def gaussian_separable(self, kernel_size, sigma):
        # compute the half-extent of the kernel, i.e. how far the sampling grid reaches on each side of the center;
        # remember that kernel_size[0] is the width of the kernel
        # and kernel_size[1] is the height of the kernel
temp = np.floor(np.float32(kernel_size) / 2.)
# create the horizontal kernel
x = np.linspace(-temp[0], temp[0], kernel_size[0])
x = x.reshape((1, kernel_size[0])) # reshape to create row vector
hx = np.exp(-x**2 / (2 * sigma**2))
hx = hx / np.sum(hx)
# create the vertical kernel
y = np.linspace(-temp[1], temp[1], kernel_size[1])
y = y.reshape((kernel_size[1], 1)) # reshape to create column vector
hy = np.exp(-y**2 / (2 * sigma**2))
hy = hy / np.sum(hy)
return hx, hy
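    # Note: the separable pair reconstructs the full 2D kernel as an outer product, i.e.
    # np.allclose(self.gaussian(kernel_size, sigma), hy @ hx) should hold; this is what makes
    # separable filtering cheaper (two 1D passes instead of one full 2D convolution).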
def sobel(self, kernel_size):
# Returns the Sobel filter kernels Sx and Sy
Sx = .25 * np.dot([[1.], [2.], [1.]], [[1., 0., -1.]])
if (kernel_size > 3):
            n = int(np.floor((kernel_size - 5) / 2 + 1))
for i in range(0, n):
Sx = (1./16.) * signal.convolve2d(np.dot([[1.], [2.], [1.]], [[1., 2., 1.]]), Sx)
Sy = np.transpose(Sx)
return Sx, Sy
def __str__(self):
return self.name
# =============================================================
# class: color_conversion
# color conversion from one color space to another
# =============================================================
class color_conversion:
def __init__(self, data, name="color conversion"):
self.data = np.float32(data)
self.name = name
def rgb2gray(self):
return 0.299 * self.data[:, :, 0] +\
0.587 * self.data[:, :, 1] +\
0.114 * self.data[:, :, 2]
def rgb2ycc(self, rule="bt601"):
# map to select kr and kb
kr_kb_dict = {"bt601" : [0.299, 0.114],\
"bt709" : [0.2126, 0.0722],\
"bt2020" : [0.2627, 0.0593]}
kr = kr_kb_dict[rule][0]
kb = kr_kb_dict[rule][1]
kg = 1 - (kr + kb)
output = np.empty(np.shape(self.data), dtype=np.float32)
output[:, :, 0] = kr * self.data[:, :, 0] + \
kg * self.data[:, :, 1] + \
kb * self.data[:, :, 2]
output[:, :, 1] = 0.5 * ((self.data[:, :, 2] - output[:, :, 0]) / (1 - kb))
output[:, :, 2] = 0.5 * ((self.data[:, :, 0] - output[:, :, 0]) / (1 - kr))
return output
def ycc2rgb(self, rule="bt601"):
# map to select kr and kb
kr_kb_dict = {"bt601" : [0.299, 0.114],\
"bt709" : [0.2126, 0.0722],\
"bt2020" : [0.2627, 0.0593]}
kr = kr_kb_dict[rule][0]
kb = kr_kb_dict[rule][1]
kg = 1 - (kr + kb)
output = np.empty(np.shape(self.data), dtype=np.float32)
output[:, :, 0] = 2. * self.data[:, :, 2] * (1 - kr) + self.data[:, :, 0]
output[:, :, 2] = 2. * self.data[:, :, 1] * (1 - kb) + self.data[:, :, 0]
output[:, :, 1] = (self.data[:, :, 0] - kr * output[:, :, 0] - kb * output[:, :, 2]) / kg
return output
def rgb2xyz(self, color_space="srgb", clip_range=[0, 65535]):
# input rgb in range clip_range
# output xyz is in range 0 to 1
if (color_space == "srgb"):
# degamma / linearization
data = helpers(self.data).degamma_srgb(clip_range)
data = np.float32(data)
data = np.divide(data, clip_range[1])
            # matrix multiplication
output = np.empty(np.shape(self.data), dtype=np.float32)
output[:, :, 0] = data[:, :, 0] * 0.4124 + data[:, :, 1] * 0.3576 + data[:, :, 2] * 0.1805
output[:, :, 1] = data[:, :, 0] * 0.2126 + data[:, :, 1] * 0.7152 + data[:, :, 2] * 0.0722
output[:, :, 2] = data[:, :, 0] * 0.0193 + data[:, :, 1] * 0.1192 + data[:, :, 2] * 0.9505
elif (color_space == "adobe-rgb-1998"):
# degamma / linearization
data = helpers(self.data).degamma_adobe_rgb_1998(clip_range)
data = np.float32(data)
data = np.divide(data, clip_range[1])
# matrix multiplication
output = np.empty(np.shape(self.data), dtype=np.float32)
output[:, :, 0] = data[:, :, 0] * 0.5767309 + data[:, :, 1] * 0.1855540 + data[:, :, 2] * 0.1881852
output[:, :, 1] = data[:, :, 0] * 0.2973769 + data[:, :, 1] * 0.6273491 + data[:, :, 2] * 0.0752741
output[:, :, 2] = data[:, :, 0] * 0.0270343 + data[:, :, 1] * 0.0706872 + data[:, :, 2] * 0.9911085
elif (color_space == "linear"):
            # matrix multiplication
output = np.empty(np.shape(self.data), dtype=np.float32)
data = np.float32(self.data)
data = np.divide(data, clip_range[1])
output[:, :, 0] = data[:, :, 0] * 0.4124 + data[:, :, 1] * 0.3576 + data[:, :, 2] * 0.1805
output[:, :, 1] = data[:, :, 0] * 0.2126 + data[:, :, 1] * 0.7152 + data[:, :, 2] * 0.0722
output[:, :, 2] = data[:, :, 0] * 0.0193 + data[:, :, 1] * 0.1192 + data[:, :, 2] * 0.9505
else:
            print("Warning! color_space must be srgb, adobe-rgb-1998, or linear.")
return
return output
def xyz2rgb(self, color_space="srgb", clip_range=[0, 65535]):
# input xyz is in range 0 to 1
# output rgb in clip_range
# allocate space for output
output = np.empty(np.shape(self.data), dtype=np.float32)
if (color_space == "srgb"):
# matrix multiplication
output[:, :, 0] = self.data[:, :, 0] * 3.2406 + self.data[:, :, 1] * -1.5372 + self.data[:, :, 2] * -0.4986
output[:, :, 1] = self.data[:, :, 0] * -0.9689 + self.data[:, :, 1] * 1.8758 + self.data[:, :, 2] * 0.0415
output[:, :, 2] = self.data[:, :, 0] * 0.0557 + self.data[:, :, 1] * -0.2040 + self.data[:, :, 2] * 1.0570
# gamma to retain nonlinearity
output = helpers(output * clip_range[1]).gamma_srgb(clip_range)
elif (color_space == "adobe-rgb-1998"):
# matrix multiplication
output[:, :, 0] = self.data[:, :, 0] * 2.0413690 + self.data[:, :, 1] * -0.5649464 + self.data[:, :, 2] * -0.3446944
output[:, :, 1] = self.data[:, :, 0] * -0.9692660 + self.data[:, :, 1] * 1.8760108 + self.data[:, :, 2] * 0.0415560
output[:, :, 2] = self.data[:, :, 0] * 0.0134474 + self.data[:, :, 1] * -0.1183897 + self.data[:, :, 2] * 1.0154096
# gamma to retain nonlinearity
output = helpers(output * clip_range[1]).gamma_adobe_rgb_1998(clip_range)
elif (color_space == "linear"):
# matrix multiplication
output[:, :, 0] = self.data[:, :, 0] * 3.2406 + self.data[:, :, 1] * -1.5372 + self.data[:, :, 2] * -0.4986
output[:, :, 1] = self.data[:, :, 0] * -0.9689 + self.data[:, :, 1] * 1.8758 + self.data[:, :, 2] * 0.0415
output[:, :, 2] = self.data[:, :, 0] * 0.0557 + self.data[:, :, 1] * -0.2040 + self.data[:, :, 2] * 1.0570
# gamma to retain nonlinearity
output = | |
int_x, int_y)
def expand_img_column(self):
'''
Expand img column to its id-number and stack-name
'''
if self.has_label('rlnImageName'):
def parse_img_name(img):
img_id, img_name = img.split('@')
img_head, img_tail = os.path.split(img_name)
return int(img_id), img_tail
img_num_list = []
img_head_list = []
img_data = np.array(list(map(parse_img_name, self.data_block['rlnImageName'].tolist())))
# Expand the columns
self.data_block['idx'] = img_data[:,0]
self.data_block['img'] = img_data[:,1]
def delete_img_columns(self):
'''
Delete expanded image columns
'''
if self.has_label('idx') and self.has_label('img'):
self.data_block = self.data_block.drop(columns=['idx', 'img'])
def get_ptcl_key(self, ptcl):
'''
Get ptcl key
'''
# Get img number and full image name
img_num, img_name = ptcl['rlnImageName'].split('@')
# Get img tail
img_head, img_tail = os.path.split(img_name)
# Construct ptcl key
ptcl_key = (int(img_num), img_tail)
return ptcl_key
def has_label(self, label):
'''
Check if the label exists in data frame
'''
if self.data_block is not None and label in self.data_block.columns:
return True
else:
return False
def get_data_block(self):
'''
Get data block
'''
return self.data_block
def is_particle_inside(self, ptcl, mic_apix, NX, NY):
'''
Is particle inside
'''
# Relative scale of pixel sizes
apix_scale = 1.0*self.star_apix/mic_apix
cor_offsetx = self.data_block['rlnOriginX'][ptcl]*apix_scale
int_offsetx = np.round(cor_offsetx)
cor_offsety = self.data_block['rlnOriginY'][ptcl]*apix_scale
int_offsety = np.round(cor_offsety)
new_coordx = self.data_block.loc[ptcl, 'rlnCoordinateX'] - int_offsetx
new_coordy = self.data_block.loc[ptcl, 'rlnCoordinateY'] - int_offsety
if(new_coordx < NX and new_coordx > 0 and
new_coordy < NY and new_coordy > 0):
return True
else:
return False
def addOffset2D(self, t=[0, 0], ptcls=None):
'''
Translate
'''
if len(t) == 2:
dx = float(t[0])
dy = float(t[1])
# If necessary create the new data columns
if not self.has_label('rlnOriginX'):
self.add_column('rlnOriginX')
if not self.has_label('rlnOriginY'):
self.add_column('rlnOriginY')
if ptcls is None:
ptcls = np.arange(self.num_data_points)
self.data_block.loc[ptcls, 'rlnOriginX'] += dx
self.data_block.loc[ptcls, 'rlnOriginY'] += dy
def set_star_apix(self, apix=None):
'''
Set star apix
'''
if type(apix) == float:
self.star_apix = apix
else:
self.star_apix = 1.0
def get_star_apix(self):
'''
Get star apix
'''
return self.star_apix
def determine_star_apix(self):
'''
Determine star apix
'''
if self.has_label('rlnDetectorPixelSize') and self.has_label('rlnMagnification'):
self.star_apix = 10000*self.data_block.loc[0, 'rlnDetectorPixelSize']/self.data_block.loc[0, 'rlnMagnification']
else:
print('Warning: No pixel size information in star file %s' % (self.star_file))
self.star_apix = 1.0
return self.star_apix
def recenter2D(self, mic_apix=1.82):
'''
Recenter particles
'''
# Relative scale of pixel sizes
self.apix_scale = 1.0*self.star_apix/mic_apix
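        # Idea: fold the integer part of each origin offset (in micrograph pixels) into the
        # particle coordinate and keep only the sub-pixel remainder as the origin, so that
        # re-extracted particles end up centered.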
if(self.has_label('rlnOriginX') and
self.has_label('rlnOriginY')):
# Center x-coordinate
cor_offsetx = self.data_block['rlnOriginX']*self.apix_scale
int_offsetx = np.round(cor_offsetx)
dif_offsetx = cor_offsetx - int_offsetx
self.data_block.loc[:, 'rlnOriginX'] = dif_offsetx/self.apix_scale
self.data_block.loc[:, 'rlnCoordinateX'] -= int_offsetx
# Center y-coordinate
cor_offsety = self.data_block['rlnOriginY']*self.apix_scale
int_offsety = np.round(cor_offsety)
dif_offsety = cor_offsety - int_offsety
self.data_block.loc[:, 'rlnOriginY'] = dif_offsety/self.apix_scale
self.data_block.loc[:, 'rlnCoordinateY'] -= int_offsety
def change_label(self, old_label, new_label):
'''
Change label name
'''
if self.has_label(old_label) and new_label in self.PARAMETERS and not self.has_label(new_label):
self.data_block.rename(columns={old_label: new_label},
inplace=True)
def rename_column(self, old_label, new_label):
'''
Rename column
'''
self.change_label(old_label, new_label)
def dublicate_column(self, label, new_label):
'''
Duplicate a column with a label
'''
if self.has_label(label) and new_label in self.PARAMETERS:
self.data_block.loc[:, new_label] = self.data_block[label]
def rotate_psi(self, rotangle=0):
'''
Rotate psi angle
'''
self.data_block.loc[:, 'rlnAnglePsi'] += rotangle
# Normalize psi
self.normalize_psi()
def merge_star(self, other_star):
'''
Merge with the current star
'''
if other_star is not None and self.data_block is not None:
self.data_block = self.data_block.append(other_star.data_block, ignore_index=True)
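            # NOTE: DataFrame.append() was removed in pandas 2.0;
            # pd.concat([self.data_block, other_star.data_block], ignore_index=True)
            # is the equivalent call on newer pandas versions.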
def normalize_psi(self):
'''
Normalize psi angle
'''
self.data_block.loc[:, 'rlnAnglePsi'] %= 360
# Find angles higher than 180
mask = self.data_block['rlnAnglePsi'] > 180
# Subtract 180 from angles higher than 180
self.data_block.loc[mask, 'rlnAnglePsi'] -= 360
def rotate2D(self, rotangle=0, offset=[0, 0], final_offset=[0, 0], ptcls=None):
'''
Rotate particles
'''
# Check if the offset columns exist
if(not self.has_label('rlnOriginX') or
not self.has_label('rlnOriginY')):
self.add_column('rlnOriginX')
self.add_column('rlnOriginY')
# Check if the Psi (rot for 2D transformation)
if not self.has_label('rlnAnglePsi'):
self.add_column('rlnAnglePsi')
# Update Offsets
if ptcls is None:
ptcls = np.arange(self.num_data_points)
# Check if there is any particle to transform
if len(ptcls) == 0:
return
# Iterate through each particle to get the corrected offset
new_offsets = []
for ptcl in ptcls:
oldangle = self.data_block.loc[ptcl, 'rlnAnglePsi']
rotM = util.euler2rot2D(float(oldangle))
# Get the transpose
rotMT = rotM.T
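            # the transpose of a 2D rotation matrix is its inverse, so this maps the requested
            # offset from the reference (rotated) frame back into this particle's image frame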
# Get the corrected offset
corrected_offset = rotMT.dot(np.array(offset))
# Final offset
final_rotM = util.euler2rot2D(float(oldangle+rotangle))
final_rotMT = final_rotM.T
final_corrected_offset = final_rotMT.dot(np.array(final_offset))
new_offsets.append(corrected_offset+final_corrected_offset)
# Update offsets (Needs to be investigated)
new_offsets = np.array(new_offsets)
self.data_block.loc[ptcls, 'rlnOriginX'] += new_offsets[:, 0]
self.data_block.loc[ptcls, 'rlnOriginY'] += new_offsets[:, 1]
# Update psi angles
self.data_block.loc[ptcls, 'rlnAnglePsi'] += rotangle
# Normalize psi angle
self.normalize_psi()
def num2className(self, ptcls=None):
'''
Assign class names from particle numbers in ImageName
'''
if not self.has_label('rlnClassNumber'):
self.add_column('rlnClassNumber')
# Get the particle ids
particle_nums = [int(image_name.split('@')[0]) for image_name in self.data_block['rlnImageName']]
particle_nums = np.array(particle_nums)
if ptcls is None:
ptcls = np.arange(self.num_data_points)
self.data_block.loc[ptcls, 'rlnClassNumber'] = particle_nums[ptcls]
def get_class_rows(self, class_id=1):
'''
Get rows with the defined class id
'''
if self.has_label('rlnClassNumber'):
return np.nonzero(self.data_block['rlnClassNumber'] == class_id)[0]
else:
return None
def get_class_ids(self):
'''
Return class ids
'''
return np.unique(self.data_block['rlnClassNumber'])
def get_column(self, label=None):
'''
Get data column
'''
if self.has_label(label):
return self.data_block[label]
def tilt90(self):
'''
Tilt 90 star
'''
if self.has_label('rlnAngleTilt') and self.has_label('rlnAngleRot'):
valid_rows = (self.data_block['rlnAngleTilt'] % 360) > 180
# Update tilt angle
self.data_block.loc[valid_rows, 'rlnAngleTilt'] -= 180
self.data_block.loc[valid_rows, 'rlnAngleTilt'] %= 360
# Update rot angle
self.data_block.loc[valid_rows, 'rlnAngleRot'] += 180
self.data_block.loc[valid_rows, 'rlnAngleRot'] %= 360
def Zflip(self):
'''
Z-flip star
'''
if self.has_label('rlnAngleRot'):
# Update tilt angle
self.data_block['rlnAngleRot'] *= -1
self.data_block['rlnAngleRot'] %= 360
def create_write_formatter(self):
'''
Create write formatter
'''
formatter = []
for label in self.data_block.columns:
type_name = self.PARAMETERS[label]["typename"]
formatter.append(self.type2format[type_name])
# Create write formatter
self.write_formatter = ' '.join(formatter)
def write(self, out_fname, verbose=True):
'''
Write star file
'''
# Create the formatter
self.create_write_formatter()
# Create header
header = []
# Write data block name
header.append("data_%s" % self.data_name)
header.append("")
header.append("loop_")
# Write the data labels
for label in self.data_block.columns:
header.append("_%s" % label)
# Make header string
header = '\n'.join(header)
# Print particle number info
if verbose:
print('Writing %d particles in %s' % (self.data_block.shape[0], out_fname))
# Save file
np.savetxt(out_fname, self.data_block.values, fmt=self.write_formatter, header=header, comments='')
class Cistem(Project):
'''
Cistem class
'''
def __init__(self, name='EMStar2Par'):
super().__init__(name)
self.par_file = None
self.par_data_block = None
self.original_star = None
self.original_star_file = None
# Par to star parameters
self.delclasses = []
self.selclasses = []
self.scorecutoff = None
self.sigmacutoff = None
self.mlcutoff = None
# Database variables
self.db_conn = None
self.ref_packages = []
self.ref_nums = []
self.ref_num = None
self.orig_positions = None
def set_par2star_params(self, delclasses=None, selclasses=None, scorecutoff=None, sigmacutoff=None, mlcutoff=None):
'''
Set par2star params
'''
self.delclasses = delclasses
self.selclasses = selclasses
self.scorecutoff = scorecutoff
self.sigmacutoff = sigmacutoff
self.mlcutoff = mlcutoff
def sort_images(self):
'''
Sort based on the image name
'''
self.particle_star.sort('rlnImageName')
def read_par(self, fname):
'''
Read par file from Cistem
'''
if os.path.isfile(fname):
self.par_data_block = np.genfromtxt(fname,
skip_header=self.par_skip_rows,
dtype=self.par_dtypes,
comments='C')
def read_db(self, db_file):
'''
Read Cistem database
'''
if db_file is not None and os.path.isfile(db_file):
self.db_conn = sqlite3.connect(db_file)
else:
return
# Get refinement package info
self.ref_packages = []
self.ref_nums = []
self.orig_positions = []
# Set a db cursor
c = self.db_conn.cursor()
# Iterate over the refinement packages
for row in c.execute("select * from refinement_package_assets"):
print('Refinement ID: %d - Refinement PackageName: %s' % (row[0], row[1]))
self.ref_packages.append(row)
self.ref_nums.append(int(row[0]))
# Get reference info
while self.ref_num not in self.ref_nums:
self.ref_num = int(input('Enter the refinement package ID: '))
# Get the original position ids
for row in c.execute("select original_particle_position_asset_id from refinement_package_contained_particles_%d" % (self.ref_num)):
self.orig_positions.append(int(row[0])-1)
def select_particles(self):
'''
Select from particles based on the cistem database information
'''
if self.orig_positions is not None and len(self.orig_positions) > 0:
self.particle_star.data_block = self.particle_star.data_block.iloc[self.orig_positions]
def copy2star(self):
'''
Convert par data to star object
'''
if self.particle_star is not None and self.par_data_block is not None:
if self.particle_star.data_block.shape[0] != self.par_data_block.shape[0]:
sys.exit('Particle star and par file rows dont match. Exiting!')
# Copy the data columns from par to star
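            # SHX/SHY are divided by the pixel size (i.e. treated as Angstrom shifts) and
            # sign-flipped to match the RELION origin convention.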
self.particle_star.data_block['rlnOriginX'] = -self.par_data_block['SHX']/self.particle_apix
self.particle_star.data_block['rlnOriginY'] = -self.par_data_block['SHY']/self.particle_apix
self.particle_star.data_block['rlnAnglePsi'] = self.par_data_block['PSI']
self.particle_star.data_block['rlnAngleTilt'] = self.par_data_block['THETA']
self.particle_star.data_block['rlnAngleRot'] = self.par_data_block['PHI']
def create_write_formatter(self):
'''
Create write formatter
'''
self.par_skip_rows = 1
self.header_list = ('C', 'PSI', 'THETA', 'PHI', 'SHX', 'SHY', 'MAG', 'INCLUDE', 'DF1', 'DF2', 'ANGAST', 'PSHIFT','OCC', 'LogP', 'SIGMA', 'SCORE', 'CHANGE')
self.data_formats = ('i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'i4', 'i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'i4', 'f4', 'f4', 'f4')
self.write_header = "%-7s%8s%8s%8s%10s%10s%8s%9s%6s%9s%8s%8s%8s%10s%11s%8s%8s" % self.header_list
self.write_formatter = "%7d%8.2f%8.2f%8.2f%10.2f%10.2f%8d%6d%9.1f%9.1f%8.2f%8.2f%8.2f%10d%11.4f%8.2f%8.2f"
# Par data types
self.par_dtypes = {'names': self.header_list,
'formats': self.data_formats}
def write_output_file(self, verbose=True):
# Save file
# Print particle number info
if verbose:
print('Writing %d particles in %s' % (self.par_data.shape[0], self.particle_par_file))
np.savetxt(self.particle_par_file, | |
)
} ) )
} )
}
} )
@IsolatedYcmd( { 'global_ycm_extra_conf':
PathToTestFile( 'extra_confs', 'brace_on_same_line.py' ) } )
def test_Subcommands_Format_ExtraConf_BraceOnSameLine( self, app ):
WaitUntilCompleterServerReady( app, 'javascript' )
filepath = PathToTestFile( 'extra_confs', 'func.js' )
RunTest( app, {
      'description': 'Format with an extra conf, braces on same line',
'request': {
'command': 'Format',
'filepath': filepath,
'options': {
'tab_size': 4,
'insert_spaces': True
}
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
ChunkMatcher( ' ',
LocationMatcher( filepath, 2, 1 ),
LocationMatcher( filepath, 2, 1 ) ),
)
} ) )
} )
}
} )
@IsolatedYcmd( { 'global_ycm_extra_conf':
PathToTestFile( 'extra_confs', 'brace_on_new_line.py' ) } )
def test_Subcommands_Format_ExtraConf_BraceOnNewLine( self, app ):
WaitUntilCompleterServerReady( app, 'javascript' )
filepath = PathToTestFile( 'extra_confs', 'func.js' )
RunTest( app, {
'description': 'Format with an extra conf, braces on new line',
'request': {
'command': 'Format',
'filepath': filepath,
'options': {
'tab_size': 4,
'insert_spaces': True
}
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
ChunkMatcher( matches_regexp( '\r?\n' ),
LocationMatcher( filepath, 1, 19 ),
LocationMatcher( filepath, 1, 20 ) ),
ChunkMatcher( ' ',
LocationMatcher( filepath, 2, 1 ),
LocationMatcher( filepath, 2, 1 ) ),
)
} ) )
} )
}
} )
@SharedYcmd
def test_Subcommands_GetType( self, app ):
RunTest( app, {
'description': 'GetType works',
'request': {
'command': 'GetType',
'line_num': 14,
'column_num': 1,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': MessageMatcher( 'var foo: Foo' )
}
} )
@SharedYcmd
def test_Subcommands_GetDoc_Method( self, app ):
RunTest( app, {
'description': 'GetDoc on a method returns its docstring',
'request': {
'command': 'GetDoc',
'line_num': 31,
'column_num': 5,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'detailed_info': '(method) Bar.testMethod(): void\n\n'
'Method documentation'
} )
}
} )
@SharedYcmd
def test_Subcommands_GetDoc_Class( self, app ):
RunTest( app, {
'description': 'GetDoc on a class returns its docstring',
'request': {
'command': 'GetDoc',
'line_num': 34,
'column_num': 3,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'detailed_info': 'class Bar\n\n'
'Class documentation\n\n'
'Multi-line'
} )
}
} )
@SharedYcmd
def test_Subcommands_GoToReferences( self, app ):
RunTest( app, {
'description': 'GoToReferences works',
'request': {
'command': 'GoToReferences',
'line_num': 30,
'column_num': 5,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': contains_inanyorder(
has_entries( { 'description': 'var bar = new Bar();',
'line_num' : 30,
'column_num' : 5,
'filepath' : PathToTestFile( 'test.js' ) } ),
has_entries( { 'description': 'bar.testMethod();',
'line_num' : 31,
'column_num' : 1,
'filepath' : PathToTestFile( 'test.js' ) } ),
has_entries( { 'description': 'bar.nonExistingMethod();',
'line_num' : 32,
'column_num' : 1,
'filepath' : PathToTestFile( 'test.js' ) } ),
has_entries( { 'description': 'var bar = new Bar();',
'line_num' : 1,
'column_num' : 5,
'filepath' : PathToTestFile( 'file3.js' ) } ),
has_entries( { 'description': 'bar.testMethod();',
'line_num' : 2,
'column_num' : 1,
'filepath' : PathToTestFile( 'file3.js' ) } )
)
}
} )
@SharedYcmd
def test_Subcommands_GoToSymbol( self, app ):
for req, rep in [
( ( 'file3.js', 1, 1, 'testMethod' ),
( 'test.js', 27, 3, 'testMethod' ) ),
( ( 'file3.js', 1, 1, 'BAR' ),
[ ( 'file3.js', 1, 5, 'bar' ),
( 'test.js', 30, 5, 'bar' ),
( 'test.js', 22, 1, 'Bar' ) ] ),
( ( 'file3.js', 1, 1, 'nothinghere' ), 'Symbol not found' )
]:
with self.subTest( req = req, rep = rep ):
if isinstance( rep, tuple ):
expect = {
'response': requests.codes.ok,
'data': LocationMatcher( PathToTestFile( rep[ 0 ] ), *rep[ 1: ] )
}
elif isinstance( rep, list ):
expect = {
'response': requests.codes.ok,
'data': contains_inanyorder( *[
LocationMatcher( PathToTestFile( r[ 0 ] ), *r[ 1: ] )
for r in rep
] )
}
else:
expect = {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError, rep )
}
RunTest( app, {
'request': {
'command': 'GoToSymbol',
'arguments': [ req[ 3 ] ],
'line_num': req[ 1 ],
'column_num': req[ 2 ],
'filepath': PathToTestFile( req[ 0 ] ),
},
'expect': expect
} )
@SharedYcmd
def test_Subcommands_GoTo( self, app ):
for command in [ 'GoTo', 'GoToDefinition', 'GoToDeclaration' ]:
with self.subTest( command = command ):
Subcommands_GoTo( app, command )
@SharedYcmd
def test_Subcommands_GoToType( self, app ):
RunTest( app, {
'description': 'GoToType works',
'request': {
'command': 'GoToType',
'line_num': 11,
'column_num': 6,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': LocationMatcher( PathToTestFile( 'test.js' ), 1, 7 )
}
} )
@SharedYcmd
def test_Subcommands_GoToCallers( self, app ):
RunTest( app, {
'description': 'Basic GoToCallers works.',
'request': {
'command': 'GoToCallers',
'line_num': 27,
'column_num': 3,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': contains_inanyorder(
LocationMatcher( PathToTestFile( 'file2.js' ), 1, 11 ),
LocationMatcher( PathToTestFile( 'file3.js' ), 2, 5 ),
LocationMatcher( PathToTestFile( 'test.js' ), 31, 5 ),
)
}
} )
@SharedYcmd
def test_Subcommands_FixIt( self, app ):
filepath = PathToTestFile( 'test.js' )
RunTest( app, {
'description': 'FixIt works on a non-existing method',
'request': {
'command': 'FixIt',
'line_num': 32,
'column_num': 19,
'filepath': filepath,
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_inanyorder(
has_entries( {
'text': "Declare method 'nonExistingMethod'",
'chunks': contains_exactly(
ChunkMatcher(
matches_regexp(
'^\r?\n'
' nonExistingMethod\\(\\) {\r?\n'
' throw new Error\\("Method not implemented."\\);\r?\n'
' }$',
),
LocationMatcher( filepath, 22, 12 ),
LocationMatcher( filepath, 22, 12 ) )
),
'location': LocationMatcher( filepath, 32, 19 )
} )
)
} )
}
} )
@SharedYcmd
def test_Subcommands_OrganizeImports( self, app ):
filepath = PathToTestFile( 'imports.js' )
RunTest( app, {
'description': 'OrganizeImports removes unused imports, '
'coalesces imports from the same module, and sorts them',
'request': {
'command': 'OrganizeImports',
'filepath': filepath,
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_exactly(
ChunkMatcher(
matches_regexp(
'import \\* as lib from "library";\r?\n'
'import func, { func1, func2 } from "library";\r?\n' ),
LocationMatcher( filepath, 1, 1 ),
LocationMatcher( filepath, 2, 1 ) ),
ChunkMatcher(
'',
LocationMatcher( filepath, 5, 1 ),
LocationMatcher( filepath, 6, 1 ) ),
ChunkMatcher(
'',
LocationMatcher( filepath, 9, 1 ),
LocationMatcher( filepath, 10, 1 ) ),
)
} ) )
} )
}
} )
@SharedYcmd
def test_Subcommands_RefactorRename_Missing( self, app ):
RunTest( app, {
'description': 'RefactorRename requires a parameter',
'request': {
'command': 'RefactorRename',
'line_num': 27,
'column_num': 8,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( ValueError,
'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' )
}
} )
@SharedYcmd
def test_Subcommands_RefactorRename_NotPossible( self, app ):
RunTest( app, {
'description': 'RefactorRename cannot rename a non-existing method',
'request': {
'command': 'RefactorRename',
'arguments': [ 'whatever' ],
'line_num': 35,
'column_num': 5,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError,
'Value cannot be renamed: '
'You cannot rename this element.' )
}
} )
@SharedYcmd
def test_Subcommands_RefactorRename_Simple( self, app ):
RunTest( app, {
'description': 'RefactorRename works on a class name',
'request': {
'command': 'RefactorRename',
'arguments': [ 'test' ],
'line_num': 1,
'column_num': 7,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_inanyorder(
ChunkMatcher(
'test',
LocationMatcher( PathToTestFile( 'test.js' ), 11, 15 ),
LocationMatcher( PathToTestFile( 'test.js' ), 11, 18 ) ),
ChunkMatcher(
'test',
LocationMatcher( PathToTestFile( 'test.js' ), 1, 7 ),
LocationMatcher( PathToTestFile( 'test.js' ), 1, 10 ) ),
),
'location': LocationMatcher( PathToTestFile( 'test.js' ), 1, 7 )
} ) )
} )
}
} )
@SharedYcmd
def test_Subcommands_RefactorRename_MultipleFiles( self, app ):
RunTest( app, {
'description': 'RefactorRename works across files',
'request': {
'command': 'RefactorRename',
'arguments': [ 'this-is-a-longer-string' ],
'line_num': 22,
'column_num': 8,
'filepath': PathToTestFile( 'test.js' ),
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains_exactly( has_entries( {
'chunks': contains_inanyorder(
ChunkMatcher(
'this-is-a-longer-string',
LocationMatcher( PathToTestFile( 'test.js' ), 22, 7 ),
LocationMatcher( PathToTestFile( 'test.js' ), 22, 10 ) ),
ChunkMatcher(
'this-is-a-longer-string',
LocationMatcher( PathToTestFile( 'test.js' ), 30, 15 ),
LocationMatcher( PathToTestFile( 'test.js' ), 30, 18 ) ),
ChunkMatcher(
'this-is-a-longer-string',
LocationMatcher( PathToTestFile( 'test.js' ), 34, 1 ),
LocationMatcher( PathToTestFile( 'test.js' ), 34, 4 ) ),
ChunkMatcher(
'this-is-a-longer-string',
LocationMatcher( PathToTestFile( 'file2.js' ), 1, 5 ),
LocationMatcher( PathToTestFile( 'file2.js' ), 1, 8 ) ),
ChunkMatcher(
'this-is-a-longer-string',
LocationMatcher( PathToTestFile( 'file3.js' ), 1, 15 ),
LocationMatcher( PathToTestFile( 'file3.js' ), | |
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement, print_function
import os
import re
from rospkg import MANIFEST_FILE
from rospkg.common import ResourceNotFound
from qt_dotgraph.colors import get_color_for_string
def matches_any(name, patternlist):
for pattern in patternlist:
if name == pattern:
return True
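        # patterns containing characters other than [a-zA-Z0-9_] are treated as regular
        # expressions; plain package names were already handled by the exact match above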
if re.match("^[a-zA-Z0-9_]+$", pattern) is None:
if re.match(pattern, name) is not None:
return True
return False
class RosPackageGraphDotcodeGenerator:
def __init__(self, rospack, rosstack):
"""
:param rospack: use rospkg.RosPack()
:param rosstack: use rospkg.RosStack()
"""
self.rospack = rospack
self.rosstack = rosstack
self.stacks = {}
self.packages = {}
self.package_types = {}
self.edges = {}
self.traversed_ancestors = {}
self.traversed_descendants = {}
self.last_drawargs = None
self.last_selection = None
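    # Typical usage (sketch; the dotcode factory and the rospkg handles come from the caller):
    #   import rospkg
    #   gen = RosPackageGraphDotcodeGenerator(rospkg.RosPack(), rospkg.RosStack())
    #   dot = gen.generate_dotcode(dotcode_factory, selected_names=['rqt_graph'], depth=2)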
def generate_dotcode(self,
dotcode_factory,
selected_names=[],
excludes=[],
depth=3,
with_stacks=True,
descendants=True,
ancestors=True,
hide_transitives=True,
show_system=False,
mark_selected=True,
colortheme=None,
rank='same', # None, same, min, max, source, sink
ranksep=0.2, # vertical distance between layers
rankdir='TB', # direction of layout (TB top > bottom, LR left > right)
simplify=True, # remove double edges
force_refresh=False,
hide_wet=False,
hide_dry=False):
"""
        :param hide_transitives: if true, a child's dependency on a grandchild is hidden when the parent already has the same dependency
:param show_system: if true, then system dependencies will be shown
"""
# defaults
        selected_names = list(filter(lambda x: x is not None and x != '', selected_names))  # materialize: Python 3 filter() returns a single-use iterator
        excludes = list(filter(lambda x: x is not None and x != '', excludes))
if selected_names is None or selected_names == []:
selected_names = ['.*']
self.depth = 1
if depth is None:
depth = -1
# update arguments
selection_args = {
"dotcode_factory": dotcode_factory,
"with_stacks": with_stacks,
"depth": depth,
"hide_transitives": hide_transitives,
"show_system": show_system,
"selected_names": selected_names,
"excludes": excludes,
"ancestors": ancestors,
"descendants": descendants,
"hide_wet": hide_wet,
"hide_dry": hide_dry
}
# if selection did not change, we need not build up the graph again
selection_changed = False
if self.last_selection != selection_args:
selection_changed = True
self.last_selection = selection_args
self.dotcode_factory = dotcode_factory
self.with_stacks = with_stacks
self.depth = depth
self.hide_transitives = hide_transitives
self.show_system = show_system
self.selected_names = selected_names
self.excludes = excludes
self.ancestors = ancestors
self.descendants = descendants
self.hide_wet = hide_wet
self.hide_dry = hide_dry
if force_refresh or selection_changed:
self.stacks = {}
self.packages = {}
self.package_types = {}
self.edges = {}
self.traversed_ancestors = {}
self.traversed_descendants = {}
# update internal graph structure
for name in self.rospack.list():
if matches_any(name, self.selected_names):
if descendants:
self.add_package_descendants_recursively(name)
if ancestors:
self.add_package_ancestors_recursively(name)
for stackname in self.rosstack.list():
if matches_any(stackname, self.selected_names):
manifest = self.rosstack.get_manifest(stackname)
if manifest.is_catkin:
if descendants:
self.add_package_descendants_recursively(stackname)
if ancestors:
self.add_package_ancestors_recursively(stackname)
else:
for package_name in self.rosstack.packages_of(stackname):
if descendants:
self.add_package_descendants_recursively(package_name)
if ancestors:
self.add_package_ancestors_recursively(package_name)
drawing_args = {
'dotcode_factory': dotcode_factory,
"rank": rank,
"rankdir": rankdir,
"ranksep": ranksep,
"simplify": simplify,
"colortheme": colortheme,
"mark_selected": mark_selected
}
# if selection and display args did not change, no need to generate dotcode
display_changed = False
if self.last_drawargs != drawing_args:
display_changed = True
self.last_drawargs = drawing_args
self.dotcode_factory = dotcode_factory
self.rank = rank
self.rankdir = rankdir
self.ranksep = ranksep
self.simplify = simplify
self.colortheme = colortheme
self.dotcode_factory = dotcode_factory
self.mark_selected = mark_selected
#generate new dotcode
if force_refresh or selection_changed or display_changed:
self.graph = self.generate(self.dotcode_factory)
self.dotcode = dotcode_factory.create_dot(self.graph)
return self.dotcode
def generate(self, dotcode_factory):
graph = dotcode_factory.get_graph(rank=self.rank,
rankdir=self.rankdir,
ranksep=self.ranksep,
simplify=self.simplify)
# print("In generate", self.with_stacks, len(self.stacks), len(self.packages), len(self.edges))
packages_in_stacks = []
if self.with_stacks and not self.hide_dry:
for stackname in self.stacks:
color = None
if self.mark_selected and not '.*' in self.selected_names and matches_any(stackname, self.selected_names):
color = 'tomato'
else:
color = 'gray'
if self.colortheme is not None:
color = get_color_for_string(stackname)
g = dotcode_factory.add_subgraph_to_graph(graph,
stackname,
color=color,
rank=self.rank,
rankdir=self.rankdir,
ranksep=self.ranksep,
simplify=self.simplify)
for package_name in self.stacks[stackname]['packages']:
packages_in_stacks.append(package_name)
self._generate_package(dotcode_factory, g, package_name)
for package_name, attributes in self.packages.items():
if package_name not in packages_in_stacks:
self._generate_package(dotcode_factory, graph, package_name, attributes)
for name1, name2 in self.edges.keys():
dotcode_factory.add_edge_to_graph(graph, name1, name2)
return graph
def _generate_package(self, dotcode_factory, graph, package_name, attributes=None):
if self._hide_package(package_name):
return
color = None
if self.mark_selected and not '.*' in self.selected_names and matches_any(package_name, self.selected_names):
if attributes and attributes['is_catkin']:
color = 'red'
else:
color = 'tomato'
elif attributes and not attributes['is_catkin']:
color = 'gray'
if attributes and 'not_found' in attributes and attributes['not_found']:
color = 'orange'
package_name += ' ?'
dotcode_factory.add_node_to_graph(graph, package_name, color=color)
def _add_stack(self, stackname):
if stackname is None or stackname in self.stacks:
return
self.stacks[stackname] = {'packages': []}
def _add_package(self, package_name, parent=None):
"""
adds object based on package_name to self.packages
:param parent: packagename which referenced package_name (for debugging only)
"""
if self._hide_package(package_name):
return
if package_name in self.packages:
return False
catkin_package = self._is_package_wet(package_name)
if catkin_package is None:
return False
self.packages[package_name] = {'is_catkin': catkin_package}
if self.with_stacks:
try:
stackname = self.rospack.stack_of(package_name)
except ResourceNotFound as e:
print('RosPackageGraphDotcodeGenerator._add_package(%s), parent %s: ResourceNotFound:' % (package_name, parent), e)
stackname = None
            if stackname is not None and stackname != '':
                if stackname not in self.stacks:
self._add_stack(stackname)
self.stacks[stackname]['packages'].append(package_name)
return True
def _hide_package(self, package_name):
if not self.hide_wet and not self.hide_dry:
return False
catkin_package = self._is_package_wet(package_name)
if self.hide_wet and catkin_package:
return True
if self.hide_dry and catkin_package is False:
return True
# if type of package is unknown don't hide it
return False
def _is_package_wet(self, package_name):
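        # Note (added): "wet" packages are catkin packages, detected by the absence
        # of a rosbuild manifest.xml in the package directory; "dry" packages are
        # legacy rosbuild packages. Returns None when the package cannot be found.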
if package_name not in self.package_types:
try:
package_path = self.rospack.get_path(package_name)
manifest_file = os.path.join(package_path, MANIFEST_FILE)
self.package_types[package_name] = not os.path.exists(manifest_file)
except ResourceNotFound:
return None
return self.package_types[package_name]
def _add_edge(self, name1, name2, attributes=None):
if self._hide_package(name1) or self._hide_package(name2):
return
self.edges[(name1, name2)] = attributes
def add_package_ancestors_recursively(self, package_name, expanded_up=None, depth=None, implicit=False, parent=None):
"""
:param package_name: the name of package for which to add ancestors
:param expanded_up: names that have already been expanded (to avoid cycles)
:param depth: how many layers to follow
:param implicit: arg to rospack
:param parent: package that referenced package_name for error message only
"""
if package_name in self.traversed_ancestors:
traversed_depth = self.traversed_ancestors[package_name]
if traversed_depth is None:
return
if depth is not None and traversed_depth >= depth:
return
self.traversed_ancestors[package_name] = depth
if matches_any(package_name, self.excludes):
return False
        if depth == 0:
            return False
        if depth is None:
            depth = self.depth
self._add_package(package_name, parent=parent)
if expanded_up is None:
expanded_up = []
expanded_up.append(package_name)
if (depth != 1):
try:
depends_on = self.rospack.get_depends_on(package_name, implicit=implicit)
except ResourceNotFound as e:
print('RosPackageGraphDotcodeGenerator.add_package_ancestors_recursively(%s), parent %s: ResourceNotFound:' % (package_name, parent), e)
depends_on = []
new_nodes = []
for dep_on_name in [x for x in depends_on if not matches_any(x, self.excludes)]:
if not self.hide_transitives or not dep_on_name in expanded_up:
new_nodes.append(dep_on_name)
self._add_edge(dep_on_name, package_name)
self._add_package(dep_on_name, parent=package_name)
expanded_up.append(dep_on_name)
for dep_on_name in new_nodes:
self.add_package_ancestors_recursively(package_name=dep_on_name,
expanded_up=expanded_up,
depth=depth - 1,
implicit=implicit,
parent=package_name)
def add_package_descendants_recursively(self, package_name, expanded=None, depth=None, implicit=False, parent=None):
if package_name in self.traversed_descendants:
traversed_depth = self.traversed_descendants[package_name]
if traversed_depth is None:
return
if depth is not None and traversed_depth >= depth:
return
self.traversed_descendants[package_name] = depth
if matches_any(package_name, self.excludes):
return
        if depth == 0:
            return
        if depth is None:
            depth = self.depth
self._add_package(package_name, parent=parent)
if expanded is None:
expanded = []
expanded.append(package_name)
if (depth != 1):
try:
try:
depends = self.rospack.get_depends(package_name, implicit=implicit)
except ResourceNotFound:
# try falling back to rosstack to find wet metapackages
manifest = self.rosstack.get_manifest(package_name)
if manifest.is_catkin:
                        depends = [d.name for d in manifest.depends]
# File: IRIS_data_download/IRIS_download_support/obspy/signal/tests/test_quality_control.py
# -*- coding: utf-8 -*-
"""
The Quality Control test suite.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import os
import unittest
import numpy as np
import obspy
from obspy.core.util.base import NamedTemporaryFile, get_dependency_version
# A bit wild to import a utility function from another test suite ...
from obspy.io.mseed.tests.test_mseed_util import _create_mseed_file
from obspy.signal.quality_control import MSEEDMetadata
try:
import jsonschema # NOQA
# 1.0.0 is the first version with full $ref support.
if get_dependency_version("jsonschema") < [1, 0, 0]:
HAS_JSONSCHEMA = False
else:
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
class QualityControlTestCase(unittest.TestCase):
"""
Test cases for Quality Control.
"""
def setUp(self):
# Directory where the test files are located
self.path = os.path.join(os.path.dirname(__file__), "data")
def test_no_files_given(self):
"""
Tests the raised exception if no file is given.
"""
with self.assertRaises(ValueError) as e:
MSEEDMetadata(files=[])
self.assertEqual(e.exception.args[0],
"No data within the temporal constraints.")
def test_gaps_and_overlaps(self):
"""
Test gaps and overlaps.
"""
# Create a file. No gap between 1 and 2, 10 second gap between 2 and
# 3, 5 second overlap between 3 and 4, and another 10 second gap
# between 4 and 5.
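        # Added note on the expected numbers: data covers 0-20, 30-45 and 55-65 s,
        # i.e. 45 s of data over a 65 s span, with 20 s of gaps (20-30 and 45-55)
        # and a 5 s overlap (35-40), which is what the assertions below check.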
tr_1 = obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(0)})
tr_2 = obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(10)})
tr_3 = obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(30)})
tr_4 = obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(35)})
tr_5 = obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(55)})
st = obspy.Stream(traces=[tr_1, tr_2, tr_3, tr_4, tr_5])
with NamedTemporaryFile() as tf:
st.write(tf.name, format="mseed")
mseed_metadata = MSEEDMetadata(files=[tf.name])
self.assertEqual(mseed_metadata.meta['num_gaps'], 2)
self.assertEqual(mseed_metadata.meta['num_overlaps'], 1)
self.assertEqual(mseed_metadata.meta['sum_overlaps'], 5.0)
self.assertEqual(mseed_metadata.meta['sum_gaps'], 20.0)
self.assertEqual(mseed_metadata.meta['percent_availability'],
45.0 / 65.0 * 100.0)
# Same again but this time with start-and end time settings.
mseed_metadata = MSEEDMetadata(
files=[tf.name], starttime=obspy.UTCDateTime(5),
endtime=obspy.UTCDateTime(60))
self.assertEqual(mseed_metadata.meta['num_gaps'], 2)
self.assertEqual(mseed_metadata.meta['num_overlaps'], 1)
self.assertEqual(mseed_metadata.meta['sum_overlaps'], 5.0)
self.assertEqual(mseed_metadata.meta['sum_gaps'], 20.0)
self.assertEqual(mseed_metadata.meta['percent_availability'],
35.0 / 55.0 * 100.0)
# Head and tail gaps.
mseed_metadata = MSEEDMetadata(
files=[tf.name], starttime=obspy.UTCDateTime(-10),
endtime=obspy.UTCDateTime(80))
self.assertEqual(mseed_metadata.meta['num_gaps'], 4)
self.assertEqual(mseed_metadata.meta['num_overlaps'], 1)
self.assertEqual(mseed_metadata.meta['sum_overlaps'], 5.0)
self.assertEqual(mseed_metadata.meta['sum_gaps'], 45.0)
self.assertEqual(mseed_metadata.meta['percent_availability'],
45.0 / 90.0 * 100.0)
# Tail gap must be larger than 1 delta, otherwise it does not
# count.
mseed_metadata = MSEEDMetadata(files=[tf.name],
endtime=obspy.UTCDateTime(64))
self.assertEqual(mseed_metadata.meta['num_gaps'], 2)
self.assertEqual(mseed_metadata.meta['sum_gaps'], 20.0)
self.assertEqual(mseed_metadata.meta['percent_availability'],
44.0 / 64.0 * 100.0)
mseed_metadata = MSEEDMetadata(files=[tf.name],
endtime=obspy.UTCDateTime(65))
self.assertEqual(mseed_metadata.meta['num_gaps'], 2)
self.assertEqual(mseed_metadata.meta['sum_gaps'], 20.0)
self.assertEqual(mseed_metadata.meta['percent_availability'],
45.0 / 65.0 * 100.0)
mseed_metadata = MSEEDMetadata(files=[tf.name],
endtime=obspy.UTCDateTime(66))
self.assertEqual(mseed_metadata.meta['num_gaps'], 3)
self.assertEqual(mseed_metadata.meta['sum_gaps'], 21.0)
self.assertEqual(mseed_metadata.meta['percent_availability'],
45.0 / 66.0 * 100.0)
def test_raise_unmatching_ids(self):
"""
Test error raised for multiple stream identifiers
"""
with NamedTemporaryFile() as tf1, NamedTemporaryFile() as tf2:
obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(0),
"network": "NL", "station": "HGN",
"location": "02", "channel": "BHZ"}).write(
tf1.name, format="mseed", encoding="STEIM1", reclen=256)
obspy.Trace(data=np.arange(10, dtype=np.float32),
header={"starttime": obspy.UTCDateTime(100),
"sampling_rate": 2.0, "network": "BW",
"station": "ALTM", "location": "00",
"channel": "EHE"}).write(
tf2.name, format="mseed", encoding="FLOAT32", reclen=1024)
with self.assertRaises(ValueError) as e:
MSEEDMetadata([tf1.name, tf2.name])
self.assertEqual(e.exception.args[0],
"All traces must have the same SEED id and quality.")
def test_gaps_between_multiple_files(self):
"""
Test gap counting between multiple files. Simple test but there is
no effective difference between having multiple files and a single
one with many Traces as internally it is all parsed to a single
Stream object.
"""
with NamedTemporaryFile() as tf1, NamedTemporaryFile() as tf2:
# Two files, same ids but a gap in-between.
obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(0)}).write(
tf1.name, format="mseed")
obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(100)}).write(
tf2.name, format="mseed")
# Don't calculate statistics on the single segments.
mseed_metadata = MSEEDMetadata([tf1.name, tf2.name],
add_c_segments=False)
self.assertEqual(mseed_metadata.meta['num_gaps'], 1)
self.assertNotIn("c_segments", mseed_metadata.meta)
def test_file_with_no_timing_quality(self):
"""
Tests timing quality extraction in files with no timing quality.
"""
with NamedTemporaryFile() as tf1:
obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(0)}).write(
tf1.name, format="mseed")
mseed_metadata = MSEEDMetadata([tf1.name], add_flags=True)
ref = mseed_metadata.meta['miniseed_header_percentages']
self.assertEqual(ref['timing_quality_max'],
None)
self.assertEqual(ref['timing_quality_min'],
None)
self.assertEqual(ref['timing_quality_mean'],
None)
def test_extraction_of_basic_mseed_headers(self):
"""
Tests extraction of basic features.
"""
# Mixed files.
with NamedTemporaryFile() as tf1, NamedTemporaryFile() as tf2:
obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(0),
"network": "BW", "station": "ALTM",
"location": "00", "channel": "EHE"}).write(
tf1.name, format="mseed", encoding="STEIM1", reclen=256)
obspy.Trace(data=np.arange(10, dtype=np.float32),
header={"starttime": obspy.UTCDateTime(100),
"sampling_rate": 2.0, "network": "BW",
"station": "ALTM", "location": "00",
"channel": "EHE"}).write(
tf2.name, format="mseed", encoding="FLOAT32", reclen=1024)
md = MSEEDMetadata([tf1.name, tf2.name], add_flags=True)
self.assertEqual(md.meta["network"], "BW")
self.assertEqual(md.meta["station"], "ALTM")
self.assertEqual(md.meta["location"], "00")
self.assertEqual(md.meta["channel"], "EHE")
self.assertEqual(md.meta["quality"], "D")
self.assertEqual(md.meta["start_time"], obspy.UTCDateTime(0))
self.assertEqual(md.meta["end_time"],
obspy.UTCDateTime(105))
self.assertEqual(md.meta["num_records"], 2)
self.assertEqual(md.meta["num_samples"], 20)
self.assertEqual(md.meta["sample_rate"], [1.0, 2.0])
self.assertEqual(md.meta["record_length"], [256, 1024])
self.assertEqual(md.meta["encoding"], ["FLOAT32", "STEIM1"])
def test_extraction_header_flags_complex(self):
"""
Tests the flag extraction in a complex record situation
Three records, with records 2 & 3 two overlapping 50% and
a 50% record length gap between record 1 and 2.
Rules for overlaps with different bits are as follows:
Records are sorted from end to start by endtime and processed
in this order. Each consecutive record occupies a time-range
that can no longer be used by another record. In the following
example, the third record is dominant over the second record
because it is processed first and occupies the time range.
Therefore the bit in this entire range is set to 1, despite
partially overlapping with a record with its bit set to 0
Bits in the test are set as shown
[ ==1== ]
[ ==1== ]...[ ==0== ]
| |
START END
25 125
[RECORD 1 (1)] = 0 - 50 [clock_locked: 1]
[RECORD 2 (0)] = 75 - 125 [clock_locked: 0]
[RECORD 3 (1)] = 100 - 150 [clock_locked: 1]
With starttime = 25 and endtime = 125
The clock_locked percentage should thus be exactly 50.0%
"""
# Couldn't properly break this line following PEP8 so use
# shorter notation .....
short = NamedTemporaryFile
with short() as tf1, short() as tf2, short() as tf3:
_create_mseed_file(tf1.name, record_count=1,
starttime=obspy.UTCDateTime(0),
seed=12345, flags={
'io_and_clock_flags': {
"clock_locked": 1}})
_create_mseed_file(tf2.name, record_count=1,
starttime=obspy.UTCDateTime(75),
seed=12345, flags={
'io_and_clock_flags': {
"clock_locked": 0}})
_create_mseed_file(tf3.name, record_count=1,
starttime=obspy.UTCDateTime(100),
seed=12345, flags={
'io_and_clock_flags': {
"clock_locked": 1}})
md = MSEEDMetadata([tf1.name, tf2.name, tf3.name],
starttime=obspy.UTCDateTime(25),
endtime=obspy.UTCDateTime(125), add_flags=True)
io_f = md.meta["miniseed_header_percentages"]["io_and_clock_flags"]
self.assertEqual(io_f["clock_locked"], 50.0)
def test_extraction_fixed_header_flags(self):
# Had to put positive_leap count to 0 to prevent
# end time from being wrong
with NamedTemporaryFile() as tf1, NamedTemporaryFile() as tf2:
_create_mseed_file(tf1.name, record_count=35,
starttime=obspy.UTCDateTime(0),
seed=12345, flags={
'data_quality_flags': {
"amplifier_saturation": 25,
"digitizer_clipping": 12,
"spikes": 30,
"glitches": 6,
"missing_padded_data": 15,
"telemetry_sync_error": 16,
"digital_filter_charging": 4,
"suspect_time_tag": 8},
'activity_flags': {
"calibration_signal": 10,
"time_correction_applied": 20,
"event_begin": 33,
"event_end": 33,
"positive_leap": 0,
"negative_leap": 10,
"event_in_progress": 15},
'io_and_clock_flags': {
"station_volume": 8,
"long_record_read": 33,
"short_record_read": 24,
"start_time_series": 31,
"end_time_series": 24,
"clock_locked": 32}})
# Previous file ends exactly on 1750, start new file
# to prevent overlapping records. When records overlap
# their contributions should NOT be summed
_create_mseed_file(tf2.name, record_count=23,
starttime=obspy.UTCDateTime(1750),
seed=12345, flags={
'data_quality_flags': {
"amplifier_saturation": 5,
"digitizer_clipping": 7,
"spikes": 5,
"glitches": 3,
"missing_padded_data": 5,
"telemetry_sync_error": 3,
"digital_filter_charging": 4,
"suspect_time_tag": 2},
'activity_flags': {
"calibration_signal": 1,
"time_correction_applied": 0,
"event_begin": 3,
"event_end": 3,
"positive_leap": 0,
"negative_leap": 1,
"event_in_progress": 5},
'io_and_clock_flags': {
"station_volume": 1,
"long_record_read": 3,
"short_record_read": 2,
"start_time_series": 3,
"end_time_series": 4,
"clock_locked": 2}})
md = MSEEDMetadata([tf1.name, tf2.name], add_flags=True)
def _assert_float_equal(a, b):
"""
Supplementary function to test floats to precision of 1E-6
"""
self.assertTrue(abs(a - b) < 1E-6)
# Sum up contributions from both files.
# Check percentages
meta = md.meta['miniseed_header_counts']
meta_dq = meta['data_quality_flags']
self.assertEqual(meta_dq['glitches'], 9)
self.assertEqual(meta_dq['amplifier_saturation'], 30)
self.assertEqual(meta_dq['digital_filter_charging'], 8)
self.assertEqual(meta_dq['digitizer_clipping'], 19)
self.assertEqual(meta_dq['missing_padded_data'], 20)
self.assertEqual(meta_dq['spikes'], 35)
self.assertEqual(meta_dq['suspect_time_tag'], 10)
self.assertEqual(meta_dq['telemetry_sync_error'], 19)
meta_af = meta['activity_flags']
self.assertEqual(meta_af['calibration_signal'], 11)
self.assertEqual(meta_af['event_begin'], 36)
self.assertEqual(meta_af['event_end'], 36)
self.assertEqual(meta_af['event_in_progress'], 20)
self.assertEqual(meta_af['time_correction_applied'], 20)
meta_io = meta['io_and_clock_flags']
self.assertEqual(meta_io['clock_locked'], 34)
self.assertEqual(meta_io['station_volume'], 9)
self.assertEqual(meta_io['long_record_read'], 36)
self.assertEqual(meta_io['short_record_read'], 26)
self.assertEqual(meta_io['start_time_series'], 34)
self.assertEqual(meta_io['end_time_series'], 28)
meta = md.meta['miniseed_header_percentages']
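            # 35 + 23 = 58 records in total, so each expected percentage below is
            # count / 58 * 100, i.e. count / 0.58.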
meta_dq = meta['data_quality_flags']
_assert_float_equal(meta_dq['glitches'], 9 / 0.58)
_assert_float_equal(meta_dq['amplifier_saturation'], 30 / 0.58)
_assert_float_equal(meta_dq['digital_filter_charging'], 8 / 0.58)
_assert_float_equal(meta_dq['digitizer_clipping'], 19 / 0.58)
_assert_float_equal(meta_dq['missing_padded_data'], 20 / 0.58)
_assert_float_equal(meta_dq['spikes'], 35 / 0.58)
_assert_float_equal(meta_dq['suspect_time_tag'], 10 / 0.58)
_assert_float_equal(meta_dq['telemetry_sync_error'], 19 / 0.58)
meta_af = meta['activity_flags']
_assert_float_equal(meta_af['calibration_signal'], 11 / 0.58)
_assert_float_equal(meta_af['event_begin'], 36 / 0.58)
_assert_float_equal(meta_af['event_end'], 36 / 0.58)
_assert_float_equal(meta_af['event_in_progress'], 20 / 0.58)
_assert_float_equal(meta_af['time_correction_applied'], 20 / 0.58)
meta_io = meta['io_and_clock_flags']
_assert_float_equal(meta_io['clock_locked'], 34 / 0.58)
_assert_float_equal(meta_io['station_volume'], 9 / 0.58)
_assert_float_equal(meta_io['long_record_read'], 36 / 0.58)
_assert_float_equal(meta_io['short_record_read'], 26 / 0.58)
_assert_float_equal(meta_io['start_time_series'], 34 / 0.58)
_assert_float_equal(meta_io['end_time_series'], 28 / 0.58)
ref = md.meta['miniseed_header_percentages']
self.assertEqual(ref['timing_quality_mean'], None)
self.assertEqual(ref['timing_quality_min'], None)
self.assertEqual(ref['timing_quality_max'], None)
def test_timing_quality(self):
"""
Test extraction of timing quality with a file that actually has it.
"""
        # Test file is constructed and originally from the obspy.io.mseed
        # test suite.
md = MSEEDMetadata(files=[os.path.join(self.path,
"timingquality.mseed")],
add_flags=True)
ref = md.meta['miniseed_header_percentages']
self.assertEqual(ref['timing_quality_mean'], 50.0)
self.assertEqual(ref['timing_quality_min'], 0.0)
self.assertEqual(ref['timing_quality_max'], 100.0)
self.assertEqual(ref['timing_quality_median'], 50.0)
self.assertEqual(ref['timing_quality_lower_quartile'], 25.0)
self.assertEqual(ref['timing_quality_upper_quartile'], 75.0)
def test_overall_sample_metrics(self):
"""
Tests the global metrics on the samples.
"""
with NamedTemporaryFile() as tf:
obspy.Trace(data=np.arange(10, dtype=np.int32),
header={"starttime": obspy.UTCDateTime(0)}).write(
tf.name, format="mseed")
md = MSEEDMetadata(files=[tf.name])
            self.assertEqual(md.meta["sample_min"], 0)
# -*- coding: utf-8 -*-
"""
Contains the crawling logic.
"""
from __future__ import unicode_literals, absolute_import
import base64
from collections import defaultdict
import logging
import sys
import time
from pylinkvalidator.included.bs4 import BeautifulSoup, UnicodeDammit
import pylinkvalidator.compat as compat
from pylinkvalidator.compat import (
range, HTTPError, get_url_open, unicode,
get_content_type, get_url_request, get_charset)
from pylinkvalidator.models import (
Config, WorkerInit, Response, PageCrawl,
ExceptionStr, Link, SitePage, WorkerInput, TYPE_ATTRIBUTES, HTML_MIME_TYPE,
MODE_THREAD, MODE_PROCESS, MODE_GREEN, WHEN_ALWAYS, UTF8Class,
PageStatus, PageSource, PAGE_QUEUED, PAGE_CRAWLED, VERBOSE_QUIET,
VERBOSE_NORMAL, LazyLogParam, PREFIX_ALL)
from pylinkvalidator.reporter import report
from pylinkvalidator.urlutil import (
get_clean_url_split, get_absolute_url_split,
is_link, is_similar_url_split, is_supported_scheme)
WORK_DONE = '__WORK_DONE__'
def get_logger(propagate=False):
"""Returns a logger."""
root_logger = logging.getLogger()
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
if root_logger.level != logging.CRITICAL:
logger.addHandler(handler)
logger.propagate = propagate
else:
logger.addHandler(compat.NullHandler())
return logger
class SiteCrawler(object):
"""Main crawler/orchestrator"""
def __init__(self, config, logger):
self.config = config
self.start_url_splits = list(config.start_url_splits)
self.workers = []
self.input_queue = self.build_queue(config)
self.output_queue = self.build_queue(config)
self.logger = logger
self.site = Site(self.start_url_splits, config, self.logger)
def build_logger(self):
return self.logger
def crawl(self):
worker_init = WorkerInit(
self.config.worker_config, self.input_queue,
self.output_queue, self.build_logger())
self.workers = self.get_workers(self.config, worker_init)
queue_size = len(self.start_url_splits)
for start_url_split in self.start_url_splits:
self.input_queue.put(
WorkerInput(
start_url_split, True, 0, start_url_split.netloc,
self.config.content_check),
False)
self.start_workers(self.workers, self.input_queue, self.output_queue)
self.start_progress()
while True:
page_crawl = self.output_queue.get()
queue_size -= 1
new_worker_inputs = self.process_page_crawl(page_crawl)
# We only process new pages if we did not exceed configured depth
for worker_input in new_worker_inputs:
queue_size += 1
self.input_queue.put(worker_input, False)
self.progress(page_crawl, len(self.site.pages), queue_size)
if queue_size <= 0:
self.stop_workers(self.workers, self.input_queue,
self.output_queue)
self.stop_progress()
return self.site
def start_progress(self):
if self.config.options.progress:
print("Starting crawl...")
def stop_progress(self):
if self.config.options.progress:
print("Crawling Done...\n")
def progress(self, page_crawl, done_size, queue_size):
if not self.config.options.progress:
return
total = done_size + queue_size
percent = float(done_size) / float(total) * 100.0
url = ""
if page_crawl.final_url_split:
url = page_crawl.final_url_split.geturl()
elif page_crawl.original_url_split:
url = page_crawl.original_url_split.geturl()
status = page_crawl.status
if not status:
status = "error"
print("{0} - {1} ({2} of {3} - {4:.0f}%)".format(
status, url, done_size, total, percent))
def build_queue(self, config):
"""Returns an object implementing the Queue interface."""
raise NotImplementedError()
def get_workers(self, config, worker_init):
"""Returns a sequence of workers of the desired type."""
raise NotImplementedError()
def start_workers(self, workers, input_queue, output_queue):
"""Start the workers."""
raise NotImplementedError()
def stop_workers(self, workers, input_queue, output_queue):
"""Stops the workers."""
for worker in workers:
input_queue.put(WORK_DONE)
def process_page_crawl(self, page_crawl):
"""Returns a sequence of SplitResult to crawl."""
return self.site.add_crawled_page(page_crawl)
class ThreadSiteCrawler(SiteCrawler):
"""Site Crawler with thread workers."""
def build_queue(self, config):
return compat.Queue.Queue()
def get_workers(self, config, worker_init):
from threading import Thread
workers = []
for _ in range(config.worker_size):
workers.append(
Thread(target=crawl_page, kwargs={'worker_init': worker_init}))
return workers
def start_workers(self, workers, input_queue, output_queue):
for worker in workers:
worker.start()
class ProcessSiteCrawler(SiteCrawler):
"""Site Crawler with process workers."""
def __init__(self, *args, **kwargs):
import multiprocessing
self.manager = multiprocessing.Manager()
self.ProcessClass = multiprocessing.Process
super(ProcessSiteCrawler, self).__init__(*args, **kwargs)
def build_logger(self):
"""We do not want to share a logger."""
return None
def build_queue(self, config):
return self.manager.Queue()
def get_workers(self, config, worker_init):
workers = []
for _ in range(config.worker_size):
workers.append(self.ProcessClass(
target=crawl_page, kwargs={'worker_init': worker_init}))
return workers
def start_workers(self, workers, input_queue, output_queue):
for worker in workers:
worker.start()
class GreenSiteCrawler(SiteCrawler):
"""Site Crawler with green thread workers."""
def __init__(self, *args, **kwargs):
from gevent import monkey, queue, Greenlet
        # TODO thread=false should be used to remove a useless exception,
        # but weird behavior sometimes happens when it is not patched...
monkey.patch_all()
self.QueueClass = queue.Queue
self.GreenClass = Greenlet
super(GreenSiteCrawler, self).__init__(*args, **kwargs)
def build_queue(self, config):
return self.QueueClass()
def get_workers(self, config, worker_init):
workers = []
for _ in range(config.worker_size):
workers.append(self.GreenClass(
crawl_page, worker_init=worker_init))
return workers
def start_workers(self, workers, input_queue, output_queue):
for worker in workers:
worker.start()
class PageCrawler(object):
"""Worker that parses a page and extracts links"""
def __init__(self, worker_init):
self.worker_config = worker_init.worker_config
self.input_queue = worker_init.input_queue
self.output_queue = worker_init.output_queue
self.urlopen = get_url_open()
self.request_class = get_url_request()
self.logger = worker_init.logger
if not self.logger:
# Get a new one!
self.logger = get_logger()
# We do this here to allow patching by gevent
import socket
self.timeout_exception = socket.timeout
self.auth_header = None
if self.worker_config.username and self.worker_config.password:
base64string = unicode(
base64.encodestring(
'{0}:{1}'.format(
self.worker_config.username,
self.worker_config.password)
.encode("utf-8")), "utf-8")
self.auth_header = ("Authorization",
"Basic {0}".format(base64string))
def crawl_page_forever(self):
"""Starts page crawling loop for this worker."""
while True:
worker_input = self.input_queue.get()
if worker_input == WORK_DONE:
# No more work! Pfew!
return
else:
page_crawl = self._crawl_page(worker_input)
self.output_queue.put(page_crawl)
def _crawl_page(self, worker_input):
page_crawl = None
erroneous_content = []
missing_content = []
url_split_to_crawl = worker_input.url_split
try:
response = open_url(
self.urlopen, self.request_class,
url_split_to_crawl.geturl(), self.worker_config.timeout,
self.timeout_exception, self.auth_header,
extra_headers=self.worker_config.extra_headers,
logger=self.logger)
if response.exception:
if response.status:
# This is a http error. Good.
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=response.status,
is_timeout=False, is_redirect=False, links=[],
exception=None, is_html=False,
depth=worker_input.depth,
response_time=response.response_time,
process_time=None,
site_origin=worker_input.site_origin)
elif response.is_timeout:
# This is a timeout. No need to wrap the exception
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=None,
is_timeout=True, is_redirect=False, links=[],
exception=None, is_html=False,
depth=worker_input.depth,
response_time=response.response_time,
process_time=0,
site_origin=worker_input.site_origin)
else:
# Something bad happened when opening the url
exception = ExceptionStr(
unicode(type(response.exception)),
unicode(response.exception))
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=None,
is_timeout=False, is_redirect=False, links=[],
exception=exception, is_html=False,
depth=worker_input.depth,
response_time=response.response_time,
process_time=0,
site_origin=worker_input.site_origin)
else:
final_url_split = get_clean_url_split(response.final_url)
message = response.content.info()
mime_type = get_content_type(message)
if self.worker_config.prefer_server_encoding:
charset = get_charset(message)
else:
charset = None
links = []
is_html = mime_type == HTML_MIME_TYPE
process_time = None
if is_html and worker_input.should_crawl:
start = time.time()
html_soup = BeautifulSoup(
response.content, self.worker_config.parser,
from_encoding=charset)
links = self.get_links(html_soup, final_url_split)
if self._has_content_to_check(worker_input):
(missing_content, erroneous_content) =\
self.check_content(
unicode(html_soup), html_soup,
url_split_to_crawl,
final_url_split, worker_input.content_check)
process_time = time.time() - start
else:
self.logger.debug(
"Won't crawl %s. MIME Type: %s. Should crawl: %s",
final_url_split, mime_type,
worker_input.should_crawl)
if self._has_content_to_check(worker_input):
text_content = self.get_text_content(
response.content.read(), charset)
(missing_content, erroneous_content) =\
self.check_content(
text_content, None, url_split_to_crawl,
final_url_split, worker_input.content_check)
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=final_url_split, status=response.status,
is_timeout=False, is_redirect=response.is_redirect,
links=links, exception=None, is_html=is_html,
depth=worker_input.depth,
response_time=response.response_time,
process_time=process_time,
site_origin=worker_input.site_origin,
missing_content=missing_content,
erroneous_content=erroneous_content)
except Exception as exc:
exception = ExceptionStr(unicode(type(exc)), unicode(exc))
page_crawl = PageCrawl(
original_url_split=url_split_to_crawl,
final_url_split=None, status=None,
is_timeout=False, is_redirect=False, links=[],
exception=exception, is_html=False,
depth=worker_input.depth,
response_time=None,
process_time=None,
site_origin=worker_input.site_origin)
self.logger.exception("Exception occurred while crawling a page.")
return page_crawl
def _has_content_to_check(self, worker_input):
return worker_input.content_check and\
worker_input.content_check.has_something_to_check
def get_text_content(self, binary_blob, charset):
"""Retrieves unicode content from response binary blob.
"""
override_encodings = []
if charset:
override_encodings.append(charset)
return UnicodeDammit(binary_blob, override_encodings).unicode_markup
def check_content(
self, response_content, html_soup, original_url_split,
final_url_split, content_check):
"""Ensures that the specified content is present (or absent).
"""
missing_content = []
erroneous_content = []
if html_soup:
for content, found in self.check_html_content_single(
content_check.html_presence, html_soup, original_url_split,
final_url_split):
if not found:
missing_content.append(content)
if html_soup:
for content, found in self.check_html_content_single(
content_check.html_absence, html_soup, original_url_split,
final_url_split):
if found:
erroneous_content.append(content)
for content, found in self.check_text_content_single(
content_check.text_presence, response_content,
original_url_split, final_url_split):
if not found:
missing_content.append(content)
for content, found in self.check_text_content_single(
content_check.text_absence, response_content,
original_url_split, final_url_split):
if found:
erroneous_content.append(content)
return (missing_content, erroneous_content)
def check_html_content_single(
self, html_to_check, html_soup, original_url_split,
final_url_split):
"""Returns a list of tuple (content, presence) indicating whether an
html tag was present or not in the source.
"""
content = []
for key, html_check_list in html_to_check.items():
if key == PREFIX_ALL or\
is_similar_url_split(key, original_url_split) or\
is_similar_url_split(key, final_url_split):
# we check
for html_check in html_check_list:
kwargs = {}
if html_check.attrs:
kwargs["attrs"] = html_check.attrs
if html_check.content:
# XXX Use text because the included bs4 does not use
# the new string parameter and text is backward
# compatible.
kwargs["text"] = html_check.content
found = html_soup.find(
html_check.tag, **kwargs) is not None
content.append((str(html_check), found))
return content
def check_text_content_single(
self, text_content_to_check, full_text, original_url_split,
final_url_split):
"""Returns a list of tuple (content, presence) indicating whether an
html tag was present or not in the source.
"""
content = []
for key, text_check_list in text_content_to_check.items():
if key == PREFIX_ALL or\
is_similar_url_split(key, original_url_split) or\
is_similar_url_split(key, final_url_split):
# we check
for text_check in text_check_list:
try:
match = text_check.search(full_text)
content.append((text_check.pattern, match is not None))
except AttributeError:
found = text_check in full_text
content.append((text_check, found))
return content
def get_links(self, html_soup, original_url_split):
"""Gets links for desired types (e.g., a, link, img, script)
:param html_soup: The page parsed by BeautifulSoup
:param original_url_split: The URL of the page used to resolve relative
links.
:rtype: A sequence of Link objects
"""
# This is a weird html tag that defines the base URL of a page.
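        # e.g. <base href="http://example.com/sub/"> makes relative links on the
        # page resolve against /sub/ rather than against the page's own URL.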
base_url_split = original_url_split
bases = html_soup.find_all('base')
if bases:
base = bases[0]
if 'href' in base.attrs:
base_url_split = get_clean_url_split(base['href'])
links = []
for element_type in self.worker_config.types:
if element_type not in TYPE_ATTRIBUTES:
raise Exception(
"Unknown element type: {0}".format(element_type))
attribute = TYPE_ATTRIBUTES[element_type]
element_links = html_soup.find_all(element_type)
links.extend(self._get_links(
element_links, attribute, base_url_split, original_url_split))
return links
# File: MachineLearning/dtaidistance/dtw_weighted.py (from repo monferrand/scipy_con_2019)
# -*- coding: UTF-8 -*-
"""
dtaidistance.dtw_weighted
~~~~~~~~~~~~~~~~~~~~~~~~~
Dynamic Time Warping (DTW) with custom internal distance function.
:author: <NAME>
:copyright: Copyright 2018 KU Leuven, DTAI Research Group.
:license: Apache License, Version 2.0, see LICENSE for details.
Weights are represented using a tuple (-x3, -x2, -x1, -x0, x0, x1, x2, x3).
The distance d, used in DTW, is multiplied with factor w(d):
.. code-block::
^
w(d)|
| /
3| +
| /
| /
1| +--+
| /
0+----+--------------->
0 x0 x1 x2 x3 d
The negative and positive values are used to distinguish between negative and
positive distances, i.e. whether the series being compared is above or below
the target series at that point.
"""
import logging
import math
from collections import defaultdict, deque
import io
import numpy as np
from matplotlib import pyplot as plt
from .dtw import best_path
logger = logging.getLogger("be.kuleuven.dtai.distance")
try:
from tqdm import tqdm
except ImportError:
logger.info('tqdm library not available')
tqdm = None
def warping_paths(s1, s2, weights=None, window=None, **_kwargs):
"""
Input: s1 and s2 are time series of length n/l1 and m/l2
:param s1:
:param s2:
:param weights: Weights on s1
    :return: DTW distance between s1 and s2, and the accumulated warping paths matrix
"""
# TODO: copy original function in DTW to support all options and integrate weights
# print('warping_paths')
l1 = len(s1)
l2 = len(s2)
# print('l1', l1, 'l2', l2)
if window is None:
window = max(l1, l2)
else:
window += 1 # TODO: 0 should be diagonal, this is now 1
# print('window', window)
paths = np.full((l1 + 1, l2 + 1), np.inf)
paths[0, 0] = 0
for i in range(l1):
# print('i', i)
# for j in range(max(0, i - max(0, r - c) - window + 1), min(c, i + max(0, c - r) + window)):
j_start = max(0, i - max(0, l1 - l2) - window + 1)
j_end = min(l2, i + max(0, l2 - l1) + window)
# print(j_start, j_end)
for j in range(j_start, j_end):
# print('j', j)
# for j in range(1, l2 + 1):
d = s1[i] - s2[j]
# print(f'd[{i},{j}] = {d}')
if weights is not None:
# print(weights[i, :])
# multiplication with LeRu like function
xn3, xn2, xn1, xn0, xp0, xp1, xp2, xp3 = weights[i, :]
# print('xn1, xn0, xp0, xp1', xn1, xn0, xp0, xp1)
if d < 0:
x0, x1, x2, x3 = xn0, xn1, xn2, xn3
d = -d
else:
x0, x1, x2, x3 = xp0, xp1, xp2, xp3
if d <= x0:
d = 0
elif x0 < d < x1:
d *= (d - x0) / (x1 - x0)
elif x2 <= d:
if np.isinf(x3) or x3 == x1:
a = 1
else:
a = 2 / (x3 - x2)
d *= (1 + a * (d - x2))
else:
pass # keep d
# print('d\'', d)
cost = d ** 2
paths[i + 1, j + 1] = cost + min(paths[i, j + 1], paths[i + 1, j], paths[i, j])
# s = math.sqrt(paths[l1 - 1, l2 - 1])
paths = np.sqrt(paths)
s = paths[l1 - 1, l2 - 1]
return s, paths
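# A minimal usage sketch (hypothetical data). Each row of `weights` holds the
# 8-tuple described in the module docstring; the loop above flips the sign of d
# and compares it directly against the first four entries, so symmetric positive
# thresholds are used on both sides here: deviations below 0.2 are ignored and
# deviations above 1.0 are penalised.
def _warping_paths_example():
    s1 = np.array([0.0, 1.0, 2.0, 1.0, 0.0])
    s2 = np.array([0.0, 1.1, 2.4, 1.0, 0.0])
    weights = np.tile([np.inf, 1.0, 0.5, 0.2, 0.2, 0.5, 1.0, np.inf],
                      (len(s1), 1))
    d, paths = warping_paths(s1, s2, weights=weights)
    return d, paths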
def distance_matrix(s, weights, window=None, show_progress=False, **kwargs):
dist_opts = {
'window': window
}
dists = np.full((len(s), len(s)), np.inf)
it_r = range(len(s))
if show_progress:
it_r = tqdm(it_r)
for r in it_r:
it_c = range(r + 1, len(s))
for c in it_c:
# Because of the weights this is not symmetric (?)
# TODO: make function not hardcoded
# print(f'{r} -- {c}')
# print(f's[{r}]', s[r])
# print('weights', weights.get(r, None))
# print(f's[{c}]', s[c])
# print('weights', weights.get(c, None))
weights_r = weights.get(r, None)
d1, paths = warping_paths(s[r], s[c], weights_r, **dist_opts)
# print(f'd1(r)={d1} -- w=\n{weights_r}')
# print (paths)
weights_c = weights.get(c, None)
if weights_r is None and weights_c is None:
dists[r, c] = d1
else:
d2, paths = warping_paths(s[c], s[r], weights_c, **dist_opts)
# print(f'd2(c)={d2} -- w=\n{weights_c}')
# print(paths)
dists[r, c] = min(d1, d2)
return dists
def compute_weights_using_dt(series, labels, prototypeidx, **kwargs):
"""Compute weight array by learning an ensemble of Decision Trees representing
the differences between the different labels.
:param series: List of sequences
:param labels: Labels for series
:param prototypeidx: The target sequence to learn weights for
:param kwargs: Arguments to pass to `series_to_dt`
:return:
"""
ml_values, cl_values, _clfs, importances = series_to_dt(series, labels, prototypeidx, **kwargs)
weights = compute_weights_from_mlclvalues(series[prototypeidx], ml_values, cl_values, **kwargs)
return weights, importances
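# A minimal usage sketch (hypothetical data), assuming the module's default
# DecisionTreeClassifier and the helper functions defined later in this file:
# weights are learned for series 0 from one must-link neighbour (label 1) and
# one cannot-link neighbour (label 0); the prototype itself is labelled 1.
def _compute_weights_example():
    series = [np.array([0.0, 1.0, 2.0, 1.0, 0.0]),
              np.array([0.0, 1.1, 2.2, 1.0, 0.0]),   # similar   -> must-link
              np.array([0.0, 3.0, 0.0, 3.0, 0.0])]   # different -> cannot-link
    labels = [1, 1, 0]
    weights, importances = compute_weights_using_dt(series, labels, prototypeidx=0)
    return weights, importances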
def series_to_dt(series, labels, prototypeidx, classifier=None, max_clfs=None, min_ig=0,
savefig=None, warping_paths_fnc=None, ignore_idxs=None, patternlen=None,
min_purity=1.0, **kwargs):
"""Compute Decision Tree from series
:param series:
:param labels: 0 for cannot-link, 1 for must-link
:param prototypeidx:
:param classifier: Classifier class.
For example dtw_weighted.DecisionTreeClassifier or tree.DecisionTreeClassifier
:param max_clfs: Maximum number of classifiers to learn
:param min_ig: Minimum information gain
:param savefig: Path to filename to save tree Graphviz visualisation
:param warping_paths_fnc: Function to compute warping paths
:param patternlen: Pattern window size (default None)
:param kwargs: Passed to warping_paths_fnc
:return:
"""
if warping_paths_fnc is None:
warping_paths_fnc = warping_paths
if ignore_idxs is None:
ignore_idxs = set()
features = [[0] * (len(series[prototypeidx]) * 2)] # feature per idx, split in positive and negative
targets = [0] # Do cluster
ml_values = defaultdict(lambda: ([], []))
# print(f"prototype idx = {prototypeidx}")
for idx, label in enumerate(labels):
if idx in ignore_idxs:
continue
cur_features = np.zeros(len(series[prototypeidx]) * 2, dtype=np.double)
        cur_features_cnt = np.zeros(len(series[prototypeidx]) * 2, dtype=int)
wp_params = {key: kwargs[key] for key in {'window', 'max_dist', 'max_step', 'max_length_diff',
'penalty', 'psi'}.intersection(kwargs.keys())}
s, paths = warping_paths_fnc(series[prototypeidx], series[idx], **wp_params)
path = best_path(paths)
for i_to, i_from in path:
d = series[prototypeidx][i_to] - series[idx][i_from]
# print(f"d{idx}({i_to},{i_from}) = {d}")
if label == 0:
# Cannot-link
pass
elif label == 1:
# Must-link
if d < 0:
ml_values[i_to][0].append(-d)
elif d > 0:
ml_values[i_to][1].append(d)
if d <= 0:
cur_features[i_to * 2] += -d
cur_features_cnt[i_to * 2] += 1
if d >= 0:
cur_features[i_to * 2 + 1] += d
cur_features_cnt[i_to * 2 + 1] += 1
cur_features_cnt[cur_features_cnt == 0] = 1
cur_features = np.divide(cur_features, cur_features_cnt)
features.append(cur_features)
if label == 0:
# print(f"{idx:<2}: CL -> {cur_features[0]:0.3f} / {cur_features[1]:0.3f}")
targets.append(1) # Do not cluster
elif label == 1:
# print(f"{idx:<2}: ML -> {cur_features[0]:0.3f} / {cur_features[1]:0.3f}")
targets.append(0) # Do cluster
else:
raise Exception("Encountered a label that is not 0 (cannot-link) or 1 (must-link): {}".format(label))
if classifier is None:
classifier = DecisionTreeClassifier
features = np.array(features)
targets = np.array(targets)
if savefig is not None:
try:
from sklearn import tree
except ImportError:
logger.error("No figure generated, sklearn is not installed.")
savefig, tree, out_string, feature_names = None, None, None, None
out_string = io.StringIO()
def args(i):
if (i % 2) == 0:
sgn = '-'
cmp = 's>t'
else:
sgn = '+'
cmp = 's<t'
return i, sgn, cmp
feature_names = ["d[{}] ({}, {}, {})".format(i // 2, *args(i))
for i in range(2*len(series[prototypeidx]))]
class_names = ["ML", "CL"]
else:
tree, out_string, feature_names, class_names = None, None, None, None
if patternlen is not None:
cl_values, clfs, importances = dt_windows(features, targets, classifier, patternlen, max_clfs, min_ig, min_purity)
else:
cl_values, clfs, importances = dt_onewindow(features, targets, classifier, max_clfs, min_ig, min_purity)
if savefig is not None:
for clf in clfs:
tree.export_graphviz(clf, out_file=out_string, feature_names=feature_names, class_names=class_names)
print("\n\n", file=out_string)
with open(savefig, "w") as ofile:
print(out_string.getvalue(), file=ofile)
return ml_values, cl_values, clfs, importances
def dt_windows(features, targets, classifier, patternlen, max_clfs, min_ig, min_purity):
cl_values = dict()
clfss = []
ignore_features = set()
clf_w = 1.0
importances = defaultdict(lambda: [0, 0])
nb_features = features.shape[1]
nb_indices = int((nb_features / 2))
max_kd = 0
for idx in range(0, int(nb_indices - patternlen / 2), int(patternlen / 2)):
idx_s = idx * 2
idx_e = idx_s + patternlen * 2
clf = classifier()
cur_features = features[:, idx_s:idx_e]
clf.fit(cur_features, targets, ignore_features=ignore_features, min_ig=min_ig)
logger.debug(f"Learned classifier {len(clfss) + 1}: idx = f{idx}/{idx_s}:f{idx+patternlen}/{idx_e}, "
f"nb nodes = {clf.tree_.nb_nodes}, used features = {clf.tree_.used_features}")
if clf.tree_.nb_nodes <= 1:
continue
clf.set_features(list(range(idx_s, idx_e)))
max_kd = max(max_kd, np.max(clf.tree_.kd))
clfss.append(clf)
clfs = []
for clf_idx, clf in enumerate(clfss):
score = clf.score(max_kd)
logger.debug(f"Clf[{clf_idx:<2}] - Score = {score}, Entropy = {clf.avg_impurity()}, "
f"depth = {clf.tree_.depth}, nbnodes = {clf.tree_.nb_nodes}")
clfs.append((score, -clf.tree_.nb_nodes, clf))
clfs.sort(reverse=True)
min_score = clfs[-1][0]
max_score = clfs[0][0]
    minallowed_score = max_score - (max_score - min_score) / 5  # TODO: remove this magic number
            pauli = PauliList([j * "X", "-i" + j * "Y", j * "Z"])
self.assertEqual(pauli.delete([0, 2]), PauliList("-i" + j * "Y"))
self.assertEqual(pauli.delete([1, 2]), PauliList(j * "X"))
self.assertEqual(pauli.delete([0, 1]), PauliList(j * "Z"))
with self.subTest(msg="single qubit"):
pauli = PauliList(["IIX", "iIYI", "ZII"])
value = pauli.delete(0, qubit=True)
target = PauliList(["II", "iIY", "ZI"])
self.assertEqual(value, target)
value = pauli.delete(1, qubit=True)
target = PauliList(["IX", "iII", "ZI"])
self.assertEqual(value, target)
value = pauli.delete(2, qubit=True)
target = PauliList(["IX", "iYI", "II"])
self.assertEqual(value, target)
with self.subTest(msg="multiple qubits"):
pauli = PauliList(["IIX", "IYI", "-ZII"])
value = pauli.delete([0, 1], qubit=True)
target = PauliList(["I", "I", "-Z"])
self.assertEqual(value, target)
value = pauli.delete([1, 2], qubit=True)
target = PauliList(["X", "I", "-I"])
self.assertEqual(value, target)
value = pauli.delete([0, 2], qubit=True)
target = PauliList(["I", "Y", "-I"])
self.assertEqual(value, target)
def test_insert(self):
"""Test insert method."""
# Insert single row
for j in range(1, 10):
pauli = PauliList(j * "X")
target0 = PauliList([j * "I", j * "X"])
target1 = PauliList([j * "X", j * "I"])
with self.subTest(msg=f"single row from str ({j})"):
value0 = pauli.insert(0, j * "I")
self.assertEqual(value0, target0)
value1 = pauli.insert(1, j * "I")
self.assertEqual(value1, target1)
with self.subTest(msg=f"single row from PauliList ({j})"):
value0 = pauli.insert(0, PauliList(j * "I"))
self.assertEqual(value0, target0)
value1 = pauli.insert(1, PauliList(j * "I"))
self.assertEqual(value1, target1)
target0 = PauliList(["i" + j * "I", j * "X"])
target1 = PauliList([j * "X", "i" + j * "I"])
with self.subTest(msg=f"single row with phase from str ({j})"):
value0 = pauli.insert(0, "i" + j * "I")
self.assertEqual(value0, target0)
value1 = pauli.insert(1, "i" + j * "I")
self.assertEqual(value1, target1)
with self.subTest(msg=f"single row with phase from PauliList ({j})"):
value0 = pauli.insert(0, PauliList("i" + j * "I"))
self.assertEqual(value0, target0)
value1 = pauli.insert(1, PauliList("i" + j * "I"))
self.assertEqual(value1, target1)
# Insert multiple rows
for j in range(1, 10):
pauli = PauliList("i" + j * "X")
insert = PauliList([j * "I", j * "Y", j * "Z", "-i" + j * "X"])
target0 = insert + pauli
target1 = pauli + insert
with self.subTest(msg=f"multiple-rows from PauliList ({j})"):
value0 = pauli.insert(0, insert)
self.assertEqual(value0, target0)
value1 = pauli.insert(1, insert)
self.assertEqual(value1, target1)
# Insert single column
pauli = PauliList(["X", "Y", "Z", "-iI"])
for i in ["I", "X", "Y", "Z", "iY"]:
phase = "" if len(i) == 1 else i[0]
p = i if len(i) == 1 else i[1]
target0 = PauliList(
[
phase + "X" + p,
phase + "Y" + p,
phase + "Z" + p,
("" if phase else "-i") + "I" + p,
]
)
target1 = PauliList(
[
i + "X",
i + "Y",
i + "Z",
("" if phase else "-i") + p + "I",
]
)
with self.subTest(msg="single-column single-val from str"):
value = pauli.insert(0, i, qubit=True)
self.assertEqual(value, target0)
value = pauli.insert(1, i, qubit=True)
self.assertEqual(value, target1)
with self.subTest(msg="single-column single-val from PauliList"):
value = pauli.insert(0, PauliList(i), qubit=True)
self.assertEqual(value, target0)
value = pauli.insert(1, PauliList(i), qubit=True)
self.assertEqual(value, target1)
# Insert single column with multiple values
pauli = PauliList(["X", "Y", "iZ"])
for i in [["I", "X", "Y"], ["X", "iY", "Z"], ["Y", "Z", "I"]]:
target0 = PauliList(
["X" + i[0], "Y" + i[1] if len(i[1]) == 1 else i[1][0] + "Y" + i[1][1], "iZ" + i[2]]
)
target1 = PauliList([i[0] + "X", i[1] + "Y", "i" + i[2] + "Z"])
with self.subTest(msg="single-column multiple-vals from PauliList"):
value = pauli.insert(0, PauliList(i), qubit=True)
self.assertEqual(value, target0)
value = pauli.insert(1, PauliList(i), qubit=True)
self.assertEqual(value, target1)
# Insert multiple columns from single
pauli = PauliList(["X", "iY", "Z"])
for j in range(1, 5):
for i in [j * "I", j * "X", j * "Y", "i" + j * "Z"]:
phase = "" if len(i) == j else i[0]
p = i if len(i) == j else i[1:]
target0 = PauliList(
[
phase + "X" + p,
("-" if phase else "i") + "Y" + p,
phase + "Z" + p,
]
)
target1 = PauliList([i + "X", ("-" if phase else "i") + p + "Y", i + "Z"])
with self.subTest(msg="multiple-columns single-val from str"):
value = pauli.insert(0, i, qubit=True)
self.assertEqual(value, target0)
value = pauli.insert(1, i, qubit=True)
self.assertEqual(value, target1)
with self.subTest(msg="multiple-columns single-val from PauliList"):
value = pauli.insert(0, PauliList(i), qubit=True)
self.assertEqual(value, target0)
value = pauli.insert(1, PauliList(i), qubit=True)
self.assertEqual(value, target1)
# Insert multiple columns multiple row values
pauli = PauliList(["X", "Y", "-iZ"])
for j in range(1, 5):
for i in [
[j * "I", j * "X", j * "Y"],
[j * "X", j * "Z", "i" + j * "Y"],
[j * "Y", j * "Z", j * "I"],
]:
target0 = PauliList(
[
"X" + i[0],
"Y" + i[1],
("-i" if len(i[2]) == j else "") + "Z" + i[2][-j:],
]
)
target1 = PauliList(
[
i[0] + "X",
i[1] + "Y",
("-i" if len(i[2]) == j else "") + i[2][-j:] + "Z",
]
)
with self.subTest(msg="multiple-column multiple-vals from PauliList"):
value = pauli.insert(0, PauliList(i), qubit=True)
self.assertEqual(value, target0)
value = pauli.insert(1, PauliList(i), qubit=True)
self.assertEqual(value, target1)
def test_commutes(self):
"""Test commutes method."""
# Single qubit Pauli
pauli = PauliList(["I", "X", "Y", "Z", "-iY"])
with self.subTest(msg="commutes single-Pauli I"):
value = list(pauli.commutes("I"))
target = [True, True, True, True, True]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli X"):
value = list(pauli.commutes("X"))
target = [True, True, False, False, False]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli Y"):
value = list(pauli.commutes("Y"))
target = [True, False, True, False, True]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli Z"):
value = list(pauli.commutes("Z"))
target = [True, False, False, True, False]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli iZ"):
value = list(pauli.commutes("iZ"))
target = [True, False, False, True, False]
self.assertEqual(value, target)
# 2-qubit Pauli
pauli = PauliList(["II", "IX", "YI", "XY", "ZZ", "-iYY"])
with self.subTest(msg="commutes single-Pauli II"):
value = list(pauli.commutes("II"))
target = [True, True, True, True, True, True]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli IX"):
value = list(pauli.commutes("IX"))
target = [True, True, True, False, False, False]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli XI"):
value = list(pauli.commutes("XI"))
target = [True, True, False, True, False, False]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli YI"):
value = list(pauli.commutes("YI"))
target = [True, True, True, False, False, True]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli IY"):
value = list(pauli.commutes("IY"))
target = [True, False, True, True, False, True]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli XY"):
value = list(pauli.commutes("XY"))
target = [True, False, False, True, True, False]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli YX"):
value = list(pauli.commutes("YX"))
target = [True, True, True, True, True, False]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli ZZ"):
value = list(pauli.commutes("ZZ"))
target = [True, False, False, True, True, True]
self.assertEqual(value, target)
with self.subTest(msg="commutes single-Pauli iYX"):
value = list(pauli.commutes("iYX"))
target = [True, True, True, True, True, False]
self.assertEqual(value, target)
def test_anticommutes(self):
"""Test anticommutes method."""
# Single qubit Pauli
pauli = PauliList(["I", "X", "Y", "Z", "-iY"])
with self.subTest(msg="anticommutes single-Pauli I"):
value = list(pauli.anticommutes("I"))
target = [False, False, False, False, False]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli X"):
value = list(pauli.anticommutes("X"))
target = [False, False, True, True, True]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli Y"):
value = list(pauli.anticommutes("Y"))
target = [False, True, False, True, False]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli Z"):
value = list(pauli.anticommutes("Z"))
target = [False, True, True, False, True]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli iZ"):
value = list(pauli.anticommutes("iZ"))
target = [False, True, True, False, True]
self.assertEqual(value, target)
# 2-qubit Pauli
pauli = PauliList(["II", "IX", "YI", "XY", "ZZ", "iZX"])
with self.subTest(msg="anticommutes single-Pauli II"):
value = list(pauli.anticommutes("II"))
target = [False, False, False, False, False, False]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli IX"):
value = list(pauli.anticommutes("IX"))
target = [False, False, False, True, True, False]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli XI"):
value = list(pauli.anticommutes("XI"))
target = [False, False, True, False, True, True]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli YI"):
value = list(pauli.anticommutes("YI"))
target = [False, False, False, True, True, True]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli IY"):
value = list(pauli.anticommutes("IY"))
target = [False, True, False, False, True, True]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli XY"):
value = list(pauli.anticommutes("XY"))
target = [False, True, True, False, False, False]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli YX"):
value = list(pauli.anticommutes("YX"))
target = [False, False, False, False, False, True]
self.assertEqual(value, target)
with self.subTest(msg="anticommutes single-Pauli ZZ"):
value = list(pauli.anticommutes("ZZ"))
            target = [False, True, True, False, False, True]
            self.assertEqual(value, target)
"二级类别":
                raise serializers.ValidationError("The parent of a level-3 category must be a level-2 category")
if self.initial_data['classes'] == "四级类别" and list.classes != "三级类别":
                raise serializers.ValidationError("The parent of a level-4 category must be a level-3 category")
return value
class ClientTypeDefinitionSerialize_List(serializers.ModelSerializer):
"""
    Client type definition -- list
"""
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "name", "code", "state", "classes", "auditor", "create_user","create_time","update_time")
class ClientInforDefinitionSerialize_Type(serializers.ModelSerializer):
"""
    Client info definition -- nested in client type definition
"""
class Meta:
model = ClientInforDefinitionModel
fields = ("id", "name", "code", "state", "auditor", "create_user")
class ClientTypeDefinitionSerialize_Retrieve(serializers.ModelSerializer):
"""
    Client type definition -- retrieve
"""
    file = PlanFileSerialize_List(many=True)  # type file info
    alter = PlanAlterRecordSerialize_List(many=True)  # audit record info
    parent = ClientTypeDefinitionSerialize_List(required=False)  # parent category info
    # clientType_child = ClientTypeDefinitionSerialize_List(many=True)  # child category info
    # clientType_item = ClientInforDefinitionSerialize_Type(many=True)  # attached item info
class Meta:
model = ClientTypeDefinitionModel
fields = "__all__"
class ClientTypeDefinitionSerialize_Update(serializers.ModelSerializer):
"""
Client type definition -- update
"""
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "name", "code", "classes", "parent", "attach_attribute",
"file", "desc", "auditor",)
# Validation across all fields
def validate(self, attrs):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, it cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
# Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, this field cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # the auditor account must differ from the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_clienttypedefinitionmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
# Parent category field validation
def validate_parent(self, value):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, this field cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
if self.initial_data['classes'] == "一级类别": # check whether the category is a first-level category
if value != None: # a first-level category cannot be given a parent
raise serializers.ValidationError("处于[一级类别]的信息不能指定父类别")
else:
if value is None: # any non-first-level category must be given a parent
raise serializers.ValidationError("处于" + self.initial_data["classes"] + "类别的信息必须指定父类别")
else: # check whether the given parent category is acceptable
list = ClientTypeDefinitionModel.objects.get(id=value.id)
if list is None: # check that the parent category exists
raise serializers.ValidationError("指定的父类别不存在")
elif (list.state != "使用中"): # check that the parent category is in a usable state
raise serializers.ValidationError("指定的父类别不在--'使用状态'")
else: # check that the parent/child levels match
if self.initial_data['classes'] == "二级类别" and list.classes != "一级类别":
raise serializers.ValidationError("[二级类别]的父类别必须是[一级类别]")
if self.initial_data['classes'] == "三级类别" and list.classes != "二级类别":
raise serializers.ValidationError("[三级类别]的父类别必须是[二级类别]")
if self.initial_data['classes'] == "四级类别" and list.classes != "三级类别":
raise serializers.ValidationError("[四级类别]的父类别必须是[三级类别]")
return value
class ClientTypeDefinitionSerialize_Partial(serializers.ModelSerializer):
"""
Client type definition -- partial
"""
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "state", "alter")
# Validation across all fields
def validate(self, attrs):
try:
del attrs['alter'] # drop the alter field
except Exception:
pass
return attrs
# State field validation
def validate_state(self, value):
validate_states(self.instance.state, value)
if (self.instance.create_user == self.context['request'].user.username) and\
(self.instance.auditor != self.context['request'].user.username): # if the current user is the creator account but not the auditor account
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
return value
# Audit record field validation
def validate_alter(self, value):
obj = ClientTypeDefinitionModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
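# Illustrative sketch (not part of the original module, request/instance names are
# assumed): the *_Partial serializer above is typically driven from a DRF
# partial_update view, e.g.
#
#   serializer = ClientTypeDefinitionSerialize_Partial(
#       instance, data={"state": "审核中"}, partial=True,
#       context={"request": request})
#   serializer.is_valid(raise_exception=True)   # runs validate_state()/validate_alter()
#   serializer.save()
#
# validate_state() only lets the creating account move a record from '新建'
# to '审核中' or '作废'; any other transition raises a ValidationError.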
# region Client type hierarchy serializers
class ClientTypeDefinitionSerialize_Fourth(serializers.ModelSerializer):
"""
Client type hierarchy -- fourth
"""
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "name", "code", "state")
class ClientTypeDefinitionSerialize_Third(serializers.ModelSerializer):
"""
Client type definition -- third
"""
clientType_child = ClientTypeDefinitionSerialize_Fourth(many=True)  # child category information
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "name", "code", "state", "clientType_child")
class ClientTypeDefinitionSerialize_Second(serializers.ModelSerializer):
"""
Client type definition -- second
"""
clientType_child = ClientTypeDefinitionSerialize_Third(many=True)  # child category information
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "name", "code", "state", "clientType_child")
class ClientTypeDefinitionSerialize_First(serializers.ModelSerializer):
"""
Client type definition -- first
"""
clientType_child = ClientTypeDefinitionSerialize_Second(many=True)  # child category information
class Meta:
model = ClientTypeDefinitionModel
fields = ("id", "name", "code", "state","clientType_child")
# endregion
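# Illustrative sketch (assumption, not in the original source): serializing a
# top-level category with ClientTypeDefinitionSerialize_First walks the
# clientType_child reverse relation and yields a nested tree, roughly:
#
#   {
#     "id": 1, "name": "...", "code": "...", "state": "使用中",
#     "clientType_child": [                       # second level
#       {"id": 2, "clientType_child": [           # third level
#           {"id": 3, "clientType_child": [{...}]}  # fourth level
#       ]}
#     ]
#   }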
# region Client information definition serializers
class ClientInforDefinitionSerialize_Create(serializers.ModelSerializer):
"""
Client information definition -- create
"""
state= serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = ClientInforDefinitionModel
fields = ("id", "name", "code","state","type", "image", "file", "address", "mobile",
"fax", "wechat", "company_name", "company_abbre","attribute1", "attribute2",
"attribute3", "attribute4","attribute5", "desc", "auditor","create_user"
)
# Validation across all fields
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_clientinfordefinitionmodel'): # if the current user lacks create permission
raise serializers.ValidationError("当前用户不具备创建权限")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # the auditor account must differ from the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同")
return attrs
# Auditor field validation
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_clientinfordefinitionmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
# Type field validation
def validate_type(self, value):
list = ClientTypeDefinitionModel.objects.get(id=value.id)
if list is None: # check that the specified type exists
raise serializers.ValidationError("指定的类型不存在")
elif (list.state != "使用中"): # check that the specified type is in a usable state
raise serializers.ValidationError("指定的类型不在--'使用状态'")
return value
class ClientInforDefinitionSerialize_List(serializers.ModelSerializer):
"""
Client information definition -- list
"""
type = ClientTypeDefinitionSerialize_List(required=False)
class Meta:
model = ClientInforDefinitionModel
fields = ("id", "name", "code", "state","type","address", "mobile",
"fax", "wechat", "company_name", "company_abbre","auditor","create_user","create_time","update_time")
class ClientInforDefinitionSerialize_Retrieve(serializers.ModelSerializer):
"""
Client information definition -- retrieve
"""
image = PlanImageSerialize_List(many=True)
file = PlanFileSerialize_List(many=True)
alter = PlanAlterRecordSerialize_List(many=True)
type = ClientTypeDefinitionSerialize_List(required=False)
class Meta:
model = ClientInforDefinitionModel
fields = "__all__"
class ClientInforDefinitionSerialize_Update(serializers.ModelSerializer):
"""
Client information definition -- update
"""
class Meta:
model = ClientInforDefinitionModel
fields = ("id", "name", "code", "type", "image", "file", "address", "mobile",
"fax", "wechat", "company_name", "company_abbre","attribute1", "attribute2",
"attribute3", "attribute4","attribute5", "desc", "auditor"
)
# Validation across all fields
def validate(self, attrs):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, it cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
# Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, it cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # the auditor account must differ from the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_clientinfordefinitionmodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
# Type field validation
def validate_type(self, value):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, this field cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
list = ClientTypeDefinitionModel.objects.get(id=value.id)
if list is None: # check that the specified type exists
raise serializers.ValidationError("指定的类型不存在")
elif (list.state != "使用中"): # check that the specified type is in a usable state
raise serializers.ValidationError("指定的类型不在--'使用状态'")
return value
class ClientInforDefinitionSerialize_Partial(serializers.ModelSerializer):
"""
Client information definition -- partial
"""
class Meta:
model = ClientInforDefinitionModel
fields = ("id", "state", "alter")
# Validation across all fields
def validate(self, attrs):
try:
del attrs['alter'] # drop the alter field
except Exception:
pass
return attrs
# State field validation
def validate_state(self, value):
validate_states(self.instance.state, value)
if (self.instance.create_user == self.context['request'].user.username) and\
(self.instance.auditor != self.context['request'].user.username): # if the current user is the creator account but not the auditor account
if not (self.instance.state == "新建" and (value == "审核中" or value == "作废")):
raise serializers.ValidationError("创建者只能将[新建]信息更改成[审核中]或[作废]")
return value
# Audit record field validation
def validate_alter(self, value):
obj = ClientInforDefinitionModel.objects.get(id=self.instance.id).alter
for data in value:
obj.add(data.id)
return value
# endregion
# region Sales sub-order creation serializers
class SalesOrderItemCreateSerialize_Create(serializers.ModelSerializer):
"""
Sales order item creation -- create
"""
state= serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = SalesOrderItemCreateModel
fields =("id","product_id", "batch","state","sum", "file","attribute1", "attribute2",
"attribute3", "attribute4","attribute5","desc","create_user")
def validate(self, attrs):
try:
product = ProductInforDefinitionModel.objects.get(id=attrs["product_id"]) # check that the specified product exists
except Exception as e:
raise serializers.ValidationError("指定的产品不存在")
if (product.state != "使用中"): # check that the product is in a usable state
raise serializers.ValidationError("指定的产品不在--'使用状态'")
attrs["productType_code"] = product.type.code # fetch the product type code
attrs["productType_name"] = product.type.name # fetch the product type name
attrs["product_code"] = product.code # fetch the product code
attrs["product_name"] = product.name # fetch the product name
return attrs
class ProductTaskTypeSerialize_ProductTask(serializers.ModelSerializer):
"""
Production task type definition -- production task creation
"""
class Meta:
model = ProductTaskTypeModel
fields = ("id", "name", "code", "state", "classes")
class ProductTaskCreateSerialize_ProductTaskItem(serializers.ModelSerializer):
"""
Production task creation -- production task item creation -- sales order item creation
"""
type=ProductTaskTypeSerialize_ProductTask(required=False)
class Meta:
model = ProductTaskCreateModel
fields = ("id", "name", "code","type","state","update_time",)
class ProductTaskItemCreateSerialize_SalesOrderItem(serializers.ModelSerializer):
"""
Production task item creation -- sales order item creation
"""
productTaskItem_parent=ProductTaskCreateSerialize_ProductTaskItem(many=True)
class Meta:
model = ProductTaskItemCreateModel
fields = ("id", "state","sum","completed","route_id","update_time","productTaskItem_parent")
class SalesOrderItemCreateSerialize_List(serializers.ModelSerializer):
"""
Sales order item creation -- list
"""
file = PlanFileSerialize_List(many=True)
salesOrderItem_productTaskItem = ProductTaskItemCreateSerialize_SalesOrderItem(many=True)
class Meta:
model = SalesOrderItemCreateModel
fields = "__all__"
class SalesOrderItemCreateSerialize_Partial(serializers.ModelSerializer):
"""
Sales order item creation -- partial
"""
class Meta:
model = SalesOrderItemCreateModel
fields = ("id","state")
# State field validation
def validate_state(self, value):
parentState = SalesOrderItemCreateModel.objects.filter(
id=self.instance.id).first().salesOrderItem_parent.all().values('state')
if (parentState[0]['state'] != "使用中" ):
raise serializers.ValidationError("当前订单不处于[使用中状态],禁止更改子项订单状态")
if not (self.instance.state == "等待" and value == "终止"):
raise serializers.ValidationError("子订单只能从[等待状态]更改成[终止状态]")
if not (self.context['request'].user.has_perm('plan.deal_salesordercreatemodel')):
raise serializers.ValidationError("当前用户不具备执行终止订单权限")
# Walk every parent order that references this sub-order item; if all of a parent's children are terminated, mark that parent order as terminated too
data1 = SalesOrderItemCreateModel.objects.filter(id=self.instance.id).first().salesOrderItem_parent.all().values('id')
for item1 in data1: # iterate over every parent order linked to this item
count = 1
parentModel = SalesOrderCreateModel.objects.filter(id=item1['id']).first()
data2=parentModel.child.all().values('id')
for item2 in data2: # iterate over all children of that parent order
child = SalesOrderItemCreateModel.objects.filter(id=item2['id']).first()
if child.state == "终止":
count += 1
if count ==len(data2):
parentModel.state="终止"
parentModel.save()
return value
# endregion
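# Illustrative sketch (not in the original source; the item_id/request names are
# assumed): validate_state() above implements a completion cascade, so
# terminating the last still-waiting child of a SalesOrderCreateModel also
# flips the parent order to '终止', e.g.
#
#   item = SalesOrderItemCreateModel.objects.get(pk=item_id)   # currently '等待'
#   s = SalesOrderItemCreateSerialize_Partial(
#       item, data={"state": "终止"}, partial=True,
#       context={"request": request})
#   s.is_valid(raise_exception=True)  # may also save the parent order as '终止'
#   s.save()                          # persists the item's own state change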
# region Sales order creation serializers
class SalesOrderCreateSerialize_Create(serializers.ModelSerializer):
"""
Sales order creation -- create
"""
state = serializers.HiddenField(default="新建")
create_user = serializers.HiddenField(default=serializers.CurrentUserDefault())
class Meta:
model = SalesOrderCreateModel
fields =("id", "name", "code", "state", "file", "client", "delivery_time","child","attribute1", "attribute2",
"attribute3", "attribute4","attribute5","desc","auditor","create_user")
# Validation across all fields
def validate(self, attrs):
if not attrs["create_user"].has_perm('plan.add_salesordercreatemodel'): # if the current user lacks create permission
raise serializers.ValidationError("当前用户不具备创建权限")
if settings.SAME_USER!=True:
if attrs["create_user"].username == attrs["auditor"]: # the auditor account must differ from the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同")
return attrs
# Client field validation
def validate_client(self, value):
list = ClientInforDefinitionModel.objects.get(id=value.id)
if list is None: # check that the client exists
raise serializers.ValidationError("指定的客户不存在")
elif (list.state != "使用中"): # check that the client is in a usable state
raise serializers.ValidationError("指定的客户不在--'使用状态'")
return value
# Auditor field validation
def validate_auditor(self, value):
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_salesordercreatemodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class SalesOrderCreateSerialize_List(serializers.ModelSerializer):
"""
Sales order creation -- list
"""
client = ClientInforDefinitionSerialize_List()
class Meta:
model = SalesOrderCreateModel
fields = ("id", "name", "code","state","client","delivery_time", "auditor","create_user","create_time","update_time")
class SalesOrderCreateSerialize_Retrieve(serializers.ModelSerializer):
"""
Sales order creation -- retrieve
"""
file = PlanFileSerialize_List(many=True)
child = SalesOrderItemCreateSerialize_List(many=True)
alter = PlanAlterRecordSerialize_List(many=True)
client = ClientInforDefinitionSerialize_List()
class Meta:
model = SalesOrderCreateModel
fields = "__all__"
class SalesOrderCreateSerialize_Update(serializers.ModelSerializer):
"""
Sales order creation -- update
"""
class Meta:
model = SalesOrderCreateModel
fields = ("id", "name", "code","file", "client", "delivery_time","child" ,"attribute1", "attribute2",
"attribute3", "attribute4","attribute5","desc", "auditor")
# Validation across all fields
def validate(self, attrs):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, it cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
return attrs
# Client field validation
def validate_client(self, value):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, it cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
list = ClientInforDefinitionModel.objects.get(id=value.id)
if list is None: # check that the client exists
raise serializers.ValidationError("指定的客户不存在")
elif (list.state != "使用中"): # check that the client is in a usable state
raise serializers.ValidationError("指定的客户不在--'使用状态'")
return value
# Auditor field validation
def validate_auditor(self, value):
if self.instance.state != '新建': # if the record is no longer in the '新建' (new) state, it cannot be changed
raise serializers.ValidationError("当前信息已提交,禁止更改")
if settings.SAME_USER != True:
if self.instance.create_user == value: # the auditor account must differ from the creator account
raise serializers.ValidationError("审核帐号不能与创建帐号相同")
try:
auditor = User.objects.get(username=value)
except Exception as e:
raise serializers.ValidationError("指定的审核账号不存在")
if not auditor.has_perm('plan.admin_salesordercreatemodel'):
raise serializers.ValidationError("指定的审核账号不具备审核权限")
return value
class SalesOrderCreateSerialize_Partial(serializers.ModelSerializer):
"""
Sales order creation -- partial
"""
class Meta:
model = SalesOrderCreateModel
| |
to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
if random.random() < self.p:
if self.random_params:
quality = self.get_params(self.min_quality, self.max_quality)
else:
quality = self.max_quality
return EF.compression_jpeg(img, quality=quality)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
#TBD
class RandomQuantizeNoise(object):
pass
#TBD
#randomly apply noise types. Extend RandomOrder, must find a way to implement
#random parameters for the noise types
'''
class RandomNoise(RandomTransforms):
"""Apply a list of noise transformations in a random order
"""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
return img
'''
class RandomAverageBlur(object):
"""Applying Average blurring filter on the given CV Image randomly with a given probability.
Args:
p (float): probability of the image being noised. Default value is 0.5
kernel_size (int): size of the blur filter to use. Default: 3.
random_params (bool): if enabled, will randomly get a kernel size on each iteration.
"""
def __init__(self, p: float = 0.5, kernel_size: int = 3, random_params: bool = False):
assert isinstance(kernel_size, int) and kernel_size >= 0, 'kernel_size should be a positive integer'
assert isinstance(p, numbers.Number) and p >= 0, 'p should be a positive value'
self.p = p
self.kernel_size = kernel_size
self.random_params = random_params
@staticmethod
def get_params(imgdim):
"""Get kernel size for blur filter in range (3, 11)
Validates that the kernel is not larger than the image and is an odd integer
Returns:
kernel size to be passed to filter
"""
kernel_size = int(np.random.uniform(3, 11))
if kernel_size > imgdim:
kernel_size = int(np.random.uniform(3, imgdim/2))
kernel_size = int(np.ceil(kernel_size))
if kernel_size % 2 == 0:
kernel_size+=1
return kernel_size
def __call__(self, img):
"""
Args:
img (np.ndarray): Image to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
h = img.shape[0]
if random.random() < self.p:
if self.random_params:
self.kernel_size = self.get_params(h)
return EF.average_blur(img, kernel_size=self.kernel_size)
return img
#The function needs some fixing
class RandomBilateralBlur(object):
"""Applying Bilateral blurring filter on the given CV Image randomly with a given probability.
Args:
p (float): probability of the image being noised. Default value is 0.5
kernel_size (int): size of the blur filter to use. Default: 3. Large filters
(d > 5) are very slow, so it is recommended to use d=5 for real-time
applications, and perhaps d=9 for offline applications that need heavy
noise filtering.
Sigma values: For simplicity, you can set the 2 sigma values to be the same.
If they are small (< 10), the filter will not have much effect, whereas
if they are large (> 150), they will have a very strong effect, making
the image look "cartoonish".
sigmaColor: Filter sigma in the color space. A larger value of the parameter
means that farther colors within the pixel neighborhood (see sigmaSpace)
will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter
means that farther pixels will influence each other as long as their colors
are close enough (see sigmaColor ). When d>0, it specifies the neighborhood
size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
random_params (bool): if enabled, will randomly get a kernel size on each iteration,
as well as sigmaSpace and sigmaColor, using those params as maximums to sample.
"""
def __init__(self, p: float = 0.5, kernel_size: int = 3, sigmaColor: int = 5, sigmaSpace: int = 5, random_params: bool = False):
assert isinstance(kernel_size, int) and kernel_size >= 0, 'kernel_size should be a positive integer'
assert isinstance(sigmaColor, int) and sigmaColor >= 0, 'sigmaColor should be a positive integer'
assert isinstance(sigmaSpace, int) and sigmaSpace >= 0, 'sigmaColor should be a positive integer'
assert isinstance(p, numbers.Number) and p >= 0, 'p should be a positive value'
self.p = p
self.kernel_size = kernel_size
self.sigmaColor = sigmaColor
self.sigmaSpace = sigmaSpace
self.random_params = random_params
@staticmethod
def get_params(imgdim, sigmaColor, sigmaSpace):
"""Get kernel size for bilateral filter in range (3, 9), sigmaColor and sigmaSpace
Validates that the kernel is not larger than the image and is an odd integer
Returns:
kernel size to be passed to filter
"""
sigmaColor = int(np.random.uniform(20, sigmaColor))
sigmaSpace = int(np.random.uniform(20, sigmaSpace))
kernel_size = int(np.random.uniform(3, 9))
if kernel_size > imgdim:
kernel_size = int(np.random.uniform(3, imgdim/2))
kernel_size = int(np.ceil(kernel_size))
if kernel_size % 2 == 0:
kernel_size+=1
return kernel_size, sigmaColor, sigmaSpace
def __call__(self, img):
"""
Args:
img (np.ndarray): Image to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
h = img.shape[0]
if random.random() < self.p:
if self.random_params:
self.kernel_size, sigmaColor, sigmaSpace = self.get_params(h, self.sigmaColor, self.sigmaSpace)
else:
sigmaColor, sigmaSpace = self.sigmaColor, self.sigmaSpace
return EF.bilateral_blur(img, kernel_size=self.kernel_size, sigmaColor=sigmaColor, sigmaSpace=sigmaSpace)  # pass the sampled sigmas (assumes EF.bilateral_blur accepts these keyword arguments)
return img
class RandomBoxBlur(object):
"""Applying Box blurring filter on the given CV Image randomly with a given probability.
Args:
p (float): probability of the image being noised. Default value is 0.5
kernel_size (int): size of the blur filter to use. Default: 3.
random_params (bool): if enabled, will randomly get a kernel size on each iteration.
"""
def __init__(self, p: float = 0.5, kernel_size: int = 3, random_params: bool = False):
assert isinstance(kernel_size, int) and kernel_size >= 0, 'kernel_size should be a positive integer'
assert isinstance(p, numbers.Number) and p >= 0, 'p should be a positive value'
self.p = p
self.kernel_size = kernel_size
self.random_params = random_params
@staticmethod
def get_params(imgdim):
"""Get kernel size for blur filter in range (3, 11)
Validates that the kernel is not larger than the image and is an odd integer
Returns:
kernel size to be passed to filter
"""
kernel_size = int(np.random.uniform(3, 11))
if kernel_size > imgdim:
kernel_size = int(np.random.uniform(3, imgdim/2))
kernel_size = int(np.ceil(kernel_size))
if kernel_size % 2 == 0:
kernel_size+=1
return kernel_size
def __call__(self, img):
"""
Args:
img (np.ndarray): Image to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
h = img.shape[0]
if random.random() < self.p:
if self.random_params:
self.kernel_size = self.get_params(h)
return EF.box_blur(img, kernel_size=self.kernel_size)
return img
class RandomGaussianBlur(object):
"""Applying Gaussian blurring filter on the given CV Image randomly with a given probability.
Args:
p (float): probability of the image being noised. Default value is 0.5
kernel_size (int): size of the blur filter to use. Default: 3.
random_params (bool): if enabled, will randomly get a kernel size on each iteration.
"""
def __init__(self, p: float = 0.5, kernel_size: int = 3, random_params: bool = False):
assert isinstance(kernel_size, int) and kernel_size >= 0, 'kernel_size should be a positive integer'
assert isinstance(p, numbers.Number) and p >= 0, 'p should be a positive value'
self.p = p
self.kernel_size = kernel_size
self.random_params = random_params
@staticmethod
def get_params(imgdim):
"""Get kernel size for blur filter in range (3, 11)
Validates that the kernel is not larger than the image and is an odd integer
Returns:
kernel size to be passed to filter
"""
kernel_size = int(np.random.uniform(3, 11))
if kernel_size > imgdim:
kernel_size = int(np.random.uniform(3, imgdim/2))
kernel_size = int(np.ceil(kernel_size))
if kernel_size % 2 == 0:
kernel_size+=1
return kernel_size
def __call__(self, img):
"""
Args:
img (np.ndarray): Image to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
h = img.shape[0]
if random.random() < self.p:
if self.random_params:
self.kernel_size = self.get_params(h)
return EF.gaussian_blur(img, kernel_size=self.kernel_size)
return img
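# Illustrative usage (assumption, not part of the original file): each transform is
# callable on an HxWxC numpy image and can be chained by hand, e.g.
#
#   import numpy as np
#   img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
#   blur = RandomGaussianBlur(p=1.0, kernel_size=5)
#   out = blur(img)                                   # always applied (p=1.0)
#   out = RandomBoxBlur(p=0.3, random_params=True)(out)  # applied ~30% of the time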
class BayerDitherNoise(object):
r"""Adds colored bayer dithering noise to the image.
Args:
p (float): probability of the image being noised. Default value is 0.5
"""
def __init__(self, p=0.5):
assert isinstance(p, numbers.Number) and p >= 0, 'p should be a positive value'
self.p = p
def __call__(self, img):
"""
Args:
img (np.ndarray): Image to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
if random.random() < self.p:
return EF.noise_dither_bayer(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class FSDitherNoise(object):
r"""Adds colored Floyd–Steinberg dithering noise to the image.
Args:
p (float): probability of the image being noised. Default value is 0.5
samplingF: controls the amount of dithering
"""
def __init__(self, p=0.5, samplingF = 1):
assert isinstance(p, numbers.Number) and p >= 0, 'p should be a positive value'
self.p = p
self.samplingF = samplingF
def __call__(self, img):
"""
Args:
img (np.ndarray): Image to be noised.
Returns:
np.ndarray: Randomly noised image.
"""
if random.random() < self.p:
return EF.noise_dither_fs(img, self.samplingF)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class | |
# Copyright 2014 Red Hat, Inc.
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pprint
import time
import requests
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
from designate import exceptions
from designate.backend import base
from designate.i18n import _LE
LOG = logging.getLogger(__name__)
IPA_DEFAULT_PORT = 443
class IPABaseError(exceptions.Backend):
error_code = 500
error_type = 'unknown_ipa_error'
class IPAAuthError(IPABaseError):
error_type = 'authentication_error'
# map of designate domain parameters to the corresponding
# ipa parameter
# NOTE: ipa manages serial, and does not honor
# increment_serial=False - this means the designate serial
# and the ipa serial will diverge if updates are made
# using increment_serial=False
domain2ipa = {'ttl': 'dnsttl', 'email': 'idnssoarname',
'serial': 'idnssoaserial', 'expire': 'idnssoaexpire',
'minimum': 'idnssoaminimum', 'refresh': 'idnssoarefresh',
'retry': 'idnssoaretry'}
# map of designate record types to ipa
rectype2iparectype = {'A': ('arecord', '%(data)s'),
'AAAA': ('aaaarecord', '%(data)s'),
'MX': ('mxrecord', '%(data)s'),
'CNAME': ('cnamerecord', '%(data)s'),
'TXT': ('txtrecord', '%(data)s'),
'SRV': ('srvrecord', '%(data)s'),
'NS': ('nsrecord', '%(data)s'),
'PTR': ('ptrrecord', '%(data)s'),
'SPF': ('spfrecord', '%(data)s'),
'SSHFP': ('sshfprecord', '%(data)s')}
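# Illustrative example (not in the original source): rectype2iparectype maps a
# designate record type to the IPA attribute name plus a format template for the
# record data, e.g.
#   attr, fmt = rectype2iparectype['MX']        # ('mxrecord', '%(data)s')
#   fmt % {'data': '10 mail.example.com.'}      # -> '10 mail.example.com.'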
IPA_INVALID_DATA = 3009
IPA_NOT_FOUND = 4001
IPA_DUPLICATE = 4002
IPA_NO_CHANGES = 4202
class IPAUnknownError(IPABaseError):
pass
class IPACommunicationFailure(IPABaseError):
error_type = 'communication_failure'
pass
class IPAInvalidData(IPABaseError):
error_type = 'invalid_data'
pass
class IPADomainNotFound(IPABaseError):
error_type = 'domain_not_found'
pass
class IPARecordNotFound(IPABaseError):
error_type = 'record_not_found'
pass
class IPADuplicateDomain(IPABaseError):
error_type = 'duplicate_domain'
pass
class IPADuplicateRecord(IPABaseError):
error_type = 'duplicate_record'
pass
ipaerror2exception = {
IPA_INVALID_DATA: {
'dnszone': IPAInvalidData,
'dnsrecord': IPAInvalidData
},
IPA_NOT_FOUND: {
'dnszone': IPADomainNotFound,
'dnsrecord': IPARecordNotFound
},
IPA_DUPLICATE: {
'dnszone': IPADuplicateDomain,
'dnsrecord': IPADuplicateRecord
},
# NOTE: Designate will send updates with all fields
# even if they have not changed value. If none of
# the given values has changed, IPA will return
# this error code - this can be ignored
IPA_NO_CHANGES: {
'dnszone': None,
'dnsrecord': None
}
}
def abs2rel_name(domain, rsetname):
"""convert rsetname from absolute form foo.bar.tld. to the name
relative to the domain. For IPA, if domain is rsetname, then use
"@" as the relative name. If rsetname does not end with a subset
of the domain, then just return the raw rsetname
"""
if rsetname.endswith(domain):
idx = rsetname.rfind(domain)
if idx == 0:
rsetname = "@"
elif idx > 0:
rsetname = rsetname[:idx].rstrip(".")
return rsetname
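# Illustrative examples (not in the original source), assuming domain "example.com.":
#   abs2rel_name("example.com.", "example.com.")     -> "@"
#   abs2rel_name("example.com.", "www.example.com.") -> "www"
#   abs2rel_name("example.com.", "other.tld.")       -> "other.tld."  (returned unchanged)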
class IPABackend(base.Backend):
__plugin_name__ = 'ipa'
@classmethod
def get_cfg_opts(cls):
group = cfg.OptGroup(
name='backend:ipa', title="Configuration for IPA Backend"
)
opts = [
cfg.StrOpt('ipa-host', default='localhost.localdomain',
help='IPA RPC listener host - must be FQDN'),
cfg.IntOpt('ipa-port', default=IPA_DEFAULT_PORT,
help='IPA RPC listener port'),
cfg.StrOpt('ipa-client-keytab', default=None,
help='Kerberos client keytab file'),
cfg.StrOpt('ipa-auth-driver-class',
default='designate.backend.impl_ipa.auth.IPAAuth',
help='Class that implements the authentication '
'driver for IPA'),
cfg.StrOpt('ipa-ca-cert', default=None,
help='CA certificate for use with https to IPA'),
cfg.StrOpt('ipa-base-url', default='/ipa',
help='Base URL for IPA RPC, relative to host[:port]'),
cfg.StrOpt('ipa-json-url',
default='/json',
help='URL for IPA JSON RPC, relative to IPA base URL'),
cfg.IntOpt('ipa-connect-retries', default=1,
help='How many times Designate will attempt to retry '
'the connection to IPA before giving up'),
cfg.BoolOpt('ipa-force-ns-use', default=False,
help='IPA requires that a specified '
'name server or SOA MNAME is resolvable - if this '
'option is set, Designate will force IPA to use a '
'given name server even if it is not resolvable'),
cfg.StrOpt('ipa-version', default='2.65',
help='IPA RPC JSON version')
]
return [(group, opts)]
def start(self):
LOG.debug('IPABackend start')
self.request = requests.Session()
authclassname = cfg.CONF[self.name].ipa_auth_driver_class
authclass = importutils.import_class(authclassname)
self.request.auth = \
authclass(cfg.CONF[self.name].ipa_client_keytab,
cfg.CONF[self.name].ipa_host)
ipa_base_url = cfg.CONF[self.name].ipa_base_url
if ipa_base_url.startswith("http"): # full URL
self.baseurl = ipa_base_url
else: # assume relative to https://host[:port]
self.baseurl = "https://" + cfg.CONF[self.name].ipa_host
ipa_port = cfg.CONF[self.name].ipa_port
if ipa_port != IPA_DEFAULT_PORT:
self.baseurl += ":" + str(ipa_port)
self.baseurl += ipa_base_url
ipa_json_url = cfg.CONF[self.name].ipa_json_url
if ipa_json_url.startswith("http"): # full URL
self.jsonurl = ipa_json_url
else: # assume relative to https://host[:port]
self.jsonurl = self.baseurl + ipa_json_url
xtra_hdrs = {'Content-Type': 'application/json',
'Referer': self.baseurl}
self.request.headers.update(xtra_hdrs)
self.request.verify = cfg.CONF[self.name].ipa_ca_cert
self.ntries = cfg.CONF[self.name].ipa_connect_retries
self.force = cfg.CONF[self.name].ipa_force_ns_use
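# Illustrative example (assumed values, not in the original source): with the
# defaults ipa-port=443, ipa-base-url='/ipa', ipa-json-url='/json' and
# ipa-host='ipa.example.com', start() ends up with
#   self.baseurl == 'https://ipa.example.com/ipa'
#   self.jsonurl == 'https://ipa.example.com/ipa/json'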
def create_domain(self, context, domain):
LOG.debug('Create Domain %r' % domain)
ipareq = {'method': 'dnszone_add', 'id': 0}
params = [domain['name']]
servers = self.central_service.get_domain_servers(self.admin_context)
# just use the first one for zone creation - add the others
# later, below - use force because designate assumes the NS
# already exists somewhere, is resolvable, and already has
# an A/AAAA record
args = {'idnssoamname': servers[0]['name']}
if self.force:
args['force'] = True
for dkey, ipakey in list(domain2ipa.items()):
if dkey in domain:
args[ipakey] = domain[dkey]
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
# add NS records for all of the other servers
if len(servers) > 1:
ipareq = {'method': 'dnsrecord_add', 'id': 0}
params = [domain['name'], "@"]
args = {'nsrecord': servers[1:]}
if self.force:
args['force'] = True
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def update_domain(self, context, domain):
LOG.debug('Update Domain %r' % domain)
ipareq = {'method': 'dnszone_mod', 'id': 0}
params = [domain['name']]
args = {}
for dkey, ipakey in list(domain2ipa.items()):
if dkey in domain:
args[ipakey] = domain[dkey]
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_domain(self, context, domain):
LOG.debug('Delete Domain %r' % domain)
ipareq = {'method': 'dnszone_del', 'id': 0}
params = [domain['name']]
args = {}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def create_recordset(self, context, domain, recordset):
LOG.debug('Discarding create_recordset call, not-applicable')
def update_recordset(self, context, domain, recordset):
LOG.debug('Update RecordSet %r / %r' % (domain, recordset))
# designate allows updating a recordset even if there are no
# records in it - we should ignore this case
if not self._recset_has_records(context, recordset):
LOG.debug('No records in %r / %r - skipping' % (domain, recordset))
return
# The only thing IPA allows is to change the ttl, since that is
# stored "per recordset"
if 'ttl' not in recordset:
return
ipareq = {'method': 'dnsrecord_mod', 'id': 0}
dname = domain['name']
rsetname = abs2rel_name(dname, recordset['name'])
params = [domain['name'], rsetname]
args = {'dnsttl': recordset['ttl']}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_recordset(self, context, domain, recordset):
LOG.debug('Delete RecordSet %r / %r' % (domain, recordset))
# designate allows deleting a recordset even if there are no
# records in it - we should ignore this case
if not self._recset_has_records(context, recordset):
LOG.debug('No records in %r / %r - skipping' % (domain, recordset))
return
ipareq = {'method': 'dnsrecord_mod', 'id': 0}
dname = domain['name']
rsetname = abs2rel_name(dname, recordset['name'])
params = [domain['name'], rsetname]
rsettype = rectype2iparectype[recordset['type']][0]
args = {rsettype: None}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def create_record(self, context, domain, recordset, record):
LOG.debug('Create Record %r / %r / %r' % (domain, recordset, record))
ipareq = {'method': 'dnsrecord_add', 'id': 0}
params, args = self._rec_to_ipa_rec(domain, recordset, [record])
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def update_record(self, context, domain, recordset, record):
LOG.debug('Update Record %r / %r / %r' % (domain, recordset, record))
# for modify operations - IPA does not support a way to change
# a particular field in a given record - e.g. for an MX record
# with several values, IPA stores them like this:
# name: "server1.local."
# data: ["10 mx1.server1.local.", "20 mx2.server1.local."]
# we could do a search of IPA, compare the values in the
# returned array - but that adds an additional round trip
# and is error prone
# instead, we just get all of the current values and send
# them in one big modify
criteria = {'recordset_id': record['recordset_id']}
reclist = self.central_service.find_records(self.admin_context,
criteria)
ipareq = {'method': 'dnsrecord_mod', 'id': 0}
params, args = self._rec_to_ipa_rec(domain, recordset, reclist)
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_record(self, context, domain, recordset, record):
LOG.debug('Delete Record %r / %r / %r' % (domain, recordset, record))
ipareq = {'method': 'dnsrecord_del', 'id': 0}
params, args = self._rec_to_ipa_rec(domain, recordset, [record])
args['del_all'] = 0
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def ping(self, context):
LOG.debug('Ping')
# NOTE: This call will cause ipa to issue an error, but
# 1) it should not throw an exception
# 2) the response will indicate ipa is running
# 3) the bandwidth usage is minimal
ipareq = {'method': 'dnszone_show', 'id': 0}
params = ['@']
args = {}
ipareq['params'] = [params, args]
retval = {'result': True}
try:
self._call_and_handle_error(ipareq)
| |
= self._runtime_cxn(basename="both_proxies")
assert cxn.gateway == Connection("winner@everything:777")
def multi_hop_works_ok(self):
cxn = self._runtime_cxn(basename="proxyjump_multi")
innermost = cxn.gateway.gateway.gateway
middle = cxn.gateway.gateway
outermost = cxn.gateway
assert innermost == Connection("jumpuser3@jumphost3:411")
assert middle == Connection("jumpuser2@jumphost2:872")
assert outermost == Connection("jumpuser@jumphost:373")
def wildcards_do_not_trigger_recursion(self):
# When #1850 is present, this will RecursionError.
conf = self._runtime_config(basename="proxyjump_recursive")
cxn = Connection("runtime.tld", config=conf)
assert cxn.gateway == Connection("bastion.tld")
assert cxn.gateway.gateway is None
def multihop_plus_wildcards_still_no_recursion(self):
conf = self._runtime_config(
basename="proxyjump_multi_recursive"
)
cxn = Connection("runtime.tld", config=conf)
outer = cxn.gateway
inner = cxn.gateway.gateway
assert outer == Connection("bastion1.tld")
assert inner == Connection("bastion2.tld")
assert inner.gateway is None
def gateway_Connections_get_parent_connection_configs(self):
conf = self._runtime_config(
basename="proxyjump",
overrides={"some_random_option": "a-value"},
)
cxn = Connection("runtime", config=conf)
# Sanity
assert cxn.config is conf
assert cxn.gateway == self._expected_gw
# Real check
assert cxn.gateway.config.some_random_option == "a-value"
# Prove copy not reference
# TODO: would we ever WANT a reference? can't imagine...
assert cxn.gateway.config is not conf
class connect_timeout:
def wins_over_default(self):
assert self._runtime_cxn().connect_timeout == 15
def wins_over_configuration(self):
cxn = self._runtime_cxn(
overrides={"timeouts": {"connect": 17}}
)
assert cxn.connect_timeout == 15
def loses_to_explicit(self):
config = self._runtime_config()
cxn = Connection(
"runtime", config=config, connect_timeout=23
)
assert cxn.connect_timeout == 23
class identity_file:
# NOTE: ssh_config value gets merged w/ (instead of overridden
# by) config and kwarg values; that is tested in the tests for
# open().
def basic_loading_of_value(self):
# By default, key_filename will be empty, and the data from
# the runtime ssh config will be all that appears.
value = self._runtime_cxn().connect_kwargs["key_filename"]
assert value == ["whatever.key", "some-other.key"]
class connect_kwargs:
def defaults_to_empty_dict(self):
assert Connection("host").connect_kwargs == {}
def may_be_given_explicitly(self):
cxn = Connection("host", connect_kwargs={"foo": "bar"})
assert cxn.connect_kwargs == {"foo": "bar"}
def may_be_configured(self):
c = Config(overrides={"connect_kwargs": {"origin": "config"}})
cxn = Connection("host", config=c)
assert cxn.connect_kwargs == {"origin": "config"}
def kwarg_wins_over_config(self):
# TODO: should this be more of a merge-down?
c = Config(overrides={"connect_kwargs": {"origin": "config"}})
cxn = Connection(
"host", connect_kwargs={"origin": "kwarg"}, config=c
)
assert cxn.connect_kwargs == {"origin": "kwarg"}
class inline_ssh_env:
def defaults_to_config_value(self):
assert Connection("host").inline_ssh_env is False
config = Config({"inline_ssh_env": True})
assert Connection("host", config=config).inline_ssh_env is True
def may_be_given(self):
assert Connection("host").inline_ssh_env is False
cxn = Connection("host", inline_ssh_env=True)
assert cxn.inline_ssh_env is True
class from_v1:
def setup(self):
self.env = faux_v1_env()
def _cxn(self, **kwargs):
self.env.update(kwargs)
return Connection.from_v1(self.env)
def must_be_given_explicit_env_arg(self):
cxn = Connection.from_v1(self.env)
assert cxn.host == "localghost"
class obtaining_config:
@patch("fabric.connection.Config.from_v1")
def defaults_to_calling_Config_from_v1(self, Config_from_v1):
Connection.from_v1(self.env)
Config_from_v1.assert_called_once_with(self.env)
@patch("fabric.connection.Config.from_v1")
def may_be_given_config_explicitly(self, Config_from_v1):
# Arguably a dupe of regular Connection constructor behavior,
# but whatever.
Connection.from_v1(env=self.env, config=Config())
assert not Config_from_v1.called
class additional_kwargs:
# I.e. as opposed to what happens to the 'env' kwarg...
def forwards_arbitrary_kwargs_to_init(self):
cxn = Connection.from_v1(
self.env,
connect_kwargs={"foo": "bar"},
inline_ssh_env=True,
connect_timeout=15,
)
assert cxn.connect_kwargs["foo"] == "bar"
assert cxn.inline_ssh_env is True
assert cxn.connect_timeout == 15
def conflicting_kwargs_win_over_v1_env_values(self):
env = Lexicon(self.env)
cxn = Connection.from_v1(
env, host="not-localghost", port=2222, user="remoteuser"
)
assert cxn.host == "not-localghost"
assert cxn.user == "remoteuser"
assert cxn.port == 2222
class var_mappings:
def host_string(self):
cxn = self._cxn() # default is 'localghost'
assert cxn.host == "localghost"
@raises(InvalidV1Env)
def None_host_string_errors_usefully(self):
self._cxn(host_string=None)
def user(self):
cxn = self._cxn(user="space")
assert cxn.user == "space"
class port:
def basic(self):
cxn = self._cxn(port=2222)
assert cxn.port == 2222
def casted_to_int(self):
cxn = self._cxn(port="2222")
assert cxn.port == 2222
def not_supplied_if_given_in_host_string(self):
cxn = self._cxn(host_string="localghost:3737", port=2222)
assert cxn.port == 3737
class string_representation:
"string representations"
def str_displays_repr(self):
c = Connection("meh")
assert str(c) == "<Connection host=meh>"
def displays_core_params(self):
c = Connection(user="me", host="there", port=123)
template = "<Connection host=there user=me port=123>"
assert repr(c) == template
def omits_default_param_values(self):
c = Connection("justhost")
assert repr(c) == "<Connection host=justhost>"
def param_comparison_uses_config(self):
conf = Config(overrides={"user": "zerocool"})
c = Connection(
user="zerocool", host="myhost", port=123, config=conf
)
template = "<Connection host=myhost port=123>"
assert repr(c) == template
def proxyjump_gateway_shows_type(self):
c = Connection(host="myhost", gateway=Connection("jump"))
template = "<Connection host=myhost gw=proxyjump>"
assert repr(c) == template
def proxycommand_gateway_shows_type(self):
c = Connection(host="myhost", gateway="netcat is cool")
template = "<Connection host=myhost gw=proxycommand>"
assert repr(c) == template
class comparison_and_hashing:
def comparison_uses_host_user_and_port(self):
# Just host
assert Connection("host") == Connection("host")
# Host + user
c1 = Connection("host", user="foo")
c2 = Connection("host", user="foo")
assert c1 == c2
# Host + user + port
c1 = Connection("host", user="foo", port=123)
c2 = Connection("host", user="foo", port=123)
assert c1 == c2
def comparison_to_non_Connections_is_False(self):
assert Connection("host") != 15
def hashing_works(self):
assert hash(Connection("host")) == hash(Connection("host"))
def sorting_works(self):
# Hostname...
assert Connection("a-host") < Connection("b-host")
# User...
assert Connection("a-host", user="a-user") < Connection(
"a-host", user="b-user"
)
# then port...
assert Connection("a-host", port=1) < Connection("a-host", port=2)
class open:
def has_no_required_args_and_returns_None(self, client):
assert Connection("host").open() is None
def calls_SSHClient_connect(self, client):
"calls paramiko.SSHClient.connect() with correct args"
Connection("host").open()
client.connect.assert_called_with(
username=get_local_user(), hostname="host", port=22
)
def passes_through_connect_kwargs(self, client):
Connection("host", connect_kwargs={"foobar": "bizbaz"}).open()
client.connect.assert_called_with(
username=get_local_user(),
hostname="host",
port=22,
foobar="bizbaz",
)
def refuses_to_overwrite_connect_kwargs_with_others(self, client):
for key, value, kwargs in (
# Core connection args should definitely not get overwritten!
# NOTE: recall that these keys are the SSHClient.connect()
# kwarg names, NOT our own config/kwarg names!
("hostname", "nothost", {}),
("port", 17, {}),
("username", "zerocool", {}),
# These might arguably still be allowed to work, but let's head
# off confusion anyways.
("timeout", 100, {"connect_timeout": 25}),
):
try:
Connection(
"host", connect_kwargs={key: value}, **kwargs
).open()
except ValueError as e:
err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!" # noqa
assert str(e) == err.format(key)
else:
assert False, "Did not raise ValueError!"
def connect_kwargs_protection_not_tripped_by_defaults(self, client):
Connection("host", connect_kwargs={"timeout": 300}).open()
client.connect.assert_called_with(
username=get_local_user(),
hostname="host",
port=22,
timeout=300,
)
def submits_connect_timeout(self, client):
Connection("host", connect_timeout=27).open()
client.connect.assert_called_with(
username=get_local_user(), hostname="host", port=22, timeout=27
)
def is_connected_True_when_successful(self, client):
c = Connection("host")
c.open()
assert c.is_connected is True
def short_circuits_if_already_connected(self, client):
cxn = Connection("host")
# First call will set self.transport to fixture's mock
cxn.open()
# Second call will check .is_connected which will see active==True,
# and short circuit
cxn.open()
assert client.connect.call_count == 1
def is_connected_still_False_when_connect_fails(self, client):
client.connect.side_effect = socket.error
cxn = Connection("host")
try:
cxn.open()
except socket.error:
pass
assert cxn.is_connected is False
def uses_configured_user_host_and_port(self, client):
Connection(user="myuser", host="myhost", port=9001).open()
client.connect.assert_called_once_with(
username="myuser", hostname="myhost", port=9001
)
# NOTE: does more involved stuff so can't use "client" fixture
@patch("fabric.connection.SSHClient")
def uses_gateway_channel_as_sock_for_SSHClient_connect(self, Client):
"uses Connection gateway as 'sock' arg to SSHClient.connect"
# Setup
mock_gw = Mock()
mock_main = Mock()
Client.side_effect = [mock_gw, mock_main]
gw = Connection("otherhost")
gw.open = Mock(wraps=gw.open)
main = Connection("host", gateway=gw)
main.open()
# Expect gateway is also open()'d
gw.open.assert_called_once_with()
# Expect direct-tcpip channel open on 1st client
open_channel = mock_gw.get_transport.return_value.open_channel
kwargs = open_channel.call_args[1]
assert kwargs["kind"] == "direct-tcpip"
assert kwargs["dest_addr"], "host" == 22
# Expect result of that channel open as sock arg to connect()
sock_arg = mock_main.connect.call_args[1]["sock"]
assert sock_arg is open_channel.return_value
@patch("fabric.connection.ProxyCommand")
def uses_proxycommand_as_sock_for_Client_connect(self, moxy, client):
"uses ProxyCommand from gateway as 'sock' arg to SSHClient.connect"
# Setup
main = Connection("host", gateway="net catty %h %p")
main.open()
# Expect ProxyCommand instantiation
moxy.assert_called_once_with("net catty host 22")
# Expect result of that as sock arg to connect()
sock_arg = client.connect.call_args[1]["sock"]
assert sock_arg is moxy.return_value
# TODO: all the various connect-time options such as agent forwarding,
# host acceptance policies, how to auth, etc etc. These are all aspects
# of a given session and not necessarily the same for entire lifetime
# of a Connection object, should it ever disconnect/reconnect.
# TODO: though some/all of those things might want to be set to
# defaults at initialization time...
class connect_kwargs_key_filename:
"connect_kwargs(key_filename=...)"
# TODO: it'd be nice to truly separate CLI from regular (non override
# level) invoke config; as it is, invoke config comes first in expected
# outputs since otherwise there's no way for --identity to "come
# first".
@pytest.mark.parametrize(
"ssh, invoke, kwarg, expected",
[
param(
True,
True,
True,
[
"configured.key",
"kwarg.key",
"ssh-config-B.key",
"ssh-config-A.key",
],
id="All sources",
),
param(False, False, False, [], id="No sources"),
param(
True,
False,
False,
["ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config only",
),
param(
False,
True,
False,
["configured.key"],
id="Invoke-level config only",
),
param(
False,
False,
True,
["kwarg.key"],
id="Connection kwarg only",
),
param(
True,
True,
False,
["configured.key", "ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config + invoke config, no kwarg",
),
param(
True,
False,
True,
["kwarg.key", "ssh-config-B.key", "ssh-config-A.key"],
id="ssh_config + kwarg, no Invoke-level config",
),
param(
False,
True,
True,
["configured.key", "kwarg.key"],
id="Invoke-level config + kwarg, no ssh_config",
),
],
)
def merges_sources(self, client, ssh, invoke, kwarg, expected):
config_kwargs = {}
if ssh:
# SSH config with | |
1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( 'Copy' , 'Item' , ), 61490, (61490, (), [ (16393, 10, None, None) , ], 1 , 1 , 4 , 0 , 196 , (3, 0, None, None) , 0 , )),
(( 'Delete' , ), 61514, (61514, (), [ ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
(( 'Display' , 'Modal' , ), 61606, (61606, (), [ (12, 17, None, None) , ], 1 , 1 , 4 , 1 , 204 , (3, 0, None, None) , 0 , )),
(( 'Move' , 'DestFldr' , 'Item' , ), 61492, (61492, (), [ (9, 1, None, "IID('{00063006-0000-0000-C000-000000000046}')") ,
(16393, 10, None, None) , ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( 'PrintOut' , ), 61491, (61491, (), [ ], 1 , 1 , 4 , 0 , 212 , (3, 0, None, None) , 0 , )),
(( 'Save' , ), 61512, (61512, (), [ ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( 'SaveAs' , 'Path' , 'Type' , ), 61521, (61521, (), [ (8, 1, None, None) ,
(12, 17, None, None) , ], 1 , 1 , 4 , 1 , 220 , (3, 0, None, None) , 0 , )),
(( 'AutoForwarded' , 'AutoForwarded' , ), 5, (5, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( 'AutoForwarded' , 'AutoForwarded' , ), 5, (5, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 228 , (3, 0, None, None) , 0 , )),
(( 'DeferredDeliveryTime' , 'DeferredDeliveryTime' , ), 15, (15, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 232 , (3, 0, None, None) , 0 , )),
(( 'DeferredDeliveryTime' , 'DeferredDeliveryTime' , ), 15, (15, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 236 , (3, 0, None, None) , 0 , )),
(( 'DeleteAfterSubmit' , 'DeleteAfterSubmit' , ), 3585, (3585, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 240 , (3, 0, None, None) , 0 , )),
(( 'DeleteAfterSubmit' , 'DeleteAfterSubmit' , ), 3585, (3585, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 244 , (3, 0, None, None) , 0 , )),
(( 'ExpiryTime' , 'ExpiryTime' , ), 21, (21, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 248 , (3, 0, None, None) , 0 , )),
(( 'ExpiryTime' , 'ExpiryTime' , ), 21, (21, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 252 , (3, 0, None, None) , 0 , )),
(( 'FlagDueBy' , 'FlagDueBy' , ), 48, (48, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 256 , (3, 0, None, None) , 0 , )),
(( 'FlagDueBy' , 'FlagDueBy' , ), 48, (48, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 260 , (3, 0, None, None) , 0 , )),
(( 'FlagRequest' , 'FlagRequest' , ), 34096, (34096, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 264 , (3, 0, None, None) , 0 , )),
(( 'FlagRequest' , 'FlagRequest' , ), 34096, (34096, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 268 , (3, 0, None, None) , 0 , )),
(( 'FlagStatus' , 'FlagStatus' , ), 4240, (4240, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 272 , (3, 0, None, None) , 0 , )),
(( 'FlagStatus' , 'FlagStatus' , ), 4240, (4240, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 276 , (3, 0, None, None) , 0 , )),
(( 'OriginatorDeliveryReportRequested' , 'OriginatorDeliveryReportRequested' , ), 35, (35, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 280 , (3, 0, None, None) , 0 , )),
(( 'OriginatorDeliveryReportRequested' , 'OriginatorDeliveryReportRequested' , ), 35, (35, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 284 , (3, 0, None, None) , 0 , )),
(( 'ReceivedTime' , 'ReceivedTime' , ), 3590, (3590, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 288 , (3, 0, None, None) , 0 , )),
(( 'ReceivedTime' , 'ReceivedTime' , ), 3590, (3590, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 292 , (3, 0, None, None) , 0 , )),
(( 'Recipients' , 'Recipients' , ), 63508, (63508, (), [ (16393, 10, None, "IID('{0006303B-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 296 , (3, 0, None, None) , 0 , )),
(( 'ReminderSet' , 'ReminderSet' , ), 34051, (34051, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 300 , (3, 0, None, None) , 0 , )),
(( 'ReminderSet' , 'ReminderSet' , ), 34051, (34051, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 304 , (3, 0, None, None) , 0 , )),
(( 'ReminderTime' , 'ReminderTime' , ), 34050, (34050, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 308 , (3, 0, None, None) , 0 , )),
(( 'ReminderTime' , 'ReminderTime' , ), 34050, (34050, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 312 , (3, 0, None, None) , 0 , )),
(( 'ReplyRecipients' , 'ReplyRecipients' , ), 61459, (61459, (), [ (16393, 10, None, "IID('{0006303B-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 316 , (3, 0, None, None) , 0 , )),
(( 'SaveSentMessageFolder' , 'SaveSentMessageFolder' , ), 62465, (62465, (), [ (16393, 10, None, "IID('{00063006-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 320 , (3, 0, None, None) , 0 , )),
(( 'SaveSentMessageFolder' , 'SaveSentMessageFolder' , ), 62465, (62465, (), [ (9, 1, None, "IID('{00063006-0000-0000-C000-000000000046}')") , ], 1 , 8 , 4 , 0 , 324 , (3, 0, None, None) , 0 , )),
(( 'SenderName' , 'SenderName' , ), 3098, (3098, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 328 , (3, 0, None, None) , 0 , )),
(( 'Sent' , 'Sent' , ), 62466, (62466, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 332 , (3, 0, None, None) , 0 , )),
(( 'SentOn' , 'SentOn' , ), 57, (57, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 336 , (3, 0, None, None) , 0 , )),
(( 'Submitted' , 'Submitted' , ), 62467, (62467, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 340 , (3, 0, None, None) , 0 , )),
(( 'Forward' , 'Item' , ), 63507, (63507, (), [ (16397, 10, None, "IID('{00061036-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 344 , (3, 0, None, None) , 0 , )),
(( 'GetAssociatedAppointment' , 'AddToCalendar' , 'Item' , ), 63328, (63328, (), [ (11, 1, None, None) ,
(16397, | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# __
# Author : <NAME> and team
# Tool : Source
# Usage : ./Source.py example.com (or) python Source.py example.com
# Description: This scanner automates the process of security scanning by using a
# multitude of available linux security tools and some custom scripts.
#
# Importing the libraries
import sys
import socket
import subprocess
import os
import time
import signal
import random
import string
import threading
import re
from urllib.parse import urlsplit
# Scan Time Elapser
intervals = (
('h', 3600),
('m', 60),
('s', 1),
)
def display_time(seconds, granularity=3):
result = []
seconds = seconds + 1
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
result.append("{}{}".format(value, name))
return ' '.join(result[:granularity])
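# Illustrative examples (not in the original script): display_time() rounds up by
# one second and reports at most `granularity` units, e.g.
#   display_time(3725)    # -> '1h 2m 6s'
#   display_time(59, 2)   # -> '1m'  (59 + 1 = 60 seconds)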
def url_maker(url):
if not re.match(r'http(s?)\:', url):
url = 'http://' + url
parsed = urlsplit(url)
host = parsed.netloc
if host.startswith('www.'):
host = host[4:]
return host
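# Illustrative examples (not in the original script):
#   url_maker('https://www.example.com/login')  # -> 'example.com'
#   url_maker('example.org')                    # -> 'example.org' ('http://' is prepended first)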
def check_internet():
os.system('ping -c1 github.com > rs_net 2>&1')
if "0% packet loss" in open('rs_net').read():
val = 1
else:
val = 0
os.system('rm rs_net > /dev/null 2>&1')
return val
# Initializing the color module class
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
BADFAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BG_ERR_TXT = '\033[41m' # For critical errors and crashes
BG_HEAD_TXT = '\033[100m'
BG_ENDL_TXT = '\033[46m'
BG_CRIT_TXT = '\033[45m'
BG_HIGH_TXT = '\033[41m'
BG_MED_TXT = '\033[43m'
BG_LOW_TXT = '\033[44m'
BG_INFO_TXT = '\033[42m'
# Classifies the Vulnerability's Severity
def vul_info(val):
result =''
if val == 'c':
result = bcolors.BG_CRIT_TXT+" critical "+bcolors.ENDC
elif val == 'h':
result = bcolors.BG_HIGH_TXT+" high "+bcolors.ENDC
elif val == 'm':
result = bcolors.BG_MED_TXT+" medium "+bcolors.ENDC
elif val == 'l':
result = bcolors.BG_LOW_TXT+" low "+bcolors.ENDC
else:
result = bcolors.BG_INFO_TXT+" info "+bcolors.ENDC
return result
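# Illustrative example (not in the original script): vul_info() wraps a severity
# label in the matching background colour, e.g.
#   vul_info('m')   # -> '\033[43m medium \033[0m' (yellow "medium" badge)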
# Legends
proc_high = bcolors.BADFAIL + "●" + bcolors.ENDC
proc_med = bcolors.WARNING + "●" + bcolors.ENDC
proc_low = bcolors.OKGREEN + "●" + bcolors.ENDC
# Links the vulnerability with threat level and remediation database
def vul_remed_info(v1,v2,v3):
print(bcolors.BOLD+"Vulnerability Threat Level"+bcolors.ENDC)
print("\t"+vul_info(v2)+" "+bcolors.WARNING+str(tool_resp[v1][0])+bcolors.ENDC)
print(bcolors.BOLD+"Vulnerability Definition"+bcolors.ENDC)
print("\t"+bcolors.BADFAIL+str(tools_fix[v3-1][1])+bcolors.ENDC)
print(bcolors.BOLD+"Vulnerability Remediation"+bcolors.ENDC)
print("\t"+bcolors.OKGREEN+str(tools_fix[v3-1][2])+bcolors.ENDC)
# Source Help Context
def helper():
print(bcolors.OKBLUE+"Information:"+bcolors.ENDC)
print("------------")
print("\t./Source.py example.com: Scans the domain example.com")
print( "\t./Source.py --update : Updates the scanner to the latest version.")
print( "\t./Source.py --help : Displays this help context.")
print( bcolors.OKBLUE+"Interactive:"+bcolors.ENDC)
print( "------------")
print( "\tCtrl+C: Skips current test.")
print( "\tCtrl+Z: Quits Source.")
print( bcolors.OKBLUE+"Legends:"+bcolors.ENDC)
print( "--------")
print( "\t["+proc_high+"]: Scan process may take longer times (not predictable).")
print( "\t["+proc_med+"]: Scan process may take less than 10 minutes.")
print( "\t["+proc_low+"]: Scan process may take less than a minute or two.")
print( bcolors.OKBLUE+"Vulnerability Information:"+bcolors.ENDC)
print( "--------------------------")
print( "\t"+vul_info('c')+": Requires immediate attention as it may lead to compromise or service unavailability.")
print( "\t"+vul_info('h')+" : May not lead to an immediate compromise, but there are high chances of probability.")
print( "\t"+vul_info('m')+" : Attacker may correlate multiple vulnerabilities of this type to launch a sophisticated attack.")
print( "\t"+vul_info('l')+" : Not a serious issue, but it is recommended to attend the finding.")
print( "\t"+vul_info('i')+" : Not classified as a vulnerability, simply an useful informational alert to be considered.\n")
# Clears Line
def clear():
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
# Source Logo
def logo():
print(bcolors.WARNING)
print("VULNEROUS RAPID WEB APP ANALYSER")
print (bcolors.ENDC)
# Initializing the idle loader/spinner class
class Spinner:
busy = False
delay = 0.05
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/\\': yield cursor #←↑↓→
#for cursor in '←↑↓→': yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay): self.delay = delay
def spinner_task(self):
try:
while self.busy:
#sys.stdout.write(next(self.spinner_generator))
# end='' keeps the spinner frame on the current line so the '\b' below can erase it
print(bcolors.BG_ERR_TXT+next(self.spinner_generator)+bcolors.ENDC, end='')
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
except (KeyboardInterrupt, SystemExit):
#clear()
print( "\n\t"+ bcolors.BG_ERR_TXT+"Source received a series of Ctrl+C hits. Quitting..." +bcolors.ENDC)
sys.exit(1)
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
try:
self.busy = False
time.sleep(self.delay)
except (KeyboardInterrupt, SystemExit):
#clear()
print( "\n\t"+ bcolors.BG_ERR_TXT+"Source received a series of Ctrl+C hits. Quitting..." +bcolors.ENDC)
sys.exit(1)
# End of loader/spinner class
# Instantiating the spinner/loader class
spinner = Spinner()
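# Hedged usage sketch (comments only, since starting the spinner spawns a real
# thread): wrap any long-running shell call between start() and stop(), e.g.
#
#   spinner.start()
#   os.system('nmap -F --open -Pn example.com > /dev/null 2>&1')
#   spinner.stop()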
# Scanners that will be used and filename rotation (default: enabled (1))
tool_names = [
["host","Host - Checks for existence of IPV6 address.","host",1],
["aspnet_config_err","ASP.Net Misconfiguration - Checks for ASP.Net Misconfiguration.","wget",1],
["wp_check","WordPress Checker - Checks for WordPress Installation.","wget",1],
["drp_check", "Drupal Checker - Checks for Drupal Installation.","wget",1],
["joom_check", "Joomla Checker - Checks for Joomla Installation.","wget",1],
["uniscan","Uniscan - Checks for robots.txt & sitemap.xml","uniscan",1],
["wafw00f","Wafw00f - Checks for Application Firewalls.","wafw00f",1],
["nmap","Nmap - Fast Scan [Only Few Port Checks]","nmap",1],
["theharvester","The Harvester - Scans for emails using Google's passive search.","theharvester",1],
["dnsrecon","DNSRecon - Attempts Multiple Zone Transfers on Nameservers.","dnsrecon",1],
["fierce","Fierce - Attempts Zone Transfer [No Brute Forcing]","fierce",1],
["dnswalk","DNSWalk - Attempts Zone Transfer.","dnswalk",1],
["whois","WHOis - Checks for Administrator's Contact Information.","whois",1],
["nmap_header","Nmap [XSS Filter Check] - Checks if XSS Protection Header is present.","nmap",1],
["nmap_sloris","Nmap [Slowloris DoS] - Checks for Slowloris Denial of Service Vulnerability.","nmap",1],
["sslyze_hbleed","SSLyze - Checks only for Heartbleed Vulnerability.","sslyze",1],
["nmap_hbleed","Nmap [Heartbleed] - Checks only for Heartbleed Vulnerability.","nmap",1],
["nmap_poodle","Nmap [POODLE] - Checks only for Poodle Vulnerability.","nmap",1],
["nmap_ccs","Nmap [OpenSSL CCS Injection] - Checks only for CCS Injection.","nmap",1],
["nmap_freak","Nmap [FREAK] - Checks only for FREAK Vulnerability.","nmap",1],
["nmap_logjam","Nmap [LOGJAM] - Checks for LOGJAM Vulnerability.","nmap",1],
["sslyze_ocsp","SSLyze - Checks for OCSP Stapling.","sslyze",1],
["sslyze_zlib","SSLyze - Checks for ZLib Deflate Compression.","sslyze",1],
["sslyze_reneg","SSLyze - Checks for Secure Renegotiation Support and Client Renegotiation.","sslyze",1],
["sslyze_resum","SSLyze - Checks for Session Resumption Support with [Session IDs/TLS Tickets].","sslyze",1],
["lbd","LBD - Checks for DNS/HTTP Load Balancers.","lbd",1],
["golismero_dns_malware","Golismero - Checks if the domain is spoofed or hijacked.","golismero",1],
["golismero_heartbleed","Golismero - Checks only for Heartbleed Vulnerability.","golismero",1],
["golismero_brute_url_predictables","Golismero - BruteForces for certain files on the Domain.","golismero",1],
["golismero_brute_directories","Golismero - BruteForces for certain directories on the Domain.","golismero",1],
["golismero_sqlmap","Golismero - SQLMap [Retrieves only the DB Banner]","golismero",1],
["dirb","DirB - Brutes the target for Open Directories.","dirb",1],
["xsser","XSSer - Checks for Cross-Site Scripting [XSS] Attacks.","xsser",1],
["golismero_ssl_scan","Golismero SSL Scans - Performs SSL related Scans.","golismero",1],
["golismero_zone_transfer","Golismero Zone Transfer - Attempts Zone Transfer.","golismero",1],
["golismero_nikto","Golismero Nikto Scans - Uses Nikto Plugin to detect vulnerabilities.","golismero",1],
["golismero_brute_subdomains","Golismero Subdomains Bruter - Brute Forces Subdomain Discovery.","golismero",1],
["dnsenum_zone_transfer","DNSEnum - Attempts Zone Transfer.","dnsenum",1],
["fierce_brute_subdomains","Fierce Subdomains Bruter - Brute Forces Subdomain Discovery.","fierce",1],
["dmitry_email","DMitry - Passively Harvests Emails from the Domain.","dmitry",1],
["dmitry_subdomains","DMitry - Passively Harvests Subdomains from the Domain.","dmitry",1],
["nmap_telnet","Nmap [TELNET] - Checks if TELNET service is running.","nmap",1],
["nmap_ftp","Nmap [FTP] - Checks if FTP service is running.","nmap",1],
["nmap_stuxnet","Nmap [STUXNET] - Checks if the host is affected by STUXNET Worm.","nmap",1],
["webdav","WebDAV - Checks if WEBDAV enabled on Home directory.","davtest",1],
["golismero_finger","Golismero - Does a fingerprint on the Domain.","golismero",1],
["uniscan_filebrute","Uniscan - Brutes for Filenames on the Domain.","uniscan",1],
["uniscan_dirbrute", "Uniscan - Brutes Directories on the Domain.","uniscan",1],
["uniscan_ministresser", "Uniscan - Stress Tests the Domain.","uniscan",1],
["uniscan_rfi","Uniscan - Checks for LFI, RFI and RCE.","uniscan",1],#50
["uniscan_xss","Uniscan - Checks for XSS, SQLi, BSQLi & Other Checks.","uniscan",1],
["nikto_xss","Nikto - Checks for Apache Expect XSS Header.","nikto",1],
["nikto_subrute","Nikto - Brutes Subdomains.","nikto",1],
["nikto_shellshock","Nikto - Checks for Shellshock Bug.","nikto",1],
["nikto_internalip","Nikto - Checks for Internal IP Leak.","nikto",1],
["nikto_putdel","Nikto - Checks for HTTP PUT DEL.","nikto",1],
["nikto_headers","Nikto - Checks the Domain Headers.","nikto",1],
["nikto_ms01070","Nikto - Checks for MS10-070 Vulnerability.","nikto",1],
["nikto_servermsgs","Nikto - Checks for Server Issues.","nikto",1],
["nikto_outdated","Nikto - Checks if Server is Outdated.","nikto",1],
["nikto_httpoptions","Nikto - Checks for HTTP Options on the Domain.","nikto",1],
["nikto_cgi","Nikto - Enumerates CGI Directories.","nikto",1],
["nikto_ssl","Nikto - Performs SSL Checks.","nikto",1],
["nikto_sitefiles","Nikto - Checks for any interesting files on the Domain.","nikto",1],
["nikto_paths","Nikto - Checks for Injectable Paths.","nikto",1],
["dnsmap_brute","DNSMap - Brutes Subdomains.","dnsmap",1],
["nmap_sqlserver","Nmap - Checks for MS-SQL Server DB","nmap",1],
["nmap_mysql", "Nmap - Checks for MySQL DB","nmap",1],
["nmap_oracle", "Nmap - Checks for ORACLE DB","nmap",1],
["nmap_rdp_udp","Nmap - Checks for Remote Desktop Service over UDP","nmap",1],
["nmap_rdp_tcp","Nmap - Checks for Remote Desktop Service over TCP","nmap",1],
["nmap_full_ps_tcp","Nmap - Performs a Full TCP Port Scan","nmap",1],
["nmap_full_ps_udp","Nmap - Performs a Full UDP Port Scan","nmap",1],
["nmap_snmp","Nmap - Checks for SNMP Service","nmap",1],
["aspnet_elmah_axd","Checks for ASP.net Elmah Logger","wget",1],
["nmap_tcp_smb","Checks for SMB Service over TCP","nmap",1],
["nmap_udp_smb","Checks for SMB Service over UDP","nmap",1],
["wapiti","Wapiti - Checks for SQLi, RCE, XSS and Other Vulnerabilities","wapiti",1],
["nmap_iis","Nmap - Checks for IIS WebDAV","nmap",1],
["whatweb","WhatWeb - Checks for X-XSS Protection Header","whatweb",1]
]
# Command that is used to initiate the tool (with parameters and extra params)
tool_cmd = [
["host ",""],
["wget -O temp_aspnet_config_err --tries=1 ","/%7C~.aspx"],
["wget -O temp_wp_check --tries=1 ","/wp-admin"],
["wget -O temp_drp_check --tries=1 ","/user"],
["wget -O temp_joom_check --tries=1 ","/administrator"],
["uniscan -e -u ",""],
["wafw00f ",""],
["nmap -F --open -Pn ",""],
["theharvester -l 50 -b google -d ",""],
["dnsrecon -d | |
key_certs):
"""
Validates we're properly signed by the signing certificates.
.. versionadded:: 1.6.0
:param list key_certs: :class:`~stem.descriptor.networkstatus.KeyCertificates`
to validate the consensus against
:raises: **ValueError** if an insufficient number of valid signatures are present.
"""
# sha1 hash of the body and header
local_digest = self._digest_for_content(b'network-status-version', b'directory-signature ')
valid_digests, total_digests = 0, 0
required_digests = len(self.signatures) / 2.0
signing_keys = dict([(cert.fingerprint, cert.signing_key) for cert in key_certs])
for sig in self.signatures:
if sig.identity not in signing_keys:
continue
signed_digest = self._digest_for_signature(signing_keys[sig.identity], sig.signature)
total_digests += 1
if signed_digest == local_digest:
valid_digests += 1
if valid_digests < required_digests:
raise ValueError('Network Status Document has %i valid signatures out of %i total, needed %i' % (valid_digests, total_digests, required_digests))
def get_unrecognized_lines(self):
if self._lazy_loading:
self._parse(self._header_entries, False, parser_for_line = self.HEADER_PARSER_FOR_LINE)
self._parse(self._footer_entries, False, parser_for_line = self.FOOTER_PARSER_FOR_LINE)
self._lazy_loading = False
return super(NetworkStatusDocumentV3, self).get_unrecognized_lines()
def meets_consensus_method(self, method):
"""
Checks if we meet the given consensus-method. This works for both votes and
consensuses, checking our 'consensus-method' and 'consensus-methods'
entries.
:param int method: consensus-method to check for
:returns: **True** if we meet the given consensus-method, and **False** otherwise
"""
if self.consensus_method is not None:
return self.consensus_method >= method
elif self.consensus_methods is not None:
return bool([x for x in self.consensus_methods if x >= method])
else:
return False # malformed document
def _compare(self, other, method):
if not isinstance(other, NetworkStatusDocumentV3):
return False
return method(str(self).strip(), str(other).strip())
def _header(self, document_file, validate):
content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = _descriptor_components(content, validate)
header_fields = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
if validate:
# all known header fields can only appear once, except 'package' and 'shared-rand-commit'
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in header_fields and keyword != 'package' and keyword != 'shared-rand-commit':
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if self._default_params:
self.params = dict(DEFAULT_PARAMS)
self._parse(entries, validate, parser_for_line = self.HEADER_PARSER_FOR_LINE)
# should only appear in consensus-method 7 or later
if not self.meets_consensus_method(7) and 'params' in list(entries.keys()):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
# default consensus_method and consensus_methods based on if we're a consensus or vote
if self.is_consensus and not self.consensus_method:
self.consensus_method = 1
elif self.is_vote and not self.consensus_methods:
self.consensus_methods = [1]
else:
self._header_entries = entries
self._entries.update(entries)
def _footer(self, document_file, validate):
entries = _descriptor_components(document_file.read(), validate)
footer_fields = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
if validate:
for keyword, values in list(entries.items()):
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if len(values) > 1 and keyword in footer_fields:
if not (keyword == 'directory-signature' and self.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
self._parse(entries, validate, parser_for_line = self.FOOTER_PARSER_FOR_LINE)
# Check that the footer has the right initial line. Prior to consensus
# method 9 it's a 'directory-signature' and after that footers start with
# 'directory-footer'.
if entries:
if self.meets_consensus_method(9):
if list(entries.keys())[0] != 'directory-footer':
raise ValueError("Network status document's footer should start with a 'directory-footer' line in consensus-method 9 or later")
else:
if list(entries.keys())[0] != 'directory-signature':
raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9")
_check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
else:
self._footer_entries = entries
self._entries.update(entries)
def _check_params_constraints(self):
"""
Checks that the params we know about are within their documented ranges.
"""
for key, value in self.params.items():
minimum, maximum = PARAM_RANGE.get(key, (MIN_PARAM, MAX_PARAM))
# there's a few dynamic parameter ranges
if key == 'cbtclosequantile':
minimum = self.params.get('cbtquantile', minimum)
elif key == 'cbtinitialtimeout':
minimum = self.params.get('cbtmintimeout', minimum)
if value < minimum or value > maximum:
raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value))
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def _check_for_missing_and_disallowed_fields(document, entries, fields):
"""
Checks that we have mandatory fields for our type, and that we don't have
any fields exclusive to the other (ie, no vote-only fields appear in a
consensus or vice versa).
:param NetworkStatusDocumentV3 document: network status document
:param dict entries: ordered keyword/value mappings of the header or footer
:param list fields: expected field attributes (either
**HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**)
:raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't
"""
missing_fields, disallowed_fields = [], []
for field, in_votes, in_consensus, mandatory in fields:
if mandatory and ((document.is_consensus and in_consensus) or (document.is_vote and in_votes)):
# mandatory field, check that we have it
if field not in entries.keys():
missing_fields.append(field)
elif (document.is_consensus and not in_consensus) or (document.is_vote and not in_votes):
# field we shouldn't have, check that we don't
if field in entries.keys():
disallowed_fields.append(field)
if missing_fields:
raise ValueError('Network status document is missing mandatory field: %s' % ', '.join(missing_fields))
if disallowed_fields:
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
results, seen_keys = {}, []
for entry in value.split(' '):
try:
if '=' not in entry:
raise ValueError("must only have 'key=value' entries")
entry_key, entry_value = entry.split('=', 1)
try:
# the int() function accepts things like '+123', but we don't want to
if entry_value.startswith('+'):
raise ValueError()
entry_value = int(entry_value)
except ValueError:
raise ValueError("'%s' is a non-numeric value" % entry_value)
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > entry_key:
raise ValueError('parameters must be sorted by their key')
results[entry_key] = entry_value
seen_keys.append(entry_key)
except ValueError as exc:
if not validate:
continue
raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value))
return results
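# Hedged illustration (never called, and it pokes a private helper directly):
# a well-formed 'params'-style value parses into an int dict; with validation
# enabled the keys must already be in ascending lexical order.
def _example_parse_int_mappings():
    parsed = _parse_int_mappings('params', 'circwindow=80 refuseunknownexits=1', True)
    assert parsed == {'circwindow': 80, 'refuseunknownexits': 1}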
def _parse_dirauth_source_line(descriptor, entries):
# "dir-source" nickname identity address IP dirport orport
value = _value('dir-source', entries)
dir_source_comp = value.split(' ')
if len(dir_source_comp) < 6:
raise ValueError("Authority entry's 'dir-source' line must have six values: dir-source %s" % value)
if not stem.util.tor_tools.is_valid_nickname(dir_source_comp[0].rstrip('-legacy')):
raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0])
elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]):
raise ValueError("Authority's v3ident is invalid: %s" % dir_source_comp[1])
elif not dir_source_comp[2]:
# https://trac.torproject.org/7055
raise ValueError("Authority's hostname can't be blank: dir-source %s" % value)
elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[3]):
raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3])
elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True):
raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4])
elif not stem.util.connection.is_valid_port(dir_source_comp[5]):
raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5])
descriptor.nickname = dir_source_comp[0]
descriptor.v3ident = dir_source_comp[1]
descriptor.hostname = dir_source_comp[2]
descriptor.address = dir_source_comp[3]
descriptor.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4])
descriptor.or_port = int(dir_source_comp[5])
descriptor.is_legacy = descriptor.nickname.endswith('-legacy')
_parse_legacy_dir_key_line = _parse_forty_character_hex('legacy-dir-key', 'legacy_dir_key')
_parse_vote_digest_line = _parse_forty_character_hex('vote-digest', 'vote_digest')
class DirectoryAuthority(Descriptor):
"""
Directory authority information obtained from a v3 network status document.
Authorities can optionally use a legacy format. These are no longer found in
practice, but have the following differences...
* The authority's nickname ends with '-legacy'.
* There's no **contact** or **vote_digest** attribute.
:var str nickname: **\*** authority's nickname
:var str v3ident: **\*** identity key fingerprint used to sign votes and consensus
:var str hostname: **\*** hostname of the authority
:var str address: **\*** authority's IP address
:var int dir_port: **\*** authority's DirPort
:var int or_port: **\*** authority's ORPort
:var bool is_legacy: **\*** if the authority's using the legacy format
:var str contact: contact information, this is included if is_legacy is **False**
**Consensus Attributes:**
:var str vote_digest: digest of the authority that contributed to the consensus, this is included if is_legacy is **False**
**Vote Attributes:**
:var str legacy_dir_key: fingerprint of an obsolete identity key
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
authority's key certificate
:var bool is_shared_randomness_participate: **\*** **True** if this authority
participates in establishing a shared random value, **False** otherwise
:var list shared_randomness_commitments: **\*** list of
:data:`~stem.descriptor.networkstatus.SharedRandomnessCommitment` entries
:var int | |
# archive/src/indigox/periodictable.py
from indigox.exception import IndigoSearchError
class _Element(object):
def __init__(self, name, symbol, group, period, number, mass, atomic_radii,
covalent_radii, vdw_radii, chi, hyper=None):
self.name = name
self.symbol = symbol
self.group = group
self.period = period
self.number = number
self.mass = mass
self.atomic_radii = atomic_radii
self.covalent_radii = covalent_radii
self.vdw_radii = vdw_radii
self.chi = chi
if group < 13:
self.valence = group
elif group == 18 and symbol == 'He':
self.valence = 2
else:
self.valence = group - 10
if group == 1 or symbol == 'He':
self.octet = 2
elif group == 2:
self.octet = 4
elif group == 13:
self.octet = 6
else:
self.octet = 8
if hyper is None:
self.hyper = self.octet
else:
self.hyper = hyper
def __str__(self):
return self.symbol
def __eq__(self, c):
if self.symbol == c or self.number == c or self.name == c:
return True
return object.__eq__(self, c)
class _PeriodicTable(object):
def __init__(self, elements):
self.elements_number = dict()
self.elements_name = dict()
self.elements_symbol = dict()
for e in elements:
self.elements_number[e.number] = e
self.elements_name[e.name] = e
self.elements_symbol[e.symbol] = e
def __getattr__(self, name):
return self[name]
def __getitem__(self, name):
try:
int(name)
except ValueError:
intable = False
else:
intable = True
if not intable and name.title() in self.elements_name:
return self.elements_name[name.title()]
elif not intable and name.title() in self.elements_symbol:
return self.elements_symbol[name.title()]
elif intable and int(name) in self.elements_number:
return self.elements_number[int(name)]
else:
raise IndigoSearchError('Unknown element type: {}'
.format(name.title()))
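# Hedged illustration (never called): a _PeriodicTable can be indexed by
# symbol, by name (case-insensitively, via title()) or by atomic number. The
# carbon entry mirrors the one in the _elements list below.
def _example_periodic_lookup():
    carbon = _Element("Carbon", "C", 14, 2, 6, 12.011, 0.77, 0.77, 1.85, 2.55)
    table = _PeriodicTable([carbon])
    assert table['C'] is carbon
    assert table['carbon'] is carbon
    assert table[6] is carbon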
_elements = [
# Name Symbol Group Period AtomicNumber Mass Radius Cvradius VdWradius chi
_Element("Nullium", "X", 18, 9, 0, 0.0000, 0.00, 0.0, 0.0, 0.0 ),
_Element("Actinium", "Ac", 3, 7, 89, 227.0278, 1.88, 0.0, 0.0, 1.3 ),
_Element("Aluminum", "Al", 13, 3, 13, 26.981539, 1.43, 1.25, 2.05, 1.61),
_Element("Americium", "Am", 0, 7, 95, 243.0614, 1.73, 0.0, 0.0, 1.3 ),
_Element("Antimony", "Sb", 15, 5, 51, 121.76, 1.82, 1.41, 2.2, 2.05),
_Element("Argon", "Ar", 18, 3, 18, 39.948, 1.74, 0.0, 1.91, 0.0 ),
_Element("Arsenic", "As", 15, 4, 33, 74.92159, 1.25, 1.21, 2.0, 2.18),
_Element("Astatine", "At", 17, 6, 85, 209.9871, 0.0, 0.0, 0.0, 1.96),
_Element("Barium", "Ba", 2, 6, 56, 137.327, 2.17, 1.98, 0.0, 0.89),
_Element("Berkelium", "Bk", 0, 7, 97, 247.0703, 1.70, 0.0, 0.0, 1.3 ),
_Element("Beryllium", "Be", 2, 2, 4, 9.012182, 1.13, 0.89, 0.0, 1.57),
_Element("Bismuth", "Bi", 15, 6, 83, 208.98037, 1.55, 1.52, 2.4, 2.0 ),
_Element("Bohrium", "Bh", 7, 7, 107, 262.12, 0.0, 0.0, 0.0, 0.0 ),
_Element("Boron", "B" , 13, 2, 5, 10.811, 0.83, 0.88, 2.08, 2.04),
_Element("Bromine", "Br", 17, 4, 35, 79.904, 0.0, 1.14, 1.95, 2.96, 12),
_Element("Cadmium", "Cd", 12, 5, 48, 112.411, 1.49, 1.41, 0.0, 1.69),
_Element("Caesium", "Cs", 1, 6, 55, 132.90543, 2.654, 2.35, 2.62, 0.79),
_Element("Calcium", "Ca", 2, 4, 20, 40.078, 1.97, 1.74, 0.0, 1.0 ),
_Element("Californium", "Cf", 0, 7, 98, 251.0796, 1.69, 0.0, 0.0, 1.3 ),
_Element("Carbon", "C", 14, 2, 6, 12.011, 0.77, 0.77, 1.85, 2.55),
_Element("Cerium", "Ce", 0, 6, 58, 140.115, 1.825, 1.65, 0.0, 1.12),
_Element("Chlorine", "Cl", 17, 3, 17, 35.4527, 0.0, 0.99, 1.81, 3.16),
_Element("Chromium", "Cr", 6, 4, 24, 51.9961, 1.25, 0.0, 0.0, 1.66),
_Element("Cobalt", "Co", 9, 4, 27, 58.9332, 1.25, 1.16, 0.0, 1.88),
_Element("Copper", "Cu", 11, 4, 29, 63.546, 1.28, 1.17, 0.0, 1.9 ),
_Element("Curium", "Cm", 0, 7, 96, 247.0703, 1.74, 0.0, 0.0, 1.3 ),
_Element("Dubnium", "Db", 4, 7, 104, 261.11, 0.0, 0.0, 0.0, 0.0 ),
_Element("Dysprosium", "Dy", 0, 6, 66, 162.5, 1.77, 1.59, 0.0, 1.23),
_Element("Einsteinium", "Es", 0, 7, 99, 252.083, 2.03, 0.0, 0.0, 1.3 ),
_Element("Erbium", "Er", 0, 6, 68, 167.26, 1.76, 1.57, 0.0, 1.25),
_Element("Europium", "Eu", 0, 6, 63, 151.965, 2.04, 1.85, 0.0, 1.2 ),
_Element("Fermium", "Fm", 0, 7, 100, 257.0951, 0.0, 0.0, 0.0, 1.3 ),
_Element("Fluorine", "F" , 17, 2, 9, 18.9984032, 0.709, 0.58, 1.35, 3.98),
_Element("Francium", "Fr", 1, 7, 87, 223.0197, 2.7, 0.0, 0.0, 0.7 ),
_Element("Gadolinium", "Gd", 0, 6, 64, 157.25, 1.8, 1.61, 0.0, 0.94),
_Element("Gallium", "Ga", 13, 4, 31, 69.723, 1.22, 1.25, 0.0, 1.81),
_Element("Germanium", "Ge", 14, 4, 32, 72.61, 1.23, 1.22, 0.0, 2.01),
_Element("Gold", "Au", 11, 6, 79, 196.96654, 1.44, 1.34, 0.0, 2.0 ),
_Element("Hafnium", "Hf", 4, 6, 72, 178.49, 1.56, 1.44, 0.0, 1.5 ),
_Element("Hahnium", "Hn", 8, 7, 108, 0.0, 0.0, 0.0, 0.0, 0.0 ),
_Element("Helium", "He", 18, 1, 2, 4.002602, 1.28, 0.0, 1.22, 0.0 ),
_Element("Holmium", "Ho", 0, 6, 67, 164.93032, 1.77, 1.58, 0.0, 1.24),
_Element("Hydrogen", "H" , 1, 1, 1, 1.00797, 0.78, 0.3, 1.2, 2.2 ),
_Element("Indium", "In", 13, 5, 49, 114.818, 1.63, 1.5, 0.0, 1.78),
_Element("Iodine", "I" , 17, 5, 53, 126.90447, 0.0, 1.33, 2.15, 2.66),
_Element("Iridium", "Ir", 9, 6, 77, 192.217, 1.36, 1.26, 0.0, 2.28),
_Element("Iron", "Fe", 8, 4, 26, 55.845, 1.24, 1.16, 0.0, 1.83),
_Element("Joliotium", "Jl", 5, 7, 105, 262.114, 0.0, 0.0, 0.0, 0.0 ),
_Element("Krypton", "Kr", 18, 4, 36, 83.80, 0.0, 1.89, 1.98, 0.0 ),
_Element("Lanthanum", "La", 3, 6, 57, 138.9055, 1.88, 1.69, 0.0, 1.1 ),
_Element("Lawrencium", "Lr", 3, 7, 103, 262.11, 0.0, 0.0, 0.0, 0.0 ),
_Element("Lead", "Pb", 14, 6, 82, 207.2, 1.75, 1.54, 0.0, 2.02),
_Element("Lithium", "Li", 1, 2, 3, 6.941, 1.52, 1.23, 0.0, 0.98),
_Element("Lutetium", "Lu", 3, 6, 71, 174.967, 1.72, 1.56, 0.0, 1.3 ),
_Element("Magnesium", "Mg", 2, 3, 12, 24.30506, 1.6, 1.36, 0.0, 1.31),
_Element("Manganese", "Mn", 7, 4, 25, 54.93805, 1.24, 1.77, 0.0, 1.55),
_Element("Meitnerium", "Mt", 9, 7, 109, 0.0, 0.0, 0.0, 0.0, 0.0 ),
_Element("Mendelevium", "Md", 0, 7, 101, 258.1, 0.0, 0.0, 0.0, 1.3 ),
_Element("Mercury", "Hg", 12, 6, 80, 200.59, 1.60, 1.44, 0.0, 1.8 ),
_Element("Molybdenum", "Mo", 6, 5, 42, 95.94, 1.36, 1.29, 0.0, 2.16),
_Element("Neodymium", "Nd", 0, 6, 60, 144.24, 1.82, 1.64, 0.0, 1.14),
_Element("Neon", "Ne", 18, 2, 10, 20.1797, 0.0, 0.0, 1.6, 0.0 ),
_Element("Neptunium", "Np", 0, 7, 93, 237.0482, 1.5, 0.0, 0.0, 1.28),
_Element("Nickel", "Ni", 10, 4, 28, 58.6934, 1.25, 1.15, 0.0, 1.91),
_Element("Niobium", "Nb", 5, 5, 41, 92.90638, 1.43, 1.34, 0.0, 1.6 ),
_Element("Nitrogen", "N" , 15, 2, 7, 14.00674, 0.71, 0.7, 1.54, 3.04),
_Element("Nobelium", "No", 0, 7, 102, 259.1009, 0.0, 0.0, 0.0, 0.0 ),
_Element("Osmium", "Os", 8, 6, 76, 190.23, 1.35, 1.26, 0.0, 2.2 ),
_Element("Oxygen", "O" , 16, 2, 8, 15.9994, 0.6, 0.66, 1.4, 3.44),
_Element("Palladium", "Pd", 10, 5, 46, 106.42, 1.38, 1.28, 0.0, 2.2 ),
_Element("Phosphorus", "P" , 15, 3, 15, 30.973762, 1.15, 1.10, 1.9, 2.19, 10),
_Element("Platinum", "Pt", 10, 6, 78, 195.08, 1.38, 1.29, 0.0, 2.54),
_Element("Plutonium", "Pu", 7, 0, 94, 244.0642, 0.0, 0.0, 0.0, 1.3 ),
_Element("Polonium", "Po", 16, 6, 84, 208.9824, 1.67, 1.53, 0.0, 2.2 ),
_Element("Potassium", "K" , 1, 4, 19, 39.0983, 2.27, 2.03, 2.31, 0.82),
_Element("Praseodymium", "Pr", 0, 6, 59, 140.90765, 1.83, 1.65, 0.0, 1.13),
_Element("Promethium", "Pm", 0, 6, 61, 144.9127, 1.81, 0.0, 0.0, 0.94),
_Element("Protactinium", "Pa", 0, 7, 91, 231.03588, 1.61, 0.0, 0.0, 1.38),
_Element("Radium", "Ra", 2, 7, 88, 226.0254, 2.23, 0.0, 0.0, 0.89),
_Element("Radon", "Rn", 18, 6, 86, 222.0176, 0.0, 0.0, 0.0, 0.7 ),
_Element("Rhenium", "Re", 7, 6, 75, 186.207, 1.37, 1.28, 0.0, 2.2 ),
_Element("Rhodium", "Rh", 9, 5, 45, 102.9055, 1.34, 1.25, 0.0, 2.28),
_Element("Rubidium", "Rb", 1, 5, 37, 85.4678, 1.475, 0.0, 2.44, 0.82),
_Element("Ruthenium", "Ru", 8, 5, 44, 101.07, 1.34, 1.24, 0.0, 2.2 ),
_Element("Rutherfordium", "Rf", 6, 7, 106, 263.118, 0.0, 0.0, 0.0, 0.0 ),
_Element("Samarium", "Sm", 0, 6, 62, 150.36, 1.8, 1.66, 0.0, 1.17),
_Element("Scandium", "Sc", 3, 4, 21, 44.95591, 1.61, 1.44, 0.0, 1.36),
_Element("Selenium", "Se", 16, 4, 34, 78.96, 2.15, 1.17, 2.0, 2.55),
_Element("Silicon", "Si", 14, 3, 14, 28.0855, 1.17, 1.17, 2.0, 1.9 ),
_Element("Silver", "Ag", 11, 5, 47, 107.8682, 1.44, 1.34, 0.0, 1.93),
_Element("Sodium", "Na", 1, 3, 11, 22.989768, 1.54, 0.0, 2.31, 0.93),
_Element("Strontium", "Sr", 2, 5, 38, 87.62, 2.15, 1.92, 0.0, 0.95),
_Element("Sulfur", "S" , 16, 3, 16, 32.066, 1.04, 1.04, 1.85, 2.58, 12),
_Element("Tantalum", "Ta", 5, 6, 73, 180.9479, 1.43, 1.34, 0.0, 2.36),
_Element("Technetium", "Tc", 7, 5, 43, 98.9072, 1.36, 0.0, 0.0, 1.9 ),
_Element("Tellurium", "Te", 16, 5, 52, 127.6, 1.43, 1.37, 2.2, 2.1 ),
_Element("Terbium", "Tb", 0, 6, 65, 158.92534, 1.78, 1.59, 0.0, 1.22),
_Element("Thallium", "Tl", 13, 6, 81, 204.3833, 1.7, 1.55, 0.0, 2.33),
_Element("Thorium", "Th", 0, 7, 90, 232.0381, 1.80, | |
# Copyright 2008 Willow Garage, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Serialization of sensor_msgs.PointCloud2 messages.
Author: <NAME>
ROS 2 port by <NAME>
File originally ported from:
https://github.com/ros/common_msgs/blob/f48b00d43cdb82ed9367e0956db332484f676598/
sensor_msgs/src/sensor_msgs/point_cloud2.py
"""
import array
from collections import namedtuple
import sys
from typing import Iterable, List, NamedTuple, Optional
import numpy as np
from numpy.lib.recfunctions import (structured_to_unstructured,
unstructured_to_structured)
from sensor_msgs.msg import PointCloud2, PointField
from std_msgs.msg import Header
_DATATYPES = {}
_DATATYPES[PointField.INT8] = np.dtype(np.int8)
_DATATYPES[PointField.UINT8] = np.dtype(np.uint8)
_DATATYPES[PointField.INT16] = np.dtype(np.int16)
_DATATYPES[PointField.UINT16] = np.dtype(np.uint16)
_DATATYPES[PointField.INT32] = np.dtype(np.int32)
_DATATYPES[PointField.UINT32] = np.dtype(np.uint32)
_DATATYPES[PointField.FLOAT32] = np.dtype(np.float32)
_DATATYPES[PointField.FLOAT64] = np.dtype(np.float64)
DUMMY_FIELD_PREFIX = 'unnamed_field'
def read_points(
cloud: PointCloud2,
field_names: Optional[List[str]] = None,
skip_nans: bool = False,
uvs: Optional[Iterable] = None,
reshape_organized_cloud: bool = False) -> np.ndarray:
"""
Read points from a sensor_msgs.PointCloud2 message.
:param cloud: The point cloud to read from sensor_msgs.PointCloud2.
:param field_names: The names of fields to read. If None, read all fields.
(Type: Iterable, Default: None)
:param skip_nans: If True, then don't return any point with a NaN value.
(Type: Bool, Default: False)
:param uvs: If specified, then only return the points at the given
coordinates. (Type: Iterable, Default: None)
:param reshape_organized_cloud: Returns the array as a 2D organized point cloud if set.
:return: Structured NumPy array containing all points.
"""
assert isinstance(cloud, PointCloud2), \
'Cloud is not a sensor_msgs.msg.PointCloud2'
# Cast bytes to numpy array
points = np.ndarray(
shape=(cloud.width * cloud.height, ),
dtype=dtype_from_fields(cloud.fields),
buffer=cloud.data)
# Keep only the requested fields
if field_names is not None:
assert all(field_name in points.dtype.names for field_name in field_names), \
'Requested field is not in the fields of the PointCloud!'
# Mask fields
points = points[list(field_names)]
# Swap array if byte order does not match
if bool(sys.byteorder != 'little') != bool(cloud.is_bigendian):
points = points.byteswap(inplace=True)
# Check if we want to drop points with nan values
if skip_nans and not cloud.is_dense:
# Init mask which selects all points
not_nan_mask = np.ones(len(points), dtype=bool)
for field_name in points.dtype.names:
# Only keep points without any NaN values in the mask
not_nan_mask = np.logical_and(
not_nan_mask, ~np.isnan(points[field_name]))
# Select these points
points = points[not_nan_mask]
# Select points indexed by the uvs field
if uvs is not None:
# Don't convert to numpy array if it is already one
if not isinstance(uvs, np.ndarray):
uvs = np.fromiter(uvs, int)
# Index requested points
points = points[uvs]
# Cast into 2d array if cloud is 'organized'
if reshape_organized_cloud and cloud.height > 1:
points = points.reshape(cloud.width, cloud.height)
return points
def read_points_numpy(
cloud: PointCloud2,
field_names: Optional[List[str]] = None,
skip_nans: bool = False,
uvs: Optional[Iterable] = None,
reshape_organized_cloud: bool = False) -> np.ndarray:
"""
Read equally typed fields from a sensor_msgs.PointCloud2 message as an unstructured numpy array.
This method is better suited if one wants to perform math operations
on e.g. all x,y,z fields.
But it is limited to fields with the same dtype as unstructured numpy arrays
only contain one dtype.
:param cloud: The point cloud to read from sensor_msgs.PointCloud2.
:param field_names: The names of fields to read. If None, read all fields.
(Type: Iterable, Default: None)
:param skip_nans: If True, then don't return any point with a NaN value.
(Type: Bool, Default: False)
:param uvs: If specified, then only return the points at the given
coordinates. (Type: Iterable, Default: None)
:param reshape_organized_cloud: Returns the array as a 2D organized point cloud if set.
:return: Numpy array containing all points.
"""
assert all(cloud.fields[0].datatype == field.datatype for field in cloud.fields[1:]), \
'All fields need to have the same datatype. Use `read_points()` otherwise.'
structured_numpy_array = read_points(
cloud, field_names, skip_nans, uvs, reshape_organized_cloud)
return structured_to_unstructured(structured_numpy_array)
def read_points_list(
cloud: PointCloud2,
field_names: Optional[List[str]] = None,
skip_nans: bool = False,
uvs: Optional[Iterable] = None) -> List[NamedTuple]:
"""
Read points from a sensor_msgs.PointCloud2 message.
This function returns a list of namedtuples. It operates on top of the
read_points method. For more efficient access use read_points directly.
:param cloud: The point cloud to read from. (Type: sensor_msgs.PointCloud2)
:param field_names: The names of fields to read. If None, read all fields.
(Type: Iterable, Default: None)
:param skip_nans: If True, then don't return any point with a NaN value.
(Type: Bool, Default: False)
:param uvs: If specified, then only return the points at the given
coordinates. (Type: Iterable, Default: None)
:return: List of namedtuples containing the values for each point
"""
assert isinstance(cloud, PointCloud2), \
'cloud is not a sensor_msgs.msg.PointCloud2'
if field_names is None:
field_names = [f.name for f in cloud.fields]
Point = namedtuple('Point', field_names)
return [Point._make(p) for p in read_points(cloud, field_names,
skip_nans, uvs)]
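# Hedged usage sketch (never called; assumes `cloud` is a populated PointCloud2
# whose x/y/z fields are all FLOAT32): read_points() keeps the field names in a
# structured array, while read_points_numpy() returns a plain (N, 3) array.
def _example_read_xyz(cloud: PointCloud2) -> np.ndarray:
    structured = read_points(cloud, field_names=['x', 'y', 'z'], skip_nans=True)
    assert set(structured.dtype.names) == {'x', 'y', 'z'}
    return read_points_numpy(cloud, field_names=['x', 'y', 'z'], skip_nans=True)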
def dtype_from_fields(fields: Iterable[PointField]) -> np.dtype:
"""
Convert a Iterable of sensor_msgs.msg.PointField messages to a np.dtype.
:param fields: The point cloud fields.
(Type: iterable of sensor_msgs.msg.PointField)
:returns: NumPy datatype
"""
# Create a lists containing the names, offsets and datatypes of all fields
field_names = []
field_offsets = []
field_datatypes = []
for i, field in enumerate(fields):
# Datatype as numpy datatype
datatype = _DATATYPES[field.datatype]
# Name field
if field.name == '':
name = f'{DUMMY_FIELD_PREFIX}_{i}'
else:
name = field.name
# Handle fields with count > 1 by creating subfields with a suffix consisting
# of "_" followed by the subfield counter [0 -> (count - 1)]
assert field.count > 0, "Can't process fields with count = 0."
for a in range(field.count):
# Add suffix if we have multiple subfields
if field.count > 1:
subfield_name = f'{name}_{a}'
else:
subfield_name = name
assert subfield_name not in field_names, 'Duplicate field names are not allowed!'
field_names.append(subfield_name)
# Create new offset that includes subfields
field_offsets.append(field.offset + a * datatype.itemsize)
field_datatypes.append(datatype.str)
# Create a tuple for each field containing name and data type
return np.dtype({
'names': field_names,
'formats': field_datatypes,
'offsets': field_offsets,
})
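# Hedged illustration (never called; assumes keyword construction of PointField,
# which rclpy-generated messages support): three packed FLOAT32 fields collapse
# into one structured dtype whose offsets match the message layout.
def _example_dtype_from_fields() -> np.dtype:
    fields = [
        PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),
        PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),
        PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1),
    ]
    dtype = dtype_from_fields(fields)
    assert dtype.names == ('x', 'y', 'z')
    return dtype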
def create_cloud(
header: Header,
fields: Iterable[PointField],
points: Iterable) -> PointCloud2:
"""
Create a sensor_msgs.msg.PointCloud2 message.
:param header: The point cloud header. (Type: std_msgs.msg.Header)
:param fields: The point cloud fields.
(Type: iterable of sensor_msgs.msg.PointField)
:param points: The point cloud points. List of iterables, i.e. one iterable
for each point, with the elements of each iterable being the
values of the fields for that point (in the same order as
the fields parameter)
:return: The point cloud as sensor_msgs.msg.PointCloud2
"""
# Check if input is numpy array
if isinstance(points, np.ndarray):
# Check if this is an unstructured array
if points.dtype.names is None:
assert all(fields[0].datatype == field.datatype for field in fields[1:]), \
'All fields need to have the same datatype. Pass a structured NumPy array \
with multiple dtypes otherwise.'
# Convert unstructured to structured array
points = unstructured_to_structured(
points,
dtype=dtype_from_fields(fields))
else:
assert points.dtype == dtype_from_fields(fields), \
'PointFields and structured NumPy array dtype do not match for all fields! \
Check their field order, names and types.'
else:
# Cast python objects to structured NumPy array (slow)
points = np.array(
# Points need to be tuples in the structured array
list(map(tuple,
# Source: eugenevinitsky/cdc_bottlenecks, flow/agents/centralized_PPO.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""An example of customizing PPO to leverage a centralized critic."""
import argparse
import numpy as np
from gym.spaces import Dict
from ray import tune
from ray.rllib.agents.ppo.ppo import PPOTrainer
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy, KLCoeffMixin, BEHAVIOUR_LOGITS
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
EntropyCoeffSchedule, ACTION_LOGP
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2
from ray.rllib.models.model import restore_original_dimensions
from ray.rllib.utils.annotations import override
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils import try_import_tf
from flow.agents.custom_ppo import AttributeMixin
from flow.agents.ImitationPPO import PPOLoss
tf = try_import_tf()
CENTRAL_OBS = "central_obs"
OPPONENT_ACTION = "opponent_action"
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=100000)
#TODOy
class CentralizedCriticModel(TFModelV2):
"""Multi-agent model that implements a centralized VF."""
# TODO(@evinitsky) make this work with more than boxes
def __init__(self, obs_space, action_space, num_outputs, model_config,
name):
super(CentralizedCriticModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name)
# Base of the model
self.model = FullyConnectedNetwork(obs_space, action_space,
num_outputs, model_config, name)
self.register_variables(self.model.variables())
# Central VF maps (obs, opp_ops, opp_act) -> vf_pred
self.max_num_agents = model_config['custom_options']['max_num_agents']
self.obs_space_shape = obs_space.shape[0]
self.obs_space = obs_space
other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents, ), name="central_obs")
central_vf_dense = tf.keras.layers.Dense(
model_config['custom_options']['central_vf_size'], activation=tf.nn.tanh, name="c_vf_dense")(other_obs)
central_vf_out = tf.keras.layers.Dense(
1, activation=None, name="c_vf_out")(central_vf_dense)
self.central_vf = tf.keras.Model(
inputs=[other_obs], outputs=central_vf_out)
self.register_variables(self.central_vf.variables)
def forward(self, input_dict, state, seq_lens):
return self.model.forward(input_dict, state, seq_lens)
def central_value_function(self, central_obs):
return tf.reshape(
self.central_vf(
[central_obs]), [-1])
def value_function(self):
return self.model.value_function() # not used
# TODO(@evinitsky) support recurrence
class CentralizedCriticModelRNN(RecurrentTFModelV2):
"""Example of using the Keras functional API to define a RNN model."""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
hiddens_size=64,
cell_size=64):
super(CentralizedCriticModelRNN, self).__init__(obs_space, action_space, num_outputs,
model_config, name)
self.cell_size = cell_size
# Define input layers
input_layer = tf.keras.layers.Input(
shape=(None, obs_space.shape[0]), name="inputs")
state_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")
state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")
seq_in = tf.keras.layers.Input(shape=(), name="seq_in")
# Preprocess observation with a hidden layer and send to LSTM cell
dense1 = tf.keras.layers.Dense(
hiddens_size, activation=tf.nn.relu, name="dense1")(input_layer)
lstm_out, state_h, state_c = tf.keras.layers.LSTM(
cell_size, return_sequences=True, return_state=True, name="lstm")(
inputs=dense1,
mask=tf.sequence_mask(seq_in),
initial_state=[state_in_h, state_in_c])
# Postprocess LSTM output with another hidden layer and compute values
logits = tf.keras.layers.Dense(
self.num_outputs,
activation=tf.keras.activations.linear,
name="logits")(lstm_out)
values = tf.keras.layers.Dense(
1, activation=None, name="values")(lstm_out)
# Create the RNN model
self.model = tf.keras.Model(
inputs=[input_layer, seq_in, state_in_h, state_in_c],
outputs=[logits, values, state_h, state_c])
self.register_variables(self.model.variables)
self.model.summary()
#TODO(@evinitsky) add layer sharing to the VF
# Create the centralized VF
# Central VF maps (obs, opp_ops, opp_act) -> vf_pred
self.max_num_agents = model_config.get("max_num_agents", 120)
self.obs_space_shape = obs_space.shape[0]
other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents,), name="all_agent_obs")
central_vf_dense = tf.keras.layers.Dense(
model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(other_obs)
central_vf_out = tf.keras.layers.Dense(
1, activation=None, name="c_vf_out")(central_vf_dense)
self.central_vf = tf.keras.Model(
inputs=[other_obs], outputs=central_vf_out)
self.register_variables(self.central_vf.variables)
@override(RecurrentTFModelV2)
def forward_rnn(self, inputs, state, seq_lens):
model_out, self._value_out, h, c = self.model([inputs, seq_lens] +
state)
return model_out, [h, c]
@override(ModelV2)
def get_initial_state(self):
return [
np.zeros(self.cell_size, np.float32),
np.zeros(self.cell_size, np.float32),
]
def central_value_function(self, central_obs):
return tf.reshape(
self.central_vf(
[central_obs]), [-1])
def value_function(self):
return tf.reshape(self._value_out, [-1]) # not used
class CentralizedValueMixin(object):
"""Add methods to evaluate the central value function from the model."""
def __init__(self):
# TODO(@evinitsky) clean up naming
self.central_value_function = self.model.central_value_function(
self.get_placeholder(CENTRAL_OBS)
)
def compute_central_vf(self, central_obs):
feed_dict = {
self.get_placeholder(CENTRAL_OBS): central_obs,
}
return self.get_session().run(self.central_value_function, feed_dict)
# Grabs the opponent obs/act and includes it in the experience train_batch,
# and computes GAE using the central vf predictions.
def centralized_critic_postprocessing(policy,
sample_batch,
other_agent_batches=None,
episode=None):
if policy.loss_initialized():
assert other_agent_batches is not None
time_span = (sample_batch['t'][0], sample_batch['t'][-1])
other_agent_times = {agent_id:
(other_agent_batches[agent_id][1]["t"][0],
other_agent_batches[agent_id][1]["t"][-1])
for agent_id in other_agent_batches.keys()}
# find agents whose time overlaps with the current agent
rel_agents = {agent_id: other_agent_time for agent_id,
other_agent_time in
other_agent_times.items()
if time_overlap(time_span, other_agent_time)}
if len(rel_agents) > 0:
other_obs = {agent_id:
other_agent_batches[agent_id][1]["obs"].copy()
for agent_id in rel_agents.keys()}
padded_agent_obs = {agent_id:
overlap_and_pad_agent(
time_span,
rel_agent_time,
other_obs[agent_id])
for agent_id,
rel_agent_time in rel_agents.items()}
# okay, now we need to stack and sort
central_obs_list = [padded_obs for padded_obs in padded_agent_obs.values()]
# sort by absolute position
# if isinstance(policy.model.obs_space.original_space, Dict):
# central_obs_list = sorted(central_obs_list, key=lambda x:
# restore_original_dimensions(x, policy.model.obs_space)["obs"][0])
# central_obs_batch = np.hstack(central_obs_list)
# else:
# central_obs_list = sorted(central_obs_list, key=lambda x: x[0])
# central_obs_batch = np.hstack(central_obs_list)
# central_obs_list = sorted(central_obs_list, key=lambda x: x[0])
final_stack = []
for i in range(central_obs_list[0].shape[0]):
# sort based on the zero element, which we guarantee is absolute position
elems = sorted([elem[i, :] for elem in central_obs_list], key=lambda x: x[0])
final_stack.append(np.hstack(elems))
central_obs_batch = np.array(final_stack)
central_obs_batch = np.hstack((sample_batch["obs"], central_obs_batch))
else:
central_obs_batch = sample_batch["obs"]
max_vf_agents = policy.model.max_num_agents
num_agents = len(rel_agents) + 1
if num_agents < max_vf_agents:
diff = max_vf_agents - num_agents
zero_pad = np.zeros((central_obs_batch.shape[0],
policy.model.obs_space_shape * diff))
central_obs_batch = np.hstack((central_obs_batch,
zero_pad))
elif num_agents > max_vf_agents:
print("Too many agents!")
# also record the opponent obs and actions in the trajectory
sample_batch[CENTRAL_OBS] = central_obs_batch
# overwrite default VF prediction with the central VF
sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(sample_batch[CENTRAL_OBS])
else:
# policy hasn't initialized yet, use zeros
#TODO(evinitsky) put in the right shape
obs_shape = sample_batch[SampleBatch.CUR_OBS].shape[1]
obs_shape = (1, obs_shape * (policy.model.max_num_agents))
sample_batch[CENTRAL_OBS] = np.zeros(obs_shape)
# TODO(evinitsky) put in the right shape. Will break if actions aren't 1
sample_batch[SampleBatch.VF_PREDS] = np.zeros(1, dtype=np.float32)
# hack to catch the fact that we are never done
if 't' in sample_batch.keys():
completed = (sample_batch['t'][-1] < policy.horizon - 1)
else:
completed = False
if not completed and policy.loss_initialized():
next_state = []
for i in range(policy.num_state_tensors()):
next_state.append([sample_batch["state_out_{}".format(i)][-1]])
last_r = policy.compute_central_vf(sample_batch[CENTRAL_OBS][-1][np.newaxis, ...])[0]
else:
net_outflow = 0.0
if episode is not None:
outflow = np.array(episode.user_data['outflow']) / 2000.0
final_time = sample_batch['t'][-1]
net_outflow = sum(outflow[final_time:])
if policy.terminal_reward:
sample_batch["rewards"][-1] += net_outflow
last_r = 0.0
train_batch = compute_advantages(
sample_batch,
last_r,
policy.config["gamma"],
policy.config["lambda"],
use_gae=policy.config["use_gae"])
return train_batch
def time_overlap(time_span, agent_time):
"""Check if agent_time overlaps with time_span"""
if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]:
return True
else:
return False
def overlap_and_pad_agent(time_span, agent_time, obs):
"""take the part of obs that overlaps, pad to length time_span
Arguments:
time_span (tuple): tuple of the first and last time that the agent
of interest is in the system
agent_time (tuple): tuple of the first and last time that the
agent whose obs we are padding is in the system
obs (np.ndarray): observations of the agent whose time is
agent_time
"""
assert time_overlap(time_span, agent_time)
# FIXME(ev) some of these conditions can be combined
# no padding needed
if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]:
return obs
# agent enters before time_span starts and exits before time_span end
if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]:
non_overlap_time = time_span[0] - agent_time[0]
missing_time = time_span[1] - agent_time[1]
overlap_obs = obs[non_overlap_time:]
padding = np.zeros((missing_time, obs.shape[1]))
return np.concatenate((overlap_obs, padding))
# agent enters after time_span starts and exits after time_span ends
elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]:
non_overlap_time = agent_time[1] - time_span[1]
overlap_obs = obs[:-non_overlap_time]
missing_time = agent_time[0] - time_span[0]
padding = np.zeros((missing_time, obs.shape[1]))
return np.concatenate((padding, overlap_obs))
# agent time is entirely contained in time_span
elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]:
missing_left = agent_time[0] - time_span[0]
missing_right = time_span[1] - agent_time[1]
obs_concat = obs
if missing_left > 0:
padding = np.zeros((missing_left, obs.shape[1]))
obs_concat = np.concatenate((padding, obs_concat))
if missing_right > 0:
padding = np.zeros((missing_right, obs.shape[1]))
obs_concat = np.concatenate((obs_concat, padding))
return obs_concat
# agent time totally contains time_span
elif agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]:
non_overlap_left = time_span[0] - agent_time[0]
non_overlap_right = agent_time[1] - time_span[1]
overlap_obs = obs
if non_overlap_left > 0:
overlap_obs = overlap_obs[non_overlap_left:]
if non_overlap_right > 0:
overlap_obs = overlap_obs[:-non_overlap_right]
return overlap_obs
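# Hedged illustration (never called): an agent present for steps 1-3 inside a
# 0-4 span gets one row of zero padding on each side, so a (3, obs_dim) history
# comes back with shape (5, obs_dim).
def _example_overlap_padding():
    obs = np.ones((3, 2))
    padded = overlap_and_pad_agent((0, 4), (1, 3), obs)
    assert padded.shape == (5, 2)
    assert (padded[0] == 0).all() and (padded[-1] == 0).all()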
# Copied from PPO but optimizing the central value function
def loss_with_central_critic(policy, model, dist_class, train_batch):
CentralizedValueMixin.__init__(policy)
logits, state = model.from_batch(train_batch)
action_dist = dist_class(logits, model)
policy.loss_obj = PPOLoss(
policy.action_space,
dist_class,
model,
train_batch[Postprocessing.VALUE_TARGETS],
train_batch[Postprocessing.ADVANTAGES],
train_batch[SampleBatch.ACTIONS],
train_batch[BEHAVIOUR_LOGITS],
train_batch[ACTION_LOGP],
train_batch[SampleBatch.VF_PREDS],
action_dist,
policy.central_value_function,
policy.kl_coeff,
tf.ones_like(train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool),
entropy_coeff=policy.entropy_coeff,
clip_param=policy.config["clip_param"],
vf_clip_param=policy.config["vf_clip_param"],
vf_loss_coeff=policy.config["vf_loss_coeff"],
use_gae=policy.config["use_gae"],
model_config=policy.config["model"])
return policy.loss_obj.loss
def new_ppo_surrogate_loss(policy, model, dist_class, train_batch):
loss = loss_with_central_critic(policy, model, dist_class, train_batch)
return loss
def setup_mixins(policy, obs_space, action_space, config):
# copied from PPO
AttributeMixin.__init__(policy, config)
KLCoeffMixin.__init__(policy, config)
EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
config["entropy_coeff_schedule"])
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
# hack: put in a noop VF so some of the inherited PPO code runs
policy.value_function = tf.zeros(
tf.shape(policy.get_placeholder(SampleBatch.CUR_OBS))[0])
def central_vf_stats(policy, train_batch, grads):
# Report the explained variance of the central value function.
return {
"vf_explained_var": explained_variance(
train_batch[Postprocessing.VALUE_TARGETS],
policy.central_value_function),
}
def kl_and_loss_stats(policy, train_batch):
print(train_batch["rewards"])
return {
"cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64),
"cur_lr": tf.cast(policy.cur_lr, tf.float64),
"total_loss": policy.loss_obj.loss,
"policy_loss": policy.loss_obj.mean_policy_loss,
"vf_loss": policy.loss_obj.mean_vf_loss,
"vf_explained_var": explained_variance(
train_batch[Postprocessing.VALUE_TARGETS],
policy.model.value_function()),
"vf_preds": train_batch[Postprocessing.VALUE_TARGETS],
"kl": policy.loss_obj.mean_kl,
"entropy": policy.loss_obj.mean_entropy,
"entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
"avg_rew": train_batch["rewards"][-1]
}
CCPPO = PPOTFPolicy.with_updates(
name="CCPPO",
= None
if 'tags' in kwargs:
tags = tags_to_oset(kwargs.pop('tags'))
# if this tensor has owners, update their ``tag_map``.
if self.check_owners():
for ref, tid in self._owners.values():
ref()._modify_tensor_tags(self.tags, tags, tid)
self._tags = tags
if 'left_inds' in kwargs:
self.left_inds = kwargs.pop('left_inds')
if kwargs:
raise ValueError(f"Option(s) {kwargs} not valid.")
if len(self.inds) != ndim(self.data):
raise ValueError("Mismatch between number of data dimensions and "
"number of indices supplied.")
if self.left_inds and any(i not in self.inds for i in self.left_inds):
raise ValueError(f"The 'left' indices {self.left_inds} are "
f"not found in {self.inds}.")
def isel(self, selectors, inplace=False):
"""Select specific values for some dimensions/indices of this tensor,
thereby removing them. Analogous to ``X[:, :, 3, :, :]`` with arrays.
Parameters
----------
selectors : dict[str, int]
Mapping of index(es) to which value to take.
inplace : bool, optional
Whether to select inplace or not.
Returns
-------
Tensor
Examples
--------
>>> T = rand_tensor((2, 3, 4), inds=('a', 'b', 'c'))
>>> T.isel({'b': -1})
Tensor(shape=(2, 4), inds=('a', 'c'), tags=())
See Also
--------
TensorNetwork.isel
"""
T = self if inplace else self.copy()
new_inds = tuple(ix for ix in self.inds if ix not in selectors)
data_loc = tuple(selectors.get(ix, slice(None)) for ix in self.inds)
T.modify(apply=lambda x: x[data_loc], inds=new_inds, left_inds=None)
return T
isel_ = functools.partialmethod(isel, inplace=True)
def add_tag(self, tag):
"""Add a tag to this tensor. Unlike ``self.tags.add`` this also updates
any TensorNetworks viewing this Tensor.
"""
# TODO: make this more efficient with inplace |= ?
self.modify(tags=itertools.chain(self.tags, (tag,)))
def expand_ind(self, ind, size):
"""Inplace increase the size of the dimension of ``ind``, the new array
entries will be filled with zeros.
Parameters
----------
name : str
Name of the index to expand.
size : int, optional
Size of the expanded index.
"""
if ind not in self.inds:
raise ValueError(f"Tensor has no index '{ind}'.")
size_current = self.ind_size(ind)
pads = [
(0, size - size_current) if i == ind else (0, 0)
for i in self.inds
]
self.modify(data=do('pad', self.data, pads, mode='constant'))
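# Hedged usage sketch (comment only, mirroring the rand_tensor helper used in
# the isel example above):
#
#     >>> T = rand_tensor((2, 3), inds=('a', 'b'))
#     >>> T.expand_ind('b', 5)
#     >>> T.shape
#     (2, 5)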
def new_ind(self, name, size=1, axis=0):
"""Inplace add a new index - a named dimension. If ``size`` is
specified to be greater than one then the new array entries will be
filled with zeros.
Parameters
----------
name : str
Name of the new index.
size : int, optional
Size of the new index.
axis : int, optional
Position of the new index.
"""
new_inds = list(self.inds)
# list.insert has different behavior to expand_dims for -ve. axis
if axis < 0:
axis = len(new_inds) + axis + 1
new_inds.insert(axis, name)
new_data = do('expand_dims', self.data, axis=axis)
self.modify(data=new_data, inds=new_inds)
if size > 1:
self.expand_ind(name, size)
new_bond = new_bond
def new_ind_with_identity(self, name, left_inds, right_inds, axis=0):
"""Inplace add a new index, where the newly stacked array entries form
the identity from ``left_inds`` to ``right_inds``. Selecting 0 or 1 for
the new index ``name`` thus is like 'turning off' this tensor if viewed
as an operator.
Parameters
----------
name : str
Name of the new index.
left_inds : tuple[str]
Names of the indices forming the left hand side of the operator.
right_inds : tuple[str]
Names of the indices forming the right hand side of the operator.
The dimensions of these must match those of ``left_inds``.
axis : int, optional
Position of the new index.
"""
ldims = tuple(map(self.ind_size, left_inds))
x_id = do('eye', prod(ldims), dtype=self.dtype, like=self.data)
x_id = do('reshape', x_id, ldims + ldims)
t_id = Tensor(x_id, inds=left_inds + right_inds)
t_id.transpose_(*self.inds)
new_data = do('stack', (self.data, t_id.data), axis=axis)
new_inds = list(self.inds)
new_inds.insert(axis, name)
self.modify(data=new_data, inds=new_inds)
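    # A hedged sketch of ``new_ind_with_identity`` viewed as an operator
    # 'switch' (the index names are illustrative):
    #
    #     import numpy as np
    #     T = Tensor(np.random.rand(2, 2), inds=('k', 'b'))
    #     T.new_ind_with_identity('switch', left_inds=('k',), right_inds=('b',))
    #     T.shape                      # -> (2, 2, 2); 'switch' stacked at axis 0
    #     T.isel({'switch': 0}).data   # -> the original array
    #     T.isel({'switch': 1}).data   # -> the 2x2 identity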
    def conj(self, inplace=False):
        """Conjugate this tensor's data (does nothing to indices).
"""
t = self if inplace else self.copy()
t.modify(apply=conj)
return t
conj_ = functools.partialmethod(conj, inplace=True)
@property
    def H(self):
        """Conjugate this tensor's data (does nothing to indices).
"""
return self.conj()
@property
def shape(self):
return self._data.shape
@property
def ndim(self):
return len(self._inds)
@property
def size(self):
# more robust than calling _data.size (e.g. for torch) - consider
# adding do('size', x) to autoray?
return prod(self.shape)
@property
def dtype(self):
return self._data.dtype
def iscomplex(self):
return iscomplex(self.data)
def astype(self, dtype, inplace=False):
"""Change the type of this tensor to ``dtype``.
"""
T = self if inplace else self.copy()
if T.dtype != dtype:
T.modify(apply=lambda data: astype(data, dtype))
return T
astype_ = functools.partialmethod(astype, inplace=True)
def max_dim(self):
"""Return the maximum size of any dimension, or 1 if scalar.
"""
if self.ndim == 0:
return 1
return max(self.shape)
def ind_size(self, ind):
"""Return the size of dimension corresponding to ``ind``.
"""
return int(self.shape[self.inds.index(ind)])
def shared_bond_size(self, other):
"""Get the total size of the shared index(es) with ``other``.
"""
return bonds_size(self, other)
    def inner_inds(self):
        """Return the indices that appear exactly twice on this tensor.
        """
ind_freqs = frequencies(self.inds)
return tuple(i for i in self.inds if ind_freqs[i] == 2)
def transpose(self, *output_inds, inplace=False):
"""Transpose this tensor - permuting the order of both the data *and*
the indices. This operation is mainly for ensuring a certain data
layout since for most operations the specific order of indices doesn't
matter.
        Note that to compute the traditional 'transpose' of an operator within a
        contraction, for example, you would just use reindexing, not this.
Parameters
----------
output_inds : sequence of str
The desired output sequence of indices.
inplace : bool, optional
            Perform the transposition inplace.
Returns
-------
tt : Tensor
The transposed tensor.
See Also
--------
transpose_like, reindex
"""
t = self if inplace else self.copy()
output_inds = tuple(output_inds) # need to re-use this.
if set(t.inds) != set(output_inds):
raise ValueError("'output_inds' must be permutation of the current"
f" tensor indices, but {set(t.inds)} != "
f"{set(output_inds)}")
current_ind_map = {ind: i for i, ind in enumerate(t.inds)}
perm = tuple(current_ind_map[i] for i in output_inds)
t.modify(apply=lambda x: transpose(x, perm), inds=output_inds)
return t
transpose_ = functools.partialmethod(transpose, inplace=True)
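    # A minimal usage sketch of ``transpose`` (the array and index names are
    # illustrative):
    #
    #     import numpy as np
    #     T = Tensor(np.random.rand(2, 3, 4), inds=('a', 'b', 'c'))
    #     T.transpose('c', 'a', 'b').shape   # -> (4, 2, 3)
    #
    # The data is permuted along with the index labels, so contractions are
    # unaffected; only the memory layout changes.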
def transpose_like(self, other, inplace=False):
"""Transpose this tensor to match the indices of ``other``, allowing
for one index to be different. E.g. if
``self.inds = ('a', 'b', 'c', 'x')`` and
``other.inds = ('b', 'a', 'd', 'c')`` then 'x' will be aligned with 'd'
and the output inds will be ``('b', 'a', 'x', 'c')``
Parameters
----------
other : Tensor
The tensor to match.
inplace : bool, optional
            Perform the transposition inplace.
Returns
-------
tt : Tensor
The transposed tensor.
See Also
--------
transpose
"""
t = self if inplace else self.copy()
diff_ix = set(t.inds) - set(other.inds)
if len(diff_ix) > 1:
raise ValueError("More than one index don't match, the transpose "
"is therefore not well-defined.")
# if their indices match, just plain transpose
if not diff_ix:
t.transpose_(*other.inds)
else:
di, = diff_ix
new_ix = (i if i in t.inds else di for i in other.inds)
t.transpose_(*new_ix)
return t
transpose_like_ = functools.partialmethod(transpose_like, inplace=True)
def trace(
self,
left_inds,
right_inds,
preserve_tensor=False,
inplace=False
):
"""Trace index or indices ``left_inds`` with ``right_inds``, removing
them.
Parameters
----------
left_inds : str or sequence of str
The left indices to trace, order matching ``right_inds``.
right_inds : str or sequence of str
The right indices to trace, order matching ``left_inds``.
preserve_tensor : bool, optional
If ``True``, a tensor will be returned even if no indices remain.
inplace : bool, optional
Perform the trace inplace.
Returns
-------
z : Tensor or scalar
"""
t = self if inplace else self.copy()
if isinstance(left_inds, str):
left_inds = (left_inds,)
if isinstance(right_inds, str):
right_inds = (right_inds,)
if len(left_inds) != len(right_inds):
raise ValueError(f"Can't trace {left_inds} with {right_inds}.")
remap = {}
for lix, rix in zip(left_inds, right_inds):
remap[lix] = lix
remap[rix] = lix
old_inds, new_inds = [], []
for ix in t.inds:
nix = remap.pop(ix, None)
if nix is not None:
old_inds.append(nix)
else:
old_inds.append(ix)
new_inds.append(ix)
if remap:
raise ValueError(f"Indices {tuple(remap)} not found.")
old_inds, new_inds = tuple(old_inds), tuple(new_inds)
eq = _inds_to_eq((old_inds,), new_inds)
t.modify(apply=lambda x: do('einsum', eq, x, like=x),
inds=new_inds, left_inds=None)
if not preserve_tensor and not new_inds:
data_out = t.data
if isinstance(data_out, np.ndarray):
data_out = realify_scalar(data_out.item())
return data_out
return t
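    # A hedged usage sketch of ``trace`` (array and index names are
    # illustrative):
    #
    #     import numpy as np
    #     T = Tensor(np.random.rand(3, 3, 4), inds=('a', 'b', 'c'))
    #     t2 = T.trace('a', 'b')       # contract 'a' with 'b'
    #     t2.inds, t2.shape            # -> ('c',), (4,)
    #
    # Tracing away every index returns a scalar unless ``preserve_tensor=True``.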
def sum_reduce(self, ind, inplace=False):
"""Sum over index ``ind``, removing it from this tensor.
Parameters
----------
ind : str
The index to sum over.
inplace : bool, optional
Whether to perform the reduction inplace.
Returns
-------
Tensor
"""
t = self if inplace else self.copy()
        axis = t.inds.index(ind)
        new_inds = t.inds[:axis] + t.inds[axis + 1:]
        t.modify(apply=lambda x: do('sum', x, axis=axis),
                 inds=new_inds, left_inds=None)
        return t
<filename>vernon/vkl.py
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 <NAME> and collaborators.
# Licensed under the MIT License.
"""Modeling the population of radiation belt electrons numerically using the
(V_{C_g},K,L^*) PDE coordinate space of Subbotin & Shprits
(2012JGRA..117.5205S, 10.1029/2011JA017467).
This module is the central place where the problem parameters are specified
and the relevant coefficients are computed.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from pwkit import cgs
from pwkit.numutil import broadcastize
from six.moves import range
import sympy
def bigy(y):
"""An approximation of the magic function Y(y) for dipolar fields.
We use the approximation from Schulz & Lanzerotti 1974.
The following approximation looks fancier, but fails at y = 0, which
is not what happens in the exact definition, so we do *not* use it:
T0 = 1.3802 # 1 + ln(2 + 3**0.5)/(2 * 3**0.5)
T1 = 0.7405 # pi * 2**0.5 / 6
Y = 2 * (1 - y) * T0 + (T0 - T1) * (y * np.log(y) + 2 * y - 2 * np.sqrt(y))
Note that this function gets called with both Numpy arrays and Sympy
expressions.
"""
return 2.760346 + 2.357194 * y - 5.11754 * y**0.75
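# A quick sanity check of the fit at the endpoints (the exact values, from the
# T0 and T1 quoted in the docstring, are Y(0) = 2*T0 ~= 2.7604 and Y(1) = 0):
#
#     bigy(0.0)   # -> 2.760346
#     bigy(1.0)   # -> 0.0  (2.760346 + 2.357194 - 5.11754)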
def approx_invert_Yony(r):
"""Given Y(y)/y, compute an approximate value for `y`. This is accurate to no
worse than ~0.07, which isn't great.
"""
A = 1.23
C = 9.02
gamma = 0.73
small_r = 1. / (1 + C * r)
large_r = A / (r + 1e-10)
w_small = (r + 1e-10)**(-gamma)
w_large = r**gamma
return np.clip((small_r * w_small + large_r * w_large) / (w_small + w_large), 0., 1.)
@broadcastize(1)
def numerical_invert_Yony(r):
"""Given r = Y(y)/y, compute alpha and y = sin(alpha) using Newton's method.
This routine extends the definition such that if r < 0, the angle alpha is
considered to lie in the range 90-180 degrees. This has nice continuous
behavior with the original definition.
"""
from scipy.optimize import newton
y = np.empty(r.shape)
neg = (r < 0)
r[neg] = -r[neg]
guess = approx_invert_Yony(r)
for i in range(r.size):
y.flat[i] = newton(lambda y: bigy(y)/y - r.flat[i], guess.flat[i])
alpha = np.arcsin(y)
alpha[neg] = np.pi - alpha[neg]
return y, alpha
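# A hedged usage sketch (values are illustrative; the ~4 degree figure for
# Y/y = 33 follows the comment in Coordinates.new_demo below):
#
#     r = np.array([33.0, -33.0])
#     y, alpha = numerical_invert_Yony(r)
#     np.degrees(alpha)    # -> roughly [4., 176.]; negative r maps to 90-180 deg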
class Coordinates(object):
def __init__(self, v, k_signed, l):
self.v = v
self.k_signed = k_signed
self.l = l
@classmethod
def new_demo(cls, *, B0=None, nv=21, nk=21, nl=15):
v_lo = 1e-7
v_hi = 1e0
l_lo = 1.1
l_hi = 7.0
# recall: k_min => theta_max and vice versa! Y/y = 33 => alpha ~= 4 degr.
k_hat_lo = (33. * (l_lo / B0)**0.5)**0.2
k_hat_hi = 0.
v = np.logspace(np.log10(v_lo), np.log10(v_hi), nv).reshape((1, 1, -1))
ks = (np.linspace(k_hat_lo, k_hat_hi, nk)**5).reshape((1, -1, 1))
l = np.linspace(l_lo, l_hi, nl).reshape((-1, 1, 1))
return cls(v, ks, l)
class ModelBuilder(object):
"""TODO: we may eventually want more flexibility and pluggability for
different magnetic field shapes, different ways of calculating various
coefficients, etc. But for now let's just get it working.
"""
def __init__(self, *, Cg=None):
"""Cg is a key variable that relates the V/K/L coordinates to the other
physical quantities.
"""
self.constants = {}
self.V = sympy.var('V')
self.Ksigned = sympy.var('Ksigned')
self.L = sympy.var('L') # dropping the asterisk superscript
self.Cg = sympy.var('Cg')
self.constants[self.Cg] = float(Cg)
self.K = sympy.Abs(self.Ksigned)
self.mu = self.V / (self.K + self.Cg)**2
# Lots of things depend on the pitch angle alpha or its sine `y`,
# which is obnoxious to compute given V/K/L. So, we compute it
# numerically and couch the rest of our equations in terms of it.
self.y = sympy.var('y')
# To be finalized later:
self.G = None
self.GD_VV = None
self.GD_VK = None
self.GD_KK = None
self.GD_LL = None
def dipole(self, *, B0=None, radius=None):
self.B0 = sympy.var('B0')
self.constants[self.B0] = float(B0)
self.radius = sympy.var('radius')
self.constants[self.radius] = float(radius)
self.B = self.B0 * self.L**-3
self.dPhi_dL = -2 * sympy.pi * self.B0 * self.radius**2 * self.L**-2
return self
def particle(self, *, m0=None, c_squared=None):
self.m0 = sympy.var('m0')
self.constants[self.m0] = float(m0)
self.c_squared = sympy.var('c^2')
self.constants[self.c_squared] = float(c_squared)
# Helpful related quantities.
self.p_squared = 2 * self.m0 * self.B * self.mu / self.y**2
mc2 = self.m0 * self.c_squared
self.Ekin = sympy.sqrt(self.p_squared * self.c_squared + mc2**2) - mc2
self.gamma = self.Ekin / mc2 + 1
self.beta = sympy.sqrt(1 - self.gamma**-2)
return self
def electron_cgs(self):
return self.particle(m0=cgs.me, c_squared=cgs.c**2)
def basic_radial_diffusion(self, *, D0, n):
self.brd_D0 = sympy.var('brd_D0')
self.constants[self.brd_D0] = float(D0)
self.brd_n = sympy.var('brd_n')
self.constants[self.brd_n] = float(n)
self.D_LL = self.brd_D0 * self.L**self.brd_n
return self
def summers_pa_coefficients(self, alpha_star, R, x_m, delta_x, max_wave_lat):
self.s05_alpha_star = float(alpha_star)
self.s05_R = float(R)
self.s05_x_m = float(x_m)
self.s05_delta_x = float(delta_x)
self.s05_max_wave_lat = float(max_wave_lat)
# We embed the assumption that Dap = Dpa.
self.Daa = sympy.var('Daa')
self.Dap = sympy.var('Dap')
self.Dpa = self.Dap
self.Dpp = sympy.var('Dpp')
return self
def synchrotron_losses_cgs(self):
"""Set the loss rate to be the one implied by synchrotron theory. We have to
        assume that we're in cgs because the expression involves the Thomson
cross-section.
Note that (gamma beta)**2 = (p / mc)**2 so we could dramatically
simplify the Sympy expressions used for those terms. But it's not like
that computation is the bottleneck here, in terms of either time or
precision.
"""
Psynch = (cgs.sigma_T * sympy.sqrt(self.c_squared) * self.beta**2 *
self.gamma**2 * self.B**2 / (6 * sympy.pi))
self.loss_rate = Psynch / self.Ekin
return self
def _finalize_g(self):
self.G = sympy.sqrt(8 * self.m0 * self.V) * (self.K + self.Cg)**-3 * self.dPhi_dL
def _finalize_dvk(self):
y = self.y
B = self.B
cosa = sympy.sqrt(1 - y**2)
# Magic functions; SS12 eqn A5; see also Schulz & Lanzerotti 1974.
# See bigy() on the choice of the approximation used.
Y = bigy(y)
# Schulz 1991:
T = 1.380173 - 0.639693 * y ** 0.75
# SS12 Equation E7, matrixified:
jac = [[0, 0], [0, 0]]
jac[0][0] = y * cosa * self.p_squared / (self.m0 * B) # dV/dp
q = Y * self.L * sympy.sqrt(B) / y + self.Cg
jac[0][1] = ( # dV/da
y * cosa * self.p_squared / (self.m0 * B) *
q * (q - self.L * sympy.sqrt(B) * 2 * T / y)
)
jac[1][0] = 0 # dK/dp
        jac[1][1] = -2 * cosa * self.L * sympy.sqrt(B) * T / y**2  # dK/da
# Transforming diffusion coefficients from p-alpha-L to V-K-L -- the
# last coordinate is unchanged.
D_pa = [[self.Dpp, self.Dpa], [self.Dap, self.Daa]]
D_VKL = [[0, 0, 0], [0, 0, 0], [0, 0, self.D_LL]]
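        # The loop below applies the usual tensor transformation rule,
        # D_VKL = J . D_pa . J^T, restricted to the (V, K) block; the L-L
        # entry is carried over unchanged.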
for i in (0, 1):
for j in (0, 1):
s = 0
for k in (0, 1):
for l in (0, 1):
s += jac[i][k] * D_pa[k][l] * jac[j][l]
D_VKL[i][j] = s
# Final diffusion tensor coefficients. These are the transformed coefficients
# multiplied by the spatially-dependent component of G.
self.GD_VV = self.G * D_VKL[0][0]
self.GD_VK = self.G * D_VKL[0][1]
self.GD_KK = self.G * D_VKL[1][1]
self.GD_LL = self.G * D_VKL[2][2]
def make_sampler(self, coords):
if self.G is None:
self._finalize_g()
if self.GD_VV is None:
self._finalize_dvk()
return Sampler(self, coords)
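    # A hedged end-to-end sketch of the builder chain (all numerical values
    # below are purely illustrative, not recommended physical parameters):
    #
    #     mb = (ModelBuilder(Cg=1.0)
    #           .dipole(B0=3000., radius=7e9)
    #           .electron_cgs()
    #           .basic_radial_diffusion(D0=1e-8, n=10)
    #           .summers_pa_coefficients(0.16, 8e-8, 0.35, 0.15, 0.3)
    #           .synchrotron_losses_cgs())
    #     coords = Coordinates.new_demo(B0=3000.)
    #     sampler = mb.make_sampler(coords)
    #     sampler.alpha_deg()          # pitch angles (degrees) on the (L, K, V) grid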
class Sampler(object):
def __init__(self, mb, coords):
"""*mb* is a ModelBuilder instance. The preferred way to create one of these
objects is to call `ModelBuilder.make_sampler()`.
*coords* is a Coordinates instance.
"""
self.mb = mb
self.c = coords
# Precompute `y` since it's used pretty much everywhere.
Yony = self._eval(mb.Ksigned / (mb.L * sympy.sqrt(mb.B)), with_y=False)
y, alpha = numerical_invert_Yony(Yony)
self._y = y
self._alpha_deg = alpha * 180 / np.pi
# Avoid computing these unless they're definitely needed, although
# summers2005 is now fast enough that it's not a huge deal.
self._daa = self._dap = self._dpp = None
self._gdvv = self._gdvk = self._gdkk = None
def _eval(self, expr, *, with_y=True, with_dap=False):
expr = expr.subs(self.mb.constants.items())
sym_args = (self.mb.V, self.mb.Ksigned, self.mb.L)
lit_args = (self.c.v, self.c.k_signed, self.c.l)
if with_y:
sym_args += (self.mb.y,)
lit_args += (self._y,)
if with_dap:
sym_args += (self.mb.Daa, self.mb.Dap, self.mb.Dpp)
lit_args += (self._daa, self._dap, self._dpp)
func = sympy.lambdify(sym_args, expr, 'numpy')
return func(*lit_args)
def y(self):
return self._y
def alpha_deg(self):
return self._alpha_deg
def G(self):
return self._eval(self.mb.G)
def mu(self):
return self._eval(self.mb.mu)
def B(self):
return self._eval(self.mb.B)
def Ekin_mev(self):
"We assume that Ekin is in cgs."
return self._eval(self.mb.Ekin) * cgs.evpererg * 1e-6
import cv2
import numpy as np
import warnings
from ..general.parameters import get_param_val, get_method_key
from .cmap import colour_array
from ..customexceptions.annotator_error import *
from ..user_methods import *
warnings.simplefilter('ignore')
"""
--------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------
Text annotation
--------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------
"""
def text_label(frame, data, f, parameters=None, call_num=None):
"""
    Text labels place a static label on an image at a specific location.
Notes
-----
This function is for adding titles or info that doesn't change
Parameters
----------
text
Text to be displayed
position
Coordinates of upper left corner of text
font_colour
Colour of font specified in (B,G,R) format where values are integers from 0-255
font_size
Size of font
font_thickness
Thickness of font
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('text_label', call_num=call_num)
text=parameters[method_key]['text']
position = parameters[method_key]['position']
annotated_frame=cv2.putText(frame, text, position, cv2.FONT_HERSHEY_COMPLEX_SMALL,
int(parameters[method_key]['font_size']),
parameters[method_key]['font_colour'],
int(parameters[method_key]['font_thickness']),
cv2.LINE_AA)
return annotated_frame
except Exception as e:
raise TextLabelError(e)
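# A hedged usage sketch of ``text_label`` (the nesting of ``parameters`` is
# inferred from the code above; real .param files may differ slightly):
#
#     params = {'text_label': {'text': 'My experiment', 'position': (50, 50),
#                              'font_colour': (255, 255, 255), 'font_size': 1,
#                              'font_thickness': 2}}
#     annotated = text_label(frame, data, 0, parameters=params)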
def var_label(frame, data, f, parameters=None, call_num=None):
"""
    Var labels put text on the image at a specific location in each frame. The value
    displayed in a given frame is taken from a column in the dataframe. All rows
    belonging to a given frame should therefore hold the same value in that column.
    Use this, for example, to display the temperature.
Notes
-----
This function is for adding data specific to a single frame. For example
you could indicate the temperature of the sample or time.
The data for a given frame should be stored in a particular column
specified in the 'var_column' section of the dictionary.
Parameters
----------
var_column
Column name containing the info to be displayed on each frame
position
Coordinates of upper left corner of text
font_colour
Colour of font specified in (B,G,R) format where values are integers from 0-255
font_size
Size of font
font_thickness
Thickness of font
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('var_label', call_num=call_num)
var_column=parameters[method_key]['var_column']
if var_column == 'index':
text = str(f)
else:
info = np.unique(data.df.loc[f, var_column])[0]
text = str(info)
position = parameters[method_key]['position']
annotated_frame=cv2.putText(frame, text, position, cv2.FONT_HERSHEY_COMPLEX_SMALL,
int(parameters[method_key]['font_size']),
parameters[method_key]['font_colour'],
int(parameters[method_key]['font_thickness']),
cv2.LINE_AA)
return annotated_frame
except Exception as e:
raise VarLabelError(e)
def particle_labels(frame, data, f, parameters=None, call_num=None):
"""
Annotates image with particle info from one column. The most common use
is to indicate the particle index but any column of data could be used.
Notes
-----
For particle ids to be meaningful, you must have already run
'processed part' with linking selected.
This is particularly useful if you want to extract information about
specific particles. Annotate their ids to identify the reference
id of the one you are interested in and then you can pull the subset
of processed data out. See examples in Jupyter notebook. Any particle
level data can however be displayed.
Parameters
----------
values_column
Name of column containing particle info to be displayed.
position
Coordinates of upper left corner of text
font_colour
Colour of font specified in (B,G,R) format where values are integers from 0-255
font_size
Size of font
font_thickness
Thickness of font
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('particle_labels', call_num=None)
x = data.get_info(f, 'x')
y = data.get_info(f, 'y')
particle_values = data.get_info(f, parameters[method_key]['values_column'])#.astype(int)
df_empty = np.isnan(particle_values[0])
if np.all(df_empty):
return frame
for index, particle_val in enumerate(particle_values):
frame = cv2.putText(frame, str(particle_val), (int(x[index]), int(y[index])),
cv2.FONT_HERSHEY_COMPLEX_SMALL,
int(parameters[method_key]['font_size']),
parameters[method_key]['font_colour'],
int(parameters[method_key]['font_thickness']),
cv2.LINE_AA)
return frame
except Exception as e:
raise ParticleLabelsError(e)
"""
--------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------
Particle annotation
--------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------
"""
def _get_class_subset(data, f, parameters, method=None):
"""
Internal function to get subset of particles
"""
try:
classifier_column= parameters[method]['classifier_column']
if classifier_column is None:
subset_df = data.df.loc[f]
else:
classifier = get_param_val(parameters[method]['classifier'])
temp = data.df.loc[f]
subset_df = temp[temp[classifier_column] == classifier]
return subset_df
except Exception as e:
raise GetClassSubsetError(e)
def boxes(frame, data, f, parameters=None, call_num=None):
"""
Boxes places a rotated rectangle on the image that encloses the contours of specified particles.
Notes
-----
This method requires you to have used contours for the tracking and run boxes
in postprocessing.
Parameters
----------
cmap_type
Options are 'static' or 'dynamic'
cmap_column
Name of column containing data to specify colour in dynamic mode,
cmap_max
Specifies max data value for colour map in dynamic mode
cmap_scale
Scale factor for colour map
colour
Colour to be used for static cmap_type (B,G,R) values from 0-255
classifier_column
None selects all particles, column name of classifier values to specify subset of particles
classifier
The value in the classifier column which applies to subset (True or False)
thickness
Thickness of box. -1 fills the box in
Args
----
frame
This is the unmodified frame of the input movie
data
This is the dataframe that stores all the tracked data
f
frame index
parameters
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('boxes', call_num=call_num)
thickness = get_param_val(parameters[method_key]['thickness'])
subset_df = _get_class_subset(data, f, parameters, method=method_key)
box_pts = subset_df[['box_pts']].values
if np.shape(box_pts)[0] == 1:
df_empty = np.isnan(box_pts[0])
if np.all(df_empty):
#0 boxes
return frame
colours = colour_array(subset_df, f, parameters, method=method_key)
sz = np.shape(frame)
for index, box in enumerate(box_pts):
frame = _draw_contours(frame, box, col=colours[index],
thickness=int(get_param_val(parameters[method_key]['thickness'])))
return frame
except Exception as e:
raise BoxesError(e)
def _contour_inside_img(sz, contour):
inside=True
frame_contour = np.array([[0,0],[0,sz[0]],[sz[1],sz[0]],[sz[1],0]])
for pt in contour[0]:
if cv2.pointPolygonTest(frame_contour, tuple(pt), False) < 0:
inside = False
return inside
def circles(frame, data, f, parameters=None, call_num=None):
"""
Circles places a ring on every specified particle
Parameters
----------
xdata_column
Name of column to use for x coordinates
ydata_column
Name of column to use for y coordinates
rad_from_data
Specify radius manually: False or use measured rad: True. Only works
for Hough transform.
radius
If rad_from_data = False this specifies the radius of circle
cmap_type
Options are static or dynamic
cmap_column
Name of column containing data to specify colour in dynamic mode,#for dynamic
cmap_max
Specifies max data value for colour map in dynamic mode
cmap_scale
Scale factor for colour map
colour
Colour to be used for static cmap_type (B,G,R) values from 0-255
classifier_column
None - selects all particles, column name of classifier values to apply to subset of particles
classifier
The value in the classifier column to apply colour map to (True or False)
thickness
Thickness of circle. -1 fills the circle in solidly.
Args
----
frame : np.ndarray
This is the unmodified frame of the input movie
data : pandas dataframe
This is the dataframe that stores all the tracked data
f : int
frame index
parameters : dict
Nested dictionary like object (same as .param files or output from general.param_file_creator.py)
call_num : int or None
Usually None but if multiple calls are made modifies method name with get_method_key
Returns
-----------
annotated frame : np.ndarray
"""
try:
method_key = get_method_key('circles', call_num=call_num)
x_col_name = parameters[method_key]['xdata_column']
y_col_name = parameters[method_key]['ydata_column']
r_col_name = parameters[method_key]['rdata_column']
if get_param_val(parameters[method_key]['rad_from_data']):
subset_df = _get_class_subset(data, f, parameters, method=method_key)
circles = subset_df[[x_col_name, y_col_name, r_col_name]].values
else:
data.add_particle_property('user_rad', get_param_val(parameters[method_key]['user_rad']))
subset_df = _get_class_subset(data, f, parameters, method=method_key)
circles = subset_df[[x_col_name, y_col_name, 'user_rad']].values
        thickness = get_param_val(parameters[method_key]['thickness'])
`string` from: `"MIN_COMBINED", "MIN_FIRST", "SCALED"`. Defaults to `"MIN_COMBINED"`.
narrow_range: An optional `bool`. Defaults to `False`.
axis: An optional `int`. Defaults to `-1`.
dtype: An optional `tf.DType` from: `tf.bfloat16, tf.float32`. Defaults to `tf.float32`.
Type of the output tensor. Currently Dequantize supports float and bfloat16.
If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Dequantize", name,
tld.op_callbacks, input, min_range, max_range, "mode", mode,
"narrow_range", narrow_range, "axis", axis, "dtype", dtype)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return dequantize_eager_fallback(
input, min_range, max_range, mode=mode, narrow_range=narrow_range,
axis=axis, dtype=dtype, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if mode is None:
mode = "MIN_COMBINED"
mode = _execute.make_str(mode, "mode")
if narrow_range is None:
narrow_range = False
narrow_range = _execute.make_bool(narrow_range, "narrow_range")
if axis is None:
axis = -1
axis = _execute.make_int(axis, "axis")
if dtype is None:
dtype = _dtypes.float32
dtype = _execute.make_type(dtype, "dtype")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Dequantize", input=input, min_range=min_range, max_range=max_range,
mode=mode, narrow_range=narrow_range, axis=axis,
dtype=dtype, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"),
"narrow_range", _op._get_attr_bool("narrow_range"), "axis",
_op._get_attr_int("axis"), "dtype", _op._get_attr_type("dtype"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Dequantize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Dequantize = tf_export("raw_ops.Dequantize")(_ops.to_raw_op(dequantize))
def dequantize_eager_fallback(input, min_range, max_range, mode, narrow_range, axis, dtype, name, ctx):
if mode is None:
mode = "MIN_COMBINED"
mode = _execute.make_str(mode, "mode")
if narrow_range is None:
narrow_range = False
narrow_range = _execute.make_bool(narrow_range, "narrow_range")
if axis is None:
axis = -1
axis = _execute.make_int(axis, "axis")
if dtype is None:
dtype = _dtypes.float32
dtype = _execute.make_type(dtype, "dtype")
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
min_range = _ops.convert_to_tensor(min_range, _dtypes.float32)
max_range = _ops.convert_to_tensor(max_range, _dtypes.float32)
_inputs_flat = [input, min_range, max_range]
_attrs = ("T", _attr_T, "mode", mode, "narrow_range", narrow_range, "axis",
axis, "dtype", dtype)
_result = _execute.execute(b"Dequantize", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Dequantize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
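# A hedged usage sketch of the generated wrapper via ``tf.raw_ops`` (the
# values are illustrative):
#
#     import tensorflow as tf
#     x = tf.constant([-1.0, 0.0, 1.0])
#     q, mn, mx = tf.quantization.quantize(x, -1.0, 1.0, tf.quint8)
#     tf.raw_ops.Dequantize(input=q, min_range=mn, max_range=mx)
#     # -> approximately recovers x in the default "MIN_COMBINED" mode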
@_dispatch.add_dispatch_list
@tf_export('linalg.tensor_diag', v1=['linalg.tensor_diag', 'diag'])
@deprecated_endpoints('diag')
def diag(diagonal, name=None):
r"""Returns a diagonal tensor with a given diagonal values.
Given a `diagonal`, this operation returns a tensor with the `diagonal` and
everything else padded with zeros. The diagonal is computed as follows:
Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
For example:
```
# 'diagonal' is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]]
```
Args:
diagonal: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
Rank k tensor where k is at most 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `diagonal`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "Diag", name, tld.op_callbacks,
diagonal)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return diag_eager_fallback(
diagonal, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
diag, (), dict(diagonal=diagonal, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"Diag", diagonal=diagonal, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
diag, (), dict(diagonal=diagonal, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"Diag", _inputs_flat, _attrs, _result)
_result, = _result
return _result
Diag = tf_export("raw_ops.Diag")(_ops.to_raw_op(diag))
def diag_eager_fallback(diagonal, name, ctx):
_attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], ctx)
_inputs_flat = [diagonal]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"Diag", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"Diag", _inputs_flat, _attrs, _result)
_result, = _result
return _result
@_dispatch.add_dispatch_list
@tf_export('linalg.tensor_diag_part', v1=['linalg.tensor_diag_part', 'diag_part'])
@deprecated_endpoints('diag_part')
def diag_part(input, name=None):
r"""Returns the diagonal part of the tensor.
This operation returns a tensor with the `diagonal` part
of the `input`. The `diagonal` part is computed as follows:
Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
tensor of rank `k` with dimensions `[D1,..., Dk]` where:
`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
For example:
```
# 'input' is [[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]]
tf.diag_part(input) ==> [1, 2, 3, 4]
```
Args:
input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
Rank k tensor where k is even and not zero.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "DiagPart", name,
tld.op_callbacks, input)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return diag_part_eager_fallback(
input, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
diag_part, (), dict(input=input, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
# Add nodes to the TensorFlow graph.
try:
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DiagPart", input=input, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
diag_part, (), dict(input=input, name=name)
)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("T", _op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DiagPart", _inputs_flat, _attrs, _result)
_result, = _result
return _result
DiagPart = tf_export("raw_ops.DiagPart")(_ops.to_raw_op(diag_part))
def diag_part_eager_fallback(input, name, ctx):
_attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
_inputs_flat = [input]
_attrs = ("T", _attr_T)
_result = _execute.execute(b"DiagPart", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DiagPart", _inputs_flat, _attrs, _result)
_result, = _result
return _result
def edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None):
r"""Computes the (possibly normalized) Levenshtein Edit Distance.
The inputs are variable-length sequences provided by SparseTensors
(hypothesis_indices, hypothesis_values, hypothesis_shape)
and
(truth_indices, truth_values, truth_shape).
The inputs are:
Args:
hypothesis_indices: A `Tensor` of type `int64`.
The indices of the hypothesis list SparseTensor.
This is an N x R int64 matrix.
hypothesis_values: A `Tensor`.
The values of the hypothesis list SparseTensor.
This is an N-length vector.
hypothesis_shape: A `Tensor` of type `int64`.
The shape of the hypothesis list SparseTensor.
This is an R-length vector.
truth_indices: A `Tensor` of type `int64`.
The indices of the truth list SparseTensor.
This is an M x R int64 matrix.
truth_values: A `Tensor`. Must have the same type as `hypothesis_values`.
The values of the truth list SparseTensor.
This is an M-length vector.
truth_shape: A `Tensor` of type `int64`. truth indices, vector.
normalize: An optional `bool`. Defaults to `True`.
boolean (if true, edit distances are normalized by length of truth).
The output is:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "EditDistance", name,
tld.op_callbacks, hypothesis_indices, hypothesis_values,
hypothesis_shape, truth_indices, truth_values, truth_shape,
"normalize", normalize)
return _result
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
except _core._FallbackException:
pass
try:
return edit_distance_eager_fallback(
hypothesis_indices, hypothesis_values, hypothesis_shape,
truth_indices, truth_values, truth_shape, normalize=normalize,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
# Add nodes to the TensorFlow graph.
if normalize is None:
normalize = True
normalize = _execute.make_bool(normalize, "normalize")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"EditDistance", hypothesis_indices=hypothesis_indices,
hypothesis_values=hypothesis_values,
hypothesis_shape=hypothesis_shape,
truth_indices=truth_indices,
truth_values=truth_values, truth_shape=truth_shape,
normalize=normalize, name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("normalize", _op._get_attr_bool("normalize"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"EditDistance", _inputs_flat, _attrs, _result)
_result, = _result
return _result
EditDistance = tf_export("raw_ops.EditDistance")(_ops.to_raw_op(edit_distance))
def edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize, name, ctx):
if normalize is None:
normalize = True
normalize = _execute.make_bool(normalize, "normalize")
_attr_T, _inputs_T = _execute.args_to_matching_eager([hypothesis_values, truth_values], ctx)
(hypothesis_values, truth_values) = _inputs_T
hypothesis_indices = _ops.convert_to_tensor(hypothesis_indices, _dtypes.int64)
hypothesis_shape = _ops.convert_to_tensor(hypothesis_shape, _dtypes.int64)
truth_indices = _ops.convert_to_tensor(truth_indices, _dtypes.int64)
truth_shape = _ops.convert_to_tensor(truth_shape, _dtypes.int64)
_inputs_flat = [hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape]
  _attrs = ("normalize", normalize, "T", _attr_T)
#!/usr/bin/env python
########################################################################
#
# To build panda using this script, type 'makepanda.py' on unix
# or 'makepanda.bat' on windows, and examine the help-text.
# Then run the script again with the appropriate options to compile
# panda3d.
#
########################################################################
try:
import sys, os, platform, time, stat, re, getopt, threading, signal, shutil
if sys.platform == "darwin" or sys.version_info >= (2, 6):
import plistlib
if sys.version_info >= (3, 0):
import queue
else:
import Queue as queue
except KeyboardInterrupt:
raise
except:
print("You are either using an incomplete or an old version of Python!")
print("Please install the development package of Python and try again.")
exit(1)
from makepandacore import *
from distutils.util import get_platform
import time
import os
import sys
########################################################################
##
## PARSING THE COMMAND LINE OPTIONS
##
## You might be tempted to change the defaults by editing them
## here. Don't do it. Instead, create a script that compiles
## panda with your preferred options. Or, create
## a 'makepandaPreferences' file and put it into your python path.
##
########################################################################
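# For example, such a wrapper script might look roughly like this (purely
# illustrative; adjust the path and options for your setup):
#
#     #!/usr/bin/env python
#     import subprocess, sys
#     subprocess.check_call([sys.executable, "makepanda/makepanda.py",
#                            "--everything", "--threads=4", "--installer"])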
COMPILER=0
INSTALLER=0
WHEEL=0
RUNTESTS=0
GENMAN=0
COMPRESSOR="zlib"
THREADCOUNT=0
CFLAGS=""
CXXFLAGS=""
LDFLAGS=""
RTDIST=0
RTDIST_VERSION=None
RUNTIME=0
DISTRIBUTOR=""
VERSION=None
DEBVERSION=None
WHLVERSION=None
RPMRELEASE="1"
GIT_COMMIT=None
P3DSUFFIX=None
MAJOR_VERSION=None
COREAPI_VERSION=None
PLUGIN_VERSION=None
OSXTARGET=None
OSX_ARCHS=[]
HOST_URL=None
global STRDXSDKVERSION, BOOUSEINTELCOMPILER
STRDXSDKVERSION = 'default'
WINDOWS_SDK = None
MSVC_VERSION = None
BOOUSEINTELCOMPILER = False
OPENCV_VER_23 = False
PLATFORM = None
COPY_PYTHON = True
if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
OSXTARGET=os.environ["MACOSX_DEPLOYMENT_TARGET"]
PkgListSet(["PYTHON", "DIRECT", # Python support
"GL", "GLES", "GLES2"] + DXVERSIONS + ["TINYDISPLAY", "NVIDIACG", # 3D graphics
"EGL", # OpenGL (ES) integration
"EIGEN", # Linear algebra acceleration
"OPENAL", "FMODEX", # Audio playback
"VORBIS", "OPUS", "FFMPEG", "SWSCALE", "SWRESAMPLE", # Audio decoding
"ODE", "PHYSX", "BULLET", "PANDAPHYSICS", # Physics
"SPEEDTREE", # SpeedTree
"ZLIB", "PNG", "JPEG", "TIFF", "OPENEXR", "SQUISH", # 2D Formats support
] + MAYAVERSIONS + MAXVERSIONS + [ "FCOLLADA", "ASSIMP", "EGG", # 3D Formats support
"FREETYPE", "HARFBUZZ", # Text rendering
"VRPN", "OPENSSL", # Transport
"FFTW", # Algorithm helpers
"ARTOOLKIT", "OPENCV", "DIRECTCAM", "VISION", # Augmented Reality
"GTK2", # GTK2 is used for PStats on Unix
"MFC", "WX", "FLTK", # Used for web plug-in only
"ROCKET", # GUI libraries
"CARBON", "COCOA", # Mac OS X toolkits
"X11", # Unix platform support
"PANDATOOL", "PVIEW", "DEPLOYTOOLS", # Toolchain
"SKEL", # Example SKEL project
"PANDAFX", # Some distortion special lenses
"PANDAPARTICLESYSTEM", # Built in particle system
"CONTRIB", # Experimental
"SSE2", "NEON", # Compiler features
])
CheckPandaSourceTree()
def keyboardInterruptHandler(x,y):
exit("keyboard interrupt")
signal.signal(signal.SIGINT, keyboardInterruptHandler)
########################################################################
##
## Command-line parser.
##
## You can type "makepanda --help" to see all the options.
##
########################################################################
def usage(problem):
if (problem):
print("")
print("Error parsing command-line input: %s" % (problem))
print("")
print("Makepanda generates a 'built' subdirectory containing a")
print("compiled copy of Panda3D. Command-line arguments are:")
print("")
print(" --help (print the help message you're reading now)")
print(" --verbose (print out more information)")
print(" --runtime (build a runtime build instead of an SDK build)")
print(" --tests (run the test suite)")
print(" --installer (build an installer)")
print(" --wheel (build a pip-installable .whl)")
print(" --optimize X (optimization level can be 1,2,3,4)")
print(" --version X (set the panda version number)")
print(" --lzma (use lzma compression when building Windows installer)")
print(" --distributor X (short string identifying the distributor of the build)")
print(" --outputdir X (use the specified directory instead of 'built')")
print(" --host URL (set the host url (runtime build only))")
print(" --threads N (use the multithreaded build system. see manual)")
print(" --osxtarget N (the OS X version number to build for (OS X only))")
print(" --universal (build universal binaries (OS X only))")
print(" --override \"O=V\" (override dtool_config/prc option value)")
print(" --static (builds libraries for static linking)")
print(" --target X (experimental cross-compilation (android only))")
print(" --arch X (target architecture for cross-compilation)")
print("")
for pkg in PkgListGet():
p = pkg.lower()
print(" --use-%-9s --no-%-9s (enable/disable use of %s)"%(p, p, pkg))
if sys.platform != 'win32':
print(" --<PKG>-incdir (custom location for header files of thirdparty package)")
print(" --<PKG>-libdir (custom location for library files of thirdparty package)")
print("")
print(" --nothing (disable every third-party lib)")
print(" --everything (enable every third-party lib)")
print(" --directx-sdk=X (specify version of DirectX SDK to use: jun2010, aug2009, mar2009, aug2006)")
print(" --windows-sdk=X (specify Windows SDK version, eg. 7.0, 7.1 or 10. Default is 7.1)")
print(" --msvc-version=X (specify Visual C++ version, eg. 10, 11, 12, 14. Default is 14)")
print(" --use-icl (experimental setting to use an intel compiler instead of MSVC on Windows)")
print("")
print("The simplest way to compile panda is to just type:")
print("")
print(" makepanda --everything")
print("")
os._exit(1)
def parseopts(args):
global INSTALLER,WHEEL,RUNTESTS,RTDIST,RUNTIME,GENMAN,DISTRIBUTOR,VERSION
global COMPRESSOR,THREADCOUNT,OSXTARGET,OSX_ARCHS,HOST_URL
global DEBVERSION,WHLVERSION,RPMRELEASE,GIT_COMMIT,P3DSUFFIX,RTDIST_VERSION
global STRDXSDKVERSION, WINDOWS_SDK, MSVC_VERSION, BOOUSEINTELCOMPILER
global COPY_PYTHON
# Options for which to display a deprecation warning.
removedopts = [
"use-touchinput", "no-touchinput", "no-awesomium", "no-directscripts",
]
# All recognized options.
longopts = [
"help","distributor=","verbose","runtime","osxtarget=","tests",
"optimize=","everything","nothing","installer","wheel","rtdist","nocolor",
"version=","lzma","no-python","threads=","outputdir=","override=",
"static","host=","debversion=","rpmrelease=","p3dsuffix=","rtdist-version=",
"directx-sdk=", "windows-sdk=", "msvc-version=", "clean", "use-icl",
"universal", "target=", "arch=", "git-commit=", "no-copy-python",
] + removedopts
anything = 0
optimize = ""
target = None
target_arch = None
universal = False
clean_build = False
for pkg in PkgListGet():
longopts.append("use-" + pkg.lower())
longopts.append("no-" + pkg.lower())
longopts.append(pkg.lower() + "-incdir=")
longopts.append(pkg.lower() + "-libdir=")
try:
opts, extras = getopt.getopt(args, "", longopts)
for option, value in opts:
if (option=="--help"): raise Exception
elif (option=="--optimize"): optimize=value
elif (option=="--installer"): INSTALLER=1
elif (option=="--tests"): RUNTESTS=1
elif (option=="--wheel"): WHEEL=1
elif (option=="--verbose"): SetVerbose(True)
elif (option=="--distributor"): DISTRIBUTOR=value
elif (option=="--rtdist"): RTDIST=1
elif (option=="--runtime"): RUNTIME=1
elif (option=="--genman"): GENMAN=1
elif (option=="--everything"): PkgEnableAll()
elif (option=="--nothing"): PkgDisableAll()
elif (option=="--threads"): THREADCOUNT=int(value)
elif (option=="--outputdir"): SetOutputDir(value.strip())
elif (option=="--osxtarget"): OSXTARGET=value.strip()
elif (option=="--universal"): universal = True
elif (option=="--target"): target = value.strip()
elif (option=="--arch"): target_arch = value.strip()
elif (option=="--nocolor"): DisableColors()
elif (option=="--version"):
match = re.match(r'^\d+\.\d+\.\d+', value)
if not match:
usage("version requires three digits")
WHLVERSION = value
VERSION = match.group()
elif (option=="--lzma"): COMPRESSOR="lzma"
elif (option=="--override"): AddOverride(value.strip())
elif (option=="--static"): SetLinkAllStatic(True)
elif (option=="--host"): HOST_URL=value
elif (option=="--debversion"): DEBVERSION=value
elif (option=="--rpmrelease"): RPMRELEASE=value
elif (option=="--git-commit"): GIT_COMMIT=value
elif (option=="--p3dsuffix"): P3DSUFFIX=value
elif (option=="--rtdist-version"): RTDIST_VERSION=value
# Backward compatibility, OPENGL was renamed to GL
elif (option=="--use-opengl"): PkgEnable("GL")
elif (option=="--no-opengl"): PkgDisable("GL")
elif (option=="--directx-sdk"):
STRDXSDKVERSION = value.strip().lower()
if STRDXSDKVERSION == '':
print("No DirectX SDK version specified. Using 'default' DirectX SDK search")
STRDXSDKVERSION = 'default'
elif (option=="--windows-sdk"):
WINDOWS_SDK = value.strip().lower()
elif (option=="--msvc-version"):
MSVC_VERSION = value.strip().lower()
elif (option=="--use-icl"): BOOUSEINTELCOMPILER = True
elif (option=="--clean"): clean_build = True
elif (option=="--no-copy-python"): COPY_PYTHON = False
elif (option[2:] in removedopts):
Warn("Ignoring removed option %s" % (option))
else:
for pkg in PkgListGet():
if option == "--use-" + pkg.lower():
PkgEnable(pkg)
break
elif option == "--no-" + pkg.lower():
PkgDisable(pkg)
break
elif option == "--" + pkg.lower() + "-incdir":
PkgSetCustomLocation(pkg)
IncDirectory(pkg, value)
break
elif option == "--" + pkg.lower() + "-libdir":
PkgSetCustomLocation(pkg)
LibDirectory(pkg, value)
break
if (option == "--everything" or option.startswith("--use-")
or option == "--nothing" or option.startswith("--no-")):
anything = 1
except:
usage(sys.exc_info()[1])
if not anything:
if RUNTIME:
PkgEnableAll()
else:
usage("You should specify a list of packages to use or --everything to enable all packages.")
if (RTDIST and RUNTIME):
usage("Options --runtime and --rtdist cannot be specified at the same time!")
if (optimize=="" and (RTDIST or RUNTIME)): optimize = "4"
elif (optimize==""): optimize = "3"
if OSXTARGET:
try:
maj, min = OSXTARGET.strip().split('.')
OSXTARGET = int(maj), int(min)
assert OSXTARGET[0] == 10
except:
usage("Invalid setting for OSXTARGET")
else:
OSXTARGET = None
if target is not None or target_arch is not None:
SetTarget(target, target_arch)
if universal:
if target_arch:
exit("--universal is incompatible with --arch")
OSX_ARCHS.append("i386")
if OSXTARGET:
osxver = OSXTARGET
else:
maj, min = platform.mac_ver()[0].split('.')[:2]
osxver = int(maj), int(min)
if osxver[1] < 6:
OSX_ARCHS.append("ppc")
else:
OSX_ARCHS.append("x86_64")
elif HasTargetArch():
OSX_ARCHS.append(GetTargetArch())
try:
SetOptimize(int(optimize))
assert GetOptimize() in [1, 2, 3, 4]
except:
usage("Invalid setting for OPTIMIZE")
if GIT_COMMIT is not None and not re.match("^[a-f0-9]{40}$", GIT_COMMIT):
usage("Invalid SHA-1 hash given for --git-commit option!")
if GetTarget() == 'windows':
if not MSVC_VERSION:
print("No MSVC version specified. Defaulting to 14 (Visual Studio 2015).")
MSVC_VERSION = (14, 0)
else:
try:
MSVC_VERSION = tuple(int(d) for d in MSVC_VERSION.split('.'))[:2]
if (len(MSVC_VERSION) == 1):
MSVC_VERSION += (0,)
except:
usage("Invalid setting for --msvc-version")
if MSVC_VERSION < (14, 0):
warn_prefix = "%sERROR:%s " % (GetColor("red"), GetColor())
print("=========================================================================")
print(warn_prefix + "Support for MSVC versions before 2015 has been discontinued.")
print(warn_prefix + "For more information, or any questions, please visit:")
print(warn_prefix + " https://github.com/panda3d/panda3d/issues/288")
print("=========================================================================")
sys.stdout.flush()
time.sleep(1.0)
sys.exit(1)
if not WINDOWS_SDK:
print("No Windows SDK version specified. Defaulting to '7.1'.")
WINDOWS_SDK = '7.1'
if clean_build and os.path.isdir(GetOutputDir()):
print("Deleting %s" % (GetOutputDir()))
shutil.rmtree(GetOutputDir())
parseopts(sys.argv[1:])
########################################################################
##
## Handle environment variables.
##
########################################################################
if ("CFLAGS" in os.environ):
CFLAGS = os.environ["CFLAGS"].strip()
if ("CXXFLAGS" in os.environ):
CXXFLAGS = os.environ["CXXFLAGS"].strip()
if ("RPM_OPT_FLAGS" in os.environ):
CFLAGS += " " + os.environ["RPM_OPT_FLAGS"].strip()
CXXFLAGS += " " + os.environ["RPM_OPT_FLAGS"].strip()
if ("LDFLAGS" in | |
<filename>yuno/collection.py
"""
collection.py
A collection is a set of documents.
"""
import inspect
import threading
import typing
import pymongo.database
import pymongo.collection
from yuno import encoder, objects, database
from yuno.cursor import Cursor
from yuno.direction import IndexDirectionType, SortDirectionType
from yuno.watch import OperationType, Watch
class DocumentsCursor(Cursor):
def next(self) -> "objects.YunoDict":
return super().next()
def __iter__(self) -> typing.Iterator["objects.YunoDict"]:
return super().__iter__()
class YunoCollection(object):
"""
An object that represents a collection in the database.
"""
__type__: "objects.YunoDict" = None
"""The default document type"""
__overwritten__ = {"__type__", "__overwritten__", "__name__", "__annotations__", "__database__", "__collection__", "__class__", # we need to overwrite this to avoid getting the super class
"__init__", "count", "find", "index", "aggregate", "update", "watch", "on", "_watch_loop", "__realtime__", "__callbacks__", "__delitem__", "__delattr__", "__setitem__", "__setattr__", "__getitem__", "__getattr__", "__repr__"}
__name__: str
"""The name of the collection"""
__annotations__: typing.Dict[str, type]
"""The documents annotations for the collection"""
__database__: "database.YunoDatabase"
"""The database this collection is in"""
__collection__: pymongo.collection.Collection
"""The PyMongo collection object"""
__realtime__: bool = False
"""Whether the collection updates in realtime or not"""
__callbacks__: typing.Dict[OperationType, typing.List[typing.Callable]] = {}
"""The callbacks registered for realtime updates"""
def __init__(self, database: "database.YunoDatabase", name: str = "__yuno_test__") -> None:
"""
Create a new collection
Parameters
----------
database: YunoDatabase
The database this collection is in
name: str, default="__yuno_test__"
The name of the collection
"""
if self.__type__ is None:
super().__setattr__("__type__", objects.YunoDict)
super().__setattr__("__name__", str(name))
super().__setattr__("__annotations__", self.__annotations__ if hasattr(self, "__annotations__") else {})
super().__setattr__("__database__", database)
super().__setattr__("__collection__", database.__database__.get_collection(name))
threading.Thread(target=self._watch_loop, daemon=True).start()
def count(self, filter: dict = None, **kwargs) -> int:
"""
Returns the number of documents in the collection.
Parameters
----------
filter: dict, default=None
The filter to apply to the count.
**kwargs:
            Keyword arguments to pass to pymongo's `count_documents` method.
"""
filter = filter if filter is not None else {}
return self.__collection__.count_documents(filter, **kwargs)
def find(self, filter: dict = None, include: typing.List[str] = None, exclude: typing.List[str] = None, limit: int = 0, sort: typing.List[typing.Tuple[str, SortDirectionType]] = None, defered: bool = False, **kwargs) -> typing.Union[DocumentsCursor, typing.List["objects.YunoDict"]]:
"""
Find documents in the collection
Parameters
----------
filter: dict, default=None
A dictionary of filters to apply to the query.
include: list[str], default=None
A list of attributes to include in the result.
exclude: list[str], default=None
A list of attributes to exclude from the result.
limit: int, default=0
The maximum number of documents to return.
sort: list[tuple[str, SortDirectionType]], default=None
A list of tuples of attributes to sort by.
Each tuple is a field and the direction to sort by.
defered: bool, default=False
If True, a generator will be returned and results will be yielded when necessary.
If False, the results will be returned immediately and everything will be in memory.
**kwargs:
Keyword arguments to pass to the find method
You can therefore use the function like so:
>>> collection.find(name="John", age={"$gt": 18})
[YunoDict({"username": "Animenosekai", "rank": 1}), YunoDict({"username": "Anise", "rank": 2})]
Returns
-------
list[YunoDict]
A list of documents
"""
filter = filter if filter is not None else {}
filter.update(kwargs)
filter = {str(k): encoder.YunoBSONEncoder().default(v) for k, v in filter.items()}
projection = {str(field): True for field in (include or [])}
projection.update({str(field): False for field in (exclude or [])})
if len(projection) > 0:
projection["_id"] = True # Always include _id
else: # If there are no fields to include or exclude, we don't need any projection
projection = None
if defered:
def type_encode(obj: dict):
name = obj.get("_id")
cast = self.__annotations__.get(name, self.__type__)
annotations = encoder.get_annotations(cast)
data = {k: encoder.YunoTypeEncoder().default(
v,
_type=annotations.get(k, None),
field=k,
collection=self,
_id=name
) for k, v in obj.items()}
return cast(_id=name, collection=self, field="", data=data)
return DocumentsCursor(self.__collection__.find(filter=filter, projection=projection, limit=limit, sort=sort), verification=type_encode)
results: typing.List[objects.YunoDict] = []
for doc in self.__collection__.find(filter=filter, projection=projection, limit=limit, sort=sort):
name = doc.get("_id")
cast = self.__annotations__.get(name, self.__type__)
annotations = encoder.get_annotations(cast)
data = {k: encoder.YunoTypeEncoder().default(
v,
_type=annotations.get(k, None),
field=k,
collection=self,
_id=name
) for k, v in doc.items()}
# results.append(TypeEncoder.default(doc, _type=cast, field="", collection=self, _id=name))
results.append(cast(_id=name, collection=self, field="", data=data))
return results
def index(self, keys: typing.Union[str, typing.List[typing.Tuple[str, IndexDirectionType]]], name: str = None, unique: bool = True, background: bool = True, sparse: bool = True, **kwargs) -> None:
"""
Creates an index for this collection
Parameters
----------
keys: str or list[tuple[str, IndexDirectionType]]
The keys to index.
name: str
The name of the index.
unique: bool
Whether the index should be unique.
background: bool
Whether the index should be created in the background.
sparse: bool
Whether documents without the field should be ignored or not.
**kwargs:
Keyword arguments to pass to pymongo's create_index method.
"""
default = {
"background": background,
"unique": unique,
"sparse": sparse
}
if name is not None:
default["name"] = name
default.update(kwargs)
self.__collection__.create_index(keys, **default)
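# Index sketch (field names are hypothetical): a unique compound index built in
# the background, matching the defaults of index() above, plus a single-field one.
#
# >>> collection.index([("username", 1), ("rank", -1)], name="username_rank")
# >>> collection.index("email")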
# TODO: Implement update() and aggregate()
def update(self, *args, **kwargs):
"""
Update a document in the collection
"""
return self.__collection__.update_one(*args, **kwargs)
def aggregate(self, pipeline, *args, **kwargs):
"""
Get an aggregation of documents in the collection
"""
return self.__collection__.aggregate(pipeline, *args, **kwargs)
def _watch_loop(self):
"""
Internal method that watches the collection for changes.
For each change event it calls every callback registered on this object for the matching operation,
either synchronously or in a daemon thread depending on how the callback was registered.
"""
if not self.__realtime__:
return
watch = self.watch(error_limit=10) # we raise the limit a little bit to be sure we don't miss any changes
for event in watch:
if not self.__realtime__:
break
for callback, blocking in self.__callbacks__.get(event.operation, []):
specs = inspect.getfullargspec(callback).args
kwargs = {}
if "event" in specs:
kwargs["event"] = event
if "client" in specs:
kwargs["client"] = self.__database__.__client__
if "database" in specs:
kwargs["database"] = self.__database__
if "collection" in specs:
kwargs["collection"] = self
if blocking:
callback(**kwargs)
else:
threading.Thread(target=callback, kwargs=kwargs, daemon=True).start()
watch.close()
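# Callbacks dispatched by the loop above only need to declare the keyword
# arguments they want: the loop inspects their signature and injects "event",
# "client", "database" and/or "collection". A minimal sketch:
#
# >>> def on_change(event, collection):
# ...     print("change on", collection.__name__, event)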
def watch(self, operations: typing.List[OperationType] = None, pipeline: typing.List[dict] = None, full_document: str = None, error_limit: int = 3, error_expiration: float = 60, **kwargs) -> Watch:
"""
Returns an iterator (Watch) to watch the collection for changes.
Parameters
----------
operations: list[OperationType]
The operations to watch for.
pipeline: list[dict]
The pipeline to watch for.
full_document: str
The full_document to watch for.
error_limit: int
The number of errors to allow before raising an exception.
error_expiration: float
The number of seconds to wait before raising an exception.
kwargs:
The kwargs to pass to the watch.
Returns
-------
Watch
The watch object.
Example
--------
>>> watch = collection.watch()
>>> for event in watch:
>>> print(event)
"""
final_pipeline = []
if operations:
final_pipeline.append({"$match": {"operationType": {"$in": operations}}})
final_pipeline.extend(pipeline if pipeline else [])
return Watch(self.__collection__, pipeline=final_pipeline, full_document=full_document, error_limit=error_limit, error_expiration=error_expiration, **kwargs)
def on(self, operation: OperationType, callback: typing.Callable, blocking: bool = False) -> None:
"""
Registers a callback to be called when a certain operation is performed on the current object.
Registering a callback sets __realtime__ to True, which starts the background watch loop.
The callback will be called whenever the object is changed by the given operation.
Parameters
----------
operation: OperationType
The operation to watch for.
callback: typing.Callable
The callback to be called.
blocking: bool, default=False
If True, the callback is executed synchronously inside the watch loop;
otherwise it is run in a separate daemon thread.
"""
try:
self.__callbacks__[operation].append((callback, blocking))
except Exception:
self.__callbacks__[operation] = [(callback, blocking)]
self.__realtime__ = True
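# Registration sketch (assumes "update" and "delete" are valid OperationType
# values in this library; adjust to the real members):
#
# >>> collection.on("update", on_change)                   # runs in a daemon thread
# >>> collection.on("delete", on_change, blocking=True)    # runs inside the watch loop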
def __delitem__(self, name: str) -> None:
"""
Deletes a document from the collection.
Example
--------
>>> del collection["special_document"]
"""
self.__collection__.delete_one({"_id": name})
def __delattr__(self, name: str) -> None:
"""
Deletes a document from the collection.
Example
--------
>>> del collection.special_document
"""
self.__delitem__(name)
def __setitem__(self, name: str, value: dict) -> None:
"""
Replaces or sets a document in the collection.
Example
--------
>>> collection["special_document"] = {"_id": "special_document", "name": "Special Document"}
# Initial Document
# {"_id": "special_document", "name": "Test", "favorites": 2}
# Updated Document
# {"_id": "special_document", "name": "Special Document"}
"""
self.__collection__.replace_one({"_id": name}, encoder.YunoBSONEncoder().default(value), upsert=True)
def __setattr__(self, name: str, value: dict) -> None:
"""
Replaces or sets a document in the collection.
Example
--------
>>> collection.special_document = {"_id": "special_document", "name": "Special Document"}
# Initial Document
# {"_id": "special_document", "name": "Test", "favorites": 2}
# Updated Document
# {"_id": "special_document", "name": "Special Document"}
"""
if name == "__name__":
if value != self.__name__:
self.__init__(database=self.__database__, name=value) # reinitializing the collection because it's a different one
return
if name == "__realtime__":
if not self.__realtime__ and value:
super().__setattr__(name, value)
threading.Thread(target=self._watch_loop, daemon=True).start()
return
return super().__setattr__(name, value)
self.__setitem__(name, value)
def __getitem__(self, name: str) -> "objects.YunoDict":
"""
Gets a document from the collection.
Example
--------
>>> document = collection["special_document"]
"""
data = self.find(_id=name, limit=1)
if len(data) <= 0:
raise KeyError("No document with name '{}' found".format(name))
return data[0]
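# Item-style access sketch for the collection wrapper above (document name and
# fields are illustrative):
#
# >>> document = collection["special_document"]                        # fetch one YunoDict
# >>> collection["special_document"] = {"name": "Special Document"}    # replace/upsert
# >>> del collection["special_document"]                               # delete by _id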
0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/shelf_A1.bam'},
10000: {'type': 'nodepath', 'name': 'crateField',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, -51.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10004: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10018,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10006: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10018,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10007: {'type': 'nodepath', 'name': 'crates',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 0.800000011921)},
10010: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10018,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10017: {'type': 'nodepath', 'name': 'wall5',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 90.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10018: {'type': 'nodepath', 'name': 'crateSquare0',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10019: {'type': 'nodepath', 'name': 'crateSquare1',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 18.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10020: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10019,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10021: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10019,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10022: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10019,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10031: {'type': 'nodepath', 'name': 'crateSquare3',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 54.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10032: {'type': 'nodepath', 'name': 'crateSquare2',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 36.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10033: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10032,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10034: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10032,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10035: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10032,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10044: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10031,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10045: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10031,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10046: {'type': 'nodepath', 'name': 'stompers',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-1.0, 0.0, 4.40000009537),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10047: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10031,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10059: {'type': 'nodepath', 'name': 'wall6',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 108.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10062: {'type': 'nodepath', 'name': 'wall7',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 124.5, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.39999997616, 1.0)},
10063: {'type': 'nodepath', 'name': 'walls',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0.0, -0.019999999553, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10065: {'type': 'nodepath', 'name': 'wall0',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10070: {'type': 'nodepath', 'name': 'leftBranch',
'comment': '',
'parentEntId': 10000,
'pos': Point3(-17.8978881836, 72.0, 0.0),
'hpr': Vec3(90.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10071: {'type': 'nodepath', 'name': 'crateSquare0',
'comment': 'Y=N*18',
'parentEntId': 10153,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10072: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10071,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10073: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10071,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10074: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10071,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10075: {'type': 'nodepath', 'name': 'frontCrateRow',
'comment': '',
'parentEntId': 10007,
'pos': Point3(0.0, -12.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.844285488129, 1.0, 1.0)},
10080: {'type': 'nodepath', 'name': 'crateSquare4',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 72.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10081: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10080,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10082: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10080,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10083: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10080,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10092: {'type': 'nodepath', 'name': 'wall1',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 18.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10095: {'type': 'nodepath', 'name': 'wall2',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 36.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10098: {'type': 'nodepath', 'name': 'wall3',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 54.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10101: {'type': 'nodepath', 'name': 'wall4',
'comment': '',
'parentEntId': 10063,
'pos': Point3(0.0, 72.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Point3(1.0, 1.0, 1.0)},
10104: {'type': 'nodepath', 'name': 'crateSquare5',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 90.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10105: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10104,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10106: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10104,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10107: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10104,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10117: {'type': 'nodepath', 'name': 'crateSquare6',
'comment': 'Y=N*18',
'parentEntId': 10007,
'pos': Point3(0.0, 108.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10118: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10117,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10119: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10117,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10120: {'type': 'nodepath', 'name': 'row1',
'comment': '',
'parentEntId': 10117,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10135: {'type': 'nodepath', 'name': 'crateSquare1',
'comment': 'Y=N*18',
'parentEntId': 10153,
'pos': Point3(0.0, 18.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10136: {'type': 'nodepath', 'name': 'row2',
'comment': '',
'parentEntId': 10135,
'pos': Point3(0.0, 6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10137: {'type': 'nodepath', 'name': 'row0',
'comment': '',
'parentEntId': 10135,
'pos': Point3(0.0, -6.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1},
10138: {'type': | |
@param x: Centre of the area to sample (X coordinate).
@param y: Centre of the area to sample (Y coordinate).
@param dx: Optional size of the area to sample (X coordinate).
@param dy: Optional size of the area to sample (Y coordinate).
@return: Floating point value.
"""
pass
def saveToScript():
"""saveToScript(filename, fileContent) -> None
Saves the fileContent with the given filename.
"""
pass
def saveUserPreset():
"""saveUserPreset(node, presetName) -> None
Saves a node's current knob values as a user preset.
@param presetName: Name of the preset to create.
@return: bool."""
pass
def saveWindowLayout():
"""saveWindowLayout(i=-1) -> None
Saves the current window layout.
@param i: Optional layout index. If this is omitted or set to a negative value, save as the default layout.
@return: None.
WARNING - DEPRECATED ( nuke.saveWindowLayout ):
This method is deprecated. The Save action in the Workspace Menu corresponding to the input argument will be triggered.
hiero.ui.saveWorkspace(name) should be called with the new workspace name."""
pass
def scriptClear():
"""Clears a Nuke script and resets all the root knobs to user defined knob defaults. To reset to compiled in defaults only pass in resetToCompiledDefaults=True."""
pass
def scriptExit():
"""Exit the Application if 'forceExit' is True, otherwise 'nuke.scriptSaveAndClear' will be called
@param forceExit: Optional parameter. Forces the Application to close.
@return: None."""
pass
def scriptName():
"""scriptName() -> String
Return the current script's file name"""
pass
def scriptNew():
"""Start a new script. Returns True if successful."""
pass
def scriptOpen():
"""Opens a new script containing the contents of the named file."""
pass
def scriptReadFile():
"""Read nodes from a file."""
pass
def scriptReadText():
"""Read nodes from a string."""
pass
def scriptSave(filename=None):
"""scriptSave(filename=None) -> bool
Saves the current script to the current file name. If there is no current file name and Nuke is running in GUI mode, the user is asked for a name using the file chooser.
@param filename: Save to this file name without changing the script name in the project (use scriptSaveAs() if you want it to change).
@return: True if the file was saved, otherwise an exception is thrown."""
pass
def scriptSaveAndClear(filename, ignoreUnsavedChanges):
""" scriptSaveAndClear(filename=None, ignoreUnsavedChanges=False) -> None
Calls nuke.scriptSave and nuke.scriptClear
@param filename: Save to this file name without changing the script name in the
project.
@param ignoreUnsavedChanges: Optional. If set to True scriptSave will be called,
ignoring any unsaved changes.
@return: True when successful. False if the user cancels the operation. In this
case nuke.scriptClear will not be called.
"""
pass
def scriptSaveAs():
"""scriptSaveAs(filename=None, overwrite=-1) -> None
Saves the current script with the given file name if supplied, or (in GUI mode) asks the user for one using the file chooser. If Nuke is not running in GUI mode, you must supply a filename.
@param filename: Saves the current script with the given file name if supplied, or (in GUI mode) asks the user for one using the file chooser.
@param overwrite: If 1 (true) always overwrite; if 0 (false) never overwrite; otherwise, in GUI mode ask the user, in terminal do same as False. Default is -1, meaning 'ask the user'."""
pass
def scriptSource():
"""Same as scriptReadFile()."""
pass
def script_directory():
"""None"""
pass
def selectAll():
"""selectAll() -> None
Select all nodes in the DAG.
@return: None"""
pass
def selectConnectedNodes():
""" Selects all nodes in the tree of the selected node. """
pass
def selectPattern():
"""selectPattern() -> None
Selects nodes according to a regular expression matching pattern, entered through an input dialog. The pattern can include wildcards ('?' and '*') as well as regular expressions. The expressions are checked against the node name, label, class, and associated file names.
@return: None"""
pass
def selectSimilar(matchType):
"""selectSimilar(matchType) -> None
Selects nodes that match a node in the current selection based on matchType criteria.
@param matchType: One of nuke.MATCH_CLASS, nuke.MATCH_LABEL, nuke.MATCH_COLOR.
@return: None.
"""
pass
def selectedNode():
"""selectedNode() -> Node.
Returns the 'node the user is thinking about'.
If several nodes are selected, this returns one of them. The one returned will be an 'output' node in that no other selected nodes
use that node as an input. If no nodes are selected, then if the last thing typed was a hotkey this returns the node the cursor is pointing at.
If none, or the last event was not a hotkey, this produces a 'No node selected' error.
@return: Node.
"""
pass
def selectedNodes(filter=None):
"""selectedNodes(filter) -> List.
Returns a list of all selected nodes in the current group. An attempt is made to return them in 'useful' order where inputs are done before the final node, so commands applied to this list go from top-down.
@param filter: Optional class of Node. Instructs the algorithm to apply only to a specific class of nodes.
@return: The list of selected nodes.
"""
pass
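# Usage sketch (not part of the stub): iterate the current selection, optionally
# restricted to one node class; the knob name below is an assumption.
#
# >>> for node in nuke.selectedNodes("Read"):
# ...     print(node.name(), node["file"].value())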
def setPreset():
"""setPreset(nodeClassName, presetName, knobValues) -> None
Create a node preset for the given node using the supplied knob values
@param nodeClassName: Name of the node class to create a preset for.
@param presetName: Name of the preset to create.
@param knobValues: A dictionary containing a set of knob names and preset values.
@return: bool."""
pass
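# Sketch of the knobValues argument: a plain dict mapping knob names to the
# values to store (the class and knob names below are only an example).
#
# >>> nuke.setPreset("Blur", "softGlow", {"size": "20", "filter": "gaussian"})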
def setReadOnlyPresets(readOnly):
"""setReadOnlyPresets(readOnly) -> None
Sets whether newly created presets should be added in read-only mode.
Read-only presets can be applied to a node, but can't be overwritten or deleted.
:param readOnly:
"""
pass
def setUserPreset():
"""setUserPreset(nodeClassName, presetName, knobValues) -> None
Create a node preset for the given node using the supplied knob values
@param nodeClassName: Name of the node class to create a preset for.
@param presetName: Name of the preset to create.
@param knobValues: A dictionary containing a set of knob names and preset values.
@return: bool."""
pass
def show():
"""show(n, forceFloat) -> None
Opens a window for each named node, as though the user double-clicked on them. For normal operators this opens the
control panel, for viewers it opens the viewer, for groups it opens the control panel.
@param n: Optional node argument. Default is the current node.
@param forceFloat: Optional python object. If it evaluates to True it will open the window as a floating panel. Default is False.
@return: None"""
pass
def showBookmarkChooser(n):
"""showBookmarkChooser(n) -> None
Show bookmark chooser search box.
@return: None
:param n: """
pass
def showCreateViewsDialog(views):
"""showCreateViewsDialog(views) -> void
Show a dialog to prompt the user to add or create missing views.
@param views: List of views to be created.
@return: An integer value representing the choice the user selected: nuke.ADD_VIEWS, nuke.REPLACE_VIEWS or nuke.DONT_CREATE_VIEWS"""
pass
def showDag(n):
"""showDag(n) -> None
Show the tree view of a group node or opens a node control panel.
@param n: Optional Group.
@return: None"""
pass
def showInfo(n):
"""showInfo(n) -> str
Returns a long string of debugging information about each node and
the operators it is currently managing. You should not rely on its
contents or format being the same in different versions of Nuke.
@param n: Optional node argument.
@return: String.
"""
pass
def showSettings():
"""showSettings() -> None
Show the settings of the current group.
@return: None"""
pass
def splayNodes():
"""splayNodes() -> None
Deprecated. Use Group.splaySelectedNodes.
@return: None"""
pass
def startPerformanceTimers():
"""startPerformanceTimers() -> None
Start keeping track of accumulated time on the performance timers, and display the accumulated time in the DAG.
"""
pass
def stderr_redirector():
"""Internal to Nuke. Not for public use."""
pass
def stopPerformanceTimers():
"""stopPerformanceTimers() -> None
Stop keeping track of accumulated time on the performance timers, and cease displaying the accumulated time in the DAG.
"""
pass
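# Typical bracketing of the two timer calls above when profiling interactively
# (sketch only):
#
# >>> nuke.startPerformanceTimers()
# >>> # ... work with the script ...
# >>> nuke.stopPerformanceTimers()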
def stripFrameRange(clipname):
"""stripFrameRange(clipname) -> string
Strip out the frame range from a clipname, leaving a file path (still possibly with variables).
@param clipname: The clipname.
@return: The name without the frame range."""
pass
def suspendPathProcessing():
"""suspendPathProcessing() -> | |
import logging
logging.root.setLevel(logging.INFO)
import six, ptypes
from ptypes import *
import datetime, time
## General structures
class MSTime(pbinary.struct):
_fields_ = [
(5, 'Hour'),
(6, 'Minute'),
(5, '2Seconds'),
]
def time(self):
h, m, ds = (self[fld] for fld in ['Hour', 'Minute', '2Seconds'])
return datetime.time(h % 24, m % 60, (2 * ds) % 60)
def summary(self):
if self['Hour'] < 24 and self['Minute'] < 60 and 2 * self['2Seconds'] < 60:
res = self.time()
return res.isoformat()
return super(MSTime, self).summary()
class MSDate(pbinary.struct):
_fields_ = [
(7, 'Year'),
(4, 'Month'),
(5, 'Day'),
]
def date(self):
y, m, d = (self[fld] for fld in ['Year', 'Month', 'Day'])
return datetime.date(1980 + y, m, d)
def summary(self):
res = self.date()
return res.isoformat()
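# The two bitfields above follow the MS-DOS timestamp packing: the date word is
# ((year - 1980) << 9) | (month << 5) | day and the time word is
# (hour << 11) | (minute << 5) | (seconds // 2), which is why '2Seconds' is
# doubled back in MSTime.time(). Quick arithmetic check (plain Python):
#
# >>> year, month, day = 2020, 2, 14
# >>> ((year - 1980) << 9) | (month << 5) | day
# 20558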
class VersionMadeBy(pint.enum, pint.uint16_t):
_values_ = [(n, i) for i, n in enumerate(('MSDOS', 'Amiga', 'OpenVMS', 'Unix', 'VM/CMS', 'Atari ST', 'OS/2', 'Macintosh', 'Z-System', 'CP/M', 'Windows', 'MVS', 'VSE', 'Acorn', 'VFAT', 'Alternate MVS', 'BeOS', 'Tandem', 'Os/400', 'OSX'))]
class VersionNeeded(pint.enum, pint.uint16_t):
_values_ = []
class DataDescriptor(pstruct.type):
_fields_ = [
(pint.uint32_t, 'crc-32'),
(pint.uint32_t, 'compressed size'),
(pint.uint32_t, 'uncompressed size'),
]
def summary(self):
return 'crc-32={:08X} compressed-size={:d} uncompressed-size={:d}'.format(self['crc-32'].int(), self['compressed size'].int(), self['uncompressed size'].int())
class ZipDataDescriptor(pstruct.type):
Signature = 0x08074b50
class _SignatureScan(parray.terminated):
_object_ = pint.uint8_t
def isTerminator(self, value):
if len(self.value) > 3:
octets = pint.uint32_t().set(ZipDataDescriptor.Signature)
return self[-4:].serialize() == octets.serialize()
return False
def int(self):
return self[-4:].cast(pint.uint32_t).int()
def data(self):
return self.serialize()[:-4]
_fields_ = [
(_SignatureScan, 'data'),
(DataDescriptor, 'descriptor'),
]
def summary(self):
return 'descriptor={{{:s}}} data={{...}}'.format(self['descriptor'].summary())
def data(self, **kwds):
return self['data'].data()
@pbinary.littleendian
class BitFlags(pbinary.flags):
_fields_ = [
(2, 'Reserved'),
(1, 'MaskedDirectory'),
(1, 'PKEnhanced'),
(1, 'UTF8'),
(4, 'unused'),
(1, 'StrongEncryption'),
(1, 'CompressedPatchData'),
(1, 'EnhancedDeflating'),
(1, 'PostDescriptor'),
(2, 'Compression'),
(1, 'Encrypted'),
]
## Compression methods
class CompressionMethod(pint.enum, pint.uint16_t):
_values_ = [
('Stored', 0),
('Shrunk', 1), # LZW
('Reduced(1)', 2), # Expanding
('Reduced(2)', 3),
('Reduced(3)', 4),
('Reduced(4)', 5),
('Imploded', 6), # Shannon-Fano
('Tokenized', 7),
('Deflated', 8), # zlib?
('Deflate64', 9), # zlib64
('PKImplode', 10), # old IBM TERSE
('BZIP2', 12), # bz2
('LZMA', 14), # lzma
('Terse', 18), # IBM TERSE
('LZ77', 19),
('WavPack', 97), # audio
('PPMd', 98),
]
class CompressionMethodFlags(ptype.definition):
cache = {}
class unknown(pbinary.struct):
_fields_ = [(2, 'unknown')]
default = unknown
@CompressionMethodFlags.define
class MethodImplodingFlags(pbinary.flags):
type = 6
_fields_ = [(1, '8kDictionary'), (1, '3Shannon-FanoTrees')]
class MethodDeflatingFlags(pbinary.struct):
_fields_ = [(2, 'Quality')]
@CompressionMethodFlags.define
class MethodDeflatingFlags8(MethodDeflatingFlags):
type = 8
@CompressionMethodFlags.define
class MethodDeflatingFlags9(MethodDeflatingFlags):
type = 9
@CompressionMethodFlags.define
class MethodLZMAFlags(pbinary.flags):
type = 14
_fields_ = [(1, 'EOS'), (1, 'unused')]
## Extra data field mappings
class ExtraField(ptype.definition):
cache = {}
class Extra_NTFS_TagType(ptype.definition):
attribute, cache = 'tag', {}
class Extra_NTFS_Tag(pstruct.type):
_fields_ = [
(pint.uint16_t, 'Tag'),
(pint.uint16_t, 'Size'),
(lambda self: Extra_NTFS_TagType.get(self['Tag'].li.int(), length=self['Size'].li.int()), 'Attribute'),
]
def summary(self):
return "({:+#x}) {:s}".format(self['Size'].int(), self['Attribute'].summary())
@Extra_NTFS_TagType.define
class NTFS_Attributes(pstruct.type):
tag = 1
class TenthOfAMicrosecond(pint.uint64_t):
def datetime(self):
res, epoch = self.int(), datetime.datetime(1601, 1, 1, tzinfo=datetime.timezone.utc)
return epoch + datetime.timedelta(microseconds=res * 1e-1)
def summary(self):
tzinfo = datetime.timezone(datetime.timedelta(seconds=-(time.altzone if time.daylight else time.timezone)))
res = self.datetime().astimezone(tzinfo)
return res.isoformat()
_fields_ = [
(TenthOfAMicrosecond, 'Mtime'),
(TenthOfAMicrosecond, 'Atime'),
(TenthOfAMicrosecond, 'Ctime'),
]
def summary(self):
mtime, atime, ctime = (self[fld].datetime() for fld in ['Mtime', 'Atime', 'Ctime'])
return "Mtime={:s} Atime={:s} Ctime={:s}".format(mtime.isoformat(), atime.isoformat(), ctime.isoformat())
@ExtraField.define
class Extra_NTFS(pstruct.type):
type = 0x000a
_fields_ = [
(pint.uint32_t, 'Reserved'),
(lambda self: dyn.blockarray(Extra_NTFS_Tag, self.blocksize() - self['Reserved'].li.size()), 'Tags'),
]
def summary(self):
return "Reserved={:#x} [{:s}]".format(self['Reserved'].int(), ', '.join(item.summary() for item in self['Tags']))
# FIXME: Add these from section 4.6
class Extensible_data_field(pstruct.type):
def __unknown(self):
cb = sum(self[k].li.size() for k in ('id','size','data'))
return dyn.block(self.blocksize() - cb)
def __data(self):
id, size = (self[item].li for item in ['id', 'size'])
return ExtraField.get(id.int(), blocksize=lambda self, bs=size.int(): bs)
_fields_ = [
(pint.uint16_t, 'id'),
(pint.uint16_t, 'size'),
(__data, 'data'),
(__unknown, 'unknown'),
]
def summary(self):
return "{:s} {:s}".format(self['data'].classname(), self['data'].summary())
## File records
class ZipRecord(ptype.definition):
cache = {}
attribute = 'signature'
class LocalFileHeader(pstruct.type):
signature = 0, 0x04034b50
@ZipRecord.define
class LocalFileHeader32(LocalFileHeader):
signature = 32, 0x04034b50
def __extra_field(self):
cb = self['extra field length'].li
return dyn.clone(Extensible_data_field, blocksize=lambda s, bs=cb.int(): bs)
def __file_data(self):
desc = self.p.DirectoryRecord['data descriptor'] if hasattr(self.p, 'DirectoryRecord') else self['data descriptor'].li
return dyn.block(desc['compressed size'].int())
def __post_data_descriptor(self):
if hasattr(self.p, 'DirectoryRecord'):
flags = self.p.DirectoryRecord['general purpose bit flag']
return DataDescriptor if flags['PostDescriptor'] else ptype.undefined
flags = self['general purpose bit flag'].li
return ZipDataDescriptor if flags['PostDescriptor'] else ptype.undefined
_fields_ = [
(pint.uint16_t, 'version needed to extract'),
(BitFlags, 'general purpose bit flag'),
(CompressionMethod, 'compression method'),
(pbinary.littleendian(MSTime), 'last mod file time'),
(pbinary.littleendian(MSDate), 'last mod file date'),
(DataDescriptor, 'data descriptor'),
(pint.uint16_t, 'file name length'),
(pint.uint16_t, 'extra field length'),
(lambda self: dyn.clone(pstr.string, length=self['file name length'].li.int()), 'file name'),
(__extra_field, 'extra field'),
# XXX: if encrypted, include encryption header here
(__file_data, 'file data'),
# XXX: i think this record is actually encoded within the file data
(__post_data_descriptor, 'post data descriptor'),
]
def summary(self):
needed = self['version needed to extract']
method, desc = self.Method(), self.Descriptor()
dt = datetime.datetime.combine(self['last mod file date'].date(), self['last mod file time'].time())
return "{:s} (version {:d}) {!r} datetime={:s} compressed={:#x} uncompressed={:#x} crc32={:#x}{:s}".format(method.str(), needed.int(), self.Name(), dt.isoformat(), desc['compressed size'].int(), desc['uncompressed size'].int(), desc['crc-32'].int(), " {:s}".format(self['general purpose bit flag'].summary()) if self['general purpose bit flag'].int() > 0 else '')
def Name(self):
return self['file name'].str()
def Method(self):
return self['compression method']
def Descriptor(self):
PostDescriptorQ = self['general purpose bit flag'].o['PostDescriptor']
return self['post data descriptor']['descriptor'] if PostDescriptorQ else self['data descriptor']
def Data(self):
PostDescriptorQ = self['general purpose bit flag'].o['PostDescriptor']
return self['post data descriptor'].data() if PostDescriptorQ else self['file data'].serialize()
def extract(self, **kwds):
res = self.Data()
if not kwds.get('decompress', False):
logging.debug('Extracting {:d} bytes of compressed content'.format(len(res)))
return res
method = self['compression method']
if method['Stored']:
logging.debug('Decompressing ({:s}) {:d} bytes of content.'.format('Uncompressed', len(res)))
return res
elif method['Deflated']:
import zlib
logging.debug('Decompressing ({:s}) {:d} bytes of content.'.format('Zlib', len(res)))
return zlib.decompress(res, -zlib.MAX_WBITS)
elif method['BZIP2']:
import bz2
logging.debug('Decompressing ({:s}) {:d} bytes of content.'.format('BZip2', len(res)))
return bz2.decompress(res)
elif method['LZMA']:
import lzma
logging.debug('Decompressing ({:s}) {:d} bytes of content.'.format('Lzma', len(res)))
return lzma.decompress(res)
raise ValueError(method)
def listing(self):
cls, index, ofs, bs = self.classname(), int(self.getparent(Record).name()), self.getparent(Record).getoffset(), self.getparent(Record).size()
filename, meth, descr = self.Name(), self.Method(), self.Descriptor()
dt = datetime.datetime.combine(self['last mod file date'].date(), self['last mod file time'].time())
return '{{{:d}}} {:s} ({:x}{:+x}) {!r} method={:s} {:s} timestamp={:s}'.format(index, cls, ofs, bs, filename, meth.str(), descr.summary(), dt.isoformat())
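# Parsing sketch: `Record` is referenced by listing() above but defined elsewhere
# in this module, so the exact container layout is an assumption here.
#
# >>> source = ptypes.prov.file('sample.zip')
# >>> record = Record(source=source).l      # load the first record of the archive
# >>> print(record.summary())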
class CentralDirectoryEntry(pstruct.type):
signature = 0, 0x02014b50
@ZipRecord.define
class CentralDirectoryEntry32(CentralDirectoryEntry):
signature = 32, 0x02014b50
def __relative_offset_of_local_header(self):
t = dyn.clone(Record, DirectoryRecord=self)
return dyn.pointer(t, pint.uint32_t)
def __file_name(self):
res = self['file name length'].li
return dyn.clone(pstr.string, length=res.int())
def __extra_field(self):
cb = self['extra field length'].li
return dyn.clone(Extensible_data_field, blocksize=lambda s, bs=cb.int(): bs)
def __file_comment(self):
res = self['file comment length'].li
return dyn.clone(pstr.string, length=res.int())
_fields_ = [
(VersionMadeBy, 'version made by'),
(VersionNeeded, 'version needed to extract'),
(BitFlags, 'general purpose bit flag'),
(CompressionMethod, 'compression method'),
(pbinary.littleendian(MSTime), 'last mod file time'),
(pbinary.littleendian(MSDate), 'last mod file date'),
(DataDescriptor, 'data descriptor'),
(pint.uint16_t, 'file name length'),
(pint.uint16_t, 'extra field length'),
(pint.uint16_t, 'file comment length'),
(pint.uint16_t, 'disk number start'),
(pint.uint16_t, 'internal file attributes'),
(pint.uint32_t, 'external file attributes'),
(__relative_offset_of_local_header, 'relative offset of local header'),
(__file_name, 'file name'),
(__extra_field, 'extra field'),
(__file_comment, 'file comment'),
]
def summary(self):
disk, offset = (self[item] for item in ['disk number start', 'relative offset of local header'])
version, needed = (self[item] for item in ['version made by', 'version needed to extract'])
method, desc = self.Method(), self.Descriptor()
dt = datetime.datetime.combine(self['last mod file date'].date(), self['last mod file time'].time())
return "disk#{:d} {:s} (version={:d}<{:d}) offset={:+#x} {!r}{:s} datetime={:s} compressed={:#x} uncompressed={:#x} crc32={:#x}{:s}".format(disk.int(), method.str(), needed.int(), version.int(), offset.int(), self.Name(), " ({:s})".format(self['file comment'].str()) if self['file comment length'].int() else '', dt.isoformat(), desc['compressed size'].int(), desc['uncompressed size'].int(), desc['crc-32'].int(), " {:s}".format(self['general purpose bit flag'].summary()) if self['general purpose bit flag'].int() > 0 else '')
def Name(self):
return self['file name'].str()
def Method(self):
return self['compression method']
def Descriptor(self):
return self['data descriptor']
def Comment(self):
return self['file comment'].str()
def extract(self, **kwds):
return self.serialize()
def listing(self):
cls, index, ofs, bs = self.classname(), int(self.getparent(Record).name()), self.getparent(Record).getoffset(), self.getparent(Record).size()
filename, meth, descr = self.Name(), self.Method(), self.Descriptor()
dt = datetime.datetime.combine(self['last mod file date'].date(), self['last mod file time'].time())
return '{{{:d}}} {:s} ({:x}{:+x}) {!r} version-made-by={:s} version-needed-to-extract={:s} compression-method={:s} {:s} timestamp={:s} disk-number-start={:d} internal-file-attributes={:#x} external-file-attributes={:#x}'.format(index, cls, ofs, bs, filename, self['version made by'].str(), self['version needed to extract'].str(), meth.str(), descr.summary(), dt.isoformat(), self['disk number start'].int(), self['internal file attributes'].int(), self['external file attributes'].int()) + ('// {:s}'.format(self.Comment()) if self['file comment length'].int() > 0 else '')
class EndOfCentralDirectory(pstruct.type):
signature = 0, 0x06054b50
@ZipRecord.define
class EndOfCentralDirectory32(EndOfCentralDirectory):
signature = 32, 0x06054b50
_fields_ = [
(pint.uint16_t, 'number of this disk'),
(pint.uint16_t, 'number of the disk with the start of the central directory'),
(pint.uint16_t, 'total number of entries in the central directory on this disk'),
(pint.uint16_t, 'total number of entries in the central directory'),
(pint.uint32_t, 'size of the central directory'),
| |
if isinstance(self.nonlin, SVFModel):
# propagate backward by scaling and squaring
g, h = spatial.exp_backward(vel00, g, h, steps=self.nonlin.steps)
sumgrad = g.mul_(factor) if sumgrad is None else sumgrad.add_(g, alpha=factor)
if hess:
sumhess = h.mul_(factor) if sumhess is None else sumhess.add_(h, alpha=factor)
sumloss = llx.mul_(factor) if sumloss is None else sumloss.add_(llx, alpha=factor)
# add regularization term
vgrad = self.nonlin.regulariser(vel0)
llv = 0.5 * vel0.flatten().dot(vgrad.flatten())
if grad:
sumgrad += vgrad
del vgrad
# print objective
llx = sumloss.item()
sumloss += llv
sumloss += self.lla
self.loss_value = sumloss.item()
if self.verbose and not in_line_search:
llv = llv.item()
self.llv = llv
ll = sumloss.item()
self.all_ll.append(ll)
lla = self.lla
self.n_iter += 1
line = '(nonlin) | '
line += f'{self.n_iter:03d} | {llx:12.6g} + {llv:12.6g} + {lla:12.6g} = {ll:12.6g}'
if self.ll_prev is not None:
gain = (self.ll_prev - ll) / max(abs(self.ll_max - ll), 1e-8)
line += f' | {gain:12.6g}'
print(line, end='\r')
self.ll_prev = ll
self.ll_max = max(self.ll_max, ll)
out = [sumloss]
if grad:
out.append(sumgrad)
if hess:
out.append(sumhess)
return tuple(out) if len(out) > 1 else out[0]
def do_affine(self, logaff, grad=False, hess=False, in_line_search=False):
"""Forward pass for updating the affine component (nonlin is not None)"""
sumloss = None
sumgrad = None
sumhess = None
# build affine and displacement field
logaff0 = logaff
aff_pos = self.affine.position[0].lower()
if any(loss.symmetric for loss in self.losses):
aff0, iaff0, gaff0, igaff0 = self.affine.exp2(logaff0, grad=True,
cache_result=not in_line_search)
phi0, iphi0 = self.nonlin.exp2(cache_result=True, recompute=False)
else:
iaff0 = None
aff0, gaff0 = self.affine.exp(logaff0, grad=True,
cache_result=not in_line_search)
phi0 = self.nonlin.exp(cache_result=True, recompute=False)
iphi0 = None
# register temporary "backward" loss for symmetric losses
losses = []
for loss in self.losses:
losses.append(loss)
if loss.symmetric:
bwdloss = copy.copy(loss)
bwdloss.moving, bwdloss.fixed = loss.fixed, loss.moving
bwdloss.symmetric = 'backward'
losses.append(bwdloss)
has_printed = False
for loss in losses:
factor = loss.factor
if loss.symmetric:
factor = factor / 2
if loss.symmetric == 'backward':
phi00 = iphi0
aff00 = iaff0
gaff00 = igaff0
else:
phi00 = phi0
aff00 = aff0
gaff00 = gaff0
is_level0 = True
for moving, fixed in zip(loss.moving, loss.fixed): # pyramid
# build complete warp
if aff_pos in 'fs':
aff_right = spatial.affine_matmul(aff00, fixed.affine)
aff_right = spatial.affine_lmdiv(self.nonlin.affine, aff_right)
gaff_right = torch.matmul(gaff00, fixed.affine)
gaff_right = linalg.lmdiv(self.nonlin.affine, gaff_right)
else:
aff_right = spatial.affine_lmdiv(self.nonlin.affine, fixed.affine)
gaff_right = None
if aff_pos in 'ms':
aff_left = spatial.affine_matmul(aff00, self.nonlin.affine)
aff_left = spatial.affine_lmdiv(moving.affine, aff_left)
gaff_left = torch.matmul(gaff00, self.nonlin.affine)
gaff_left = linalg.lmdiv(moving.affine, gaff_left)
else:
aff_left = spatial.affine_lmdiv(moving.affine, self.nonlin.affine)
gaff_left = None
if _almost_identity(aff_right) and fixed.shape == self.nonlin.shape:
right = None
phi = spatial.identity_grid(fixed.shape, **utils.backend(phi00))
phi += phi00
else:
right = spatial.affine_grid(aff_right, fixed.shape)
phi = regutils.smart_pull_grid(phi00, right)
phi += right
phi_right = phi
if _almost_identity(aff_left) and moving.shape == self.nonlin.shape:
left = None
else:
left = spatial.affine_grid(aff_left, self.nonlin.shape)
phi = spatial.affine_matvec(aff_left, phi)
# forward
warped, mask = moving.pull(phi, mask=True)
if fixed.masked:
if mask is None:
mask = fixed.mask
else:
mask = mask * fixed.mask
if not has_printed and self.verbose > 1 and not in_line_search \
and loss.symmetric != 'backward':
is_level0 = False
has_printed = True
init = spatial.affine_lmdiv(moving.affine, fixed.affine)
if _almost_identity(init) and moving.shape == fixed.shape:
init = moving.dat
else:
init = spatial.affine_grid(init, fixed.shape)
init = moving.pull(init)
self.mov2fix(fixed.dat, init, warped, dim=fixed.dim,
title=f'(affine) {self.n_iter:03d}')
# gradient/Hessian of the log-likelihood in observed space
g = h = None
if not grad and not hess:
llx = loss.loss.loss(warped, fixed.dat, dim=fixed.dim, mask=mask)
elif not hess:
llx, g = loss.loss.loss_grad(warped, fixed.dat, dim=fixed.dim, mask=mask)
else:
llx, g, h = loss.loss.loss_grad_hess(warped, fixed.dat, dim=fixed.dim, mask=mask)
def compose_grad(g, h, g_mu, g_aff):
"""
g, h : gradient/Hessian of loss wrt moving image
g_mu : spatial gradients of moving image
g_aff : gradient of affine matrix wrt Lie parameters
returns g, h: gradient/Hessian of loss wrt Lie parameters
"""
# Note that `h` can be `None`, but the functions I
# use deal with this case correctly.
dim = g_mu.shape[-1]
g = jg(g_mu, g)
h = jhj(g_mu, h)
g, h = regutils.affine_grid_backward(g, h)
dim2 = dim * (dim + 1)
g = g.reshape([*g.shape[:-2], dim2])
g_aff = g_aff[..., :-1, :]
g_aff = g_aff.reshape([*g_aff.shape[:-2], dim2])
g = linalg.matvec(g_aff, g)
if h is not None:
h = h.reshape([*h.shape[:-4], dim2, dim2])
h = g_aff.matmul(h).matmul(g_aff.transpose(-1, -2))
h = h.abs().sum(-1).diag_embed()
return g, h
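# Math note (sketch): compose_grad applies the chain rule for the Lie parameters q
# of the affine A(q). With loss l(mu(A x)) on the pulled image mu,
#     dl/dq = (dA/dq)^T vec( sum_x [x; 1] (J_mu(A x)^T dl/dmu(x))^T )
# jg/jhj rotate the image-space gradient/Hessian by the spatial gradients of mu,
# affine_grid_backward accumulates the outer products with the (homogeneous)
# voxel coordinates, and the final matvec/matmul with g_aff maps both to q.
# The h.abs().sum(-1).diag_embed() step is a diagonal majoriser that keeps the
# Hessian positive semi-definite for a stable Gauss-Newton update.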
# compose with spatial gradients
if grad or hess:
g0, g = g, None
h0, h = h, None
if aff_pos in 'ms':
g_left = regutils.smart_push(g0, phi_right, shape=self.nonlin.shape)
h_left = regutils.smart_push(h0, phi_right, shape=self.nonlin.shape)
mugrad = moving.pull_grad(left, rotate=False)
g_left, h_left = compose_grad(g_left, h_left, mugrad, gaff_left)
g = g_left
h = h_left
if aff_pos in 'fs':
g_right = g0
h_right = h0
mugrad = moving.pull_grad(phi, rotate=False)
jac = spatial.grid_jacobian(phi0, right, type='disp', extrapolate=False)
jac = torch.matmul(aff_left[:-1, :-1], jac)
mugrad = linalg.matvec(jac.transpose(-1, -2), mugrad)
g_right, h_right = compose_grad(g_right, h_right, mugrad, gaff_right)
g = g_right if g is None else g.add_(g_right)
h = h_right if h is None else h.add_(h_right)
if loss.symmetric == 'backward':
g = g.neg_()
sumgrad = g.mul_(factor) if sumgrad is None else sumgrad.add_(g, alpha=factor)
if hess:
sumhess = h.mul_(factor) if sumhess is None else sumhess.add_(h, alpha=factor)
sumloss = llx.mul_(factor) if sumloss is None else sumloss.add_(llx, alpha=factor)
# TODO add regularization term
lla = 0
# print objective
llx = sumloss.item()
sumloss += lla
sumloss += self.llv
self.loss_value = sumloss.item()
if self.verbose and not in_line_search:
self.n_iter += 1
ll = sumloss.item()
self.all_ll.append(ll)
llv = self.llv
line = '(affine) | '
line += f'{self.n_iter:03d} | {llx:12.6g} + {llv:12.6g} + {lla:12.6g} = {ll:12.6g}'
if self.ll_prev is not None:
gain = (self.ll_prev - ll) / max(abs(self.ll_max - ll), 1e-8)
line += f' | {gain:12.6g}'
print(line, end='\r')
self.ll_prev = ll
self.ll_max = max(self.ll_max, ll)
out = [sumloss]
if grad:
out.append(sumgrad)
if hess:
out.append(sumhess)
return tuple(out) if len(out) > 1 else out[0]
def do_affine_only(self, logaff, grad=False, hess=False, in_line_search=False):
"""Forward pass for updating the affine component (nonlin is None)"""
sumloss = None
sumgrad = None
sumhess = None
# build affine and displacement field
logaff0 = logaff
aff0, iaff0, gaff0, igaff0 = self.affine.exp2(logaff0, grad=True)
# register temporary "backward" loss for symmetric losses
losses = []
for loss in self.losses:
losses.append(loss)
if loss.symmetric:
bwdloss = copy.copy(loss)
bwdloss.moving, bwdloss.fixed = loss.fixed, loss.moving
bwdloss.symmetric = 'backward'
losses.append(bwdloss)
has_printed = False
for loss in losses:
factor = loss.factor
if loss.symmetric:
factor = factor / 2
if loss.symmetric == 'backward':
aff00 = iaff0
gaff00 = igaff0
else:
aff00 = aff0
gaff00 = gaff0
is_level0 = True
for moving, fixed in zip(loss.moving, loss.fixed): # pyramid
# build complete warp
aff = spatial.affine_matmul(aff00, fixed.affine)
aff = spatial.affine_lmdiv(moving.affine, aff)
gaff = torch.matmul(gaff00, fixed.affine)
gaff = linalg.lmdiv(moving.affine, gaff)
phi = spatial.affine_grid(aff, fixed.shape)
# forward
warped, mask = moving.pull(phi, mask=True)
if fixed.masked:
if mask is None:
mask = fixed.mask
else:
mask = mask * fixed.mask
if not has_printed and self.verbose > 1 and not in_line_search \
and loss.symmetric != 'backward':
is_level0 = False
has_printed = True
init = spatial.affine_lmdiv(moving.affine, fixed.affine)
if _almost_identity(init) and moving.shape == fixed.shape:
init = moving.dat
else:
init = spatial.affine_grid(init, fixed.shape)
init = moving.pull(init)
self.mov2fix(fixed.dat, init, warped, dim=fixed.dim,
title=f'(affine) {self.n_iter:03d}')
# gradient/Hessian of the log-likelihood in observed space
g = h = None
if not grad and not hess:
llx = loss.loss.loss(warped, fixed.dat, dim=fixed.dim, mask=mask)
elif not hess:
llx, g = loss.loss.loss_grad(warped, fixed.dat, dim=fixed.dim, mask=mask)
else:
llx, g, h = loss.loss.loss_grad_hess(warped, fixed.dat, dim=fixed.dim, mask=mask)
def compose_grad(g, h, g_mu, g_aff):
"""
g, h : gradient/Hessian of loss wrt moving image
g_mu : spatial gradients of moving image
g_aff : gradient of affine matrix wrt Lie parameters
returns g, h: gradient/Hessian of loss wrt Lie parameters
"""
# Note that `h` can be `None`, but the functions I
# use deal with this case correctly.
dim = g_mu.shape[-1]
g = jg(g_mu, g)
h = jhj(g_mu, h)
g, h = regutils.affine_grid_backward(g, h)
dim2 = dim * (dim + 1)
g = g.reshape([*g.shape[:-2], dim2])
g_aff = g_aff[..., :-1, :]
g_aff = g_aff.reshape([*g_aff.shape[:-2], dim2])
g = linalg.matvec(g_aff, g)
if h is not None:
h = h.reshape([*h.shape[:-4], dim2, dim2])
h = g_aff.matmul(h).matmul(g_aff.transpose(-1, -2))
h = h.abs().sum(-1).diag_embed()
| |
import py
import ctypes
from pypy.rlib.objectmodel import specialize, we_are_translated
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
from pypy.jit.codegen.model import ReplayBuilder, dummy_var
from pypy.jit.codegen.i386.codebuf import CodeBlockOverflow
from pypy.jit.codegen.i386.operation import *
from pypy.jit.codegen.i386.regalloc import RegAllocator, DEBUG_STACK
from pypy.jit.codegen.i386.regalloc import gv_frame_base, StorageInStack
from pypy.jit.codegen.i386.regalloc import Place, OpAbsorbPlace, OpTouch
from pypy.jit.codegen.i386.regalloc import write_stack_reserve, write_stack_adj
from pypy.jit.codegen import conftest
from pypy.rpython.annlowlevel import llhelper
DEBUG_TRAP = conftest.option.trap
# ____________________________________________________________
class IntConst(GenConst):
def __init__(self, value):
self.value = value
@specialize.arg(1)
def revealconst(self, T):
return cast_int_to_whatever(T, self.value)
def __repr__(self):
"NOT_RPYTHON"
try:
return "const=%s" % (imm(self.value).assembler(),)
except TypeError: # from Symbolics
return "const=%r" % (self.value,)
def repr(self):
return "const=$%s" % (self.value,)
class AddrConst(GenConst):
def __init__(self, addr):
self.addr = addr
@specialize.arg(1)
def revealconst(self, T):
return cast_adr_to_whatever(T, self.addr)
def __repr__(self):
"NOT_RPYTHON"
return "const=%r" % (self.addr,)
def repr(self):
return "const=<0x%x>" % (llmemory.cast_adr_to_int(self.addr),)
@specialize.arg(0)
def cast_int_to_whatever(T, value):
if isinstance(T, lltype.Ptr):
return lltype.cast_int_to_ptr(T, value)
elif T is llmemory.Address:
return llmemory.cast_int_to_adr(value)
else:
return lltype.cast_primitive(T, value)
@specialize.arg(0)
def cast_whatever_to_int(T, value):
if isinstance(T, lltype.Ptr):
return lltype.cast_ptr_to_int(value)
elif T is llmemory.Address:
return llmemory.cast_adr_to_int(value)
else:
return lltype.cast_primitive(lltype.Signed, value)
@specialize.arg(0)
def cast_adr_to_whatever(T, addr):
if T is llmemory.Address:
return addr
elif isinstance(T, lltype.Ptr):
return llmemory.cast_adr_to_ptr(addr, T)
elif T is lltype.Signed:
return llmemory.cast_adr_to_int(addr)
else:
assert 0, "XXX not implemented"
# ____________________________________________________________
class FlexSwitch(CodeGenSwitch):
def __init__(self, rgenop, graphctx, reg, inputargs_gv, inputoperands):
self.rgenop = rgenop
self.graphctx = graphctx
self.reg = reg
self.inputargs_gv = inputargs_gv
self.inputoperands = inputoperands
self.defaultcaseaddr = 0
def initialize(self, mc):
self.graphctx.write_stack_adj(mc, initial=False)
self._reserve(mc)
default_builder = Builder(self.rgenop, self.graphctx,
self.inputargs_gv, self.inputoperands)
start = self.nextfreepos
end = self.endfreepos
fullmc = self.rgenop.InMemoryCodeBuilder(start, end)
default_builder.set_coming_from(fullmc)
fullmc.done()
default_builder.update_defaultcaseaddr_of = self
default_builder.start_writing()
return default_builder
def _reserve(self, mc):
RESERVED = 11*4+5 # XXX quite a lot for now :-/
pos = mc.tell()
mc.UD2()
mc.write('\x00' * (RESERVED-1))
self.nextfreepos = pos
self.endfreepos = pos + RESERVED
def _reserve_more(self):
start = self.nextfreepos
end = self.endfreepos
newmc = self.rgenop.open_mc()
self._reserve(newmc)
self.rgenop.close_mc(newmc)
fullmc = self.rgenop.InMemoryCodeBuilder(start, end)
fullmc.JMP(rel32(self.nextfreepos))
fullmc.done()
def add_case(self, gv_case):
rgenop = self.rgenop
targetbuilder = Builder(self.rgenop, self.graphctx,
self.inputargs_gv, self.inputoperands)
try:
self._add_case(gv_case, targetbuilder)
except CodeBlockOverflow:
self._reserve_more()
self._add_case(gv_case, targetbuilder)
targetbuilder.start_writing()
return targetbuilder
def _add_case(self, gv_case, targetbuilder):
start = self.nextfreepos
end = self.endfreepos
mc = self.rgenop.InMemoryCodeBuilder(start, end)
value = gv_case.revealconst(lltype.Signed)
mc.CMP(self.reg, imm(value))
targetbuilder.set_coming_from(mc, Conditions['E'])
pos = mc.tell()
assert self.defaultcaseaddr != 0
mc.JMP(rel32(self.defaultcaseaddr))
mc.done()
self.nextfreepos = pos
# ____________________________________________________________
GC_MALLOC = lltype.Ptr(lltype.FuncType([lltype.Signed], llmemory.Address))
def gc_malloc(size):
from pypy.rpython.lltypesystem.lloperation import llop
return llop.call_boehm_gc_alloc(llmemory.Address, size)
def gc_malloc_fnaddr():
"""Returns the address of the Boehm 'malloc' function."""
if we_are_translated():
gc_malloc_ptr = llhelper(GC_MALLOC, gc_malloc)
return lltype.cast_ptr_to_int(gc_malloc_ptr)
else:
# <pedronis> don't do this at home
import threading
if not isinstance(threading.currentThread(), threading._MainThread):
import py
py.test.skip("must run in the main thread")
try:
from ctypes import cast, c_void_p, util
path = util.find_library('gc')
if path is None:
raise ImportError("Boehm (libgc) not found")
boehmlib = ctypes.cdll.LoadLibrary(path)
except ImportError, e:
import py
py.test.skip(str(e))
else:
GC_malloc = boehmlib.GC_malloc
return cast(GC_malloc, c_void_p).value
def peek_word_at(addr):
# now the Very Obscure Bit: when translated, 'addr' is an
# address. When not, it's an integer. It just happens to
# make the test pass, but that's probably going to change.
if we_are_translated():
return addr.signed[0]
else:
from ctypes import cast, c_void_p, c_int, POINTER
p = cast(c_void_p(addr), POINTER(c_int))
return p[0]
def poke_word_into(addr, value):
# now the Very Obscure Bit: when translated, 'addr' is an
# address. When not, it's an integer. It just happens to
# make the test pass, but that's probably going to change.
if we_are_translated():
addr.signed[0] = value
else:
from ctypes import cast, c_void_p, c_int, POINTER
p = cast(c_void_p(addr), POINTER(c_int))
p[0] = value
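# Round-trip sketch for the two helpers above (untranslated mode, so `addr` is a
# plain integer; the buffer below is only for illustration):
#
# >>> buf = ctypes.create_string_buffer(4)
# >>> poke_word_into(ctypes.addressof(buf), 0x1234)
# >>> hex(peek_word_at(ctypes.addressof(buf)))
# '0x1234'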
# ____________________________________________________________
class Builder(GenBuilder):
coming_from = 0
update_defaultcaseaddr_of = None
paused_alive_gv = None
order_dependency = None
keepalives_gv = None
def __init__(self, rgenop, graphctx, inputargs_gv, inputoperands):
self.rgenop = rgenop
self.graphctx = graphctx
self.inputargs_gv = inputargs_gv
self.inputoperands = inputoperands
self.operations = []
def start_writing(self):
self.paused_alive_gv = None
def generate_block_code(self, final_vars_gv, final_operands=None,
renaming=True):
self.insert_keepalives()
if self.order_dependency is not None:
self.order_dependency.force_generate_code()
self.order_dependency = None
allocator = RegAllocator(self.operations)
allocator.set_final(final_vars_gv, final_operands)
if not renaming:
assert final_operands is None
final_vars_gv = allocator.varsused() # unique final vars
allocator.compute_lifetimes()
allocator.init_reg_alloc(self.inputargs_gv, self.inputoperands)
mc = self.start_mc()
allocator.generate_operations(mc)
if final_operands is not None:
allocator.generate_final_moves(final_vars_gv, final_operands)
#print 'NSTACKMAX==============>', allocator.nstackmax
self.graphctx.ensure_stack_vars(allocator.nstackmax)
del self.operations[:]
if renaming:
self.inputargs_gv = [GenVar() for v in final_vars_gv]
else:
# just keep one copy of each Variable that is alive
self.inputargs_gv = final_vars_gv
self.inputoperands = [allocator.get_operand(v) for v in final_vars_gv]
return mc
def insert_keepalives(self):
if self.keepalives_gv is not None:
self.operations.append(OpTouch(self.keepalives_gv))
self.keepalives_gv = None
def enter_next_block(self, kinds, args_gv):
# we get better register allocation if we write a single large mc block
self.insert_keepalives()
for i in range(len(args_gv)):
op = OpSameAs(args_gv[i])
args_gv[i] = op
self.operations.append(op)
lbl = Label(self)
lblop = OpLabel(lbl, args_gv)
self.operations.append(lblop)
return lbl
def set_coming_from(self, mc, insncond=INSN_JMP):
self.coming_from_cond = insncond
self.coming_from = mc.tell()
insnemit = EMIT_JCOND[insncond]
insnemit(mc, rel32(-1))
self.coming_from_end = mc.tell()
def start_mc(self):
mc = self.rgenop.open_mc()
# update the coming_from instruction
start = self.coming_from
if start:
targetaddr = mc.tell()
end = self.coming_from_end
fallthrough = targetaddr == end
if self.update_defaultcaseaddr_of: # hack for FlexSwitch
self.update_defaultcaseaddr_of.defaultcaseaddr = targetaddr
fallthrough = False
if fallthrough:
# the jump would be with an offset 0, i.e. it would go
# exactly after itself, so we don't really need the jump
# instruction at all and we can overwrite it and continue.
mc.seekback(end - start)
targetaddr = start
else:
# normal case: patch the old jump to go to targetaddr
oldmc = self.rgenop.InMemoryCodeBuilder(start, end)
insn = EMIT_JCOND[self.coming_from_cond]
insn(oldmc, rel32(targetaddr))
oldmc.done()
self.coming_from = 0
return mc
def _jump_if(self, cls, gv_condition, args_for_jump_gv):
newbuilder = Builder(self.rgenop, self.graphctx,
list(args_for_jump_gv), None)
newbuilder.order_dependency = self
self.operations.append(cls(gv_condition, newbuilder))
return newbuilder
def jump_if_false(self, gv_condition, args_for_jump_gv):
return self._jump_if(JumpIfNot, gv_condition, args_for_jump_gv)
def jump_if_true(self, gv_condition, args_for_jump_gv):
return self._jump_if(JumpIf, gv_condition, args_for_jump_gv)
def finish_and_goto(self, outputargs_gv, targetlbl):
operands = targetlbl.inputoperands
if operands is None:
# jumping to a label in a builder whose code has not been
# generated yet - this builder could be 'self', in the case
# of a tight loop
self.pause_writing(outputargs_gv)
targetlbl.targetbuilder.force_generate_code()
self.start_writing()
operands = targetlbl.inputoperands
assert operands is not None
mc = self.generate_block_code(outputargs_gv, operands)
mc.JMP(rel32(targetlbl.targetaddr))
mc.done()
self.rgenop.close_mc(mc)
def finish_and_return(self, sigtoken, gv_returnvar):
gvs = [gv_returnvar]
mc = self.generate_block_code(gvs, [eax])
# --- epilogue ---
mc.MOV(esp, ebp)
mc.POP(ebp)
mc.POP(edi)
mc.POP(esi)
mc.POP(ebx)
mc.RET()
# ----------------
mc.done()
self.rgenop.close_mc(mc)
def pause_writing(self, alive_gv):
self.paused_alive_gv = alive_gv
return self
def force_generate_code(self):
alive_gv = self.paused_alive_gv
if alive_gv is not None:
self.paused_alive_gv = None
mc = self.generate_block_code(alive_gv, renaming=False)
self.set_coming_from(mc)
mc.done()
self.rgenop.close_mc(mc)
def end(self):
pass
@specialize.arg(1)
def genop1(self, opname, gv_arg):
cls = getopclass1(opname)
if cls is None: # identity
return gv_arg
op = cls(gv_arg)
self.operations.append(op)
return op
@specialize.arg(1)
def genraisingop1(self, opname, gv_arg):
cls = getopclass1(opname)
op = cls(gv_arg)
self.operations.append(op)
op_excflag = OpFetchCC(op.ccexcflag)
self.operations.append(op_excflag)
return op, op_excflag
@specialize.arg(1)
def genop2(self, opname, gv_arg1, gv_arg2):
cls = getopclass2(opname)
op = cls(gv_arg1, gv_arg2)
self.operations.append(op)
return op
@specialize.arg(1)
def genraisingop2(self, opname, gv_arg1, gv_arg2):
cls = getopclass2(opname)
op = cls(gv_arg1, gv_arg2)
self.operations.append(op)
op_excflag = OpFetchCC(op.ccexcflag)
self.operations.append(op_excflag)
return op, op_excflag
def genop_ptr_iszero(self, kind, gv_ptr):
cls = getopclass1('ptr_iszero')
op = cls(gv_ptr)
self.operations.append(op)
return op
def genop_ptr_nonzero(self, kind, gv_ptr):
cls = getopclass1('ptr_nonzero')
op = cls(gv_ptr)
self.operations.append(op)
return op
def genop_ptr_eq(self, kind, gv_ptr1, gv_ptr2):
cls = getopclass2('ptr_eq')
op = cls(gv_ptr1, gv_ptr2)
self.operations.append(op)
return op
def genop_ptr_ne(self, kind, gv_ptr1, gv_ptr2):
cls = getopclass2('ptr_ne')
op = cls(gv_ptr1, gv_ptr2)
self.operations.append(op)
return op
def genop_cast_int_to_ptr(self, kind, gv_int):
return gv_int # identity
def genop_same_as(self, kind, gv_x):
if gv_x.is_const: # must always return a var
op = OpSameAs(gv_x)
self.operations.append(op)
return op
else:
return gv_x
def genop_call(self, sigtoken, gv_fnptr, args_gv):
op = OpCall(sigtoken, gv_fnptr, list(args_gv))
self.operations.append(op)
return op
def genop_malloc_fixedsize(self, size):
# XXX boehm only, no atomic/non atomic distinction for now
op = OpCall(MALLOC_SIGTOKEN,
IntConst(gc_malloc_fnaddr()),
[IntConst(size)])
self.operations.append(op)
return op
def genop_malloc_varsize(self, varsizealloctoken, gv_size):
# XXX boehm only, no atomic/non atomic distinction for now
# XXX no overflow checking for now
opsz = OpComputeSize(varsizealloctoken, gv_size)
self.operations.append(opsz)
opmalloc = OpCall(MALLOC_SIGTOKEN,
IntConst(gc_malloc_fnaddr()),
[opsz])
self.operations.append(opmalloc)
lengthtoken, _, _ = varsizealloctoken
self.operations.append(OpSetField(lengthtoken, opmalloc, gv_size))
return opmalloc
def genop_getfield(self, fieldtoken, gv_ptr):
op = OpGetField(fieldtoken, gv_ptr)
self.operations.append(op)
return op
def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
self.operations.append(OpSetField(fieldtoken, gv_ptr, gv_value))
def genop_getsubstruct(self, (offset, fieldsize), gv_ptr):
op = OpIntAdd(gv_ptr, IntConst(offset))
self.operations.append(op)
return op
def genop_getarrayitem(self, arraytoken, gv_array, gv_index):
op = OpGetArrayItem(arraytoken, gv_array, gv_index)
self.operations.append(op)
return op
def genop_setarrayitem(self,
'green')
plt.plot(repression_threshold_results[:,0]/10000,
repression_threshold_results[:,10], color = 'blue')
# plt.axvline( 23000 )
plt.axvline( 3.14 )
# plt.fill_between(repression_threshold_results[:,0],
# repression_threshold_results[:,2] + repression_threshold_results[:,3],
# np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
# lw = 0, color = 'grey')
plt.xlabel('Repression threshold/1e4')
plt.ylabel('Coherence')
plt.ylim(0,1)
my_figure.add_subplot(633)
# plt.plot(repression_threshold_results[:,0],
plt.errorbar(repression_threshold_results[:,0]/10000,
repression_threshold_results[:,3]/10000,
yerr = repression_threshold_results[:,4]/10000, color = 'black')
plt.errorbar(repression_threshold_results[:,0]/10000,
repression_threshold_results[:,8]/10000,
yerr = repression_threshold_results[:,9]/10000, color = 'green')
plt.plot(repression_threshold_results[:,0]/10000,
repression_threshold_results[:,5]/10000, color = 'grey')
# plt.axvline( 23000 )
plt.axvline( 3.14 )
# plt.fill_between(repression_threshold_results[:,0],
# repression_threshold_results[:,2] + repression_threshold_results[:,3],
# np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
# lw = 0, color = 'grey')
plt.ylim(0,15)
plt.xlabel('Repression threshold/1e4')
plt.ylabel('Expression/1e4')
########
#
# MRNA DEGRADATION
#
########
mrna_degradation_results = np.zeros((number_of_parameter_points,12))
index = 0
for mu_m in np.linspace(0.0001,np.log(2)/15,number_of_parameter_points):
these_rna_values, these_protein_values = hes5.generate_multiple_trajectories(
number_of_trajectories = number_of_trajectories,
duration = 1500,
repression_threshold = 31400,
mRNA_degradation_rate = mu_m,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
these_langevin_rna_values, these_langevin_protein_values = hes5.generate_multiple_langevin_trajectories(
number_of_trajectories = number_of_trajectories*2,
duration = 1500*5,
repression_threshold = 31400,
mRNA_degradation_rate = mu_m,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
_, this_coherence, this_period = hes5.calculate_power_spectrum_of_trajectories(
these_protein_values )
_, this_langevin_coherence, this_langevin_period = hes5.calculate_power_spectrum_of_trajectories(
these_langevin_protein_values )
_, this_ode_mean = hes5.calculate_steady_state_of_ode(
repression_threshold = 31400,
mRNA_degradation_rate = mu_m,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11
)
this_theoretical_power_spectrum = hes5.calculate_theoretical_power_spectrum_at_parameter_point(
repression_threshold = 31400,
mRNA_degradation_rate = mu_m,
protein_degradation_rate = np.log(2)/90.0,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = 29)
this_theoretical_coherence, this_theoretical_period = hes5.calculate_coherence_and_period_of_power_spectrum(
this_theoretical_power_spectrum)
#
mrna_degradation_results[index,0] = mu_m
mrna_degradation_results[index,1] = this_period
mrna_degradation_results[index,2] = this_coherence
mrna_degradation_results[index,3] = np.mean(these_protein_values[:,1:])
mrna_degradation_results[index,4] = np.std(these_protein_values[:,1:])
mrna_degradation_results[index,5] = this_ode_mean
mrna_degradation_results[index,6] = this_langevin_period
mrna_degradation_results[index,7] = this_langevin_coherence
mrna_degradation_results[index,8] = np.mean(these_langevin_protein_values[:,1:])
mrna_degradation_results[index,9] = np.std(these_langevin_protein_values[:,1:])
mrna_degradation_results[index,10] = this_theoretical_coherence
mrna_degradation_results[index,11] = this_theoretical_period
index +=1
np.save(os.path.join(os.path.dirname(__file__),
'output','mrna_degradation_results.npy'), mrna_degradation_results)
# mrna_degradation_results = np.load(os.path.join(os.path.dirname(__file__),
# 'output','mrna_degradation_results.npy'))
my_figure.add_subplot(634)
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,1], color = 'black')
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,6], color = 'green')
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,11], color = 'blue')
plt.axvline( np.log(2)/30 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.xlabel('mRNA degradation [1/min]')
plt.ylabel('Period [min]')
plt.ylim(0,700)
my_figure.add_subplot(635)
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,2], color = 'black')
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,7], color = 'green')
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,10], color = 'blue')
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.axvline( np.log(2)/30 )
# plt.fill_between(repression_threshold_results[:,0],
# repression_threshold_results[:,2] + repression_threshold_results[:,3],
# np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
# lw = 0, color = 'grey')
plt.xlabel('mRNA degradation [1/min]')
plt.ylabel('Coherence')
plt.ylim(0,1)
my_figure.add_subplot(636)
# plt.plot(repression_threshold_results[:,0],
plt.errorbar(mrna_degradation_results[:,0],
mrna_degradation_results[:,3]/10000,
yerr = mrna_degradation_results[:,4]/10000, color = 'black')
plt.errorbar(mrna_degradation_results[:,0],
mrna_degradation_results[:,8]/10000,
yerr = mrna_degradation_results[:,9]/10000, color = 'green')
plt.plot(mrna_degradation_results[:,0],
mrna_degradation_results[:,5]/10000, color = 'grey')
plt.axvline( np.log(2)/30 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.ylim(0,15)
plt.xlabel('mRNA degradation [1/min]')
plt.ylabel('Expression/1e4')
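# The sweeps in this script all repeat the same simulate-and-measure block with
# a single keyword changed. As an illustration only (the helper below is an
# assumption about how this could be factored, not part of the original
# analysis; it also skips the ODE-mean and theoretical-spectrum columns), the
# stochastic summary statistics could be collected once per parameter point
# like this. 'model_kwargs' is assumed not to contain 'duration' or
# 'equilibration_time'.
def summarise_parameter_point(**model_kwargs):
    _, protein_values = hes5.generate_multiple_trajectories(
        number_of_trajectories = number_of_trajectories,
        duration = 1500,
        equilibration_time = 1000,
        **model_kwargs)
    _, langevin_protein_values = hes5.generate_multiple_langevin_trajectories(
        number_of_trajectories = number_of_trajectories*2,
        duration = 1500*5,
        equilibration_time = 1000,
        **model_kwargs)
    _, coherence, period = hes5.calculate_power_spectrum_of_trajectories(
        protein_values)
    _, langevin_coherence, langevin_period = hes5.calculate_power_spectrum_of_trajectories(
        langevin_protein_values)
    return (period, coherence,
            np.mean(protein_values[:,1:]), np.std(protein_values[:,1:]),
            langevin_period, langevin_coherence,
            np.mean(langevin_protein_values[:,1:]), np.std(langevin_protein_values[:,1:]))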
########
#
# PROTEIN DEGRADATION
#
########
protein_degradation_results = np.zeros((number_of_parameter_points,12))
index = 0
for mu_p in np.linspace(0.0001,np.log(2)/15,number_of_parameter_points):
these_rna_values, these_protein_values = hes5.generate_multiple_trajectories(
number_of_trajectories = number_of_trajectories,
duration = 1500,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
# protein_degradation_rate = np.log(2)/90,
protein_degradation_rate = mu_p,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
these_langevin_rna_values, these_langevin_protein_values = hes5.generate_multiple_langevin_trajectories(
number_of_trajectories = number_of_trajectories*2,
duration = 1500*5,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
# protein_degradation_rate = np.log(2)/90,
protein_degradation_rate = mu_p,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
_, this_coherence, this_period = hes5.calculate_power_spectrum_of_trajectories(
these_protein_values )
_, this_langevin_coherence, this_langevin_period = hes5.calculate_power_spectrum_of_trajectories(
these_langevin_protein_values )
_, this_ode_mean = hes5.calculate_steady_state_of_ode(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30,
protein_degradation_rate = mu_p,
translation_rate = 29,
basal_transcription_rate = 11
)
this_theoretical_power_spectrum = hes5.calculate_theoretical_power_spectrum_at_parameter_point(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = mu_p,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = 29)
this_theoretical_coherence, this_theoretical_period = hes5.calculate_coherence_and_period_of_power_spectrum(
this_theoretical_power_spectrum)
#
protein_degradation_results[index,0] = mu_p
protein_degradation_results[index,1] = this_period
protein_degradation_results[index,2] = this_coherence
protein_degradation_results[index,3] = np.mean(these_protein_values[:,1:])
protein_degradation_results[index,4] = np.std(these_protein_values[:,1:])
protein_degradation_results[index,5] = this_ode_mean
protein_degradation_results[index,6] = this_langevin_period
protein_degradation_results[index,7] = this_langevin_coherence
protein_degradation_results[index,8] = np.mean(these_langevin_protein_values[:,1:])
protein_degradation_results[index,9] = np.std(these_langevin_protein_values[:,1:])
protein_degradation_results[index,10] = this_theoretical_coherence
protein_degradation_results[index,11] = this_theoretical_period
index +=1
np.save(os.path.join(os.path.dirname(__file__),
'output','protein_degradation_results.npy'), protein_degradation_results)
# protein_degradation_results = np.load(os.path.join(os.path.dirname(__file__),
# 'output','protein_degradation_results.npy'))
my_figure.add_subplot(637)
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,1], color = 'black')
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,6], color = 'green')
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,11], color = 'blue')
plt.axvline( np.log(2)/90 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.xlabel('Hes5 degradation [1/min]')
plt.ylabel('Period [min]')
plt.ylim(0,700)
my_figure.add_subplot(638)
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,2], color = 'black')
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,7], color = 'green')
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,10], color = 'blue')
plt.axvline( np.log(2)/90 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
# plt.fill_between(repression_threshold_results[:,0],
# repression_threshold_results[:,2] + repression_threshold_results[:,3],
# np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
# lw = 0, color = 'grey')
plt.xlabel('Hes5 degradation [1/min]')
plt.ylabel('Coherence')
plt.ylim(0,1)
my_figure.add_subplot(639)
# plt.plot(repression_threshold_results[:,0],
plt.errorbar(protein_degradation_results[:,0],
protein_degradation_results[:,3]/10000,
yerr = protein_degradation_results[:,4]/10000, color = 'black')
plt.errorbar(protein_degradation_results[:,0],
protein_degradation_results[:,8]/10000,
yerr = protein_degradation_results[:,9]/10000, color = 'green')
plt.plot(protein_degradation_results[:,0],
protein_degradation_results[:,5]/10000, color = 'grey')
plt.axvline( np.log(2)/90 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.ylim(0,15)
plt.xlabel('Hes5 degradation [1/min]')
plt.ylabel('Expression/1e4')
########
#
# TIME DELAY
#
########
time_delay_results = np.zeros((number_of_parameter_points,12))
index = 0
for tau in np.linspace(5.0,40.0,number_of_parameter_points):
these_rna_values, these_protein_values = hes5.generate_multiple_trajectories(
number_of_trajectories = number_of_trajectories,
duration = 1500,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = tau,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
these_langevin_rna_values, these_langevin_protein_values = hes5.generate_multiple_langevin_trajectories(
number_of_trajectories = number_of_trajectories*2,
duration = 1500*5,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = tau,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
this_theoretical_power_spectrum = hes5.calculate_theoretical_power_spectrum_at_parameter_point(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11,
transcription_delay = tau)
this_theoretical_coherence, this_theoretical_period = hes5.calculate_coherence_and_period_of_power_spectrum(
this_theoretical_power_spectrum)
_, this_coherence, this_period = hes5.calculate_power_spectrum_of_trajectories(
these_protein_values )
_, this_langevin_coherence, this_langevin_period = hes5.calculate_power_spectrum_of_trajectories(
these_langevin_protein_values )
_, this_ode_mean = hes5.calculate_steady_state_of_ode(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30,
protein_degradation_rate = np.log(2)/90,
translation_rate = 29,
basal_transcription_rate = 11
)
time_delay_results[index,0] = tau
time_delay_results[index,1] = this_period
time_delay_results[index,2] = this_coherence
time_delay_results[index,3] = np.mean(these_protein_values[:,1:])
time_delay_results[index,4] = np.std(these_protein_values[:,1:])
time_delay_results[index,5] = this_ode_mean
time_delay_results[index,6] = this_langevin_period
time_delay_results[index,7] = this_langevin_coherence
time_delay_results[index,8] = np.mean(these_langevin_protein_values[:,1:])
time_delay_results[index,9] = np.std(these_langevin_protein_values[:,1:])
time_delay_results[index,10] = this_theoretical_coherence
time_delay_results[index,11] = this_theoretical_period
index +=1
#
np.save(os.path.join(os.path.dirname(__file__),
'output','time_delay_results.npy'), time_delay_results)
# time_delay_results = np.load(os.path.join(os.path.dirname(__file__),
# 'output','time_delay_results.npy'))
my_figure.add_subplot(6,3,10)
plt.plot(time_delay_results[:,0],
time_delay_results[:,1], color = 'black')
plt.plot(time_delay_results[:,0],
time_delay_results[:,6], color = 'green')
plt.plot(time_delay_results[:,0],
time_delay_results[:,11], color = 'blue')
plt.axvline( 29.0 )
# plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.xlabel('Time delay [min]')
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.ylabel('Period [min]')
plt.ylim(0,700)
my_figure.add_subplot(6,3,11)
plt.plot(time_delay_results[:,0],
time_delay_results[:,2], color = 'black')
plt.plot(time_delay_results[:,0],
time_delay_results[:,7], color = 'green')
plt.plot(time_delay_results[:,0],
time_delay_results[:,10], color = 'blue')
plt.axvline( 29.0 )
# plt.gca().locator_params(axis='x', tight = True, nbins=4)
# plt.fill_between(repression_threshold_results[:,0],
# repression_threshold_results[:,2] + repression_threshold_results[:,3],
# np.max(repression_threshold_results[:,2]- repression_threshold_results[:,3],0),
# lw = 0, color = 'grey')
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.xlabel('Time delay [min]')
plt.ylabel('Coherence')
plt.ylim(0,1)
my_figure.add_subplot(6,3,12)
# plt.plot(repression_threshold_results[:,0],
plt.errorbar(time_delay_results[:,0],
time_delay_results[:,3]/10000,
yerr = time_delay_results[:,4]/10000, color = 'black')
plt.errorbar(time_delay_results[:,0],
time_delay_results[:,8]/10000,
yerr = time_delay_results[:,9]/10000, color = 'green')
plt.plot(time_delay_results[:,0],
time_delay_results[:,5]/10000, color = 'grey')
# plt.axvline( 23000 )
plt.axvline( 29.0 )
plt.gca().locator_params(axis='x', tight = True, nbins=4)
plt.ylim(0,15)
plt.xlabel('Time delay [min]')
plt.ylabel('Expression/1e4')
########
#
# TRANSLATION RATE
#
########
translation_rate_results = np.zeros((number_of_parameter_points,12))
index = 0
for alpha_p in np.linspace(1.0,100.0,number_of_parameter_points):
these_rna_values, these_protein_values = hes5.generate_multiple_trajectories(
number_of_trajectories = number_of_trajectories,
duration = 1500,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
# translation_rate = 29,
translation_rate = alpha_p,
basal_transcription_rate = 11,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
these_langevin_rna_values, these_langevin_protein_values = hes5.generate_multiple_langevin_trajectories(
number_of_trajectories = number_of_trajectories*2,
duration = 1500*5,
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
# translation_rate = 29,
translation_rate = alpha_p,
basal_transcription_rate = 11,
transcription_delay = 29,
initial_mRNA = 3,
initial_protein = 31400,
equilibration_time = 1000)
_, this_coherence, this_period = hes5.calculate_power_spectrum_of_trajectories(
these_protein_values )
_, this_langevin_coherence, this_langevin_period = hes5.calculate_power_spectrum_of_trajectories(
these_langevin_protein_values )
_, this_ode_mean = hes5.calculate_steady_state_of_ode(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30,
protein_degradation_rate = np.log(2)/90,
translation_rate = alpha_p,
basal_transcription_rate = 11
)
this_theoretical_power_spectrum = hes5.calculate_theoretical_power_spectrum_at_parameter_point(
repression_threshold = 31400,
mRNA_degradation_rate = np.log(2)/30.0,
protein_degradation_rate = np.log(2)/90,
translation_rate = alpha_p,
basal_transcription_rate = 11,
transcription_delay = 29)
this_theoretical_coherence, this_theoretical_period = hes5.calculate_coherence_and_period_of_power_spectrum(
this_theoretical_power_spectrum)
#
translation_rate_results[index,0] = alpha_p
translation_rate_results[index,1] = this_period
translation_rate_results[index,2] = this_coherence
translation_rate_results[index,3] = np.mean(these_protein_values[:,1:])
translation_rate_results[index,4] = np.std(these_protein_values[:,1:])
translation_rate_results[index,5] = this_ode_mean
translation_rate_results[index,6] = this_langevin_period
translation_rate_results[index,7] = this_langevin_coherence
translation_rate_results[index,8] = np.mean(these_langevin_protein_values[:,1:])
translation_rate_results[index,9] = np.std(these_langevin_protein_values[:,1:])
translation_rate_results[index,10] = this_theoretical_coherence
translation_rate_results[index,11] = this_theoretical_period
index +=1
np.save(os.path.join(os.path.dirname(__file__),
'output','translation_rate_results.npy'), translation_rate_results)
# translation_rate_results = np.load(os.path.join(os.path.dirname(__file__),
# 'output','translation_rate_results.npy'))
my_figure.add_subplot(6,3,13)
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,1], color = 'black')
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,6], color = 'green')
plt.plot(translation_rate_results[:,0],
translation_rate_results[:,11], color = 'blue')
plt.axvline(
"""
For comparing a predicted interaction XML against a gold standard
"""
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/..")
#print os.path.dirname(os.path.abspath(__file__))+"/.."
from Utils.ProgressCounter import ProgressCounter
from optparse import OptionParser
import Core.ExampleUtils as ExampleUtils
from Core.IdSet import IdSet
import Utils.TableUtils as TableUtils
import Core.SentenceGraph as SentenceGraph
import copy
# compareEntitiesByGENIARelaxedOffsetMethod below uses the Range offset
# helpers; the exact import path is assumed here and may need adjusting.
import Utils.Range as Range
from collections import defaultdict
# for entities to match, they have to have the same head offsets and same type
def compareEntitiesSimple(e1,e2,tokens=None):
#if not "headOffset" in e1:
# raise Exception("Entity " + str(e1.get("id")) + " has no 'headOffset' attribute")
#if not "headOffset" in e2:
# raise Exception("Entity " + str(e2.get("id")) + " has no 'headOffset' attribute")
if e1.get("headOffset") == e2.get("headOffset") and e1.get("type") == e2.get("type"):
return True
else:
return False
def compareEntitiesStrict(e1,e2,tokens=None):
# HORRIBLE HACK
if e1.get("charOffset")[:-1] == e1.get("headOffset")[:-1]:
e1.set("charOffset", e1.get("headOffset"))
if e2.get("charOffset")[:-1] == e2.get("headOffset")[:-1]:
e2.set("charOffset", e2.get("headOffset"))
if e1.get("charOffset") == e2.get("charOffset") and e1.get("type") == e2.get("type"):
return True
else:
return False
# not used
def compareEntitiesByGENIARelaxedOffsetMethod(e1, e2, tokens):
e1Offset = Range.charOffsetToSingleTuple(e1.get("charOffset"))
e2Offset = Range.charOffsetToSingleTuple(e2.get("charOffset"))
goldOffset = [99999999999,-999999999999999]
for i in range(len(tokens)):
token = tokens[i]
tokenOffset = Range.charOffsetToSingleTuple(token.get("charOffset"))
if Range.overlap(tokenOffset,e2Offset):
if i > 0:
prevOffset = Range.charOffsetToSingleTuple(tokens[i-1].get("charOffset"))
else:
prevOffset = tokenOffset
if goldOffset[0] > prevOffset[0]:
goldOffset[0] = prevOffset[0]
if i < len(tokens)-1:
nextOffset = Range.charOffsetToSingleTuple(tokens[i+1].get("charOffset"))
else:
nextOffset = tokenOffset
if goldOffset[1] < nextOffset[1]:
goldOffset[1] = nextOffset[1]
# the predicted span must fall within the relaxed gold span
if e1Offset[0] >= goldOffset[0] and e1Offset[1] <= goldOffset[1]:
return True
else:
return False
# Produces a mapping that connects matching entities from prediction (from)
# to gold standard (to).
def mapEntities(entitiesFrom, entitiesTo, tokens=None, compareFunction=compareEntitiesSimple):
entityMap = {}
for entityFrom in entitiesFrom:
entityMap[entityFrom] = []
for entityTo in entitiesTo:
if compareFunction(entityFrom, entityTo, tokens):
entityMap[entityFrom].append(entityTo)
return entityMap
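# Minimal illustration (hypothetical data; the helper is not called anywhere in
# the evaluator): what the mapping returned by mapEntities looks like when one
# predicted entity matches a gold entity on head offset and type and the other
# does not.
def _mapEntities_example():
    from xml.etree import ElementTree as ET
    pred = ET.Element("entity", {"headOffset": "0-4", "type": "Protein"})
    miss = ET.Element("entity", {"headOffset": "9-12", "type": "Gene"})
    gold = ET.Element("entity", {"headOffset": "0-4", "type": "Protein"})
    entityMap = mapEntities([pred, miss], [gold])
    assert entityMap[pred] == [gold]
    assert entityMap[miss] == []
    return entityMap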
## Splits merged types generated from overlapping entities/edges into their components
#def getElementTypes(element):
# typeName = element.get("type")
# if typeName.find("---") != -1:
# return typeName.split("---")
# else:
# return [typeName]
def getEventPredictions(entityMap, allGoldEntities, interactionMap, classSet, negativeClassId):
examples = []
predictions = []
id = "Unknown.x0"
# analyze events
for predictedEntity, goldEntities in entityMap.iteritems():
if predictedEntity.get("given") == "True":
continue
found = False
predictedEntityType = predictedEntity.get("type")
for goldEntity in goldEntities:
goldEntityType = goldEntity.get("type")
if predictedEntityType != goldEntityType: # whatever the arguments, this is a false positive
examples.append( [id, classSet.getId(goldEntity.get("type")), None, None] )
predictions.append( [classSet.getId(predictedEntity.get("type"))] )
else: # mapped entity types match, check the arguments
if interactionMap[predictedEntity.get("id")]: # arguments are correct, this is a true positive
examples.append( [id, classSet.getId(goldEntity.get("type")), None, None] )
predictions.append( [classSet.getId(predictedEntity.get("type"))] )
else: # an error in arguments, this is a false positive for the type of the entity
examples.append( [id, negativeClassId, None, None] )
predictions.append( [classSet.getId(predictedEntity.get("type"))] )
found = True # entitiesTo has at least one item
if not found: # false positive prediction due to entity span not being in gold
examples.append( [id, negativeClassId, None, None] )
predictions.append( [classSet.getId(predictedEntity.get("type"))] )
# mappedTargetEntities will contain all gold entities to which at least one
# predicted entity is mapped. Gold entities not in mappedTargetEntities are
# undetected ones, i.e. false negatives.
mappedTargetEntities = set()
for eList in entityMap.values():
for e in eList:
mappedTargetEntities.add(e)
for e in allGoldEntities:
if e.get("given") == "True":
continue
if not e in mappedTargetEntities: # false negative gold
examples.append( [id, classSet.getId(e.get("type")), None, None] )
predictions.append( [negativeClassId] )
#predictions.append( ((id, classSet.getId(e.get("type"))), negativeClassId, None, None) )
assert len(examples) == len(predictions)
return examples, predictions
# Uses the mapped entities to give predictions for a single sentence
def getEntityPredictions(entityMap, targetEntities, classSet, negativeClassId):
examples = []
predictions = []
id = "Unknown.x0"
for entityFrom, entitiesTo in entityMap.iteritems():
if entityFrom.get("given") == "True":
continue
found = False
for entityTo in entitiesTo:
examples.append( [id, classSet.getId(entityTo.get("type")), None, None] )
predictions.append( [classSet.getId(entityFrom.get("type"))] )
#predictions.append( ((id, classSet.getId(entityTo.get("type"))), classSet.getId(entityFrom.get("type")), None, None) )
found = True # entitiesTo has at least one item
if not found: # false positive prediction
examples.append( [id, negativeClassId, None, None] )
predictions.append( [classSet.getId(entityFrom.get("type"))] )
#predictions.append( ((id, negativeClassId), classSet.getId(entityFrom.get("type")), None, None) )
# mappedTargetEntities will contain all gold entities to which at least one
# predicted entity is mapped. Gold entities not in mappedTargetEntities are
# undetected ones, i.e. false negatives.
mappedTargetEntities = set()
for eList in entityMap.values():
for e in eList:
mappedTargetEntities.add(e)
for e in targetEntities:
if e.get("given") == "True":
continue
if not e in mappedTargetEntities: # false negative gold
examples.append( [id, classSet.getId(e.get("type")), None, None] )
predictions.append( [negativeClassId] )
#predictions.append( ((id, classSet.getId(e.get("type"))), negativeClassId, None, None) )
assert len(examples) == len(predictions)
return examples, predictions
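# Sketch (an assumption about downstream use, not code from the original
# evaluator): the parallel 'examples' and 'predictions' lists built above can
# be reduced to per-class true positive, false positive and false negative
# counts as follows, with negativeClassId standing for "no entity".
def _count_outcomes(examples, predictions, negativeClassId):
    outcome_counts = defaultdict(lambda: {"tp": 0, "fp": 0, "fn": 0})
    for example, prediction in zip(examples, predictions):
        goldClass = example[1]
        predictedClass = prediction[0]
        if predictedClass != negativeClassId:
            if goldClass == predictedClass:
                outcome_counts[predictedClass]["tp"] += 1
            else:
                outcome_counts[predictedClass]["fp"] += 1
                if goldClass != negativeClassId:
                    outcome_counts[goldClass]["fn"] += 1
        elif goldClass != negativeClassId:
            outcome_counts[goldClass]["fn"] += 1
    return outcome_counts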
# Uses mapped entities and predicted and gold interactions to provide
# predictions for the interactions
def getInteractionPredictions(interactionsFrom, interactionsTo, entityMap, classSet, negativeClassId, counts, verbose=False):
examples = []
predictions = []
id = "Unknown.x0"
fromEntityIdToElement = {}
for key in entityMap.keys():
entityId = key.get("id")
assert not fromEntityIdToElement.has_key(entityId), entityId
fromEntityIdToElement[entityId] = key
# Keep track of false positives caused by false positive entities
falseEntity = defaultdict(lambda: defaultdict(int))
toInteractionsWithPredictions = set()
events = {}
for predictedEntity in entityMap.keys():
events[predictedEntity.get("id")] = True # mark all events as positive (if no arguments, gold or predicted, remains positive)
for interactionFrom in interactionsFrom:
goldE1Ids = []
goldE2Ids = []
if interactionFrom.get("e1") not in fromEntityIdToElement or interactionFrom.get("e2") not in fromEntityIdToElement:
print >> sys.stderr, "Warning, interaction", interactionFrom.get("id"), [interactionFrom.get("e1"), interactionFrom.get("e2")], "links to a non-existing entity"
else:
# Select gold entities for entity-ids referred to in the predicted interaction
for goldEntity in entityMap[fromEntityIdToElement[interactionFrom.get("e1")]]:
goldE1Ids.append(goldEntity.get("id"))
for goldEntity in entityMap[fromEntityIdToElement[interactionFrom.get("e2")]]:
goldE2Ids.append(goldEntity.get("id"))
if len(goldE1Ids) == 0 or len(goldE2Ids) == 0:
falseEntity[interactionFrom.get("type")][0] += 1
found = False
# Go through all gold interactions
for interactionTo in interactionsTo:
if interactionTo.get("e1") in goldE1Ids and interactionTo.get("e2") in goldE2Ids: # this gold interaction matches the predicted one
toInteractionsWithPredictions.add(interactionTo)
examples.append( [id, classSet.getId(interactionTo.get("type")),None,None] )
predictions.append( [classSet.getId(interactionFrom.get("type"))] )
found = True
if verbose:
print "predicted", counts["predicted"], interactionFrom.get("id"), "matches gold", interactionTo.get("id")
if not found: # false positive prediction
examples.append( [id,negativeClassId,None,None] )
predictions.append( [classSet.getId(interactionFrom.get("type"))] )
events[interactionFrom.get("e1")] = False # false positive argument -> incorrect event
if verbose:
print "predicted", counts["predicted"], interactionFrom.get("id"), "is a false positive"
counts["predicted"] +=1
# Get ids of gold entities that had a correct prediction
reverseEntityMap = {}
for predictedEntity, goldEntities in entityMap.iteritems():
for goldEntity in goldEntities:
#assert goldEntity.get("id") not in reverseEntityMap, (predictedEntity.get("id"), [x.get("id") for x in goldEntities])
# One gold entity can map to more than one predicted entities,
# due to predicted entities created by splitting a prediction
if goldEntity.get("id") not in reverseEntityMap:
reverseEntityMap[goldEntity.get("id")] = []
reverseEntityMap[goldEntity.get("id")].append(predictedEntity.get("id"))
mappedGoldEntities = reverseEntityMap.keys()
# Process gold interactions that did not have a prediction
for interactionTo in interactionsTo:
if interactionTo not in toInteractionsWithPredictions: # false negative gold
examples.append( [id, classSet.getId(interactionTo.get("type")), None, None] )
predictions.append( [negativeClassId] )
#predictions.append( ((id, classSet.getId(interactionTo.get("type"))), negativeClassId, None, None) )
if interactionTo.get("e1") not in mappedGoldEntities or interactionTo.get("e2") not in mappedGoldEntities:
falseEntity[interactionTo.get("type")][1] += 1
if interactionTo.get("e1") in reverseEntityMap: # mark an event false due to a missing gold interaction
for predictedEntityId in reverseEntityMap[interactionTo.get("e1")]:
events[predictedEntityId] = False # missing argument -> incorrect event
if verbose:
print "gold", interactionTo.get("id"), "has no matching prediction"
assert len(examples) == len(predictions)
return examples, predictions, falseEntity, events
# Compares a prediction (from) to a gold (to) sentence
def processDocument(fromDocumentSentences, toDocumentSentences, target, classSets, negativeClassId, entityMatchFunction, verbose=False, counts=None):
#splitMerged(fromSentence) # modify element tree to split merged elements into multiple elements
if toDocumentSentences != None:
assert len(fromDocumentSentences) == len(toDocumentSentences)
else:
toDocumentSentences = [None] * len(fromDocumentSentences)
entityMap = {}
allToEntities = []
for fromSentence, toSentence in zip(fromDocumentSentences, toDocumentSentences):
if toSentence != None:
assert fromSentence.sentence.get("id") == toSentence.sentence.get("id")
entitiesFrom = []
for e in fromSentence.entities:
if e.get("type") != "neg":
entitiesFrom.append(e)
entitiesTo = []
if toSentence != None:
entitiesTo = toSentence.entities
allToEntities.extend(entitiesTo)
tokens = fromSentence.tokens
# map predicted entities to gold entities
sentenceEntityMap = mapEntities(entitiesFrom, entitiesTo, tokens, compareFunction=entityMatchFunction)
for entity in sentenceEntityMap.keys():
assert entity not in entityMap
entityMap[entity] = sentenceEntityMap[entity]
# select interactions
fromInteractions = []
for fromSentence in fromDocumentSentences:
for interaction in fromSentence.interactions + fromSentence.pairs:
if interaction.get("type") != "neg":
fromInteractions.append(interaction)
toInteractions = []
for toSentence in toDocumentSentences:
if toSentence != None:
toInteractions.extend(toSentence.interactions)
toInteractions.extend(toSentence.pairs)
# get predictions for predicted edges/entities vs. gold edges/entities
entityPredictions = []
interactionPredictions = []
falseEntity = defaultdict(lambda: defaultdict(int))
if target == "entities" or target == "both":
entityExamples, entityPredictions = getEntityPredictions(entityMap, allToEntities, classSets["entity"], negativeClassId)
if target == "interactions" or target ==
advice_topic_page.page_ptr,
# NB: article_page is deliberately NOT in this list
]
@pytest.mark.skip(reason='We need more of the page tree ported before we can test this.')
def test_base_content_page__ancestors_in_app__involving_folder_pages():
pass
@pytest.mark.django_db
def test_base_content_page__get_breadcrumbs(
domestic_homepage,
domestic_site,
):
advice_topic_page = TopicLandingPageFactory(
title='Advice',
parent=domestic_homepage,
)
article_page = ArticlePageFactory(
article_title='test article',
parent=advice_topic_page,
)
assert article_page.get_breadcrumbs() == [
# NB: domestic homepage is deliberately NOT in this list
{
'title': advice_topic_page.title,
'url': advice_topic_page.url,
},
{
'title': article_page.title,
'url': article_page.url,
}
# NB: article_page IS in this list
]
@pytest.mark.skip(reason='We need more of the page tree ported before we can test this.')
def test_base_content_page__get_breadcrumbs__using_breadcrumbs_label_field():
pass
class TopicLandingPageTests(SetUpLocaleMixin, WagtailPageTests):
def test_allowed_parents(self):
self.assertAllowedParentPageTypes(
TopicLandingPage,
{
DomesticHomePage,
GreatDomesticHomePage,
},
)
def test_allowed_children(self):
self.assertAllowedSubpageTypes(
TopicLandingPage,
{
ArticlePage,
ArticleListingPage,
},
)
def test_slug_is_autogenerated(self):
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
hello_page = DomesticHomePage(title='Hello world')
homepage.add_child(instance=hello_page)
advice_topic_page = TopicLandingPage(
title='Advice',
)
homepage.add_child(instance=advice_topic_page)
retrieved_page_1 = TopicLandingPage.objects.get(id=advice_topic_page.id)
self.assertEqual(retrieved_page_1.slug, 'advice')
def test_child_pages(self):
advice_topic_page = TopicLandingPageFactory(
title='Advice',
)
article_list_one = ArticleListingPage(
title='list one',
landing_page_title='List One',
)
article_list_two = ArticleListingPage(
title='list two',
landing_page_title='List Two',
)
article_list_three = ArticleListingPage(
title='list three',
landing_page_title='List Three',
)
# note deliberate out-of-sequence ordering here
advice_topic_page.add_child(instance=article_list_two)
advice_topic_page.add_child(instance=article_list_one)
advice_topic_page.add_child(instance=article_list_three)
advice_topic_page.refresh_from_db()
self.assertEqual(
[x for x in advice_topic_page.child_pages()],
[article_list_two, article_list_one, article_list_three],
)
article_list_three.live = False
article_list_three.save()
self.assertEqual(
[x for x in advice_topic_page.child_pages()],
[article_list_two, article_list_one],
)
class MarketsTopicLandingPageTests(SetUpLocaleMixin, WagtailPageTests):
def test_allowed_parents(self):
self.assertAllowedParentPageTypes(
MarketsTopicLandingPage,
{
DomesticHomePage,
GreatDomesticHomePage,
},
)
def test_allowed_children(self):
self.assertAllowedSubpageTypes(
MarketsTopicLandingPage,
{CountryGuidePage},
)
def test_slug_is_autogenerated(self):
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
hello_page = DomesticHomePage(title='Hello world')
homepage.add_child(instance=hello_page)
markets_topic_page = MarketsTopicLandingPage(
title='Markets',
)
homepage.add_child(instance=markets_topic_page)
retrieved_page_2 = MarketsTopicLandingPage.objects.get(
id=markets_topic_page.id,
)
self.assertEqual(retrieved_page_2.slug, 'markets')
def _make_country_guide_pages(self, parent_page, count):
_now = tz_now()
for i in range(count):
CountryGuidePageFactory(
parent=parent_page,
title=f'Test GCP {i}',
live=True,
last_published_at=_now - timedelta(minutes=i),
)
def test_sort_results(self):
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
markets_topic_page = MarketsTopicLandingPage(title='Markets')
homepage.add_child(instance=markets_topic_page)
self._make_country_guide_pages(markets_topic_page, 23)
pages = CountryGuidePage.objects.all()
# Order by title
request = RequestFactory().get('/?sortby=title')
sorted_pages = markets_topic_page.sort_results(
request,
pages,
)
self.assertEqual(sorted_pages[0], CountryGuidePage.objects.get(title='Test GCP 0'))
self.assertEqual([x for x in pages.order_by('title')], [y for y in sorted_pages])
# Last published at
request = RequestFactory().get('/?sortby=last_published_at')
sorted_pages = markets_topic_page.sort_results(
request,
pages,
)
# Note that the results are flipped from ascending to descending to show
# most recently edited first
self.assertEqual(sorted_pages[0], pages.order_by('-last_published_at').first())
self.assertEqual([x for x in pages.order_by('-last_published_at')], [y for y in sorted_pages])
def test_sort_results__sanitises_input(self):
mock_pages_queryset = mock.Mock(name='mock_pages_queryset')
markets_page = MarketsTopicLandingPageFactory()
for bad_args in (
'?sortby=body',
'?sortby=created_at',
'?sortby=;delete * from auth_user',
'?sortby=;delete%20*%20from%20auth_user',
'?other=foo',
):
with self.subTest(bad_args=bad_args):
mock_pages_queryset.order_by.reset_mock()
request = RequestFactory().get(f'/{bad_args}')
markets_page.sort_results(
request,
mock_pages_queryset,
)
# 'title' is the fallback sort_by field
mock_pages_queryset.order_by.assert_called_once_with('title')
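# A hypothetical sketch of the whitelist-based sort_results implementation
# that the two tests above assume (the real method lives on the model in
# domestic.models and may differ in detail):
#
#     def sort_results(self, request, pages):
#         sort_by = request.GET.get('sortby', 'title')
#         if sort_by not in ('title', 'last_published_at'):
#             sort_by = 'title'  # fall back to title on unexpected input
#         if sort_by == 'last_published_at':
#             return pages.order_by('-last_published_at')
#         return pages.order_by(sort_by)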
def test_get_context(self):
request = RequestFactory().get('/?sortby=last_published_at')
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
markets_topic_page = MarketsTopicLandingPage(title='Markets')
homepage.add_child(instance=markets_topic_page)
self._make_country_guide_pages(markets_topic_page, 21)
output = markets_topic_page.get_context(request)
self.assertEqual(len(output['paginated_results']), 18)
self.assertEqual(output['sortby'], 'last_published_at')
def test_get_context__pagination(self):
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
markets_topic_page = MarketsTopicLandingPage(title='Markets')
homepage.add_child(instance=markets_topic_page)
self._make_country_guide_pages(markets_topic_page, 21)
assert CountryGuidePage.objects.count() == 21
request = RequestFactory().get('/?page=1') # 1-18 should be on page 1
output = markets_topic_page.get_context(request)
self.assertEqual(len(output['paginated_results']), 18)
self.assertEqual(
output['paginated_results'][0],
CountryGuidePage.objects.first(),
)
output = markets_topic_page.get_context(request)
request = RequestFactory().get('/?page=2') # 19-21 should be on page 2
output = markets_topic_page.get_context(request)
self.assertEqual(len(output['paginated_results']), 3)
# final result should be the last CGP
self.assertEqual(
output['paginated_results'][2],
CountryGuidePage.objects.order_by('title').last(),
)
def test_get_context__handles_paginator_abuse(self):
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
markets_topic_page = MarketsTopicLandingPage(title='Markets')
homepage.add_child(instance=markets_topic_page)
self._make_country_guide_pages(markets_topic_page, 21)
for bad_args in (
'?page=112312312312413124', # will raise EmptyPage
'?page=BAD WORDS', # will raise PageNotAnInteger
'?page=;delete * from auth_user', # will raise PageNotAnInteger
'?page=;delete%20*%20from%20auth_user', # will raise PageNotAnInteger
):
with self.subTest(bad_args=bad_args):
request = RequestFactory().get(f'/{bad_args}')
output = markets_topic_page.get_context(request)
self.assertEqual(len(output['paginated_results']), 18)
# defaults to the first page of results
self.assertEqual(
output['paginated_results'][0],
CountryGuidePage.objects.first(),
)
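# A hypothetical sketch (not the real model code) of the pagination behaviour
# that test_get_context and test_get_context__handles_paginator_abuse assume:
# 18 country guides per page, falling back to page 1 when the requested page
# number is not an integer or is out of range.
def _paginate_markets_sketch(pages, page_arg, per_page=18):
    from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
    paginator = Paginator(pages, per_page)
    try:
        return paginator.page(page_arg)
    except (PageNotAnInteger, EmptyPage):
        return paginator.page(1)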
class MarketsTopicLandingPageFilteringTests(SetUpLocaleMixin, WagtailPageTests):
fixtures = ['markets_filtering_fixtures.json']
def setUp(self):
# Ensure we have the expected data loaded (from the fixture)
assert core_models.IndustryTag.objects.count() == 42
assert core_models.Region.objects.count() == 22
assert core_models.Country.objects.count() == 269
DomesticHomePageFactory(slug='root')
homepage = DomesticHomePage.objects.get(url_path='/')
self.markets_topic_page = MarketsTopicLandingPage(title='Markets')
homepage.add_child(instance=self.markets_topic_page)
def _make_simplistic_country_guide_pages(self, parent_page, count):
for i in range(count):
CountryGuidePageFactory(
parent=parent_page,
title=f'Test GCP {i}',
live=True,
)
def _build_market_guide_for_filtering_tests(self, parent_page):
self.country_lookup = {}
for country_name in [
'Brazil',
'Australia',
'France',
'New Zealand',
'Germany',
'United States',
]:
self.country_lookup[country_name] = CountryGuidePageFactory(
parent=parent_page,
title=country_name,
live=True,
country=core_models.Country.objects.get(
name=country_name,
),
)
_get_tag = core_models.IndustryTag.objects.get
self.country_lookup['Australia'].tags.add(_get_tag(name='Sport'))
self.country_lookup['Brazil'].tags.add(_get_tag(name='Aerospace'))
self.country_lookup['Brazil'].tags.add(_get_tag(name='Engineering'))
self.country_lookup['France'].tags.add(_get_tag(name='Aerospace'))
self.country_lookup['France'].tags.add(_get_tag(name='Food and drink'))
self.country_lookup['France'].tags.add(_get_tag(name='Technology'))
self.country_lookup['Germany'].tags.add(_get_tag(name='Technology'))
self.country_lookup['United States'].tags.add(_get_tag(name='Leisure and tourism'))
self.country_lookup['New Zealand'].tags.add(_get_tag(name='Leisure and tourism'))
for cgp in self.country_lookup.values():
cgp.save() # To persist the tags
def test_get_relevant_markets__no_filtering(self):
self._make_simplistic_country_guide_pages(self.markets_topic_page, 23)
request = RequestFactory().get('/markets/')
self.assertEqual(
self.markets_topic_page.get_relevant_markets(request).count(),
23,
)
def test_get_relevant_markets__filtering__single_sector(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?sector=Aerospace')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['Brazil'],
self.country_lookup['France'],
],
)
def test_get_relevant_markets__filtering__multiple_sectors(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?sector=Aerospace&sector=Technology')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['Brazil'],
self.country_lookup['France'],
self.country_lookup['Germany'],
],
)
request = RequestFactory().get('/markets/?sector=Sport&sector=Leisure+and+tourism')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['Australia'],
self.country_lookup['New Zealand'],
self.country_lookup['United States'],
],
)
def test_get_relevant_markets__filtering__single_region(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?region=Western+Europe')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['France'],
self.country_lookup['Germany'],
],
)
def test_get_relevant_markets__filtering__multiple_regions(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?region=Western+Europe&region=Oceania')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['Australia'],
self.country_lookup['France'],
self.country_lookup['Germany'],
self.country_lookup['New Zealand'],
],
)
def test_get_relevant_markets__filtering__single_region_and_single_sector(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?sector=Aerospace&region=South+America')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['Brazil'],
],
)
request = RequestFactory().get('/markets/?sector=Leisure+and+tourism&region=North+America')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['United States'],
],
)
request = RequestFactory().get('/markets/?region=Western+Europe&sector=Technology')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['France'],
self.country_lookup['Germany'],
],
)
def test_get_relevant_markets__filtering__multiple_regions_and_sectors(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?sector=Aerospace&sector=Sport&region=South+America&region=Oceania')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual(
[x for x in results],
[
self.country_lookup['Australia'],
self.country_lookup['Brazil'],
],
)
def test_get_relevant_markets__no_results(self):
self._build_market_guide_for_filtering_tests(
parent_page=self.markets_topic_page,
)
request = RequestFactory().get('/markets/?sector=Mining')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual([x for x in results], [])
request = RequestFactory().get('/markets/?region=Antarctica')
results = self.markets_topic_page.get_relevant_markets(request)
self.assertEqual([x for x in results], [])
@pytest.mark.django_db
def test_markets_page__no_results__page_content(
domestic_homepage,
domestic_site,
client,
):
markets_topic_page = MarketsTopicLandingPageFactory(
title='Markets',
slug='markets',
parent=domestic_homepage,
)
response = client.get(markets_topic_page.url + '?region=Antarctica')
soup = BeautifulSoup(response.content, 'html.parser')
body_text = soup.get_text().replace(' ', '').replace('\n', '')
links = soup.find_all('a')
# lack of space `inAntarctica` is correct for this test, where we've stripped whitespace
assert ("Currently, we don't have any market guides with information inAntarctica.") in body_text
assert (
'There are other ways the Department for International Trade '
'can help you sell your product in an overseas market.'
) in body_text
# Brittle tests warning
assert str(links[21]) == (
'<a class="link" href="http://exred.trade.great:8007/export-opportunities/">'
'Browse our export opportunities service to find opportunities to sell your product in overseas markets</a>'
)
assert str(links[22]) == (
'<a class="link" href="http://exred.trade.great:8007/contact/office-finder">'
'Get in touch with a trade adviser to discuss your export business plan</a>'
)
assert str(links[23]) == ('<a class="view-markets link bold margin-top-15" href="/markets/">Clear all filters</a>')
class ArticleListingPageTests(SetUpLocaleMixin, WagtailPageTests):
def test_allowed_parents(self):
self.assertAllowedParentPageTypes(
ArticleListingPage,
{
CountryGuidePage,
TopicLandingPage,
},
)
def test_allowed_children(self):
self.assertAllowedSubpageTypes(
ArticleListingPage,
{
ArticlePage,
},
)
def test_get_articles(self):
listing_page = ArticleListingPageFactory(
title='Test listing page',
landing_page_title='Test Listing Page',
)
for i in range(5):
_title = f'Article {i}'
ArticlePageFactory(title=_title, article_title=_title, parent=listing_page)
last_article = ArticlePage.objects.last()
orphan_article = ArticlePageFactory(
title='Orphan',
article_title='Orphan',
parent=None,
)
self.assertEqual(
# QuerySets are not directly comparable
[x for x in listing_page.get_articles()],
[x for x in ArticlePage.objects.exclude(id=orphan_article.id)],
)
last_article.live = False
last_article.save()
self.assertEqual(
# QuerySets are not directly comparable
[x for x in listing_page.get_articles()],
[
x
for x in ArticlePage.objects.exclude(
id__in=[orphan_article.id, last_article.id],
)
],
)
def test_get_articles_count(self):
listing_page = ArticleListingPageFactory(
title='Test listing page',
landing_page_title='Test Listing Page',
)
for i in range(5):
_title = f'Article {i}'
ArticlePageFactory(title=_title, article_title=_title, parent=listing_page)
last_article = ArticlePage.objects.last()
ArticlePageFactory(
title='Orphan',
article_title='Orphan',
parent=None,
)
self.assertEqual(ArticlePage.objects.count(), 6)
self.assertEqual(listing_page.get_articles_count(), 5)
last_article.live = False
last_article.save()
self.assertEqual(listing_page.get_articles_count(), 4)
class ArticlePageTests(SetUpLocaleMixin, WagtailPageTests):
def test_allowed_parents(self):
self.assertAllowedParentPageTypes(
ArticlePage,
{
CountryGuidePage,
StructuralPage,
ArticleListingPage,
TopicLandingPage,
},
)
def test_allowed_children(self):
self.assertAllowedSubpageTypes(
ArticlePage,
{},
)
def test_get_context(self):
request = RequestFactory().get('/example-path/')
page = ArticlePageFactory(
title='Test Article Page',
article_title='Test Article',
)
# ArticlePage subclasses SocialLinksPageMixin, which populates
# the 'social_links' key in the context
with mock.patch('domestic.models.build_social_links') as mock_build_social_links:
output = page.get_context(request=request)
mock_build_social_links.assert_called_once_with(request, 'Test Article Page')
assert 'social_links' in output
@pytest.mark.django_db
def test_article_page_get_absolute_url(domestic_site, domestic_homepage, en_locale):
page = ArticlePageFactory(
title='Test Article Page',
article_title='Test Article',
parent=domestic_homepage,
)
assert page.get_url() == '/test-article-page/'
with override_settings(BASE_URL='https://example.com'):
assert page.get_absolute_url() == 'https://example.com/test-article-page/'
# also confirm trailing slash on BASE_URL is handled
with override_settings(BASE_URL='https://example.com/'):
assert page.get_absolute_url() == 'https://example.com/test-article-page/'
@pytest.mark.parametrize(
'related_page_data',
(
(
{'title': 'Article ONE', 'rel_name': 'related_page_one'},
{'title': 'Article TWO', 'rel_name': 'related_page_two'},
{'title': 'Article THREE', 'rel_name': 'related_page_three'},
),
(
{'title': 'Article ONE', 'rel_name': 'related_page_one'},
{'title': 'Article TWO', 'rel_name': 'related_page_two'},
),
(
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functional test for GradientDescent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import diag_jacobian
from tensorflow.python.framework import test_util as tf_test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.test_all_tf_execution_regimes
class StochasticGradientLangevinDynamicsOptimizerTest(test_util.TestCase):
def testBasic(self):
if tf.executing_eagerly():
return
for dtype in [tf.half, tf.float32, tf.float64]:
with self.cached_session():
var0 = tf.Variable([1.1, 2.1], dtype=dtype)
var1 = tf.Variable([3., 4.], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
decay_rate = 0.53
sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(
3., preconditioner_decay_rate=decay_rate)
sgd_op = sgd_optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(tf1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))
self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
grads_scaled = (0.5 * 0.1 /
np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))
# Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero
# tensor
self.assertAllCloseAccordingToType(
[1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],
self.evaluate(var0))
grads_scaled = (0.5 * 0.01 / np.sqrt(
decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))
self.assertAllCloseAccordingToType(
[3. - 3. * grads_scaled, 4. - 3. * grads_scaled],
self.evaluate(var1))
self.assertAllCloseAccordingToType(
1, self.evaluate(sgd_optimizer.iterations))
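# The expected values in testBasic follow a preconditioned update of the form
#   moment <- decay_rate * moment_0 + (1 - decay_rate) * g**2   (moment_0 = 1)
#   var    <- var - lr * 0.5 * g / sqrt(moment + 1e-8)
# with no Langevin noise on this first step (the asserted values are
# deterministic). The initial moment of 1 and the absence of noise are inferred
# from the asserts, not from the optimizer source. A standalone numeric check:
def _expected_first_step(var, grad, lr=3., decay_rate=0.53, eps=1e-8):
    moment = decay_rate * 1. + (1. - decay_rate) * grad**2
    return var - lr * 0.5 * grad / np.sqrt(moment + eps)
# e.g. _expected_first_step(np.array([1.1, 2.1]), 0.1) reproduces the value of
# var0 asserted after one step in testBasic.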
def testBasicMultiInstance(self):
if tf.executing_eagerly():
return
for dtype in [tf.half, tf.float32, tf.float64]:
with self.cached_session():
var0 = tf.Variable([1.1, 2.1], dtype=dtype)
var1 = tf.Variable([3., 4.], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
vara = tf.Variable([1.1, 2.1], dtype=dtype)
varb = tf.Variable([3., 4.], dtype=dtype)
gradsa = tf.constant([0.1, 0.1], dtype=dtype)
gradsb = tf.constant([0.01, 0.01], dtype=dtype)
decay_rate = 0.5
sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(
3., preconditioner_decay_rate=decay_rate)
sgd_op = sgd_optimizer.apply_gradients(
zip([grads0, grads1], [var0, var1]))
sgd_optimizer2 = tfp.optimizer.StochasticGradientLangevinDynamics(
3., preconditioner_decay_rate=decay_rate)
sgd_op2 = sgd_optimizer2.apply_gradients(
zip([gradsa, gradsb], [vara, varb]))
self.evaluate(tf1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))
self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))
self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(vara))
self.assertAllCloseAccordingToType([3., 4.], self.evaluate(varb))
# Run 1 step of sgd
self.evaluate(sgd_op)
self.evaluate(sgd_op2)
# Validate updated params
grads_scaled = (0.5 * 0.1 /
np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))
self.assertAllCloseAccordingToType(
[1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],
self.evaluate(var0))
self.assertAllCloseAccordingToType(
[1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],
self.evaluate(vara))
grads_scaled = (0.5 * 0.01 / np.sqrt(
decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8))
self.assertAllCloseAccordingToType(
[3. - 3. * grads_scaled, 4. - 3. * grads_scaled],
self.evaluate(var1))
self.assertAllCloseAccordingToType(
[3. - 3. * grads_scaled, 4. - 3. * grads_scaled],
self.evaluate(varb))
self.assertAllCloseAccordingToType(
1, self.evaluate(sgd_optimizer.iterations))
self.assertAllCloseAccordingToType(
1, self.evaluate(sgd_optimizer2.iterations))
def testTensorLearningRate(self):
if tf.executing_eagerly():
return
for dtype in [tf.half, tf.float32, tf.float64]:
with self.cached_session():
var0 = tf.Variable([1.1, 2.1], dtype=dtype)
var1 = tf.Variable([3., 4.], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
lrate = tf.constant(3.0)
decay_rate = 0.5
sgd_op = tfp.optimizer.StochasticGradientLangevinDynamics(
lrate, preconditioner_decay_rate=tf.constant(
decay_rate)).apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(tf1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))
self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
grads_scaled = (0.5 * 0.1 /
np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))
# Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero
# tensor
self.assertAllCloseAccordingToType(
[1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],
self.evaluate(var0))
grads_scaled = (0.5 * 0.01 / np.sqrt(
decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))
self.assertAllCloseAccordingToType(
[3. - 3. * grads_scaled, 4. - 3. * grads_scaled],
self.evaluate(var1))
@tf_test_util.run_deprecated_v1
def testGradWrtRef(self):
if tf.executing_eagerly():
return
for dtype in [tf.half, tf.float32, tf.float64]:
with self.cached_session():
opt = tfp.optimizer.StochasticGradientLangevinDynamics(3.0)
values = [1., 3.]
vars_ = [tf.Variable([v], dtype=dtype) for v in values]
loss = lambda: vars_[0] + vars_[1] # pylint: disable=cell-var-from-loop
grads_and_vars = opt._compute_gradients(loss, vars_)
self.evaluate(tf1.global_variables_initializer())
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.], self.evaluate(grad))
def testBurnin(self):
if tf.executing_eagerly():
return
for burnin_dtype in [tf.int8, tf.int16, tf.int32, tf.int64]:
with self.cached_session():
var0 = tf.Variable([1.1, 2.1], dtype=tf.float32)
grads0 = tf.constant([0.1, 0.1], dtype=tf.float32)
decay_rate = 0.53
sgd_optimizer = tfp.optimizer.StochasticGradientLangevinDynamics(
3.,
preconditioner_decay_rate=decay_rate,
burnin=tf.constant(10, dtype=burnin_dtype))
sgd_op = sgd_optimizer.apply_gradients([(grads0, var0)])
self.evaluate(tf1.global_variables_initializer())
# Validate that iterations is initialized to 0.
self.assertAllCloseAccordingToType(
0, self.evaluate(sgd_optimizer.iterations))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate that iterations is incremented.
self.assertAllCloseAccordingToType(
1, self.evaluate(sgd_optimizer.iterations))
def testWithGlobalStep(self):
if tf.executing_eagerly():
return
for dtype in [tf.float32, tf.float64]:
with self.cached_session():
step = tf.Variable(0, dtype=tf.int64)
var0 = tf.Variable([1.1, 2.1], dtype=dtype)
var1 = tf.Variable([3., 4.], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
decay_rate = 0.1
sgd_opt = tfp.optimizer.StochasticGradientLangevinDynamics(
3., preconditioner_decay_rate=decay_rate)
sgd_opt.iterations = step
sgd_op = sgd_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(tf1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.1, 2.1], self.evaluate(var0))
self.assertAllCloseAccordingToType([3., 4.], self.evaluate(var1))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params and step
grads_scaled = (0.5 * 0.1 /
np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))
# Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero
# tensor
self.assertAllCloseAccordingToType(
[1.1 - 3. * grads_scaled, 2.1 - 3. * grads_scaled],
self.evaluate(var0))
grads_scaled = (0.5 * 0.01 / np.sqrt(
decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))
self.assertAllCloseAccordingToType(
[3. - 3. * grads_scaled, 4. - 3. * grads_scaled],
self.evaluate(var1))
self.assertAllCloseAccordingToType(1, self.evaluate(step))
def testSparseBasic(self):
if tf.executing_eagerly():
return
for dtype in [tf.half, tf.float32, tf.float64]:
with self.cached_session():
var0 = tf.Variable([[1.1], [2.1]], dtype=dtype)
var1 = tf.Variable([[3.], [4.]], dtype=dtype)
grads0 = tf.IndexedSlices(
tf.constant([0.1], shape=[1, 1], dtype=dtype),
tf.constant([0]), tf.constant([2, 1]))
grads1 = tf.IndexedSlices(
tf.constant([0.01], shape=[1, 1], dtype=dtype),
tf.constant([1]), tf.constant([2, 1]))
decay_rate = 0.9
sgd_op = tfp.optimizer.StochasticGradientLangevinDynamics(
3., preconditioner_decay_rate=decay_rate).apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(tf1.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.1], [2.1]], self.evaluate(var0))
self.assertAllCloseAccordingToType([[3.], [4.]], self.evaluate(var1))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
grads_scaled = (0.5 * 0.1 /
np.sqrt(decay_rate + (1. - decay_rate) * 0.1**2 + 1e-8))
# Note that `tfp.math.diag_jacobian(xs=var, ys=grad)` returns zero
# tensor
self.assertAllCloseAccordingToType([[1.1 - 3. * grads_scaled], [2.1]],
self.evaluate(var0))
grads_scaled = (0.5 * 0.01 / np.sqrt(
decay_rate + (1. - decay_rate) * 0.01**2 + 1e-8))
self.assertAllCloseAccordingToType(
[[3. - 3. * 0], [4. - 3. * grads_scaled]], self.evaluate(var1))
def testPreconditionerComputedCorrectly(self):
"""Test that SGLD step is computed correctly for a 3D Gaussian energy."""
if tf.executing_eagerly():
return
with self.cached_session():
dtype = np.float32
# Target function is the energy function of normal distribution
true_mean = dtype([0, 0, 0])
true_cov = dtype([[1, 0.25, 0.25], [0.25, 1, 0.25], [0.25, 0.25, 1]])
# Target distribution is defined through the Cholesky decomposition
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
var_1 = tf.Variable(name='var_1', initial_value=[1., 1.])
var_2 = tf.Variable(name='var_2', initial_value=[1.])
var = [var_1, var_2]
# Set up the learning rate and the optimizer
learning_rate = .5
optimizer_kernel = tfp.optimizer.StochasticGradientLangevinDynamics(
learning_rate=learning_rate, burnin=1)
# Target function
def target_fn(x, y):
# Stack the input tensors together
z = tf.concat([x, y], axis=-1) - true_mean
return -target.log_prob(z)
grads = tf.gradients(ys=target_fn(*var), xs=var)
# Update value of `var` with one iteration of the SGLD (without the
# normal perturbation, since `burnin > 0`)
step = optimizer_kernel.apply_gradients(zip(grads, var))
# True theoretical value of `var` after one iteration
decay_tensor = tf.cast(optimizer_kernel._decay_tensor, var[0].dtype)
diagonal_bias = tf.cast(optimizer_kernel._diagonal_bias, var[0].dtype)
learning_rate = tf.cast(optimizer_kernel._learning_rate, var[0].dtype)
velocity = [(decay_tensor * tf.ones_like(v)
+ (1 - decay_tensor) * tf.square(g))
for v, g in zip(var, grads)]
preconditioner = [tf.math.rsqrt(vel + diagonal_bias) for vel in velocity]
# Compute second order gradients
_, grad_grads = diag_jacobian(
xs=var,
ys=grads)
# Compute gradient of the preconditioner (compute the gradient manually)
preconditioner_grads = [-(g * g_g * (1. - decay_tensor) * p**3.)
for g, g_g, p in zip(grads, grad_grads,
preconditioner)]
# True theoretical value of `var` after one iteration
var_true = [v - learning_rate * 0.5 * (p * g - p_g)
for v, p, g, p_g in zip(var, preconditioner, grads,
preconditioner_grads)]
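      # (Added commentary, not part of the original test.) The lines above mirror
      # the RMSProp-preconditioned SGLD update (cf. Li et al., 2016): with velocity
      #   v_t = d * v_{t-1} + (1 - d) * g**2
      # and preconditioner G = 1 / sqrt(v_t + eps), one step is
      #   theta <- theta - (lr / 2) * (G * g - dG/dtheta),
      # the injected Gaussian noise being omitted here because `burnin > 0`.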
self.evaluate(tf1.global_variables_initializer())
var_true_ = self.evaluate(var_true)
self.evaluate(step)
      var_ = self.evaluate(var)  # new
    >>> aug = iaa.CropAndPad(px=(0, 10), keep_size=False)
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). After padding, the images are NOT resized to
their original size (i.e. the images may end up having different
heights/widths).
>>> aug = iaa.CropAndPad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
pads the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.CropAndPad(percent=(0, 0.1))
pads each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.CropAndPad(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
pads each side by either 5 percent or 10 percent.
>>> aug = iaa.CropAndPad(px=(-10, 10))
    samples, per side and per image, a value v from the discrete range
    [-10..10] and either crops (negative value) or pads (positive value)
    the side by abs(v) pixels.
"""
def __init__(self, px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True, name=None, deterministic=False, random_state=None):
super(CropAndPad, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
self.all_sides = None
self.top = None
self.right = None
self.bottom = None
self.left = None
if px is None and percent is None:
self.mode = "noop"
elif px is not None and percent is not None:
raise Exception("Can only pad by pixels or percent, not both.")
elif px is not None:
self.mode = "px"
if ia.is_single_integer(px):
self.all_sides = iap.Deterministic(px)
elif isinstance(px, tuple):
ia.do_assert(len(px) in [2, 4])
def handle_param(p):
if ia.is_single_integer(p):
return iap.Deterministic(p)
elif isinstance(p, tuple):
ia.do_assert(len(p) == 2)
ia.do_assert(ia.is_single_integer(p[0]))
ia.do_assert(ia.is_single_integer(p[1]))
return iap.DiscreteUniform(p[0], p[1])
elif isinstance(p, list):
ia.do_assert(len(p) > 0)
ia.do_assert(all([ia.is_single_integer(val) for val in p]))
return iap.Choice(p)
elif isinstance(p, iap.StochasticParameter):
return p
else:
raise Exception("Expected int, tuple of two ints, list of ints or StochasticParameter, got type %s." % (type(p),))
if len(px) == 2:
#self.top = self.right = self.bottom = self.left = handle_param(px)
self.all_sides = handle_param(px)
else: # len == 4
self.top = handle_param(px[0])
self.right = handle_param(px[1])
self.bottom = handle_param(px[2])
self.left = handle_param(px[3])
elif isinstance(px, iap.StochasticParameter):
self.top = self.right = self.bottom = self.left = px
else:
raise Exception("Expected int, tuple of 4 ints/tuples/lists/StochasticParameters or StochasticParameter, got type %s." % (type(px),))
else: # = elif percent is not None:
self.mode = "percent"
if ia.is_single_number(percent):
ia.do_assert(-1.0 < percent)
#self.top = self.right = self.bottom = self.left = Deterministic(percent)
self.all_sides = iap.Deterministic(percent)
elif isinstance(percent, tuple):
ia.do_assert(len(percent) in [2, 4])
def handle_param(p):
if ia.is_single_number(p):
return iap.Deterministic(p)
elif isinstance(p, tuple):
ia.do_assert(len(p) == 2)
ia.do_assert(ia.is_single_number(p[0]))
ia.do_assert(ia.is_single_number(p[1]))
ia.do_assert(-1.0 < p[0])
ia.do_assert(-1.0 < p[1])
return iap.Uniform(p[0], p[1])
elif isinstance(p, list):
ia.do_assert(len(p) > 0)
ia.do_assert(all([ia.is_single_number(val) for val in p]))
ia.do_assert(all([-1.0 < val for val in p]))
return iap.Choice(p)
elif isinstance(p, iap.StochasticParameter):
return p
else:
raise Exception("Expected int, tuple of two ints, list of ints or StochasticParameter, got type %s." % (type(p),))
if len(percent) == 2:
#self.top = self.right = self.bottom = self.left = handle_param(percent)
self.all_sides = handle_param(percent)
else: # len == 4
self.top = handle_param(percent[0])
self.right = handle_param(percent[1])
self.bottom = handle_param(percent[2])
self.left = handle_param(percent[3])
elif isinstance(percent, iap.StochasticParameter):
self.top = self.right = self.bottom = self.left = percent
else:
raise Exception("Expected number, tuple of 4 numbers/tuples/lists/StochasticParameters or StochasticParameter, got type %s." % (type(percent),))
pad_modes_available = set(["constant", "edge", "linear_ramp", "maximum", "median", "minimum", "reflect", "symmetric", "wrap"])
if pad_mode == ia.ALL:
self.pad_mode = iap.Choice(list(pad_modes_available))
elif ia.is_string(pad_mode):
ia.do_assert(pad_mode in pad_modes_available)
self.pad_mode = iap.Deterministic(pad_mode)
elif isinstance(pad_mode, list):
ia.do_assert(all([v in pad_modes_available for v in pad_mode]))
self.pad_mode = iap.Choice(pad_mode)
elif isinstance(pad_mode, iap.StochasticParameter):
self.pad_mode = pad_mode
else:
raise Exception("Expected pad_mode to be ia.ALL or string or list of strings or StochasticParameter, got %s." % (type(pad_mode),))
self.pad_cval = iap.handle_discrete_param(pad_cval, "pad_cval", value_range=(0, 255), tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
self.keep_size = keep_size
self.sample_independently = sample_independently
def _augment_images(self, images, random_state, parents, hooks):
input_dtypes = meta.copy_dtypes_for_restore(images)
result = []
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i in sm.xrange(nb_images):
seed = seeds[i]
height, width = images[i].shape[0:2]
crop_top, crop_right, crop_bottom, crop_left, pad_top, pad_right, pad_bottom, pad_left, pad_mode, pad_cval = self._draw_samples_image(seed, height, width)
image_cr = images[i][crop_top:height-crop_bottom, crop_left:width-crop_right, :]
if any([pad_top > 0, pad_right > 0, pad_bottom > 0, pad_left > 0]):
if image_cr.ndim == 2:
pad_vals = ((pad_top, pad_bottom), (pad_left, pad_right))
else:
pad_vals = ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0))
if pad_mode == "constant":
image_cr_pa = np.pad(image_cr, pad_vals, mode=pad_mode, constant_values=pad_cval)
elif pad_mode == "linear_ramp":
image_cr_pa = np.pad(image_cr, pad_vals, mode=pad_mode, end_values=pad_cval)
else:
image_cr_pa = np.pad(image_cr, pad_vals, mode=pad_mode)
else:
image_cr_pa = image_cr
if self.keep_size:
image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))
result.append(image_cr_pa)
if ia.is_np_array(images):
if self.keep_size:
# this converts the list to an array of original input dtype
result = np.array(result) # without this, restore_augmented_images_dtypes_() expects input_dtypes to be a list
meta.restore_augmented_images_dtypes_(result, input_dtypes)
return result
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
# TODO add test
result = []
nb_heatmaps = len(heatmaps)
seeds = random_state.randint(0, 10**6, (nb_heatmaps,))
for i in sm.xrange(nb_heatmaps):
seed = seeds[i]
height_image, width_image = heatmaps[i].shape[0:2]
height_heatmaps, width_heatmaps = heatmaps[i].arr_0to1.shape[0:2]
vals = self._draw_samples_image(seed, height_image, width_image)
crop_image_top, crop_image_right, crop_image_bottom, crop_image_left, \
pad_image_top, pad_image_right, pad_image_bottom, pad_image_left, \
_pad_mode, _pad_cval = vals
if (height_image, width_image) != (height_heatmaps, width_heatmaps):
crop_top = int(round(height_heatmaps * (crop_image_top/height_image)))
crop_right = int(round(width_heatmaps * (crop_image_right/width_image)))
crop_bottom = int(round(height_heatmaps * (crop_image_bottom/height_image)))
crop_left = int(round(width_heatmaps * (crop_image_left/width_image)))
crop_top, crop_right, crop_bottom, crop_left = self._prevent_zero_size(height_heatmaps, width_heatmaps, crop_top, crop_right, crop_bottom, crop_left)
pad_top = int(round(height_heatmaps * (pad_image_top/height_image)))
pad_right = int(round(width_heatmaps * (pad_image_right/width_image)))
pad_bottom = int(round(height_heatmaps * (pad_image_bottom/height_image)))
pad_left = int(round(width_heatmaps * (pad_image_left/width_image)))
else:
crop_top = crop_image_top
crop_right = crop_image_right
crop_bottom = crop_image_bottom
crop_left = crop_image_left
pad_top = pad_image_top
pad_right = pad_image_right
pad_bottom = pad_image_bottom
pad_left = pad_image_left
arr_cr = heatmaps[i].arr_0to1[crop_top:height_heatmaps-crop_bottom, crop_left:width_heatmaps-crop_right, :]
if any([pad_top > 0, pad_right > 0, pad_bottom > 0, pad_left > 0]):
if arr_cr.ndim == 2:
pad_vals = ((pad_top, pad_bottom), (pad_left, pad_right))
else:
pad_vals = ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0))
arr_cr_pa = np.pad(arr_cr, pad_vals, mode="constant", constant_values=0)
else:
arr_cr_pa = arr_cr
heatmaps[i].arr_0to1 = arr_cr_pa
if self.keep_size:
heatmaps[i] = heatmaps[i].scale((height_heatmaps, width_heatmaps))
else:
heatmaps[i].shape = (
heatmaps[i].shape[0] - crop_top - crop_bottom + pad_top + pad_bottom,
heatmaps[i].shape[1] - crop_left - crop_right + pad_left + pad_right
) + heatmaps[i].shape[2:]
result.append(heatmaps[i])
return result
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
result = []
nb_images = len(keypoints_on_images)
seeds = random_state.randint(0, 10**6, (nb_images,))
for i, keypoints_on_image in enumerate(keypoints_on_images):
seed = seeds[i]
height, width = keypoints_on_image.shape[0:2]
#top, right, bottom, left = self._draw_samples_image(seed, height, width)
crop_top, crop_right, crop_bottom, crop_left, pad_top, pad_right, pad_bottom, pad_left, _pad_mode, _pad_cval = self._draw_samples_image(seed, height, width)
shifted = keypoints_on_image.shift(x=-crop_left+pad_left, y=-crop_top+pad_top)
shifted.shape = (
height - crop_top - crop_bottom + pad_top + pad_bottom,
width - crop_left - crop_right + pad_left + pad_right
) + shifted.shape[2:]
if self.keep_size:
result.append(shifted.on(keypoints_on_image.shape))
else:
result.append(shifted)
return result
def _draw_samples_image(self, seed, height, width):
random_state = ia.new_random_state(seed)
if self.mode == "noop":
top = right = bottom = left = 0
else:
if self.all_sides is not None:
if self.sample_independently:
samples = self.all_sides.draw_samples((4,), random_state=random_state)
top, right, bottom, left = samples
else:
sample = self.all_sides.draw_sample(random_state=random_state)
top = right = bottom = left = sample
else:
top = self.top.draw_sample(random_state=random_state)
right = self.right.draw_sample(random_state=random_state)
bottom = self.bottom.draw_sample(random_state=random_state)
left = self.left.draw_sample(random_state=random_state)
if self.mode == "px":
# no change necessary for pixel values
pass
elif self.mode == "percent":
# percentage values have to be transformed to pixel values
top = int(round(height * top))
right = int(round(width * right))
bottom = int(round(height * bottom))
left = int(round(width * left))
else:
raise Exception("Invalid mode")
crop_top = (-1) * top if top < 0 else 0
crop_right = (-1) * right if right < 0 else 0
crop_bottom = (-1) * bottom if bottom < 0 else 0
crop_left = (-1) * left if left < 0 else 0
pad_top = top if top > 0 else 0
pad_right = right if right > 0 else 0
pad_bottom = bottom if bottom > 0 else 0
pad_left = left if left > 0 else 0
pad_mode = self.pad_mode.draw_sample(random_state=random_state)
pad_cval = self.pad_cval.draw_sample(random_state=random_state)
pad_cval = np.clip(np.round(pad_cval), 0, 255).astype(np.uint8)
        crop_top, crop_right, crop_bottom, crop_left = self._prevent_zero_size(height, width, crop_top, crop_right, crop_bottom, crop_left)
# Repository: kifarid/ray
import gym
import logging
import numpy as np
from ray.rllib.utils.framework import try_import_jax, try_import_tf, \
try_import_torch
jax, _ = try_import_jax()
tf1, tf, tfv = try_import_tf()
if tf1:
eager_mode = None
try:
from tensorflow.python.eager.context import eager_mode
except (ImportError, ModuleNotFoundError):
pass
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
def framework_iterator(config=None,
frameworks=("tf2", "tf", "tfe", "torch"),
session=False):
"""An generator that allows for looping through n frameworks for testing.
Provides the correct config entries ("framework") as well
as the correct eager/non-eager contexts for tfe/tf.
Args:
config (Optional[dict]): An optional config dict to alter in place
depending on the iteration.
frameworks (Tuple[str]): A list/tuple of the frameworks to be tested.
            Allowed are: "tf2", "tf", "tfe", "torch", "jax", and None.
session (bool): If True and only in the tf-case: Enter a tf.Session()
and yield that as second return value (otherwise yield (fw, None)).
Also sets a seed (42) on the session to make the test
deterministic.
Yields:
        str: If `session` is False:
            The current framework ("tf2", "tf", "tfe", "torch") used.
        Tuple[str, Union[None, tf.Session]]: If `session` is True:
            A tuple of the current framework and the tf.Session (None
            unless the framework is "tf").
"""
config = config or {}
frameworks = [frameworks] if isinstance(frameworks, str) else \
list(frameworks)
# Both tf2 and tfe present -> remove "tfe" or "tf2" depending on version.
if "tf2" in frameworks and "tfe" in frameworks:
frameworks.remove("tfe" if tfv == 2 else "tf2")
for fw in frameworks:
# Skip non-installed frameworks.
if fw == "torch" and not torch:
logger.warning(
"framework_iterator skipping torch (not installed)!")
continue
if fw != "torch" and not tf:
logger.warning("framework_iterator skipping {} (tf not "
"installed)!".format(fw))
continue
elif fw == "tfe" and not eager_mode:
logger.warning("framework_iterator skipping tf-eager (could not "
"import `eager_mode` from tensorflow.python)!")
continue
elif fw == "tf2" and tfv != 2:
logger.warning(
"framework_iterator skipping tf2.x (tf version is < 2.0)!")
continue
elif fw == "jax" and not jax:
logger.warning("framework_iterator skipping JAX (not installed)!")
continue
assert fw in ["tf2", "tf", "tfe", "torch", "jax", None]
# Do we need a test session?
sess = None
if fw == "tf" and session is True:
sess = tf1.Session()
sess.__enter__()
tf1.set_random_seed(42)
print("framework={}".format(fw))
config["framework"] = fw
eager_ctx = None
# Enable eager mode for tf2 and tfe.
if fw in ["tf2", "tfe"]:
eager_ctx = eager_mode()
eager_ctx.__enter__()
assert tf1.executing_eagerly()
# Make sure, eager mode is off.
elif fw == "tf":
assert not tf1.executing_eagerly()
yield fw if session is False else (fw, sess)
# Exit any context we may have entered.
if eager_ctx:
eager_ctx.__exit__(None, None, None)
elif sess:
sess.__exit__(None, None, None)
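# Illustrative usage sketch (added commentary, not part of the original module);
# the config values and the trainer-building helper are made up:
#
#     config = {"num_workers": 0}
#     for fw, sess in framework_iterator(config, frameworks=("tf", "torch"),
#                                        session=True):
#         # config["framework"] has been set to `fw`; `sess` is a tf.Session
#         # only for fw == "tf", otherwise None.
#         trainer = build_trainer_somehow(config)  # hypothetical helper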
def check(x, y, decimals=5, atol=None, rtol=None, false=False):
"""
Checks two structures (dict, tuple, list,
    np.array, float, int, etc.) for (almost) numeric identity.
All numbers in the two structures have to match up to `decimal` digits
after the floating point. Uses assertions.
Args:
x (any): The value to be compared (to the expectation: `y`). This
may be a Tensor.
y (any): The expected value to be compared to `x`. This must not
be a tf-Tensor, but may be a tfe/torch-Tensor.
decimals (int): The number of digits after the floating point up to
which all numeric values have to match.
atol (float): Absolute tolerance of the difference between x and y
(overrides `decimals` if given).
rtol (float): Relative tolerance of the difference between x and y
(overrides `decimals` if given).
false (bool): Whether to check that x and y are NOT the same.
"""
# A dict type.
if isinstance(x, dict):
assert isinstance(y, dict), \
"ERROR: If x is dict, y needs to be a dict as well!"
y_keys = set(x.keys())
for key, value in x.items():
assert key in y, \
"ERROR: y does not have x's key='{}'! y={}".format(key, y)
check(
value,
y[key],
decimals=decimals,
atol=atol,
rtol=rtol,
false=false)
y_keys.remove(key)
assert not y_keys, \
"ERROR: y contains keys ({}) that are not in x! y={}".\
format(list(y_keys), y)
# A tuple type.
elif isinstance(x, (tuple, list)):
assert isinstance(y, (tuple, list)),\
"ERROR: If x is tuple, y needs to be a tuple as well!"
assert len(y) == len(x),\
"ERROR: y does not have the same length as x ({} vs {})!".\
format(len(y), len(x))
for i, value in enumerate(x):
check(
value,
y[i],
decimals=decimals,
atol=atol,
rtol=rtol,
false=false)
# Boolean comparison.
elif isinstance(x, (np.bool_, bool)):
if false is True:
assert bool(x) is not bool(y), \
"ERROR: x ({}) is y ({})!".format(x, y)
else:
assert bool(x) is bool(y), \
"ERROR: x ({}) is not y ({})!".format(x, y)
# Nones or primitives.
elif x is None or y is None or isinstance(x, (str, int)):
if false is True:
assert x != y, "ERROR: x ({}) is the same as y ({})!".format(x, y)
else:
assert x == y, \
"ERROR: x ({}) is not the same as y ({})!".format(x, y)
    # Object-dtype (e.g. string) array comparison.
elif hasattr(x, "dtype") and x.dtype == np.object:
try:
np.testing.assert_array_equal(x, y)
if false is True:
assert False, \
"ERROR: x ({}) is the same as y ({})!".format(x, y)
except AssertionError as e:
if false is False:
raise e
# Everything else (assume numeric or tf/torch.Tensor).
else:
if tf1 is not None:
# y should never be a Tensor (y=expected value).
if isinstance(y, (tf1.Tensor, tf1.Variable)):
# In eager mode, numpyize tensors.
if tf.executing_eagerly():
y = y.numpy()
else:
raise ValueError(
"`y` (expected value) must not be a Tensor. "
"Use numpy.ndarray instead")
if isinstance(x, (tf1.Tensor, tf1.Variable)):
# In eager mode, numpyize tensors.
if tf1.executing_eagerly():
x = x.numpy()
# Otherwise, use a new tf-session.
else:
with tf1.Session() as sess:
x = sess.run(x)
return check(
x,
y,
decimals=decimals,
atol=atol,
rtol=rtol,
false=false)
if torch is not None:
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
if isinstance(y, torch.Tensor):
y = y.detach().cpu().numpy()
# Using decimals.
if atol is None and rtol is None:
# Assert equality of both values.
try:
np.testing.assert_almost_equal(x, y, decimal=decimals)
# Both values are not equal.
except AssertionError as e:
# Raise error in normal case.
if false is False:
raise e
# Both values are equal.
else:
# If false is set -> raise error (not expected to be equal).
if false is True:
assert False, \
"ERROR: x ({}) is the same as y ({})!".format(x, y)
# Using atol/rtol.
else:
# Provide defaults for either one of atol/rtol.
if atol is None:
atol = 0
if rtol is None:
rtol = 1e-7
try:
np.testing.assert_allclose(x, y, atol=atol, rtol=rtol)
except AssertionError as e:
if false is False:
raise e
else:
if false is True:
assert False, \
"ERROR: x ({}) is the same as y ({})!".format(x, y)
def check_learning_achieved(tune_results, min_reward, evaluation=False):
"""Throws an error if `min_reward` is not reached within tune_results.
Checks the last iteration found in tune_results for its
"episode_reward_mean" value and compares it to `min_reward`.
Args:
tune_results: The tune.run returned results object.
        min_reward (float): The min reward that must be reached.
        evaluation (bool): If True, compare `min_reward` against the
            "evaluation" reward of the last result instead.
Raises:
ValueError: If `min_reward` not reached.
"""
last_result = tune_results.trials[0].last_result
avg_reward = last_result["episode_reward_mean"] if not evaluation else \
last_result["evaluation"]["episode_reward_mean"]
if avg_reward < min_reward:
raise ValueError("`stop-reward` of {} not reached!".format(min_reward))
print("ok")
def check_compute_single_action(trainer,
include_state=False,
include_prev_action_reward=False):
"""Tests different combinations of arguments for trainer.compute_action.
Args:
trainer (Trainer): The Trainer object to test.
include_state (bool): Whether to include the initial state of the
Policy's Model in the `compute_action` call.
include_prev_action_reward (bool): Whether to include the prev-action
and -reward in the `compute_action` call.
Raises:
ValueError: If anything unexpected happens.
"""
try:
pol = trainer.get_policy()
except AttributeError:
pol = trainer.policy
model = pol.model
action_space = pol.action_space
for what in [pol, trainer]:
if what is trainer:
method_to_test = trainer.compute_action
# Get the obs-space from Workers.env (not Policy) due to possible
# pre-processor up front.
worker_set = getattr(trainer, "workers",
getattr(trainer, "_workers", None))
assert worker_set
if isinstance(worker_set, list):
obs_space = trainer.get_policy().observation_space
else:
obs_space = worker_set.local_worker().for_policy(
lambda p: p.observation_space)
obs_space = getattr(obs_space, "original_space", obs_space)
else:
method_to_test = pol.compute_single_action
obs_space = pol.observation_space
for explore in [True, False]:
for full_fetch in ([False, True] if what is trainer else [False]):
call_kwargs = {}
if what is trainer:
call_kwargs["full_fetch"] = full_fetch
else:
call_kwargs["clip_actions"] = True
obs = obs_space.sample()
                # Framestacking w/ ...
# File: src/clims/legacy/utility/testing.py
"""
Various helpers for mocking data quickly, in either unit tests or notebooks.
"""
from __future__ import absolute_import
from clims.legacy.domain import *
from clims.legacy.service.dilution.service import *
from mock import MagicMock
from clims.legacy.context import ExtensionContext
class DilutionTestDataHelper:
"""
    A helper for creating mock containers and artifacts related to Dilution, in as simple a way
    as possible. It is intended for end users testing things in notebooks, but can also be used in tests.
"""
def __init__(self, concentration_ref, create_well_order=Container.DOWN_FIRST):
self.default_source = "source"
self.default_target = "target"
self.containers = dict()
# Default input/output containers used if the user doesn't provide them:
self.create_container(self.default_source, True)
self.create_container(self.default_target, False)
self.concentration_unit = DilutionSettings._parse_conc_ref(concentration_ref)
assert self.concentration_unit is not None
# TODO: Change the Container domain object so that it can add analytes to
# the next available position
self.well_enumerator = self.containers[self.default_source].enumerate_wells(
create_well_order)
self.pairs = list()
def set_default_containers(self, source_postfix, target_postfix):
self.default_source = "source{}".format(source_postfix)
self.default_target = "target{}".format(target_postfix)
def create_container(self, container_id, is_source):
container = Container(container_type=Container.CONTAINER_TYPE_96_WELLS_PLATE,
container_id=container_id, name=container_id, is_source=is_source)
self.containers[container_id] = container
return container
def get_container_by_name(self, container_name, is_source):
"""Returns a container by name, creating it if it doesn't exist yet"""
if container_name not in self.containers:
self.containers[container_name] = self.create_container(container_name, is_source)
return self.containers[container_name]
def _create_analyte(self, is_input, partial_name, analyte_type=Analyte, samples=None):
# TODO: This code is not specific to the Dilution test cases, move it to a
# more generic class.
name = "{}-{}".format("in" if is_input else "out", partial_name)
project = Project("IntegrationTest")
if not samples:
samples = [Sample("S_" + name, "S_" + name, project)]
ret = analyte_type(
api_resource=None,
is_input=is_input,
id=name,
name=name,
samples=samples)
return ret
def create_pooled_pairs(self, pool_size):
"""
        Creates n pairs that are pooled, i.e. n input analytes that are all mapped to a single pooled output analyte.
The wells in the source container are [A1, B2, ...]
NOTE: Currently we model the REST API interface when it comes to pools, but it would probably
be an improvement to introduce new domain objects, Pool and PoolInput that would
be used in this case to simplify the use of the API.
"""
source_analytes = list()
for i in range(1, pool_size + 1):
source_container = self.get_container_by_name("source{}".format(i), True)
name = "analyte{}".format(i)
analyte = self._create_analyte(True, name, Analyte)
source_container.append(analyte)
source_analytes.append(analyte)
# Now create one analyte for the output, but containing all the input samples
samples = [analyte.sample() for analyte in source_analytes]
target_analyte = self._create_analyte(False, "analyte1", samples=samples)
target_container = self.get_container_by_name(self.default_target, False)
target_container.append(target_analyte)
for source_analyte in source_analytes:
yield ArtifactPair(source_analyte, target_analyte)
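    # Illustrative usage sketch (added commentary; the concentration reference
    # value is made up):
    #     helper = DilutionTestDataHelper("ng/ul")
    #     pairs = list(helper.create_pooled_pairs(3))
    #     # -> 3 ArtifactPair objects, each sharing the same pooled output analyte.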
def create_pair(self, pos_from=None, pos_to=None, source_container_name=None, target_container_name=None,
source_type=Analyte, target_type=Analyte):
if source_container_name is None:
source_container_name = self.default_source
if target_container_name is None:
target_container_name = self.default_target
source_container = self.get_container_by_name(source_container_name, True)
target_container = self.get_container_by_name(target_container_name, False)
if pos_from is None:
well = self.well_enumerator.next()
pos_from = well.position
if pos_to is None:
pos_to = pos_from
name = "FROM:{}".format(pos_from)
pair = ArtifactPair(self._create_analyte(True, name, source_type),
self._create_analyte(False, name, target_type))
source_container.set_well_update_artifact(pos_from, artifact=pair.input_artifact)
target_container.set_well_update_artifact(pos_to, artifact=pair.output_artifact)
self.pairs.append(pair)
return pair
def create_dilution_pair(self, conc1, vol1, conc2, vol2, pos_from=None, pos_to=None,
source_type=Analyte, target_type=Analyte,
source_container_name=None, target_container_name=None):
"""Creates an analyte pair ready for dilution"""
pair = self.create_pair(pos_from, pos_to,
source_type=source_type, target_type=target_type,
source_container_name=source_container_name,
target_container_name=target_container_name)
concentration_unit = DilutionSettings.concentration_unit_to_string(self.concentration_unit)
conc_source_udf = "Conc. Current ({})".format(concentration_unit)
conc_target_udf = "Target conc. ({})".format(concentration_unit)
pair.input_artifact.udf_map = UdfMapping({conc_source_udf: conc1,
"Current sample volume (ul)": vol1})
pair.output_artifact.udf_map = UdfMapping({conc_source_udf: conc1,
"Current sample volume (ul)": vol1,
"Target vol. (ul)": vol2,
conc_target_udf: conc2,
"Dil. calc target vol": None,
"Dil. calc target conc.": None,
"Dil. calc source vol": None})
return pair
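    # Illustrative usage sketch (added commentary; the numbers are made up):
    #     helper = DilutionTestDataHelper("ng/ul")
    #     pair = helper.create_dilution_pair(conc1=100, vol1=30, conc2=50, vol2=40)
    #     # pair.input_artifact / pair.output_artifact now carry the source and
    #     # target concentration/volume UDFs expected by the dilution service.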
# TODO: MERGE WITH ABOVE!
def create_dilution_pair2(self, pair, conc1, vol1, conc2, vol2):
"""
        Given a pair (e.g. built with create_pair), expands it with the concentration and volume
        UDFs that pairs are expected to carry when they take part in a dilution.
"""
concentration_unit = DilutionSettings.concentration_unit_to_string(self.concentration_unit)
conc_source_udf = "Conc. Current ({})".format(concentration_unit)
conc_target_udf = "Target conc. ({})".format(concentration_unit)
pair.input_artifact.udf_map = UdfMapping({conc_source_udf: conc1,
"Current sample volume (ul)": vol1})
pair.output_artifact.udf_map = UdfMapping({conc_source_udf: conc1,
"Current sample volume (ul)": vol1,
"Target vol. (ul)": vol2,
conc_target_udf: conc2,
"Dil. calc target vol": None,
"Dil. calc target conc.": None,
"Dil. calc source vol": None})
return pair
def mock_context(**kwargs):
"""Creates a mock with the service provided as keyword arguments, filling the rest with MagicMock"""
# TODO: Needs to be updated when the signature is updated. Fix that (or use a better approach)
for arg in ["session", "artifact_service", "file_service", "current_user", "step_logger_service",
"step_repo", "legacy_service", "dilution_service", "process_service",
"upload_file_service", "validation_service"]:
kwargs.setdefault(arg, MagicMock())
return ExtensionContext(**kwargs)
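# Illustrative usage sketch (added commentary; `my_artifact_service` is hypothetical):
#     context = mock_context(artifact_service=my_artifact_service)
#     # every other ExtensionContext dependency is a MagicMock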
class TestExtensionContext(object):
"""
A helper (wrapper) for creating test ExtensionContext objects, which are used for integration tests of the
type where you want to mock all repositories, but keep the services hooked up as they would be in production.
Wraps that kind of mocked ExtensionContext and provides various convenience methods for adding data to the mocked
repositories.
    The idea is that this should be usable by users who have little knowledge about how the framework works.
"""
def __init__(self):
session = MagicMock()
step_repo = MagicMock()
step_repo.all_artifacts = self._all_artifacts
user = User("Integration", "Tester", "<EMAIL>", "IT")
step_repo.get_process = MagicMock(return_value=Process(
None, "24-1234", user, None, "http://not-avail"))
os_service = MagicMock()
file_repository = MagicMock()
legacy_service = MagicMock()
process_type = ProcessType(None, None, name="Some process")
step_repo.current_user = MagicMock(return_value=user)
step_repo.get_process_type = MagicMock(return_value=process_type)
self.context = ExtensionContext.create_mocked(
session, step_repo, os_service, file_repository, legacy_service)
# TODO: only mocking this one method of the validation_service for now (quick fix)
self.context.validation_service.handle_single_validation = MagicMock()
self.context.logger = MagicMock()
self._shared_files = list()
self._analytes = list()
def logged_validation_results(self):
return [call[0][0]
for call in self.context.validation_service.handle_single_validation.call_args_list]
def count_logged_validation_results_of_type(self, t):
return len([result for result in self.logged_validation_results() if type(result) == t])
def count_logged_validation_results_with_msg(self, msg):
return len([result for result in self.logged_validation_results()
if result.msg == msg])
def _all_artifacts(self):
return self._shared_files + self._analytes
def add_shared_result_file(self, f):
assert f.name is not None, "You need to supply a name"
f.id = "92-{}".format(len(self._shared_files))
f.api_resource = MagicMock()
self._shared_files.append((None, f))
def add_udf_to_step(self, key, value):
if self.context.current_step.udf_map is None:
self.context.current_step.udf_map = UdfMapping()
self.context.current_step.udf_map.add(key, value)
def set_user(self, user_name):
pass
def add_analyte_pair(self, input, output):
# TODO: Set id and name if not provided
self._analytes.append((input, output))
def add_analyte_pairs(self, pairs):
self._analytes.extend((pair.input_artifact, pair.output_artifact) for pair in pairs)
class TestExtensionWrapper(object):
"""Similar to TestExtensionContext, but wraps an entire extension"""
def __init__(self, extension_type):
self.context_wrapper = TestExtensionContext()
self.extension = extension_type(self.context_wrapper.context)
class StepScenario(object):
"""Describes a scenario in a step in the application that we want to mock, e.g. place samples."""
def __init__(self, context_wrapper):
self.input_containers = list()
self.analytes = list()
self.pairs = list()
self.analytes = list()
self.context_wrapper = context_wrapper
def create_analyte(self, is_input, name, analyte_id, analyte_type=Analyte, samples=None):
project = Project("IntegrationTest")
if not samples:
samples = [Sample(name, name, project)]
ret = analyte_type(
api_resource=None,
is_input=is_input,
id=analyte_id,
name=name,
samples=samples)
self.analytes.append(ret)
return ret
class PoolSamplesScenario(StepScenario):
"""A 'scenario' mocks a particular set of actions made in the UI and sets up mock objects accordingly
Note that some of the methods return this class so that it can be used in a fluent api fashion, but the same
methods can also be used referring to previously added objects."""
def __init__(self, context_wrapper):
super(PoolSamplesScenario, self).__init__(context_wrapper)
self.pools = list()
def add_input_container(self, name=None, size=None, container_id=None):
if name is None:
name = ''
if size is None:
size = PlateSize(height=8, width=12)
if container_id is None:
container_id = "incont_{}".format(len(self.input_containers))
container = Container(name=name, size=size, container_id=container_id)
self.input_containers.append(container)
return self
def add_input_analyte(self, name=None, analyte_id=None, input_container_ref=-1):
"""Adds an input analyte to the last container added"""
last_container = self.input_containers[input_container_ref]
if analyte_id is None:
analyte_id = "analyte_{}-{}".format(last_container.id, len(last_container.occupied))
if name is None:
name = analyte_id
analyte = self.create_analyte(True, name, analyte_id)
last_container.append(analyte)
return self
def create_pool(self, name=None, analyte_id=None):
pool = self.create_analyte(False, name, analyte_id)
pool.samples = list() # Emptying it, as the helper creates them by default
self.pools.append(pool)
return self
def add_to_pool(self, pool_ref=-1, analyte_ref=-1, input_container_ref=-1):
pool = self.pools[pool_ref]
input_analyte = self.input_containers[input_container_ref].occupied[analyte_ref].artifact
pool.samples.append(input_analyte.sample())
pair = pool.pair_as_output(input_analyte)
self.pairs.append(pair)
self.context_wrapper.add_analyte_pair(pair.input_artifact, pair.output_artifact)
return self
def to_string(self, compressed=True):
"""Returns a more detailed string representation than __str__"""
ret = list()
ret.append("Input containers")
ret.append("----------------")
for container in self.input_containers:
ret.append(container.to_string(compressed))
ret.append("Pools")
ret.append("-----")
for pool in self.pools:
ret.append(pool.name)
return "\n".join(map(str, ret))
class PoolSamplesWithDilutionScenario(PoolSamplesScenario):
"""A StepScenario that sets a step up for pooling and dilution with the exact UDFs we require at SNP&SEQ"""
def __init__(self, context_wrapper, concentration_unit):
super(PoolSamplesWithDilutionScenario, self).__init__(context_wrapper)
self.concentration_unit = concentration_unit
def dilution_vals(self, conc, vol, analyte_ref=-1):
"""Sets the values required for dilution (conc and vol) to the analyte that was added last to the scenario"""
analyte = self.analytes[analyte_ref]
if analyte.is_input:
            analyte.udf_map
'''Tasks specific to the IsMore project.'''
from __future__ import division
from collections import OrderedDict
import time
import datetime
import os
import re
import pdb
import pickle
import tables
import math
import traceback
import numpy as np
import pandas as pd
import random
import multiprocessing as mp
import subprocess
from random import shuffle
import random
import copy
# from django.db import models
# from db.tracker import TaskEntry, Task
from riglib.experiment import traits, Sequence, generate, FSMTable, StateTransitions
from riglib.stereo_opengl.window import WindowDispl2D, FakeWindow
from riglib.stereo_opengl.primitives import Circle, Sector, Line
from riglib.bmi import clda, extractor, train
from riglib.bmi.bmi import Decoder, BMISystem, GaussianStateHMM, BMILoop, GaussianState, MachineOnlyFilter
from ismore import plants, settings, ismore_bmi_lib
from ismore.common_state_lists import *
from features.bmi_task_features import LinearlyDecreasingAssist, LinearlyDecreasingHalfLife
from features.simulation_features import SimTime, SimHDF
from ismore.brainamp import rda
from utils.angle_utils import *
from utils.util_fns import *
from utils.constants import *
# from db.tracker import models
from utils.ringbuffer import RingBuffer
from features.generator_features import Autostart
import pygame
from riglib.plants import RefTrajectories
from ismore.filter import Filter
from scipy.signal import butter,lfilter
import brainamp_channel_lists
from utils.constants import *
#import playsound
np.set_printoptions(suppress=True)
np.set_printoptions(precision=5)
#################################### DEFINITIONS ----------------------------------------------
###### Colors ######
COLORS = {
'black': (0, 0, 0, 1),
'red': (1, 0, 0, 1),
'red_grasp': (1, 0, 0, 1),
'grasp': (1, 0, 0, 1),
'pinch': (1, 0, 0, 1),
'green': (0, 1, 0, 1),
'green_point': (0, 1, 0, 1),
'point': (0, 1, 0, 1),
'blue': (0, 0, 1, 1),
'blue_up': (0, 0, 1, 1),
'up': (0, 0, 1, 1),
'rest': (1, 1, 1, 1),
'white': (1, 1, 1, 1),
'magenta': (0, 1, 0, 0),
'brown': (29, 74, 100, 24),
'yellow': (0, 0, 1, 0),
'down': (1, 0, 0, 1),
'linear_red': (1, 0, 0, 1),
'circular': (0, 0, 1, 1),
'wrist_ext': (1, 0, 0, 1),
}
###### Options to select in interface ######
plant_type_options = ['IsMore','ArmAssist', 'ReHand', 'DummyPlant', 'IsMorePlantHybridBMISoftSafety']
DoF_control_options = ['IsMore','ArmAssist', 'ReHand', 'ReHand-Pronosup', 'ReHand-Pronosup-FingersDisabled']
DoF_target_options = ['IsMore','ArmAssist', 'ReHand', 'ReHand-Pronosup', 'ReHand-Pronosup-FingersDisabled']
arm_side_options = ['left','right']
clda_update_methods = ['RML', 'Smoothbatch', ]
languages_list = ['english', 'deutsch', 'castellano', 'euskara']
speed_options = ['very-low','low', 'medium','high']
fb_input_options = ['standard', 'andreita']
channel_list_options = brainamp_channel_lists.channel_list_options
#---------------------------------------------- DEFINITIONS ###############################################
################################################ FUNCTIONS ################################################
def check_plant_and_DoFs(plant_type, DoF_control, DoF_target):
'''
    Function to check whether the connected plant_type and the DoFs selected for control and target accomplishment are compatible.
    Output: a list [DoF_target_idx_init, DoF_target_idx_end, DoF_not_control_idx_init, DoF_not_control_idx_end] with the state indexes of the selected DoFs, depending on the connected plant_type.
'''
plant_and_DoFs_correct = True
#check if the selected DoF_control is possible with the selected plant_type
if plant_type in ['ArmAssist', 'ReHand']:
if DoF_control.startswith(plant_type) == False:
plant_and_DoFs_correct = False
print "DoF_control selected not possible for the selected plant_type"
#check if the selected DoF_target is possible with the selected DoF_control
if DoF_control != 'IsMore':
if DoF_target.startswith(DoF_control) == False:
plant_and_DoFs_correct = False
print "DoF_target selected not possible for the selected DoF_control"
if plant_and_DoFs_correct == True:
# define DoF target indexes for each case
if DoF_target == 'ArmAssist' and plant_type in ['ArmAssist', 'IsMore']:
DoF_target_idx_init = 0
DoF_target_idx_end = 3
elif DoF_target == 'ReHand' and plant_type == 'IsMore':
DoF_target_idx_init = 3
DoF_target_idx_end = 7
elif DoF_target in ['ReHand-Pronosup', 'ReHand-Pronosup-FingersDisabled'] and plant_type == 'IsMore':
DoF_target_idx_init = 6
DoF_target_idx_end = 7
elif DoF_target == 'ReHand' and plant_type == 'ReHand':
DoF_target_idx_init = 0
DoF_target_idx_end = 3
elif DoF_target in ['ReHand-Pronosup', 'ReHand-Pronosup-FingersDisabled'] and plant_type == 'ReHand':
DoF_target_idx_init = 3
DoF_target_idx_end = 4
elif DoF_target == 'IsMore' and plant_type == 'IsMore':
DoF_target_idx_init = 0
DoF_target_idx_end = 7
# define DoF control indexes for each case
if DoF_control == 'ArmAssist' and plant_type =='IsMore':
DoF_not_control_idx_init = 3
DoF_not_control_idx_end = 7
elif DoF_control == 'ArmAssist' and plant_type == 'ArmAssist':
DoF_not_control_idx_init = np.nan
DoF_not_control_idx_end = np.nan
elif DoF_control == 'ReHand' and plant_type == 'IsMore':
DoF_not_control_idx_init = 0
DoF_not_control_idx_end = 3
elif DoF_control in ['ReHand-Pronosup', 'ReHand-Pronosup-FingersDisabled'] and plant_type == 'IsMore':
DoF_not_control_idx_init = 0
DoF_not_control_idx_end = 6
elif DoF_control == 'ReHand' and plant_type == 'ReHand':
DoF_not_control_idx_init = np.nan
DoF_not_control_idx_end = np.nan
elif DoF_control in ['ReHand-Pronosup', 'ReHand-Pronosup-FingersDisabled'] and plant_type == 'ReHand':
DoF_not_control_idx_init = 0
DoF_not_control_idx_end = 3
elif DoF_control == 'IsMore' and plant_type == 'IsMore':
DoF_not_control_idx_init = np.nan
DoF_not_control_idx_end = np.nan
else:
print "ERROR!!! Plant and selected target or control DoFs incorrect!!!"
return [DoF_target_idx_init,DoF_target_idx_end, DoF_not_control_idx_init,DoF_not_control_idx_end]
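# Illustrative usage sketch (added commentary; the return value follows the
# branches above):
#     idx = check_plant_and_DoFs('IsMore', 'ArmAssist', 'ArmAssist')
#     # -> [0, 3, 3, 7]: target DoFs are states 0..2, states 3..6 are not controlled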
############################################## BASIC CLASSES ################################################
class IsMoreBase(WindowDispl2D):
'''
A base class for all IsMore tasks. Creates the appropriate plant object
and updates the display of the plant at every iteration of the task.
'''
window_size = traits.Tuple((500, 281), desc='Size of window to display the plant position/angle')
# window_size = traits.Tuple((1920, 1080), desc='Size of window to display the plant position/angle')
starting_pos = settings.starting_pos
update_rest = True
plant_type = traits.OptionsList(*plant_type_options, bmi3d_input_options=plant_type_options, desc='Device connected, data will be acquired from this plant')
#simulate = traits.Bool(False, desc='Use simulation "plant" without UDP communication')
arm_side = traits.OptionsList(*arm_side_options, bmi3d_input_options=arm_side_options, desc='arm side wearing the exo')
show_FB_window = traits.OptionsList(*fb_input_options, bmi3d_input_options=fb_input_options, desc='')
exclude_parent_traits = ["show_environment"]
def __init__(self, *args, **kwargs):
super(IsMoreBase, self).__init__(*args, **kwargs)
self.ssm = ismore_bmi_lib.SSM_CLS_DICT[self.plant_type]()
self.ssm_states = [s.name for s in self.ssm.states]
self.pos_states = [s.name for s in self.ssm.states if s.order == 0]
self.vel_states = [s.name for s in self.ssm.states if s.order == 1]
print 'self.vel_states', self.vel_states
if 0: #self.simulate:
            # use locally running simulated ArmAssist and/or ReHand
# for which we can magically set the initial position
self.plant = plants.NONUDP_PLANT_CLS_DICT[self.plant_type]()
self.plant.set_pos(self.starting_pos[self.pos_states].values)
else:
self.plant = plants.UDP_PLANT_CLS_DICT[self.plant_type]()
print 'self.pos_states', self.pos_states
print 'plant_type', self.plant_type
self.plant_pos_raw = pd.Series(self.plant.get_pos_raw(), self.pos_states)
self.plant_pos = pd.Series(self.plant.get_pos(), self.pos_states)
self.plant_vel_raw = pd.Series(self.plant.get_vel_raw(), self.vel_states)
self.plant_vel = pd.Series(self.plant.get_vel(), self.vel_states)
self.add_dtype('plant_pos', 'f8', (len(self.plant_pos_raw),))
#self.add_dtype('plant_pos_filt', 'f8', (len(self.plant_pos),))
self.add_dtype('plant_vel', 'f8', (len(self.plant_vel_raw),))
#self.add_dtype('plant_vel_filt', 'f8', (len(self.plant_vel),))
self.add_dtype('plant_type', np.str_, 40)
self.add_dtype('ts', 'f8', (1,))
# self.add_dtype('DoF_control', np.str_, 40)
# self.add_dtype('DoF_target', np.str_, 40)
self.init_plant_display()
self.update_plant_display()
pygame.mixer.init()
#if a targets_matrix is being used in the task, show the target positions in the display window
if 'targets_matrix' in locals()['kwargs']:
self.display_targets()
else:
print 'no targets matrix'
def _set_workspace_size(self):
MAT_SIZE = settings.MAT_SIZE
border = 10. # TODO -- difference between this and self.display_border?
self.workspace_bottom_left = np.array([ 0. - border,
0. - border])
self.workspace_top_right = np.array([MAT_SIZE[0] + border,
MAT_SIZE[1] + border])
def init(self):
self.plant.init()
super(IsMoreBase, self).init()
if settings.WATCHDOG_ENABLED:
self.plant.watchdog_enable(settings.WATCHDOG_TIMEOUT)
def run(self):
self.plant.start()
try:
super(IsMoreBase, self).run()
finally:
self.plant.stop()
def _play_sound(self, fpath, fname):
print 'play sound: ', fname
if hasattr(self, 'replace_ya_w_pausa'):
if self.replace_ya_w_pausa == 'Yes':
if fname[0] == 'go':
fname = ['rest']
for filename in fname:
# print 'filename ', filename
if filename == 'circular':
filename = 'circular_big'
sound_fname = os.path.join(fpath, filename + '.wav')
pygame.mixer.music.load(sound_fname)
pygame.mixer.music.play()
elif '_' in filename or ' ' in filename:
# First see if there's a file with exact name:
if os.path.isfile(os.path.join(fpath, filename + '.wav')):
pygame.mixer.music.load(os.path.join(fpath, filename + '.wav'))
pygame.mixer.music.play()
else:
# try:
# Next try replacing with spaces:
# Red to green
if '_' in filename:
filename = filename.replace('_', ' ')
key = ' '
elif ' ' in filename:
filename = filename.replace(' ', '_')
key = '_'
if os.path.isfile(os.path.join(fpath, filename + '.wav')):
pygame.mixer.music.load(os.path.join(fpath, filename + '.wav'))
pygame.mixer.music.play()
else:
#try:
# Next try splitting up the names:
fi1 = filename.find(key)
filename1 = filename[:fi1]
if os.path.isfile(os.path.join(fpath, filename1 + '.wav')):
#sound_fname = os.path.join(fpath, filename1 + '.wav')
pygame.mixer.music.load(os.path.join(fpath, filename1 + '.wav'))
pygame.mixer.music.play()
x = 0
while pygame.mixer.music.get_busy():
x += 1
filename2 = filename[filename.find(key)+1:]
if os.path.isfile(os.path.join(fpath, filename2 + '.wav')):
pygame.mixer.music.load(os.path.join(fpath, filename2 + '.wav'))
pygame.mixer.music.play()
else:
# 3 legged:
fi2 = filename.find(key, fi1+1)
filename2 = filename[fi1+1:fi2]
filename3 = filename[fi2+1:]
sound_fname = os.path.join(fpath, filename2 + '.wav')
pygame.mixer.music.load(sound_fname)
pygame.mixer.music.play()
y = 0
while pygame.mixer.music.get_busy():
y+=1
sound_fname = os.path.join(fpath, filename3 + '.wav')
pygame.mixer.music.load(sound_fname)
pygame.mixer.music.play()
else:
print 'cant play: ', filename
else:
sound_fname = os.path.join(fpath, filename + '.wav')
pygame.mixer.music.load(sound_fname)
pygame.mixer.music.play()
def _cycle(self):
self.task_data['ts']= time.time()
self.plant.write_feedback()
if settings.VERIFY_PLANT_DATA_ARRIVAL:
self.verify_plant_data_arrival(settings.VERIFY_PLANT_DATA_ARRIVAL_TIME)
super(IsMoreBase, self)._cycle()
# Note: All classes that inherit from this class should probably call
# the following code at some point during their _cycle methods
# self.plant_pos[:] = self.plant.get_pos()
# self.plant_vel[:] = self.plant.get_vel()
# self.update_plant_display()
# self.task_data['plant_pos'] = self.plant_pos.values
# self.task_data['plant_vel'] = self.plant_vel.values
def verify_plant_data_arrival(self, n_secs):
time_since_started = time.time() - self.plant.ts_start_data
last_ts_arrival = self.plant.last_data_ts_arrival()
if self.plant_type in ['ArmAssist', 'ReHand']:
if time_since_started > n_secs:
if last_ts_arrival == 0:
print 'No %s data has arrived at all' % self.plant_type
else:
t_elapsed = time.time() - last_ts_arrival
if t_elapsed > n_secs:
print 'No %s data in the last %.1f s' % (self.plant_type, t_elapsed)
# Run IsMore application automatically in a second terminal
# if (self.plant_type == 'ArmAssist' and t_elapsed > 3):
| |
#!/usr/bin/env python3 -u -B
import argparse
import contextlib
import errno
import io
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), os.path.pardir))
import backups_lib
__package__ = backups_lib.__package__
from . import backups_main
from . import checkpoint_lib
from . import lib
from . import lib_test_util
from . import test_main
from .test_util import AssertEquals
from .test_util import AssertLinesEqual
from .test_util import AssertNotEquals
from .test_util import BaseTestCase
from .test_util import CreateDir
from .test_util import CreateFile
from .test_util import CreateSymlink
from .test_util import DeleteFileOrDir
from .test_util import DoBackupsMain
from .test_util import SetMTime
from .test_util import SetPacificTimezone
from .test_util import SetXattr
from .test_util import TempDir
from .test_util import Xattr
from .lib_test_util import ApplyFakeDiskImageHelperLevel
from .lib_test_util import CollapseApfsOperationsInOutput
from .lib_test_util import CreateGoogleDriveRemoteFile
from .lib_test_util import DoDumpManifest
from .lib_test_util import DoVerifyManifest
from .lib_test_util import GetManifestItemized
from .lib_test_util import HandleGetPass
from .lib_test_util import HandleGoogleDriveRemoteFiles
from .lib_test_util import SetHdiutilCompactOnBatteryAllowed
from .lib_test_util import SetOmitUidAndGidInPathInfoToString
from .checkpoint_lib_test_util import DoCreate
def RsyncPaths(from_path, to_path, checksum=True, dry_run=False,
filters=checkpoint_lib.STAGED_BACKUP_DEFAULT_FILTERS):
cmd = [lib.GetRsyncBin(),
'-aXi',
'--delete',
'--numeric-ids',
'--no-specials',
'--no-devices']
if checksum:
cmd.append('--checksum')
if dry_run:
cmd.append('-n')
if filters is not None:
for a_filter in filters:
cmd.append(a_filter.GetRsyncArg())
cmd.append(lib.MakeRsyncDirname(from_path))
cmd.append(lib.MakeRsyncDirname(to_path))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
text=True)
output = []
with p.stdout:
for line in p.stdout:
line = line.strip()
if not line:
continue
pieces = line.split(None, 1)
assert len(pieces) == 2
output.append((lib.DecodeRsyncEncodedString(pieces[1]), pieces[0]))
if p.wait():
print('\n'.join([ '%s %s' % (change, path) for (path, change) in output ]))
raise Exception('Rsync failed')
output.sort()
return [ '%s %s' % (change, path) for (path, change) in output ]
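# (Added commentary.) '-aXi' = archive mode, preserve extended attributes, and
# itemize changes; each returned entry therefore has the form
# '<itemized-change-string> <path>', e.g. '>f+++++++ par!/f2' as asserted in the
# tests below.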
def AssertEmptyRsync(from_path, to_path, checksum=True):
AssertLinesEqual(RsyncPaths(from_path, to_path, checksum=checksum, dry_run=True), [])
def AssertBasisInfoFileEquals(metadata_path, basis_path=None):
basis_info_path = os.path.join(metadata_path, lib.BASIS_INFO_FILENAME)
if basis_path is None:
AssertEquals(False, os.path.exists(basis_info_path))
return
else:
AssertEquals(True, os.path.exists(basis_info_path))
with open(basis_info_path) as in_file:
json_data = json.load(in_file)
AssertEquals(os.path.basename(basis_path), json_data['basis_filename'])
def AssertFileContents(expected_contents, path):
with open(path, 'r') as in_f:
AssertEquals(expected_contents, in_f.read())
def AssertCheckpointsList(checkpoints_dir, expected_list):
AssertLinesEqual(
expected_list,
[p for p in sorted(os.listdir(checkpoints_dir))
if checkpoint_lib.CheckpointPathParts.IsMatchingPath(p)])
def VerifyCheckpointContents(manifest, root_dir, prev_manifest=None):
expected_paths = set()
for path in manifest.GetPaths():
manifest_path_info = manifest.GetPathInfo(path)
prev_path_info = None
if prev_manifest:
prev_path_info = prev_manifest.GetPathInfo(path)
itemized = lib.PathInfo.GetItemizedDiff(prev_path_info, manifest_path_info)
if itemized.HasDiffs():
expected_paths.add(path)
for path in list(expected_paths):
parent_dir = os.path.dirname(path)
while parent_dir:
expected_paths.add(parent_dir)
parent_dir = os.path.dirname(parent_dir)
for path in expected_paths:
manifest_path_info = manifest.GetPathInfo(path)
full_path = os.path.join(root_dir, path)
src_path_info = lib.PathInfo.FromPath(path, full_path)
if src_path_info.HasFileContents():
src_path_info.sha256 = lib.Sha256(full_path)
itemized = lib.PathInfo.GetItemizedDiff(src_path_info, manifest_path_info)
if itemized.HasDiffs():
raise Exception('Mismatched checkpoint contents: %s' % itemized)
def GetManifestDiffItemized(manifest1, manifest2):
itemized_outputs = []
for itemized in manifest2.GetDiffItemized(manifest1):
itemized_outputs.append(str(itemized))
return itemized_outputs
def GetManifestProtoDump(manifest):
proto_outputs = []
for path in manifest.GetPaths():
proto_outputs.append(str(manifest.GetPathInfo(path).ToProto()))
return proto_outputs
def CreateGoogleDriveRemoteFile(parent_dir, filename):
path = CreateFile(parent_dir, filename, contents='IGNORE')
xattr_data = Xattr(path)
xattr_data[lib.GOOGLE_DRIVE_MIME_TYPE_XATTR_KEY] = (
('%sdocument' % lib.GOOGLE_DRIVE_REMOTE_FILE_MIME_TYPE_PREFIX).encode('ascii'))
return path
def DoApply(src_checkpoint_path, dest_root, dry_run=False, expected_output=[]):
args = []
if dry_run:
args.append('--dry-run')
args.extend(['apply-checkpoint',
'--checksum-all',
'--src-checkpoint-path', src_checkpoint_path,
'--dest-root', dest_root])
output = io.StringIO()
AssertEquals(backups_main.Main(args, output), True)
output_lines = []
for line in output.getvalue().strip().split('\n'):
if not line:
continue
output_lines.append(line)
output.close()
AssertLinesEqual(output_lines, expected_output)
def DoStrip(checkpoint_path, defragment=True, defragment_iterations=None,
dry_run=False, expected_output=[]):
cmd_args = ['strip-checkpoint',
'--checkpoint-path', checkpoint_path]
if not defragment:
cmd_args.append('--no-defragment')
if defragment_iterations is not None:
cmd_args.extend(['--defragment-iterations', str(defragment_iterations)])
output_lines = DoBackupsMain(cmd_args, dry_run=dry_run, expected_output=None)
output_lines = CollapseApfsOperationsInOutput(output_lines)
AssertLinesEqual(output_lines, expected_output)
class CreateDryRunTestCase(BaseTestCase):
def test(self):
with TempDir() as test_dir:
with ApplyFakeDiskImageHelperLevel() as should_run:
if should_run:
self.RunTest(test_dir)
def RunTest(self, test_dir):
checkpoints_dir = CreateDir(test_dir, 'checkpoints')
src_root = CreateDir(test_dir, 'src')
parent1 = CreateDir(src_root, 'par!')
file1 = CreateFile(parent1, 'f_\r', contents='small contents')
file2 = CreateFile(parent1, 'f2')
file3 = CreateFile(parent1, 'f3')
SetMTime(parent1)
SetMTime(src_root)
DoCreate(
src_root, checkpoints_dir, '1', dry_run=True,
expected_output=['>d+++++++ .',
'>d+++++++ par!',
'>f+++++++ par!/f2',
'>f+++++++ par!/f3',
'>f+++++++ par!/f_\\r',
'Transferring 5 paths (14b)'])
AssertLinesEqual(os.listdir(checkpoints_dir), [])
checkpoint1, manifest1 = DoCreate(
src_root, checkpoints_dir, '1',
expected_output=['>d+++++++ .',
'>d+++++++ par!',
'>f+++++++ par!/f2',
'>f+++++++ par!/f3',
'>f+++++++ par!/f_\\r',
'Transferring 5 paths (14b)'])
try:
VerifyCheckpointContents(manifest1, checkpoint1.GetContentRootPath())
AssertLinesEqual(GetManifestItemized(manifest1),
['.d....... .',
'.d....... par!',
'.f....... par!/f2',
'.f....... par!/f3',
'.f....... par!/f_\\r'])
AssertEmptyRsync(src_root, checkpoint1.GetContentRootPath())
finally:
checkpoint1.Close()
AssertCheckpointsList(checkpoints_dir, ['1.sparseimage'])
SetMTime(file1, None)
file2 = CreateFile(parent1, 'f2', contents='abc')
DoCreate(
src_root, checkpoints_dir, '2', dry_run=True,
last_checkpoint_path=checkpoint1.GetImagePath(),
expected_output=['>fcs..... par!/f2',
'.f..t.... par!/f_\\r',
'Transferring 2 of 5 paths (17b of 17b)'])
AssertCheckpointsList(checkpoints_dir, ['1.sparseimage'])
class CreateTestCase(BaseTestCase):
def test(self):
with ApplyFakeDiskImageHelperLevel(
min_fake_disk_image_level=lib_test_util.FAKE_DISK_IMAGE_LEVEL_HIGH, test_case=self) as should_run:
if should_run:
with TempDir() as test_dir:
self.RunTest(test_dir)
def RunTest(self, test_dir):
checkpoints_dir = CreateDir(test_dir, 'checkpoints')
src_root = CreateDir(test_dir, 'src')
parent1 = CreateDir(src_root, 'par!')
file1 = CreateFile(parent1, 'f_\r')
file2 = CreateFile(parent1, 'f2')
file3 = CreateFile(parent1, 'f3')
file6_from = CreateFile(parent1, 'file6_from', contents='file6_contents')
file_skip1 = CreateFile(src_root, 'SKIP1')
file_skip1 = CreateFile(parent1, '2.skp')
CreateFile(src_root, checkpoint_lib.STAGED_BACKUP_DIR_MERGE_FILENAME,
contents=['exclude /SKIP1',
'exclude *.skp'])
checkpoint_manifest_only, manifest_only = DoCreate(
src_root, checkpoints_dir, 'manifest_only',
manifest_only=True,
expected_output=['>d+++++++ .',
'>f+++++++ .staged_backup_filter',
'>d+++++++ par!',
'>f+++++++ par!/f2',
'>f+++++++ par!/f3',
'>f+++++++ par!/f_\\r',
'>f+++++++ par!/file6_from',
'Transferring 7 paths (43b)'])
try:
AssertLinesEqual(GetManifestItemized(manifest_only),
['.d....... .',
'.f....... .staged_backup_filter',
'.d....... par!',
'.f....... par!/f2',
'.f....... par!/f3',
'.f....... par!/f_\\r',
'.f....... par!/file6_from'])
finally:
checkpoint_manifest_only.Close()
checkpoint1, manifest1 = DoCreate(
src_root, checkpoints_dir, '1',
expected_output=['>d+++++++ .',
'>f+++++++ .staged_backup_filter',
'>d+++++++ par!',
'>f+++++++ par!/f2',
'>f+++++++ par!/f3',
'>f+++++++ par!/f_\\r',
'>f+++++++ par!/file6_from',
'Transferring 7 paths (43b)'])
try:
VerifyCheckpointContents(manifest1, checkpoint1.GetContentRootPath())
AssertLinesEqual(GetManifestItemized(manifest1),
['.d....... .',
'.f....... .staged_backup_filter',
'.d....... par!',
'.f....... par!/f2',
'.f....... par!/f3',
'.f....... par!/f_\\r',
'.f....... par!/file6_from'])
AssertEmptyRsync(src_root, checkpoint1.GetContentRootPath())
AssertBasisInfoFileEquals(checkpoint1.GetMetadataPath(), None)
finally:
checkpoint1.Close()
checkpoint2, manifest2 = DoCreate(src_root, checkpoints_dir, '2',
last_checkpoint_path=checkpoint1.GetImagePath(),
readonly=False)
try:
VerifyCheckpointContents(manifest2, checkpoint2.GetContentRootPath(), prev_manifest=manifest1)
AssertLinesEqual(GetManifestDiffItemized(manifest1, manifest2), [])
AssertLinesEqual(RsyncPaths(src_root, checkpoint2.GetContentRootPath()),
['.d..t....... ./',
'>f++++++++++ .staged_backup_filter',
'cd++++++++++ par!/',
'>f++++++++++ par!/f2',
'>f++++++++++ par!/f3',
'>f++++++++++ par!/f_\r',
'>f++++++++++ par!/file6_from'])
AssertBasisInfoFileEquals(checkpoint2.GetMetadataPath(), checkpoint1.GetImagePath())
DoVerifyManifest(src_root, manifest2.GetPath(),
expected_success=False,
expected_output=['*f.delete SKIP1',
'*f.delete par!/2.skp'])
DoVerifyManifest(checkpoint2.GetContentRootPath(), manifest2.GetPath())
finally:
checkpoint2.Close()
SetXattr(src_root, 'example', b'example_value')
SetXattr(src_root, 'example2', b'example_value2')
SetMTime(file1, None)
file2 = CreateFile(parent1, 'f2', contents='abc')
checkpoint3, manifest3 = DoCreate(
src_root, checkpoints_dir, '3',
last_checkpoint_path=checkpoint2.GetImagePath(),
expected_output=['.d......x .',
'>fcs..... par!/f2',
'.f..t.... par!/f_\\r',
'Transferring 3 of 7 paths (3b of 46b)'],
readonly=False)
try:
VerifyCheckpointContents(manifest3, checkpoint3.GetContentRootPath(), prev_manifest=manifest2)
AssertLinesEqual(GetManifestItemized(manifest3),
GetManifestItemized(manifest1))
AssertLinesEqual(GetManifestDiffItemized(manifest2, manifest3),
['.d......x .',
'>fcs..... par!/f2',
'.f..t.... par!/f_\\r'])
AssertBasisInfoFileEquals(checkpoint3.GetMetadataPath(), checkpoint2.GetImagePath())
DoVerifyManifest(checkpoint3.GetContentRootPath(), manifest3.GetPath(),
expected_success=False,
expected_output=['>f+++++++ .staged_backup_filter',
'>f+++++++ par!/f3',
'>f+++++++ par!/file6_from'])
checkpoint2 = checkpoint_lib.Checkpoint.Open(checkpoint2.GetImagePath(), readonly=False)
try:
AssertLinesEqual(RsyncPaths(src_root, checkpoint2.GetContentRootPath()),
['.d........x. ./',
'>fcs........ par!/f2',
'.f..t....... par!/f_\r'])
finally:
checkpoint2.Close()
AssertLinesEqual(RsyncPaths(src_root, checkpoint3.GetContentRootPath()),
['>f++++++++++ .staged_backup_filter',
'>f++++++++++ par!/f3',
'>f++++++++++ par!/file6_from'])
finally:
checkpoint3.Close()
file2 = CreateFile(parent1, 'f2', contents='def')
SetMTime(parent1, 1510000000)
parent2 = CreateDir(src_root, 'par2')
file2b = CreateFile(parent2, 'f2b', contents='def')
DeleteFileOrDir(file6_from)
file6_to = CreateFile(parent1, 'file6_to', contents='file6_contents')
file6_to2 = CreateFile(parent1, 'file6_to2', contents='file6_contents')
file6_to3 = CreateFile(parent1, 'file6_to3', contents='file6_contents_notmatch')
def PreSyncContentsTestHook(checkpoint_creator):
CreateFile(parent1, 'f2', contents='ghi')
SetXattr(parent1, 'example', b'example_value_5')
SetMTime(parent1, 1520000000)
CreateFile(parent2, 'f2b', contents='jkl')
SetMTime(parent2, 1520000000)
SetMTime(file6_to2, 1520000000)
file6_to3 = CreateFile(parent1, 'file6_to3', contents='file6_contents')
checkpoint_lib.CheckpointCreator.PRE_SYNC_CONTENTS_TEST_HOOK = PreSyncContentsTestHook
try:
checkpoint4, manifest4 = DoCreate(
src_root, checkpoints_dir, '4',
last_checkpoint_path=checkpoint3.GetImagePath(),
readonly=False,
expected_output=['.d..t.... par!',
'>fc...... par!/f2',
'*f.delete par!/file6_from',
' replaced by duplicate: .f....... par!/file6_to',
' replaced by duplicate: .f....... par!/file6_to2',
'>f+++++++ par!/file6_to',
' replacing duplicate: .f....... par!/file6_from',
'>f+++++++ par!/file6_to2',
' replacing duplicate: .f....... par!/file6_from',
'>f+++++++ par!/file6_to3',
'>d+++++++ par2',
'>f+++++++ par2/f2b',
'*** Warning: Paths changed since syncing, checking...',
'.d..t...x par!',
'>fc...... par!/f2',
'>f+++++++ par!/file6_to2',
' replacing similar: .f..t.... par!/file6_from',
'>f+++++++ par!/file6_to3',
' replacing duplicate: .f....... par!/file6_from',
'>d+++++++ par2',
'>f+++++++ par2/f2b',
'Transferring 13 of 17 paths (91b of 120b)'])
try:
VerifyCheckpointContents(manifest4, checkpoint4.GetContentRootPath(), prev_manifest=manifest3)
AssertLinesEqual(GetManifestDiffItemized(manifest3, manifest4),
['.d..t...x par!',
'>fc...... par!/f2',
'*f.delete par!/file6_from',
'>f+++++++ par!/file6_to',
'>f+++++++ par!/file6_to2',
'>f+++++++ par!/file6_to3',
'>d+++++++ par2',
'>f+++++++ par2/f2b'])
AssertLinesEqual(RsyncPaths(src_root, checkpoint4.GetContentRootPath()),
['>f++++++++++ .staged_backup_filter',
'>f++++++++++ par!/f3',
'>f++++++++++ par!/f_\r'])
AssertBasisInfoFileEquals(checkpoint4.GetMetadataPath(), checkpoint3.GetImagePath())
DoVerifyManifest(checkpoint4.GetContentRootPath(), manifest4.GetPath())
finally:
checkpoint4.Close()
finally:
checkpoint_lib.CheckpointCreator.PRE_SYNC_CONTENTS_TEST_HOOK = None
file4 = CreateFile(parent1, 'f4')
SetMTime(parent1, 1510000000)
DeleteFileOrDir(file6_to2)
DeleteFileOrDir(file6_to3)
def PreSyncContentsTestHook(checkpoint_creator):
file4_stat = os.lstat(os.path.join(parent1, 'f4'))
if file4_stat.st_mtime == 1500000000:
CreateFile(parent1, 'f4', mtime=1520000000)
elif file4_stat.st_mtime == 1520000000:
CreateFile(parent1, 'f4', mtime=1530000000)
SetMTime(parent1, 1530000000)
checkpoint_lib.CheckpointCreator.PRE_SYNC_CONTENTS_TEST_HOOK = PreSyncContentsTestHook
try:
checkpoint5, manifest5 = DoCreate(
src_root, checkpoints_dir, '5',
last_checkpoint_path=checkpoint4.GetImagePath(),
readonly=False,
expected_output=['.d..t.... par!',
'>f+++++++ par!/f4',
'*f.delete par!/file6_to2',
' replaced by similar: .f..t.... par!/file6_to',
'*f.delete par!/file6_to3',
' replaced by duplicate: .f....... par!/file6_to',
'*** Warning: Paths changed since syncing, checking...',
'>f+++++++ par!/f4',
'*** Warning: Paths changed since syncing, checking...',
'.d..t.... par!',
'>f+++++++ par!/f4',
'Transferring 5 of 13 paths (0b of 49b)'])
try:
VerifyCheckpointContents(manifest5, checkpoint5.GetContentRootPath(), prev_manifest=manifest4)
AssertLinesEqual(GetManifestDiffItemized(manifest4, manifest5),
['.d..t.... par!',
'>f+++++++ par!/f4',
'*f.delete par!/file6_to2',
'*f.delete par!/file6_to3'])
AssertLinesEqual(RsyncPaths(src_root, checkpoint5.GetContentRootPath()),
['>f++++++++++ .staged_backup_filter',
'>f++++++++++ par!/f2',
'>f++++++++++ par!/f3',
'>f++++++++++ par!/f_\r',
'>f++++++++++ par!/file6_to',
'cd++++++++++ par2/',
'>f++++++++++ par2/f2b'])
AssertBasisInfoFileEquals(checkpoint5.GetMetadataPath(), checkpoint4.GetImagePath())
DoVerifyManifest(checkpoint5.GetContentRootPath(), manifest5.GetPath())
finally:
checkpoint5.Close()
finally:
checkpoint_lib.CheckpointCreator.PRE_SYNC_CONTENTS_TEST_HOOK = None
file5 = CreateFile(src_root, 'f5')
SetMTime(src_root, 1510000000)
class CreateWithFilterMergeTestCase(BaseTestCase):
def test(self):
with TempDir() as test_dir:
with ApplyFakeDiskImageHelperLevel() as should_run:
if should_run:
self.RunTest(test_dir)
def RunTest(self, test_dir):
checkpoints_dir = CreateDir(test_dir, 'checkpoints')
src_root = CreateDir(test_dir, 'src')
parent1 = CreateDir(src_root, 'par')
parent_skip1 = CreateDir(src_root, 'par_skip')
file1 = CreateFile(parent1, 'f1')
file2 = CreateFile(parent1, 'f2')
file3 = CreateFile(parent_skip1, 'f3')
filter_merge_path = CreateFile(
test_dir, 'filter_merge',
contents=['exclude *.skp',
'include /par',
'include /par/**',
'exclude *'])
CreateFile(parent1, 'SKIP1')
CreateFile(parent1, '2.skp')
CreateFile(parent1, checkpoint_lib.STAGED_BACKUP_DIR_MERGE_FILENAME,
contents=['exclude /SKIP1'])
checkpoint1, manifest1 = DoCreate(
src_root, checkpoints_dir, '1',
filter_merge_path=filter_merge_path,
expected_output=['>d+++++++ .',
'>d+++++++ par',
'>f+++++++ par/.staged_backup_filter',
'>f+++++++ par/f1',
'>f+++++++ par/f2',
'Transferring 5 paths (15b)'])
try:
VerifyCheckpointContents(manifest1, checkpoint1.GetContentRootPath())
AssertLinesEqual(GetManifestItemized(manifest1),
['.d....... .',
'.d....... par',
'.f....... par/.staged_backup_filter',
'.f....... par/f1',
'.f....... | |
limitations.
event_parser (Parser): parses log stream into events and saves them to
event file.
**kwargs (dict): additional kwargs to pass onto the communication setup.
Returns:
SwitchboardDefault: instance of SwitchboardDefault.
Raises:
SwitchboardCreationError: if communication type not recognized.
"""
if communication_type == "GENERIC_PROCESS":
# Backwards compatibility with VDM
communication_type = "PtyProcessComms"
if communication_type not in extensions.communication_types:
raise errors.SwitchboardCreationError(
device_name,
"Communication type {!r} is not in supported types: {}".format(
communication_type, extensions.communication_types.keys()))
if not log_path:
log_path = self.create_log_path(device_name)
logger.info("{} logging to file {}", device_name, log_path)
comm_type_class = extensions.communication_types[communication_type]
method_args = inspect.getfullargspec(
comm_type_class.__init__).args[1:] # remove self
bad_keys = set(kwargs.keys()) - set(method_args)
if bad_keys:
raise errors.SwitchboardCreationError(
device_name,
"Communication Type {} does not support args {}. Supported: {}"
.format(communication_type, bad_keys, method_args))
try:
comm_inst = comm_type_class(communication_address, **kwargs)
switchboard_kwargs = comm_inst.get_switchboard_kwargs()
additional_kwargs = {
"device_name": device_name,
"log_path": log_path,
"force_slow": force_slow,
"parser": event_parser,
"exception_queue": self._exception_queue,
"max_log_size": self.max_log_size,
}
switchboard_kwargs.update(additional_kwargs)
return switchboard.SwitchboardDefault(**switchboard_kwargs)
except Exception as err:
raise errors.SwitchboardCreationError(device_name, repr(err))
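  # A minimal usage sketch of the switchboard-creation method above (the method
  # name and all argument values below are assumptions, not taken from this file):
  #   switchboard = manager.create_switchboard(
  #       communication_address="/dev/ttyUSB0",
  #       communication_type="SerialComms",
  #       device_name="device-1234",
  #       event_parser=parser)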
def delete(self, device_name, save_changes=True):
"""Delete the device from config dict and file.
Args:
device_name (str): name, serial_number, alias, or adb_serial of the
device.
save_changes (bool): if True, updates the config files.
Raises:
DeviceError: Device not found.
Returns:
None: if save_changes is True.
tuple[dict, dict]: if save_changes is False, returns the new device
configs: (devices, device_options).
"""
devices = copy.deepcopy(self.persistent_dict)
device_options = copy.deepcopy(self.options_dict)
other_devices = copy.deepcopy(self.other_persistent_dict)
other_device_options = copy.deepcopy(self.other_options_dict)
device_name_arg = device_name
device_name = self._get_device_name(device_name, raise_error=True)
if device_name in devices and device_name in device_options:
del devices[device_name]
del device_options[device_name]
elif device_name in other_devices and device_name in other_device_options:
del other_devices[device_name]
del other_device_options[device_name]
else:
raise errors.DeviceError(
"Unable to find device {}".format(device_name_arg))
device_config, device_options_config = self._make_device_configs(
devices, other_devices, device_options, other_device_options)
if save_changes: # save and reload the config.
self._save_config_to_file(device_config, self.device_file_name)
self._save_config_to_file(device_options_config,
self.device_options_file_name)
self.reload_configuration()
logger.info("Deleted {}".format(device_name_arg))
else:
return (device_config, device_options_config)
def detect(self,
force_overwrite=False,
static_ips=None,
log_directory=None,
save_changes=True,
device_configs=None):
"""Detect new devices not present in config files.
Args:
force_overwrite (bool): Erase the current configs completely and
re-detect everything.
static_ips (list): list of static ips to detect.
log_directory (str): alternative location to store log from default.
save_changes (bool): if True, updates the config files.
device_configs (None or tuple[dict, dict]): device configs
(persistent, options) to pass to the device detector. If None, uses
the current Manager configs.
Returns:
None: if save_changes is True.
tuple[dict, dict]: if save_changes is False, returns
the new device configs: (devices, device_options).
Note:
Overwrite saves the files to a backup directory.
"""
if device_configs is None:
device_config, options_config = self._make_device_configs(
self.persistent_dict, self.other_persistent_dict, self.options_dict,
self.other_options_dict)
else:
device_config, options_config = device_configs
if not static_ips:
static_ips = []
elif isinstance(static_ips, str):
static_ips = [ip_addr for ip_addr in static_ips.split(",") if ip_addr]
if not log_directory:
log_directory = self.log_directory
if force_overwrite:
comm_ports = [
a_dict["persistent"].get("console_port_name", "")
for name, a_dict in self._devices.items()
]
static_ips += [
comm_port for comm_port in comm_ports
if host_utils.is_static_ip(comm_port)
]
if save_changes:
self.overwrite_configs()
device_config, options_config = self._make_device_configs({}, {}, {}, {})
detector = device_detector.DeviceDetector(
manager=self,
log_directory=log_directory,
persistent_configs=device_config,
options_configs=options_config,
supported_auxiliary_device_classes=self
.get_supported_auxiliary_device_classes())
new_device_config, new_options_config = detector.detect_all_new_devices(
static_ips)
if save_changes:
self._save_config_to_file(new_device_config, self.device_file_name)
self._save_config_to_file(new_options_config,
self.device_options_file_name)
self.reload_configuration()
self.devices()
else:
return (new_device_config, new_options_config)
def devices(self):
"""Prints a summary of device info.
"""
self._print_device_info_by_category("gazoo")
self._print_device_info_by_category("other")
logger.info("{} total Gazoo device(s) available.".format(
len(self.get_connected_devices())))
def download_keys(self):
"""Downloads all required GDM keys if they don't exist locally."""
for key_info in extensions.keys:
host_utils.verify_key(key_info)
@classmethod
def get_all_supported_capabilities(cls):
"""Returns a map of all capability names supported by GDM.
Returns:
dict: map from capability name (str) to capability interface name
(str).
Example: {"file_transfer": "filetransferbase"}.
"""
return copy.copy(extensions.capabilities)
@classmethod
def get_all_supported_capability_interfaces(cls):
"""Returns a map of all capability interface classes supported by GDM.
Returns:
dict: map from interface name (str) to capability interface class
(type).
Example: {"filetransferbase": <class FileTransferBase>}.
"""
return copy.copy(extensions.capability_interfaces)
@classmethod
def get_all_supported_capability_flavors(cls):
"""Returns a map of all capability flavor classes supported by GDM.
Returns:
dict: map from flavor name (str) to capability flavor class (type).
Example: {"filetransferscp": <class FileTransferScp>}.
"""
return copy.copy(extensions.capability_flavors)
@classmethod
def get_all_supported_device_classes(cls):
"""Returns a list of all supported primary, sim, and auxiliary devices.
Returns:
      list: All supported device classes (primary, virtual/sim, and auxiliary).
"""
all_classes = copy.copy(extensions.auxiliary_devices)
all_classes += copy.copy(extensions.primary_devices)
all_classes += copy.copy(extensions.virtual_devices)
return all_classes
def get_connected_devices(self, category="gazoo"):
"""Retrieve a list of connected devices for the category specified.
Args:
category (str): device category ('gazoo', 'other', or 'all') to
retrieve.
Returns:
list: List of known connected devices.
Note:
If category is not specified then a list of all devices will be
returned.
"""
devices = self.get_devices(category)
connected_devices = []
for name in devices:
if self.is_device_connected(name, category):
connected_devices.append(name)
return connected_devices
def get_device_configuration(self, identifier, category="all"):
"""Returns the configuration for the device.
Args:
identifier (str): Name or alias to search for.
category (str): device category ('gazoo', 'other' or 'all') to
retrieve.
Returns:
dict: Configuration obtained for the device found.
Raises:
DeviceError: If identifier does not unique identify the device.
Note:
If category is not specified then all devices will be used to find
the matching identifier.
"""
# returns the device configuration
device_name = self._get_device_name(identifier, category, raise_error=True)
return self._get_device_configuration(device_name, category)
def get_open_device_names(self):
"""Returns a list of open device names.
Returns:
list: open device names
"""
return list(self._open_devices.keys())
def get_open_device(self, identifier):
"""Returns device object if device is open.
Args:
identifier (str): device name, serial_number etc.
Returns:
GazooDeviceBase: device object
Raises:
DeviceError: if device not currently open
"""
device_name = self._get_device_name(identifier, raise_error=True)
if device_name not in self._open_devices:
raise errors.DeviceError(
"Device {} is not currently open".format(identifier))
else:
return self._open_devices[device_name]
def get_open_devices(self):
"""Returns list of device objects."""
return list(self._open_devices.values())
def get_device_prop(self, device_name, prop=None):
"""Gets an prop's value for device or GDM configuration depends on identifier.
Args:
device_name (str): "manager", name, serial_number, alias, or
adb_serial of the device.
prop (str): Public prop available in device_options.json or gdm.json.
Default is None.
Returns:
      dict: the device's properties dict if prop is None.
      value: the value of the requested prop otherwise.
"""
if self._is_manager_config(device_name):
return self._get_config_prop(prop)
else:
return self._get_device_prop(device_name, prop)
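  # Illustrative (hypothetical) calls to get_device_prop above -- the device name
  # and prop values are made-up examples:
  #   manager.get_device_prop("device-1234")             # all device properties
  #   manager.get_device_prop("device-1234", "alias")    # a single device prop
  #   manager.get_device_prop("manager", "log_directory")  # a GDM config prop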
@classmethod
def get_supported_auxiliary_device_classes(cls):
return copy.copy(extensions.auxiliary_devices)
@classmethod
def get_supported_auxiliary_device_types(cls):
return [
a_cls.DEVICE_TYPE
for a_cls in cls.get_supported_auxiliary_device_classes()
]
@classmethod
def get_supported_device_capabilities(cls, device_type):
"""Returns a list of names of capabilities supported by the device type.
This is a wrapper around GazooDeviceBase.get_supported_capabilities() to
allow specifying device_type as a string.
Args:
device_type (str): device type to query for supported capabilities.
Returns:
list: list of capability names supported by this device type.
For example, (["file_transfer", "usb_hub"]).
"""
device_class = cls.get_supported_device_class(device_type)
return device_class.get_supported_capabilities()
@classmethod
def get_supported_device_capability_flavors(cls, device_type):
"""Returns a set of all capability flavor classes supported by the device type.
This is a wrapper around GazooDeviceBase.get_supported_capability_flavors()
to allow specifying device_type as a string.
Args:
device_type (str): device type to query for supported capability flavors.
Returns:
set: capability flavor classes supported by this device type.
Example: {<class 'DevicePowerDefault'>, <class 'FileTransferScp'>}.
"""
device_class = cls.get_supported_device_class(device_type)
return device_class.get_supported_capability_flavors()
@classmethod
def get_supported_device_class(cls, device_type):
"""Converts device type to device class.
Args:
device_type (str): device type.
Returns:
class: GazooDeviceBase-based class.
Raises:
DeviceError: if unknown type.
"""
classes = [
device_class for device_class in cls.get_all_supported_device_classes()
if device_class.DEVICE_TYPE == device_type
]
if classes:
return classes[0]
else:
close_matches = difflib.get_close_matches(
device_type, cls.get_supported_device_types())
raise errors.DeviceError(
"Device type {} is not known. Close matches: {}".format(
device_type, ", ".join(close_matches)))
@classmethod
def get_supported_device_types(cls):
"""Returns a list of all supported device types.
Returns:
list: All supported device types.
"""
return [
a_cls.DEVICE_TYPE for a_cls in cls.get_all_supported_device_classes()
]
@classmethod
def get_supported_primary_device_classes(cls):
return copy.copy(extensions.primary_devices)
@classmethod
def get_supported_primary_device_types(cls):
return [
a_cls.DEVICE_TYPE
for a_cls in cls.get_supported_primary_device_classes()
]
@classmethod
def get_supported_virtual_device_classes(cls):
return copy.copy(extensions.virtual_devices)
@classmethod
def get_supported_virtual_device_types(cls):
return [
a_cls.DEVICE_TYPE
for a_cls in cls.get_supported_virtual_device_classes()
]
def is_device_connected(self, identifier, category="all"):
"""Determine if device match identifier provided is connected for the category specified.
Args:
identifier (str): Name or alias to search for.
category (str): device category ('gazoo', 'other', or 'all') to
retrieve.
Returns:
      bool: True if the matching device is connected, False otherwise.
Raises:
ValueError: Identifier does not unique identify the device.
Note:
If category is not specified then the list of all devices will be used
to find the matching identifier.
"""
device_name = self._get_device_name(identifier, category, raise_error=True)
device_config = self._get_device_configuration(device_name, category)
device_type = device_config["persistent"]["device_type"].lower()
| |
< Cursor[0] < 160:
UI.Ba_Slc = 12
rlr = 0
elif 160 < Cursor[0] < 189:
UI.Ba_Slc = 13
rlr = 1
elif 190 < Cursor[0] < 220:
UI.Ba_Slc = 14
rlr = 2
elif 221 < Cursor[0] < 250:
UI.Ba_Slc = 15
if UI.color_picker == -1:
UI.color_picker = 0
UI.color_picked = -1
elif event.button == 2:
if shift:
tsf = 0
else:
tsf = 2
elif event.button == 3:
if UI.RP_Slc==3:
tsf = 1
elif UI.RP_Slc==4:
tsf = 3
elif UI.RP_Slc==5:
tsf = 4
elif UI.Ba_Slc ==3 or UI.Ba_Slc ==11:
tsf = 5
elif UI.Ba_Slc == 18 and not crop:
ratx[0] = Cursor[0] / 1249 * 33.3
raty[0] = Cursor[1] / 749 * 19.9
crop = True
elif event.button == 4:
UI.cursor_3D_pos[2] += 1
elif event.button == 5:
UI.cursor_3D_pos[2] -= 1
elif event.type == MOUSEBUTTONUP:
if tsf==0 or tsf==1:
if tsf==1 and UI.LP_intMdl_Slc>-1 or tsf==1 and UI.LP_Mdl_Slc>-1 or tsf==1 and UI.LP_Hrc_Slc==0:
if UI.int and UI.LP_Mdl_Slc==0:
if UI.LP_Hrc_Slc == 0:
for i in range(len(self.MDLH)):
pos = UI.MdlH_POS[i]
pos[0] += tx / 5
pos[1] += ty / 5
pos[2] += tz / 5
tx = ty = tz = 0
elif UI.LP_intMdl_Slc==-1 and UI.LP_Hrc_Slc>0:
break
else:
pos = UI.MdlH_POS[UI.LP_intMdl_Slc]
pos[0] += tx / 5
pos[1] += ty / 5
pos[2] += tz / 5
tx = ty = tz = 0
elif UI.LP_Mdl_Slc > -1:
pos = UI.Mdl_POS[UI.LP_Mdl_Slc]
pos[0] += tx / 5
pos[1] += ty / 5
pos[2] += tz / 5
tx = ty = tz = 0
else:
shift = False
elif tsf==3 and UI.LP_intMdl_Slc>-1 or tsf==3 and UI.LP_Mdl_Slc>-1:
if UI.int and UI.LP_Mdl_Slc==0:
deg = UI.MdlH_DEG[UI.LP_intMdl_Slc]
elif UI.LP_Mdl_Slc>-1:
deg = UI.Mdl_DEG[UI.LP_Mdl_Slc]
deg[0] += ry
deg[1] += rx
deg[2] += rz
for i in range(3):
if deg[i]>360 or deg[i]<-360:
n = deg[i] / 360
final = deg[i] - 360 * int(n)
deg[i] = final
rx = ry = rz = 0
elif tsf==4 and UI.LP_intMdl_Slc>-1 or tsf==4 and UI.LP_Mdl_Slc>-1:
if UI.int and UI.LP_Mdl_Slc==0:
scl = UI.MdlH_SCL[UI.LP_intMdl_Slc]
elif UI.LP_Mdl_Slc > -1:
scl = UI.Mdl_SCL[UI.LP_Mdl_Slc]
scl[0] += sx/500
scl[1] += sy/500
scl[2] += sz/500
for i in range(3):
if scl[i]<0:
scl[i] = 0
sx = sy = sz = 0
elif tsf == 5:
if UI.Ba_Slc==3:
pos = UI.Back[3]
elif UI.Ba_Slc==11:
pos = UI.Base[3]
pos[2] += int(ty/10)
if pos[2]>999:
pos[2] =999
elif pos[2]<-999:
pos[2] = -999
ty = 0
elif roller:
roller = False
self.RELoadOBJ()
elif rlr>-1:
rlr = -1
if UI.Light_Set>-1:
UI.Light_Set = -10
elif UI.Ba_Slc>-1:
UI.Ba_Slc = -1
elif UI.Ba_Slc == 18 and crop:
ratx[1] = Cursor[0]/1249*33.3
raty[1] = Cursor[1]/749*19.9
UI.Ba_Slc = -1
crop = False
ax = ay = az = False
UI.axis = -1
tsf = -1
elif event.type == MOUSEMOTION:
i,j = event.rel
if tsf==0:
if shift:
UI.cursor_3D_pos[0] += i
UI.cursor_3D_pos[1] -= j
elif tsf==1:
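        # The quadrant checks below translate 2-D mouse motion (i, j) into
        # world-space deltas (tx, ty, tz), choosing which axes to drive from the
        # current camera rotation (xrot/yrot) so a drag moves the selection the
        # same way on screen regardless of view angle; ax/ay/az restrict the drag
        # to a single axis when an axis lock is active.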
if 0<=yrot<45 or 315<=yrot<=360 or -45<=yrot<0 or -360<=yrot<-315:
if 0 <= xrot < 45 or 315 <= xrot <= 360 or -45 <= xrot < 0 or -360 <= xrot < -315:
if ax:
tx += i
elif ay:
ty -= j
elif az:
tz -= i
else:
tx += i
ty -= j
elif 45<=xrot<135 or -315<=xrot<-225:
if ax:
tx += i
elif ay:
ty -= j
elif az:
tz += i
else:
tz += i
ty -= j
elif 135 <= xrot < 225 or -225 <= xrot < -135:
if ax:
tx -= i
elif ay:
ty -= j
elif az:
tz += i
else:
tx -= i
ty -= j
elif 225<=xrot<315 or -135<=xrot<-45:
if ax:
tx -= i
elif ay:
ty -= j
elif az:
tz -= i
else:
tz -= i
ty -= j
elif 45 <= yrot < 135 or -315 <= yrot < -225:
if 0 <= xrot < 45 or 315 <= xrot <= 360 or -45 <= xrot < 0 or -360 <= xrot < -315:
if ax:
tx += i
elif ay:
ty -= i
elif az:
tz += j
else:
tx += i
tz += j
elif 45 <= xrot < 135 or -315 <= xrot < -225:
if ax:
tx -= j
elif ay:
ty -= i
elif az:
tz += i
else:
tz += i
tx -= j
elif 135 <= xrot < 225 or -225 <= xrot < -135:
if ax:
tx -= i
elif ay:
ty -= i
elif az:
tz -= j
else:
tx -= i
tz -= j
elif 225 <= xrot < 315 or -135 <= xrot < -45:
if ax:
tx += j
elif ay:
ty -= i
elif az:
tz -= i
else:
tz -= i
tx += j
elif 135 <= yrot < 225 or -225 <= yrot < -135:
if 0 <= xrot < 45 or 315 <= xrot <= 360 or -45 <= xrot < 0 or -360 <= xrot < -315:
if ax:
tx += i
elif ay:
ty += j
elif az:
tz += i
else:
tx += i
ty += j
elif 45<=xrot<135 or -315<=xrot<-225:
if ax:
tx -= i
elif ay:
ty += j
elif az:
tz += i
else:
tz += i
ty += j
elif 135 <= xrot < 225 or -225 <= xrot < -135:
if ax:
tx -= i
elif ay:
ty += j
elif az:
tz -= i
else:
tx -= i
ty += j
elif 225<=xrot<315 or -135<=xrot<-45:
if ax:
tx += i
elif ay:
ty += j
elif az:
tz -= i
else:
tz -= i
ty += j
elif 225 <= yrot < 315 or -135 <= yrot < -45:
if 0 <= xrot < 45 or 315 <= xrot <= 360 or -45 <= xrot < 0 or -360 <= xrot < -315:
if ax:
tx += i
elif ay:
ty += i
elif az:
tz -= j
else:
tx += i
tz -= j
elif 45 <= xrot < 135 or -315 <= xrot < -225:
if ax:
tx += j
elif ay:
ty += i
elif az:
tz += i
else:
tz += i
tx += j
elif 135 <= xrot < 225 or -225 <= xrot < -135:
if ax:
tx -= i
elif ay:
ty += i
elif az:
tz += j
else:
tx -= i
tz += j
elif 225 <= xrot < 315 or -135 <= xrot < -45:
if ax:
tx -= j
elif ay:
ty += i
elif az:
tz -= i
else:
tz -= i
tx -= j
elif tsf==2:
xrot += i
yrot += j
elif tsf==3:
if ax:
ry -= j
elif ay:
rx -= i
elif az:
rz += i
else:
rx -= i
ry -= j
elif tsf==4:
if ax:
sx += i
elif ay:
sy += i
elif az:
sz += i
else:
sx += i
sy += i
sz += i
elif tsf==5:
ty -= j
elif roller:
if UI.int and UI.LP_Mdl_Slc==0:
if UI.LP_Hrc_Slc==0:
for i in range(len(self.MDLH)):
if 0 <= UI.MdlH_Alpha[i] <= 1:
UI.MdlH_Alpha[i] += j / 100
if UI.MdlH_Alpha[i] < 0:
UI.MdlH_Alpha[i] = 0
elif UI.MdlH_Alpha[i] > 1:
UI.MdlH_Alpha[i] = 1
else:
if 0<=UI.MdlH_Alpha[UI.LP_intMdl_Slc]<=1:
UI.MdlH_Alpha[UI.LP_intMdl_Slc] += j/100
if UI.MdlH_Alpha[UI.LP_intMdl_Slc]<0:
UI.MdlH_Alpha[UI.LP_intMdl_Slc] = 0
elif UI.MdlH_Alpha[UI.LP_intMdl_Slc]>1:
UI.MdlH_Alpha[UI.LP_intMdl_Slc] = 1
else:
if 0<=UI.Mdl_Alpha[UI.LP_Mdl_Slc]<=1:
UI.Mdl_Alpha[UI.LP_Mdl_Slc] += j/100
if UI.Mdl_Alpha[UI.LP_Mdl_Slc]<0:
UI.Mdl_Alpha[UI.LP_Mdl_Slc] = 0
elif UI.Mdl_Alpha[UI.LP_Mdl_Slc]>1:
UI.Mdl_Alpha[UI.LP_Mdl_Slc] = 1
elif rlr > -1:
if UI.Light_Set == 0:
UI.Light_Props[UI.Light_Set] += i
if UI.Light_Props[UI.Light_Set]<0:
UI.Light_Props[UI.Light_Set] = 0
elif UI.Light_Props[UI.Light_Set]>128:
UI.Light_Props[UI.Light_Set] = 128
else:
if UI.Light_Set>0:
Props = UI.Light_Props[int(UI.Light_Set/10)]
Props[rlr] += i/100
if Props[rlr]<0:
Props[rlr] = 0
elif Props[rlr]>1:
Props[rlr] = 1
elif 4<=UI.Ba_Slc<=6 or 12<=UI.Ba_Slc<=14:
if 4<=UI.Ba_Slc<=6:
RGB = UI.Back[4]
else:
RGB = UI.Base[4]
RGB[rlr] += i
if RGB[rlr]>255:
RGB[rlr] = 255
elif RGB[rlr]<0:
RGB[rlr]=0
elif event.type == pygame.KEYDOWN:
### Data Amend ######
if event.key == K_ESCAPE:
if tsf==1 or tsf==3 or tsf==4:
tx = | |
if statusbar:
self.statusbar = statusbar
def confirm(self, s):
raise NotImplementedError
def notify(self, s, n=10, wait_for_keypress=False):
raise NotImplementedError
def file_prompt(self, s):
raise NotImplementedError
class SourceNotFound(Exception):
"""Exception raised when the requested source could not be found."""
class Repl:
"""Implements the necessary guff for a Python-repl-alike interface
The execution of the code entered and all that stuff was taken from the
Python code module, I had to copy it instead of inheriting it, I can't
remember why. The rest of the stuff is basically what makes it fancy.
It reads what you type, passes it to a lexer and highlighter which
returns a formatted string. This then gets passed to echo() which
parses that string and prints to the curses screen in appropriate
colours and/or bold attribute.
The Repl class also keeps two stacks of lines that the user has typed in:
One to be used for the undo feature. I am not happy with the way this
works. The only way I have been able to think of is to keep the code
that's been typed in in memory and re-evaluate it in its entirety for each
"undo" operation. Obviously this means some operations could be extremely
slow. I'm not even by any means certain that this truly represents a
genuine "undo" implementation, but it does seem to be generally pretty
effective.
If anyone has any suggestions for how this could be improved, I'd be happy
to hear them and implement it/accept a patch. I researched a bit into the
idea of keeping the entire Python state in memory, but this really seems
very difficult (I believe it may actually be impossible to work) and has
its own problems too.
The other stack is for keeping a history for pressing the up/down keys
to go back and forth between lines.
XXX Subclasses should implement echo, current_line, cw
"""
def __init__(self, interp, config):
"""Initialise the repl.
interp is a Python code.InteractiveInterpreter instance
config is a populated bpython.config.Struct.
"""
self.config = config
self.cut_buffer = ""
self.buffer = []
self.interp = interp
self.interp.syntaxerror_callback = self.clear_current_line
self.match = False
self.rl_history = History(
duplicates=config.hist_duplicates, hist_size=config.hist_length
)
# all input and output, stored as old style format strings
# (\x01, \x02, ...) for cli.py
self.screen_hist = []
self.history = [] # commands executed since beginning of session
self.redo_stack = []
self.evaluating = False
self.matches_iter = MatchesIterator()
self.funcprops = None
self.arg_pos = None
self.current_func = None
self.highlighted_paren = None
self._C = {}
self.prev_block_finished = 0
self.interact = Interaction(self.config)
# previous pastebin content to prevent duplicate pastes, filled on call
# to repl.pastebin
self.prev_pastebin_content = ""
self.prev_pastebin_url = ""
self.prev_removal_url = ""
# Necessary to fix mercurial.ui.ui expecting sys.stderr to have this
# attribute
self.closed = False
if self.config.hist_file.exists():
try:
self.rl_history.load(
self.config.hist_file, getpreferredencoding() or "ascii"
)
except OSError:
pass
self.module_gatherer = ModuleGatherer(
skiplist=self.config.import_completion_skiplist
)
self.completers = autocomplete.get_default_completer(
config.autocomplete_mode, self.module_gatherer
)
if self.config.pastebin_helper:
self.paster = PasteHelper(self.config.pastebin_helper)
else:
self.paster = PastePinnwand(
self.config.pastebin_url, self.config.pastebin_expiry,
)
@property
def ps1(self) -> str:
return getattr(sys, "ps1", ">>> ")
@property
def ps2(self) -> str:
return getattr(sys, "ps2", "... ")
def startup(self):
"""
        Execute PYTHONSTARTUP file if it exists. Call this after front
end-specific initialisation.
"""
filename = os.environ.get("PYTHONSTARTUP")
if filename:
encoding = inspection.get_encoding_file(filename)
with open(filename, encoding=encoding) as f:
source = f.read()
self.interp.runsource(source, filename, "exec", encode=False)
def current_string(self, concatenate=False):
"""If the line ends in a string get it, otherwise return ''"""
tokens = self.tokenize(self.current_line)
string_tokens = list(
takewhile(
token_is_any_of([Token.String, Token.Text]), reversed(tokens)
)
)
if not string_tokens:
return ""
opening = string_tokens.pop()[1]
string = list()
for (token, value) in reversed(string_tokens):
if token is Token.Text:
continue
elif opening is None:
opening = value
elif token is Token.String.Doc:
string.append(value[3:-3])
opening = None
elif value == opening:
opening = None
if not concatenate:
string = list()
else:
string.append(value)
if opening is None:
return ""
return "".join(string)
def get_object(self, name):
attributes = name.split(".")
obj = eval(attributes.pop(0), self.interp.locals)
while attributes:
with inspection.AttrCleaner(obj):
obj = getattr(obj, attributes.pop(0))
return obj
@classmethod
def _funcname_and_argnum(cls, line):
"""Parse out the current function name and arg from a line of code."""
# each list in stack:
# [full_expr, function_expr, arg_number, opening]
# arg_number may be a string if we've encountered a keyword
# argument so we're done counting
stack = [["", "", 0, ""]]
try:
for (token, value) in Python3Lexer().get_tokens(line):
if token is Token.Punctuation:
if value in "([{":
stack.append(["", "", 0, value])
elif value in ")]}":
full, _, _, start = stack.pop()
expr = start + full + value
stack[-1][1] += expr
stack[-1][0] += expr
elif value == ",":
try:
stack[-1][2] += 1
except TypeError:
stack[-1][2] = ""
stack[-1][1] = ""
stack[-1][0] += value
elif value == ":" and stack[-1][3] == "lambda":
expr = stack.pop()[0] + ":"
stack[-1][1] += expr
stack[-1][0] += expr
else:
stack[-1][1] = ""
stack[-1][0] += value
elif (
token is Token.Number
or token in Token.Number.subtypes
or token is Token.Name
or token in Token.Name.subtypes
or token is Token.Operator
and value == "."
):
stack[-1][1] += value
stack[-1][0] += value
elif token is Token.Operator and value == "=":
stack[-1][2] = stack[-1][1]
stack[-1][1] = ""
stack[-1][0] += value
elif token is Token.Number or token in Token.Number.subtypes:
stack[-1][1] = value
stack[-1][0] += value
elif token is Token.Keyword and value == "lambda":
stack.append([value, "", 0, value])
else:
stack[-1][1] = ""
stack[-1][0] += value
while stack[-1][3] in "[{":
stack.pop()
_, _, arg_number, _ = stack.pop()
_, func, _, _ = stack.pop()
return func, arg_number
except IndexError:
return None, None
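    # Illustrative behaviour of _funcname_and_argnum above (worked through by
    # hand, not a doctest; "foo"/"bar" are placeholder names):
    #   "foo(1, 2"    -> ("foo", 1)      counting positional arguments
    #   "foo(1, bar=" -> ("foo", "bar")  a keyword arg turns arg_number into a str
    #   "foo"         -> (None, None)    no unclosed call on the line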
def get_args(self):
"""Check if an unclosed parenthesis exists, then attempt to get the
        argspec() for it. On success, update self.funcprops and self.arg_pos and
        return True; otherwise set self.funcprops to None and return False."""
self.current_func = None
if not self.config.arg_spec:
return False
func, arg_number = self._funcname_and_argnum(self.current_line)
if not func:
return False
try:
if inspection.is_eval_safe_name(func):
f = self.get_object(func)
else:
try:
fake_cursor = self.current_line.index(func) + len(func)
f = simpleeval.evaluate_current_attribute(
fake_cursor, self.current_line, self.interp.locals
)
except simpleeval.EvaluationError:
return False
if inspect.isclass(f):
class_f = None
if hasattr(f, "__init__") and f.__init__ is not object.__init__:
class_f = f.__init__
if (
(not class_f or not inspection.getfuncprops(func, class_f))
and hasattr(f, "__new__")
and f.__new__ is not object.__new__
and
# py3
f.__new__.__class__ is not object.__new__.__class__
):
class_f = f.__new__
if class_f:
f = class_f
except Exception:
# another case of needing to catch every kind of error
# since user code is run in the case of descriptors
# XXX: Make sure you raise here if you're debugging the completion
# stuff !
return False
self.current_func = f
self.funcprops = inspection.getfuncprops(func, f)
if self.funcprops:
self.arg_pos = arg_number
return True
self.arg_pos = None
return False
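    # Illustrative effect of get_args above: for a current line like "foo(1, "
    # where foo resolves in the interpreter namespace (foo is a placeholder name),
    # it sets self.current_func to that object, self.funcprops to its inspected
    # properties (when available) and self.arg_pos to 1.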
def get_source_of_current_name(self):
"""Return the unicode source code of the object which is bound to the
current name in the current input line. Throw `SourceNotFound` if the
source cannot be found."""
obj = self.current_func
try:
if obj is None:
line = self.current_line
if not line.strip():
raise SourceNotFound(_("Nothing to get source of"))
if inspection.is_eval_safe_name(line):
obj = self.get_object(line)
return inspection.get_source_unicode(obj)
except (AttributeError, NameError) as e:
msg = _("Cannot get source: %s") % (e,)
except OSError as e:
msg = f"{e}"
except TypeError as e:
if "built-in" in f"{e}":
msg = _("Cannot access source of %r") % (obj,)
else:
msg = _("No source code found for %s") % (self.current_line,)
raise SourceNotFound(msg)
def set_docstring(self):
self.docstring = None
if not self.get_args():
self.funcprops = None
if self.current_func is not None:
try:
self.docstring = pydoc.getdoc(self.current_func)
except IndexError:
self.docstring = None
else:
# pydoc.getdoc() returns an empty string if no
# docstring was found
if not self.docstring:
self.docstring = None
# What complete() does:
# Should we show the completion box? (are there matches, or is there a
# docstring to show?)
# Some completions should always be shown, other only if tab=True
# set the current docstring to the "current function's" docstring
# Populate the matches_iter object with new matches from the current state
# if none, clear the matches iterator
# If | |
has been provided, record it.
elif name in ("hyphenation", "hyph"):
data_append(config, data, "hyphenation", t_vec(config, t))
# If pinyin reading has been provided, record it (this is reading
# of a Chinese word in romanized forms, i.e., western characters).
elif name == "pinyin reading of":
data_extend(config, data, "pinyin", t_vec(config, t))
# XXX what other potentially useful information might be available?
# Parse word senses for the part-of-speech.
for node in p.lists():
for item in node.items:
txt = str(item)
if txt.startswith("*::"):
continue # Possibly a bug in wikitextparser
sense = {}
parse_sense(config, sense, txt, True)
for node2 in node.sublists():
for item2 in node2.items:
parse_sense(config, sense, str(item2), False)
for node3 in node2.sublists():
for item3 in node3.items:
parse_sense(config, sense, str(item3), False)
for tag in add_tags:
if tag not in sense.get("tags", ()):
data_append(config, sense, "tags", "plural")
data_append(config, data, "senses", sense)
# XXX there might be word senses encoded in other ways, without using
# a list for them. Do some tests to find out how common this is.
if not data.get("senses"):
if config.pos not in ("character", "symbol", "letter"):
config.warning("no senses found in section {}"
"".format(pos_sectitle))
def parse_pronunciation(config, data, text, p):
"""Extracts pronunciation information for the word."""
assert isinstance(config, WiktionaryConfig)
assert isinstance(data, dict)
assert isinstance(text, str)
def parse_variant(text):
variant = {}
sense = None
p = wikitextparser.parse(text)
for t in p.templates:
name = t.name.strip()
# Silently ignore templates that we don't care about.
if name == "sense":
# Some words, like "house" (English) have a two-level structure
# with different pronunciations for verbs and nouns, with
# {{sense|...}} used to distinguish them
sense = t_arg(config, t, 1)
# Pronunciation may be qualified by
# accent/dialect/variant. These are recorded under
# "tags". See
# https://en.wiktionary.org/wiki/Module:accent_qualifier/data
elif name in ("a", "accent"):
data_extend(config, variant, "accent", clean_quals(config, t_vec(config, t)))
# These other qualifiers and context markers may be used for
# similar things, but their values are less well defined.
elif name in ("qual", "qualifier", "q", "qf"):
data_extend(config, variant, "tags", t_vec(config, t))
elif name in ("lb", "context",
"term-context", "tcx", "term-label", "tlb", "i"):
data_extend(config, variant, "tags", clean_quals(config, t_vec(config, t)[1:]))
# Various tags seem to indicate topical categories that the
# word belongs to. These are added under "topics".
elif name in ("topics", "categorize", "catlangname", "c", "C",
"cln",
"top", "categorise", "catlangcode"):
for topic in t_vec(config, t)[1:]:
data_append(config, data, "topics", {"word": topic})
            # Extract IPA pronunciation specification under "ipa".
elif name in ("IPA", "ipa"):
vec = t_vec(config, t)
for ipa in vec[1:]:
data_append(config, variant, "ipa", ipa)
elif name in ("IPAchar", "audio-IPA"):
# These are used in text to format as IPA characters
# or to specify inline audio
pass
# Extract special variants of the IPA template. Store these as
# dictionaries under "special_ipa".
elif re.search("IPA", name):
data_append(config, variant, "special_ipa",
t_dict(config, t))
# If English pronunciation (enPR) has been specified, record them
# under "enpr".
elif name == "enPR":
data_append(config, variant, "enpr", t_arg(config, t, 1))
# There are also some other forms of pronunciation information that
# we collect; it is not yet clear what all these mean.
elif name in ("it-stress",):
data_append(config, variant, "stress", t_arg(config, t, 1))
elif name == "PIE root":
data_append(config, variant, "pie_root", t_arg(config, t, 2))
# If an audio file has been specified for the word,
# collect those under "audios".
elif name in ("audio", "audio-pron"):
data_append(config, variant, "audios",
(t_arg(config, t, "lang"),
t_arg(config, t, 1),
t_arg(config, t, 2)))
# If homophones have been specified, collect those under
# "homophones".
elif name in ("homophones", "homophone"):
data_extend(config, variant, "homophones", t_vec(config, t))
elif name == "hyphenation":
# This is often in pronunciation, but we'll store it at top
# level in the entry
data_append(config, data, "hyphenation", t_vec(config, t))
# These templates are silently ignored for pronunciation information
# collection purposes.
elif name in ("inflection of", "l", "link", "l-self",
"m", "w", "W", "label",
"gloss", "zh-m", "zh-l", "ja-l", "wtorw",
"ux", "ant", "syn", "synonyms", "antonyms",
"wikipedia", "Wikipedia",
"alternative form of", "alt form",
"altform", "alt-form", "abb", "rareform",
"alter", "hyph", "honoraltcaps",
"non-gloss definition", "n-g", "non-gloss",
"ngd",
"senseid", "defn", "ja-r", "ja-l", "ja-r/args",
"ja-r/multi",
"place:Brazil/state",
"place:Brazil/municipality",
"place", "taxlink",
"pedlink", "vern", "prefix", "affix",
"suffix", "wikispecies", "ISBN", "slim-wikipedia",
"swp", "comcatlite", "forename",
"given name", "surname", "head"):
continue
# Any templates matching these are silently ignored for
# pronunciation information collection purposes.
elif re.search(r"^R:|^RQ:|^&|"
r"-form|-def|-verb|-adj|-noun|-adv|"
r"-prep| of$|"
r"-romanization|-romanji|-letter|"
r"^en-|^fi-|",
name):
continue
else:
# Warn about unhandled templates.
config.unrecognized_template(t, "pronunciation")
if sense:
variant["sense"] = sense
# If we got some useful pronunciation information, save it
# under "sounds" in the word entry.
if len(set(variant.keys()) - set(["tags", "sense", "accent"])):
data_append(config, data, "pronunciations", variant)
# If the pronunciation section does not contain a list, parse it all
# as a single pronunciation variant. Otherwise parse each list item
# separately.
spans = []
for node in p.lists():
spans.append(node.span)
for item in node.items:
parse_variant(str(item))
for s, e in reversed(spans):
text = text[:s] + text[e:]
text = text.strip()
if text:
parse_variant(text)
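# Illustrative (assumed) shape of one entry appended to data["pronunciations"] by
# parse_pronunciation above -- keys appear only when the matching templates occur,
# and the values here are invented:
#   {"accent": ["US"], "ipa": ["/haʊs/"], "enpr": ["hous"], "sense": "noun"}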
def parse_linkage(config, data, kind, text, p, sense_text=None):
"""Parses links to other words, such as synonyms, hypernyms, etc.
```kind``` identifies the default type for such links (based on section
header); however, it is not entirely reliable. The particular template
types used may also indicate what type of link it is; we trust that
information more."""
added = set()
def parse_item(text, kind, is_item):
sense_text = None
qualifiers = []
def add_linkage(kind, v):
nonlocal qualifiers
v = v.strip()
if v.startswith("See also"): # Used to refer to thesauri
return
if v.find(" Thesaurus:") >= 0: # Thesaurus links handled separately
return
if v.lower() == "see also":
return
if v.startswith("Category:"):
# These are probably from category links at the end of page,
# which could end up in any section.
return
if v.startswith(":Category:"):
v = v[1:]
elif v.startswith("See "):
v = v[4:]
if v.endswith("."):
v = v[:-1]
v = v.strip()
if v in ("", "en",):
return
key = (kind, v, sense_text, tuple(sorted(qualifiers)))
if key in added:
return
added.add(key)
v = {"word": v}
if sense_text:
v["sense"] = sense_text
if qualifiers:
v["tags"] = qualifiers
data_append(config, data, kind, v)
qualifiers = []
# Parse the item text.
p = wikitextparser.parse(text)
if len(text) < 200 and text and text[0] not in "*:":
item = clean_value(config, text)
if item:
if item.startswith("For more, see "):
item = item[14:]
links = []
for t in p.templates:
name = t.name.strip()
if name == "sense":
sense_text = t_arg(config, t, 1)
elif name == "l":
links.append((kind, t_arg(config, t, 2)))
elif name == "qualifier":
qualifiers.extend(t_vec(config, t))
if links:
saved_qualifiers = []
for kind, link in links:
qualifiers = saved_qualifiers
add_linkage(kind, link)
return
found = False
for m in re.finditer(r"''+([^']+)''+", text):
v = m.group(1)
v = clean_value(config, v)
if v.startswith("(") and v.endswith(")"):
# XXX These seem to often be qualifiers
sense_text = v[1:-1]
continue
add_linkage(kind, v)
found = True
if found:
return
m = re.match(r"^\((([^)]|\([^)]+\))*)\):? ?(.*)$", item)
if m:
q = m.group(1)
sense_text = q
item = m.group(3)
# Parenthesized parts at the end often contain extra stuff
# that we don't want
item = re.sub(r"\([^)]+\)\s*", "", item)
# Semicolons and dashes commonly occur here in phylum hypernyms
for v in item.split("; "):
for vv in v.split(", "):
vv = vv.split(" - ")[0]
add_linkage(kind, vv)
# Add thesaurus links
if kind == "synonyms":
for t in p.wikilinks:
target = t.target.strip()
if target.startswith("Thesaurus:"):
add_linkage("synonyms", target)
return
# Iterate over all templates
for t in p.templates:
name = t.name.strip()
# Link tags just use the default kind
if name in ("l", "link", "l/ja", "1"):
add_linkage(kind, t_arg(config, t, 2))
# Wikipedia links also suggest a linkage of the default kind
elif name in ("wikipedia", "Wikipedia", "w", "wp"):
add_linkage(kind, t_arg(config, t, 1))
elif name in ("w2",):
add_linkage(kind, t_arg(config, t, 2))
# Japanese links seem to commonly use "ja-r" template.
# Use the default linkage for them, and collect the
# "hiragana" mapping for the catagana term when available
# (actually using them would require later
# postprocessing).
elif name in ("ja-r", | |
= JOB_STATUS_DICT['waiting']
# update upload jobs to "running" for files A, B, and C for DABS submissions or for the upload job in FABS
upload_jobs = [job for job in jobs if job.job_type_id in [JOB_TYPE_DICT['file_upload']] and
job.file_type_id in initial_file_types]
for job in upload_jobs:
job.job_status_id = JOB_STATUS_DICT['running']
sess.commit()
# call finalize job for the upload jobs for files A, B, and C for DABS submissions and the only job for FABS,
# which will kick off the rest of the process for DABS and indicate to the user that the validations are done
# for FABS
for job in upload_jobs:
FileHandler.finalize(job.job_id)
return JsonResponse.create(StatusCode.OK, {'message': 'Success'})
def move_certified_files(self, submission, certify_history, is_local):
""" Copy all files within the certified submission to the correct certified files bucket/directory. FABS
submissions also create a file containing all the published rows
Args:
submission: submission for which to move the files
certify_history: a CertifyHistory object to use for timestamps and to update once the files are moved
is_local: a boolean indicating whether the application is running locally or not
"""
try:
self.s3manager
except AttributeError:
self.s3manager = S3Handler()
sess = GlobalDB.db().session
submission_id = submission.submission_id
log_data = {
'message': 'Starting move_certified_files',
'message_type': 'BrokerDebug',
'submission_id': submission_id,
'submission_type': 'FABS' if submission.d2_submission else 'DABS'
}
logger.debug(log_data)
# get the list of upload jobs
jobs = sess.query(Job).filter(Job.submission_id == submission_id,
Job.job_type_id == JOB_TYPE_DICT['file_upload'],
Job.filename.isnot(None)).all()
original_bucket = CONFIG_BROKER['aws_bucket']
new_bucket = CONFIG_BROKER['certified_bucket']
agency_code = submission.cgac_code if submission.cgac_code else submission.frec_code
# warning file doesn't apply to FABS submissions
possible_warning_files = [FILE_TYPE_DICT['appropriations'], FILE_TYPE_DICT['program_activity'],
FILE_TYPE_DICT['award_financial']]
# set the route within the bucket
if submission.d2_submission:
created_at_date = certify_history.created_at
route_vars = ['FABS', agency_code, created_at_date.year, '{:02d}'.format(created_at_date.month)]
else:
route_vars = [agency_code, submission.reporting_fiscal_year, submission.reporting_fiscal_period // 3,
certify_history.certify_history_id]
new_route = '/'.join([str(var) for var in route_vars]) + '/'
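        # e.g. (illustrative values): a FABS submission for agency code "123" whose
        # certify_history was created 2020-04-15 gets new_route == "FABS/123/2020/04/";
        # a DABS submission instead uses year/quarter/certify_history_id.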
for job in jobs:
log_data['job_id'] = job.job_id
# non-local instances create a new path, local instances just use the existing one
if not is_local:
old_path_sections = job.filename.split('/')
new_path = new_route + old_path_sections[-1]
else:
new_path = job.filename
# get the warning file name for this file
warning_file = None
if job.file_type_id in possible_warning_files:
# warning file is in the new path for non-local instances and just in its normal place for local ones
if not is_local:
# create names and move warning file
warning_file_name = report_file_name(submission_id, True, job.file_type.name)
warning_file = new_route + warning_file_name
self.s3manager.copy_file(original_bucket=original_bucket, new_bucket=new_bucket,
original_path='errors/' + warning_file_name, new_path=warning_file)
else:
warning_file = CONFIG_SERVICES['error_report_path'] + report_file_name(submission_id, True,
job.file_type.name)
comment = None
if submission.d2_submission:
# FABS published submission, create the FABS published rows file
log_data['message'] = 'Generating published FABS file from publishable rows'
logger.info(log_data)
new_path = create_fabs_published_file(sess, submission_id, new_route)
else:
# DABS certified submission
# get the comment relating to the file
comment = sess.query(Comment).\
filter_by(submission_id=submission_id, file_type_id=job.file_type_id).one_or_none()
if comment:
comment = comment.comment
# only actually move the files if it's not a local submission
if not is_local:
self.s3manager.copy_file(original_bucket=original_bucket, new_bucket=new_bucket,
original_path=job.filename, new_path=new_path)
# create the certified_files_history for this file
file_history = CertifiedFilesHistory(certify_history_id=certify_history.certify_history_id,
submission_id=submission_id, file_type_id=job.file_type_id,
filename=new_path, comment=comment,
warning_filename=warning_file)
sess.add(file_history)
# FABS submissions don't have cross-file validations or comments
if not submission.d2_submission:
# Adding cross-file warnings
cross_list = {'B': 'A', 'C': 'B', 'D1': 'C', 'D2': 'C'}
for key, value in cross_list.items():
first_file = FILE_TYPE_DICT_LETTER_NAME[value]
second_file = FILE_TYPE_DICT_LETTER_NAME[key]
# create warning file path
if not is_local:
warning_file_name = report_file_name(submission_id, True, first_file, second_file)
warning_file = new_route + warning_file_name
# move the file if we aren't local
self.s3manager.copy_file(original_bucket=original_bucket, new_bucket=new_bucket,
original_path='errors/' + warning_file_name, new_path=warning_file)
else:
warning_file = CONFIG_SERVICES['error_report_path'] + report_file_name(submission_id, True,
first_file, second_file)
# add certified history
file_history = CertifiedFilesHistory(certify_history_id=certify_history.certify_history_id,
submission_id=submission_id, filename=None, file_type_id=None,
comment=None, warning_filename=warning_file)
sess.add(file_history)
# Only move the file if we have any certified comments
num_cert_comments = sess.query(CertifiedComment).filter_by(submission_id=submission_id).count()
if num_cert_comments > 0:
filename = 'submission_{}_comments.csv'.format(str(submission_id))
if not is_local:
old_path = '{}/{}'.format(str(submission.submission_id), filename)
new_path = new_route + filename
# Copy the file if it's a non-local submission
self.s3manager.copy_file(original_bucket=original_bucket, new_bucket=new_bucket,
original_path=old_path, new_path=new_path)
else:
new_path = ''.join([CONFIG_BROKER['broker_files'], filename])
file_history = CertifiedFilesHistory(certify_history_id=certify_history.certify_history_id,
submission_id=submission_id, filename=new_path, file_type_id=None,
comment=None, warning_filename=None)
sess.add(file_history)
sess.commit()
log_data['message'] = 'Completed move_certified_files'
logger.debug(log_data)
def revert_certified_error_files(self, sess, certify_history_id):
""" Copy warning files (non-locally) back to the errors folder and revert error files to just headers for a
submission that is being reverted to certified status
Args:
sess: the database connection
certify_history_id: the ID of the CertifyHistory object that represents the latest certification
"""
warning_files = sess.query(CertifiedFilesHistory.warning_filename). \
filter(CertifiedFilesHistory.certify_history_id == certify_history_id,
CertifiedFilesHistory.warning_filename.isnot(None)).all()
for warning in warning_files:
warning = warning.warning_filename
# Getting headers and file names
if 'cross' in warning:
error = warning.replace('_warning_', '_')
headers = ValidationManager.cross_file_report_headers
else:
error = warning.replace('warning', 'error')
headers = ValidationManager.report_headers
# Moving/clearing files
if not self.is_local:
s3_resource = boto3.resource('s3', region_name=CONFIG_BROKER['aws_region'])
submission_bucket = CONFIG_BROKER['aws_bucket']
certified_bucket = CONFIG_BROKER['certified_bucket']
error_file_name = os.path.basename(error)
warning_file_name = os.path.basename(warning)
error_file_path = ''.join([CONFIG_SERVICES['error_report_path'], error_file_name])
# Create clean error file
with open(error_file_path, 'w', newline='') as error_file:
error_csv = csv.writer(error_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
error_csv.writerow(headers)
error_file.close()
# Write error file
with open(error_file_path, 'rb') as csv_file:
s3_resource.Object(submission_bucket, 'errors/' + error_file_name).put(Body=csv_file)
csv_file.close()
os.remove(error_file_path)
# Copy warning file back over
S3Handler.copy_file(original_bucket=certified_bucket, new_bucket=submission_bucket,
original_path=warning, new_path='errors/' + warning_file_name)
else:
with open(error, 'w', newline='') as error_file:
error_csv = csv.writer(error_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
error_csv.writerow(headers)
error_file.close()
def get_submission_comments(submission):
""" Fetch comments for this submission, indexed by file letter
Args:
submission: the submission to gather comments for
Returns:
JsonResponse object with the contents of the comments in a key/value pair of letter/comments
"""
sess = GlobalDB.db().session
result = {letter: '' for letter in FILE_TYPE_DICT_LETTER.values() if letter != 'FABS'}
comments = sess.query(Comment).filter_by(submission_id=submission.submission_id)
for comment in comments:
letter = FILE_TYPE_DICT_LETTER[comment.file_type_id]
result[letter] = comment.comment
return JsonResponse.create(StatusCode.OK, result)
def update_submission_comments(submission, comment_request, is_local):
""" Clear existing comments and replace them with the provided set.
Args:
submission: submission to update the comments for
comment_request: the contents of the request from the API
is_local: a boolean indicating whether the application is running locally or not
"""
# If the submission has been certified, set its status to updated when new comments are made.
if submission.publish_status_id == PUBLISH_STATUS_DICT['published']:
submission.publish_status_id = PUBLISH_STATUS_DICT['updated']
json = comment_request or {}
# clean input
comments_json = {key.upper(): value.strip() for key, value in json.items()
if isinstance(value, str) and value.strip()}
sess = GlobalDB.db().session
# Delete old comments, fetch just in case
sess.query(Comment).filter_by(submission_id=submission.submission_id).delete(synchronize_session='fetch')
comments = []
for file_type_id, letter in FILE_TYPE_DICT_LETTER.items():
if letter in comments_json and letter != 'FABS':
comments.append(Comment(
submission_id=submission.submission_id,
file_type_id=file_type_id,
comment=comments_json[letter]
))
sess.add_all(comments)
sess.commit()
# Preparing for the comments file
filename = 'submission_{}_comments.csv'.format(submission.submission_id)
local_file = ''.join([CONFIG_BROKER['broker_files'], filename])
file_path = local_file if is_local else '{}/{}'.format(str(submission.submission_id), filename)
headers = ['File', 'Comment']
# Generate a file containing all the comments for a given submission
comment_query = sess.query(FileType.name, Comment.comment).\
join(FileType, Comment.file_type_id == FileType.file_type_id).\
filter(Comment.submission_id == submission.submission_id)
# Generate the file locally, then place in S3
write_stream_query(sess, comment_query, local_file, file_path, is_local, header=headers)
return JsonResponse.create(StatusCode.OK, {})
def get_comments_file(submission, is_local):
""" Retrieve the comments file for a specific submission.
Args:
submission: the submission to get the comments file for
is_local: a boolean indicating whether the application is running locally or not
Returns:
A JsonResponse containing the url to the file if one exists, JsonResponse error containing the details of
the error if something went wrong
"""
sess = GlobalDB.db().session
num_comments = sess.query(Comment).filter_by(submission_id=submission.submission_id).count()
# if we have at least one comment, we have a file to return
if num_comments > 0:
filename = 'submission_{}_comments.csv'.format(submission.submission_id)
if is_local:
# when local, can just grab the path
url = os.path.join(CONFIG_BROKER['broker_files'], filename)
else:
url = S3Handler().get_signed_url(str(submission.submission_id), filename,
url_mapping=CONFIG_BROKER['submission_bucket_mapping'],
method='get_object')
return JsonResponse.create(StatusCode.OK, {'url': url})
return JsonResponse.error(ValueError('This submission does not have any comments associated with it'),
StatusCode.CLIENT_ERROR)
def create_fabs_published_file(sess, submission_id, new_route):
""" Create a file containing all the published rows from this submission_id
Args:
sess: the current DB session
submission_id: ID of the submission the file is being created for
new_route: the path to the new file
Returns:
The full path to the newly created/uploaded file
"""
# create timestamped name and paths
timestamped_name = S3Handler.get_timestamped_filename('submission_{}_published_fabs.csv'.format(submission_id))
local_filename = ''.join([CONFIG_BROKER['broker_files'], timestamped_name])
upload_name = ''.join([new_route, timestamped_name])
# write file and stream to S3
fabs_query = published_fabs_query({'sess': sess, 'submission_id': submission_id})
headers = [key for key in fileD2.mapping]
write_stream_query(sess, fabs_query, local_filename, upload_name, g.is_local, header=headers)
return upload_name
import time
import numpy as np
from .peeler_tools import *
from .peeler_tools import _dtype_spike
from .peeler_engine_base import PeelerEngineGeneric
from .peakdetector import get_peak_detector_class
import matplotlib.pyplot as plt
from . import pythran_tools
if hasattr(pythran_tools, '__pythran__'):
HAVE_PYTHRAN = True
else:
HAVE_PYTHRAN = False
try:
import numba
HAVE_NUMBA = True
from .numba_tools import numba_loop_sparse_dist
except ImportError:
HAVE_NUMBA = False
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
class PeelerEngineClassic(PeelerEngineGeneric):
def change_params(self, **kargs):
PeelerEngineGeneric.change_params(self, **kargs)
if self.argmin_method == 'opencl' and self.catalogue['centers0'].size>0:
#~ if self.use_opencl_with_sparse and self.catalogue['centers0'].size>0:
OpenCL_Helper.initialize_opencl(self, cl_platform_index=self.cl_platform_index, cl_device_index=self.cl_device_index)
#~ self.ctx = pyopencl.create_some_context(interactive=False)
#~ self.queue = pyopencl.CommandQueue(self.ctx)
centers = self.catalogue['centers0']
nb_channel = centers.shape[2]
peak_width = centers.shape[1]
nb_cluster = centers.shape[0]
kernel = kernel_opencl%{'nb_channel': nb_channel,'peak_width':peak_width,
'wf_size':peak_width*nb_channel,'nb_cluster' : nb_cluster}
prg = pyopencl.Program(self.ctx, kernel)
opencl_prg = prg.build(options='-cl-mad-enable')
self.kern_waveform_distance = getattr(opencl_prg, 'waveform_distance')
wf_shape = centers.shape[1:]
one_waveform = np.zeros(wf_shape, dtype='float32')
self.one_waveform_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=one_waveform)
self.catalogue_center_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=centers)
self.waveform_distance = np.zeros((nb_cluster), dtype='float32')
self.waveform_distance_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=self.waveform_distance)
#~ mask[:] = 0
self.sparse_mask_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=self.sparse_mask.astype('u1'))
rms_waveform_channel = np.zeros(nb_channel, dtype='float32')
self.rms_waveform_channel_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE| mf.COPY_HOST_PTR, hostbuf=rms_waveform_channel)
self.cl_global_size = (centers.shape[0], centers.shape[2])
#~ self.cl_local_size = None
self.cl_local_size = (centers.shape[0], 1) # faster on a GPU because of memory access
#~ self.cl_local_size = (1, centers.shape[2])
def initialize_before_each_segment(self, **kargs):
PeelerEngineGeneric.initialize_before_each_segment(self, **kargs)
# force engine to global
p = dict(self.catalogue['peak_detector_params'])
p.pop('engine')
p.pop('method')
self.peakdetector_method = 'global'
self.peakdetector_engine = 'numpy'
PeakDetector_class = get_peak_detector_class(self.peakdetector_method, self.peakdetector_engine)
chunksize = self.fifo_size-2*self.n_span # not the real chunksize here
self.peakdetector = PeakDetector_class(self.sample_rate, self.nb_channel,
chunksize, self.internal_dtype, self.geometry)
self.peakdetector.change_params(**p)
self.mask_not_already_tested = np.ones(self.fifo_size - 2 * self.n_span, dtype='bool')
def detect_local_peaks_before_peeling_loop(self):
# negative mask 1: not tested 0: already tested
self.mask_not_already_tested[:] = True
self.local_peaks_mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
#~ peak_inds, = np.nonzero(self.local_peaks_mask )
#~ peak_chans = np.argmin(self.fifo_residuals[peak_inds, :], axis=1)
#~ peak_inds = peak_inds + self.n_span
#~ fig, ax = plt.subplots()
#~ plot_sigs = self.fifo_residuals.copy()
#~ for c in range(self.nb_channel):
#~ plot_sigs[:, c] += c*30
#~ ax.plot(plot_sigs, color='k')
#~ ampl = plot_sigs[peak_inds, peak_chans]
#~ ax.scatter(peak_inds, ampl, color='r')
#~ for peak_ind in peak_inds:
#~ ax.axvline(peak_ind)
#~ plt.show()
def select_next_peak(self):
# TODO find faster
local_peaks_indexes, = np.nonzero(self.local_peaks_mask & self.mask_not_already_tested)
if self._plot_debug:
print('select_next_peak', local_peaks_indexes + self.n_span)
#~ print(local_peaks_indexes.size)
#~ print('select_next_peak')
#~ print(local_peaks_indexes + self.n_span )
if local_peaks_indexes.size>0:
local_peaks_indexes += self.n_span
#~ if self._plot_debug:
#~ print('select_next_peak', local_peaks_indexes)
amplitudes = np.max(np.abs(self.fifo_residuals[local_peaks_indexes, :]), axis=1)
ind = np.argmax(amplitudes)
return local_peaks_indexes[ind], None
#~ return local_peaks_indexes[0]
else:
return LABEL_NO_MORE_PEAK, None
def on_accepted_spike(self, spike):
# remove spike prediction from fifo residuals
left_ind = spike.index + self.n_left
cluster_idx = self.catalogue['label_to_index'][spike.cluster_label]
pos, pred = make_prediction_one_spike(spike.index, cluster_idx, spike.jitter, self.fifo_residuals.dtype, self.catalogue)
self.fifo_residuals[pos:pos+self.peak_width, :] -= pred
# this prevents searching for peaks in the zone until the next "reset_to_not_tested"
self.set_already_tested_spike_zone(spike.index, cluster_idx)
def set_already_tested(self, peak_ind, peak_chan):
self.mask_not_already_tested[peak_ind - self.n_span] = False
def set_already_tested_spike_zone(self, peak_ind, cluster_idx):
self.mask_not_already_tested[peak_ind + self.n_left - self.n_span:peak_ind + self.n_right- self.n_span] = False
def reset_to_not_tested(self, good_spikes):
self.local_peaks_mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
#~ self.mask_not_already_tested[:] = True
for spike in good_spikes:
peak_ind = spike.index
self.mask_not_already_tested[peak_ind + self.n_left - self.n_span:peak_ind + self.n_right- self.n_span] = True
#~ for spike in good_spikes:
#~ peak_ind = spike.index
#~ # TODO here make enlarge a bit with maximum_jitter_shift
#~ sl1 = slice(peak_ind + self.n_left - 1 - self.n_span, peak_ind + self.n_right + 1 + self.n_span)
#~ sl2 = slice(peak_ind + self.n_left - 1 - self.n_span, peak_ind + self.n_right + 1- self.n_span)
#~ self.local_peaks_mask[sl2] = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals[sl1, :])
#~ # set neighboor untested
#~ self.mask_not_already_tested[peak_ind - self.peak_width - self.n_span:peak_ind + self.peak_width - self.n_span] = True
def get_no_label_peaks(self):
# nolabel_indexes, = np.nonzero(~self.mask_not_already_tested)
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
nolabel_indexes, = np.nonzero(mask)
nolabel_indexes += self.n_span
nolabel_indexes = nolabel_indexes[nolabel_indexes<(self.chunksize+self.n_span)]
bad_spikes = np.zeros(nolabel_indexes.shape[0], dtype=_dtype_spike)
bad_spikes['index'] = nolabel_indexes
bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED
return bad_spikes
def get_best_template(self, left_ind, peak_chan):
assert peak_chan is None
waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
if self.argmin_method == 'opencl':
rms_waveform_channel = np.sum(waveform**2, axis=0).astype('float32')
pyopencl.enqueue_copy(self.queue, self.one_waveform_cl, waveform)
pyopencl.enqueue_copy(self.queue, self.rms_waveform_channel_cl, rms_waveform_channel)
event = self.kern_waveform_distance(self.queue, self.cl_global_size, self.cl_local_size,
self.one_waveform_cl, self.catalogue_center_cl, self.sparse_mask_cl,
self.rms_waveform_channel_cl, self.waveform_distance_cl)
pyopencl.enqueue_copy(self.queue, self.waveform_distance, self.waveform_distance_cl)
cluster_idx = np.argmin(self.waveform_distance)
shift = None
elif self.argmin_method == 'pythran':
s = pythran_tools.pythran_loop_sparse_dist(waveform,
self.catalogue['centers0'], self.sparse_mask)
cluster_idx = np.argmin(s)
shift = None
elif self.argmin_method == 'numba':
#~ s = numba_loop_sparse_dist(waveform, self.catalogue['centers0'], self.sparse_mask)
#~ cluster_idx = np.argmin(s)
#~ shift = None
shifts = list(range(-self.maximum_jitter_shift, self.maximum_jitter_shift+1))
all_s = []
for shift in shifts:
waveform = self.fifo_residuals[left_ind+shift:left_ind+self.peak_width+shift,:]
s = numba_loop_sparse_dist(waveform, self.catalogue['centers0'], self.sparse_mask)
all_s.append(s)
all_s = np.array(all_s)
shift_ind, cluster_idx = np.unravel_index(np.argmin(all_s, axis=None), all_s.shape)
shift = shifts[shift_ind]
#~ print(shift, cluster_idx)
#~ if self._plot_debug:
#~ fig, ax = plt.subplots()
#~ ax.plot(shifts, all_s, marker='o')
#~ ax.set_title(f'{left_ind-self.n_left} {shift}')
#~ s = numba_loop_sparse_dist(waveform, self.catalogue['centers0'], self.sparse_mask)
#~ cluster_idx = np.argmin(s)
elif self.argmin_method == 'numpy':
# replace by this (identical but a bit faster)
d = self.catalogue['centers0']-waveform[None, :, :]
d *= d
#s = d.sum(axis=1).sum(axis=1) # intuitive
#s = d.reshape(d.shape[0], -1).sum(axis=1) # a bit faster
s = np.einsum('ijk->i', d) # a bit faster
cluster_idx = np.argmin(s)
shift = None
else:
raise(NotImplementedError())
#~ print('get_best_template', left_ind-self.n_left)
#~ if 16000 < (left_ind-self.n_left) <16400:
#~ if self._plot_debug:
#~ fig, ax = plt.subplots()
#~ chan_order = np.argsort(self.distances[0, :])
#~ channels = self.channels_adjacency[chan_ind]
#~ channels = chan_order
#~ wf = waveform[:, channels]
#~ wf0 = self.catalogue['centers0'][cluster_idx, :, :][:, channels]
#~ wf = waveform
#~ wf0 = self.catalogue['centers0'][cluster_idx, :, :]
#~ wf= waveform
#~ ax.plot(wf.T.flatten(), color='k')
#~ ax.plot(wf0.T.flatten(), color='m')
#~ plot_chan = channels.tolist().index(chan_ind)
#~ plot_chan = chan_ind
#~ ax.axvline(plot_chan * self.peak_width - self.n_left)
#~ ax.set_title(f'cluster_idx {cluster_idx}')
#~ plt.show()
#~ label = self.catalogue['cluster_labels'][cluster_idx]
return cluster_idx, shift
def accept_tempate(self, left_ind, cluster_idx, jitter):
#~ self._debug_nb_accept_tempate += 1
#~ import matplotlib.pyplot as plt
# criteria mono channel = old implementation
#~ keep_template = np.sum(wf**2) > np.sum((wf-(wf0+jitter1*wf1+jitter1**2/2*wf2))**2)
if jitter is None:
# this must have a jitter
jitter = 0
if np.abs(jitter) > (self.maximum_jitter_shift - 0.5):
return False
# criteria multi channel
mask = self.sparse_mask[cluster_idx]
full_wf0 = self.catalogue['centers0'][cluster_idx,: , :][:, mask]
full_wf1 = self.catalogue['centers1'][cluster_idx,: , :][:, mask]
full_wf2 = self.catalogue['centers2'][cluster_idx,: , :][:, mask]
# waveform L2 on mask
waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
full_wf = waveform[:, :][:, mask]
wf_nrj = np.sum(full_wf**2, axis=0)
# prediction L2 on mask
label = self.catalogue['cluster_labels'][cluster_idx]
weight = self.weight_per_template[label]
pred_wf = (full_wf0+jitter*full_wf1+jitter**2/2*full_wf2)
residual_nrj = np.sum((full_wf-pred_wf)**2, axis=0)
# criteria per channel
#~ thresh = 0.9
thresh = 0.7
#~ thresh = 0.5
criteria_weighted = (wf_nrj > residual_nrj).astype('float') * weight
accept_template = np.sum(criteria_weighted) >= thresh * np.sum(weight)
label = self.catalogue['clusters'][cluster_idx]['cluster_label']
#~ if True:
#~ if np.random.rand()<0.05:
#~ if label == 151:
#~ if self._plot_debug:
#~ print('label == 151', 'cluster_idx', cluster_idx)
#~ max_chan_ind = self.catalogue['clusters'][cluster_idx]['extremum_channel']
#~ fig, ax = plt.subplots()
#~ ax.plot(self.fifo_residuals[:, max_chan_ind])
#~ ax.scatter([left_ind-self.n_left], [self.fifo_residuals[left_ind-self.n_left, max_chan_ind]], color='r')
#~ fig, axs = plt.subplots(nrows=2, sharex=True)
#~ axs[0].plot(full_wf.T.flatten(), color='b')
#~ if accept_template:
#~ axs[0].plot(pred_wf.T.flatten(), color='g')
#~ else:
#~ axs[0].plot(pred_wf.T.flatten(), color='r')
#~ axs[0].plot((full_wf-pred_wf).T.flatten(), color='m')
#~ plt.show()
#DEBUG
#~ label = self.catalogue['cluster_labels'][cluster_idx]
#~ if label in (10, ):
#~ print('accept_tempate',accept_template, 'label', label)
#~ print(wf_nrj>res_nrj)
#~ print(weight)
#~ print(criteria_weighted)
#~ print(np.sum(criteria_weighted), np.sum(weight), np.sum(criteria_weighted)/np.sum(weight))
#~ print()
#~ #ENDDEBUG
return accept_template
def _plot_before_peeling_loop(self):
fig, ax = plt.subplots()
plot_sigs = self.fifo_residuals.copy()
self._plot_sigs_before = plot_sigs
#~ chan_order = np.argsort(self.distances[0, :])
for c in range(self.nb_channel):
#~ for c in chan_order:
plot_sigs[:, c] += c*30
ax.plot(plot_sigs, color='k')
ax.axvline(self.fifo_size - self.n_right, color='r')
ax.axvline(-self.n_left, color='r')
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
nolabel_indexes, = np.nonzero(mask)
nolabel_indexes += self.n_span
for ind in nolabel_indexes:
ax.axvline(ind, ls='--')
#~ plt.show()
def _plot_label_unclassified(self, left_ind, peak_chan, cluster_idx, jitter):
fig, ax = plt.subplots()
wf = self.fifo_residuals[left_ind:left_ind+self.peak_width, :]
wf0 = self.catalogue['centers0'][cluster_idx, :, :]
ax.plot(wf.T.flatten(), color='b')
ax.plot(wf0.T.flatten(), color='g')
ax.set_title(f'label_unclassified {left_ind-self.n_left} {cluster_idx}')
def _plot_after_peeling_loop(self, good_spikes):
fig, ax = plt.subplots()
plot_sigs = self.fifo_residuals.copy()
#~ chan_order = np.argsort(self.distances[0, :])
for c in range(self.nb_channel):
#~ for c in chan_order:
plot_sigs[:, c] += c*30
ax.plot(plot_sigs, color='k')
ax.plot(self._plot_sigs_before, color='b')
ax.axvline(self.fifo_size - self.n_right, color='r')
ax.axvline(-self.n_left, color='r')
for ind in np.nonzero(~self.mask_not_already_tested)[0] + self.n_span:
ax.axvline(ind, ls='-', color='g')
| |
squares sequentially from 00 to 39 we can concatenate these two-digit numbers to produce strings that correspond with sets of squares.
Statistically it can be shown that the three most popular squares, in order,
are JAIL (6.24%) = Square 10, E3 (3.18%) = Square 24,
and GO (3.09%) = Square 00.
So these three most popular squares can be listed with the six-digit modal string: 102400.
If, instead of using two 6-sided dice, two 4-sided dice are used, find the six-digit modal string.
'''
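# The statistics quoted above invite a simulation. Below is a hedged Monte Carlo
# sketch (not an exact Markov-chain solution): it simulates many single rolls with
# the standard board layout, the Community Chest and Chance movement cards, the
# "Go To Jail" square and the three-consecutive-doubles rule, then reports the
# three most visited squares as a six-digit modal string. Drawing cards uniformly
# at random with replacement (instead of cycling shuffled decks) is an
# illustrative simplification.
def p84(dice_sides=4, rolls=2000000):
    import random
    from collections import Counter
    cc_squares = {2, 17, 33}              # Community Chest
    ch_squares = {7, 22, 36}              # Chance
    next_rail = {7: 15, 22: 25, 36: 5}    # next railway from each Chance square
    pos, doubles = 0, 0
    counts = Counter()
    for _ in range(rolls):
        d1, d2 = random.randint(1, dice_sides), random.randint(1, dice_sides)
        doubles = doubles + 1 if d1 == d2 else 0
        if doubles == 3:
            pos, doubles = 10, 0          # three doubles in a row -> JAIL
        else:
            pos = (pos + d1 + d2) % 40
            if pos in ch_squares:         # Chance: 10 of 16 cards move the player
                card = random.randrange(16)
                if card == 0:
                    pos = 0               # advance to GO
                elif card == 1:
                    pos = 10              # go to JAIL
                elif card == 2:
                    pos = 11              # go to C1
                elif card == 3:
                    pos = 24              # go to E3
                elif card == 4:
                    pos = 39              # go to H2
                elif card == 5:
                    pos = 5               # go to R1
                elif card in (6, 7):
                    pos = next_rail[pos]  # go to next railway company
                elif card == 8:
                    pos = 28 if pos == 22 else 12  # go to next utility
                elif card == 9:
                    pos = (pos - 3) % 40  # go back 3 squares
            if pos == 30:
                pos = 10                  # GO TO JAIL square
            elif pos in cc_squares:       # Community Chest: 2 of 16 cards move
                card = random.randrange(16)
                if card == 0:
                    pos = 0
                elif card == 1:
                    pos = 10
        counts[pos] += 1
    top3 = [sq for sq, _ in counts.most_common(3)]
    print('[84]: ', ''.join('{:02d}'.format(sq) for sq in top3))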
'''
Problem 85
By counting carefully it can be seen that a rectangular grid measuring 3 by 2 contains eighteen rectangles:
Although there exists no rectangular grid that contains exactly two million rectangles, find the area of the grid with the nearest solution.
'''
def p85():
# sumN = n(1+n)/2
# sumM = m(1+m)/2
# totalRectangles = nm(1+n)(1+m)/4
min_val = 0
min_grid = 0
L = 2000000
min_diff = L
for m in range(3, 101):
for n in range(1, m):
diff = L - ((m * n * (1 + m) * (1 + n)) / 4)
if min_diff > abs(diff):
min_diff = abs(diff)
min_val = L - diff  # the computed rectangle count closest to the target
min_grid = m * n
print('[85]: ', min_grid, min_val)
'''
Problem 86
A spider, S, sits in one corner of a cuboid room, measuring 6 by 5 by 3, and a fly, F, sits in the opposite corner.
By travelling on the surfaces of the room the shortest "straight line" distance from S to F is 10 and the path is shown on the diagram.
However, there are up to three "shortest" path candidates for any given cuboid and the shortest route doesn't always have integer length.
It can be shown that there are exactly 2060 distinct cuboids, ignoring rotations, with integer dimensions, up to a maximum size of M by M by M,
for which the shortest route has integer length when M = 100.
This is the least value of M for which the number of solutions first exceeds two thousand;
the number of solutions when M = 99 is 1975.
Find the least value of M such that the number of solutions first exceeds one million.
'''
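# Hedged sketch for Problem 86: for a cuboid a x b x c with a <= b <= c the shortest
# surface path has length sqrt((a + b)^2 + c^2). For each candidate M (the largest
# side c) we count the (a, b) pairs whose path length is an integer and accumulate
# until the total first exceeds one million. Assumes math.isqrt (Python 3.8+).
def p86(target=1000000):
    from math import isqrt
    total, c = 0, 0
    while total <= target:
        c += 1
        for s in range(2, 2 * c + 1):          # s = a + b
            hyp2 = s * s + c * c
            root = isqrt(hyp2)
            if root * root == hyp2:
                # number of pairs with 1 <= a <= b <= c and a + b = s
                total += s // 2 if s <= c else c - (s + 1) // 2 + 1
    print('[86]: ', c)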
'''
Problem 87
The smallest number expressible as the sum of a prime square, prime cube, and prime fourth power is 28. In fact, there are exactly four numbers below fifty that can be expressed in such a way:
28 = 2^2 + 2^3 + 2^4
33 = 3^2 + 2^3 + 2^4
49 = 5^2 + 2^3 + 2^4
47 = 2^2 + 3^3 + 2^4
How many numbers below fifty million can be expressed as the sum of a prime square, prime cube, and prime fourth power?
'''
def p87():
# square 7071^2 = 49999041
# cube 368^3 = 49836032
# fourth power 84^4 = 49787136
primes = prime_sieve(7100)
sq = [p for p in primes if p <= 7071]
cb = [p for p in primes if p <= 368]
fp = [p for p in primes if p <= 84]
ret = []
for s in sq:
for c in cb:
for f in fp:
res = s ** 2 + c ** 3 + f ** 4
if res < 50000000:
ret.append(res)
print('[87]: ', len(set(ret)))
'''
Problem 88
A natural number, N, that can be written as the sum and product of a given set of at least two natural numbers,
{a1, a2, ... , ak} is called a product-sum number: N = a1 + a2 + ... + ak = a1 × a2 × ... × ak.
For example, 6 = 1 + 2 + 3 = 1 × 2 × 3.
For a given set of size, k, we shall call the smallest N with this property a minimal product-sum number.
The minimal product-sum numbers for sets of size, k = 2, 3, 4, 5, and 6 are as follows.
k=2: 4 = 2 × 2 = 2 + 2
k=3: 6 = 1 × 2 × 3 = 1 + 2 + 3
k=4: 8 = 1 × 1 × 2 × 4 = 1 + 1 + 2 + 4
k=5: 8 = 1 × 1 × 2 × 2 × 2 = 1 + 1 + 2 + 2 + 2
k=6: 12 = 1 × 1 × 1 × 1 × 2 × 6 = 1 + 1 + 1 + 1 + 2 + 6
Hence for 2≤k≤6, the sum of all the minimal product-sum numbers is 4+6+8+12 = 30;
note that 8 is only counted once in the sum.
In fact, as the complete set of minimal product-sum numbers for 2≤k≤12 is {4, 6, 8, 12, 15, 16}, the sum is 61.
What is the sum of all the minimal product-sum numbers for 2≤k≤12000?
'''
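# Hedged sketch for Problem 88: a product-sum number for set size k is a product of
# factors >= 2 padded with ones, and the padding length makes k equal to
# (number of factors) + (product - sum of factors). Since the minimal N for any k is
# at most 2k, a depth-first search over factor combinations with product <= 2 * kmax
# is enough to fill in the minimal value for every k.
def p88(kmax=12000):
    limit = 2 * kmax
    minimal = [limit] * (kmax + 1)

    def search(product, total, count, start):
        k = count + product - total
        if k > kmax:
            return                      # k only grows when more factors are added
        if product < minimal[k]:
            minimal[k] = product
        for factor in range(start, limit // product + 1):
            search(product * factor, total + factor, count + 1, factor)

    for factor in range(2, limit // 2 + 1):
        search(factor, factor, 1, factor)
    print('[88]: ', sum(set(minimal[2:])))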
'''
Problem 89
For a number written in Roman numerals to be considered valid
there are basic rules which must be followed.
Even though the rules allow some numbers to be expressed in more than
one way there is always a "best" way of writing a particular number.
For example, it would appear that there are at least six ways of writing the number sixteen:
IIIIIIIIIIIIIIII
VIIIIIIIIIII
VVIIIIII
XIIIIII
VVVI
XVI
However, according to the rules only XIIIIII and XVI are valid,
and the last example is considered to be the most efficient,
as it uses the least number of numerals.
The 11K text file, roman.txt (right click and 'Save Link/Target As...'), contains one thousand numbers written in valid,
but not necessarily minimal, Roman numerals; see About... Roman Numerals for the definitive rules for this problem.
Find the number of characters saved by writing each of these in their minimal form.
Note: You can assume that all the Roman numerals in the file contain no more than four consecutive identical units.
'''
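# Hedged sketch for Problem 89: read the roman.txt resource, convert each numeral to
# its integer value, re-encode it minimally using the subtractive pairs, and add up
# the characters saved. The file name 'roman.txt' is an assumption about where the
# Project Euler resource has been saved locally.
def p89(filename='roman.txt'):
    values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    minimal = [(1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
               (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
               (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I')]

    def to_int(roman):
        total = 0
        for ch, nxt in zip(roman, roman[1:] + ' '):
            v = values[ch]
            total += -v if nxt in values and values[nxt] > v else v
        return total

    def to_minimal(number):
        out = []
        for value, symbol in minimal:
            count, number = divmod(number, value)
            out.append(symbol * count)
        return ''.join(out)

    saved = 0
    with open(filename) as handle:
        for line in handle:
            roman = line.strip()
            if roman:
                saved += len(roman) - len(to_minimal(to_int(roman)))
    print('[89]: ', saved)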
'''
Problem 90
Each of the six faces on a cube has a different digit (0 to 9) written on it;
the same is done to a second cube.
By placing the two cubes side-by-side in different positions we can form a variety of 2-digit numbers.
For example, the square number 64 could be formed:
In fact, by carefully choosing the digits on both cubes it is possible to display all of the square numbers
below one-hundred: 01, 04, 09, 16, 25, 36, 49, 64, and 81.
For example, one way this can be achieved is
by placing {0, 5, 6, 7, 8, 9} on one cube
and {1, 2, 3, 4, 8, 9} on the other cube.
However, for this problem we shall allow the 6 or 9 to be turned upside-down so that an arrangement like
{0, 5, 6, 7, 8, 9} and {1, 2, 3, 4, 6, 7} allows for all nine square numbers to be displayed;
otherwise it would be impossible to obtain 09.
In determining a distinct arrangement we are interested in the digits on each cube, not the order.
{1, 2, 3, 4, 5, 6} is equivalent to {3, 6, 4, 1, 2, 5}
{1, 2, 3, 4, 5, 6} is distinct from {1, 2, 3, 4, 5, 9}
But because we are allowing 6 and 9 to be reversed, the two distinct sets in the last example both represent the extended set {1, 2, 3, 4, 5, 6, 9} for the purpose of forming 2-digit numbers.
How many distinct arrangements of the two cubes allow for all of the square numbers to be displayed?
'''
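# Hedged sketch for Problem 90: enumerate all unordered pairs of 6-digit cube face
# sets, treat 6 and 9 as interchangeable, and count the pairs that can display every
# two-digit square 01, 04, 09, 16, 25, 36, 49, 64 and 81.
def p90():
    from itertools import combinations
    squares = [(0, 1), (0, 4), (0, 9), (1, 6), (2, 5), (3, 6), (4, 9), (6, 4), (8, 1)]

    def extend(faces):
        faces = set(faces)
        if faces & {6, 9}:
            faces |= {6, 9}
        return faces

    cubes = [extend(c) for c in combinations(range(10), 6)]
    count = 0
    for i in range(len(cubes)):
        for j in range(i + 1, len(cubes)):
            a, b = cubes[i], cubes[j]
            if all((x in a and y in b) or (x in b and y in a) for x, y in squares):
                count += 1
    print('[90]: ', count)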
'''
Problem 91
The points P (x1, y1) and Q (x2, y2) are plotted at integer co-ordinates and are joined to the origin, O(0,0), to form ΔOPQ.
There are exactly fourteen triangles containing a right angle that can be formed when each co-ordinate lies between 0 and 2 inclusive; that is,0 ≤ x1, y1, x2, y2 ≤ 2.
Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed?
'''
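# Hedged sketch for Problem 91: brute force over all pairs of lattice points P and Q
# (excluding the origin and P == Q) and test for a right angle with the converse of
# Pythagoras' theorem on the squared side lengths.
def p91(size=50):
    points = [(x, y) for x in range(size + 1) for y in range(size + 1)][1:]  # drop (0, 0)
    count = 0
    for i, (x1, y1) in enumerate(points):
        for (x2, y2) in points[i + 1:]:
            op = x1 * x1 + y1 * y1
            oq = x2 * x2 + y2 * y2
            pq = (x1 - x2) ** 2 + (y1 - y2) ** 2
            a, b, c = sorted((op, oq, pq))
            if c == a + b:
                count += 1
    print('[91]: ', count)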
'''
Problem 92
A number chain is created by continuously adding the square of the digits in a number to form a new number until it has been seen before.
For example,
44 → 32 → 13 → 10 → 1 → 1
85 → 89 → 145 → 42 → 20 → 4 | |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.engine.base_layer import shape_type_conversion
from tensorflow.python.keras._impl.keras.layers.recurrent import _generate_dropout_mask
from tensorflow.python.keras._impl.keras.layers.recurrent import RNN
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.keras._impl.keras.utils import generic_utils
from tensorflow.python.util.tf_export import tf_export
class ConvRNN2D(RNN):
"""Base class for convolutional-recurrent layers.
Arguments:
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the number of channels of the recurrent state
(which should be the same as the number of channels of the cell
output). This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
input_shape: Use this argument to specify the shape of the
input when this layer is the first one in a model.
Input shape:
5D tensor with shape:
`(samples, timesteps, channels, rows, cols)`
if data_format='channels_first' or 5D tensor with shape:
`(samples, timesteps, rows, cols, channels)`
if data_format='channels_last'.
Output shape:
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)`
if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
- if `return_sequences`: 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)`
if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)`
if data_format='channels_last'.
- else, 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
Masking:
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an Embedding layer with the `mask_zero` parameter
set to `True`.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
- if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
- if functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers,
e.g. `(32, 10, 100, 100, 32)`.
Note that the number of rows and columns should be specified
too.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
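Example:
A minimal illustrative sketch (assuming the `ConvLSTM2D` subclass built
on top of this base class and the `Sequential` model API; the shapes
below are arbitrary):
model = Sequential()
model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3),
input_shape=(None, 40, 40, 1),
padding='same', return_sequences=True))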
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if unroll:
raise TypeError('Unrolling isn\'t possible with '
'convolutional RNNs.')
if isinstance(cell, (list, tuple)):
# The StackedConvRNN2DCells isn't implemented yet.
raise TypeError('It is not possible at the moment to '
'stack convolutional cells.')
super(ConvRNN2D, self).__init__(cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs)
self.input_spec = [InputSpec(ndim=5)]
self.states = None
@shape_type_conversion
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif cell.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(rows,
cell.kernel_size[0],
padding=cell.padding,
stride=cell.strides[0],
dilation=cell.dilation_rate[0])
cols = conv_utils.conv_output_length(cols,
cell.kernel_size[1],
padding=cell.padding,
stride=cell.strides[1],
dilation=cell.dilation_rate[1])
if cell.data_format == 'channels_first':
output_shape = input_shape[:2] + (cell.filters, rows, cols)
elif cell.data_format == 'channels_last':
output_shape = input_shape[:2] + (rows, cols, cell.filters)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == 'channels_first':
output_shape += [(input_shape[0], cell.filters, rows, cols)
for _ in range(2)]
elif cell.data_format == 'channels_last':
output_shape += [(input_shape[0], rows, cols, cell.filters)
for _ in range(2)]
return output_shape
@shape_type_conversion
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:]
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == 'channels_first':
ch_dim = 1
elif self.cell.data_format == 'channels_last':
ch_dim = 3
if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
raise ValueError(
'An initial_state was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'However `cell.state_size` is '
'{}'.format([spec.shape for spec in self.state_spec],
self.cell.state_size))
else:
if self.cell.data_format == 'channels_first':
self.state_spec = [InputSpec(shape=(None, dim, None, None))
for dim in state_size]
elif self.cell.data_format == 'channels_last':
self.state_spec = [InputSpec(shape=(None, None, None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
initial_state = self.cell.input_conv(initial_state,
K.zeros(tuple(shape)),
padding=self.cell.padding)
if hasattr(self.cell.state_size, '__len__'):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = self._standardize_args(
inputs, initial_state, constants)
if initial_state is None and constants is None:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are | |
###
# Copyright (c) 2002-2005, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import sys
import time
import supybot.conf as conf
import supybot.ircdb as ircdb
import supybot.utils as utils
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.schedule as schedule
import supybot.callbacks as callbacks
class Admin(callbacks.Plugin):
def __init__(self, irc):
self.__parent = super(Admin, self)
self.__parent.__init__(irc)
self.joins = {}
self.pendingNickChanges = {}
def do437(self, irc, msg):
"""Nick/channel temporarily unavailable."""
target = msg.args[0]
t = time.time() + 30
if irc.isChannel(target): # We don't care about nicks.
# Let's schedule a rejoin.
networkGroup = conf.supybot.networks.get(irc.network)
def rejoin():
irc.queueMsg(networkGroup.channels.join(target))
# We don't need to schedule something because we'll get another
# 437 when we try to join later.
schedule.addEvent(rejoin, t)
self.log.info('Scheduling a rejoin to %s at %s; '
'Channel temporarily unavailable.', target, t)
else:
irc = self.pendingNickChanges.get(irc, None)
if irc is not None:
def nick():
irc.queueMsg(ircmsgs.nick(target))
schedule.addEvent(nick, t)
self.log.info('Scheduling a nick change to %s at %s; '
'Nick temporarily unavailable.', target, t)
else:
self.log.debug('Got 437 without Admin.nick being called.')
def do471(self, irc, msg):
try:
channel = msg.args[1]
(irc, msg) = self.joins.pop(channel)
irc.error('Cannot join %s, it\'s full.' % channel)
except KeyError:
self.log.debug('Got 471 without Admin.join being called.')
def do473(self, irc, msg):
try:
channel = msg.args[1]
(irc, msg) = self.joins.pop(channel)
irc.error('Cannot join %s, I was not invited.' % channel)
except KeyError:
self.log.debug('Got 473 without Admin.join being called.')
def do474(self, irc, msg):
try:
channel = msg.args[1]
(irc, msg) = self.joins.pop(channel)
irc.error('Cannot join %s, I am banned.' % channel)
except KeyError:
self.log.debug('Got 474 without Admin.join being called.')
def do475(self, irc, msg):
try:
channel = msg.args[1]
(irc, msg) = self.joins.pop(channel)
irc.error('Cannot join %s, my keyword was wrong.' % channel)
except KeyError:
self.log.debug('Got 475 without Admin.join being called.')
def do515(self, irc, msg):
try:
channel = msg.args[1]
(irc, msg) = self.joins.pop(channel)
irc.error('Cannot join %s, I\'m not identified with the NickServ.'
% channel)
except KeyError:
self.log.debug('Got 515 without Admin.join being called.')
def doJoin(self, irc, msg):
if msg.prefix == irc.prefix:
try:
del self.joins[msg.args[0]]
except KeyError:
s = 'Joined a channel without Admin.join being called.'
self.log.debug(s)
def doInvite(self, irc, msg):
channel = msg.args[1]
if channel not in irc.state.channels:
if conf.supybot.alwaysJoinOnInvite() or \
ircdb.checkCapability(msg.prefix, 'admin'):
self.log.info('Invited to %s by %s.', channel, msg.prefix)
networkGroup = conf.supybot.networks.get(irc.network)
irc.queueMsg(networkGroup.channels.join(channel))
conf.supybot.networks.get(irc.network).channels().add(channel)
else:
self.log.warning('Invited to %s by %s, but '
'supybot.alwaysJoinOnInvite was False and '
'the user lacked the "admin" capability.',
channel, msg.prefix)
def join(self, irc, msg, args, channel, key):
"""<channel> [<key>]
Tell the bot to join the given channel. If <key> is given, it is used
when attempting to join the channel.
"""
if not irc.isChannel(channel):
irc.errorInvalid('channel', channel, Raise=True)
networkGroup = conf.supybot.networks.get(irc.network)
networkGroup.channels().add(channel)
if key:
networkGroup.channels.key.get(channel).setValue(key)
maxchannels = irc.state.supported.get('maxchannels', sys.maxint)
if len(irc.state.channels) + 1 > maxchannels:
irc.error('I\'m already too close to maximum number of '
'channels for this network.', Raise=True)
irc.queueMsg(networkGroup.channels.join(channel))
irc.noReply()
self.joins[channel] = (irc, msg)
join = wrap(join, ['validChannel', additional('something')])
def channels(self, irc, msg, args):
"""takes no arguments
Returns the channels the bot is on. Must be given in private, in order
to protect the secrecy of secret channels.
"""
L = irc.state.channels.keys()
if L:
utils.sortBy(ircutils.toLower, L)
irc.reply(format('%L', L))
else:
irc.reply('I\'m not currently in any channels.')
channels = wrap(channels, ['private'])
def do484(self, irc, msg):
irc = self.pendingNickChanges.get(irc, None)
if irc is not None:
irc.error('My connection is restricted, I can\'t change nicks.')
else:
self.log.debug('Got 484 without Admin.nick being called.')
def do433(self, irc, msg):
irc = self.pendingNickChanges.get(irc, None)
if irc is not None:
irc.error('Someone else is already using that nick.')
else:
self.log.debug('Got 433 without Admin.nick being called.')
def do435(self, irc, msg):
irc = self.pendingNickChanges.get(irc, None)
if irc is not None:
irc.error('That nick is currently banned.')
else:
self.log.debug('Got 435 without Admin.nick being called.')
def do438(self, irc, msg):
irc = self.pendingNickChanges.get(irc, None)
if irc is not None:
irc.error(format('I can\'t change nicks, the server said %q.',
msg.args[2]), private=True)
else:
self.log.debug('Got 438 without Admin.nick being called.')
def doNick(self, irc, msg):
if msg.nick == irc.nick or msg.args[0] == irc.nick:
try:
del self.pendingNickChanges[irc]
except KeyError:
self.log.debug('Got NICK without Admin.nick being called.')
def nick(self, irc, msg, args, nick):
"""[<nick>]
Changes the bot's nick to <nick>. If no nick is given, returns the
bot's current nick.
"""
if nick:
conf.supybot.nick.setValue(nick)
irc.queueMsg(ircmsgs.nick(nick))
self.pendingNickChanges[irc.getRealIrc()] = irc
else:
irc.reply(irc.nick)
nick = wrap(nick, [additional('nick')])
def part(self, irc, msg, args, channel, reason):
"""[<channel>] [<reason>]
Tells the bot to part the list of channels you give it. <channel> is
only necessary if you want the bot to part a channel other than the
current channel. If <reason> is specified, use it as the part
message.
"""
if channel is None:
if irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
irc.error(Raise=True)
try:
network = conf.supybot.networks.get(irc.network)
network.channels().remove(channel)
except KeyError:
pass
if channel not in irc.state.channels:
irc.error('I\'m not in %s.' % channel, Raise=True)
irc.queueMsg(ircmsgs.part(channel, reason or msg.nick))
if msg.nick in irc.state.channels[channel].users:
irc.noReply()
else:
irc.replySuccess()
part = wrap(part, [optional('validChannel'), additional('text')])
class capability(callbacks.Commands):
def add(self, irc, msg, args, user, capability):
"""<name|hostmask> <capability>
Gives the user specified by <name> (or the user to whom <hostmask>
currently maps) the specified capability <capability>
"""
# Ok, the concepts that are important with capabilities:
#
### 1) No user should be able to elevate his privilege to owner.
### 2) Admin users are *not* superior to #channel.ops, and don't
### have God-like powers over channels.
### 3) We assume that Admin users are two things: non-malicious and
### greedy for power. So they'll try to elevate their
### privilege to owner, but they won't try to crash the bot for
### no reason.
# Thus, the owner capability can't be given in the bot. Admin
# users can only give out capabilities they have themselves (which
# will depend on supybot.capabilities and its child default) but
# generally means they can't mess with channel capabilities.
if ircutils.strEqual(capability, 'owner'):
irc.error('The "owner" capability can\'t be added in the bot.'
' Use the supybot-adduser program (or edit the '
'users.conf file yourself) to add an owner '
'capability.')
return
if ircdb.isAntiCapability(capability) or \
ircdb.checkCapability(msg.prefix, capability):
user.addCapability(capability)
ircdb.users.setUser(user)
irc.replySuccess()
else:
irc.error('You can\'t add capabilities you don\'t have.')
add = wrap(add, ['otherUser', 'lowered'])
def remove(self, irc, msg, args, user, capability):
"""<name|hostmask> <capability>
Takes from the user specified by <name> (or the user to whom
<hostmask> currently maps) the specified capability <capability>
"""
if ircdb.checkCapability(msg.prefix, capability) or \
ircdb.isAntiCapability(capability):
try:
user.removeCapability(capability)
ircdb.users.setUser(user)
irc.replySuccess()
except KeyError:
irc.error('That user doesn\'t have that capability.')
else:
s = 'You can\'t remove capabilities you don\'t have.'
irc.error(s)
remove = wrap(remove, ['otherUser','lowered'])
class ignore(callbacks.Commands):
def add(self, irc, msg, args, hostmask, expires):
"""<hostmask|nick> [<expires>]
This will set a persistent ignore on <hostmask> or the hostmask
currently associated with <nick>. <expires> | |
#!/usr/bin/env python
import numpy as np
def cb_op(oper_A, t_mat):
"""
Change the basis of an operator :math:`\hat{O}` from one basis :math:`A`: :math:`\\psi^{A}_{i}` to another basis :math:`B`: :math:`\\phi^{B}_{j}`.
.. math::
O^{\\prime} = T^{\dagger} O T,
T_{ij} = <\\psi^{A}_{i}|\\phi^{B}_{j}>.
Parameters
----------
oper_A : 2d array
The matrix form of operator :math:`\hat{O}` in basis :math:`A`.
t_mat : 2d array
The unitary transformation matrix from basis :math:`A` to basis :math:`B`, namely, :math:`T_{ij} = <\\psi^{A}_{i}|\\phi^{B}_{j}>`.
Returns
-------
oper_B : 2d array
The matrix form of operator :math:`\hat{O}` in basis :math:`B`.
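Examples
--------
Illustrative only: rotate a diagonal operator for a :math:`p` shell from the
complex harmonics basis to the real harmonics basis using :func:`tmat_c2r`
(the operator below is just a stand-in).
>>> import numpy as np
>>> lz_complex = np.diag([-1.0, 0.0, 1.0])
>>> lz_real = cb_op(lz_complex, tmat_c2r('p'))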
"""
oper_B = np.dot(np.dot(np.conj(np.transpose(t_mat)), oper_A), t_mat)
return oper_B
def cb_op2(oper_A, TL, TR):
"""
Change the basis of an operator :math:`\hat{O}`.
.. math::
O^{\\prime} = (TL)^{\dagger} O (TR),
Parameters
----------
oper_A : 2d array
The matrix form of operator :math:`\hat{O}` in basis :math:`A`.
TL : 2d array
The unitary transformation matrix applied on the left.
TR : 2d array
The unitary transformation matrix applied on the right.
Returns
-------
oper_B : 2d array
The matrix form of operator :math:`\hat{O}` after the transformation.
"""
oper_B = np.dot(np.dot(np.conj(np.transpose(TL)), oper_A), TR)
return oper_B
def tmat_c2r(case, ispin=False):
"""
Get the unitary transformation matrix from the basis of complex spherical harmonics to real spherical harmonics.
Parameters
----------
case : string
Label for different systems.
- 'p': for :math:`p`-shell
- 't2g': for :math:`t_{2g}`-shell
- 'd': for :math:`d`-shell
- 'f': for :math:`f`-shell
ispin : logical
Whether including spin degree of freedom or not (default: False).
Returns
-------
t_c2r : 2d complex array
The transformation matrix.
"""
sqrt2 = np.sqrt(2.0)
ci = np.complex128(0.0+1.0j)
cone = np.complex128(1.0+0.0j)
# p orbitals: px, py, pz
if case.strip() == 'p':
norbs = 3
t_c2r = np.zeros((norbs, norbs), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
# t2g orbitals in the t2g subspace, here, we use the so-called T-P equivalence,
# t2g orbitals behave like the effective orbital angular momentum leff=1
# dzx ~ py, dzy ~ px, dxy ~ pz
elif case.strip() == 't2g':
norbs = 3
t_c2r = np.zeros((norbs, norbs), dtype=np.complex128)
# dzx --> py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,0] = ci/sqrt2
t_c2r[2,0] = ci/sqrt2
# dzy --> px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,1] = cone/sqrt2
t_c2r[2,1] = -cone/sqrt2
# dxy --> pz=|1,0>
t_c2r[1,2] = cone
# d orbitals: dz2, dzx, dzy, dx2-y2, dxy
elif case.strip() == 'd':
norbs = 5
t_c2r = np.zeros((norbs, norbs), dtype=np.complex128)
# dz2=|2,0>
t_c2r[2,0] = cone
# dzx=1/sqrt(2)( |2,-1> - |2,1> )
t_c2r[1,1] = cone/sqrt2
t_c2r[3,1] = -cone/sqrt2
# dzy=i/sqrt(2)( |2,-1> + |2,1> )
t_c2r[1,2] = ci/sqrt2
t_c2r[3,2] = ci/sqrt2
# dx2-y2=1/sqrt(2)( |2,-2> + |2,2> )
t_c2r[0,3] = cone/sqrt2
t_c2r[4,3] = cone/sqrt2
# dxy=i/sqrt(2)( |2,-2> - |2,2> )
t_c2r[0,4] = ci/sqrt2
t_c2r[4,4] = -ci/sqrt2
# f orbitals, please NOTE that this real form of the f orbitals is not the
# basis of the representation of the cubic point group, please call the
# function ``tmat_r2cub_f`` to get the transformation matrix from this basis
# to the cubic basis that forms the representation of the cubic point group.
elif case.strip() == 'f':
norbs = 7
t_c2r = np.zeros((norbs, norbs), dtype=np.complex128)
# fz3 = |3,0>
t_c2r[3, 0] = cone
# fxz2 = 1/sqrt(2)( |3,-1> - |3,1> )
t_c2r[2, 1] = cone/sqrt2
t_c2r[4, 1] = -cone/sqrt2
# fyz2 = i/sqrt(2)( |3,-1> + |3,1> )
t_c2r[2, 2] = ci/sqrt2
t_c2r[4, 2] = ci/sqrt2
# fz(x2-y2) = 1/sqrt(2)( |3,-2> + |3,2> )
t_c2r[1, 3] = cone/sqrt2
t_c2r[5, 3] = cone/sqrt2
# fxyz = i/sqrt(2)( |3,-2> - |3,2> )
t_c2r[1, 4] = ci/sqrt2
t_c2r[5, 4] = -ci/sqrt2
# fx(x2-3y2) = 1/sqrt(2) ( |3,-3> - |3,3> )
t_c2r[0, 5] = cone/sqrt2
t_c2r[6, 5] = -cone/sqrt2
# fy(3x2-y2) = i/sqrt(2) ( |3,-3> + |3,3> )
t_c2r[0, 6] = ci/sqrt2
t_c2r[6, 6] = ci/sqrt2
else:
print("don't support tmat_c2r for this case: ", case)
return
# the spin order is: up dn up dn ... up dn
if ispin:
ntot_orbs=2*norbs
t_c2r_spin = np.zeros((ntot_orbs,ntot_orbs), dtype=np.complex128)
# spin up
t_c2r_spin[0:ntot_orbs:2, 0:ntot_orbs:2] = t_c2r
# spin dn
t_c2r_spin[1:ntot_orbs:2, 1:ntot_orbs:2] = t_c2r
return t_c2r_spin
else:
return t_c2r
def tmat_r2c(case, ispin=False):
"""
Get the unitary transformation matrix from the basis of real spherical harmonics to complex spherical harmonics.
Parameters
----------
case : string
Label for different systems.
- 'p': for :math:`p`-shell
- 't2g': for :math:`t_{2g}`-shell
- 'd': for :math:`d`-shell
- 'f': for :math:`f`-shell
ispin : logical
Whether including spin degree of freedom or not (default: False).
Returns
-------
t_r2c : 2d complex array
The transformation matrix.
"""
t_r2c = np.conj(np.transpose(tmat_c2r(case, ispin)))
return t_r2c
def tmat_r2cub_f(ispin=False):
"""
Get the transformation matrix from real spherical harmonics to the cubic spherical harmonics
that forms the representation of the cubic point group, only for the :math:`f` system.
Parameters
----------
ispin : logical
Whether including spin degree of freedom or not (default: False).
Returns
-------
t_r2cub : 2d complex array
The transformation matrix.
"""
a = np.sqrt(10.0) / 4.0 + 0.0j
b = np.sqrt(6.0) / 4.0 + 0.0j
c = 1.0 + 0.0j
norbs = 7
t_r2cub = np.zeros((norbs,norbs), dtype=np.complex128)
# T1u
# fx3 = -sqrt(6)/4 fxz2 + sqrt(10)/4 fx(x2-3y2)
t_r2cub[1, 0] = -b
t_r2cub[5, 0] = a
# fy3 = -sqrt(6)/4 fyz2 - sqrt(10)/4 fy(3x2-y2)
t_r2cub[2, 1] = -b
t_r2cub[6, 1] = -a
# fz3 = fz3
t_r2cub[0, 2] = c
# T2u
# fx(y2-z2) = -sqrt(10)/4 fxz2 - sqrt(6)/4 fx(x2-3y2)
t_r2cub[1, 3] = -a
t_r2cub[5, 3] = -b
# fy(z2-x2) = sqrt(10)/4 fyz2 - sqrt(6)/4 fy(3x2-y2)
t_r2cub[2, 4] = a
t_r2cub[6, 4] = -b
# fz(x2-y2) = fz(x2-y2)
t_r2cub[3, 5] = c
# A2u
# fxyz = fxyz
t_r2cub[4, 6] = c
if ispin:
ntot_orbs = 2 * norbs
t_r2cub_spin = np.zeros((ntot_orbs, ntot_orbs), dtype=np.complex128)
# spin up
t_r2cub_spin[0:ntot_orbs:2, 0:ntot_orbs:2] = t_r2cub
# spin dn
t_r2cub_spin[1:ntot_orbs:2, 1:ntot_orbs:2] = t_r2cub
return t_r2cub_spin
else:
return t_r2cub
def tmat_cub2r_f(ispin=False):
"""
Get the transformation matrix from the cubic spherical harmonics to real spherical harmonics, only for :math:`f` system.
Parameters
----------
ispin : logical
Whether including spin degree of freedom or not (default: False).
Returns
-------
t_cub2r : 2d complex array
The transformation matrix.
"""
t_cub2r = np.conj(np.transpose(tmat_r2cub_f(ispin)))
return t_cub2r
def tmat_c2j(l):
"""
Get the transformation matrix from the complex spherical harmonics to
the :math:`|j^2,j_z>` basis in which the spin-oribt coupling Hamiltonian is diagonal.
The orbital order is:
:math:`|j=l-1/2, -j>, |j=l-1/2, -j+1>, ... |j=l-1/2, +j>,`
:math:`|j=l+1/2, -j>, |j=l+1/2, -j+1>, ..., |j=l+1/2, +j>`.
Parameters
----------
l : int
Quantum number of orbital angular momentum.
Returns
-------
t_c2j : 2d complex array
The transformation matrix.
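Examples
--------
Illustrative only: build the matrix for a :math:`p` shell (:math:`l=1`) and
rotate a spin-orbit Hamiltonian with :func:`cb_op`.
>>> t_c2j = tmat_c2j(1)
>>> t_c2j.shape
(6, 6)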
"""
if l == 1:
t_c2j = np.zeros((6, 6), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(2.0/3.0)
t_c2j[3,0] = np.sqrt(1.0/3.0)
t_c2j[2,1] = -np.sqrt(1.0/3.0)
t_c2j[5,1] = np.sqrt(2.0/3.0)
t_c2j[1,2] = 1.0
t_c2j[0,3] = np.sqrt(1.0/3.0)
t_c2j[3,3] = np.sqrt(2.0/3.0)
t_c2j[2,4] = np.sqrt(2.0/3.0)
t_c2j[5,4] = np.sqrt(1.0/3.0)
t_c2j[4,5] = 1.0
return t_c2j
elif l == 2:
t_c2j = np.zeros((10, 10), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(4.0/5.0)
t_c2j[3,0] = np.sqrt(1.0/5.0)
t_c2j[2,1] = -np.sqrt(3.0/5.0)
t_c2j[5,1] = np.sqrt(2.0/5.0)
t_c2j[4,2] = -np.sqrt(2.0/5.0)
t_c2j[7,2] = np.sqrt(3.0/5.0)
t_c2j[6,3] = -np.sqrt(1.0/5.0)
t_c2j[9,3] = np.sqrt(4.0/5.0)
t_c2j[1,4] = 1.0
t_c2j[0,5] = np.sqrt(1.0/5.0)
t_c2j[3,5] = np.sqrt(4.0/5.0)
t_c2j[2,6] = np.sqrt(2.0/5.0)
t_c2j[5,6] = np.sqrt(3.0/5.0)
t_c2j[4,7] = np.sqrt(3.0/5.0)
t_c2j[7,7] = np.sqrt(2.0/5.0)
t_c2j[6,8] = np.sqrt(4.0/5.0)
t_c2j[9,8] = np.sqrt(1.0/5.0)
t_c2j[8,9] = 1.0
return t_c2j
elif l == 3:
t_c2j = np.zeros((14,14), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(6.0/7.0)
t_c2j[3,0] = np.sqrt(1.0/7.0)
t_c2j[2,1] = -np.sqrt(5.0/7.0)
t_c2j[5,1] = np.sqrt(2.0/7.0)
t_c2j[4,2] = -np.sqrt(4.0/7.0)
t_c2j[7,2] = np.sqrt(3.0/7.0)
t_c2j[6,3] = -np.sqrt(3.0/7.0)
t_c2j[9,3] = np.sqrt(4.0/7.0)
t_c2j[8,4] = -np.sqrt(2.0/7.0)
t_c2j[11,4] = np.sqrt(5.0/7.0)
t_c2j[10,5] = -np.sqrt(1.0/7.0)
t_c2j[13,5] = np.sqrt(6.0/7.0)
t_c2j[1,6] = 1.0
t_c2j[0,7] = np.sqrt(1.0/7.0)
t_c2j[3,7] = np.sqrt(6.0/7.0)
t_c2j[2,8] = np.sqrt(2.0/7.0)
t_c2j[5,8] = np.sqrt(5.0/7.0)
t_c2j[4,9] = np.sqrt(3.0/7.0)
t_c2j[7,9] = np.sqrt(4.0/7.0)
t_c2j[6,10] = np.sqrt(4.0/7.0)
t_c2j[9,10] | |
return wordfq
def count_words(wordlst: list):
"""
count words in tweet text from list of list, dict, or str
:param wordlst: list of tweets
:return: word count, tweet count
"""
wrd_count: int = 0
tw_count: int = 0
for tw in wordlst:
if isinstance(tw, dict):
tw_wrds: list = tw['text'].split()
elif isinstance(tw, str):
tw_wrds: list = tw.split()
else:
tw_wrds: list = tw
tw_count += 1
wrd_count += len(tw_wrds)
return wrd_count, tw_count
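# Hedged usage example (not in the original module): count_words accepts a mixed list
# of tweet records, e.g.
#     wc, tc = count_words([{"text": "hello world"}, "just a string", ["pre", "tokenized"]])
# which yields wc == 7 words across tc == 3 tweets.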
def sort_freq(freqdict):
"""
    sort_freq reads word:frequency key:val pairs from a dict and returns a list of
    (frequency, word) tuples sorted from highest to lowest frequency
    :param freqdict: dict of word:frequency pairs
    :return: list of (frequency, word) tuples in descending order of frequency
"""
aux: list = []
for k, v in freqdict.items():
aux.append((v, k))
aux.sort(reverse=True)
return aux
def cloud_prep(wrd_tok_lst):
"""
    do_cloud calls this to build a single text blob from the tweets for the word cloud to process
:param wrd_tok_lst: preferably list of list of word tokens for tweets
:return:
"""
cloud_text = io.StringIO(newline="")
for tok_rec in wrd_tok_lst:
if isinstance(tok_rec, str):
cloud_text.write(tok_rec + " ")
else:
for a_tw in tok_rec:
if isinstance(a_tw, list):
cloud_text.write(" ".join([str(x) for x in a_tw]) + " ")
if isinstance(a_tw, str):
# if simple list of text for each tweet:
cloud_text.write(a_tw + " ")
return cloud_text.getvalue()
def apply_vader(sent_lst: list):
"""
    apply Vader's valence scoring of words, symbols, and phrases for social media sentiment.
    Scores fall on a continuous negative-to-positive range and comprise four components:
    compound, neg, neu, and pos. Vader also handles phrases and idioms, negation, and
    emphatic punctuation (e.g. '???'), and its 'constants' for terms and values can be
    extended or modified. Vader is optimized to handle sentiment on short posts like Tweets.
\n Author Credits:
Hutto,C.J. & Gilbert,E.E. (2014). VADER: Parsimonious Rule-based Model for Sentiment
Analysis of Social Media Text. Eighth International Conference on Weblogs and Social
Media (ICWSM-14). Ann Arbor, MI, June 2014.
:param sent_lst: list of dict or list of str with Tweet text
:return: Vscores list of Vader sentiment scores, plus Tweet index info I embedded
"""
vscores: list = []
for snt_x in sent_lst:
if isinstance(snt_x, list):
tmpdct: dict = Vsi.polarity_scores(" ".join([str(x) for x in snt_x]))
elif isinstance(snt_x, str):
tmpdct: dict = Vsi.polarity_scores(snt_x)
elif isinstance(snt_x, dict):
tmpdct: dict = Vsi.polarity_scores(snt_x['text'])
tmpdct.update(snt_x)
else:
print("apply_vader got incorrectly formatted Tweets as parameter")
break
vscores.append(tmpdct)
cmp_tot: float = 0.0
v_len = len(vscores)
for vidx in range(v_len):
v_rec = vscores[vidx]
cmp_tot += v_rec['compound']
cmp_avg = cmp_tot / v_len
prnt_str: str = "Average Vader compound score = %1.2f for %d Tweets" %(cmp_avg, v_len)
print(prnt_str)
return vscores
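# Hedged usage example (not in the original module): apply_vader works directly on a
# list of raw strings, e.g.
#     scores = apply_vader(["love this!", "worst service ever"])
# Each entry in 'scores' is a dict with 'neg', 'neu', 'pos' and 'compound' keys, and the
# function prints the average compound score for the batch before returning the list.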
def summarize_vader(vader_scores: list, top_lim: int = 10):
"""
    summarizes the compound, negative, neutral, and positive components of sentiment for a
    set of Tweets, reporting the average, range, and top-scoring Tweets for each component.
:param vader_scores: list of scores built from apply_vader method
:param top_lim: integer indicating number of top scores to summarize
:return: None
"""
rec_count: int = len(vader_scores)
print("\nsummarize_vader: Top Sentiment for %d total Tweets:" %rec_count)
print(" showing top %d compound, neutral, negative and positive sentiment" %top_lim)
def get_top(scoretyp: str, toplimit: int):
"""
        inner function: get the top Tweets for a score type, sorted by descending absolute value
:param toplimit: number of scores to identify, such as top 10
:param scoretyp: str to indicate Vader compound, negative, neutral or positive
:return:
"""
srtd = sorted(vader_scores, key=lambda x: fabs(x.get(scoretyp)), reverse=True)
tops: list = srtd[:toplimit]
return tops
def describe_score(scoretyp: str):
"""
gives average, minimum and maximum for a type of sentiment score
:param scoretyp: str as compound, neu, neg, or pos
:return: n/a
"""
        typ_tot: float = sum(vader_scores[x][scoretyp] for x in range(rec_count))
        typ_avg: float = typ_tot / rec_count
        if scoretyp == "neu":
            typestr = "4. Neutral"
        elif scoretyp == "neg":
            typestr = "3. Negative"
        elif scoretyp == "pos":
            typestr = "2. Positive"
        else:
            typestr = "1. Compound (aggregate)"
typ_min: float = min([vader_scores[x][scoretyp] for x in range(rec_count)])
typ_max: float = max([vader_scores[x][scoretyp] for x in range(rec_count)])
print(" %s " %typestr, end="")
print(" Average= %1.3f, Minimum= %1.3f, Maximum= %1.3f" %(typ_avg, typ_min, typ_max))
return
def show_with_text(typ, tops: list):
"""
prints applicable sentiment score along with text of Tweet
:param typ: string with formal Vader sentiment type (neu, pos, neg, compound)
:param tops: list of top tweets by sentiment type, number of tweets= top_lim
:return: None
"""
print("Printing top %d tweets by %s sentiment:" %(top_lim, typ))
for tws in tops:
# print(" %s sentiment= % 2.2f, '%d'" %(typ, tws[typ],
# twlst[int(tws['ord_id'])]['text']))
print(" %s sentiment= % 1.3f on %s" % (typ, tws[typ], tws['date']))
if typ in ['compound', 'neg']:
print(" Tweet txt: %s" % tws['text'][:100])
return
for x in ["compound", "pos", "neg"]:
describe_score(x)
top_list = get_top(x, top_lim)
show_with_text(x, top_list)
print("")
return None
def get_next_by_val(lst, field: str, val: float):
"""
    takes a list of dict, sorts by descending value of the chosen field, then returns the
    index of the first entry whose value is less than or equal to the identified target value
:param lst: a list of dict, that is: list of Tweets where dict keys are Tweet fields
:param field: str field name for retweet/quote count, fave count or sentiment value
:param val: integer or float value to be found in sorted field values
:return: ordinal index number, or -1 if error/not found
"""
lenx: int = len(lst)
if field in ['compound', 'neg', 'pos']:
srtd: list = sorted(lst, key=lambda x: fabs(x.get(field)), reverse=True)
else:
srtd: list = sorted(lst, key=lambda x: x.get(field), reverse=True)
for x in range(lenx):
if field in ['compound', 'neg', 'pos']:
if fabs(srtd[x][field]) <= fabs(val):
return x
else:
if int(srtd[x][field]) <= val:
return x
return -1
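# Hedged usage example (not in the original module): with a count field such as 'qrr',
#     idx = get_next_by_val(tweets, "qrr", 50)
# returns the position, in the descending sort of 'qrr' values, of the first tweet whose
# count is 50 or less (or -1 if every tweet exceeds 50). Sentiment fields ('compound',
# 'neg', 'pos') are compared by absolute value instead.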
def get_pctle_sentiment(twlst: list, ptile: int = 0, quota: int = 0):
"""
    create list of Tweets above a given percentile (or within a quota) for compound sentiment score
:param twlst: list of Vader scores
:param ptile: integer from 0 to 99 indicating percentile above which to include
:param quota: alternate to percentile is to specify quota- x records to select
:return: list of str: Tweets in top quartile by sentiment
"""
totlen: int = len(twlst)
srtd: list = sorted(twlst, key=lambda x: fabs(x.get('compound')), reverse=True)
if quota != 0:
quota = int(quota)
print("selecting top %d tweets by quota provided" % quota)
else:
top_pcnt: int = 100 - ptile
quota = round(totlen * (top_pcnt / 100), ndigits=0)
quota = int(quota)
print("get_pctle_sentiment: selecting top %d Tweets out of %d" % (quota, totlen))
tops: list = srtd[:quota]
med_sent: float = tops[round(quota * 0.5)]['compound']
top_sent: float = tops[0]['compound']
sent_80: int = get_next_by_val(twlst, "compound", 0.80)
print(" compound sentiment of 0.8 occurs at rec %d of %d" % (sent_80, totlen))
print(" filtered: top sentiment is %1.2f, median is %1.2f" % (top_sent, med_sent))
print(" least (abs) sentiment in filtered is: %1.3f" % tops[quota - 1]['compound'])
return tops
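# Hedged usage example (not in the original module): either a percentile or a quota can
# be used to filter, e.g.
#     top_sent = get_pctle_sentiment(scores, ptile=75)   # top 25% by |compound|
#     top_sent = get_pctle_sentiment(scores, quota=100)  # or exactly 100 tweets
# get_pctle_qrr and get_pctle_fave below follow the same pattern for popularity counts.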
def get_pctle_qrr(twlst: list, ptile: int = 0, quota: int = 0):
"""
    create list of Tweets above a given percentile (or within a quota) for qrr count
:param twlst: list of dict of Tweets w/quoted/retweeted/reply counts
:param ptile: integer from 0 to 99 indicating percentile above which to include
:param quota: identify an integer number of records instead of a percentile
:return: list of dict: Tweets in top quartile by popularity count
"""
totlen: int = len(twlst)
srtd: list = sorted(twlst, key=lambda x: x.get('qrr'), reverse=True)
if quota != 0:
quota: int = int(quota)
print("selecting top %d tweets by quota provided" % quota)
else:
top_pcnt: int = 100 - ptile
quota: int = round(totlen * (top_pcnt / 100), ndigits=0)
quota: int = int(quota)
print("get_pctle_qrr: getting top %d Tweets out of %d by qrr count" % (quota, totlen))
tops: list = srtd[:quota]
med_qrr: float = tops[round(quota * 0.5)]['qrr']
top_qrr: float = tops[0]['qrr']
qrr_50: int = get_next_by_val(twlst, "qrr", 50)
print(" qrr of 50 occurs at record %d of %d" % (qrr_50, totlen))
print(" filtered: top qrr is %d, median is %d" % (top_qrr, med_qrr))
print(" least included qrr is: %d" % (tops[quota - 1]['qrr']))
return tops
def get_pctle_fave(twlst: list, ptile: int = 0, quota: int = 0):
"""
    create list of Tweets above a given percentile (or within a quota) for favorite count
:param twlst: list of dict of Tweets | |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.cloud.certificate_manager_v1.services.certificate_manager import pagers
from google.cloud.certificate_manager_v1.types import certificate_manager
from .transports.base import DEFAULT_CLIENT_INFO, CertificateManagerTransport
from .transports.grpc import CertificateManagerGrpcTransport
from .transports.grpc_asyncio import CertificateManagerGrpcAsyncIOTransport
class CertificateManagerClientMeta(type):
"""Metaclass for the CertificateManager client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CertificateManagerTransport]]
_transport_registry["grpc"] = CertificateManagerGrpcTransport
_transport_registry["grpc_asyncio"] = CertificateManagerGrpcAsyncIOTransport
def get_transport_class(
cls,
label: str = None,
) -> Type[CertificateManagerTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CertificateManagerClient(metaclass=CertificateManagerClientMeta):
"""API Overview
Certificates Manager API allows customers to see and manage all
their TLS certificates.
Certificates Manager API service provides methods to manage
certificates, group them into collections, and create serving
configuration that can be easily applied to other Cloud resources
e.g. Target Proxies.
Data Model
The Certificates Manager service exposes the following resources:
- ``Certificate`` which describes a single TLS certificate.
- ``CertificateMap`` which describes a collection of certificates
that can be attached to a target resource.
- ``CertificateMapEntry`` which describes a single configuration
entry that consists of a SNI and a group of certificates. It's a
subresource of CertificateMap.
Certificate, CertificateMap and CertificateMapEntry IDs have to
match "^[a-z0-9-]{1,63}$" regexp, which means that
- only lower case letters, digits, and hyphen are allowed
- length of the resource ID has to be in [1,63] range.
Provides methods to manage Cloud Certificate Manager entities.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "certificatemanager.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CertificateManagerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CertificateManagerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
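    # Hedged usage sketch (the key-file path is hypothetical, not from the original source):
    #     client = CertificateManagerClient.from_service_account_file("service-account.json")
    # loads credentials from a service account JSON key file; from_service_account_info
    # accepts the already-parsed dict instead.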
@property
def transport(self) -> CertificateManagerTransport:
"""Returns the transport used by the client instance.
Returns:
CertificateManagerTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def certificate_path(
project: str,
location: str,
certificate: str,
) -> str:
"""Returns a fully-qualified certificate string."""
return (
"projects/{project}/locations/{location}/certificates/{certificate}".format(
project=project,
location=location,
certificate=certificate,
)
)
@staticmethod
def parse_certificate_path(path: str) -> Dict[str, str]:
"""Parses a certificate path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/certificates/(?P<certificate>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def certificate_map_path(
project: str,
location: str,
certificate_map: str,
) -> str:
"""Returns a fully-qualified certificate_map string."""
return "projects/{project}/locations/{location}/certificateMaps/{certificate_map}".format(
project=project,
location=location,
certificate_map=certificate_map,
)
@staticmethod
def parse_certificate_map_path(path: str) -> Dict[str, str]:
"""Parses a certificate_map path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/certificateMaps/(?P<certificate_map>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def certificate_map_entry_path(
project: str,
location: str,
certificate_map: str,
certificate_map_entry: str,
) -> str:
"""Returns a fully-qualified certificate_map_entry string."""
return "projects/{project}/locations/{location}/certificateMaps/{certificate_map}/certificateMapEntries/{certificate_map_entry}".format(
project=project,
location=location,
certificate_map=certificate_map,
certificate_map_entry=certificate_map_entry,
)
@staticmethod
def parse_certificate_map_entry_path(path: str) -> Dict[str, str]:
"""Parses a certificate_map_entry path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/certificateMaps/(?P<certificate_map>.+?)/certificateMapEntries/(?P<certificate_map_entry>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dns_authorization_path(
project: str,
location: str,
dns_authorization: str,
) -> str:
"""Returns a fully-qualified dns_authorization string."""
return "projects/{project}/locations/{location}/dnsAuthorizations/{dns_authorization}".format(
project=project,
location=location,
dns_authorization=dns_authorization,
)
@staticmethod
def parse_dns_authorization_path(path: str) -> Dict[str, str]:
"""Parses a dns_authorization path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dnsAuthorizations/(?P<dns_authorization>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def target_https_proxies_path(
project: str,
location: str,
target_https_proxy: str,
) -> str:
"""Returns a fully-qualified target_https_proxies string."""
return "projects/{project}/locations/{location}/targetHttpsProxies/{target_https_proxy}".format(
project=project,
location=location,
target_https_proxy=target_https_proxy,
)
@staticmethod
def parse_target_https_proxies_path(path: str) -> Dict[str, str]:
"""Parses a target_https_proxies path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/targetHttpsProxies/(?P<target_https_proxy>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def target_ssl_proxies_path(
project: str,
location: str,
target_ssl_proxy: str,
) -> str:
"""Returns a fully-qualified target_ssl_proxies string."""
return "projects/{project}/locations/{location}/targetSslProxies/{target_ssl_proxy}".format(
project=project,
location=location,
target_ssl_proxy=target_ssl_proxy,
)
@staticmethod
def parse_target_ssl_proxies_path(path: str) -> Dict[str, str]:
"""Parses a target_ssl_proxies path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/targetSslProxies/(?P<target_ssl_proxy>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert | |
= s1.indexOf(s2);
if (n == -1) return '';
return s1.substring(0, n);
},
'string', false, []],
'substring-after': [function(s1, s2) {
if (arguments.length != 2) {
throw Error('Function substring-after expects (string, string)');
}
s1 = s1.string(this);
s2 = s2.string(this);
var n = s1.indexOf(s2);
if (n == -1) return '';
return s1.substring(n + s2.length);
},
'string', false, []],
'string-length': [function(s) {
switch (arguments.length) {
case 0:
s = wa.to('string', this.node);
break;
case 1:
s = s.string(this);
break;
default:
throw Error('Function string-length expects (string?)');
break;
}
return s.length;
},
'number', false, [true, false]],
'normalize-space': [function(s) {
switch (arguments.length) {
case 0:
s = wa.to('string', this.node);
break;
case 1:
s = s.string(this);
break;
default:
throw Error('Function normalize-space expects (string?)');
break;
}
return s.replace(/\s+/g, ' ').replace(/^ /, '').replace(/ $/, '');
},
'string', false, [true, false]],
translate: [function(s1, s2, s3) {
if (arguments.length != 3) {
throw Error('Function translate expects (string, string, string)');
}
s1 = s1.string(this);
s2 = s2.string(this);
s3 = s3.string(this);
var Le = [];
for (var i = 0,
l = s2.length; i < l; i++) {
var ch = s2.charAt(i);
if (!Le[ch]) Le[ch] = s3.charAt(i) || '';
}
for (var t = '',
i = 0,
l = s1.length; i < l; i++) {
var ch = s1.charAt(i);
var Me = Le[ch];
t += (Me != ca) ? Me: ch;
}
return t;
},
'string', false, []],
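    // Hedged examples of the translate() semantics above (XPath 1.0, not in the original source):
    //   translate("bar", "abc", "ABC")      -> "BAr"
    //   translate("--aaa--", "abc-", "ABC") -> "AAA"   (characters of the second argument
    //   with no counterpart in the third are deleted from the result)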
'boolean': [function(b) {
if (arguments.length != 1) {
throw Error('Function boolean expects (object)');
}
return b.bool(this)
},
'boolean', false, []],
not: [function(b) {
if (arguments.length != 1) {
throw Error('Function not expects (object)');
}
return ! b.bool(this)
},
'boolean', false, []],
'true': [function() {
if (arguments.length != 0) {
throw Error('Function true expects ()');
}
return true;
},
'boolean', false, []],
'false': [function() {
if (arguments.length != 0) {
throw Error('Function false expects ()');
}
return false;
},
'boolean', false, []],
lang: [function(s) {
return false;
},
'boolean', false, []],
number: [function(n) {
switch (arguments.length) {
case 0:
n = wa.to('number', this.node);
break;
case 1:
n = n.number(this);
break;
default:
throw Error('Function number expects (object?)');
break;
}
return n;
},
'number', false, [true, false]],
sum: [function(ns) {
var Ne, n, i, l;
if (arguments.length != 1 || !(ns = ns.evaluate(this)).isNodeSet) {
throw Error('Function sum expects (nodeset)');
}
Ne = ns.list();
n = 0;
for (i = 0, l = Ne.length; i < l; i++) {
n += wa.to('number', Ne[i]);
}
return n;
},
'number', false, []],
floor: [function(n) {
if (arguments.length != 1) {
throw Error('Function floor expects (number)');
}
n = n.number(this);
return Math.floor(n);
},
'number', false, []],
ceiling: [function(n) {
if (arguments.length != 1) {
throw Error('Function ceiling expects (number)');
}
n = n.number(this);
return Math.ceil(n);
},
'number', false, []],
round: [function(n) {
if (arguments.length != 1) {
throw Error('Function round expects (number)');
}
n = n.number(this);
return Math.round(n);
},
'number', false, []]
};
qa.parse = function(Oe) {
var Pe, func = new qa(Oe.next());
Oe.next();
while (Oe.peek() != ')') {
if (Oe.empty()) {
throw Error('missing function argument list');
}
Pe = oa.parse(Oe);
func.arg(Pe);
if (Oe.peek() != ',') break;
Oe.next();
}
if (Oe.empty()) {
throw Error('unclosed function argument list');
}
if (Oe.next() != ')') {
Oe.back();
throw Error('bad token: ' + Oe.next());
}
return func
};
qa.prototype = new Ma();
qa.prototype.evaluate = function(Qe) {
return this.func.apply(Qe, this.args);
};
qa.prototype.arg = function(Re) {
this.args.push(Re);
if (Re.needContextPosition) {
this.needContextPosition = true;
}
var Se = this.args;
if (Re.needContextNode) {
        Se.needContextNode = true;
}
this.needContextNode = Se.needContextNode || this.needContextNodeInfo[Se.length];
};
qa.prototype.show = function(Te) {
Te = Te || '';
var t = '';
    t += Te + 'function: ' + this.name + '\n';
Te += ' ';
if (this.args.length) {
        t += Te + 'arguments: ' + '\n';
Te += ' ';
for (var i = 0; i < this.args.length; i++) {
t += this.args[i].show(Te);
}
}
return t;
};
var Ze = {
uuid: 1,
get: function($e) {
return $e.__ba || ($e.__ba = this.uuid++);
}
};
if (!window.NodeSet && window.defaultConfig) window.NodeSet = null;
ta = function() {
this.length = 0;
this.nodes = [];
this.seen = {};
this.idIndexMap = null;
this.reserveDels = [];
};
ta.prototype.isNodeSet = true;
ta.prototype.isSorted = true;
ta.prototype.merge = function(af) {
this.isSorted = false;
if (af.only) {
return this.push(af.only);
}
if (this.only) {
var bf = this.only;
delete this.only;
this.push(bf);
this.length--;
}
var cf = af.nodes;
for (var i = 0,
l = cf.length; i < l; i++) {
this._add(cf[i]);
}
};
ta.prototype.sort = function() {
if (this.only) return;
if (this.sortOff) return;
if (!this.isSorted) {
this.isSorted = true;
this.idIndexMap = null;
var ef = this.nodes;
ef.sort(function(a, b) {
if (a == b) return 0;
if (a.compareDocumentPosition) {
var ff = a.compareDocumentPosition(b);
if (ff & 2) return 1;
if (ff & 4) return - 1;
return 0;
} else {
var gf = a,
node2 = b,
ancestor1 = a,
ancestor2 = b,
deep1 = 0,
deep2 = 0;
while (ancestor1 = ancestor1.parentNode) deep1++;
while (ancestor2 = ancestor2.parentNode) deep2++;
if (deep1 > deep2) {
while (deep1--!=deep2) gf = gf.parentNode;
if (gf == node2) return 1;
} else if (deep2 > deep1) {
while (deep2--!=deep1) node2 = node2.parentNode;
if (gf == node2) return - 1;
}
while ((ancestor1 = gf.parentNode) != (ancestor2 = node2.parentNode)) {
gf = ancestor1;
node2 = ancestor2;
}
while (gf = gf.nextSibling) if (gf == node2) return - 1;
return 1;
}
});
}
};
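// Note on the comparator above (added for clarity, not in the original source): when
// compareDocumentPosition is available, bit 2 (DOCUMENT_POSITION_PRECEDING) means b
// precedes a, so a sorts later; bit 4 (DOCUMENT_POSITION_FOLLOWING) means b follows a,
// so a sorts earlier. The manual fallback climbs to a common ancestor and walks
// nextSibling to decide document order in older engines.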
ta.prototype.reserveDelByNodeID = function(id, rf, sf) {
var tf = this.createIdIndexMap();
var uf;
if (uf = tf[id]) {
if (sf && (this.length - rf - 1) > uf || !sf && rf < uf) {
var vf = {
value: uf,
order: String.fromCharCode(uf),
toString: function() {
return this.order
},
valueOf: function() {
return this.value
}
};
this.reserveDels.push(vf);
}
}
};
ta.prototype.reserveDelByNode = function(wf, xf, yf) {
this.reserveDelByNodeID(Ze.get(wf), xf, yf);
};
ta.prototype.doDel = function() {
if (!this.reserveDels.length) return;
if (this.length < 0x10000) {
var zf = this.reserveDels.sort(function(a, b) {
return b - a
});
} else {
var zf = this.reserveDels.sort(function(a, b) {
return b - a
});
}
for (var i = 0,
l = zf.length; i < l; i++) {
this.del(zf[i]);
}
this.reserveDels = [];
this.idIndexMap = null;
};
ta.prototype.createIdIndexMap = function() {
if (this.idIndexMap) {
return this.idIndexMap;
} else {
var Af = this.idIndexMap = {};
var Bf = this.nodes;
for (var i = 0,
l = Bf.length; i < l; i++) {
var Cf = Bf[i];
var id = Ze.get(Cf);
Af[id] = i;
}
return Af;
}
};
ta.prototype.del = function(Ff) {
this.length--;
if (this.only) {
delete this.only;
} else {
var Gf = this.nodes.splice(Ff, 1)[0];
if (this._first == Gf) {
delete this._first;
delete this._firstSourceIndex;
delete this._firstSubIndex;
}
delete this.seen[Ze.get(Gf)];
}
};
ta.prototype.delDescendant = function(Hf, If) {
if (this.only) return;
var Jf = Hf.nodeType;
if (Jf != 1 && Jf != 9) return;
if (Da.applewebkit2) return;
if (!Hf.contains) {
if (Jf == 1) {
var Kf = Hf;
Hf = {
contains: function(Lf) {
return Lf.compareDocumentPosition(Kf) & 8;
}
};
} else {
Hf = {
contains: function() {
return true;
}
};
}
}
var Mf = this.nodes;
for (var i = If + 1; i < Mf.length; i++) {
if (Hf.contains(Mf[i])) {
this.del(i);
i--;
}
}
};
ta.prototype._add = function(Nf, Of) {
var Qf = this.seen;
var id = Ze.get(Nf);
if (Qf[id]) return true;
Qf[id] = true;
this.length++;
if (Of) this.nodes.unshift(Nf);
else this.nodes.push(Nf);
};
ta.prototype.unshift = function(Rf) {
if (!this.length) {
this.length++;
this.only = Rf;
return
}
if (this.only) {
var Sf = this.only;
delete this.only;
this.unshift(Sf);
this.length--;
}
return this._add(Rf, true);
};
ta.prototype.push = function(Tf) {
if (!this.length) {
this.length++;
this.only = Tf;
return;
}
if (this.only) {
var Uf = this.only;
delete this.only;
this.push(Uf);
this.length--;
}
return this._add(Tf);
};
ta.prototype.first = function() {
if (this.only) return this.only;
if (this.nodes.length > 1) this.sort();
| |
ref_log = os.path.join(mr_workdir, '{0}_ref.log'.format(pdb_code))
ref_map = os.path.join(mr_workdir, '{0}_refmac_2fofcwt.map'.format(pdb_code))
diff_map = os.path.join(mr_workdir, '{0}_refmac_fofcwt.map'.format(pdb_code))
pdb, mtz, map_, dmap, mr_log, ref_log = list(self.adjust_paths_of_files(
[ref_pdb, ref_mtz, ref_map, diff_map, mr_log, ref_log]
))
self.store_entry_in_rvapi_meta(
i + 1, "latt", pdb_code, pdb, mtz, map_, dmap, False)
self.output_result_files(
download_sec, dmap, map_, mtz, pdb)
self.output_log_files(logfile_sec, mr_log, ref_log)
except KeyError:
logger.debug("No result found at position %s", (i + 1))
else:
for i in range(0, results_to_display):
try:
df = pandas.read_csv(lattice_results)
pdb_code = df.loc[i][0]
mr_log = None
ref_pdb = os.path.join(self.work_dir, 'latt', 'mr_search', 'mr_models', '{}.pdb'.format(pdb_code))
ref_mtz = None
ref_log = None
ref_map = None
diff_map = None
pdb, mtz, map_, dmap, mr_log, ref_log = list(self.adjust_paths_of_files(
[ref_pdb, ref_mtz, ref_map, diff_map, mr_log, ref_log]
))
if i == 0:
best = True
else:
best = False
self.store_entry_in_rvapi_meta(
i + 1, "latt", pdb_code, pdb, mtz, map_, dmap, best)
except KeyError:
logger.debug("No result found at position %s", (i + 1))
def create_contaminant_results_tab(self, contaminant_results, contaminant_mr_results, results_to_display):
"""Function to create the contaminant results tab
Parameters
----------
contaminant_results : str
Path to the file containing the contaminant results
contaminant_mr_results : str
Path to the file containing the contaminant MR results
results_to_display : int
Number of results to display
Returns
-------
object
Page containing the results from the contaminant search
"""
self._create_contaminant_results_tab()
if os.path.isfile(contaminant_results):
section_title = 'Contaminant AMORE Rotation Search Results'
uid = str(uuid.uuid4())
sec = section_title.replace(" ", "_") + uid
tab = self.contaminant_results_tab_id
table = "table" + uid
pyrvapi.rvapi_add_section(sec, section_title, tab, 0, 0, 1, 1, False)
table_title = "Contaminant AMORE Rotation Search Results"
pyrvapi.rvapi_add_table1(sec + "/" + table, table_title, 2, 0, 1, 1, 100)
df = pandas.read_csv(contaminant_results)
self.create_table(df, table)
section_title = "AMORE Rotation Search Graphs"
uid = str(uuid.uuid4())
graph_sec = section_title.replace(" ", "_") + uid
graph_widget = "graphWidget" + uid
pyrvapi.rvapi_add_section(graph_sec, section_title, tab, 0, 0, 1, 1, True)
self.create_graphs(df, graph_sec, graph_widget)
if os.path.isfile(contaminant_mr_results):
section_title = 'Molecular Replacement Search Results'
uid = str(uuid.uuid4())
sec = section_title.replace(" ", "_") + uid
tab = self.contaminant_results_tab_id
table = "table" + uid
pyrvapi.rvapi_add_section(sec, section_title, tab, 0, 0, 1, 1, False)
table_title = "Molecular Replacement Search Results"
pyrvapi.rvapi_add_table1(sec + "/" + table, table_title, 2, 0, 1, 1, 100)
df = pandas.read_csv(contaminant_mr_results)
self.create_table(df, table)
self.contaminant_df = df
section_title = 'Top {0} Contaminant Search Downloads'.format(results_to_display)
uid = str(uuid.uuid4())
download_sec = section_title.replace(" ", "_") + uid
pyrvapi.rvapi_add_section(download_sec, section_title, tab, 0, 0, 1, 1, True)
section_title = 'Top {0} Contaminant Search Log Files'.format(results_to_display)
uid = str(uuid.uuid4())
logfile_sec = section_title.replace(" ", "_") + uid
pyrvapi.rvapi_add_section(logfile_sec, section_title, tab, 0, 0, 1, 1, False)
for i in range(0, results_to_display):
try:
pdb_code = df.loc[i][0]
mr_workdir = os.path.join(self.work_dir, 'output_files')
mr_log = os.path.join(mr_workdir, '{0}_mr.log'.format(pdb_code))
ref_pdb = os.path.join(mr_workdir, '{0}_refinement_output.pdb'.format(pdb_code))
ref_mtz = os.path.join(mr_workdir, '{0}_refinement_output.mtz'.format(pdb_code))
ref_log = os.path.join(mr_workdir, '{0}_ref.log'.format(pdb_code))
ref_map = os.path.join(mr_workdir, '{0}_refmac_2fofcwt.map'.format(pdb_code))
diff_map = os.path.join(mr_workdir, '{0}_refmac_fofcwt.map'.format(pdb_code))
pdb, mtz, map_, dmap, mr_log, ref_log = list(self.adjust_paths_of_files(
[ref_pdb, ref_mtz, ref_map, diff_map, mr_log, ref_log]
))
self.store_entry_in_rvapi_meta(
i + 1, "cont", pdb_code, pdb, mtz, map_, dmap, False)
self.output_result_files(
download_sec, dmap, map_, mtz, pdb)
self.output_log_files(logfile_sec, mr_log, ref_log)
except KeyError:
logger.debug("No result found at position %s", (i + 1))
def create_morda_db_results_tab(self, morda_db_results, morda_db_mr_results, results_to_display):
"""Function to create the MoRDa Database results tab
Parameters
----------
morda_db_results : str
Path to the file containing the MoRDa db results
morda_db_mr_results : str
Path to the file containing the MoRDa db MR results
results_to_display : int
Number of results to display
Returns
-------
object
Page containing the results from the MoRDa db search
"""
self._create_morda_db_results_tab()
if os.path.isfile(morda_db_results):
section_title = 'MoRDa database AMORE Rotation Search Results'
uid = str(uuid.uuid4())
sec = section_title.replace(" ", "_") + uid
tab = self.morda_db_results_tab_id
table = "table" + uid
pyrvapi.rvapi_add_section(sec, section_title, tab, 0, 0, 1, 1, False)
table_title = "MoRDa datbase AMORE Rotation Search Results"
pyrvapi.rvapi_add_table1(sec + "/" + table, table_title, 2, 0, 1, 1, 100)
df = pandas.read_csv(morda_db_results)
self.create_table(df, table)
section_title = "AMORE Rotation Search Graphs"
uid = str(uuid.uuid4())
graph_sec = section_title.replace(" ", "_") + uid
graph_widget = "graphWidget" + uid
pyrvapi.rvapi_add_section(graph_sec, section_title, tab, 0, 0, 1, 1, True)
self.create_graphs(df, graph_sec, graph_widget)
if os.path.isfile(morda_db_mr_results):
section_title = 'Molecular Replacement Search Results'
uid = str(uuid.uuid4())
sec = section_title.replace(" ", "_") + uid
tab = self.morda_db_results_tab_id
table = "table" + uid
pyrvapi.rvapi_add_section(sec, section_title, tab, 0, 0, 1, 1, False)
table_title = "Molecular Replacement Search Results"
pyrvapi.rvapi_add_table1(sec + "/" + table, table_title, 2, 0, 1, 1, 100)
df = pandas.read_csv(morda_db_mr_results)
self.create_table(df, table)
self.morda_db_df = df
section_title = 'Top {0} MoRDa database Search Downloads'.format(results_to_display)
uid = str(uuid.uuid4())
download_sec = section_title.replace(" ", "_") + uid
pyrvapi.rvapi_add_section(download_sec, section_title, tab, 0, 0, 1, 1, True)
section_title = 'Top {0} MoRDa database Search Log Files'.format(results_to_display)
uid = str(uuid.uuid4())
logfile_sec = section_title.replace(" ", "_") + uid
pyrvapi.rvapi_add_section(logfile_sec, section_title, tab, 0, 0, 1, 1, False)
for i in range(0, results_to_display):
try:
pdb_code = df.loc[i][0]
mr_workdir = os.path.join(self.work_dir, 'output_files')
mr_log = os.path.join(mr_workdir, '{0}_mr.log'.format(pdb_code))
ref_pdb = os.path.join(mr_workdir, '{0}_refinement_output.pdb'.format(pdb_code))
ref_mtz = os.path.join(mr_workdir, '{0}_refinement_output.mtz'.format(pdb_code))
ref_log = os.path.join(mr_workdir, '{0}_ref.log'.format(pdb_code))
ref_map = os.path.join(mr_workdir, '{0}_refmac_2fofcwt.map'.format(pdb_code))
diff_map = os.path.join(mr_workdir, '{0}_refmac_fofcwt.map'.format(pdb_code))
pdb, mtz, map_, dmap, mr_log, ref_log = list(self.adjust_paths_of_files(
[ref_pdb, ref_mtz, ref_map, diff_map, mr_log, ref_log]
))
self.store_entry_in_rvapi_meta(
i + 1, "full", pdb_code, pdb, mtz, map_, dmap, False)
self.output_result_files(
download_sec, dmap, map_, mtz, pdb)
self.output_log_files(logfile_sec, mr_log, ref_log)
except KeyError:
logger.debug("No result found at position %s", (i + 1))
def display_summary_tab(self):
"""Function to create the MoRDa Database results tab
Returns
-------
object
Page containing a summary of the best results from SIMBAD
"""
self._create_summary_tab()
if self.lattice_df is None:
lattice_score = 1
else:
try:
lattice_score = self.lattice_df['final_r_free'][0]
except IndexError:
lattice_score = 1
if self.contaminant_df is None:
contaminant_score = 1
else:
try:
contaminant_score = self.contaminant_df['final_r_free'][0]
except IndexError:
contaminant_score = 1
if self.morda_db_df is None:
morda_db_score = 1
else:
try:
morda_db_score = self.morda_db_df['final_r_free'][0]
except IndexError:
morda_db_score = 1
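        # Clarifying note (added): each *_score above defaults to the sentinel value 1 when
        # the corresponding search produced no results, so the branch below reports "no
        # solution" only if all three sentinels remain; otherwise the source with the lowest
        # final_r_free is chosen as the best result.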
section_title = 'SIMBAD Summary'
uid = str(uuid.uuid4())
sec = section_title.replace(" ", "_") + uid
tab = self.summary_tab_id
if lattice_score == 1 and contaminant_score == 1 and morda_db_score == 1:
msg = "No solution was found by SIMBAD"
pyrvapi.rvapi_add_section(sec, section_title, tab, 0, 0, 1, 1, True)
pyrvapi.rvapi_add_text(msg, sec, 2, 0, 1, 1)
else:
if lattice_score <= contaminant_score and lattice_score <= morda_db_score:
pdb_code = self.lattice_df.loc[0][0]
r_fact = self.lattice_df['final_r_fact'][0]
r_free = self.lattice_df['final_r_free'][0]
source = "latt"
elif contaminant_score <= lattice_score and contaminant_score <= morda_db_score:
pdb_code = self.contaminant_df.loc[0][0]
r_fact = self.contaminant_df['final_r_fact'][0]
r_free = self.contaminant_df['final_r_free'][0]
source = "cont"
elif morda_db_score <= lattice_score and morda_db_score <= contaminant_score:
pdb_code = self.morda_db_df.loc[0][0]
r_fact = self.morda_db_df['final_r_fact'][0]
r_free = self.morda_db_df['final_r_free'][0]
source = "morda"
else:
logger.debug('Unexpected result')
return
mr_workdir = os.path.join(self.work_dir, 'output_files', pdb_code)
mr_log = os.path.join(mr_workdir, '{0}_mr.log'.format(pdb_code))
ref_log = os.path.join(mr_workdir, '{0}_ref.log'.format(pdb_code))
ref_pdb = os.path.join(mr_workdir, '{0}_refinement_output.pdb'.format(pdb_code))
ref_map = os.path.join(mr_workdir, '{0}_refmac_2fofcwt.map'.format(pdb_code))
ref_mtz = os.path.join(mr_workdir, '{0}_refinement_output.mtz'.format(pdb_code))
diff_map = os.path.join(mr_workdir, '{0}_refmac_fofcwt.map'.format(pdb_code))
msg = 'The best search model found by SIMBAD was {0}. \
This gave an R/Rfact of {1:.3f} and an R/Rfree of {2:.3f}. \
An R/Rfree lower than 0.450 is indicative of a \
solution. Values above this may also be indicative of a correct solution \
but you should examine the maps through the graphical map viewer for \
verification.'.format(pdb_code, r_fact, r_free)
pyrvapi.rvapi_add_section(sec, section_title, tab, 0, 0, 1, 1, True)
pyrvapi.rvapi_add_text(msg, sec, 2, 0, 1, 1)
section_title = 'Best SIMBAD result Downloads'
uid = str(uuid.uuid4())
download_sec = section_title.replace(" ", "_") + uid
pyrvapi.rvapi_add_section(download_sec, section_title, tab, 0, 0, 1, 1, True)
section_title = 'Best SIMBAD result Log Files'
uid = str(uuid.uuid4())
logfile_sec = section_title.replace(" ", "_") + uid
pyrvapi.rvapi_add_section(logfile_sec, section_title, tab, 0, 0, 1, 1, False)
pdb, mtz, map_, dmap, mr_log, ref_log = list(
self.adjust_paths_of_files([ref_pdb, ref_mtz, ref_map, diff_map, mr_log, ref_log]))
for e in self.rvapi_meta.results:
if e["name"] == pdb_code and e["source"] == source:
e["best"] = True
self.output_result_files(download_sec, dmap, map_, mtz, pdb)
self.output_log_files(logfile_sec, mr_log, ref_log)
def display_citation_tab(self):
"""Function to display citations for programs used within SIMBAD
Returns
-------
object
Section containing the relevant citations
"""
self._create_citation_tab()
args = self.get_arguments_from_log(self.logfile)
refMgr = reference_manager.ReferenceManager(args)
bibtex_file = refMgr.save_citations_to_file(self.work_dir)
if self.ccp4i2:
# The horror of ccp4i2 means that this all gets dumped into xml so we can't use any markup tags
tdata = refMgr.citations_as_text
else:
tdata = refMgr.methods_as_html
tdata += refMgr.citations_as_html
tdata += '<hr><p>A | |
default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_portal_member_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: PortalMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.invitation_tickets_id_target_portal_member_get_with_http_info(id, **kwargs)
else:
(data) = self.invitation_tickets_id_target_portal_member_get_with_http_info(id, **kwargs)
return data
def invitation_tickets_id_target_portal_member_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation targetPortalMember.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_portal_member_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: PortalMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invitation_tickets_id_target_portal_member_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `invitation_tickets_id_target_portal_member_get`")
collection_formats = {}
resource_path = '/InvitationTickets/{id}/targetPortalMember'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def invitation_tickets_id_target_team_get(self, id, **kwargs):
"""
Fetches belongsTo relation targetTeam.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_team_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.invitation_tickets_id_target_team_get_with_http_info(id, **kwargs)
else:
(data) = self.invitation_tickets_id_target_team_get_with_http_info(id, **kwargs)
return data
def invitation_tickets_id_target_team_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation targetTeam.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_team_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invitation_tickets_id_target_team_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `invitation_tickets_id_target_team_get`")
collection_formats = {}
resource_path = '/InvitationTickets/{id}/targetTeam'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Team',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def invitation_tickets_id_target_team_member_get(self, id, **kwargs):
"""
Fetches belongsTo relation targetTeamMember.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_team_member_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: TeamMember
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.invitation_tickets_id_target_team_member_get_with_http_info(id, **kwargs)
else:
(data) = self.invitation_tickets_id_target_team_member_get_with_http_info(id, **kwargs)
return data
def invitation_tickets_id_target_team_member_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation targetTeamMember.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_team_member_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: TeamMember
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invitation_tickets_id_target_team_member_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `invitation_tickets_id_target_team_member_get`")
collection_formats = {}
resource_path = '/InvitationTickets/{id}/targetTeamMember'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TeamMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def invitation_tickets_id_target_template_get(self, id, **kwargs):
"""
Fetches belongsTo relation targetTemplate.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_template_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.invitation_tickets_id_target_template_get_with_http_info(id, **kwargs)
else:
(data) = self.invitation_tickets_id_target_template_get_with_http_info(id, **kwargs)
return data
def invitation_tickets_id_target_template_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation targetTemplate.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.invitation_tickets_id_target_template_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: InvitationTicket id (required)
:param bool refresh:
:return: Template
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method invitation_tickets_id_target_template_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `invitation_tickets_id_target_template_get`")
collection_formats = {}
resource_path = '/InvitationTickets/{id}/targetTemplate'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication | |
else:
policy_dir = getRequiredDirectory("policydir", "policy")
pd = path(policy_dir)
if not pd.isdir():
print("Policy dir %s does not exist, creating it" % policy_dir)
pd.mkdir()
else:
for domain in domains:
if policytypes[domain] != 'hdc':
if Settings.config.has_section("policy_" + domain):
policy_dir = getRequiredDirectory("policydir", "policy_" + domain)
else:
policy_dir = getRequiredDirectory("policydir", "policy")
pd = path(policy_dir)
if not pd.isdir():
print("Policy dir %s does not exist, creating it" % policy_dir)
pd.mkdir()
cd = path(conf_dir)
if not cd.isdir():
print("Config dir %s does not exist, creating it" % conf_dir)
cd.mkdir()
ld = path(log_dir)
if not ld.isdir():
print("Log dir %s does not exist, creating it" % log_dir)
ld.mkdir()
# optional config settings
if numtrainbatches:
gnumtrainbatches = int(numtrainbatches)
else:
gnumtrainbatches = getOptionalConfigInt("numtrainbatches", 1)
if traindialogsperbatch:
gtraindialogsperbatch = int(traindialogsperbatch)
else:
gtraindialogsperbatch = getOptionalConfigInt("traindialogsperbatch", 100)
if trainerrorrate:
gtrainerrorrate = int(trainerrorrate)
else:
gtrainerrorrate = getOptionalConfigInt("trainerrorrate", 0)
if testerrorrate:
gtesterrorrate = int(testerrorrate)
else:
gtesterrorrate = getOptionalConfigInt("testerrorrate",0)
if trainsourceiteration:
gtrainsourceiteration = int(trainsourceiteration)
else:
gtrainsourceiteration = getOptionalConfigInt("trainsourceiteration",0)
if numtestdialogs:
gnumtestdialogs = int(numtestdialogs)
else:
gnumtestdialogs = getOptionalConfigInt("numtestdialogs", 50)
gnumbatchtestdialogs = getOptionalConfigInt("numbatchtestdialogs", 20)
gtesteverybatch = getOptionalConfigBool("testeverybatch",True)
gdeleteprevpolicy = getOptionalConfigBool("deleteprevpolicy", False)
if seed is not None and not 'seed' in configId:
if seed >= 100 and seed < 200:
seed_string = 'seed{}-'.format(seed - 100)
else:
seed_string = 'seed{}-'.format(seed)
else:
seed_string = ''
if mode == "train":
if gnumtrainbatches>1:
enditeration = gtrainsourceiteration+gnumtrainbatches
logfile = "%s-%s%02d.%d-%d.train.log" % (configId, seed_string,gtrainerrorrate,gtrainsourceiteration+1,enditeration)
else:
logfile = "%s-%s%02d.%d.train.log" % (configId, seed_string, gtrainerrorrate, gtrainsourceiteration + 1)
elif mode == "eval":
if testenderrorrate:
logfile = "%s-%s%02d.%d.eval.%02d-%02d.log" % (configId, seed_string,gtrainerrorrate,iteration,
gtesterrorrate,testenderrorrate)
else:
if type(iteration) == str:
logfile = "{}_vs_{}-{}.eval.log".format(configId, iteration, seed_string[:-1])
else:
logfile = "%s-%s%02d.%d.eval.%02d.log" % (configId, seed_string, gtrainerrorrate, iteration, gtesterrorrate)
elif mode == "chat":
logfile = "%s-%s%02d.%d.chat.log" % (configId, seed_string, gtrainerrorrate, gtrainsourceiteration)
else:
print("Unknown initialisation mode:",mode)
exit(0)
print('*** logfile: {} ***'.format(logfile))
Settings.config.set("logging", "file", log_dir + logfile)
if traindomains:
Settings.config.set("GENERAL", "traindomains", traindomains)
if testdomains:
Settings.config.set("GENERAL", "testdomains", testdomains)
if dbprefix:
Settings.config.set("exec_config", "dbprefix", dbprefix)
if not Ontology.global_ontology:
ContextLogger.createLoggingHandlers(config=Settings.config)
logger = ContextLogger.getLogger('')
Ontology.init_global_ontology()
else:
ContextLogger.resetLoggingHandlers()
ContextLogger.createLoggingHandlers(config=Settings.config)
logger = ContextLogger.getLogger('')
Settings.random.seed(int(seed))
if Settings.root == '':
Settings.root = os.getcwd()
logger.info("Seed = %d", seed)
logger.info("Root = %s", Settings.root)
def setupPolicy(domain, configId, trainerr, source_iteration, target_iteration, seed=None):
if Settings.config.has_section("policy_" + domain):
policy_section = "policy_" + domain
else:
policy_section = "policy"
if not str(source_iteration).isdigit():
inpolicyfile = source_iteration
outpolicyfile = source_iteration
elif seed is not None:
inpolicyfile = "%s-seed%s-%02d.%d" % (configId, seed, trainerr, source_iteration)
outpolicyfile = "%s-seed%s-%02d.%d" % (configId, seed, trainerr, target_iteration)
else:
inpolicyfile = "%s-%02d.%d" % (configId, trainerr, source_iteration)
outpolicyfile = "%s-%02d.%d" % (configId, trainerr, target_iteration)
if isSingleDomain:
Settings.config.set(policy_section, "inpolicyfile", policy_dir + inpolicyfile)
Settings.config.set(policy_section, "outpolicyfile", policy_dir + outpolicyfile)
else:
multi_policy_dir = policy_dir + domain
pd = path(multi_policy_dir)
if not pd.isdir():
print("Policy dir %s does not exist, creating it" % multi_policy_dir)
pd.mkdir()
Settings.config.set(policy_section, "inpolicyfile", multi_policy_dir + inpolicyfile)
Settings.config.set(policy_section, "outpolicyfile", multi_policy_dir + outpolicyfile)
return (inpolicyfile, outpolicyfile)
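# Naming sketch (hypothetical values, illustration only): for configId='env1-cfg',
# trainerr=0, source_iteration=1, target_iteration=2 and no seed, setupPolicy points
# the policy section at '<policydir>/env1-cfg-00.1' (in) and '<policydir>/env1-cfg-00.2'
# (out); with seed=3 the names become 'env1-cfg-seed3-00.1' / 'env1-cfg-seed3-00.2'.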
def trainBatch(domain, configId, trainerr, ndialogs, source_iteration, seed=None):
if isSingleDomain:
(inpolicy, outpolicy) = setupPolicy(domain, configId, trainerr, source_iteration, source_iteration + 1, seed=seed)
mess = "*** Training Iteration %s->%s: iter=%d, error-rate=%d, num-dialogs=%d ***" % (
inpolicy, outpolicy, source_iteration, trainerr, ndialogs)
if tracedialog > 0: print(mess)
logger.results(mess)
# make sure that learning is switched on
if Settings.config.has_section("policy_" + domain):
Settings.config.set("policy_" + domain, "learning", 'True')
else:
Settings.config.set("policy", "learning", 'True')
# if gp, make sure to reset scale to config setting
if policytype == "gp":
if Settings.config.has_section("gpsarsa_" + domain):
Settings.config.set("gpsarsa_" + domain, "scale", str(gpscale))
else:
Settings.config.set("gpsarsa", "scale", str(gpscale))
# Define the config file for this iteration
confsavefile = conf_dir + outpolicy + ".train.cfg"
else:
mess = "*** Training Iteration: iter=%d, error-rate=%d, num-dialogs=%d ***" % (
source_iteration, trainerr, ndialogs)
if tracedialog > 0: print(mess)
logger.results(mess)
for dom in domain:
setupPolicy(dom, configId, trainerr, source_iteration, source_iteration + 1, seed=seed)
# make sure that learning is switched on
if Settings.config.has_section("policy_" + dom):
Settings.config.set("policy_" + dom, "learning", 'True')
else:
Settings.config.set("policy", "learning", 'True')
# if gp, make sure to reset scale to config setting
if policytype == "gp":
Settings.config.set("gpsarsa_" + dom, "scale", str(gpscale))
# Define the config file for this iteration
multipolicy = "%s-%02d.%d" % (configId, trainerr, source_iteration + 1)
confsavefile = conf_dir + multipolicy + ".train.cfg"
# Save the config file for this iteration
cf = open(confsavefile, 'w')
Settings.config.write(cf)
error = float(trainerr) / 100.0
# run the system
simulator = Simulate.SimulationSystem(error_rate=error)
simulator.run_dialogs(ndialogs)
if gdeleteprevpolicy:
if isSingleDomain:
if inpolicy[-1] != '0':
if Settings.config.has_section("policy_" + domain):
for f in os.listdir(Settings.config.get('policy_{}'.format(domain), 'policydir')):
if re.search(inpolicy, f):
os.remove(os.path.join(Settings.config.get('policy_{}'.format(domain), 'policydir'), f))
else:
for f in os.listdir(Settings.config.get('policy', 'policydir')):
if re.search(inpolicy, f):
os.remove(os.path.join(Settings.config.get('policy', 'policydir'), f))
def setEvalConfig(domain, configId, evalerr, ndialogs, iteration, seed=None):
(_, policy) = setupPolicy(domain, configId, gtrainerrorrate, iteration, iteration, seed=seed)
if isSingleDomain:
mess = "*** Evaluating %s: error-rate=%d num-dialogs=%d ***" % (policy, evalerr, ndialogs)
else:
mess = "*** Evaluating %s: error-rate=%d num-dialogs=%d ***" % (policy.replace('Multidomain', domain),
evalerr, ndialogs)
if tracedialog > 0: print(mess)
logger.results(mess)
# make sure that learning is switched off
if Settings.config.has_section("policy_" + domain):
Settings.config.set("policy_" + domain, "learning", 'False')
else:
Settings.config.set("policy", "learning", 'False')
# if gp, make sure to reset scale to 1 for evaluation
if policytype == "gp":
if Settings.config.has_section("gpsarsa_" + domain):
Settings.config.set("gpsarsa_" + domain, "scale", "1")
else:
Settings.config.set("gpsarsa", "scale", "1")
# Save a copy of config file
confsavefile = conf_dir + "%s.eval.%02d.cfg" % (policy, evalerr)
cf = open(confsavefile, 'w')
Settings.config.write(cf)
def evalPolicy(domain, configId, evalerr, ndialogs, iteration, seed=None):
if isSingleDomain:
setEvalConfig(domain, configId, evalerr, ndialogs, iteration, seed=seed)
else:
for dom in domains:
setEvalConfig(dom, configId, evalerr, ndialogs, iteration, seed=seed)
error = float(evalerr) / 100.0
# finally run the system
simulator = Simulate.SimulationSystem(error_rate=error)
simulator.run_dialogs(ndialogs)
def getIntParam(line, key):
    m = re.search(r" %s *= *(\d+)" % key, line)  # match " <key>=<int>" in a log line
if m is None:
print("Cant find int %s in %s" % (key, line))
exit(0)
return int(m.group(1))
def getFloatRange(line, key):
    m = re.search(r" %s *= *(\-?\d+\.\d+) *\+- *(\d+\.\d+)" % key, line)  # match " <key>=<float> +- <float>"
    if m is None:
        print("Can't find float %s in %s" % (key, line))
exit(0)
return (float(m.group(1)),float(m.group(2)))
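# A minimal sketch (hypothetical log lines, illustration only) of the line formats
# the two regex helpers above expect; assumes the usual "key=value" layout written
# by the training/eval messages in this script.
def _example_log_parsers():  # illustration only, never called by the script
    train_line = "*** Training Iteration a->b: iter=3, error-rate=15, num-dialogs=100 ***"
    assert getIntParam(train_line, 'iter') == 3
    assert getIntParam(train_line, 'error-rate') == 15
    result_line = "  Average reward = 11.50 +- 0.75"
    assert getFloatRange(result_line, 'Average reward') == (11.5, 0.75)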
def getDomainFromLog(l):
return l.split()[-1].split(',')
def extractEvalData(lines):
evalData = {}
training = False
domain_list = []
#domain_list = []#['SFRestaurants','SFHotels','Laptops11']
#for dom in domain_list:
# evalData[dom] = {}
cur_domain = None
for l in lines:
if l.find('List of domains:') >= 0:
# get the list of domains from the log by reading the lines where the ontologies are loaded
doms = getDomainFromLog(l)
for domain in doms:
if domain not in domain_list:
domain_list.append(domain)
evalData[domain] = {}
if l.find('*** Training Iteration') >= 0:
iteration = getIntParam(l, 'iter')+1
if iteration in list(evalData.keys()):
print("Duplicate iteration %d" % iteration)
exit(0)
for domain in domain_list:
evalData[domain][iteration] = {}
evalData[domain][iteration]['erate'] = getIntParam(l, 'error-rate')
evalData[domain][iteration]['ndialogs'] = getIntParam(l, 'num-dialogs')
training = True
elif l.find('*** Evaluating')>=0 and not training:
l = l.replace('CR', 'CamRestaurants')
erate = getIntParam(l, 'error-rate')
ll = l[l.find('*** Evaluating') + len('*** Evaluating')+1:]
(ll,x) = ll.split(':')
for domain in domain_list:
if domain in ll:
evalData[domain][erate] = {}
evalData[domain][erate]['policy'] = ll
evalData[domain][erate]['ndialogs'] = getIntParam(l, 'num-dialogs')
elif l.find('Results for domain:') >= 0:
cur_domain = l.split('Results for domain:')[1].split('--')[0].strip()
elif l.find('Average reward') >= 0:
if training:
evalData[cur_domain][iteration]['reward'] = getFloatRange(l, 'Average reward')
else:
evalData[cur_domain][erate]['reward'] = getFloatRange(l, 'Average reward')
elif l.find('Average success') >= 0:
if training:
evalData[cur_domain][iteration]['success'] = getFloatRange(l, 'Average success')
else:
evalData[cur_domain][erate]['success'] = getFloatRange(l, 'Average success')
elif l.find('Average turns') >= 0:
if training:
evalData[cur_domain][iteration]['turns'] = getFloatRange(l, 'Average turns')
else:
evalData[cur_domain][erate]['turns'] = getFloatRange(l, 'Average turns')
return evalData
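# Sketch of the structure extractEvalData returns (hypothetical numbers), keyed
# first by domain, then by training iteration (or by error rate when parsing an
# evaluation-only log):
#   {'CamRestaurants': {1: {'erate': 15, 'ndialogs': 100,
#                           'reward': (11.5, 0.75),
#                           'success': (0.82, 0.03),
#                           'turns': (7.1, 0.2)}}}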
def plotTrain(dname, rtab, stab, block=True, saveplot=False):
font = {
'weight': 'bold',
'size': 20}
plt.rc('font', **font)
global gplotnum
policylist = sorted(rtab.keys())
ncurves = len(policylist)
plt.figure(gplotnum)
gplotnum += 1
for policy in policylist:
tab = rtab[policy]
plt.subplot(211)
# plt.xlim((800, 4200))
if len(tab['x']) < 2:
plt.axhline(y=tab['y'][0], linestyle='--')
else:
plt.errorbar(tab['x'],tab['y'], yerr=tab['var'], label=policy)
# plt.errorbar(tab['x'], tab['y'], label=policy)
tab = stab[policy]
plt.subplot(212)
# plt.xlim((800, 4200))
if len(tab['x']) < 2:
plt.axhline(y=tab['y'][0], linestyle='--')
else:
plt.errorbar(tab['x'],tab['y'],yerr=tab['var'],label=policy)
# plt.errorbar(tab['x'], tab['y'], label=policy)
plt.subplot(211)
plt.grid()
plt.legend(loc='lower right', fontsize=14) # loc='lower right', best,
plt.title("Performance vs Num Train Dialogues")
plt.ylabel('Reward')
plt.subplot(212)
plt.grid()
plt.legend(loc='lower right', fontsize=14)
plt.xlabel('Num Dialogues')
plt.ylabel('Success')
if saveplot:
if not os.path.exists('_plots'):
os.mkdir('_plots')
plt.savefig('_plots/' + dname + '.png', bbox_inches='tight')
print('plot saved as', dname)
else:
plt.show(block=block)
def plotTest(dname, rtab, stab, block=True, saveplot=False):
global gplotnum
policylist = sorted(rtab.keys())
ncurves = len(policylist)
plt.figure(gplotnum)
gplotnum += 1
for policy in policylist:
tab = | |
thr in client_threads]
topic_conf.del_config()
s3_notification_conf.del_config(notification=notification_name)
# delete the bucket
zones[0].delete_bucket(bucket_name)
http_server.close()
def test_ps_topic():
""" test set/get/delete of topic """
_, ps_zones = init_env()
realm = get_realm()
zonegroup = realm.master_zonegroup()
bucket_name = gen_bucket_name()
topic_name = bucket_name+TOPIC_SUFFIX
# create topic
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
_, status = topic_conf.set_config()
assert_equal(status/100, 2)
# get topic
result, _ = topic_conf.get_config()
# verify topic content
parsed_result = json.loads(result)
assert_equal(parsed_result['topic']['name'], topic_name)
assert_equal(len(parsed_result['subs']), 0)
assert_equal(parsed_result['topic']['arn'],
'arn:aws:sns:' + zonegroup.name + ':' + get_tenant() + ':' + topic_name)
# delete topic
_, status = topic_conf.del_config()
assert_equal(status/100, 2)
    # verify the topic is deleted
result, status = topic_conf.get_config()
assert_equal(status, 404)
parsed_result = json.loads(result)
assert_equal(parsed_result['Code'], 'NoSuchKey')
def test_ps_topic_with_endpoint():
""" test set topic with endpoint"""
_, ps_zones = init_env()
bucket_name = gen_bucket_name()
topic_name = bucket_name+TOPIC_SUFFIX
# create topic
dest_endpoint = 'amqp://localhost:7001'
dest_args = 'amqp-exchange=amqp.direct&amqp-ack-level=none'
topic_conf = PSTopic(ps_zones[0].conn, topic_name,
endpoint=dest_endpoint,
endpoint_args=dest_args)
_, status = topic_conf.set_config()
assert_equal(status/100, 2)
# get topic
result, _ = topic_conf.get_config()
# verify topic content
parsed_result = json.loads(result)
assert_equal(parsed_result['topic']['name'], topic_name)
assert_equal(parsed_result['topic']['dest']['push_endpoint'], dest_endpoint)
# cleanup
topic_conf.del_config()
def test_ps_notification():
""" test set/get/delete of notification """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
topic_name = bucket_name+TOPIC_SUFFIX
# create topic
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
topic_conf.set_config()
# create bucket on the first of the rados zones
zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
# create notifications
notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_name)
_, status = notification_conf.set_config()
assert_equal(status/100, 2)
# get notification
result, _ = notification_conf.get_config()
parsed_result = json.loads(result)
assert_equal(len(parsed_result['topics']), 1)
assert_equal(parsed_result['topics'][0]['topic']['name'],
topic_name)
# delete notification
_, status = notification_conf.del_config()
assert_equal(status/100, 2)
result, status = notification_conf.get_config()
parsed_result = json.loads(result)
assert_equal(len(parsed_result['topics']), 0)
# TODO should return 404
# assert_equal(status, 404)
# cleanup
topic_conf.del_config()
zones[0].delete_bucket(bucket_name)
def test_ps_notification_events():
""" test set/get/delete of notification on specific events"""
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
topic_name = bucket_name+TOPIC_SUFFIX
# create topic
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
topic_conf.set_config()
# create bucket on the first of the rados zones
zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
# create notifications
events = "OBJECT_CREATE,OBJECT_DELETE"
notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_name,
events)
_, status = notification_conf.set_config()
assert_equal(status/100, 2)
# get notification
result, _ = notification_conf.get_config()
parsed_result = json.loads(result)
assert_equal(len(parsed_result['topics']), 1)
assert_equal(parsed_result['topics'][0]['topic']['name'],
topic_name)
assert_not_equal(len(parsed_result['topics'][0]['events']), 0)
# TODO add test for invalid event name
# cleanup
notification_conf.del_config()
topic_conf.del_config()
zones[0].delete_bucket(bucket_name)
def test_ps_subscription():
""" test set/get/delete of subscription """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
topic_name = bucket_name+TOPIC_SUFFIX
# create topic
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
topic_conf.set_config()
# create bucket on the first of the rados zones
bucket = zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
# create notifications
notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_name)
_, status = notification_conf.set_config()
assert_equal(status/100, 2)
# create subscription
sub_conf = PSSubscription(ps_zones[0].conn, bucket_name+SUB_SUFFIX,
topic_name)
_, status = sub_conf.set_config()
assert_equal(status/100, 2)
# get the subscription
result, _ = sub_conf.get_config()
parsed_result = json.loads(result)
assert_equal(parsed_result['topic'], topic_name)
# create objects in the bucket
number_of_objects = 10
for i in range(number_of_objects):
key = bucket.new_key(str(i))
key.set_contents_from_string('bar')
# wait for sync
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
# get the create events from the subscription
result, _ = sub_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event: objname: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
keys = list(bucket.list())
# TODO: use exact match
verify_events_by_elements(events, keys, exact_match=False)
# delete objects from the bucket
for key in bucket.list():
key.delete()
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
    # get the delete events from the subscription
    result, _ = sub_conf.get_events()
    events = json.loads(result)
    for event in events['events']:
log.debug('Event: objname: "' + str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
# TODO: check deletions
# TODO: use exact match
# verify_events_by_elements(events, keys, exact_match=False, deletions=True)
# we should see the creations as well as the deletions
# delete subscription
_, status = sub_conf.del_config()
assert_equal(status/100, 2)
result, status = sub_conf.get_config()
parsed_result = json.loads(result)
assert_equal(parsed_result['topic'], '')
# TODO should return 404
# assert_equal(status, 404)
# cleanup
notification_conf.del_config()
topic_conf.del_config()
zones[0].delete_bucket(bucket_name)
def test_ps_event_type_subscription():
""" test subscriptions for different events """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
# create topic for objects creation
topic_create_name = bucket_name+TOPIC_SUFFIX+'_create'
topic_create_conf = PSTopic(ps_zones[0].conn, topic_create_name)
topic_create_conf.set_config()
# create topic for objects deletion
topic_delete_name = bucket_name+TOPIC_SUFFIX+'_delete'
topic_delete_conf = PSTopic(ps_zones[0].conn, topic_delete_name)
topic_delete_conf.set_config()
# create topic for all events
topic_name = bucket_name+TOPIC_SUFFIX+'_all'
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
topic_conf.set_config()
# create bucket on the first of the rados zones
bucket = zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
# create notifications for objects creation
notification_create_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_create_name, "OBJECT_CREATE")
_, status = notification_create_conf.set_config()
assert_equal(status/100, 2)
# create notifications for objects deletion
notification_delete_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_delete_name, "OBJECT_DELETE")
_, status = notification_delete_conf.set_config()
assert_equal(status/100, 2)
# create notifications for all events
notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_name, "OBJECT_DELETE,OBJECT_CREATE")
_, status = notification_conf.set_config()
assert_equal(status/100, 2)
# create subscription for objects creation
sub_create_conf = PSSubscription(ps_zones[0].conn, bucket_name+SUB_SUFFIX+'_create',
topic_create_name)
_, status = sub_create_conf.set_config()
assert_equal(status/100, 2)
# create subscription for objects deletion
sub_delete_conf = PSSubscription(ps_zones[0].conn, bucket_name+SUB_SUFFIX+'_delete',
topic_delete_name)
_, status = sub_delete_conf.set_config()
assert_equal(status/100, 2)
# create subscription for all events
sub_conf = PSSubscription(ps_zones[0].conn, bucket_name+SUB_SUFFIX+'_all',
topic_name)
_, status = sub_conf.set_config()
assert_equal(status/100, 2)
# create objects in the bucket
number_of_objects = 10
for i in range(number_of_objects):
key = bucket.new_key(str(i))
key.set_contents_from_string('bar')
# wait for sync
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
# get the events from the creation subscription
result, _ = sub_create_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event (OBJECT_CREATE): objname: "' + str(event['info']['key']['name']) +
'" type: "' + str(event['event']) + '"')
keys = list(bucket.list())
# TODO: use exact match
verify_events_by_elements(events, keys, exact_match=False)
# get the events from the deletions subscription
result, _ = sub_delete_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event (OBJECT_DELETE): objname: "' + str(event['info']['key']['name']) +
'" type: "' + str(event['event']) + '"')
assert_equal(len(events['events']), 0)
# get the events from the all events subscription
result, _ = sub_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event (OBJECT_CREATE,OBJECT_DELETE): objname: "' +
str(event['info']['key']['name']) + '" type: "' + str(event['event']) + '"')
# TODO: use exact match
verify_events_by_elements(events, keys, exact_match=False)
# delete objects from the bucket
for key in bucket.list():
key.delete()
# wait for sync
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
log.debug("Event (OBJECT_DELETE) synced")
# get the events from the creations subscription
result, _ = sub_create_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event (OBJECT_CREATE): objname: "' + str(event['info']['key']['name']) +
'" type: "' + str(event['event']) + '"')
# deletions should not change the creation events
# TODO: use exact match
verify_events_by_elements(events, keys, exact_match=False)
# get the events from the deletions subscription
result, _ = sub_delete_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event (OBJECT_DELETE): objname: "' + str(event['info']['key']['name']) +
'" type: "' + str(event['event']) + '"')
# only deletions should be listed here
# TODO: use exact match
verify_events_by_elements(events, keys, exact_match=False, deletions=True)
# get the events from the all events subscription
    result, _ = sub_conf.get_events()
events = json.loads(result)
for event in events['events']:
log.debug('Event (OBJECT_CREATE,OBJECT_DELETE): objname: "' + str(event['info']['key']['name']) +
'" type: "' + str(event['event']) + '"')
# both deletions and creations should be here
# TODO: use exact match
verify_events_by_elements(events, keys, exact_match=False, deletions=False)
# verify_events_by_elements(events, keys, exact_match=False, deletions=True)
# TODO: (1) test deletions (2) test overall number of events
# test subscription deletion when topic is specified
_, status = sub_create_conf.del_config(topic=True)
assert_equal(status/100, 2)
_, status = sub_delete_conf.del_config(topic=True)
assert_equal(status/100, 2)
_, status = sub_conf.del_config(topic=True)
assert_equal(status/100, 2)
# cleanup
notification_create_conf.del_config()
notification_delete_conf.del_config()
notification_conf.del_config()
topic_create_conf.del_config()
topic_delete_conf.del_config()
topic_conf.del_config()
zones[0].delete_bucket(bucket_name)
def test_ps_event_fetching():
""" test incremental fetching of events from a subscription """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
topic_name = bucket_name+TOPIC_SUFFIX
# create topic
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
topic_conf.set_config()
# create bucket on the first of the rados zones
bucket = zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
# create notifications
notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
topic_name)
_, status = notification_conf.set_config()
assert_equal(status/100, 2)
# create subscription
sub_conf = PSSubscription(ps_zones[0].conn, bucket_name+SUB_SUFFIX,
topic_name)
_, status = sub_conf.set_config()
assert_equal(status/100, 2)
# create objects in the bucket
number_of_objects = 100
for i in range(number_of_objects):
key = bucket.new_key(str(i))
key.set_contents_from_string('bar')
# wait for sync
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
max_events = 15
total_events_count = 0
next_marker = None
all_events = []
while True:
# get the events from the subscription
result, _ = sub_conf.get_events(max_events, next_marker)
| |
does not specify a type.".format(name))
# Rule: If the parameter definition has no default value, the template must provide a value
parameter = parameters.get(name, definition.get('defaultValue'))
if parameter is None:
raise ValueError("A value for parameter '{}' must be provided "
"by the job.".format(name))
# Rule: If the parameter definition specifies 'int', the value provided must be compatible
if definition['type'] == 'int':
try:
_validate_int(parameter, {})
except TypeError:
raise ValueError("'Value '{}' supplied for parameter '{}' must be an "
"integer.".format(parameter, name))
# Rule: if the parameter definition specified 'bool', the value provided must be compatible
elif definition['type'] == 'bool':
try:
_validate_bool(parameter)
except TypeError:
raise ValueError("'Value '{}' supplied for parameter '{}' must be a "
"boolean.".format(parameter, name))
# Rule: Only parameters values defined by the template are permitted
violations = [k for k in parameters if k not in definitions]
if violations:
raise ValueError("Provided parameter(s) {} are not expected "
"by the template.".format(', '.join(violations)))
def _validate_generated_job(job):
"""Validate the partial job generated from an application template prior
to merging it with the original job.
:param dict job: A partial generated job specification to validate.
"""
# Rule: The job generated by an application template may not use properties reserved for job use
# (This is a safety to prevent clever abuse of template syntax
# to specify things that shouldn't be.)
reserved = [k for k in job if k in models.PROPS_RESERVED_FOR_JOBS]
if reserved:
raise ValueError("Application templates may not specify these "
"properties: {}".format(', '.join(reserved)))
# Rule: Templates may only specify properties permitted
unsupported = [k for k in job if k not in models.PROPS_PERMITTED_ON_TEMPLATES]
if unsupported:
raise ValueError("Application templates may not use these "
"properties: {}".format(', '.join(unsupported)))
def _validate_metadata(metadata):
"""Validate the provided metadata is valid.
:param list metadata: A list of metadata dicts.
"""
# Rule: The prefix 'az_batch:' is reserved for our use
# and can't be specified on job nor on template.
violation = [k for k in [m['name'] for m in metadata] if k.startswith('az_batch')]
if violation:
raise ValueError("Metadata item(s) '{}' cannot be used; the prefix 'az_batch:' is "
"reserved for Batch use.".format(', '.join(violation)))
def _validate_parameter(name, content, value):
"""Validate the input parameter is valid for specified template. Checks the following:
Check input fit with parameter type, if yes, convert to correct type
Check input matched with the restriction of parameter
:param str name: The parameter name.
:param dict content: The template parameter definition.
:param str value: The raw parameter value.
    :returns: Validated input parameter, otherwise None.
"""
try:
if content['type'] == 'int':
value = _validate_int(value, content)
elif content['type'] == 'bool':
value = _validate_bool(value)
elif content['type'] == 'string':
value = _validate_string(value, content)
if value not in content.get('allowedValues', [value]):
raise ValueError("Allowed values: {}".format(', '.join(content['allowedValues'])))
except TypeError:
raise TypeError("The value '{}' of parameter '{}' is not a {}".format(
            value, name, content['type']))
except ValueError as value_error:
raise ValueError(
"The value '{}' of parameter '{}' does not meet the requirement: {}".format(
                value, name, str(value_error)))
else:
return value
def _get_template_params(template, param_values):
"""Return all required parameter values for the specified template.
:param dict template: Template JSON object.
:param dict param_values: User provided parameter values.
"""
param_keys = {}
try:
for param, values in template['parameters'].items():
if 'type' not in values:
raise ValueError('Parameter {} does not have type defined'.format(param))
try:
# Support both ARM and dictionary syntax
# ARM: '<PropertyName>' : { 'value' : '<PropertyValue>' }
# Dictionary: '<PropertyName>' : <PropertyValue>'
value = param_values[param]
param_keys[param] = value.get('value') if isinstance(value, dict) else value
except KeyError:
param_keys[param] = values.get('defaultValue')
except KeyError:
pass # No parameters to expand
return param_keys
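# A minimal usage sketch (hypothetical template and values, illustration only)
# showing the two value syntaxes _get_template_params accepts.
def _example_get_template_params():  # not called anywhere; safe to remove
    template = {'parameters': {'jobId': {'type': 'string', 'defaultValue': 'job-1'}}}
    assert _get_template_params(template, {'jobId': {'value': 'job-42'}}) == {'jobId': 'job-42'}  # ARM syntax
    assert _get_template_params(template, {'jobId': 'job-42'}) == {'jobId': 'job-42'}  # dictionary syntax
    assert _get_template_params(template, {}) == {'jobId': 'job-1'}  # falls back to defaultValue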
def _parse_arm_parameter(name, template_obj, parameters):
"""Render the content of an ARM property
:param str name: The name of the property to render.
:param dict template_obj: The loaded contents of the JSON template.
:param dict parameters: The loaded contents of the JSON parameters.
"""
# Find name of root parameter
param_name_end = _find_nested(')', name, 0) # Find end of name
if param_name_end >= len(name):
raise ValueError(
"Template reference misformatted for parameter '{}'".format(name))
# Interpret name of parameter
param_name = _parse_arm_expression(
name[1:param_name_end-1],
template_obj,
parameters)
# Make sure there are defined parameters
if 'parameters' not in template_obj:
raise ValueError("Template defines no parameters but tried to use '{}'".format(param_name))
try:
# Get parameter object
param_def = template_obj['parameters'][param_name]
# Parse nested object if exists
if len(name) > param_name_end+1:
param_def = _parse_nested_object(
param_def,
name[param_name_end+1:],
template_obj,
parameters)
except KeyError:
raise ValueError("Template does not define parameter '{}'".format(param_name))
user_value = param_def.get('defaultValue')
if parameters and param_name in parameters:
# Support both ARM and dictionary syntax
# ARM: '<PropertyName>' : { 'value' : '<PropertyValue>' }
# Dictionary: '<PropertyName>' : <PropertyValue>'
user_value = parameters[param_name]
try:
user_value = user_value['value']
except TypeError:
pass
if user_value is None:
raise errors.MissingParameterValue(
"No value supplied for parameter '{}' and no default value".format(param_name),
parameter_name=param_name,
parameter_description=param_def.get('metadata', {}).get('description'))
if isinstance(user_value, dict):
# If substitute value is a complex object - it may require
# additional parameter substitutions
return _parse_template(json.dumps(user_value), template_obj, parameters)
try:
if param_def['type'] == 'int':
return _validate_int(user_value, param_def)
if param_def['type'] == 'bool':
return _validate_bool(user_value)
if param_def['type'] == 'string':
return _validate_string(user_value, param_def)
except TypeError:
raise TypeError("Value '{}' for parameter '{}' must be a {}.".format(
user_value, param_name, param_def['type']))
else:
raise TypeError("Parameter type '{}' not supported.".format(param_def['type']))
def _parse_nested_object(obj, references, template_obj, parameters):
""" Decouple [] and . notation references. Then applies to object.
:param object obj: Root object being traversed
:param str references: String of references to be decoupled
:param dict template_obj: The loaded contents of the JSON template.
:param dict parameters: The loaded contents of the JSON parameters.
:return: Object referenced
"""
obj_refs = []
ret_obj = obj
var_name = references
# Find and interpret each nested object and add them to a queue
while True:
start_dict = _find_nested('[', var_name, 0)
start_obj = _find_nested('.', var_name, 0)
# Handles nested [] references
if 0 <= start_dict < start_obj:
end_index = _find_nested(']', var_name, start_dict + 1)
obj_ref_str = var_name[start_dict + 1:end_index]
obj_refs.append(
_parse_arm_expression(obj_ref_str, template_obj, parameters))
var_name = var_name[:start_dict] + var_name[end_index + 1:]
# Handles nested . references
elif 0 <= start_obj < start_dict:
next_start_dict = _find_nested('[', var_name, 1)
next_start_obj = _find_nested('.', var_name, 1)
end_index = next_start_dict if next_start_dict < next_start_obj else next_start_obj
end_index = end_index if end_index > start_obj else len(var_name)
obj_ref_str = var_name[start_obj + 1:end_index]
obj_refs.append(
_parse_arm_expression(obj_ref_str, template_obj, parameters))
var_name = var_name[:start_obj] + var_name[end_index:]
else:
break
while obj_refs:
ref = obj_refs.pop(0)
ret_obj = ret_obj[ref]
return ret_obj
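# Reference sketch (hypothetical expression): for variables('storage').endpoints['blob'],
# the text after the closing parenthesis -- ".endpoints['blob']" -- is handed to
# _parse_nested_object, which resolves each '.' and '[...]' segment in turn and
# indexes into the resolved variable object with the parsed keys.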
def _parse_arm_variable(name, template_obj, parameters):
"""Render the value of an ARM variable.
:param str name: The name of the variable to render.
:param dict template_obj: The loaded contents of the JSON template.
:param dict parameters: The loaded contents of the JSON parameters.
"""
try:
# Get head object referenced
variable_name_end = _find_nested(')', name, 0) # Find end of variable name
if variable_name_end >= len(name):
raise ValueError("Template reference misformatted for variable '{}'".format(name))
variable_name = _parse_arm_expression(
name[1:variable_name_end-1],
template_obj,
parameters) # Make sure inner name is fully parsed
variable_obj = template_obj['variables'][variable_name]
# If there is any text after ')' then there additional references on the object
if len(name) > variable_name_end+1:
variable_obj = _parse_nested_object(
variable_obj,
name[variable_name_end+1:],
template_obj,
parameters)
# parse the result object
variable = _parse_arm_expression(
variable_obj,
template_obj, parameters)
except KeyError:
raise ValueError("Template contains no definition for variable '{}'".format(name))
if isinstance(variable, dict):
# If substitute value is a complex object - it may require
# additional parameter substitutions
return _parse_template(json.dumps(variable), template_obj, parameters)
return variable
def _parse_arm_concat(expression, template_obj, parameters):
"""Evaluate an ARM concat expression.
:param str expression: The concat expression to evaluate.
:param dict template_obj: The loaded contents of the JSON template.
:param dict parameters: The loaded contents of the JSON parameters.
"""
content = ""
index = 0
while index < len(expression):
end = _find_nested(',', expression, index)
argument = expression[index:end].strip()
content += _parse_arm_expression(argument, template_obj, parameters)
index = end + 1
return content
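# Concat sketch (hypothetical expression): for "[concat('run-', parameters('suffix'))]"
# the inner text "'run-', parameters('suffix')" reaches this function, is split on
# top-level commas, and each argument is resolved via _parse_arm_expression before
# the pieces are joined into a single string.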
def _parse_arm_expression(expression, template_obj, parameters):
"""Determine if a section of the template is an ARM reference, and calculate
the replacement accordingly. The result will be correctly typed to suit the
parameter definition (e.g. will return a | |
import warnings
import numpy as np
import dateutil
import dateutil.parser
import geopandas as gpd
import pandas as pd
import pytz
from shapely import wkt
from shapely.geometry import Point
def localize_timestamp(dt_series, pytz_tzinfo, col_name):
"""
Helper function that adds timezone info to timestamp
Parameters
----------
dt_series: pandas.Series
a pandas datetime series
pytz_tzinfo: str
pytz compatible timezone string. If none UTC will be assumed
col_name: str
Column name for informative warning message
Returns
-------
pd.Series
a timezone aware pandas datetime series
"""
if pytz_tzinfo is None:
warnings.warn("Assuming UTC timezone for column {}".format(col_name))
pytz_tzinfo = 'utc'
timezone = pytz.timezone(pytz_tzinfo)
return dt_series.apply(pd.Timestamp, tz=timezone)
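# Minimal usage sketch (assumed inputs, illustration only):
#   >>> s = pd.Series([pd.Timestamp('2021-01-01 12:00:00')])
#   >>> localize_timestamp(s, 'Europe/Zurich', 'tracked_at')
# returns a timezone-aware series; passing pytz_tzinfo=None issues the warning
# above and falls back to UTC.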
def read_positionfixes_csv(*args, columns=None, tz=None, index_col=object(), crs=None, **kwargs):
"""Wraps the pandas read_csv function, extracts longitude and latitude and
builds a geopandas GeoDataFrame. This also validates that the ingested data
conforms to the trackintel understanding of positionfixes (see
:doc:`/modules/model`).
Parameters
----------
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
tz : str, optional
pytz compatible timezone string. If None UTC is assumed.
index_col : str, optional
column name to be used as index. If None the default index is assumed
as unique identifier.
crs: pyproj.crs or str, optional
Set coordinate reference system. The value can be anything accepted
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
Returns
-------
GeoDataFrame
A GeoDataFrame containing the positionfixes.
Notes
-----
Note that this function is primarily useful if data is available in a
longitude/latitude format. If your data already contains a WKT column, it
might be easier to just use the GeoPandas import functions.
Examples
--------
>>> trackintel.read_positionfixes_csv('data.csv')
>>> trackintel.read_positionfixes_csv('data.csv', columns={'time':'tracked_at', 'User':'user_id'})
"""
columns = {} if columns is None else columns
# Warning if no 'index_col' parameter is provided
if type(index_col) == object:
warnings.warn("Assuming default index as unique identifier. Pass 'index_col=None' as explicit" +
"argument to avoid a warning when reading csv files.")
elif index_col is not None:
kwargs['index_col'] = index_col
df = pd.read_csv(*args, **kwargs)
df = df.rename(columns=columns)
# construct geom column from lon and lat
df['geom'] = list(zip(df['longitude'], df['latitude']))
df['geom'] = df['geom'].apply(Point)
# transform to datatime
df["tracked_at"] = pd.to_datetime(df["tracked_at"])
# set timezone if none is recognized
for col in ['tracked_at']:
if not pd.api.types.is_datetime64tz_dtype(df[col]):
df[col] = localize_timestamp(dt_series=df[col], pytz_tzinfo=tz, col_name=col)
df = df.drop(['longitude', 'latitude'], axis=1)
gdf = gpd.GeoDataFrame(df, geometry='geom')
if crs:
gdf.set_crs(crs, inplace=True)
assert gdf.as_positionfixes
return gdf
def write_positionfixes_csv(positionfixes, filename, *args, **kwargs):
"""Wraps the pandas to_csv function, but strips the geometry column ('geom') and
stores the longitude and latitude in respective columns.
Parameters
----------
positionfixes : GeoDataFrame
The positionfixes to store to the CSV file.
filename : str
The file to write to.
"""
gdf = positionfixes.copy()
gdf['longitude'] = positionfixes.geometry.apply(lambda p: p.coords[0][0])
gdf['latitude'] = positionfixes.geometry.apply(lambda p: p.coords[0][1])
df = gdf.drop(gdf.geometry.name, axis=1)
df.to_csv(filename, index=True, *args, **kwargs)
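# Round-trip sketch (hypothetical file names, illustration only):
#   >>> pfs = read_positionfixes_csv('data.csv', index_col='id', tz='utc')
#   >>> write_positionfixes_csv(pfs, 'data_out.csv')
# The geometry column is split back into 'longitude'/'latitude' columns on write.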
def read_triplegs_csv(*args, columns=None, tz=None, index_col=object(), crs=None, **kwargs):
"""Wraps the pandas read_csv function, extracts a WKT for the leg geometry and
builds a geopandas GeoDataFrame. This also validates that the ingested data
conforms to the trackintel understanding of triplegs (see :doc:`/modules/model`).
Parameters
----------
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
tz : str, optional
pytz compatible timezone string. If None UTC is assumed.
index_col : str, optional
column name to be used as index. If None the default index is assumed
as unique identifier.
crs: pyproj.crs or str, optional
Set coordinate reference system. The value can be anything accepted
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
Returns
-------
GeoDataFrame
A GeoDataFrame containing the triplegs.
Examples
--------
>>> trackintel.read_triplegs_csv('data.csv')
>>> trackintel.read_triplegs_csv('data.csv', columns={'start_time':'started_at', 'User':'user_id'})
"""
columns = {} if columns is None else columns
# Warning if no 'index_col' parameter is provided
if type(index_col) == object:
warnings.warn("Assuming default index as unique identifier. Pass 'index_col=None' as explicit" +
"argument to avoid a warning when reading csv files.")
elif index_col is not None:
kwargs['index_col'] = index_col
df = pd.read_csv(*args, **kwargs)
df = df.rename(columns=columns)
df['geom'] = df['geom'].apply(wkt.loads)
df['started_at'] = df['started_at'].apply(dateutil.parser.parse)
df['finished_at'] = df['finished_at'].apply(dateutil.parser.parse)
# check and/or set timezone
for col in ['started_at', 'finished_at']:
if not pd.api.types.is_datetime64tz_dtype(df[col]):
df[col] = localize_timestamp(dt_series=df[col], pytz_tzinfo=tz, col_name=col)
else:
# dateutil parser timezones are sometimes not compatible with pandas (e.g., in asserts)
tz = df[col].iloc[0].tzinfo.tzname(df[col].iloc[0])
df[col] = df[col].dt.tz_convert(tz)
gdf = gpd.GeoDataFrame(df, geometry='geom')
if crs:
gdf.set_crs(crs, inplace=True)
assert gdf.as_triplegs
return gdf
def write_triplegs_csv(triplegs, filename, *args, **kwargs):
"""Wraps the pandas to_csv function, but transforms the geom into WKT
before writing.
Parameters
----------
triplegs : GeoDataFrame
The triplegs to store to the CSV file.
filename : str
The file to write to.
"""
geo_col_name = triplegs.geometry.name
gdf = pd.DataFrame(triplegs, copy=True)
gdf[geo_col_name] = triplegs.geometry.apply(wkt.dumps)
gdf.to_csv(filename, index=True, *args, **kwargs)
def read_staypoints_csv(*args, columns=None, tz=None, index_col=object(), crs=None, **kwargs):
"""Wraps the pandas read_csv function, extracts a WKT for the staypoint
geometry and builds a geopandas GeoDataFrame. This also validates that
the ingested data conforms to the trackintel understanding of staypoints
(see :doc:`/modules/model`).
Parameters
----------
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
tz : str, optional
pytz compatible timezone string. If None UTC is assumed.
index_col : str, optional
column name to be used as index. If None the default index is assumed
as unique identifier.
crs: pyproj.crs or str, optional
Set coordinate reference system. The value can be anything accepted
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
Returns
-------
GeoDataFrame
A GeoDataFrame containing the staypoints.
Examples
--------
>>> trackintel.read_staypoints_csv('data.csv')
>>> trackintel.read_staypoints_csv('data.csv', columns={'start_time':'started_at', 'User':'user_id'})
"""
columns = {} if columns is None else columns
# Warning if no 'index_col' parameter is provided
if type(index_col) == object:
warnings.warn("Assuming default index as unique identifier. Pass 'index_col=None' as explicit" +
"argument to avoid a warning when reading csv files.")
elif index_col is not None:
kwargs['index_col'] = index_col
df = pd.read_csv(*args, **kwargs)
df = df.rename(columns=columns)
df['geom'] = df['geom'].apply(wkt.loads)
df['started_at'] = df['started_at'].apply(dateutil.parser.parse)
df['finished_at'] = df['finished_at'].apply(dateutil.parser.parse)
# check and/or set timezone
for col in ['started_at', 'finished_at']:
if not pd.api.types.is_datetime64tz_dtype(df[col]):
df[col] = localize_timestamp(dt_series=df[col], pytz_tzinfo=tz, col_name=col)
else:
# dateutil parser timezones are sometimes not compatible with pandas (e.g., in asserts)
tz = df[col].iloc[0].tzinfo.tzname(df[col].iloc[0])
df[col] = df[col].dt.tz_convert(tz)
gdf = gpd.GeoDataFrame(df, geometry='geom')
if crs:
gdf.set_crs(crs, inplace=True)
assert gdf.as_staypoints
return gdf
def write_staypoints_csv(staypoints, filename, *args, **kwargs):
"""Wraps the pandas to_csv function, but transforms the geom into WKT
before writing.
Parameters
----------
staypoints : GeoDataFrame
The staypoints to store to the CSV file.
filename : str
The file to write to.
"""
geo_col_name = staypoints.geometry.name
gdf = pd.DataFrame(staypoints, copy=True)
gdf[geo_col_name] = staypoints.geometry.apply(wkt.dumps)
gdf.to_csv(filename, index=True, *args, **kwargs)
def read_locations_csv(*args, columns=None, index_col=object(), crs=None, **kwargs):
"""Wraps the pandas read_csv function, extracts a WKT for the location
center (and extent) and builds a geopandas GeoDataFrame. This also
validates that the ingested data conforms to the trackintel understanding
of locations (see :doc:`/modules/model`).
Parameters
----------
columns : dict, optional
The column names to rename in the format {'old_name':'trackintel_standard_name'}.
index_col : str, optional
column name to be used as index. If None the default index is assumed
as unique identifier.
crs: pyproj.crs or str, optional
Set coordinate reference system. The value can be anything accepted
by pyproj.CRS.from_user_input(), such as an authority string
(eg “EPSG:4326”) or a WKT string.
Returns
-------
GeoDataFrame
A GeoDataFrame containing the locations.
Examples
--------
>>> trackintel.read_locations_csv('data.csv')
>>> trackintel.read_locations_csv('data.csv', columns={'start_time':'started_at', 'User':'user_id'})
"""
columns = {} if columns is None else columns
# Warning if no 'index_col' parameter is provided
if type(index_col) == object:
warnings.warn("Assuming default index as unique identifier. Pass 'index_col=None' as explicit" +
"argument to avoid a warning when reading csv files.")
elif index_col is not None:
kwargs['index_col'] = index_col
df = pd.read_csv(*args, **kwargs)
df = df.rename(columns=columns)
df['center'] = df['center'].apply(wkt.loads)
if 'extent' in df.columns:
df['extent'] = df['extent'].apply(wkt.loads)
gdf = gpd.GeoDataFrame(df, geometry='center')
if crs:
gdf.set_crs(crs, inplace=True)
assert gdf.as_locations
return gdf
def write_locations_csv(locations, filename, *args, **kwargs):
"""Wraps the pandas to_csv function, but transforms the center (and
extent) into WKT | |
# -*- coding: utf-8 -*-
import numpy as np
import json
import os
import sys
import time
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torchattacks
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import argparse
from Source.utils import stitch_images
parser = argparse.ArgumentParser(description='✨Welcome to YourBench-Adversarial Attack Robustness Benchmarking & Reporting tools.✨')
parser.add_argument('-a', '--attack_method', required=True, type=str, nargs='*', choices=['FGSM', 'CW', 'PGD', 'DeepFool'], dest='parsedAttackMethod', action='store')
parser.add_argument('-m', '--model', required=True, type=str, choices=['ResNet101_2', 'ResNet18', 'Custom'], dest='parsedModel')
parser.add_argument('-d', '--dataset', required=True, type=str, choices=['CIFAR-10', 'CIFAR-100', 'ImageNet', 'Custom'], dest='parsedDataset')
args = parser.parse_args()
simple_data = False
print(args.parsedAttackMethod) # ['FGSM']
print(args.parsedModel) # WRN
print(args.parsedDataset)
# Hyper Parameter settings
use_cuda = torch.cuda.is_available()
print("PyTorch", torch.__version__)
print("Torchvision", torchvision.__version__)
print("Torchattacks", torchattacks.__version__)
print("Numpy", np.__version__)
# CUDA Settings
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda:0' if USE_CUDA else 'cpu')
print('Device used for training:', device)
# 1. Load Data
# https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json
# class_idx = json.load(open("./data/imagenet_class_index.json"))
# idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
transform = transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(), # ToTensor : [0, 255] -> [0, 1]
])
# imagnet_data = image_folder_custom_label(root='./data/oneImage', transform=transform, idx2label=idx2label)
# data_loader = torch.utils.data.DataLoader(imagnet_data, batch_size=10, shuffle=False)
if args.parsedDataset == 'CIFAR-10':
cifar10_data = torchvision.datasets.CIFAR10('Data/CIFAR10', download=True, transform=transform)
data_loader = torch.utils.data.DataLoader(cifar10_data, batch_size=5)
simple_data = True
elif args.parsedDataset == 'CIFAR-100':
cifar100_data = torchvision.datasets.CIFAR100('Data/CIFAR100', download=True, transform=transform)
data_loader = torch.utils.data.DataLoader(cifar100_data, batch_size=5)
simple_data = True
elif args.parsedDataset == 'ImageNet':
imagenet_data = torchvision.datasets.ImageNet('Data/ImageNet', download=True, transform=transform)
data_loader = torch.utils.data.DataLoader(imagenet_data, batch_size=5)
class Normalize(nn.Module) :
def __init__(self, mean, std) :
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1, 3, 1, 1)
std = self.std.reshape(1, 3, 1, 1)
mean.to(device)
std.to(device)
input.to(device)
return (input - mean) / std
if args.parsedModel == 'ResNet101_2':
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
model = nn.Sequential(
norm_layer,
models.wide_resnet101_2(pretrained=True)
).to(device)
model = model.eval()
elif args.parsedModel == 'ResNet18':
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
model = nn.Sequential(
norm_layer,
models.resnet18(pretrained=True)
).to(device)
model = model.eval()
elif args.parsedModel == 'Custom':
pkg = __import__('custom_net')
model_custom = pkg.my_model(pretrained = False)
    # Put the path to the saved state_dict here.
model_custom.load_state_dict(torch.load('./my_model.pth'))
norm_layer = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
model = nn.Sequential(
norm_layer,
model_custom
).cuda()
model = model.eval()
# 3. Attacks
from torchattacks import *
attackMethodDict = {'FGSM': FGSM(model, eps=8/255),
'CW' : CW(model, c=1, lr=0.01, steps=100, kappa=0),
'PGD' : PGD(model, eps=8/255, alpha=2/225, steps=100, random_start=True),
'DeepFool': DeepFool(model, steps=100)}
atks = [
VANILA(model),
#FGSM(model, eps=8/255),
#CW(model, c=1, lr=0.01, steps=100, kappa=0),
#PGD(model, eps=8/255, alpha=2/225, steps=100, random_start=True),
#DeepFool(model, steps=100),
]
# Only run the attack methods requested via command-line arguments.
for atk in args.parsedAttackMethod:
atks.append(attackMethodDict[atk])
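# Example invocation (hypothetical script name/paths, illustration only):
#   python yourbench.py -a FGSM PGD -m ResNet18 -d CIFAR-10
# appends FGSM and PGD from attackMethodDict to `atks`; VANILA always stays first
# as the clean (no-attack) baseline.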
print(atks)
print("Adversarial Image & Predicted Label")
# +
vanilla_output = []
untargeted_output= []
targeted_output= []
for atk in atks :
print("-"*70)
print(atk)
correct = 0
top5_correct = 0
total = 0
for images, labels in data_loader:
# images : torch.Size([1,3,299,299])
# labels: torch.Size([10]),[7, 5, 388, 1, ...] -> cock, electric_ray, giant_panda...
atk.set_mode_default()
#print(images.shape)
start = time.time()
adv_images = atk(images, labels)
labels = labels.to(device)
        outputs = model(adv_images)  # outputs: torch.Size([batch_size, 1000]), the model's predictions for the adversarial images; outputs.data: [batch_size, 1000]
#print(outputs.shape)
#print(outputs.data.shape)
        _, pre = torch.max(outputs.data, 1)  # keep the largest of the 1000 class scores; torch.max returns (values, indices); with batch_size > 1 these are arrays of length batch_size
_, top_5 = torch.topk(outputs.data, 5)
#print(top_5)
#print(labels.shape)
total += len(images)
correct += (pre == labels).sum()
break
print('Total elapsed time (sec): %.2f' % (time.time() - start))
print('Robust accuracy: %.2f %%' % (100 * float(correct) / total))
if atk.attack == "VANILA":
for i in range (len(atks) - 1):
vanilla_output.append(100 * float(correct) / total)
else:
untargeted_output.append(100 * float(correct) / total)
if atk.attack == ("FGSM" or "CW"):
print("-"*70)
print(atk)
for images, labels in data_loader:
atk.set_mode_targeted_least_likely()
start = time.time()
adv_images = atk(images, labels)
labels = labels.to(device)
            outputs = model(adv_images)  # outputs: torch.Size([batch_size, 1000]), the model's predictions for the adversarial images; outputs.data: [batch_size, 1000]
            _, pre = torch.max(outputs.data, 1)  # keep the largest of the 1000 class scores; torch.max returns (values, indices); with batch_size > 1 these are arrays of length batch_size
_, top_5 = torch.topk(outputs.data, 5)
total += len(images)
correct += (pre == labels).sum()
break
print('Total elapsed time (sec): %.2f' % (time.time() - start))
print('Robust accuracy: %.2f %%' % (100 * float(correct) / total))
targeted_output.append(100 * float(correct) / total)
elif atk.attack != "VANILA" and (atk.attack == "PGD" or "DeepFool"):
targeted_output.append(-10)
# -
print("==================")
print(adv_images.shape)
# The default `log_dir` is "runs"; here we point it to a more specific subdirectory
writer = SummaryWriter('Tutorials/runs/white_box_attack_image_net')
images = images.cuda()
writer.add_graph(model, images)
writer.close()
#make Data/Generated directory
os.makedirs("./Data/Generated", exist_ok=True)
# Save Image in Folder
for i in range(adv_images.shape[0]):
torchvision.utils.save_image(images[i], fp=f"./Data/Generated/image_original_{i+1}.jpg", normalize=True)
torchvision.utils.save_image(adv_images[i], fp=f"./Data/Generated/image_adv_{i+1}.jpg", normalize=True)
# 4. Report Generating
# Plot the accuracy graph with matplotlib
x_val =[]
for atk in atks:
if atk.attack == "VANILA":
continue
x_val.append(atk.attack)
plt.plot(x_val, vanilla_output, color='green', label = 'VANILLA')
plt.plot(x_val, untargeted_output, color='blue', label = 'DEFAULT')
plt.plot(x_val, targeted_output, color='red', label = 'TARGETED')
#plt.legend(loc=(0.73,0.775))
plt.legend(loc=(0.0,0.775))
plt.xlabel('Attack Method')
plt.ylabel('Accuracy (%)\nnegative value for unsupported attacks')
plt.savefig('./Data/Generated/graph.jpg', dpi=300)
# Compute the overall robustness score
total_result = 0
for atk in untargeted_output:
total_result = total_result + atk
total_result = total_result / 0.001 if vanilla_output[0] == 0 else vanilla_output[0] / len(untargeted_output)
total_grade = ""
if simple_data == True:
if total_result >= 45.0:
total_grade = "A"
elif total_result >= 35.0:
total_grade = "B"
elif total_result >= 25.0:
total_grade = "C"
elif total_result >= 15.0:
total_grade = "D"
else:
total_grade = "F"
from fpdf import FPDF
from torchvision.transforms.functional import to_pil_image
from PIL.Image import Image
import PIL
class PDF(FPDF):
def header(self):
self.set_font("Times", "B", 20)
# Moving cursor to the right:
self.cell(80)
self.cell(30, 10, "Benchmark Result", 0, 0, "C")
self.cell(0, 10, ("Grade : " + total_grade if simple_data else "Score : " + str(total_result)),0, 0, "R")
        # Performing a line break:
self.ln(20)
def footer(self):
# Position cursor at 1.5 cm from bottom:
self.set_y(-15)
# Setting font: helvetica italic 8
self.set_font("helvetica", "I", 8)
# Printing page number:
self.cell(0, 10, f"Page {self.page_no()}/{{nb}}", 0, 0, "C")
# Instantiation of inherited class
pdf = PDF()
pdf.set_display_mode(zoom='fullwidth',layout='two')
pdf.alias_nb_pages()  # substitute the alias for the total number of pages
pdf.add_page()
pdf.set_auto_page_break(True)
# Mapped Network
top_y = pdf.get_y()
#pdf.set_font("Times", "B", size=12)
#pdf.cell(0, 10, f"Mapped Network", 0, 1)
#pdf.set_font("Helvetica", "I", 12)
#pdf.cell(0, 10, f"<This function is still working in process.>", 0, 1)
# 1. Successful adversarial examples
pdf.set_font("Times", "B", size=12)
pdf.cell(0, 10, f"Succeeded Adversarial examples", 0, 1)
# Effective page width, or just epw
epw = pdf.w - 2*pdf.l_margin
img_size = epw/2 - 20
np_images = (images[0:5].cpu().numpy().transpose(0,2,3,1) * 255).astype(np.uint8)
np_adv_images = (adv_images[0:5].cpu().numpy().transpose(0,2,3,1) * 255).astype(np.uint8)
original_labels = [str(l) for l in labels.cpu().numpy()[0:5]]
predicted_labels = [str(l) for l in pre.cpu().numpy()[0:5]]
outputImage = stitch_images(np_images, np_adv_images, original_labels, predicted_labels)
import cv2
cv2.imwrite("./Data/Generated/stitchedImage.jpg", outputImage)
#torchvision.utils.save_image(outputImage, fp=f"./Data/Generated/stitchedImage.jpg", normalize=True)
pdf.image(f'./Data/Generated/stitchedImage.jpg', w=img_size)
# for i in range(max(5, adv_images.shape[0])):
# pdf.image(f'./Data/Generated/image_original_{i+1}.jpg', w=img_size, h=img_size)
# pdf.set_xy(pdf.get_x() + img_size + 10, pdf.get_y() - img_size)
# pdf.image(f'./Data/Generated/image_adv_{i+1}.jpg', w=img_size, h=img_size)
# pdf.ln(2)
# second column
## 2. Add the results table
pdf.set_xy(epw /2 +pdf.l_margin, top_y)
pdf.set_font("Times", "B", size=12)
pdf.cell(epw / 2 + 10, 10, txt=f"Top-5 Accuracy against attacks", border=0, ln=1)  # ln: move the cursor to the next line
#pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
# Set column width to a fraction of the effective page width to distribute
# content evenly across the table and page
col_width = epw/10
# Since we do not need to draw lines anymore, there is no need to separate
# headers from data matrix.
data = [['Vanilla']+vanilla_output,
['attacks'] + x_val,
['default'] + untargeted_output,
['targeted'] + targeted_output,]
pdf.set_font('Times','',10.0)
pdf.ln(0.5)
# Text height is the same as current font size
th = pdf.font_size
# Here we add more padding by passing 2*th as height
#pdf.set_xy(epw /2 +pdf.l_margin, top_y)
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
for row in data:
for datum in row:
        # Enter data in columns
pdf.cell(col_width, 2*th, str(datum), border=1)
pdf.ln(2*th)
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
#####################
# 3. attack result graph
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
#pdf.set_xy(epw /2 +pdf.l_margin, top_y)
pdf.set_font("Times", "B", size=12)
pdf.cell(epw / 2 + 10, 10, f"Attack Results with graph", 0, 1)
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
pdf.image(f'./Data/Generated/graph.jpg', w=epw /2)
# 4. Advise
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
pdf.set_font("Times", "B", size=12)
pdf.cell(0, 10, f"Advise for your model robustness", 0, 1)
pdf.set_font("Helvetica", "I", 12)
#pdf.cell(w=0, h=0, txt=f"Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack. Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack.,Your model is significantly weak against CW L2 Attack", border=0, 1)
# pdf.write(h=5, txt=f"Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack. Your model is significantly weak against CW L2 Attack.Your model is significantly weak against CW L2 Attack.,Your model is significantly weak against CW L2 Attack")
pdf.set_xy(epw /2 +pdf.l_margin, pdf.get_y())
advice_data={'0to10 accuracy attacks' : 'None1', '10to100 accuracy attacks' : ''}
advice = ['The robustness of your model can vary depending on the complexity of your dataset. ']
advice.append('Your model cannot defend against ' + advice_data['0to10 accuracy attacks'])
if advice_data['10to100 accuracy attacks'] == '':
    advice.append(' Your model is hardly robust against the given attacks. Is it properly trained? ')
else:
    advice.append(' But it is relatively robust against ' + advice_data['10to100 accuracy attacks'])
advice.append('\nThis weakness can be caused by hyperparameter settings, maybe input bias, or input capacity and so many | 
Patronymic name, Matronymic name, etc
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_type: Optional[str] = field(
default=None,
metadata={
"name": "NameType",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class Alias:
"""
:ivar content:
:ivar type: Type of Alias. Example: Official, UnOfficial, Close
Circle, etc
:ivar name_type: Defines the name type of Alias. Example: Nick
Name, Pet Name, etc
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
name_type: Optional[str] = field(
default=None,
metadata={
"name": "NameType",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class GenerationIdentifier:
"""
:ivar content:
:ivar type: Defines the type of generation identifier. Example:
Family Titles
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class Suffix:
"""
:ivar content:
:ivar type: Defines the type of Suffix. Example: Compressed
Initials, Full suffixes, etc
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class GeneralSuffix:
"""
:ivar content:
:ivar type: Defines the type of General Suffix. Example:
Employment Status, Living Status, etc
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
type: Optional[str] = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class JointPersonName:
"""A container to define more than one person name.
Example: <NAME> and Mr.<NAME>
:ivar name_line: Name or part of the name as a free format text. If
the name structure has to be broken down into individual
elements, use PersonName Container.
:ivar person_name: Use this element to specify every member
separately.
:ivar other_element: Use this to import/use/reference name elements
from other namespaces
:ivar joint_name_connector: The connector used to join more than one
person name. Example: <NAME> AND <NAME>, where AND is the
JointNameConnector
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
class Meta:
namespace = X_NL_NAMESPACE
name_line: List[NameLineType] = field(
default_factory=list,
metadata={
"name": "NameLine",
"type": "Element",
"sequential": True,
}
)
person_name: List[PersonName] = field(
default_factory=list,
metadata={
"name": "PersonName",
"type": "Element",
"sequential": True,
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
}
)
joint_name_connector: Optional[str] = field(
default=None,
metadata={
"name": "JointNameConnector",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
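# Illustrative only (not part of the generated bindings): a minimal sketch of
# serialising one of these dataclasses with xsdata, assuming this module was
# generated by xsdata and that xsdata is installed. Only attributes defined
# above are set; nested name elements are omitted.
def _example_render_joint_person_name():
    from xsdata.formats.dataclass.serializers import XmlSerializer
    from xsdata.formats.dataclass.serializers.config import SerializerConfig

    serializer = XmlSerializer(config=SerializerConfig(pretty_print=True))
    return serializer.render(JointPersonName(joint_name_connector="AND"))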
@dataclass
class NameDetails:
"""
Container for defining the name of a Person or an Organisation.
:ivar name_line: Define name as a free format text. Use this when
the type of the entity (person or organisation) is unknown, or
not broken into individual elements or is beyond the provided
types.
:ivar person_name:
:ivar joint_person_name:
:ivar organisation_name_details:
    :ivar party_type: Indicates the type of entity described, namely a
        Person or an Organisation. An Organisation could be: Club,
        Association, Company, etc
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
:ivar addressee_indicator: Specific for name and address where the
addressee is specified. eg. ATTENTION, ter attentie van (in
Holland), etc
:ivar function:
:ivar dependency_name: Container for a name of a dependent person or
organisation. Example: <NAME>, C/O MSI Business Solutions
DependencyType: Person-Person/Person-Organisation Relationship
        (care of, wife of, position, etc). Can have a subelement with name
structure or reference another top-level element.
:ivar other_element: Use this to import/use/reference name elements
from other namespaces
:ivar name_details_key: Key identifier for the element for not
reinforced references from other elements. Not required to be
        unique for the document to be valid, but the application may get
        confused if it is not unique. Extend this schema by adding a unique
        constraint if needed.
"""
class Meta:
namespace = X_NL_NAMESPACE
name_line: List[NameLineType] = field(
default_factory=list,
metadata={
"name": "NameLine",
"type": "Element",
}
)
person_name: Optional[PersonName] = field(
default=None,
metadata={
"name": "PersonName",
"type": "Element",
}
)
joint_person_name: Optional[JointPersonName] = field(
default=None,
metadata={
"name": "JointPersonName",
"type": "Element",
}
)
organisation_name_details: Optional[OrganisationNameDetails] = field(
default=None,
metadata={
"name": "OrganisationNameDetails",
"type": "Element",
}
)
party_type: Optional[str] = field(
default=None,
metadata={
"name": "PartyType",
"type": "Attribute",
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
addressee_indicator: Optional["NameDetails.AddresseeIndicator"] = field(
default=None,
metadata={
"name": "AddresseeIndicator",
"type": "Element",
}
)
function: Optional[Function] = field(
default=None,
metadata={
"name": "Function",
"type": "Element",
}
)
dependency_name: Optional["NameDetails.DependencyName"] = field(
default=None,
metadata={
"name": "DependencyName",
"type": "Element",
}
)
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
}
)
name_details_key: Optional[str] = field(
default=None,
metadata={
"name": "NameDetailsKey",
"type": "Attribute",
}
)
@dataclass
class AddresseeIndicator:
"""
:ivar content:
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
"""
content: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##any",
"mixed": True,
}
)
code: Optional[str] = field(
default=None,
metadata={
"name": "Code",
"type": "Attribute",
}
)
other_attributes: Dict = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##other",
}
)
@dataclass
class DependencyName:
"""
:ivar name_line: Define name as a free format text. Use this
when the type of the entity (person or organisation) is
unknown, or not broken into individual elements or is beyond
the provided types.
:ivar person_name:
:ivar joint_person_name:
:ivar organisation_name_details:
        :ivar party_type: Indicates the type of entity described,
            namely a Person or an Organisation. An Organisation could be:
            Club, Association, Company, etc
:ivar code: Indicates the name element code defined by postal
standard groups like ECCMA, ADIS, UN/PROLIST for postal
services.
:ivar other_attributes:
:ivar other_element: Use this to import/use/reference elements
from other namespaces
:ivar dependency_type: Description of the dependency: in trust
of, on behalf of, etc.
:ivar name_details_key_ref: Reference to another NameDetails
element with no foreign key reinforcement. The referenced
element may be out of the document and the document is still
valid.
"""
name_line: List[NameLineType] = field(
default_factory=list,
metadata={
"name": "NameLine",
"type": "Element",
}
)
person_name: Optional[PersonName] = field(
default=None,
metadata={
"name": "PersonName",
"type": "Element",
}
)
joint_person_name: Optional[JointPersonName] = field(
default=None,
metadata={
"name": "JointPersonName",
"type": "Element",
}
)
organisation_name_details: Optional[OrganisationNameDetails] = field(
default=None,
metadata={
"name": "OrganisationNameDetails",
"type": "Element",
}
)
party_type: Optional[str] = field(
default=None,
metadata={
"name": | |
<reponame>a2cps/python-vbr
"""Autogenerated 2021-11-16T11:37:36.440435 by redcap_classfiles.py
"""
from ....pgrest import *
from ...constants import Constants
from ..rcconstants import REDCapConstants
from ..rcaptable import RcapTable
__all__ = ["RcapBpisfTheBriefPainInventoryV23ShortFormBpi"]
class RcapBpisfTheBriefPainInventoryV23ShortFormBpi(RcapTable):
"""Bpisf The Brief Pain Inventory V23 Short Form Bpi"""
__redcap_form_name = "bpisf_the_brief_pain_inventory_v23_short_form_bpi"
bpisf_the_brief_pain_inventory_v23_short_form_bpi_id = (
Constants.SERIAL_PRIMARY_KEY_COLUMN
)
bpisf_the_brief_pain_inventory_v23_short_form_bpi_complete = Column(
Integer, ForeignKey("status.status_id")
)
# Field Name was empty in Data Dictionary
# Field Type: text
# Choices: N/A
bpipainanatsiteareatxt = Column(String, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z1_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the head / face /...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z1_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z2_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the neck? Choose ...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z2_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z3_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the chest/breast?...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z3_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z4_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the abdomen / pel...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z4_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z5_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the RIGHT side: s...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z5_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z6_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the LEFT side: sh...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z6_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z7_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the upper back /...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z7_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z8_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the RIGHT side: h...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z8_dur = Column(Integer, nullable=True, comments=None)
# Ignored multiline Field Name in Data Dictionary
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpi_mbm_z9_rate = Column(Integer, nullable=True, comments=None)
# How long have you been experiencing pain in the LEFT side: hi...
# Field Type: radio
# Choices: 1, less than 1 month | 2, 1 month or more, but less than 6 months | 3, 6 months or more, but less than 2 years | 4, 2 years or more
bpi_mbm_z9_dur = Column(Integer, nullable=True, comments=None)
# Please rate your surgical site (chest) pain by choosing the n...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpiworstpainratingss = Column(Integer, nullable=True, comments=None)
# Please rate any other pain (excluding surgical site) by choos...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
bpiworstpainratingexclss = Column(Integer, nullable=True, comments=None)
# a. General Activity
# Field Type: radio
# Choices: 0, Does not interfere 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, Completely interferes 10
bpipainintfrgnrlactvtyscl = Column(Integer, nullable=True, comments=None)
# b. Mood
# Field Type: radio
# Choices: 0, Does not interfere 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, Completely interferes 10
bpipainintfrmoodscl = Column(Integer, nullable=True, comments=None)
# c. Walking ability
# Field Type: radio
# Choices: 0, Does not interfere 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, | |
<filename>experiments/bnci_horizon_experiment/bnci_utils.py
import numpy as np
import tensorflow as tf
import nengo_dl
from tensorflow.python.keras import Input, Model
import nengo
from tensorflow.python.keras.layers import Conv2D, Dropout, AveragePooling2D, Flatten, Dense, BatchNormalization, LSTM
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
from sklearn import metrics
# This file contains utility functions that are used across all notebooks during the BNCI dataset experiment
# This is mainly done to reduce the overall boilerplate in the notebooks as the code to run the network would be mostly
# the same in all cases
def load_dataset(dataset_file_path):
"""
Function for loading the dataset from npz numpy file
:param dataset_file_path: path to the dataset file
:return: loaded features and labels
"""
dataset = np.load(dataset_file_path)
return dataset['features'], dataset['labels']
def reshape_dataset(features, labels):
"""
Reshapes the dataset to be usable within NengoDL. The features will be transformed for input shape (14, 360, 1)
and the labels will be one-hot encoded. Note that the labels need to be named 'yes' and 'no' in order for the
metrics calculation function to work
:param features: numpy array containing features
:param labels: numpy array containing labels
:return: transformed features and labels
"""
# Convert labels to one hot encoding
labels = labels.reshape(-1, 1)
labels = OneHotEncoder().fit_transform(labels).toarray()
labels = labels.reshape((labels.shape[0], 1, -1))
# Reshape features for the NN
features = features.reshape((features.shape[0], 14, -1)) # reshape to channels x data
features = features.reshape((features.shape[0], 1, -1)) # add time dimension
return features, labels
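# Illustrative only: a quick sanity check of reshape_dataset on synthetic data,
# assuming 14-channel windows of 360 samples and string labels (hypothetical shapes).
def _demo_reshape_dataset():
    rng = np.random.default_rng(0)
    feats = rng.normal(size=(8, 14 * 360))  # 8 synthetic trials
    labs = np.array(['yes', 'no'] * 4)      # balanced dummy labels
    f, l = reshape_dataset(feats, labs)
    assert f.shape == (8, 1, 14 * 360)      # flattened channels x samples, plus time dimension
    assert l.shape == (8, 1, 2)             # one-hot encoded labels, plus time dimension
    return f, l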
def cnn_model(seed):
inp = Input(shape=(14, 360, 1), name='input_layer')
conv1 = Conv2D(filters=32, kernel_size=(5, 5), activation=tf.nn.relu, padding='same')(inp)
dropout1 = Dropout(0.2, seed=seed)(conv1)
avg_pool1 = AveragePooling2D(pool_size=(2, 2))(dropout1)
conv2 = Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu)(avg_pool1)
dropout2 = Dropout(0.2, seed=seed)(conv2)
avg_pool2 = AveragePooling2D(pool_size=(2, 2))(dropout2)
flatten = Flatten()(avg_pool2)
dense1 = Dense(512, activation=tf.nn.relu)(flatten)
dropout3 = Dropout(0.2, seed=seed)(dense1)
dense2 = Dense(256, activation=tf.nn.relu)(dropout3)
output = Dense(2, activation=tf.nn.softmax, name='output_layer')(dense2)
return Model(inputs=inp, outputs=output)
def original_p300_model(seed=0):
"""
Function to create the model from P300 dataset
:return: Tensorflow model
"""
inp = Input(shape=(14, 360, 1), name='input_layer')
conv2d = Conv2D(filters=6, kernel_size=(3, 3), activation=tf.nn.relu)(inp)
dropout1 = Dropout(0.5, seed=seed)(conv2d)
avg_pooling = AveragePooling2D(pool_size=(1, 8), padding='same')(dropout1)
flatten = Flatten()(avg_pooling)
dense1 = Dense(100, activation=tf.nn.relu)(flatten)
batch_norm = BatchNormalization()(dense1)
dropout2 = Dropout(0.5, seed=seed)(batch_norm)
output = Dense(2, activation=tf.nn.softmax, name='output_layer')(dropout2)
return Model(inputs=inp, outputs=output)
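# Illustrative only: print the layer summaries of both architectures without
# training anything (useful when comparing parameter counts in the notebooks).
def _print_model_summaries(seed=0):
    cnn_model(seed).summary()
    original_p300_model(seed).summary()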
def get_metrics(simulator, output_layer, x_test, y_test, minibatch_size, network_name):
"""
Function for calculating metrics for Nengo simulator
:param minibatch_size: batch size used in simulator to truncate redundant samples in x_test and y_test
:param simulator: simulator instance
:param output_layer: output layer reference
:param x_test: features of the testing subset
:param y_test: labels of the testing subset
:param network_name: name of the network
:return: accuracy, recall and precision metrics
"""
# Truncate the remaining number of samples since the predict function does use minibatch
samples = (x_test.shape[0] // minibatch_size) * minibatch_size
x_test, y_test = x_test[:samples], y_test[:samples]
predictions = simulator.predict(x_test)[output_layer] # get result from output layer when predicting on x_test
predictions = predictions[:, -1, :] # get the last timestep
predictions_argm = np.argmax(predictions, axis=-1) # get predicted label
y_test = np.squeeze(y_test, axis=1) # remove time dimension
y_test_argm = np.argmax(y_test, axis=-1) # get labels
    # Binary-classification metrics below use average='binary' (positive class = 1)
precision = metrics.precision_score(y_true=y_test_argm, y_pred=predictions_argm,
average='binary') # get precision score
recall = metrics.recall_score(y_true=y_test_argm, y_pred=predictions_argm, average='binary') # get recall
f1 = metrics.f1_score(y_true=y_test_argm, y_pred=predictions_argm, average='binary') # get f1 score
accuracy = metrics.accuracy_score(y_true=y_test_argm, y_pred=predictions_argm) # get accuracy
confusion_matrix = metrics.confusion_matrix(y_true=y_test_argm, y_pred=predictions_argm) # get confusion matrix
# Log the statistics
print(f'{network_name}: accuracy = {accuracy * 100}%, precision = {precision}, '
f'recall = {recall}, f1 = {f1}')
print('Confusion matrix:')
print(confusion_matrix)
return accuracy, precision, recall, f1, confusion_matrix
def get_metrics_keras(model, x_test, y_test, network_name):
"""
Computes metrics in keras environment - i.e., if Nengo is not used (during LSTM simulation)
:param model: reference to the model for calling predict function
:param x_test: testing features
:param y_test: testing labels
:param network_name: name of the network for output
    :return: accuracy, precision, recall, f1 and confusion matrix
"""
predictions = model.predict(x_test)
predictions = np.argmax(predictions, axis=-1)
y_test = np.argmax(y_test, axis=-1)
precision = metrics.precision_score(y_true=y_test, y_pred=predictions, average='binary') # get precision score
recall = metrics.recall_score(y_true=y_test, y_pred=predictions, average='binary') # get recall
f1 = metrics.f1_score(y_true=y_test, y_pred=predictions, average='binary') # get f1 score
accuracy = metrics.accuracy_score(y_true=y_test, y_pred=predictions) # get accuracy
confusion_matrix = metrics.confusion_matrix(y_true=y_test, y_pred=predictions) # get confusion matrix
# Log the statistics
print(f'{network_name}: accuracy = {accuracy * 100}%, precision = {precision}, '
f'recall = {recall}, f1 = {f1}')
print('Confusion matrix:')
print(confusion_matrix)
return accuracy, precision, recall, f1, confusion_matrix
def run_ann(model, train, test, params_save_path, iteration, optimizer, loss, callbacks=None, valid=None,
shuffle_training=True,
batch_size=16,
num_epochs=30):
"""
Run analog network with cross-validation
:param batch_size: batch size during training
:param model: reference to the tensorflow model
:param train: pair of training data (x_train, y_train)
:param valid: pair of validation data (x_val, y_val)
:param test: pair of testing data (x_test, y_test)
:param params_save_path: output path to save weights of the network
:param iteration: number of the iteration in CV
    :param optimizer: optimizer instance passed to simulator.compile
    :param loss: loss function passed to simulator.compile
    :param callbacks: optional list of Keras callbacks (e.g. early stopping)
    :param shuffle_training: shuffle samples
:param num_epochs: number of epochs to train for
:return: accuracy, precision, recall, f1 and confusion matrix from the testing data
"""
x_train, y_train = train[0], train[1]
x_test, y_test = test[0], test[1]
if valid is not None:
x_valid, y_valid = valid[0], valid[1]
converter = nengo_dl.Converter(model)
with nengo_dl.Simulator(converter.net, minibatch_size=batch_size) as simulator:
simulator.compile(optimizer=optimizer,
loss=loss,
metrics=['accuracy'])
input_layer = converter.inputs[model.get_layer('input_layer')] # get the input layer reference
output_layer = converter.outputs[model.get_layer('output_layer')] # get the output layer reference
# fit the model with the training data
simulator.fit(
x={input_layer: x_train}, y={output_layer: y_train},
validation_data=(
{input_layer: x_valid}, {output_layer: y_valid}
) if valid is not None else None,
epochs=num_epochs,
shuffle=shuffle_training,
callbacks=callbacks
# early stop to avoid overfitting
)
simulator.save_params(params_save_path) # save weights to the file
# Get the statistics
accuracy, precision, recall, f1, confusion_matrix = get_metrics(simulator, output_layer, x_test, y_test,
batch_size,
f'{iteration}. CNN')
return {
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': f1,
'confusion_matrix': confusion_matrix
}
def run_snn(model, x_test, y_test, params_load_path, iteration, timesteps=50,
scale_firing_rates=1000,
synapse=0.01,
batch_size=16):
"""
Run model in spiking setting
:param batch_size: batch size
:param model: model reference
:param x_test: testing features
:param y_test: testing labels
:param params_load_path: path to load parameters
:param iteration: number of current iteration
:param timesteps: number of timesteps
:param scale_firing_rates: firing rate scaling
:param synapse: synaptic smoothing
:return: accuracy, precision, recall, f1 and confusion matrix from the testing data
"""
converter = nengo_dl.Converter(
model,
swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
scale_firing_rates=scale_firing_rates,
synapse=synapse
) # create a Nengo converter object and swap all relu activations with spiking relu
with converter.net:
nengo_dl.configure_settings(stateful=False)
output_layer = converter.outputs[model.get_layer('output_layer')] # output layer for simulator
x_test_tiled = np.tile(x_test, (1, timesteps, 1)) # tile test data to timesteps
with nengo_dl.Simulator(converter.net) as simulator:
simulator.load_params(params_load_path)
# Get the statistics
accuracy, precision, recall, f1, confusion_matrix = get_metrics(simulator, output_layer, x_test_tiled, y_test,
batch_size,
f'{iteration}. CNN (SNN conversion)')
return {
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': f1,
'confusion_matrix': confusion_matrix
}
def create_data_df(ann, snn, num_iterations):
"""
Function takes in ann and snn list with dictionaries containing metrics in each iteration
and maps them to dictionary that can be used in pandas
:param ann: list of ANN results from function run_ann
:param snn: list of SNN results from function run_snn
:param num_iterations: the number of iterations
:return: pandas dataframe with statistics from each iteration for SNN and ANN
"""
return pd.DataFrame({
'iterations': [x for x in range(1, num_iterations + 1)],
'ann_accuracy': [x['accuracy'] for x in ann],
'ann_precision': [x['precision'] for x in ann],
'ann_recall': [x['recall'] for x in ann],
'ann_f1': [x['f1'] for x in ann],
'snn_accuracy': [x['accuracy'] for x in snn],
'snn_precision': [x['precision'] for x in snn],
'snn_recall': [x['recall'] for x in snn],
'snn_f1': [x['f1'] for x in snn]
})
def create_stats_df(df: pd.DataFrame):
"""
    Aggregates the per-iteration metrics into summary statistics
    :param df: dataframe from create_data_df function
    :return: pandas dataframe with the average and maximum of each metric (and the accuracy standard deviation) per model
"""
data_stats = {
'models': ['ann', 'snn'],
'average_accuracy': [],
'max_accuracy': [],
'accuracy_std': [],
'average_precision': [],
'max_precision': [],
'average_recall': [],
'max_recall': [],
'average_f1': [],
'max_f1': []
}
    # iterate over both model prefixes ('ann', 'snn') so the metric lookups are not duplicated
for model in ['ann', 'snn']:
data_stats['average_accuracy'].append(df[f'{model}_accuracy'].mean())
data_stats['accuracy_std'].append(df[f'{model}_accuracy'].std())
data_stats['average_precision'].append(df[f'{model}_precision'].mean())
data_stats['average_recall'].append(df[f'{model}_recall'].mean())
data_stats['average_f1'].append(df[f'{model}_f1'].mean())
data_stats['max_accuracy'].append(df[f'{model}_accuracy'].max())
data_stats['max_f1'].append(df[f'{model}_f1'].max())
data_stats['max_precision'].append(df[f'{model}_precision'].max())
data_stats['max_recall'].append(df[f'{model}_recall'].max())
return pd.DataFrame(data_stats)
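# Illustrative only (hypothetical dataset path and fold split): how these helpers
# are typically chained together in the notebooks; nothing here runs at import time.
def _example_cross_validation(dataset_path='bnci_dataset.npz', n_folds=5):
    from sklearn.model_selection import KFold
    features, labels = load_dataset(dataset_path)
    features, labels = reshape_dataset(features, labels)
    ann_results, snn_results = [], []
    for i, (train_idx, test_idx) in enumerate(KFold(n_splits=n_folds).split(features), start=1):
        model = cnn_model(seed=0)
        params_path = f'./params_fold_{i}'
        ann_results.append(run_ann(model,
                                   (features[train_idx], labels[train_idx]),
                                   (features[test_idx], labels[test_idx]),
                                   params_path, i,
                                   optimizer=tf.optimizers.Adam(),
                                   loss=tf.losses.CategoricalCrossentropy()))
        snn_results.append(run_snn(model, features[test_idx], labels[test_idx], params_path, i))
    df = create_data_df(ann_results, snn_results, n_folds)
    return df, create_stats_df(df)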
def print_confusion_matrices(ann, snn=None):
"""
Prints confusion matrix in each iteration
:param ann: list of results for ANN model from run_ann function
:param snn: list of results for SNN model from run_snn function
"""
# | |
<filename>mavsdk/generated/telemetry_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import telemetry_pb2 as telemetry__pb2
class TelemetryServiceStub(object):
"""
Allow users to get vehicle telemetry and state information
(e.g. battery, GPS, RC connection, flight mode etc.) and set telemetry update rates.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SubscribePosition = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribePosition',
request_serializer=telemetry__pb2.SubscribePositionRequest.SerializeToString,
response_deserializer=telemetry__pb2.PositionResponse.FromString,
)
self.SubscribeHome = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeHome',
request_serializer=telemetry__pb2.SubscribeHomeRequest.SerializeToString,
response_deserializer=telemetry__pb2.HomeResponse.FromString,
)
self.SubscribeInAir = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeInAir',
request_serializer=telemetry__pb2.SubscribeInAirRequest.SerializeToString,
response_deserializer=telemetry__pb2.InAirResponse.FromString,
)
self.SubscribeLandedState = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeLandedState',
request_serializer=telemetry__pb2.SubscribeLandedStateRequest.SerializeToString,
response_deserializer=telemetry__pb2.LandedStateResponse.FromString,
)
self.SubscribeArmed = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeArmed',
request_serializer=telemetry__pb2.SubscribeArmedRequest.SerializeToString,
response_deserializer=telemetry__pb2.ArmedResponse.FromString,
)
self.SubscribeAttitudeQuaternion = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeAttitudeQuaternion',
request_serializer=telemetry__pb2.SubscribeAttitudeQuaternionRequest.SerializeToString,
response_deserializer=telemetry__pb2.AttitudeQuaternionResponse.FromString,
)
self.SubscribeAttitudeEuler = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeAttitudeEuler',
request_serializer=telemetry__pb2.SubscribeAttitudeEulerRequest.SerializeToString,
response_deserializer=telemetry__pb2.AttitudeEulerResponse.FromString,
)
self.SubscribeAttitudeAngularVelocityBody = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeAttitudeAngularVelocityBody',
request_serializer=telemetry__pb2.SubscribeAttitudeAngularVelocityBodyRequest.SerializeToString,
response_deserializer=telemetry__pb2.AttitudeAngularVelocityBodyResponse.FromString,
)
self.SubscribeCameraAttitudeQuaternion = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeCameraAttitudeQuaternion',
request_serializer=telemetry__pb2.SubscribeCameraAttitudeQuaternionRequest.SerializeToString,
response_deserializer=telemetry__pb2.CameraAttitudeQuaternionResponse.FromString,
)
self.SubscribeCameraAttitudeEuler = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeCameraAttitudeEuler',
request_serializer=telemetry__pb2.SubscribeCameraAttitudeEulerRequest.SerializeToString,
response_deserializer=telemetry__pb2.CameraAttitudeEulerResponse.FromString,
)
self.SubscribeVelocityNed = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeVelocityNed',
request_serializer=telemetry__pb2.SubscribeVelocityNedRequest.SerializeToString,
response_deserializer=telemetry__pb2.VelocityNedResponse.FromString,
)
self.SubscribeGpsInfo = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeGpsInfo',
request_serializer=telemetry__pb2.SubscribeGpsInfoRequest.SerializeToString,
response_deserializer=telemetry__pb2.GpsInfoResponse.FromString,
)
self.SubscribeBattery = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeBattery',
request_serializer=telemetry__pb2.SubscribeBatteryRequest.SerializeToString,
response_deserializer=telemetry__pb2.BatteryResponse.FromString,
)
self.SubscribeFlightMode = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeFlightMode',
request_serializer=telemetry__pb2.SubscribeFlightModeRequest.SerializeToString,
response_deserializer=telemetry__pb2.FlightModeResponse.FromString,
)
self.SubscribeHealth = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeHealth',
request_serializer=telemetry__pb2.SubscribeHealthRequest.SerializeToString,
response_deserializer=telemetry__pb2.HealthResponse.FromString,
)
self.SubscribeRcStatus = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeRcStatus',
request_serializer=telemetry__pb2.SubscribeRcStatusRequest.SerializeToString,
response_deserializer=telemetry__pb2.RcStatusResponse.FromString,
)
self.SubscribeStatusText = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeStatusText',
request_serializer=telemetry__pb2.SubscribeStatusTextRequest.SerializeToString,
response_deserializer=telemetry__pb2.StatusTextResponse.FromString,
)
self.SubscribeActuatorControlTarget = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeActuatorControlTarget',
request_serializer=telemetry__pb2.SubscribeActuatorControlTargetRequest.SerializeToString,
response_deserializer=telemetry__pb2.ActuatorControlTargetResponse.FromString,
)
self.SubscribeActuatorOutputStatus = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeActuatorOutputStatus',
request_serializer=telemetry__pb2.SubscribeActuatorOutputStatusRequest.SerializeToString,
response_deserializer=telemetry__pb2.ActuatorOutputStatusResponse.FromString,
)
self.SubscribeOdometry = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeOdometry',
request_serializer=telemetry__pb2.SubscribeOdometryRequest.SerializeToString,
response_deserializer=telemetry__pb2.OdometryResponse.FromString,
)
self.SubscribePositionVelocityNed = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribePositionVelocityNed',
request_serializer=telemetry__pb2.SubscribePositionVelocityNedRequest.SerializeToString,
response_deserializer=telemetry__pb2.PositionVelocityNedResponse.FromString,
)
self.SubscribeGroundTruth = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeGroundTruth',
request_serializer=telemetry__pb2.SubscribeGroundTruthRequest.SerializeToString,
response_deserializer=telemetry__pb2.GroundTruthResponse.FromString,
)
self.SubscribeFixedwingMetrics = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeFixedwingMetrics',
request_serializer=telemetry__pb2.SubscribeFixedwingMetricsRequest.SerializeToString,
response_deserializer=telemetry__pb2.FixedwingMetricsResponse.FromString,
)
self.SubscribeImu = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeImu',
request_serializer=telemetry__pb2.SubscribeImuRequest.SerializeToString,
response_deserializer=telemetry__pb2.ImuResponse.FromString,
)
self.SubscribeHealthAllOk = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeHealthAllOk',
request_serializer=telemetry__pb2.SubscribeHealthAllOkRequest.SerializeToString,
response_deserializer=telemetry__pb2.HealthAllOkResponse.FromString,
)
self.SubscribeUnixEpochTime = channel.unary_stream(
'/mavsdk.rpc.telemetry.TelemetryService/SubscribeUnixEpochTime',
request_serializer=telemetry__pb2.SubscribeUnixEpochTimeRequest.SerializeToString,
response_deserializer=telemetry__pb2.UnixEpochTimeResponse.FromString,
)
self.SetRatePosition = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRatePosition',
request_serializer=telemetry__pb2.SetRatePositionRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRatePositionResponse.FromString,
)
self.SetRateHome = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateHome',
request_serializer=telemetry__pb2.SetRateHomeRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateHomeResponse.FromString,
)
self.SetRateInAir = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateInAir',
request_serializer=telemetry__pb2.SetRateInAirRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateInAirResponse.FromString,
)
self.SetRateLandedState = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateLandedState',
request_serializer=telemetry__pb2.SetRateLandedStateRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateLandedStateResponse.FromString,
)
self.SetRateAttitude = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateAttitude',
request_serializer=telemetry__pb2.SetRateAttitudeRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateAttitudeResponse.FromString,
)
self.SetRateCameraAttitude = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateCameraAttitude',
request_serializer=telemetry__pb2.SetRateCameraAttitudeRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateCameraAttitudeResponse.FromString,
)
self.SetRateVelocityNed = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateVelocityNed',
request_serializer=telemetry__pb2.SetRateVelocityNedRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateVelocityNedResponse.FromString,
)
self.SetRateGpsInfo = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateGpsInfo',
request_serializer=telemetry__pb2.SetRateGpsInfoRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateGpsInfoResponse.FromString,
)
self.SetRateBattery = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateBattery',
request_serializer=telemetry__pb2.SetRateBatteryRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateBatteryResponse.FromString,
)
self.SetRateRcStatus = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateRcStatus',
request_serializer=telemetry__pb2.SetRateRcStatusRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateRcStatusResponse.FromString,
)
self.SetRateActuatorControlTarget = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateActuatorControlTarget',
request_serializer=telemetry__pb2.SetRateActuatorControlTargetRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateActuatorControlTargetResponse.FromString,
)
self.SetRateActuatorOutputStatus = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateActuatorOutputStatus',
request_serializer=telemetry__pb2.SetRateActuatorOutputStatusRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateActuatorOutputStatusResponse.FromString,
)
self.SetRateOdometry = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateOdometry',
request_serializer=telemetry__pb2.SetRateOdometryRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateOdometryResponse.FromString,
)
self.SetRatePositionVelocityNed = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRatePositionVelocityNed',
request_serializer=telemetry__pb2.SetRatePositionVelocityNedRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRatePositionVelocityNedResponse.FromString,
)
self.SetRateGroundTruth = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateGroundTruth',
request_serializer=telemetry__pb2.SetRateGroundTruthRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateGroundTruthResponse.FromString,
)
self.SetRateFixedwingMetrics = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateFixedwingMetrics',
request_serializer=telemetry__pb2.SetRateFixedwingMetricsRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateFixedwingMetricsResponse.FromString,
)
self.SetRateImu = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateImu',
request_serializer=telemetry__pb2.SetRateImuRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateImuResponse.FromString,
)
self.SetRateUnixEpochTime = channel.unary_unary(
'/mavsdk.rpc.telemetry.TelemetryService/SetRateUnixEpochTime',
request_serializer=telemetry__pb2.SetRateUnixEpochTimeRequest.SerializeToString,
response_deserializer=telemetry__pb2.SetRateUnixEpochTimeResponse.FromString,
)
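# Illustrative only (not produced by the protoc plugin): a minimal client sketch,
# assuming a mavsdk_server instance is reachable on localhost:50051 (hypothetical address).
def _example_subscribe_position():
    channel = grpc.insecure_channel('localhost:50051')
    stub = TelemetryServiceStub(channel)
    request = telemetry__pb2.SubscribePositionRequest()
    for response in stub.SubscribePosition(request):  # server-streaming RPC
        print(response)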
class TelemetryServiceServicer(object):
"""
Allow users to get vehicle telemetry and state information
(e.g. battery, GPS, RC connection, flight mode etc.) and set telemetry update rates.
"""
def SubscribePosition(self, request, context):
"""Subscribe to 'position' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeHome(self, request, context):
"""Subscribe to 'home position' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeInAir(self, request, context):
"""Subscribe to in-air updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeLandedState(self, request, context):
"""Subscribe to landed state updates
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeArmed(self, request, context):
"""Subscribe to armed updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeAttitudeQuaternion(self, request, context):
"""Subscribe to 'attitude' updates (quaternion).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeAttitudeEuler(self, request, context):
"""Subscribe to 'attitude' updates (Euler).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeAttitudeAngularVelocityBody(self, request, context):
"""Subscribe to 'attitude' updates (angular velocity)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeCameraAttitudeQuaternion(self, request, context):
"""Subscribe to 'camera attitude' updates (quaternion).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeCameraAttitudeEuler(self, request, context):
"""Subscribe to 'camera attitude' updates (Euler).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeVelocityNed(self, request, context):
"""Subscribe to 'ground speed' updates (NED).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeGpsInfo(self, request, context):
"""Subscribe to 'GPS info' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeBattery(self, request, context):
"""Subscribe to 'battery' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeFlightMode(self, request, context):
"""Subscribe to 'flight mode' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeHealth(self, request, context):
"""Subscribe to 'health' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeRcStatus(self, request, context):
"""Subscribe to 'RC status' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeStatusText(self, request, context):
"""Subscribe to 'status text' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeActuatorControlTarget(self, request, context):
"""Subscribe to 'actuator control target' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeActuatorOutputStatus(self, request, context):
"""Subscribe to 'actuator output status' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeOdometry(self, request, context):
"""Subscribe to 'odometry' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribePositionVelocityNed(self, request, context):
"""Subscribe to 'position velocity' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeGroundTruth(self, request, context):
"""Subscribe to 'ground truth' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeFixedwingMetrics(self, request, context):
"""Subscribe to 'fixedwing metrics' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeImu(self, request, context):
"""Subscribe to 'IMU' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeHealthAllOk(self, request, context):
"""Subscribe to 'HealthAllOk' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SubscribeUnixEpochTime(self, request, context):
"""Subscribe to 'unix epoch time' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRatePosition(self, request, context):
"""Set rate to 'position' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateHome(self, request, context):
"""Set rate to 'home position' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateInAir(self, request, context):
"""Set rate to in-air updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateLandedState(self, request, context):
"""Set rate to landed state updates
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateAttitude(self, request, context):
"""Set rate to 'attitude' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateCameraAttitude(self, request, context):
"""Set rate of camera attitude updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateVelocityNed(self, request, context):
"""Set rate to 'ground speed' updates (NED).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateGpsInfo(self, request, context):
"""Set rate to 'GPS info' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateBattery(self, request, context):
"""Set rate to 'battery' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateRcStatus(self, request, context):
"""Set rate to 'RC status' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateActuatorControlTarget(self, request, context):
"""Set rate to 'actuator control target' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateActuatorOutputStatus(self, request, context):
"""Set rate to 'actuator output status' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateOdometry(self, request, context):
"""Set rate to 'odometry' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRatePositionVelocityNed(self, request, context):
"""Set rate to 'position velocity' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateGroundTruth(self, request, context):
"""Set rate to 'ground truth' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateFixedwingMetrics(self, request, context):
"""Set rate to 'fixedwing metrics' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateImu(self, request, context):
"""Set rate to 'IMU' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetRateUnixEpochTime(self, request, context):
"""Set rate to 'unix epoch time' updates.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_TelemetryServiceServicer_to_server(servicer, server):
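    # Each entry maps an RPC name onto a unary-stream handler: the handler
    # deserializes the incoming request with the generated protobuf class and
    # serializes every streamed response before it is sent back to the client.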
rpc_method_handlers = {
'SubscribePosition': grpc.unary_stream_rpc_method_handler(
servicer.SubscribePosition,
request_deserializer=telemetry__pb2.SubscribePositionRequest.FromString,
response_serializer=telemetry__pb2.PositionResponse.SerializeToString,
),
'SubscribeHome': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeHome,
request_deserializer=telemetry__pb2.SubscribeHomeRequest.FromString,
response_serializer=telemetry__pb2.HomeResponse.SerializeToString,
),
'SubscribeInAir': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeInAir,
request_deserializer=telemetry__pb2.SubscribeInAirRequest.FromString,
response_serializer=telemetry__pb2.InAirResponse.SerializeToString,
),
'SubscribeLandedState': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeLandedState,
request_deserializer=telemetry__pb2.SubscribeLandedStateRequest.FromString,
response_serializer=telemetry__pb2.LandedStateResponse.SerializeToString,
),
'SubscribeArmed': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeArmed,
request_deserializer=telemetry__pb2.SubscribeArmedRequest.FromString,
response_serializer=telemetry__pb2.ArmedResponse.SerializeToString,
),
'SubscribeAttitudeQuaternion': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeAttitudeQuaternion,
request_deserializer=telemetry__pb2.SubscribeAttitudeQuaternionRequest.FromString,
response_serializer=telemetry__pb2.AttitudeQuaternionResponse.SerializeToString,
),
'SubscribeAttitudeEuler': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeAttitudeEuler,
request_deserializer=telemetry__pb2.SubscribeAttitudeEulerRequest.FromString,
response_serializer=telemetry__pb2.AttitudeEulerResponse.SerializeToString,
),
'SubscribeAttitudeAngularVelocityBody': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeAttitudeAngularVelocityBody,
request_deserializer=telemetry__pb2.SubscribeAttitudeAngularVelocityBodyRequest.FromString,
response_serializer=telemetry__pb2.AttitudeAngularVelocityBodyResponse.SerializeToString,
),
'SubscribeCameraAttitudeQuaternion': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeCameraAttitudeQuaternion,
request_deserializer=telemetry__pb2.SubscribeCameraAttitudeQuaternionRequest.FromString,
response_serializer=telemetry__pb2.CameraAttitudeQuaternionResponse.SerializeToString,
),
'SubscribeCameraAttitudeEuler': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeCameraAttitudeEuler,
request_deserializer=telemetry__pb2.SubscribeCameraAttitudeEulerRequest.FromString,
response_serializer=telemetry__pb2.CameraAttitudeEulerResponse.SerializeToString,
),
'SubscribeVelocityNed': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeVelocityNed,
request_deserializer=telemetry__pb2.SubscribeVelocityNedRequest.FromString,
response_serializer=telemetry__pb2.VelocityNedResponse.SerializeToString,
),
'SubscribeGpsInfo': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeGpsInfo,
request_deserializer=telemetry__pb2.SubscribeGpsInfoRequest.FromString,
response_serializer=telemetry__pb2.GpsInfoResponse.SerializeToString,
),
'SubscribeBattery': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeBattery,
request_deserializer=telemetry__pb2.SubscribeBatteryRequest.FromString,
response_serializer=telemetry__pb2.BatteryResponse.SerializeToString,
),
'SubscribeFlightMode': grpc.unary_stream_rpc_method_handler(
servicer.SubscribeFlightMode,
request_deserializer=telemetry__pb2.SubscribeFlightModeRequest.FromString,
response_serializer=telemetry__pb2.FlightModeResponse.SerializeToString,
| |
dealing with a unix domain socket
if host.endswith('.sock'):
if not hasattr(socket, "AF_UNIX"):
raise pymongo.errors.ConnectionFailure(
"UNIX-sockets are not supported on this system")
addrinfos = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, host)]
else:
# Don't try IPv6 if we don't support it. Also skip it if host
# is 'localhost' (::1 is fine). Avoids slow connect issues
# like PYTHON-356.
family = socket.AF_INET
if socket.has_ipv6 and host != 'localhost':
family = socket.AF_UNSPEC
# TODO: use Tornado 3's async resolver.
addrinfos = [
(af, socktype, proto, sa) for af, socktype, proto, dummy, sa in
socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)]
err = None
for res in addrinfos:
af, socktype, proto, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
motor_sock = MotorSocket(
sock, self.io_loop, use_ssl=self.use_ssl,
certfile=self.ssl_certfile, keyfile=self.ssl_keyfile,
ca_certs=self.ssl_ca_certs, cert_reqs=self.ssl_cert_reqs)
if af != getattr(socket, 'AF_UNIX', None):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
motor_sock.settimeout(self.conn_timeout or 20.0)
# Important to increment the count before beginning to connect.
self.motor_sock_counter += 1
# MotorSocket pauses this greenlet and resumes when connected.
motor_sock.connect(sa)
return motor_sock
            except socket.error as e:
self.motor_sock_counter -= 1
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
else:
# This likely means we tried to connect to an IPv6 only
# host with an OS/kernel or Python interpreter that doesn't
# support IPv6. The test case is Jython2.5.1 which doesn't
# support IPv6 at all.
raise socket.error('getaddrinfo failed')
def connect(self, pair):
"""Connect to Mongo and return a new connected MotorSocket. Note that
the pool does not keep a reference to the socket -- you must call
maybe_return_socket() when you're done with it.
"""
child_gr = greenlet.getcurrent()
main = child_gr.parent
assert main, "Should be on child greenlet"
if self.max_size and self.motor_sock_counter >= self.max_size:
if self.max_waiters and len(self.queue) >= self.max_waiters:
raise self._create_wait_queue_timeout()
waiter = stack_context.wrap(child_gr.switch)
self.queue.append(waiter)
if self.wait_queue_timeout is not None:
deadline = time.time() + self.wait_queue_timeout
timeout = self.io_loop.add_timeout(
deadline,
functools.partial(
child_gr.throw,
pymongo.errors.ConnectionFailure,
self._create_wait_queue_timeout()))
self.waiter_timeouts[waiter] = timeout
# Yield until maybe_return_socket passes spare socket in.
return main.switch()
else:
motor_sock = self.create_connection(pair)
motor_sock.settimeout(self.net_timeout)
return SocketInfo(motor_sock, self.pool_id)
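    # Checkout/return contract (illustrative sketch, not part of the original
    # module): every socket checked out of the pool should eventually be handed
    # back, e.g.
    #
    #     sock_info = pool.get_socket(pair)
    #     try:
    #         ...  # use sock_info.sock
    #     finally:
    #         pool.maybe_return_socket(sock_info)
    #
    # `pool` and `pair` are placeholder names; as the docstring above notes,
    # the pool keeps no reference to a checked-out socket.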
def get_socket(self, pair=None, force=False):
"""Get a socket from the pool.
        Returns a :class:`SocketInfo` object wrapping a connected
        :class:`MotorSocket`.
:Parameters:
- `pair`: optional (hostname, port) tuple
- `force`: optional boolean, forces a connection to be returned
without blocking, even if `max_size` has been reached.
"""
forced = False
if force:
# If we're doing an internal operation, attempt to play nicely with
# max_size, but if there is no open "slot" force the connection
# and mark it as forced so we don't decrement motor_sock_counter
# when it's returned.
if self.motor_sock_counter >= self.max_size:
forced = True
if self.sockets:
sock_info, from_pool = self.sockets.pop(), True
sock_info = self._check(sock_info, pair)
else:
sock_info, from_pool = self.connect(pair), False
sock_info.forced = forced
sock_info.last_checkout = time.time()
return sock_info
def async_get_socket(self, pair=None):
"""Get a ``Future`` which will resolve to a socket."""
loop = self.io_loop
future = Future()
def _get_socket():
# Runs on child greenlet.
try:
result = self.get_socket(pair)
loop.add_callback(functools.partial(future.set_result, result))
            except Exception as e:
loop.add_callback(functools.partial(future.set_exception, e))
# Start running the operation on a greenlet.
greenlet.greenlet(_get_socket).switch()
return future
def start_request(self):
raise NotImplementedError("Motor doesn't implement requests")
in_request = end_request = start_request
def discard_socket(self, sock_info):
"""Close and discard the active socket."""
sock_info.close()
def maybe_return_socket(self, sock_info):
"""Return the socket to the pool.
In PyMongo this method only returns the socket if it's not the request
socket, but Motor doesn't do requests.
"""
if sock_info.closed:
if not sock_info.forced:
self.motor_sock_counter -= 1
return
# Give it to the greenlet at the head of the line, or return it to the
# pool, or discard it.
if self.queue:
waiter = self.queue.popleft()
if waiter in self.waiter_timeouts:
self.io_loop.remove_timeout(self.waiter_timeouts.pop(waiter))
with stack_context.NullContext():
self.io_loop.add_callback(functools.partial(waiter, sock_info))
elif (len(self.sockets) < self.max_size
and sock_info.pool_id == self.pool_id):
self.sockets.add(sock_info)
else:
sock_info.close()
if not sock_info.forced:
self.motor_sock_counter -= 1
if sock_info.forced:
sock_info.forced = False
def _check(self, sock_info, pair):
"""This side-effecty function checks if this pool has been reset since
the last time this socket was used, or if the socket has been closed by
some external network error, and if so, attempts to create a new socket.
If this connection attempt fails we reset the pool and reraise the
error.
Checking sockets lets us avoid seeing *some*
:class:`~pymongo.errors.AutoReconnect` exceptions on server
hiccups, etc. We only do this if it's been > 1 second since
the last socket checkout, to keep performance reasonable - we
can't avoid AutoReconnects completely anyway.
"""
error = False
if sock_info.closed:
error = True
elif self.pool_id != sock_info.pool_id:
sock_info.close()
error = True
elif time.time() - sock_info.last_checkout > 1:
if _closed(sock_info.sock):
sock_info.close()
error = True
if not error:
return sock_info
else:
try:
return self.connect(pair)
except socket.error:
self.reset()
raise
def __del__(self):
# Avoid ResourceWarnings in Python 3.
for sock_info in self.sockets:
sock_info.close()
def _create_wait_queue_timeout(self):
return pymongo.errors.ConnectionFailure(
'Timed out waiting for socket from pool with max_size %r and'
' wait_queue_timeout %r' % (
self.max_size, self.wait_queue_timeout))
def callback_from_future(future):
"""Return a callback that sets a Future's result or exception"""
def callback(result, error):
if error:
future.set_exception(error)
else:
future.set_result(result)
return callback
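# Illustrative usage sketch (not part of the original module): adapting Motor's
# (result, error) callback convention to a Future.
#
#     future = Future()
#     cb = callback_from_future(future)
#     cb('value', None)    # sets the future's result to 'value'
#     # cb(None, exc) would set the exception instead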
def asynchronize(motor_class, sync_method, has_write_concern, doc=None):
"""Decorate `sync_method` so it accepts a callback or returns a Future.
The method runs on a child greenlet and calls the callback or resolves
the Future when the greenlet completes.
:Parameters:
- `motor_class`: Motor class being created, e.g. MotorClient.
- `sync_method`: Bound method of pymongo Collection, Database,
MongoClient, or Cursor
- `has_write_concern`: Whether the method accepts getLastError options
- `doc`: Optionally override sync_method's docstring
"""
@functools.wraps(sync_method)
def method(self, *args, **kwargs):
check_deprecated_kwargs(kwargs)
loop = self.get_io_loop()
callback = kwargs.pop('callback', None)
if callback:
if not callable(callback):
raise callback_type_error
future = None
else:
future = Future()
def call_method():
# Runs on child greenlet.
# TODO: ew, performance?
try:
result = sync_method(self.delegate, *args, **kwargs)
if callback:
# Schedule callback(result, None) on main greenlet.
loop.add_callback(functools.partial(
callback, result, None))
else:
# Schedule future to be resolved on main greenlet.
loop.add_callback(functools.partial(
future.set_result, result))
            except Exception as e:
if callback:
loop.add_callback(functools.partial(
callback, None, e))
else:
loop.add_callback(functools.partial(
future.set_exception, e))
# Start running the operation on a greenlet.
greenlet.greenlet(call_method).switch()
return future
# This is for the benefit of motor_extensions.py, which needs this info to
# generate documentation with Sphinx.
method.is_async_method = True
method.has_write_concern = has_write_concern
name = sync_method.__name__
if name.startswith('__') and not name.endswith("__"):
# Mangle, e.g. Cursor.__die -> Cursor._Cursor__die
classname = motor_class.__delegate_class__.__name__
name = '_%s%s' % (classname, name)
method.pymongo_method_name = name
if doc is not None:
method.__doc__ = doc
return method
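# Illustrative usage sketch (not part of the original module), assuming
# `collection` is a Motor object whose methods were produced by asynchronize():
#
#     # Future style: returns a Future resolved on the IOLoop.
#     future = collection.find_one({'_id': 1})
#
#     # Callback style: `on_found(result, error)` is called when done.
#     collection.find_one({'_id': 1}, callback=on_found)
#
# `collection` and `on_found` are placeholders; the wrapped method names come
# from the delegate PyMongo class.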
class MotorAttributeFactory(object):
"""Used by Motor classes to mark attributes that delegate in some way to
PyMongo. At module import time, each Motor class is created, and MotorMeta
calls create_attribute() for each attr to create the final class attribute.
"""
def create_attribute(self, cls, attr_name):
raise NotImplementedError
class Async(MotorAttributeFactory):
def __init__(self, attr_name, has_write_concern):
"""A descriptor that wraps a PyMongo method, such as insert or remove,
and returns an asynchronous version of the method, which accepts a
callback or returns a Future.
:Parameters:
- `attr_name`: The name of the attribute on the PyMongo class, if
different from attribute on the Motor class
- `has_write_concern`: Whether the method accepts getLastError options
"""
super(Async, self).__init__()
self.attr_name = attr_name
self.has_write_concern = has_write_concern
def create_attribute(self, cls, attr_name):
name = self.attr_name or attr_name
if name.startswith('__'):
# Mangle: __simple_command becomes _MongoClient__simple_command.
name = '_%s%s' % (cls.__delegate_class__.__name__, name)
method = getattr(cls.__delegate_class__, name)
return asynchronize(cls, method, self.has_write_concern)
def wrap(self, original_class):
return WrapAsync(self, original_class)
def unwrap(self, motor_class):
return UnwrapAsync(self, motor_class)
class WrapBase(MotorAttributeFactory):
def __init__(self, prop):
super(WrapBase, self).__init__()
self.property = prop
class WrapAsync(WrapBase):
def __init__(self, prop, original_class):
"""Like Async, but before it executes the callback or resolves the
Future, checks if result is a PyMongo class and wraps it in a Motor
class. E.g., Motor's map_reduce should pass a MotorCollection instead
of a PyMongo Collection to the Future. Uses the wrap() method on the
owner object to do the actual wrapping. E.g.,
Database.create_collection returns a Collection, so MotorDatabase has:
create_collection = AsyncCommand().wrap(Collection)
Once | |
_answer_id, 'ct_type': ct_type, 'name': name, 'parent': parent}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_resolve(self, ct_type, name, parent, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.resolve signed method call
:param _answer_id: uint32
:param ct_type: uint8
:param name: bytes
:param parent: address
"""
_r_ = self.C_.call_method_signed('resolve', {'_answer_id': _answer_id, 'ct_type': ct_type, 'name': name, 'parent': parent}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
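    # Naming convention used by this generated wrapper (as the docstrings
    # spell out): G_<name> calls the getter, R_<name> the raw getter,
    # M_<name> an unsigned external method call and S_<name> a signed call;
    # the plain <name> wrapper delegates to G_ for getters, or to S_/M_
    # depending on ts4_sign for external calls.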
def resolveFull(self, ct_type, fullname, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.resolveFull
:rtype: address
:param _answer_id: uint32
:param ct_type: uint8
:param fullname: bytes
"""
        return self.G_resolveFull(ct_type, fullname, _answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_resolveFull(self, ct_type, fullname, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Root.resolveFull getter
:rtype: address
:param _answer_id: uint32
:param ct_type: uint8
:param fullname: bytes
"""
return self.C_.call_getter('resolveFull', {'_answer_id': _answer_id, 'ct_type': ct_type, 'fullname': fullname}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_resolveFull(self, ct_type, fullname, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.resolveFull raw getter
:rtype: address
:param _answer_id: uint32
:param ct_type: uint8
:param fullname: bytes
"""
return self.C_.call_getter_raw('resolveFull', {'_answer_id': _answer_id, 'ct_type': ct_type, 'fullname': fullname}, expect_ec=ts4_expect_ec)
def M_resolveFull(self, ct_type, fullname, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Root.resolveFull method call
:param _answer_id: uint32
:param ct_type: uint8
:param fullname: bytes
"""
_r_ = self.C_.call_method('resolveFull', {'_answer_id': _answer_id, 'ct_type': ct_type, 'fullname': fullname}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_resolveFull(self, ct_type, fullname, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.resolveFull signed method call
:param _answer_id: uint32
:param ct_type: uint8
:param fullname: bytes
"""
_r_ = self.C_.call_method_signed('resolveFull', {'_answer_id': _answer_id, 'ct_type': ct_type, 'fullname': fullname}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def createAuction(self, origin, revision, name, duration, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.createAuction
:rtype: address
:param _answer_id: uint32
:param origin: address
:param revision: uint32
:param name: bytes
:param duration: uint8
"""
        return self.G_createAuction(origin, revision, name, duration, _answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_createAuction(self, origin, revision, name, duration, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Root.createAuction getter
:rtype: address
:param _answer_id: uint32
:param origin: address
:param revision: uint32
:param name: bytes
:param duration: uint8
"""
return self.C_.call_getter('createAuction', {'_answer_id': _answer_id, 'origin': origin, 'revision': revision, 'name': name, 'duration': duration}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_createAuction(self, origin, revision, name, duration, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.createAuction raw getter
:rtype: address
:param _answer_id: uint32
:param origin: address
:param revision: uint32
:param name: bytes
:param duration: uint8
"""
return self.C_.call_getter_raw('createAuction', {'_answer_id': _answer_id, 'origin': origin, 'revision': revision, 'name': name, 'duration': duration}, expect_ec=ts4_expect_ec)
def M_createAuction(self, origin, revision, name, duration, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Root.createAuction method call
:param _answer_id: uint32
:param origin: address
:param revision: uint32
:param name: bytes
:param duration: uint8
"""
_r_ = self.C_.call_method('createAuction', {'_answer_id': _answer_id, 'origin': origin, 'revision': revision, 'name': name, 'duration': duration}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_createAuction(self, origin, revision, name, duration, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.createAuction signed method call
:param _answer_id: uint32
:param origin: address
:param revision: uint32
:param name: bytes
:param duration: uint8
"""
_r_ = self.C_.call_method_signed('createAuction', {'_answer_id': _answer_id, 'origin': origin, 'revision': revision, 'name': name, 'duration': duration}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def resolveAuction(self, name, _answer_id=0, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.resolveAuction
:rtype: address
:param _answer_id: uint32
:param name: bytes
"""
        return self.G_resolveAuction(name, _answer_id=_answer_id, ts4_expect_ec=ts4_expect_ec)
def G_resolveAuction(self, name, _answer_id=0, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Root.resolveAuction getter
:rtype: address
:param _answer_id: uint32
:param name: bytes
"""
return self.C_.call_getter('resolveAuction', {'_answer_id': _answer_id, 'name': name}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_resolveAuction(self, name, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.resolveAuction raw getter
:rtype: address
:param _answer_id: uint32
:param name: bytes
"""
return self.C_.call_getter_raw('resolveAuction', {'_answer_id': _answer_id, 'name': name}, expect_ec=ts4_expect_ec)
def M_resolveAuction(self, name, _answer_id=0, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Root.resolveAuction method call
:param _answer_id: uint32
:param name: bytes
"""
_r_ = self.C_.call_method('resolveAuction', {'_answer_id': _answer_id, 'name': name}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_resolveAuction(self, name, _answer_id=0, ts4_expect_ec=0):
"""
Wrapper for D4Root.resolveAuction signed method call
:param _answer_id: uint32
:param name: bytes
"""
_r_ = self.C_.call_method_signed('resolveAuction', {'_answer_id': _answer_id, 'name': name}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def onAuctionResult(self, winner, exp, name, parent, deploy, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.onAuctionResult
:rtype:
:param winner: address
:param exp: uint32
:param name: bytes
:param parent: address
:param deploy: bool
"""
if ts4_sign:
return self.S_onAuctionResult(winner, exp, name, parent, deploy, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_onAuctionResult(winner, exp, name, parent, deploy, ts4_expect_ec=ts4_expect_ec)
def G_onAuctionResult(self, winner, exp, name, parent, deploy, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Root.onAuctionResult getter
:rtype:
:param winner: address
:param exp: uint32
:param name: bytes
:param parent: address
:param deploy: bool
"""
return self.C_.call_getter('onAuctionResult', {'winner': winner, 'exp': exp, 'name': name, 'parent': parent, 'deploy': deploy}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_onAuctionResult(self, winner, exp, name, parent, deploy, ts4_expect_ec=0):
"""
Wrapper for D4Root.onAuctionResult raw getter
:rtype:
:param winner: address
:param exp: uint32
:param name: bytes
:param parent: address
:param deploy: bool
"""
return self.C_.call_getter_raw('onAuctionResult', {'winner': winner, 'exp': exp, 'name': name, 'parent': parent, 'deploy': deploy}, expect_ec=ts4_expect_ec)
def M_onAuctionResult(self, winner, exp, name, parent, deploy, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Root.onAuctionResult method call
:param winner: address
:param exp: uint32
:param name: bytes
:param parent: address
:param deploy: bool
"""
_r_ = self.C_.call_method('onAuctionResult', {'winner': winner, 'exp': exp, 'name': name, 'parent': parent, 'deploy': deploy}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_onAuctionResult(self, winner, exp, name, parent, deploy, ts4_expect_ec=0):
"""
Wrapper for D4Root.onAuctionResult signed method call
:param winner: address
:param exp: uint32
:param name: bytes
:param parent: address
:param deploy: bool
"""
_r_ = self.C_.call_method_signed('onAuctionResult', {'winner': winner, 'exp': exp, 'name': name, 'parent': parent, 'deploy': deploy}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def applyAuctionCallback(self, success, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.applyAuctionCallback
:rtype:
:param success: bool
"""
if ts4_sign:
return self.S_applyAuctionCallback(success, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_applyAuctionCallback(success, ts4_expect_ec=ts4_expect_ec)
def G_applyAuctionCallback(self, success, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Root.applyAuctionCallback getter
:rtype:
:param success: bool
"""
return self.C_.call_getter('applyAuctionCallback', {'success': success}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_applyAuctionCallback(self, success, ts4_expect_ec=0):
"""
Wrapper for D4Root.applyAuctionCallback raw getter
:rtype:
:param success: bool
"""
return self.C_.call_getter_raw('applyAuctionCallback', {'success': success}, expect_ec=ts4_expect_ec)
def M_applyAuctionCallback(self, success, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Root.applyAuctionCallback method call
:param success: bool
"""
_r_ = self.C_.call_method('applyAuctionCallback', {'success': success}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_applyAuctionCallback(self, success, ts4_expect_ec=0):
"""
Wrapper for D4Root.applyAuctionCallback signed method call
:param success: bool
"""
_r_ = self.C_.call_method_signed('applyAuctionCallback', {'success': success}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def subCertDepl(self, name, parent, subname, new_owner, expiry, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.subCertDepl
:rtype:
:param name: bytes
:param parent: address
:param subname: bytes
:param new_owner: address
:param expiry: uint32
"""
if ts4_sign:
return self.S_subCertDepl(name, parent, subname, new_owner, expiry, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_subCertDepl(name, parent, subname, new_owner, expiry, ts4_expect_ec=ts4_expect_ec)
def G_subCertDepl(self, name, parent, subname, new_owner, expiry, ts4_key=None, ts4_expect_ec=0, ts4_decode=False, ts4_decoder=None):
"""
Wrapper for D4Root.subCertDepl getter
:rtype:
:param name: bytes
:param parent: address
:param subname: bytes
:param new_owner: address
:param expiry: uint32
"""
return self.C_.call_getter('subCertDepl', {'name': name, 'parent': parent, 'subname': subname, 'new_owner': new_owner, 'expiry': expiry}, key=ts4_key, expect_ec=ts4_expect_ec, decode=ts4_decode, decoder=ts4_decoder)
def R_subCertDepl(self, name, parent, subname, new_owner, expiry, ts4_expect_ec=0):
"""
Wrapper for D4Root.subCertDepl raw getter
:rtype:
:param name: bytes
:param parent: address
:param subname: bytes
:param new_owner: address
:param expiry: uint32
"""
return self.C_.call_getter_raw('subCertDepl', {'name': name, 'parent': parent, 'subname': subname, 'new_owner': new_owner, 'expiry': expiry}, expect_ec=ts4_expect_ec)
def M_subCertDepl(self, name, parent, subname, new_owner, expiry, ts4_private_key=None, ts4_expect_ec=0, ts4_is_debot=False):
"""
Wrapper for D4Root.subCertDepl method call
:param name: bytes
:param parent: address
:param subname: bytes
:param new_owner: address
:param expiry: uint32
"""
_r_ = self.C_.call_method('subCertDepl', {'name': name, 'parent': parent, 'subname': subname, 'new_owner': new_owner, 'expiry': expiry}, private_key=ts4_private_key, expect_ec=ts4_expect_ec, is_debot=ts4_is_debot)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def S_subCertDepl(self, name, parent, subname, new_owner, expiry, ts4_expect_ec=0):
"""
Wrapper for D4Root.subCertDepl signed method call
:param name: bytes
:param parent: address
:param subname: bytes
:param new_owner: address
:param expiry: uint32
"""
_r_ = self.C_.call_method_signed('subCertDepl', {'name': name, 'parent': parent, 'subname': subname, 'new_owner': new_owner, 'expiry': expiry}, expect_ec=ts4_expect_ec)
if WrapperGlobal.auto_dispatch_messages:
ts4.dispatch_messages()
return _r_
def receiveShards(self, ct_type, name, parent, ts4_expect_ec=0, ts4_sign=False):
"""
Wrapper for D4Root.receiveShards
:rtype:
:param ct_type: uint8
:param name: bytes
:param parent: address
"""
if ts4_sign:
return self.S_receiveShards(ct_type, name, parent, ts4_expect_ec=ts4_expect_ec)
else:
return self.M_receiveShards(ct_type, name, parent, ts4_expect_ec=ts4_expect_ec)
def G_receiveShards(self, ct_type, name, | |
for i in self.selected_data:
self.sizes[i, :] = (self.sizes[i, :] > 0) * size
self._set_view_slice()
self.events.size()
@property
def edge_width(self) -> Union[None, int, float]:
"""float: width used for all point markers."""
return self._edge_width
@edge_width.setter
def edge_width(self, edge_width: Union[None, float]) -> None:
self._edge_width = edge_width
self.events.edge_width()
@property
def edge_color(self) -> str:
"""str: edge color of marker for the next added point."""
return self._edge_color
@edge_color.setter
def edge_color(self, edge_color: str) -> None:
self._edge_color = edge_color
if self._update_properties and len(self.selected_data) > 0:
for i in self.selected_data:
self.edge_colors[i] = edge_color
self.events.edge_color()
@property
def face_color(self) -> str:
"""str: face color of marker for the next added point."""
return self._face_color
@face_color.setter
def face_color(self, face_color: str) -> None:
self._face_color = face_color
if self._update_properties and len(self.selected_data) > 0:
for i in self.selected_data:
self.face_colors[i] = face_color
self.events.face_color()
@property
def selected_data(self):
"""list: list of currently selected points."""
return self._selected_data
@selected_data.setter
def selected_data(self, selected_data):
self._selected_data = list(selected_data)
selected = []
for c in self._selected_data:
if c in self._indices_view:
ind = list(self._indices_view).index(c)
selected.append(ind)
self._selected_view = selected
self._selected_box = self.interaction_box(self._selected_view)
# Update properties based on selected points
index = self._selected_data
edge_colors = list(set([self.edge_colors[i] for i in index]))
if len(edge_colors) == 1:
edge_color = edge_colors[0]
with self.block_update_properties():
self.edge_color = edge_color
face_colors = list(set([self.face_colors[i] for i in index]))
if len(face_colors) == 1:
face_color = face_colors[0]
with self.block_update_properties():
self.face_color = face_color
size = list(
set([self.sizes[i, self.dims.displayed].mean() for i in index])
)
if len(size) == 1:
size = size[0]
with self.block_update_properties():
self.size = size
def interaction_box(self, index):
"""Create the interaction box around a list of points in view.
Parameters
----------
index : list
List of points around which to construct the interaction box.
Returns
----------
box : np.ndarray
4x2 array of corners of the interaction box in clockwise order
starting in the upper-left corner.
"""
if len(index) == 0:
box = None
else:
data = self._data_view[index]
size = self._sizes_view[index]
if data.ndim == 1:
data = np.expand_dims(data, axis=0)
data = points_to_squares(data, size)
box = create_box(data)
return box
@property
def mode(self):
"""str: Interactive mode
Interactive mode. The normal, default mode is PAN_ZOOM, which
allows for normal interactivity with the canvas.
In ADD mode clicks of the cursor add points at the clicked location.
In SELECT mode the cursor can select points by clicking on them or
by dragging a box around them. Once selected points can be moved,
have their properties edited, or be deleted.
"""
return str(self._mode)
@mode.setter
def mode(self, mode):
if isinstance(mode, str):
mode = Mode(mode)
if mode == self._mode:
return
old_mode = self._mode
if mode == Mode.ADD:
self.cursor = 'pointing'
self.interactive = False
self.help = 'hold <space> to pan/zoom'
elif mode == Mode.SELECT:
self.cursor = 'standard'
self.interactive = False
self.help = 'hold <space> to pan/zoom'
elif mode == Mode.PAN_ZOOM:
self.cursor = 'standard'
self.interactive = True
self.help = ''
else:
raise ValueError("Mode not recognized")
if not (mode == Mode.SELECT and old_mode == Mode.SELECT):
self.selected_data = []
self._set_highlight()
self.status = str(mode)
self._mode = mode
self.events.mode(mode=mode)
def _slice_data(self, indices):
"""Determines the slice of points given the indices.
Parameters
----------
indices : sequence of int or slice
Indices to slice with.
Returns
----------
in_slice_data : (N, 2) array
Coordinates of points in the currently viewed slice.
slice_indices : list
Indices of points in the currently viewed slice.
scale : float, (N, ) array
If in `n_dimensional` mode then the scale factor of points, where
            a value of 1 corresponds to points located in the slice, and values
less than 1 correspond to points located in neighboring slices.
"""
# Get a list of the data for the points in this slice
not_disp = list(self.dims.not_displayed)
disp = list(self.dims.displayed)
indices = np.array(indices)
if len(self.data) > 0:
if self.n_dimensional is True and self.ndim > 2:
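                # A point belongs to this slice when, along every non-displayed
                # dimension, its distance from the slice plane is within half
                # of its size in that dimension.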
distances = abs(self.data[:, not_disp] - indices[not_disp])
sizes = self.sizes[:, not_disp] / 2
matches = np.all(distances <= sizes, axis=1)
in_slice_data = self.data[np.ix_(matches, disp)]
size_match = sizes[matches]
size_match[size_match == 0] = 1
scale_per_dim = (size_match - distances[matches]) / size_match
scale_per_dim[size_match == 0] = 1
scale = np.prod(scale_per_dim, axis=1)
indices = np.where(matches)[0].astype(int)
return in_slice_data, indices, scale
else:
data = self.data[:, not_disp].astype('int')
matches = np.all(data == indices[not_disp], axis=1)
in_slice_data = self.data[np.ix_(matches, disp)]
indices = np.where(matches)[0].astype(int)
return in_slice_data, indices, 1
else:
return [], [], []
def get_value(self):
"""Determine if points at current coordinates.
Returns
----------
selection : int or None
Index of point that is at the current coordinate if any.
"""
in_slice_data = self._data_view
# Display points if there are any in this slice
if len(self._data_view) > 0:
# Get the point sizes
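            # The cursor hits a point when it lies within half of the point's
            # displayed size along every displayed dimension; if several points
            # match, the last one in view order is returned.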
distances = abs(
self._data_view
- [self.coordinates[d] for d in self.dims.displayed]
)
in_slice_matches = np.all(
distances <= np.expand_dims(self._sizes_view, axis=1) / 2,
axis=1,
)
indices = np.where(in_slice_matches)[0]
if len(indices) > 0:
selection = self._indices_view[indices[-1]]
else:
selection = None
else:
selection = None
return selection
def _set_view_slice(self):
"""Sets the view given the indices to slice with."""
in_slice_data, indices, scale = self._slice_data(self.dims.indices)
# Display points if there are any in this slice
if len(in_slice_data) > 0:
# Get the point sizes
sizes = (
self.sizes[np.ix_(indices, self.dims.displayed)].mean(axis=1)
* scale
)
# Update the points node
data = np.array(in_slice_data)
else:
# if no points in this slice send dummy data
data = np.zeros((0, self.dims.ndisplay))
sizes = [0]
self._data_view = data
self._sizes_view = sizes
self._indices_view = indices
# Make sure if changing planes any selected points not in the current
# plane are removed
selected = []
for c in self.selected_data:
if c in self._indices_view:
ind = list(self._indices_view).index(c)
selected.append(ind)
self._selected_view = selected
if len(selected) == 0:
            self.selected_data = []
self._selected_box = self.interaction_box(self._selected_view)
self._set_highlight(force=True)
self._update_thumbnail()
self._update_coordinates()
self.events.set_data()
def _set_highlight(self, force=False):
"""Render highlights of shapes including boundaries, vertices,
interaction boxes, and the drag selection box when appropriate
Parameters
----------
force : bool
Bool that forces a redraw to occur when `True`
"""
# Check if any point ids have changed since last call
if (
self.selected_data == self._selected_data_stored
and self._value == self._value_stored
and np.all(self._drag_box == self._drag_box_stored)
) and not force:
return
self._selected_data_stored = copy(self.selected_data)
self._value_stored = copy(self._value)
self._drag_box_stored = copy(self._drag_box)
if self._mode == Mode.SELECT and (
self._value is not None or len(self._selected_view) > 0
):
if len(self._selected_view) > 0:
index = copy(self._selected_view)
if self._value is not None:
hover_point = list(self._indices_view).index(self._value)
if hover_point in index:
pass
else:
index.append(hover_point)
index.sort()
else:
hover_point = list(self._indices_view).index(self._value)
index = [hover_point]
self._highlight_index = index
else:
self._highlight_index = []
pos = self._selected_box
if pos is None and not self._is_selecting:
pos = np.zeros((0, 2))
elif self._is_selecting:
pos = create_box(self._drag_box)
pos = pos[list(range(4)) + [0]]
else:
pos = pos[list(range(4)) + [0]]
self._highlight_box = pos
self.events.highlight()
def _update_thumbnail(self):
"""Update thumbnail with current points and colors."""
colormapped = np.zeros(self._thumbnail_shape)
colormapped[..., 3] = 1
if len(self._data_view) > 0:
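            # Map the displayed data coordinates into thumbnail pixels: compute
            # the extent of the displayed dims, scale it to fit the thumbnail,
            # then round and clip so every marker lands inside the image.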
min_vals = [self.dims.range[i][0] for i in self.dims.displayed]
shape = np.ceil(
[
self.dims.range[i][1] - self.dims.range[i][0] + 1
for i in self.dims.displayed
]
).astype(int)
zoom_factor = np.divide(
self._thumbnail_shape[:2], shape[-2:]
).min()
coords = np.floor(
(self._data_view[:, -2:] - min_vals[-2:] + 0.5) * zoom_factor
).astype(int)
coords = np.clip(
coords, 0, np.subtract(self._thumbnail_shape[:2], 1)
)
for i, c in enumerate(coords):
col = self.face_colors[self._indices_view[i]]
colormapped[c[0], c[1], :] = Color(col).rgba
colormapped[..., 3] *= self.opacity
self.thumbnail = colormapped
def add(self, coord):
"""Adds point at coordinate.
Parameters
----------
coord : sequence of indices to add point at
"""
self.data = np.append(self.data, [coord], axis=0)
def remove_selected(self):
"""Removes selected points if any."""
index = copy(self.selected_data)
index.sort()
if len(index) > 0:
self._sizes = np.delete(self._sizes, index, axis=0)
for i in index[::-1]:
del self.edge_colors[i]
del self.face_colors[i]
if self._value in self.selected_data:
self._value = None
self.selected_data = []
self.data = np.delete(self.data, index, axis=0)
def _move(self, index, coord):
"""Moves points relative drag start location.
Parameters
----------
index : list
Integer indices of points to move
coord : tuple
Coordinates to move points to
"""
if len(index) > 0:
disp = list(self.dims.displayed)
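            # On the first move event, record the offset between the cursor and
            # the selection's center; subsequent moves shift the points so that
            # this offset stays constant while dragging.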
if self._drag_start is None:
center = self.data[np.ix_(index, disp)].mean(axis=0)
self._drag_start = np.array(coord)[disp] - center
center = self.data[np.ix_(index, disp)].mean(axis=0)
shift = np.array(coord)[disp] - center - self._drag_start
self.data[np.ix_(index, disp)] = (
self.data[np.ix_(index, disp)] + shift
)
self._set_view_slice()
def _copy_data(self):
| |
"""
@file
@brief Validates runtime for many :epkg:`scikit-learn` operators.
The submodule relies on :epkg:`onnxconverter_common`,
:epkg:`sklearn-onnx`.
"""
import math
import copy
import os
import warnings
from importlib import import_module
import pickle
from time import perf_counter
import numpy
from cpyquickhelper.numbers import measure_time as _c_measure_time
from sklearn.base import BaseEstimator
from sklearn.linear_model._base import LinearModel
from sklearn.model_selection import train_test_split
from sklearn import __all__ as sklearn__all__, __version__ as sklearn_version
from .validate_problems import _problems
class RuntimeBadResultsError(RuntimeError):
"""
Raised when the results are too different from
:epkg:`scikit-learn`.
"""
def __init__(self, msg, obs):
"""
:param msg: to display
:param obs: observations
"""
RuntimeError.__init__(self, msg)
self.obs = obs
def _dictionary2str(di):
el = []
for k in sorted(di):
el.append('{}={}'.format(k, di[k]))
return '/'.join(el)
def modules_list():
"""
Returns modules and versions currently used.
.. runpython::
:showcode:
:rst:
:warningout: DeprecationWarning
from mlprodict.onnxrt.validate.validate_helper import modules_list
from pyquickhelper.pandashelper import df2rst
from pandas import DataFrame
print(df2rst(DataFrame(modules_list())))
"""
def try_import(name):
try:
mod = import_module(name)
except ImportError: # pragma: no cover
return None
return (dict(name=name, version=mod.__version__)
if hasattr(mod, '__version__') else dict(name=name))
rows = []
for name in sorted(['pandas', 'numpy', 'sklearn', 'mlprodict',
'skl2onnx', 'onnxmltools', 'onnx', 'onnxruntime',
'scipy']):
res = try_import(name)
if res is not None:
rows.append(res)
return rows
def _dispsimple(arr, fLOG):
if isinstance(arr, (tuple, list)):
for i, a in enumerate(arr):
fLOG("output %d" % i)
_dispsimple(a, fLOG)
elif hasattr(arr, 'shape'):
if len(arr.shape) == 1:
threshold = 8
else:
threshold = min(
50, min(50 // arr.shape[1], 8) * arr.shape[1])
fLOG(numpy.array2string(arr, max_line_width=120,
suppress_small=True,
threshold=threshold))
else: # pragma: no cover
s = str(arr)
if len(s) > 50:
s = s[:50] + "..."
fLOG(s)
def _merge_options(all_conv_options, aoptions):
if aoptions is None:
return copy.deepcopy(all_conv_options)
if not isinstance(aoptions, dict):
return copy.deepcopy(aoptions) # pragma: no cover
merged = {}
for k, v in all_conv_options.items():
if k in aoptions:
merged[k] = _merge_options(v, aoptions[k])
else:
merged[k] = copy.deepcopy(v)
for k, v in aoptions.items():
if k in all_conv_options:
continue
merged[k] = copy.deepcopy(v)
return merged
def sklearn_operators(subfolder=None, extended=False,
experimental=True):
"""
Builds the list of operators from :epkg:`scikit-learn`.
    The function goes through the list of submodules
    and gets the list of classes which inherit from
:epkg:`scikit-learn:base:BaseEstimator`.
:param subfolder: look into only one subfolder
    :param extended: extends the list with the operators
this package implements a converter for
:param experimental: includes experimental module from
:epkg:`scikit-learn` (see `sklearn.experimental
<https://github.com/scikit-learn/scikit-learn/
tree/master/sklearn/experimental>`_)
:return: the list of found operators
"""
if experimental:
from sklearn.experimental import ( # pylint: disable=W0611
enable_hist_gradient_boosting,
enable_iterative_imputer)
subfolders = sklearn__all__ + ['mlprodict.onnx_conv']
found = []
for subm in sorted(subfolders):
if isinstance(subm, list):
continue # pragma: no cover
if subfolder is not None and subm != subfolder:
continue
if subm == 'feature_extraction':
subs = [subm, 'feature_extraction.text']
else:
subs = [subm]
for sub in subs:
if '.' in sub and sub not in {'feature_extraction.text'}:
name_sub = sub
else:
name_sub = "{0}.{1}".format("sklearn", sub)
try:
mod = import_module(name_sub)
except ModuleNotFoundError:
continue
if hasattr(mod, "register_converters"):
fct = getattr(mod, "register_converters")
cls = fct()
else:
cls = getattr(mod, "__all__", None)
if cls is None:
cls = list(mod.__dict__)
cls = [mod.__dict__[cl] for cl in cls]
for cl in cls:
try:
issub = issubclass(cl, BaseEstimator)
except TypeError:
continue
if cl.__name__ in {'Pipeline', 'ColumnTransformer',
'FeatureUnion', 'BaseEstimator',
'BaseEnsemble', 'BaseDecisionTree'}:
continue
if cl.__name__ in {'CustomScorerTransform'}:
continue
if (sub in {'calibration', 'dummy', 'manifold'} and
'Calibrated' not in cl.__name__):
continue
if issub:
pack = "sklearn" if sub in sklearn__all__ else cl.__module__.split('.')[
0]
found.append(
dict(name=cl.__name__, subfolder=sub, cl=cl, package=pack))
if extended:
from ...onnx_conv import register_converters
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
models = register_converters(True)
done = set(_['name'] for _ in found)
for m in models:
try:
name = m.__module__.split('.')
except AttributeError as e: # pragma: no cover
raise AttributeError("Unexpected value, m={}".format(m)) from e
sub = '.'.join(name[1:])
pack = name[0]
if m.__name__ not in done:
found.append(
dict(name=m.__name__, cl=m, package=pack, sub=sub))
# let's remove models which cannot predict
all_found = found
found = []
for mod in all_found:
cl = mod['cl']
if hasattr(cl, 'fit_predict') and not hasattr(cl, 'predict'):
continue
if hasattr(cl, 'fit_transform') and not hasattr(cl, 'transform'):
continue
if (not hasattr(cl, 'transform') and
not hasattr(cl, 'predict') and
not hasattr(cl, 'decision_function')):
continue
found.append(mod)
return found
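# Illustrative usage sketch (not part of the original module):
#
#     ops = sklearn_operators(extended=True)
#     names = sorted(op['name'] for op in ops)
#     # restrict the search to a single submodule:
#     tree_ops = sklearn_operators(subfolder='tree')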
def _measure_time(fct, repeat=1, number=1, first_run=True):
"""
Measures the execution time for a function.
:param fct: function to measure
:param repeat: number of times to repeat
:param number: number of times between two measures
:param first_run: if True, runs the function once before measuring
:return: last result, average, values
"""
res = None
values = []
if first_run:
fct()
for __ in range(repeat):
begin = perf_counter()
for _ in range(number):
res = fct()
end = perf_counter()
values.append(end - begin)
if repeat * number == 1:
return res, values[0], values
return res, sum(values) / (repeat * number), values # pragma: no cover
def _shape_exc(obj):
if hasattr(obj, 'shape'):
return obj.shape
if isinstance(obj, (list, dict, tuple)):
return "[{%d}]" % len(obj)
return None
def dump_into_folder(dump_folder, obs_op=None, is_error=True,
**kwargs):
"""
Dumps information when an error was detected
using :epkg:`*py:pickle`.
:param dump_folder: dump_folder
:param obs_op: obs_op (information)
:param is_error: is it an error or not?
:param kwargs: additional parameters
:return: name
"""
if dump_folder is None:
raise ValueError("dump_folder cannot be None.")
optim = obs_op.get('optim', '')
optim = str(optim)
    for token in ("<class 'sklearn.", "<class '", " ", ">", "=",
                  "{", "}", ":", "'", "/", "\\"):
        optim = optim.replace(token, "")
parts = (obs_op['runtime'], obs_op['name'], obs_op['scenario'],
obs_op['problem'], optim,
"op" + str(obs_op.get('opset', '-')),
"nf" + str(obs_op.get('n_features', '-')))
name = "dump-{}-{}.pkl".format(
"ERROR" if is_error else "i",
"-".join(map(str, parts)))
name = os.path.join(dump_folder, name)
obs_op = obs_op.copy()
fcts = [k for k in obs_op if k.startswith('lambda')]
for fct in fcts:
del obs_op[fct]
kwargs.update({'obs_op': obs_op})
with open(name, "wb") as f:
pickle.dump(kwargs, f)
return name
def default_time_kwargs():
"""
Returns default values *number* and *repeat* to measure
the execution of a function.
.. runpython::
:showcode:
:warningout: DeprecationWarning
from mlprodict.onnxrt.validate.validate_helper import default_time_kwargs
import pprint
pprint.pprint(default_time_kwargs())
keys define the number of rows,
    values define *number* and *repeat*.
"""
return {
1: dict(number=15, repeat=20),
10: dict(number=10, repeat=20),
100: dict(number=4, repeat=10),
1000: dict(number=4, repeat=4),
10000: dict(number=2, repeat=2),
}
def measure_time(stmt, x, repeat=10, number=50, div_by_number=False,
first_run=True, max_time=None):
"""
Measures a statement and returns the results as a dictionary.
:param stmt: string
:param x: matrix
:param repeat: average over *repeat* experiment
:param number: number of executions in one row
:param div_by_number: divide by the number of executions
:param first_run: if True, runs the function once before measuring
:param max_time: execute the statement until the total goes
        beyond this time (approximately), *repeat* is ignored,
*div_by_number* must be set to True
:return: dictionary
See `Timer.repeat <https://docs.python.org/3/library/timeit.html?timeit.Timer.repeat>`_
    for a better understanding of the parameters *repeat* and *number*.
The function returns a duration corresponding to
*number* times the execution of the main statement.
"""
if x is None:
raise ValueError("x cannot be None") # pragma: no cover
def fct():
stmt(x)
if first_run:
try:
fct()
except RuntimeError as e: # pragma: no cover
raise RuntimeError("{}-{}".format(type(x), x.dtype)) from e
return _c_measure_time(fct, context={}, repeat=repeat, number=number,
div_by_number=div_by_number, max_time=max_time)
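# Illustrative usage sketch (not part of the original module), assuming `oinf`
# is an OnnxInference-like object with a run() method and `X` is a numpy array:
#
#     res = measure_time(lambda x: oinf.run({'X': x}), X,
#                        repeat=10, number=10, div_by_number=True)
#     # `res` is a dictionary of timing statistics (see cpyquickhelper's
#     # measure_time for the exact keys).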
def _multiply_time_kwargs(time_kwargs, time_kwargs_fact, inst):
"""
Multiplies values in *time_kwargs* following strategy
*time_kwargs_fact* for a given model *inst*.
:param time_kwargs: see below
:param time_kwargs_fact: see below
:param inst: :epkg:`scikit-learn` model
:return: new *time_kwargs*
Possible values for *time_kwargs_fact*:
    - an integer: multiplies *number* by this number
- `'lin'`: multiplies value *number* for linear models depending
on the number of rows to process (:math:`\\propto 1/\\log_{10}(n)`)
.. runpython::
:showcode:
:warningout: DeprecationWarning
from pprint import pprint
from sklearn.linear_model import LinearRegression
from mlprodict.onnxrt.validate.validate_helper import (
default_time_kwargs, _multiply_time_kwargs)
lr = LinearRegression()
kw = default_time_kwargs()
pprint(kw)
kw2 = _multiply_time_kwargs(kw, 'lin', lr)
pprint(kw2)
"""
if time_kwargs is None:
raise ValueError("time_kwargs cannot be None.") # pragma: no cover
if time_kwargs_fact in ('', None):
return time_kwargs
try:
vi = int(time_kwargs_fact)
time_kwargs_fact = vi
except (TypeError, ValueError):
pass
if isinstance(time_kwargs_fact, int):
time_kwargs_modified = copy.deepcopy(time_kwargs)
for k in time_kwargs_modified:
time_kwargs_modified[k]['number'] *= time_kwargs_fact
return time_kwargs_modified
if time_kwargs_fact == 'lin':
if isinstance(inst, LinearModel):
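            # Multiply 'number' by a factor that shrinks with the order of
            # magnitude of the batch size k: small batches get many extra
            # executions, large ones only a few.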
time_kwargs_modified = copy.deepcopy(time_kwargs)
for k in time_kwargs_modified:
kl = max(int(math.log(k) / math.log(10) + 1e-5), 1)
f = max(int(10 / kl + 0.5), 1)
time_kwargs_modified[k]['number'] *= f
time_kwargs_modified[k]['repeat'] *= 1
| |
# tests/core/test_views.py
import time
from unittest import mock
from bs4 import BeautifulSoup
from directory_constants import cms
from modeltranslation.utils import build_localized_fieldname
import pytest
from rest_framework.serializers import Serializer
from django.forms.models import model_to_dict
from django.urls import reverse
from core import cache, helpers, permissions, serializer_mapping, views
from core.helpers import CachedResponse
from conf.signature import SignatureCheckPermission
from components.models import ComponentsApp
from tests.great_international.factories import InternationalSectorPageFactory
from tests.core.helpers import make_test_video
from .helpers import clean_post_data
@pytest.fixture
def cluster_data(settings):
data = {}
for code, _ in settings.LANGUAGES:
field_name = build_localized_fieldname('article_summaries', lang=code)
data.update(
helpers.nested_form_data({
field_name: helpers.inline_formset([])
})
)
return data
@pytest.mark.django_db
def test_permissions_draft(rf):
view = views.PagesOptionalDraftAPIEndpoint()
param = permissions.DraftTokenPermisison.TOKEN_PARAM
view.request = rf.get('/', {param: 'thing'})
assert view.permission_classes == [
SignatureCheckPermission,
permissions.DraftTokenPermisison
]
@pytest.mark.django_db
def test_permissions_published(rf):
view = views.PagesOptionalDraftAPIEndpoint()
view.request = rf.get('/')
assert view.permission_classes == [
SignatureCheckPermission,
]
@pytest.mark.parametrize('language_code,expected_title', (
('en-gb', 'ENGLISH'),
('de', 'GERMAN'),
('ja', 'JAPANESE'),
('zh-hans', 'SIMPLIFIED CHINESE'),
('fr', 'FRENCH'),
('es', 'SPANISH'),
('pt', 'PORTUGUESE'),
('ar', 'ARABIC'),
))
@pytest.mark.django_db
def test_api_translations_are_loaded_when_available(
client, translated_page, site_with_translated_page_as_root, language_code, expected_title
):
cache.rebuild_all_cache()
    # to be added as query params to all requests
languge_query_params = {'lang': language_code}
# looking up by id
url = reverse('api:api:pages:detail', kwargs={'pk': translated_page.pk})
    response = client.get(url, language_query_params)
assert response.status_code == 200
assert response.json()['title'] == expected_title
# looking up by path and site_id
# NOTE: path should be blank when you want a site root page
url = reverse('api:lookup-by-path', kwargs={
'path': '', 'site_id': site_with_translated_page_as_root.id,
})
    response = client.get(url, language_query_params)
assert response.status_code == 200
assert response.json()['title'] == expected_title
# looking up by slug and service_name
url = reverse('api:lookup-by-slug', kwargs={'slug': translated_page.slug})
query_params = {'service_name': cms.GREAT_INTERNATIONAL}
    query_params.update(language_query_params)
response = client.get(url, query_params)
assert response.status_code == 200
assert response.json()['title'] == expected_title
@pytest.mark.parametrize('language_code', (
'en-gb', 'de', 'ja', 'zh-hans', 'fr', 'es', 'pt', 'ar',
))
@pytest.mark.django_db
def test_api_falls_back_to_english_when_translations_unavailable(
client, untranslated_page, site_with_untranslated_page_as_root, language_code
):
cache.rebuild_all_cache()
    # to be added as query params to all requests
    language_query_params = {'lang': language_code}
# looking up by id
url = reverse(
'api:api:pages:detail',
kwargs={'pk': untranslated_page.pk}
)
    response = client.get(url, language_query_params)
assert response.status_code == 200
assert response.json()['title'] == 'ENGLISH'
# looking up by site_id + path
# NOTE: path should be blank when you want a site root page
url = reverse(
'api:lookup-by-path',
kwargs={'path': '', 'site_id': site_with_untranslated_page_as_root.id}
)
    response = client.get(url, language_query_params)
assert response.status_code == 200
assert response.json()['title'] == 'ENGLISH'
# looking up by service_name + slug
url = reverse(
'api:lookup-by-slug',
kwargs={'slug': untranslated_page.slug}
)
    query_params = {'service_name': cms.GREAT_INTERNATIONAL}
    query_params.update(language_query_params)
response = client.get(url, query_params)
assert response.status_code == 200
assert response.json()['title'] == 'ENGLISH'
@pytest.mark.django_db
def test_api_serves_drafts(client, page_with_reversion, site_with_revised_page_as_root):
cache.rebuild_all_cache()
# For applying the draft token as a query param for each request
param_name = permissions.DraftTokenPermisison.TOKEN_PARAM
draft_query_params = {
param_name: page_with_reversion.get_draft_token()
}
# first we'll get a non-draft response for comparison
url = reverse(
'api:api:pages:detail', kwargs={'pk': page_with_reversion.pk}
)
response = client.get(url)
assert response.status_code == 200
data = response.json()
assert data['title'] == 'published-title'
assert data['meta']['url'] == page_with_reversion.get_url()
# get draft version, looking up by id
response = client.get(url, draft_query_params)
assert response.status_code == 200
data = response.json()
assert data['title'] == 'draft-title'
assert data['meta']['url'] == page_with_reversion.get_url(is_draft=True)
# get draft version, looking up by site_id + path
# NOTE: path should be blank when you want a site root page
url = reverse(
'api:lookup-by-path',
kwargs={'path': '', 'site_id': site_with_revised_page_as_root.id}
)
response = client.get(url, draft_query_params)
assert response.status_code == 200
data = response.json()
assert data['title'] == 'draft-title'
assert data['meta']['url'] == page_with_reversion.get_url(is_draft=True)
# get draft version, looking up by service_name + slug
url = reverse(
'api:lookup-by-slug', kwargs={'slug': page_with_reversion.slug}
)
query_params = {'service_name': cms.GREAT_INTERNATIONAL}
query_params.update(draft_query_params)
response = client.get(url, query_params)
assert response.status_code == 200
data = response.json()
assert data['title'] == 'draft-title'
assert data['meta']['url'] == page_with_reversion.get_url(is_draft=True)
@pytest.mark.django_db
def test_copy_upstream(admin_client, translated_page, image):
translated_page.hero_image = image
translated_page.save()
url = reverse('copy-upstream', kwargs={'pk': translated_page.pk})
response = admin_client.get(url)
assert response.status_code == 200
assert response.context['page'] == translated_page
@pytest.mark.django_db
def test_update_upstream(admin_client, translated_page, image):
translated_page.hero_image = image
translated_page.save()
url = reverse('update-upstream', kwargs={'pk': translated_page.pk})
response = admin_client.get(url)
assert response.status_code == 200
assert response.context['page'] == translated_page
@pytest.mark.django_db
@pytest.mark.parametrize('url_name', ('copy-upstream', 'update-upstream'))
def test_upstream_anon(client, translated_page, image, url_name):
translated_page.hero_image = image
translated_page.save()
url = reverse(url_name, kwargs={'pk': translated_page.pk})
response = client.get(url)
assert response.status_code == 302
@pytest.mark.django_db
@pytest.mark.parametrize('is_edit, expected_template', (
(True, 'wagtailadmin/pages/edit.html'),
(False, 'wagtailadmin/pages/create.html'),
))
def test_add_page_prepopulate(
is_edit, expected_template, international_root_page, translated_page, admin_client, image, cluster_data
):
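    # Posting an existing page's field values to the preload endpoint should
    # render the add/edit form with those values pre-populated.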
cache.rebuild_all_cache()
cache.PageIDCache.populate()
url = reverse('preload-add-page')
model_as_dict = model_to_dict(translated_page, exclude=[
'go_live_at',
'expire_at',
'slug',
])
model_as_dict = {key: val for key, val in model_as_dict.items() if val}
post_data = {
**model_as_dict,
'(image)hero_image': image.file.name,
'(image)introduction_column_one_icon': image.file.name,
'(image)introduction_column_two_icon': image.file.name,
'(image)introduction_column_three_icon': image.file.name,
'management-app_label': translated_page._meta.app_label,
'management-model_name': translated_page._meta.model_name,
'management-parent_path': international_root_page.get_url_parts()[2],
'management-site_name': international_root_page.get_site().site_name,
**cluster_data,
}
expected_data = {
**model_as_dict,
'hero_image': str(image.pk),
'introduction_column_one_icon': str(image.pk),
'introduction_column_two_icon': str(image.pk),
'introduction_column_three_icon': str(image.pk),
}
if is_edit:
post_data['management-path'] = expected_data['path'] = translated_page.get_url_parts()[2]
response = admin_client.post(url, clean_post_data(post_data))
assert response.template_name == [expected_template]
assert response.status_code == 200
soup = BeautifulSoup(response.content, 'html.parser')
for name, value in expected_data.items():
element = soup.find(id='id_' + name)
if not element or not value:
continue
if element.name == 'textarea':
actual = element.contents[0].strip()
elif element.name == 'select':
actual = element.find_all('option', selected=True)[0].get('value')
else:
actual = element.get('value')
assert str(actual) == str(value)
@pytest.mark.django_db
def test_add_page_prepopulate_missing_content_type(
translated_page, admin_client, international_root_page, cluster_data
):
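    # A model_name that does not resolve to a page type should return 404.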
url = reverse('preload-add-page')
post_data = model_to_dict(
international_root_page,
exclude=['go_live_at', 'expire_at', 'hero_image']
)
post_data.update(cluster_data)
post_data.update({
'management-app_label': translated_page._meta.app_label,
'management-model_name': 'doesnotexist',
'management-parent_path': international_root_page.get_url_parts()[2],
'management-site_name': translated_page.get_site().site_name,
})
response = admin_client.post(url, clean_post_data(post_data))
assert response.status_code == 404
@pytest.mark.django_db
def test_list_page(admin_client, root_page):
url = reverse('wagtailadmin_explore', args=(root_page.pk,))
response = admin_client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_page_listing(translated_page, admin_client):
url = reverse('wagtailadmin_pages:edit', args=(translated_page.pk,))
response = admin_client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_translations_exposed(translated_page, settings, client):
cache.rebuild_all_cache()
url = reverse('api:api:pages:detail', kwargs={'pk': translated_page.pk})
response = client.get(url)
expected = [[code, label] for code, label in settings.LANGUAGES_LOCALIZED]
assert response.json()['meta']['languages'] == expected
@pytest.mark.django_db
def test_unserializable_page_requested(settings, client):
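    # A page that cannot be serialized (here a ComponentsApp instance) is
    # served as an empty 204 response rather than an error.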
page = ComponentsApp.objects.create(
title_en_gb='the app',
depth=2,
path='/thing',
)
cache.rebuild_all_cache()
url = reverse('api:api:pages:detail', kwargs={'pk': page.pk})
response = client.get(url)
assert response.status_code == 204
@pytest.mark.django_db
def test_lookup_by_path(international_root_page, page, admin_client):
# Creating a semi-realistic page structure and moving page into it
parent_page = InternationalSectorPageFactory(parent=international_root_page)
page.move(target=parent_page, pos='last-child')
cache.rebuild_all_cache()
# to lookup page, the path should include the parent's slug and
# the page's slug, but NOT that of app_root_page
path = '/'.join([parent_page.slug, page.slug])
response = admin_client.get(reverse(
'api:lookup-by-path', kwargs={'site_id': '1', 'path': path}
))
assert response.status_code == 200
assert response.json()['id'] == page.id
# paths are normalised by the view, so the presence of extra '/'
# characters on either end of the value shouldn't hinder matching
dodgy_path = '///' + path + '///'
response = admin_client.get(reverse(
'api:lookup-by-path', kwargs={'site_id': '1', 'path': dodgy_path}
))
assert response.status_code == 200
assert response.json()['id'] == page.id
@pytest.mark.django_db
def test_lookup_by_path_for_non_existent_page(client):
site_id = 52
path = 'xyz'
response = client.get(reverse(
'api:lookup-by-path', kwargs={'site_id': site_id, 'path': path}
))
assert response.status_code == 404
expected_msg = f"No page found matching site_id '{site_id}' and path '{path}'"
assert response.json() == {'message': expected_msg}
@pytest.mark.django_db
def test_lookup_by_slug(translated_page, admin_client):
cache.rebuild_all_cache()
url = reverse(
'api:lookup-by-slug',
kwargs={
'slug': translated_page.slug,
}
)
response = admin_client.get(url, {'service_name': cms.GREAT_INTERNATIONAL})
assert response.status_code == 200
assert response.json()['id'] == translated_page.id
@pytest.mark.django_db
def test_lookup_by_slug_missing_required_query_param(translated_page,
admin_client):
url = reverse(
'api:lookup-by-slug',
kwargs={
'slug': translated_page.slug,
}
)
response = admin_client.get(url)
assert response.status_code == 400
assert response.json() == {'service_name': 'This parameter is required'}
@pytest.mark.django_db
def test_lookup_by_slug_missing_page(admin_client):
service_name = cms.FIND_A_SUPPLIER
slug = 'thing'
url = reverse('api:lookup-by-slug', kwargs={'slug': slug})
response = admin_client.get(url, {'service_name': service_name})
assert response.status_code == 404
expected_msg = f"No page could be found matching service_name '{service_name}' and slug '{slug}'"
assert response.json() == {'message': expected_msg}
@pytest.mark.django_db
def test_cache_etags_match(admin_client, international_root_page):
service_name = cms.GREAT_INTERNATIONAL
# given there exists a page that is cached
page = InternationalSectorPageFactory.create(parent=international_root_page, live=True)
url = reverse('api:lookup-by-slug', kwargs={'slug': page.slug})
admin_client.get(url, {'service_name': service_name})
# and the cached page is retrieved
response_two = admin_client.get(url, {'service_name': service_name})
    # then sending that etag back via If-None-Match results in 304
response_three = admin_client.get(
url,
{'service_name': service_name},
HTTP_IF_NONE_MATCH=response_two['ETag'],
)
assert response_three.status_code == 304
assert response_three.content == b''
@pytest.mark.django_db
def test_cache_miss_slow_database_read(admin_client, international_root_page):
class SlowSerializer(Serializer):
def to_representation(self, instance):
time.sleep(3)
return {}
service_name = cms.GREAT_INTERNATIONAL
page = InternationalSectorPageFactory.create(parent=international_root_page, live=True)
url = reverse('api:lookup-by-slug', kwargs={'slug': page.slug})
# given the page is very slow to read
with mock.patch.dict(serializer_mapping.MODELS_SERIALIZERS_MAPPING, {page.__class__: SlowSerializer}):
response = admin_client.get(url, {'service_name': service_name})
# then the response results in 501
assert response.status_code == 501
@pytest.mark.django_db
def test_cache_etags_mismatch(admin_client, international_root_page):
service_name = cms.GREAT_INTERNATIONAL
# given there exists a page that is cached
page = InternationalSectorPageFactory.create(parent=international_root_page, live=True)
# when the page is retrieved
url = reverse('api:lookup-by-slug', kwargs={'slug': page.slug})
admin_client.get(url, {'service_name': service_name})
    # then sending a mismatched etag via If-None-Match returns the full cached response
response_two = admin_client.get(
url,
{'service_name': service_name},
HTTP_IF_NONE_MATCH='something-123',
)
assert isinstance(response_two, CachedResponse)
assert response_two.status_code == 200
assert response_two.content
@pytest.mark.django_db
def test_pages_types_view(admin_client):
url = reverse('api:pages-types-list')
response = admin_client.get(url)
assert response.status_code == 200
    assert 'types' in response.json()