# yfantasy_api/api/league.py
from yfantasy_api.api.terminal import TerminalApi
from yfantasy_api.models import League
class LeagueApi:
"""League Resource API: An api used for querying league resources
Attributes
----------
__yfantasy_api: YahooFantasyApi
The api class responsible for checking the tokens and sending
the http request
__league_key: str
The league key built using the game_code and league_id from the
__yfantasy_api object; the format is <game-code>.l.<league-id>
__url: str
The base url for league resources
path: str
The path to append to the base url; can contain subresources,
filters, or nothing depending on the builder methods called
"""
def __init__(self, yfantasy_api):
"""Initialize a new League Resource API
Parameters
----------
yfantasy_api: YahooFantasyApi
The api class responsible for checking tokens and sending
the http request
"""
self.__yfantasy_api = yfantasy_api
self.__league_key = f'{self.__yfantasy_api.game_id}.l.{self.__yfantasy_api.league_id}'
self.__url = f'league/{self.__league_key}'
self.path = ''
def draft_results(self):
"""Updates the path to include the `draftresults` sub-resource
Returns a DraftResultsCollectionApi object that provides methods
for adding further sub-resources or invoking the query
"""
self.path += '/draftresults'
return DraftResultsCollectionApi(self)
def meta(self):
"""Leaves the path empty to make the call return meta information
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
return TerminalApi(self)
def players(self, start=0, count=25, status=None, search=None, player_keys=None):
"""Updates the path to include the `players` sub-resource
Returns a PlayersCollectionApi object that provides methods
for adding further sub-resources or invoking the query
Parameters
----------
start: int
The value to indicate what offset to start the players
list at. For example: `start=0` begins at the first
player while `start=1` begins at the second. (default: 0)
count: int
The value to indicate how many players to return in the
list. If the value exceeds the max value the server will
ignore it and use the max value. (max: 25, default: 25)
status: str
The player status used to filter the list. If a value
is provided, this will add a `;status=<value>` filter to
the path. The accepted values are 'A' (all), 'FA' (free
agent), 'W' (waivers), 'T' (taken)
search: str
            A string used to filter the list by player names. If
            a value is provided, this will add a `;search=<value>`
            filter to the path. The server accepts any string and
            performs substring matching for all player names. If a
            match isn't found, the list of players will be empty.
"""
self.path += f'/players;start={start};count={count}'
if search:
self.path += f';search={search}'
if status:
self.path += f';status={status}'
if player_keys:
self.path += f';player_keys={player_keys}'
return PlayersCollectionApi(self)
def scoreboard(self, week=None):
"""Updates the path to include the `scoreboard` sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
Parameters
----------
week: int
If a value is provided this will add a `week=<value>`
filter to the path that filters the results by week.
If nothing is provided the server will default to the
current week.
"""
self.path += '/scoreboard'
if week:
self.path += f';week={week}'
return TerminalApi(self)
def settings(self):
"""Updates the path to include the `settings` sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.path += '/settings'
return TerminalApi(self)
def standings(self):
"""Updates the path to include the `standings` sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.path += '/standings'
return TerminalApi(self)
def teams(self):
"""Updates the path to include the `teams` sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.path += '/teams'
return TerminalApi(self)
def transactions(self, ttype=None, team_id=None, count=None, start=None):
"""Updates the path to include the `transactions` sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
Parameters
----------
ttype: str
The value to indicate what type of transactions to return
in the list. If a value is provided, this will add a
            `;type=<value>` filter to the path. The accepted values are
'add' (returns add, drop, and add/drop), 'drop' (returns
add, drop, and add/drop), 'commish', and 'trade'. The values
'waiver' and 'pending_trade' are also accepted, but require
the `team_id` parameter to be provided as well.
team_id: int
            The id of the team to use when filtering the list. If a value
            is provided, this will add a `;team_key=<gm>.l.<lg>.t.<value>` filter.
For simplicity the id is converted into a key for the filter.
count: int
The value to indicate how many transactions to return in the
list. Unlike the players collection, there doesn't seem to be
a maximum value for transactions. (default: 25)
start: int
The value to indicate what offset to start the transactions
list at. For example: `start=0` begins at the most recent
transaction while `start=1` begins at the second most recent.
(default: 0)
"""
self.path += '/transactions'
if ttype in ['waiver', 'pending_trade'] and not team_id:
raise Exception(f'\'team_id\' must be provided when using \'{ttype}\'.')
if ttype:
self.path += f';type={ttype}'
if team_id:
self.path += f';team_key={self.__league_key}.t.{team_id}'
if count:
self.path += f';count={count}'
if start:
self.path += f';start={start}'
return TerminalApi(self)
def get(self):
"""Invoke the Yahoo Fantasy API GET call to query the League Resource
The response json is transformed into a League model
"""
return League(self.__yfantasy_api.get(f'{self.__url}{self.path}')['league'])
class DraftResultsCollectionApi:
"""Draft Results API: Supports querying draft results sub-resources
Attributes
----------
__parent_api
The parent api class that created this object, this parent
api is used when invoking the query or creating the terminal
api object.
"""
def __init__(self, parent_api):
"""Initialize a new Draft Results API object
Parameters
----------
parent_api
The parent api class that created this object, this parent
api is used when invoking the query or creating the terminal
api object.
"""
self.__parent_api = parent_api
def players(self):
"""Updates the path to include the 'players' sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.__parent_api.path += '/players'
return TerminalApi(self.__parent_api)
def get(self):
"""Invoke the parent API `get()` call
"""
return self.__parent_api.get()
class PlayersCollectionApi:
"""Players Collection API: Supports querying players sub-resources
Attributes
----------
__parent_api
The parent api class that created this object, this parent
api is used when invoking the query or creating the terminal
api object.
"""
def __init__(self, parent_api):
"""Initialize a new Players Collection API object
Parameters
----------
parent_api
The parent api class that created this object, this parent
api is used when invoking the query or creating the terminal
api object.
"""
self.__parent_api = parent_api
def draft_analysis(self):
"""Updates the path to include the 'draft_analysis' sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.__parent_api.path += '/draft_analysis'
return TerminalApi(self.__parent_api)
def ownership(self):
"""Updates the path to include the 'ownership' sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.__parent_api.path += '/ownership'
return TerminalApi(self.__parent_api)
def percent_owned(self):
"""Updates the path to include the 'percent_owned' sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
"""
self.__parent_api.path += '/percent_owned'
return TerminalApi(self.__parent_api)
def stats(self, date=None, season=None, week=None):
"""Updates the path to include the 'stats' sub-resource
Returns a TerminalApi object that provides a `get()` call to
invoke the query.
This method supports changing the requested scope for player
stats, but only one of `date`, `season`, or `week` can be
provided.
Parameters
----------
date: str
The value to indicate the date of the player stats to return.
If a value is provided this will add a `;type=date;date=<value>`
filter. The date must be provided in a 'YYYY-MM-DD' format.
        season: int
            The value to indicate the season of the player stats to return.
            If a value is provided this will add a `;type=season;season=<value>`
            filter.
        week: int
            The value to indicate the week of the player stats to return.
            If a value is provided this will add a `;type=week;week=<value>`
            filter.
"""
coverage_filter = self.__build_coverage_filter(date, season, week)
self.__parent_api.path += f'/stats{coverage_filter}'
return TerminalApi(self.__parent_api)
def get(self):
"""Invoke the parent API `get()` call
"""
return self.__parent_api.get()
def __build_coverage_filter(self, date, season, week):
if bool(date) + bool(season) + bool(week) > 1:
raise Exception('Only one of \'date\', \'season\', or \'week\' should be provided.')
elif date:
return f';type=date;date={date}'
elif season:
return f';type=season;season={season}'
elif week:
return f';type=week;week={week}'
else:
            return ''
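

# Example (sketch): how the builder methods above accumulate the request
# path before `get()` is called. `_StubYahooFantasyApi` is a hypothetical
# stand-in that provides only the attributes LeagueApi reads; a real
# YahooFantasyApi instance with valid tokens would be used in practice.
def _example_path_building():
    class _StubYahooFantasyApi:
        game_id = 'nhl'
        league_id = '12345'

    league_api = LeagueApi(_StubYahooFantasyApi())
    # Chaining players(...) and stats(week=5) appends the collection plus
    # its filters, producing:
    #   /players;start=0;count=25;status=FA/stats;type=week;week=5
    league_api.players(status='FA').stats(week=5)
    print(league_api.path)
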
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 20:45:47 2017
@author: Rachit & Nitesh
"""
import json
class tweetsSenti:
def __init__(self, **kwargs):
return super().__init__(**kwargs)
def searchTweets(self, q, ct):
import numpy as np
import pandas as pd
import re
from twitter import Twitter, OAuth, TwitterHTTPError
from pandas.io.json import json_normalize
from pycountry import countries
ACCESS_TOKEN = '<KEY>'
ACCESS_SECRET = '<KEY>'
consumer_key = 'bto0MsRvjjfkrl4QpndjaUneg'
consumer_secret = '<KEY>'
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, consumer_key, consumer_secret)
twitterObj = Twitter(auth=oauth)
count = int(ct)
try:
search_results = twitterObj.search.tweets(q=q,count = count)
except TwitterHTTPError:
return "","","","","","","","","",""
if(search_results['statuses']==[]):
return "","","","","","","","","",""
Original_status_df = json_normalize(search_results,['statuses'])
Original_status_df = pd.DataFrame(Original_status_df)
min_id = min(Original_status_df['id'])
max_id = max(Original_status_df['id'])
while len(Original_status_df) < count:
try:
search_results = twitterObj.search.tweets(q=q,count=count,max_id = min_id)
results = json_normalize(search_results,['statuses'])
Original_status_df = Original_status_df.append(results)
min_id = min(results['id'])
max_id = max(results['id'])
except TwitterHTTPError:
return "","","","","","","","","",""
countries_name=[]
Original_status_df = Original_status_df.reset_index()
cleansed_tweets_df = clean_Tweets(Original_status_df)
for c in list(countries):
countries_name.append(c.name)
#countries_name = ['Argentina','Austria','Australia','Brasil','Brazil','Bangladesh','Cameroon','Canada','Cyprus',
# 'Deutschland','Dubai','Ecuador','Egypt',
# 'England','Kenya','Nigeria','Hong Kong','Holand','Finland','Prague','USA','Greece',
# 'Kazakhstan','Thailand','Italy','Italia','India','Israel','Ireland','Pakistan','Polska','Poland',
# 'United States','Germany','Spain','France','Fiji','China','Mexico','Netherlands',
# 'New Zealand','North Korea','Japan','Jordan',
# 'Oman','Palestine','United Arab Emirates','UAE','Portugal','Scotland','Slovakia',
# 'South Africa','Switzerland','Sweden',
# 'Turkey','Peru','Puerto Rico','Russia','Singapore','Chile','United Kingdom','Indonesia','Philippines',
# 'Ukraine','UK','Venezuela','Yemen']
Cleansed_Country_df = Country_of_tweet(cleansed_tweets_df,countries_name)
us_city_state_filter =['Albuquerque','Asheville','Atlanta','Austin','Baltimore','Boston','Columbia','Dallas','Detroit','Denver',
'Las Vegas','Georgia','Miami','Honolulu','Los Angeles','Pensacola','Richmond','Kansas',
            'Phenix City','Washington, DC','NYC',
'San Jose','Seattle','Orlando','Pittsburgh','San Diego','Chicago',
'New York','Phoenix','Mount Prospect',
'Alabama','Alaska','Arkansas','Arizona',
'California','Colorado','Connecticut','Delaware','Florida','Hawaii','Indiana','Iowa','Idaho','Illinois',
'Indiana','Louisiana','Oregon',
'Maryland','Michigan','Minnesota','Maine','Massachusetts','Missouri','Mississippi','Montana',
'Nebraska','New Jersey','New Hampshire','North Carolina','Kentucky','Ohio','Oklahoma',
'New Mexico','Nevada','North Dakota','South Dakota','Pennsylvania','San Francisco',
'Tennessee','Utah','Rhode Island','South Carolina','Washington','West Virginia','Wisconsin','Wyoming',
'Texas','Vermont','Virginia','LA','SF',
'AZ','AL','CA','CO','CT','DE','FL','GA','IA','ID','IL','IN','KY','MA',
'MI','MO','MD','MT','MN','MS','NC','ND','NJ','NH','NY','NV',
'OH','OR','PA','RI','SD','TX','TN','UT','VA','VT','WA','WI','WY','WV']
US_States_df = US_State_of_User(Cleansed_Country_df,us_city_state_filter)
updated_country_df = Updated_country_of_tweet(US_States_df,'USA')
only_country_df = updated_country_df[updated_country_df['Country_User']!=''].reset_index(drop=True)
tweet_df_live_sentiments_df = calculate_sentiment(only_country_df)
country_tweets_count = countryTweetsCount(tweet_df_live_sentiments_df)
usa_tweets_count = usaTweetsCount(country_tweets_count)
converted_country_df = ConvertCountryName(usa_tweets_count)
mean_sentiments_country_df = meanSentimentsCountry(converted_country_df)
mean_sentiments_UsState_df = meanSentimentsUsState(mean_sentiments_country_df)
summary_df_world = dataSummaryWorld(mean_sentiments_UsState_df)
summary_df_Country = dataSummaryCountry(mean_sentiments_UsState_df,'USA')
world_map_df = mean_sentiments_UsState_df[['Country_User_Code','Mean_Polarity_Country','Weighted_Mean_Polarity_Country','Total_Tweets_Country']]
world_map = world_map_df.groupby('Country_User_Code').mean()
UsState_map_df = mean_sentiments_UsState_df[['USA_State_User_Code','Mean_Polarity_USA_State','Weighted_Mean_Polarity_USA_State','Total_Tweets_USA_State']]
UsState_map = UsState_map_df.groupby('USA_State_User_Code').mean()
bar_df = mean_sentiments_UsState_df[['Weighted_Mean_Polarity_Country','Weighted_Mean_Subjectivity_Country','created_at']]
times =pd.to_datetime(bar_df['created_at'])
bar_df.index = times
bar_df = bar_df.resample('T').mean()
bar_string, bar_ids = bar_sentiments(bar_df['Weighted_Mean_Polarity_Country'],bar_df['Weighted_Mean_Subjectivity_Country'],bar_df.index)
world_map_string, world_map_ids = worldMap(world_map['Weighted_Mean_Polarity_Country'], world_map.index)
us_map_string, us_map_ids = UsMapPlot(UsState_map['Weighted_Mean_Polarity_USA_State'],UsState_map.index)
return world_map_string, world_map_ids, us_map_string, us_map_ids, summary_df_world['# Tweets'].sum(), summary_df_world.to_html(justify='justify'),summary_df_Country['# Tweets'].sum(),summary_df_Country.to_html(justify='justify'), bar_string, bar_ids
def clean_Tweets(Original_status_df):
import re
status_row = []
location=[]
tweet_df = Original_status_df[['user','text','created_at']]
for i in range(len(tweet_df)):
status_ = tweet_df.iloc[i,:]['text'].lower()
status_ = re.sub('((www\.[^\s]+)|(https?://[^\s]+))','',status_)
status_ = re.sub('@[^\s]+','',status_)
status_ = re.sub('[^A-Za-z0-9 ]+', '', status_)
        status_ = re.sub(r'\brt\b', '', status_)  # drop the standalone retweet marker, not 'rt' inside words
status_row.append(status_)
try:
location_ = tweet_df.iloc[i,:]['user']['location']
location.append(location_)
        except (KeyError, TypeError, IndexError):
location.append("")
tweet_df['text'] = status_row
tweet_df['Location_User'] = location
return tweet_df
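
# Example (sketch): what clean_Tweets() produces for a single status. The
# sample row below is hypothetical; only the columns the function reads
# ('user', 'text', 'created_at') are supplied.
def _example_clean_tweets():
    import pandas as pd
    sample = pd.DataFrame([{
        'user': {'location': 'Austin, TX'},
        'text': 'RT @someone: Loving the new release! https://example.com #python',
        'created_at': 'Mon Nov 13 20:45:47 +0000 2017',
    }])
    cleaned = clean_Tweets(sample)
    # 'text' is lowercased with the URL, @mention and punctuation stripped;
    # 'Location_User' carries the profile location used for geo mapping.
    print(cleaned[['text', 'Location_User']])
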
def Country_of_tweet(dataframe,countries_filter):
import re
list3 =[]
country_names_updated = {'Prague' : 'Czechia','United States':'USA','United Arab Emirates':'UAE',
'Deutschland':'Germany','UK':'United Kingdom','Italia':'Italy','Polska':'Poland',
'Holand':'Netherlands','Brasil':'Brazil'}
for i in range(len(dataframe)):
setblank =0
location = dataframe.iloc[i,:]['Location_User']
if(isinstance(location,str)):
location_split = re.split(r'[-,.\s]',location)
for country in countries_filter:
if('United Arab Emirates' in country or 'United States' in country or 'United Kingdom' in country
or 'New Zealand' in country or 'North Korea' in country):
if(re.search(country,location) or re.search(country.lower(),location.lower()) or re.search(country.upper(),location.upper())):
country_updated = country_names_updated.get(country,country)
list3.append(country_updated)
setblank = 1
break
elif(country in location_split or country.lower() in location_split or country.upper() in location_split):
country_updated = country_names_updated.get(country,country)
list3.append(country_updated)
setblank = 1
break
if(setblank == 0):
list3.append('')
else:
list3.append('')
dataframe['Country_User'] = list3
return dataframe
def US_State_of_User(dataframe,us_city_state):
import re
import us
dummylist =[]
count = 0
city_to_state_names_updated = {'Albuquerque':'New Mexico',
'Atlanta':'Georgia',
'Austin':'Texas',
'Baltimore':'Maryland',
'Boston':'Massachusetts',
'Columbia':'South Carolina',
'Diego':'California',
'Denver':'Colorado',
'Detroit':'Michigan',
'Honolulu':'Hawaii',
'Las Vegas' : 'Nevada',
'Vegas':'Nevada',
'Indianapolis':'Indiana',
'Dallas': 'Texas',
'Seattle': 'Washington',
'NYC':'New York',
'Los Angeles' : 'California',
'Orlando': 'Florida',
'San Diego' : 'California',
'San Jose':'California',
'San Francisco':'California',
'LA':'California',
'SF':'California',
'Pittsburgh':'Pennsylvania',
'Pensacola':'Florida',
            'Chicago':'Illinois','Phoenix':'Arizona','Phenix City':'Alabama','Richmond':'Virginia',
'Mount Prospect':'Illinois','Washington DC':'Maryland','washington, DC':'Maryland',
'Miami':'Florida', 'Asheville':'North Carolina','Washington DC':'Maryland',
'AZ':'Arizona','AL':'Alabama','CA':'California','CT':'Connecticut','CO':'Colorado',
'DE':'Delaware','FL':'Florida','GA':'Georgia','ID':'Idaho','IA':'Iowa','IL':'Illinois',
'IN':'Indiana','KY':'Kentucky','MA':'Massachusetts','MD':'Maryland','MI':'Michigan',
'MN':'Minnesota','MS':'Mississippi','MT':'Montana','MO':'Missouri','NC':'North Carolina',
'ND':'North Dakota','NE':'Nebraska','NH':'New Hampshire','NY':'New York',
'NJ':'New Jersey','NV':'Nevada','OH':'Ohio','OR':'Oregon','PA':'Pennsylvania',
'RI':'Rhode Island','TX':'Texas','TN':'Tennessee','SD':'South Dakota','UT':'Utah',
'VA':'Virginia','VT':'Vermont','WA':'Washington','WI':'Wisconsin','WY':'Wyoming',
'WV':'West Virginia'}
for i in range(len(dataframe)):
setblank =0
location_string = dataframe.iloc[i,:]['Location_User']
if(isinstance(location_string,str)):
location_string_split= re.split(r'[,\s]', location_string)
for city_state in us_city_state:
if('New York' in city_state or 'Las Vegas' in city_state or 'Los Angeles' in city_state
or 'North Carolina' in city_state or 'San Francisco' in city_state or 'New Mexico' in city_state
or 'North Dakota' in city_state or 'South Dakota' in city_state or 'Rhode Island' in city_state
or 'Washington, DC' in city_state or 'New Jersey' in city_state or 'Washington DC' in city_state
or 'Washington DC' in city_state or 'New Hampshire' in city_state or 'West Virginia' in city_state):
if(re.search(city_state,location_string) or re.search(city_state.lower(),location_string.lower())
or re.search(city_state.upper(),location_string.upper())):
state_updated = city_to_state_names_updated.get(city_state,city_state)
dummylist.append(state_updated)
setblank = 1
break
elif(city_state in location_string_split or city_state.upper() in location_string_split):
state_updated = city_to_state_names_updated.get(city_state,city_state)
dummylist.append(state_updated)
setblank = 1
break
elif(city_state.lower() in location_string_split):
if(len(city_state)!=2):
state_updated = city_to_state_names_updated.get(city_state,city_state)
dummylist.append(state_updated)
setblank = 1
break
if(setblank == 0):
dummylist.append('')
else:
dummylist.append('')
final_list = []
dataframe['USA_State_User'] = dummylist
map_states_codes = us.states.mapping('name','abbr')
for i in range(len(dummylist)):
final_list.append(map_states_codes.get(dummylist[i]))
for i in range(len(final_list)):
if (final_list[i]==None):
final_list[i]=''
dataframe['USA_State_User_Code'] = final_list
return dataframe
def Updated_country_of_tweet(dataframe,country):
countrylist = []
for i in range(len(dataframe)):
if(dataframe.iloc[i,:]['USA_State_User']!=''):
countrylist.append(country)
else:
countrylist.append(dataframe.iloc[i,:]['Country_User'])
dataframe['Country_User'] = countrylist
return dataframe
def ConvertCountryName(dataframe):
import pycountry as pyc
world_dict = dict()
world_dict['']=''
world_dict['USA'] = 'USA'
world_dict['Dubai'] = 'UAE'
world_dict['Russia'] = 'RUS'
for countryValue in pyc.countries:
country_code = countryValue.alpha_3
country_name = countryValue.name
world_dict[country_name] = country_code
countryCodes =[]
for i in range(len(dataframe)):
try:
country = dataframe.iloc[i,:]['Country_User']
countryCodes.append(world_dict[country])
except KeyError:
countryCodes.append('')
dataframe['Country_User_Code'] = countryCodes
return dataframe
def calculate_sentiment(tweet_df):
from textblob import TextBlob
polarity = []
subjectivity = []
reputation = []
for i in range(len(tweet_df)):
wiki = TextBlob(tweet_df.iloc[i,:]['text'])
polarity.append(wiki.sentiment.polarity)
subjectivity.append(wiki.sentiment.subjectivity)
try:
reputation.append(int(tweet_df.iloc[i,:]['user']['followers_count'])/(int(tweet_df.iloc[i,:]['user']['followers_count'])
+ int(tweet_df.iloc[i,:]['user']['friends_count'])))
except ValueError:
reputation.append(0)
except ZeroDivisionError:
reputation.append(0)
tweet_df['Polarity'] = polarity
tweet_df['Subjectivity']= subjectivity
tweet_df['Reputation'] = reputation
tweet_df['Reputation'] = round(tweet_df['Reputation'],1)
return tweet_df
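
# Example (sketch): how calculate_sentiment() scores a row. TextBlob yields
# a polarity in [-1, 1] and a subjectivity in [0, 1]; reputation is
# followers / (followers + friends), e.g. 800 / (800 + 200) = 0.8.
# The sample row is hypothetical.
def _example_calculate_sentiment():
    import pandas as pd
    sample = pd.DataFrame([{
        'text': 'what a great game tonight',
        'user': {'followers_count': 800, 'friends_count': 200},
    }])
    scored = calculate_sentiment(sample)
    print(scored[['Polarity', 'Subjectivity', 'Reputation']])
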
def countryTweetsCount(dataframe):
import numpy as np
dataframe['Total_Tweets_Country']=int()
for country in dataframe.Country_User.unique():
if(country == ''):
dataframe.loc[dataframe.Country_User==country,'Total_Tweets_Country']= np.nan
else:
dataframe.loc[dataframe.Country_User==country,'Total_Tweets_Country'] = (dataframe[dataframe.Country_User==country].count().values[3])
return dataframe
def usaTweetsCount(dataframe):
import numpy as np
dataframe['Total_Tweets_USA_State']=int()
for state in dataframe.USA_State_User.unique():
if(state == ''):
dataframe.loc[dataframe.USA_State_User==state,'Total_Tweets_USA_State']= np.nan
else:
dataframe.loc[dataframe.USA_State_User==state,'Total_Tweets_USA_State'] = (dataframe[dataframe.USA_State_User==state].count().values[4])
return dataframe
def meanSentimentsCountry(dataframe):
dataframe['Mean_Polarity_Country']=float()
dataframe['Mean_Subjectivity_Country']=float()
dataframe['Mean_Reputation_Country']=float()
dataframe['Weighted_Mean_Polarity_Country']=float()
dataframe['Weighted_Mean_Subjectivity_Country']=float()
for country in dataframe.Country_User.unique():
if(country == ''):
dataframe.loc[dataframe.Country_User==country,'Mean_Polarity_Country'] = ''
dataframe.loc[dataframe.Country_User==country,'Mean_Subjectivity_Country'] = ''
dataframe.loc[dataframe.Country_User==country,'Mean_Reputation_Country'] = ''
else:
dataframe.loc[dataframe.Country_User==country,'Mean_Polarity_Country'] =100 * dataframe[dataframe.Country_User==country].Polarity.mean()
dataframe.loc[dataframe.Country_User==country,'Weighted_Mean_Polarity_Country'] =(1000000 * dataframe[dataframe.Country_User==country].Polarity.mean() * dataframe[dataframe.Country_User==country].Total_Tweets_Country.mean())/dataframe['Total_Tweets_Country'].sum()
dataframe.loc[dataframe.Country_User==country,'Mean_Subjectivity_Country'] =100 * dataframe[dataframe.Country_User==country].Subjectivity.mean()
dataframe.loc[dataframe.Country_User==country,'Weighted_Mean_Subjectivity_Country'] =(1000000 * dataframe[dataframe.Country_User==country].Subjectivity.mean() * dataframe[dataframe.Country_User==country].Total_Tweets_Country.mean())/dataframe['Total_Tweets_Country'].sum()
dataframe.loc[dataframe.Country_User==country,'Mean_Reputation_Country'] =100 * dataframe[dataframe.Country_User==country].Reputation.mean()
return dataframe
def meanSentimentsUsState(dataframe):
dataframe['Mean_Polarity_USA_State']=float()
dataframe['Mean_Subjectivity_USA_State']=float()
dataframe['Mean_Reputation_USA_State']=float()
dataframe['Weighted_Mean_Polarity_USA_State']=float()
for us_state in dataframe.USA_State_User.unique():
if(us_state == ''):
dataframe.loc[dataframe.USA_State_User==us_state,'Mean_Polarity_USA_State'] = ''
dataframe.loc[dataframe.USA_State_User==us_state,'Mean_Subjectivity_USA_State'] = ''
dataframe.loc[dataframe.USA_State_User==us_state,'Mean_Reputation_USA_State'] = ''
else:
dataframe.loc[dataframe.USA_State_User==us_state,'Mean_Polarity_USA_State'] =100 * dataframe[dataframe.USA_State_User==us_state].Polarity.mean()
dataframe.loc[dataframe.USA_State_User==us_state,'Weighted_Mean_Polarity_USA_State'] =(1000000 * dataframe[dataframe.USA_State_User==us_state].Polarity.mean() * dataframe[dataframe.USA_State_User==us_state].Total_Tweets_USA_State.mean())/dataframe['Total_Tweets_USA_State'].sum()
dataframe.loc[dataframe.USA_State_User==us_state,'Mean_Subjectivity_USA_State'] =100 * dataframe[dataframe.USA_State_User==us_state].Subjectivity.mean()
dataframe.loc[dataframe.USA_State_User==us_state,'Mean_Reputation_USA_State'] =100 * dataframe[dataframe.USA_State_User==us_state].Reputation.mean()
return dataframe
def worldMap(polarity,country_code):
from plotly import plotly
import simplejson as json
scl_world = [[-100,"rgb(5, 10, 172)"],\
[0,"rgb(40, 60, 190)"],[200,"rgb(70, 100, 245)"],[400,"rgb(90, 120, 245)"],[600,"rgb(106, 137, 247)"],[800,"rgb(220, 220, 220)"]]
graphs = [dict(data = [dict(type = 'choropleth',locations = country_code,z = polarity,text = country_code,
colorscale = scl_world,
autocolorscale = False, reversescale = True,
marker = dict( line = dict(color = 'rgb(86,81,81)', width = 1)),
colorbar = dict(title = 'Polarity'))],
layout = dict(title = 'World Map (Polarity)',geo = dict(showframe = True,showcoastlines = True,projection = dict(type = 'Mercator')),
autosize=False, width=1200, height=700,
margin=dict(l=0,r=10,b=80,t=90,pad=0)))]
world_map_id = ['World_Map']
world_map_json = json.dumps(graphs, cls=plotly.plotly.utils.PlotlyJSONEncoder)
return world_map_json, world_map_id
def UsMapPlot(polarity,us_state_code):
from plotly import plotly
import simplejson as json
scl_usa = [[0.0, 'rgb(242,240,247)'],[500, 'rgb(218,218,235)'],[1000, 'rgb(188,189,220)'],\
[2000, 'rgb(158,154,200)'],[2000, 'rgb(117,107,177)'],[3000, 'rgb(84,39,143)']]
graphs = [dict( data = [dict(type='choropleth',colorscale = scl_usa, autocolorscale = False,reversescale = True,
locations = us_state_code, z = polarity, locationmode = 'USA-states',
marker = dict(line = dict (color = 'rgb(255,255,255)',width =1)),colorbar = dict(title = "Map Plot"))],
        layout = dict(title = 'USA Map (Polarity)',geo = dict(showframe = True,scope='usa',projection=dict(type='albers usa' ),
showlakes = True,lakecolor = 'rgb(255, 255, 255)'),
autosize=False, width=1200, height=700, margin=dict(l=0,r=10,b=80,t=90,pad=0)))]
usa_map_id = ['Map']
usa_map_json = json.dumps(graphs, cls=plotly.plotly.utils.PlotlyJSONEncoder)
return usa_map_json, usa_map_id
def bar_sentiments(polarity,subjectivity,dates):
from plotly import plotly
import simplejson as json
#==============================================================================
# trace1 = go.Bar(
# x=dates,
# y=polarity,
# name='Polarity'
# )
# trace2 = go.Bar(
# x=dates,
# y=subjectivity,
# name='Subjectivity'
# )
#
# data = [trace1, trace2]
# layout = go.Layout(
# barmode='group'
# )
#==============================================================================
graphs = [dict(data=[dict(x=dates, y=polarity, type='bar', name='Polarity'),dict(x=dates,y=subjectivity,type='bar',
name='Subjectivity'),], layout=dict(autosize=False, width=1800, height=700, margin=dict(l=0,r=10,b=80,t=90,pad=0),showframe = True, title='Bar Plot',barmode='group',bargap=0.10,bargroupgap=0.1))]
bar_id = ['Bar']
basic_bar_json = json.dumps(graphs, cls=plotly.plotly.utils.PlotlyJSONEncoder)
return basic_bar_json,bar_id
def dataSummaryWorld(df):
import pandas as pd
Country=[]
total_tweets_Count =[]
summary_df = pd.DataFrame(columns=('Country','# Tweets'))
for country in df.Country_User.unique():
Country.append(country)
total_tweets_Count.append(int(df[df.Country_User==country]['Total_Tweets_Country'].mean()))
summary_df['Country'] = Country
summary_df['# Tweets'] = total_tweets_Count
#df.groupby(country).mean()
summary_df = summary_df.sort_values(by=['Country']).reset_index(drop=True)
return summary_df
def dataSummaryCountry(df, countryName):
#Check Renaming refactoring
import pandas as pd
columnNameLocation = str(countryName) +'_State'
columnNameTweets = '# Tweets'
country_state=[]
total_tweets_count_state =[]
summary_df_country = pd.DataFrame(columns=(columnNameLocation,columnNameTweets))
for state in df[columnNameLocation+'_User'].unique():
if(state!= ''):
country_state.append(state)
total_tweets_count_state.append(int(df[df[columnNameLocation+'_User']==state]['Total_Tweets_USA_State'].mean()))
summary_df_country[columnNameLocation] = country_state
summary_df_country[columnNameTweets] = total_tweets_count_state
#df.groupby(country).mean()
    summary_df_country = summary_df_country.sort_values(by=[columnNameLocation]).reset_index(drop=True)
    return summary_df_country
reviewchangesets_values)
cursor.executemany("""INSERT INTO reviewfiles (review, changeset, file, deleted, inserted)
SELECT reviewchangesets.review, reviewchangesets.changeset, fileversions.file, COALESCE(SUM(chunks.deleteCount), 0), COALESCE(SUM(chunks.insertCount), 0)
FROM reviewchangesets
JOIN fileversions USING (changeset)
LEFT OUTER JOIN chunks USING (changeset, file)
WHERE reviewchangesets.review=%s
AND reviewchangesets.changeset=%s
GROUP BY reviewchangesets.review, reviewchangesets.changeset, fileversions.file""",
reviewchangesets_values)
new_reviewers, new_watchers = assignChanges(db, user, review, changesets=changesets)
cursor.execute("SELECT include FROM reviewrecipientfilters WHERE review=%s AND uid=0", (review.id,))
try: opt_out = cursor.fetchone()[0] is True
except: opt_out = True
if not new_review:
for user_id in new_reviewers:
new_reviewuser = dbutils.User.fromId(db, user_id)
print "Added reviewer: %s <%s>" % (new_reviewuser.fullname, new_reviewuser.email)
if opt_out:
# If the user has opted out from receiving e-mails about this
# review while only watching it, clear the opt-out now that the
# user becomes a reviewer.
cursor.execute("DELETE FROM reviewrecipientfilters WHERE review=%s AND uid=%s AND include=FALSE", (review.id, user_id))
for user_id in new_watchers:
new_reviewuser = dbutils.User.fromId(db, user_id)
print "Added watcher: %s <%s>" % (new_reviewuser.fullname, new_reviewuser.email)
review.incrementSerial(db)
for changeset in changesets:
review_comment.updateCommentChains(db, user, review, changeset)
if pending_mails is None: pending_mails = []
notify_changesets = filter(lambda changeset: changeset not in silent_changesets, changesets)
if not new_review and notify_changesets:
recipients = review.getRecipients(db)
for to_user in recipients:
pending_mails.extend(mail.sendReviewAddedCommits(db, user, to_user, recipients, review, notify_changesets, tracked_branch=tracked_branch))
mail.sendPendingMails(pending_mails)
review.reviewers.extend([User.fromId(db, user_id) for user_id in new_reviewers])
for user_id in new_watchers:
review.watchers[User.fromId(db, user_id)] = "automatic"
return True
def createReview(db, user, repository, commits, branch_name, summary, description, from_branch_name=None, via_push=False, reviewfilters=None, applyfilters=True, applyparentfilters=False, recipientfilters=None):
cursor = db.cursor()
if via_push:
applyparentfilters = bool(user.getPreference(db, 'review.applyUpstreamFilters'))
branch = dbutils.Branch.fromName(db, repository, branch_name)
if branch is not None:
raise OperationFailure(code="branchexists",
title="Invalid review branch name",
message="""\
<p>There is already a branch named <code>%s</code> in the repository. You have
to select a different name.</p>
<p>If you believe the existing branch was created during an earlier (failed)
attempt to create this review, you can try to delete it from the repository
using the command<p>
<pre> git push <remote> :%s</pre>
<p>and then press the "Submit Review" button on this page again."""
% (htmlutils.htmlify(branch_name), htmlutils.htmlify(branch_name)))
commitset = log_commitset.CommitSet(commits)
if len(commitset.getHeads()) != 1:
raise Exception, "invalid commit-set; multiple heads"
head = commitset.getHeads().pop()
if len(commitset.getTails()) != 1:
tail_id = None
else:
tail_id = gitutils.Commit.fromSHA1(db, repository, commitset.getTails().pop()).getId(db)
if not via_push:
repository.branch(branch_name, head.sha1)
try:
cursor.execute("INSERT INTO branches (repository, name, head, tail, type) VALUES (%s, %s, %s, %s, 'review') RETURNING id", [repository.id, branch_name, head.getId(db), tail_id])
branch_id = cursor.fetchone()[0]
reachable_values = [(branch_id, commit.getId(db)) for commit in commits]
cursor.executemany("INSERT INTO reachable (branch, commit) VALUES (%s, %s)", reachable_values)
cursor.execute("INSERT INTO reviews (type, branch, state, summary, description, applyfilters, applyparentfilters) VALUES ('official', %s, 'open', %s, %s, %s, %s) RETURNING id", (branch_id, summary, description, applyfilters, applyparentfilters))
review = dbutils.Review.fromId(db, cursor.fetchone()[0])
cursor.execute("INSERT INTO reviewusers (review, uid, owner) VALUES (%s, %s, TRUE)", (review.id, user.id))
if reviewfilters is not None:
cursor.executemany("INSERT INTO reviewfilters (review, uid, directory, file, type, creator) VALUES (%s, %s, %s, %s, %s, %s)",
[(review.id, filter_user_id, filter_directory_id, filter_file_id, filter_type, user.id)
for filter_directory_id, filter_file_id, filter_type, filter_delegate, filter_user_id in reviewfilters])
if recipientfilters is not None:
cursor.executemany("INSERT INTO reviewrecipientfilters (review, uid, include) VALUES (%s, %s, %s)",
[(review.id, filter_user_id, filter_include)
for filter_user_id, filter_include in recipientfilters])
addCommitsToReview(db, user, review, commits, new_review=True)
if from_branch_name is not None:
cursor.execute("UPDATE branches SET review=%s WHERE repository=%s AND name=%s", (review.id, repository.id, from_branch_name))
# Reload to get list of changesets added by addCommitsToReview().
review = dbutils.Review.fromId(db, review.id)
pending_mails = []
recipients = review.getRecipients(db)
for to_user in recipients:
pending_mails.extend(mail.sendReviewCreated(db, user, to_user, recipients, review))
db.commit()
mail.sendPendingMails(pending_mails)
return review
except:
if not via_push:
repository.run("branch", "-D", branch_name)
raise
def countDraftItems(db, user, review):
cursor = db.cursor()
cursor.execute("SELECT reviewfilechanges.to, SUM(deleted) + SUM(inserted) FROM reviewfiles JOIN reviewfilechanges ON (reviewfilechanges.file=reviewfiles.id) WHERE reviewfiles.review=%s AND reviewfilechanges.uid=%s AND reviewfilechanges.state='draft' GROUP BY reviewfilechanges.to", (review.id, user.id))
reviewed = unreviewed = 0
for to_state, lines in cursor:
if to_state == "reviewed": reviewed = lines
else: unreviewed = lines
cursor.execute("SELECT reviewfilechanges.to, COUNT(*) FROM reviewfiles JOIN reviewfilechanges ON (reviewfilechanges.file=reviewfiles.id) WHERE reviewfiles.review=%s AND reviewfiles.deleted=0 AND reviewfiles.inserted=0 AND reviewfilechanges.uid=%s AND reviewfilechanges.state='draft' GROUP BY reviewfilechanges.to", (review.id, user.id))
reviewedBinary = unreviewedBinary = 0
for to_state, lines in cursor:
if to_state == "reviewed": reviewedBinary = lines
else: unreviewedBinary = lines
cursor.execute("SELECT count(*) FROM commentchains, comments WHERE commentchains.review=%s AND comments.chain=commentchains.id AND comments.uid=%s AND comments.state='draft'", [review.id, user.id])
comments = cursor.fetchone()[0]
cursor.execute("""SELECT count(*) FROM commentchains, commentchainchanges
WHERE commentchains.review=%s
AND commentchains.state=commentchainchanges.from_state
AND commentchainchanges.chain=commentchains.id
AND commentchainchanges.uid=%s
AND commentchainchanges.state='draft'
AND (commentchainchanges.from_state='addressed' OR commentchainchanges.from_state='closed')
AND commentchainchanges.to_state='open'""",
[review.id, user.id])
reopened = cursor.fetchone()[0]
cursor.execute("""SELECT count(*) FROM commentchains, commentchainchanges
WHERE commentchains.review=%s
AND commentchains.state='open'
AND commentchainchanges.chain=commentchains.id
AND commentchainchanges.uid=%s
AND commentchainchanges.state='draft'
AND commentchainchanges.from_state='open'
AND commentchainchanges.to_state='closed'""",
[review.id, user.id])
closed = cursor.fetchone()[0]
cursor.execute("""SELECT count(*) FROM commentchains, commentchainchanges
WHERE commentchains.review=%s
AND commentchainchanges.chain=commentchains.id
AND commentchainchanges.uid=%s
AND commentchainchanges.state='draft'
AND commentchainchanges.from_type=commentchains.type
AND commentchainchanges.to_type!=commentchains.type""",
[review.id, user.id])
morphed = cursor.fetchone()[0]
return { "reviewedNormal": reviewed,
"unreviewedNormal": unreviewed,
"reviewedBinary": reviewedBinary,
"unreviewedBinary": unreviewedBinary,
"writtenComments": comments,
"reopenedIssues": reopened,
"resolvedIssues": closed,
"morphedChains": morphed }
def getDraftItems(db, user, review):
return "approved=%(reviewedNormal)d,disapproved=%(unreviewedNormal)d,approvedBinary=%(reviewedBinary)d,disapprovedBinary=%(unreviewedBinary)d,comments=%(writtenComments)d,reopened=%(reopenedIssues)d,closed=%(resolvedIssues)d,morphed=%(morphedChains)d" % review.getDraftStatus(db, user)
def renderDraftItems(db, user, review, target):
items = review.getDraftStatus(db, user)
target.addExternalStylesheet("resource/review.css")
target.addExternalScript("resource/review.js")
div = target.div(id='draftStatus')
if any(items.values()):
div.span('draft').text("Draft: ")
approved = items.pop("reviewedNormal", None)
if approved:
div.text(' ')
div.span('approved').text("reviewed %d line%s" % (approved, approved > 1 and "s" or ""))
if any(items.values()): div.text(',')
disapproved = items.pop("unreviewedNormal", None)
if disapproved:
div.text(' ')
div.span('disapproved').text("unreviewed %d line%s" % (disapproved, disapproved > 1 and "s" or ""))
if any(items.values()): div.text(',')
approved = items.pop("reviewedBinary", None)
if approved:
div.text(' ')
div.span('approved-binary').text("reviewed %d binary file%s" % (approved, approved > 1 and "s" or ""))
if any(items.values()): div.text(',')
disapproved = items.pop("unreviewedBinary", None)
if disapproved:
div.text(' ')
div.span('disapproved-binary').text("unreviewed %d binary file%s" % (disapproved, disapproved > 1 and "s" or ""))
if any(items.values()): div.text(',')
comments = items.pop("writtenComments", None)
if comments:
div.text(' ')
div.span('comments').text("wrote %d comment%s" % (comments, comments > 1 and "s" or ""))
if any(items.values()): div.text(',')
reopened = items.pop("reopenedIssues", None)
if reopened:
div.text(' ')
div.span('reopened').text("reopened %d issue%s" % (reopened, reopened > 1 and "s" or ""))
if any(items.values()): div.text(',')
closed = items.pop("resolvedIssues", None)
if closed:
div.text(' ')
div.span('closed').text("resolved %d issue%s" % (closed, closed > 1 and "s" or ""))
if any(items.values()): div.text(',')
morphed = items.pop("morphedChains", None)
if morphed:
div.text(' ')
div.span('closed').text("morphed %d comment%s" % (morphed, morphed > 1 and "s" or ""))
if any(items.values()): div.text(',')
div.text(' ')
buttons = div.span("buttons")
buttons.button(onclick='previewChanges();').text("Preview")
buttons.button(onclick='submitChanges();').text("Submit")
buttons.button(onclick='cancelChanges();').text("Abort")
return True
else:
return False
def addReviewFilters(db, creator, user, review, reviewer_directory_ids, reviewer_file_ids, watcher_directory_ids, watcher_file_ids):
cursor = db.cursor()
cursor.execute("INSERT INTO reviewassignmentstransactions (review, assigner) VALUES (%s, %s) RETURNING id", (review.id, creator.id))
transaction_id = cursor.fetchone()[0]
def add(filter_type, directory_ids, file_ids):
for directory_id, file_id in izip(directory_ids, file_ids):
cursor.execute("""SELECT id, type
FROM reviewfilters
WHERE review=%s
AND uid=%s
AND directory=%s
AND file=%s""",
(review.id, user.id, directory_id, file_id))
row = cursor.fetchone()
if row:
old_filter_id, old_filter_type = row
if old_filter_type == filter_type:
continue
else:
cursor.execute("""DELETE FROM reviewfilters
WHERE id=%s""",
(old_filter_id,))
cursor.execute("""INSERT INTO reviewfilterchanges (transaction, uid, directory, file, type, created)
VALUES (%s, %s, %s, %s, %s, false)""",
(transaction_id, user.id, directory_id, file_id, old_filter_type))
cursor.execute("""INSERT INTO reviewfilters (review, uid, directory, file, type, creator)
VALUES (%s, %s, %s, %s, %s, %s)""",
(review.id, user.id, directory_id, file_id, filter_type, creator.id))
cursor.execute("""INSERT INTO reviewfilterchanges (transaction, uid, directory, file, type, created)
VALUES (%s, %s, %s, %s, %s, true)""",
(transaction_id, user.id, directory_id, file_id, filter_type))
add("reviewer", reviewer_directory_ids, repeat(0))
add("reviewer", repeat(0), reviewer_file_ids)
add("watcher", watcher_directory_ids, repeat(0))
add("watcher", repeat(0), watcher_file_ids)
filters = Filters()
filters.load(db, review=review, user=user)
if user not in review.reviewers and user not in review.watchers and user not in review.owners:
cursor.execute("""INSERT INTO reviewusers (review, uid, type)
VALUES (%s, %s, 'manual')""",
(review.id, user.id,))
delete_files = set()
insert_files = set()
if watcher_directory_ids or watcher_file_ids:
# Unassign changes currently assigned to the affected user.
cursor.execute("""SELECT reviewfiles.id, reviewfiles.file
FROM reviewfiles
JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewuserfiles.uid=%s""",
(review.id, user.id))
for review_file_id, file_id in cursor:
if not filters.isReviewer(db, user.id, file_id):
delete_files.add(review_file_id)
if reviewer_directory_ids or reviewer_file_ids:
# Assign changes currently not assigned to the affected user.
cursor.execute("""SELECT reviewfiles.id, reviewfiles.file
FROM reviewfiles
JOIN changesets ON (changesets.id=reviewfiles.changeset)
JOIN commits ON (commits.id=changesets.child)
JOIN gitusers ON (gitusers.id=commits.author_gituser)
LEFT OUTER JOIN usergitemails USING (email)
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id AND reviewuserfiles.uid=%s)
WHERE reviewfiles.review=%s
AND (usergitemails.uid IS NULL OR usergitemails.uid!=%s)
AND reviewuserfiles.uid IS NULL""",
(user.id, review.id, user.id))
for review_file_id, file_id in cursor:
if filters.isReviewer(db, user.id, file_id):
insert_files.add(review_file_id)
if delete_files:
cursor.executemany("DELETE FROM reviewuserfiles WHERE file=%s AND uid=%s",
izip(delete_files, repeat(user.id)))
cursor.executemany("INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) | |
# ATTENTION! File managed by Puppet. Changes will be overwritten.
from __future__ import print_function
import ConfigParser
import StringIO
import inspect
import itertools
import os
import re
import shlex
import shutil
import subprocess
import threading
import urllib
import saf
from saf.exceptions import *
from saf.packages import em
from saf.packages import requests
import logging
logger = logging.getLogger(__name__)
def method_trace(fn):
from functools import wraps
@wraps(fn)
def wrapper(*my_args, **my_kwargs):
logger.debug(
'>>> %s(%s ; %s ; %s)' % (fn.__name__, inspect.getargspec(fn), my_args, my_kwargs))
out = fn(*my_args, **my_kwargs)
logger.debug('<<< %s' % fn.__name__)
return out
return wrapper
@method_trace
def command_rc(cmd, cwd=None, assert_rc=True, silent=True):
"""
Execute shell command and (optionally, depending on silent flag) print stdout. Return rc
:param cmd: String containing the command (e.g. "git pull")
:param cwd: The directory which will be cwd for the command
:param assert_rc: If True then raise exception if command rc!=0
:param silent: If True then just log.debug(stdout). If False then log.info(stdout)
    :raises SafExecutionException: if rc != 0 (and assert_rc=True)
:return: True if rc=0, False otherwise
"""
# TODO: Cleverly combine this method with command_stdout()
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=cwd)
out, err = [x.decode("utf-8") for x in proc.communicate()]
logger.debug('returned from command with proc.returncode=%s' % proc.returncode)
# remove trailing linefeeds
out = out.rstrip()
err = err.rstrip()
if silent:
logger.debug('stdout:%s' % out)
else:
logger.info('%s' % out)
logger.debug('stderr:%s' % err)
if assert_rc and proc.returncode != 0:
raise SafExecutionException(
"Error (rc:%s) when running %s: %s" % (proc.returncode, cmd, err))
return not proc.returncode
@method_trace
def command_stdout(cmd, cwd=None, assert_rc=True):
"""
Execute shell command. Return stdout
:param cmd: String containing the command (e.g. "git pull")
:param cwd: The directory which will be cwd for the command
:param assert_rc: If True then raise exception if command rc!=0
    :raises SafExecutionException: if rc != 0 (and assert_rc=True)
:return: stdout of process call
"""
proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=cwd)
out, err = [x.decode("utf-8") for x in proc.communicate()]
# remove trailing linefeeds
out = out.rstrip()
err = err.rstrip()
logger.debug('returned from command with proc.returncode=%s' % proc.returncode)
logger.debug('stdout:%s' % out)
logger.debug('stderr:%s' % err)
if assert_rc and proc.returncode != 0:
raise SafExecutionException(
"Error (rc:%s) when running %s: %s" % (proc.returncode, cmd, err))
return out
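
# Example (sketch): typical use of the two helpers above. command_rc() only
# reports success (the negated return code) while command_stdout() returns
# the captured output; both raise SafExecutionException on rc != 0 unless
# assert_rc=False. The command used here is illustrative.
def _example_shell_helpers():
    if command_rc('uname -s', assert_rc=False):
        kernel = command_stdout('uname -s')
        logger.info('running on %s' % kernel)
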
@method_trace
def run_process(cmd, cwd=None, log_output=True):
"""
    Run a process and stream ("tail") its output continuously. This method exists
    solely for that purpose. It should be possible to fold it into the other command
    helpers, but that is tricky; if you try, make sure to test all possible cases.
:param cmd: The command string (e.g. "git pull")
:param cwd: The directory which will be cwd for the command
:param log_output: Whether to additionally capture the output in the logfile or just print it
:raises SafExecutionException
:return: True if shell command $?=0, False otherwise
"""
if type(cmd) is not str:
raise SafExecutionException('run_process requires a string arg')
cmd = shlex.split(cmd)
if cwd:
logger.debug('running "%s" in directory %s' % (cmd, cwd))
else:
logger.debug('running "%s"' % cmd)
process = None
try:
if log_output:
out_func = logger.info
else:
out_func = print
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python/437888#437888
# http://stackoverflow.com/questions/12057794/python-using-popen-poll-on-background-process#12058609
# Also tried several approaches based on
# http://stackoverflow.com/questions/12523044/how-can-i-tail-a-log-file-in-python#12523371
# but was not able to solve the "tail -f problem" (aka continuous stdout processing)
# Also failed with p.communicate()
def process_stream(myprocess, stream): # output-consuming thread
# stream is either stdout or stderr pipe of the process
next_line = None
buf = ''
while True:
out = stream.read(1)
if out == '' and myprocess.poll() is not None:
break
if out != '':
if out == '\n':
next_line = buf
buf = ''
else:
buf += out
if not next_line:
continue
line = next_line
next_line = None
out_func(line)
stream.close()
process = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout_poller = threading.Thread(target=process_stream, args=(process, process.stdout,))
stdout_poller.daemon = True
stdout_poller.start()
stderr_poller = threading.Thread(target=process_stream, args=(process, process.stderr,))
stderr_poller.daemon = True
stderr_poller.start()
# while process.poll() is None:
# logger.debug('running')
# time.sleep(1)
process.wait()
logger.debug('returned from wait() with process.returncode=%s' % process.returncode)
if stdout_poller and stdout_poller.is_alive():
logger.debug('joining stdout_poller')
stdout_poller.join()
logger.debug('joined stdout_poller')
if stderr_poller and stderr_poller.is_alive():
logger.debug('joining stderr_poller')
stderr_poller.join()
logger.debug('joined stderr_poller')
except OSError as e:
logger.error("Error in call: %s" % e)
raise SafExecutionException(e)
except KeyboardInterrupt:
logger.debug('KeyboardInterrupt')
finally:
rc = 255
termination = 'irregular'
if process and process.returncode is not None:
rc = process.returncode
termination = 'regular'
logger.debug('%s exit, rc: %s' % (termination, rc))
# negated shell returncode equals python boolean
# i.e. $?=0 returns True, $?!=0 returns False
return not rc
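
# Example (sketch): run_process() is meant for long-running commands whose
# output should be streamed line by line while they run (the "tail -f" case
# described above). The shell one-liner below is illustrative.
def _example_run_process():
    # Logs "step 1".."step 3" as they are printed, then returns True (rc == 0).
    return run_process("sh -c 'for i in 1 2 3; do echo step $i; sleep 1; done'")
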
@method_trace
def _get_secret():
"""
Retrieve contents of SAF secret file (/app/saf/conf/secret)
:raises SafConfigException if secret not present
:return: string representing the SAF secret
"""
secret_file_name = os.path.join(saf.base_path, 'conf', 'secret')
secret = None
try:
with open(secret_file_name, 'r') as secret_file:
for line in secret_file:
if line.startswith('#'):
continue
else:
secret = line
break
if secret is None:
raise SafConfigException('Missing secret')
return secret
except IOError as e:
raise SafConfigException(e)
@method_trace
def parse_kv_file(file_name):
"""
Retrieve contents of plain key=value file
:param file_name: The name of the file
:raises SafConfigException if the file could not be parsed
:return: dict containing all key/value pairs
"""
try:
parser = ConfigParser.ConfigParser()
# http://stackoverflow.com/questions/19359556/configparser-reads-capital-keys-and-make-them-lower-case#19359720
parser.optionxform = str
with open(file_name) as stream:
# http://stackoverflow.com/questions/2885190/using-pythons-configparser-to-read-a-file-without-section-name
fakefile = StringIO.StringIO("[top]\n" + stream.read())
parser.readfp(fakefile)
result = dict(parser.items('top'))
logger.debug('result:%s' % result)
return result
except IOError as e:
raise SafConfigException('Could not parse file: %s' % e)
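

# Example (sketch): parse_kv_file() accepts a section-less key=value file by
# prepending a fake "[top]" section for ConfigParser. The temporary file and
# its keys are hypothetical.
def _example_parse_kv_file():
    import tempfile
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.properties', delete=False)
    tmp.write('instance=prod01\nport=8443\n')
    tmp.close()
    settings = parse_kv_file(tmp.name)
    logger.info('parsed: %s' % settings)  # e.g. {'instance': 'prod01', 'port': '8443'}
    os.unlink(tmp.name)
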
@method_trace
def encrypt(literal):
literal = ' '.join(literal)
inf_key = itertools.chain.from_iterable(itertools.repeat(_get_secret()))
result = ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(literal, inf_key)).encode(
'base64').strip()
return '{ENC}%s' % result
@method_trace
def decrypt(literal):
if literal.startswith('{ENC}'):
inf_key = itertools.chain.from_iterable(itertools.repeat(_get_secret()))
result = ''.join(
chr(ord(a) ^ ord(b)) for a, b in zip(literal[5:].decode('base64'), inf_key))
return result
else:
raise SafExecutionException("Decrypted values must start with {ENC}")
@method_trace
def wipe_dir(dir_name):
"""
delete contents of dir_name but leave dir_name in place
:param dir_name: The name of the directory to wipe contents from
:raises SafExecutionException if IOError occurs
"""
# http://stackoverflow.com/questions/185936/delete-folder-contents-in-python#185941
for the_file in os.listdir(dir_name):
file_path = os.path.join(dir_name, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except IOError as e:
raise SafExecutionException(e)
@method_trace
def render_template(file_name, overlay_dict):
"""
Render mixin template to resolved file using empy interpreter
:param file_name: The name of the file
:param overlay_dict: Dictionary containing key=value pairs to replace
:raises SafConfigException if the file could not be rendered
:return: dict containing all key/value pairs
"""
if is_binary(file_name):
logger.debug('is_binary:%s' % file_name)
return
with open(file_name) as f:
data = f.read()
f.close()
# overlay_dict must not be modified because of is_confidential check
temp_dict = dict(overlay_dict)
is_confidential = False
for key in temp_dict.keys():
if temp_dict[key].startswith('{ENC}'):
temp_dict[key] = decrypt(temp_dict[key])
if re.search("@\(?%s\)?" % key, data) is not None:
is_confidential = True
logger.debug('is_confidential:%s' % is_confidential)
interpreter = em.Interpreter()
try:
out = interpreter.expand(data, temp_dict)
except Exception as e:
raise SafExecutionException("Problems rendering %s: %s" % (file_name, str(e)))
with open(file_name, 'w') as f:
if is_confidential:
os.chmod(f.name, 0o600)
f.write(out)
f.close()
# http://stackoverflow.com/questions/3685195/line-up-columns-of-numbers-print-output-in-table-format#3685943
@method_trace
def align_columns(lines, is_left_align=True):
cols = map(lambda *row: [str(field) or '' for field in row], *lines)
widths = [max(len(field) for field in col) for col in cols]
format = ['%%%s%ds' % ('-' if is_left_align else '', width) for width in widths]
return [' '.join(format[:len(row)]) % tuple(row) for row in lines]
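
# Example (sketch): align_columns() pads every field to the widest entry of
# its column, which keeps tabular log output readable. The rows below are
# made up.
def _example_align_columns():
    rows = [['name', 'size', 'state'],
            ['frontend', '12', 'running'],
            ['db', '20480', 'stopped']]
    for line in align_columns(rows):
        logger.info(line)
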
# http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
@method_trace
def is_binary(file_name):
    text_chars = bytearray([7, 8, 9, 10, 12, 13, 27]) + bytearray(range(0x20, 0x7f)) + bytearray(
        range(0x80, 0x100))
    # Use a context manager so the file handle is closed after sampling 1 KiB.
    with open(file_name, 'rb') as f:
        data = f.read(1024)
    return bool(data.translate(None, text_chars))
# http://stackoverflow.com/questions/3229419/pretty-printing-nested-dictionaries-in-python
@method_trace
def prettyprint_dict(d, indent=4):
for key, value in sorted(d.iteritems()):
line = ' ' * indent + str(key)
if isinstance(value, dict):
logger.info(line + ':')
prettyprint_dict(value, indent * 2)
else:
logger.info(line + ' : ' + str(value))
# http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
@method_trace
def directory_size(path):
total_size = 0
seen = set()
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
stat = os.stat(fp)
except OSError:
continue
if stat.st_ino in seen:
continue
seen.add(stat.st_ino)
total_size += stat.st_size
return total_size # size in bytes
# http://stackoverflow.com/questions/10123929/python-requests-fetch-a-file-from-a-local-url
class LocalFileAdapter(requests.adapters.BaseAdapter):
"""Protocol Adapter to allow Requests to GET file:// URLs
TODO: Properly handle non-empty hostname portions.
"""
@staticmethod
@method_trace
def _chkpath(method, path):
"""Return an HTTP status for the given filesystem path."""
if method.lower() in ('put', 'delete'):
return 501, "Not Implemented" # TODO
elif method.lower() not in ('get', 'head'):
return 405, "Method Not Allowed"
elif os.path.isdir(path):
return 400, "Path '%s' is not a file" % path
elif not os.path.isfile(path):
return 404, "File '%s' not found" % path
        elif not os.access(path, os.R_OK):
            return 403, "Access Denied"
        else:
            return 200, "OK"
        # Iterate over datasets
datasets = select_metadata(datasets, var_type=var_type)
if var_type == 'feature':
groups = self.group_attributes
else:
groups = [None]
for group_attr in groups:
group_datasets = select_metadata(datasets,
group_attribute=group_attr)
if group_attr is not None:
logger.info("Loading '%s' data of '%s'", var_type, group_attr)
msg = '' if group_attr is None else f" for '{group_attr}'"
if not group_datasets:
raise ValueError(f"No '{var_type}' data{msg} found")
(group_data, x_cube,
weights) = self._get_x_data_for_group(group_datasets, var_type,
group_attr)
x_data = x_data.append(group_data)
# Append weights if desired
if sample_weights is not None:
sample_weights = sample_weights.append(weights)
# Adapt sample_weights if necessary
if sample_weights is not None:
sample_weights.index = pd.MultiIndex.from_tuples(
sample_weights.index, names=self._get_multiindex_names())
logger.info(
"Successfully calculated sample weights for training data "
"using %s", self._cfg['weighted_samples'])
if (sample_weights.max().values[0] /
sample_weights.min().values[0]) > 150.0:
logger.warning(
"Sample weights differ by more than a factor of 150, got "
"a minimum value of %e and a maximum value of %e. This "
"might be caused by differing coordinates in the training "
"cubes",
sample_weights.min().values[0],
sample_weights.max().values[0])
# Convert index back to MultiIndex
x_data.index = pd.MultiIndex.from_tuples(
x_data.index, names=self._get_multiindex_names())
return (x_data, x_cube, sample_weights)
def _extract_y_data(self, datasets, var_type):
"""Extract required y data of type ``var_type`` from ``datasets``."""
allowed_types = ('label', 'prediction_reference')
if var_type not in allowed_types:
raise ValueError(
f"Excepted one of '{allowed_types}' for 'var_type', got "
f"'{var_type}'")
y_data = pd.DataFrame(columns=[self.label], dtype=self._cfg['dtype'])
# Iterate over datasets
datasets = select_metadata(datasets, var_type=var_type)
if var_type == 'label':
groups = self.group_attributes
else:
groups = [None]
for group_attr in groups:
if group_attr is not None:
logger.info("Loading '%s' data of '%s'", var_type, group_attr)
msg = '' if group_attr is None else f" for '{group_attr}'"
group_datasets = select_metadata(datasets,
group_attribute=group_attr)
dataset = self._check_dataset(group_datasets, var_type, self.label,
msg)
if dataset is None:
return None
cube = self._load_cube(dataset)
text = f"{var_type} '{self.label}'{msg}"
self._check_cube_dimensions(cube, None, text)
cube_data = pd.DataFrame(
self._get_cube_data(cube),
columns=[self.label],
index=self._get_multiindex(cube, group_attr=group_attr),
dtype=self._cfg['dtype'],
)
y_data = y_data.append(cube_data)
# Convert index back to MultiIndex
y_data.index = pd.MultiIndex.from_tuples(
y_data.index, names=self._get_multiindex_names())
return y_data
def _get_broadcasted_cube(self, dataset, ref_cube, text=None):
"""Get broadcasted cube."""
msg = '' if text is None else text
target_shape = ref_cube.shape
cube_to_broadcast = self._load_cube(dataset)
data_to_broadcast = np.ma.filled(cube_to_broadcast.data, np.nan)
logger.info("Broadcasting %s from %s to %s", msg,
data_to_broadcast.shape, target_shape)
broadcasted_data = iris.util.broadcast_to_shape(
data_to_broadcast, target_shape, dataset['broadcast_from'])
new_cube = ref_cube.copy(np.ma.masked_invalid(broadcasted_data))
for idx in dataset['broadcast_from']:
new_coord = new_cube.coord(dimensions=idx)
new_coord.points = cube_to_broadcast.coord(new_coord).points
logger.debug("Added broadcasted %s", msg)
return new_cube
def _get_clf_parameters(self, deep=True):
"""Get parameters of pipeline."""
return self._clf.get_params(deep=deep)
def _get_colors_for_features(self, color_coded=True):
"""Get colors for bars of feature importance plot."""
features = self.features_after_preprocessing
if not color_coded:
colors = dict(zip(features, ['b'] * len(features)))
else:
if not np.array_equal(features, self.features):
raise ValueError(
f"Extracting color-coded feature colors is not possible "
f"since features changed after preprocessing, before: "
f"{self.features}, after: {features}")
colors = {}
corrs = self.data['train'][['x', 'y']].corr()
for feature in features:
corr = corrs.loc[('y', self.label), ('x', feature)]
color = 'r' if corr >= 0.0 else 'b'
colors[feature] = color
return colors
def _get_cv_estimator_kwargs(self, cv_estimator, **kwargs):
"""Get keyword arguments for CV estimator class."""
fit_kwargs = self.fit_kwargs
verbosity = self._get_verbosity_parameters(cv_estimator)
cv_kwargs = {
'n_jobs': self._cfg['n_jobs'],
**verbosity,
}
cv_kwargs.update(kwargs)
logger.info("Using keyword argument(s) %s for class %s", cv_kwargs,
cv_estimator)
if isinstance(cv_kwargs.get('cv'), str):
if cv_kwargs['cv'].lower() == 'loo':
cv_kwargs['cv'] = LeaveOneOut()
if cv_kwargs['cv'].lower() == 'logo':
cv_kwargs['cv'] = self._get_logo_cv_kwargs()['cv']
fit_kwargs['groups'] = self._get_logo_cv_kwargs()['groups']
return (cv_kwargs, fit_kwargs)
def _get_features(self):
"""Extract all features from the ``prediction_input`` datasets."""
logger.debug("Extracting features from 'prediction_input' datasets")
pred_name = list(self._datasets['prediction_input'].keys())[0]
pred_name_str = self._get_name(pred_name)
datasets = self._datasets['prediction_input'][pred_name]
(units,
types) = self._get_features_of_datasets(datasets, 'prediction_input',
pred_name)
# Mark categorical variables
categorical = {feature: False for feature in types}
for tag in self._cfg.get('categorical_features', []):
if tag in categorical:
logger.debug("Treating '%s' as categorical feature", tag)
categorical[tag] = True
else:
raise ValueError(
f"Cannot treat '{tag}' as categorical variable, feature "
f"not found")
# Check if features were found
if not units:
raise ValueError(
f"No features for 'prediction_input' data for prediction "
f"'{pred_name_str}' found")
# Check for wrong options
if self._cfg.get('accept_only_scalar_data'):
if 'broadcasted' in types.values():
raise TypeError(
"The use of 'broadcast_from' is not possible if "
"'accept_only_scalar_data' is given")
if 'coordinate' in types.values():
raise TypeError(
"The use of 'coords_as_features' is not possible if "
"'accept_only_scalar_data' is given")
# Convert to DataFrame and sort it
units = pd.DataFrame.from_dict(units,
orient='index',
columns=['units'])
types = pd.DataFrame.from_dict(types,
orient='index',
columns=['types'])
categorical = pd.DataFrame.from_dict(categorical,
orient='index',
columns=['categorical'])
features = pd.concat([units, types, categorical], axis=1).sort_index()
# Return features
logger.info(
"Found %i feature(s) (defined in 'prediction_input' data for "
"prediction '%s')", len(features.index), pred_name_str)
for feature in features.index:
logger.debug("'%s' with units '%s' and type '%s'", feature,
features.units.loc[feature],
features.types.loc[feature])
return features
def _get_features_of_datasets(self, datasets, var_type, pred_name):
"""Extract all features (with units and types) of given datasets."""
pred_name_str = self._get_name(pred_name)
units = {}
types = {}
cube = None
ref_cube = None
for (tag, datasets_) in group_metadata(datasets, 'tag').items():
dataset = datasets_[0]
cube = self._load_cube(dataset)
if 'broadcast_from' not in dataset:
ref_cube = cube
units[tag] = Unit(dataset['units'])
if 'broadcast_from' in dataset:
types[tag] = 'broadcasted'
else:
types[tag] = 'regular'
# Check if reference cube was given
if ref_cube is None:
if cube is None:
raise ValueError(
f"Expected at least one '{var_type}' dataset for "
f" prediction '{pred_name_str}'")
raise ValueError(
f"Expected at least one '{var_type}' dataset for prediction "
f"'{pred_name_str}' without the option 'broadcast_from'")
# Coordinate features
for coord_name in self._cfg.get('coords_as_features', []):
try:
coord = ref_cube.coord(coord_name)
except iris.exceptions.CoordinateNotFoundError:
raise iris.exceptions.CoordinateNotFoundError(
f"Coordinate '{coord_name}' given in 'coords_as_features' "
f"not found in '{var_type}' data for prediction "
f"'{pred_name_str}'")
units[coord_name] = coord.units
types[coord_name] = 'coordinate'
return (units, types)
def _get_group_attributes(self):
"""Get all group attributes from ``label`` datasets."""
logger.debug("Extracting group attributes from 'label' datasets")
grouped_datasets = group_metadata(self._datasets['label'],
'group_attribute',
sort=True)
group_attributes = list(grouped_datasets.keys())
if group_attributes == [None]:
logger.debug("No group attributes given")
else:
logger.info(
"Found %i group attribute(s) (defined in 'label' data)",
len(group_attributes))
logger.debug(pformat(group_attributes))
return np.array(group_attributes)
def _get_label(self):
"""Extract label from training data."""
logger.debug("Extracting label from training datasets")
grouped_datasets = group_metadata(self._datasets['label'], 'tag')
labels = list(grouped_datasets.keys())
if len(labels) > 1:
raise ValueError(f"Expected unique label tag, got {labels}")
units = Unit(self._datasets['label'][0]['units'])
logger.info(
"Found label '%s' with units '%s' (defined in 'label' "
"data)", labels[0], units)
label = pd.DataFrame.from_dict({labels[0]: units},
orient='index',
columns=['units'])
return label
def _get_lime_feature_importance(self, x_pred):
"""Get most important feature given by LIME."""
logger.info(
"Calculating global feature importance using LIME (this may take "
"a while...)")
x_pred = self._impute_nans(x_pred)
# Most important feature for single input
def _most_important_feature(x_single_pred, explainer, predict_fn):
"""Get most important feature for single input."""
explanation = explainer.explain_instance(x_single_pred, predict_fn)
local_exp = explanation.local_exp[1]
sorted_exp = sorted(local_exp, key=lambda elem: elem[0])
norm = sum([abs(elem[1]) for elem in sorted_exp])
return [abs(elem[1]) / norm for elem in sorted_exp]
# Apply on whole input (using multiple processes)
parallel = Parallel(n_jobs=self._cfg['n_jobs'])
lime_feature_importance = parallel(
[delayed(_most_important_feature)(x,
explainer=self._lime_explainer,
predict_fn=self._clf.predict)
for x in x_pred.values]
)
lime_feature_importance = np.array(lime_feature_importance,
dtype=self._cfg['dtype'])
lime_feature_importance = np.moveaxis(lime_feature_importance, -1, 0)
lime_feature_importance = dict(zip(self.features,
lime_feature_importance))
return lime_feature_importance
def _get_logo_cv_kwargs(self):
"""Get :class:`sklearn.model_selection.LeaveOneGroupOut` CV."""
if not self._cfg['group_datasets_by_attributes']:
raise ValueError(
"Cannot create 'LeaveOneGroupOut' CV splitter, "
"'group_datasets_by_attributes' was not given during "
"class initialization")
kwargs = {
'cv': LeaveOneGroupOut(),
'groups': self.data['train'].y.index.get_level_values(0).values,
}
return kwargs
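    # For illustration only (hedged; the toy arrays below are not part of
    # this class): LeaveOneGroupOut yields one split per unique group, e.g.
    #
    #     from sklearn.model_selection import LeaveOneGroupOut
    #     import numpy as np
    #     x = np.arange(8).reshape(4, 2)
    #     y = np.arange(4)
    #     groups = ['A', 'A', 'B', 'B']
    #     for train_idx, test_idx in LeaveOneGroupOut().split(x, y, groups):
    #         print(train_idx, test_idx)   # -> [2 3] [0 1], then [0 1] [2 3]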
def _get_mask(self, x_data, data_type):
"""Get mask for missing features."""
x_regular = x_data[self.features[self.features_types == 'regular']]
# Get points where no regular feature is given
mask = x_regular.isnull().all(axis=1).values
logger.debug(
"Removing %i %s point(s) where all regular features are missing",
mask.sum(), data_type)
# Get other missing points if desired
if self._cfg['imputation_strategy'] == 'remove':
mask = x_data.isnull().any(axis=1).values
logger.debug(
"Removing total %i %s point(s) where at least one feature is "
"missing (because imputation_strategy = 'remove')", mask.sum(),
data_type)
return mask
def _get_multiindex(self, ref_cube, group_attr=None):
"""Get :class:`pandas.MultiIndex` for data."""
group_attr = self._group_attr_to_pandas_index_str(group_attr)
index = pd.MultiIndex.from_product(
[[group_attr], np.arange(ref_cube.data.size)],
names=self._get_multiindex_names(),
)
return index
def _get_multiindex_names(self):
"""Get names for :class:`pandas.MultiIndex` for data."""
return ['-'.join(self._cfg['group_datasets_by_attributes']), 'index']
def _get_plot_feature(self, feature):
"""Get :obj:`str` of selected ``feature`` and respective units."""
units = self._get_plot_units(self.features_units[feature])
return f'{feature} [{units}]'
def _get_plot_label(self):
"""Get :obj:`str` of label and respective units."""
return f'{self.label} [{self._get_plot_units(self.label_units)}]'
def _get_plot_units(self, units):
"""Get plot units version of specified ``units``."""
return self._cfg['plot_units'].get(str(units), str(units))
def _get_prediction_dict(self, pred_name, x_pred, x_err, y_ref,
get_mlr_model_error=None,
get_lime_importance=False,
get_propagated_errors=False, **kwargs):
"""Get prediction output in a dictionary."""
logger.info("Predicting %i point(s)", len(x_pred.index))
y_preds = self._clf.predict(x_pred, **kwargs)
pred_dict = self._prediction_to_dict(y_preds, **kwargs)
# Estimate error of MLR model itself
if get_mlr_model_error:
node 4
mastercoord.MasterCoord(0.0, 0.5, 0.5), # node 5
mastercoord.MasterCoord(0.0, 0.0, 0.5), # node 6
mastercoord.MasterCoord(0.5, 0.0, 0.0), # node 7
mastercoord.MasterCoord(0.5, 0.5, 0.0), # node 8
mastercoord.MasterCoord(0.5, 0.0, 0.5), # node 9
mastercoord.MasterCoord(0.25, 0.25, 0.25),
mastercoord.MasterCoord(0.15, 0.2, 0.25),
mastercoord.MasterCoord(0.25, 0.1, 0.3)
]
@memcheck
def Interpolate_Linear(self):
self.interpolate('x + 2*y - 3*z')
@memcheck
def Interpolate_Quadratic(self):
self.interpolate(
'x**2 - 2*x*y + 3*z**2 + 2*y**2 + x - y + z + x*z + y*z')
@memcheck
@unittest.expectedFailure
def Interpolate_Cubic(self):
        self.interpolate('x**3 + y**3 + z**3')
@memcheck
def Integrate_Constant(self):
self.integrate('1.0', self.volume)
@memcheck
def Integrate_Linear_X(self):
self.integrate('x', 0.5*self.volume*self.size[0])
@memcheck
def Integrate_Linear_Y(self):
self.integrate('y', 0.5*self.volume*self.size[1])
@memcheck
def Integrate_Linear_Z(self):
self.integrate('z', 0.5*self.volume*self.size[2])
@memcheck
def Integrate_Quadratic_X(self):
self.integrate('x*x', (1/3.)*self.volume*self.size[0]**2)
@memcheck
def Integrate_Quadratic_Y(self):
self.integrate('y*y', (1/3.)*self.volume*self.size[1]**2)
@memcheck
def Integrate_Quadratic_Z(self):
self.integrate('z*z', (1/3.)*self.volume*self.size[2]**2)
@memcheck
def Derivative_Constant(self):
self.derivative('1.0', ('0.0', '0.0', '0.0'))
@memcheck
def Derivative_Linear(self):
self.derivative('x+2*y', ('1.0', '2.0', '0.0'))
@memcheck
def Derivative_Quadratic(self):
self.derivative('x**2 + 2*x*y + z**2', ('2*x + 2*y', '2*x', '2*z'))
# Interpolate_Cubic uses the same shape functions as
# Interpolate_Quadratic, but more Gauss points. It only checks
# integration, because it's identical to Interpolate_Quadratic for
# interpolation and differentiation.
class OOF_Mesh_Interpolate_Cubic(OOF_Mesh_Interpolate):
def elementTypes(self):
return ['TET4_10', 'T3_6', 'Q4_8', 'D2_3']
def order(self):
return 3 # determines no. of Gauss points
@memcheck
def Integrate_Quadratic_X(self):
self.integrate('x*x', (1/3.)*self.volume*self.size[0]**2)
@memcheck
def Integrate_Quadratic_Y(self):
self.integrate('y*y', (1/3.)*self.volume*self.size[1]**2)
@memcheck
def Integrate_Quadratic_Z(self):
self.integrate('z*z', (1/3.)*self.volume*self.size[2]**2)
@memcheck
def Integrate_Cubic_X(self):
self.integrate('x*x*x', 0.25*self.volume*self.size[0]**3)
@memcheck
def Integrate_Cubic_Y(self):
self.integrate('y*y*y', 0.25*self.volume*self.size[1]**3)
@memcheck
def Integrate_Cubic_Z(self):
self.integrate('z*z*z', 0.25*self.volume*self.size[2]**3)
@memcheck
@unittest.expectedFailure
def Integrate_Quartic_X(self):
self.integrate('x**4', 0.20*self.volume*self.size[0]**4)
class OOF_Mesh_Interpolate_Quartic(OOF_Mesh_Interpolate):
def elementTypes(self):
return ['TET4_10', 'T3_6', 'Q4_8', 'D2_3']
def order(self):
return 4 # determines no. of Gauss points
@memcheck
def Integrate_Quadratic_X(self):
self.integrate('x*x', (1/3.)*self.volume*self.size[0]**2)
@memcheck
def Integrate_Quadratic_Y(self):
self.integrate('y*y', (1/3.)*self.volume*self.size[1]**2)
@memcheck
def Integrate_Quadratic_Z(self):
self.integrate('z*z', (1/3.)*self.volume*self.size[2]**2)
@memcheck
def Integrate_Cubic_X(self):
self.integrate('x*x*x', 0.25*self.volume*self.size[0]**3)
@memcheck
def Integrate_Cubic_Y(self):
self.integrate('y*y*y', 0.25*self.volume*self.size[1]**3)
@memcheck
def Integrate_Cubic_Z(self):
self.integrate('z*z*z', 0.25*self.volume*self.size[2]**3)
@memcheck
def Integrate_Quartic_X(self):
self.integrate('x**4', 0.20*self.volume*self.size[0]**4)
class OOF_Mesh_Integrate_Higher(OOF_Mesh_Interpolate):
def elementTypes(self):
return ['TET4_10', 'T3_6', 'Q4_8', 'D2_3']
@memcheck
def Integrate(self):
for order in (5,6,7,8):
fn = 'y**{o} + x**{o} + z**{o}'.format(o=order)
print >> sys.stderr, "Integrating", fn
self.integrate(fn,
(1./(order+1))*self.volume*(self.size[0]**order +
self.size[1]**order +
self.size[2]**order),
places=7, order=order)
# Integrate with more gauss points than necessary
for order in (5,6,7):
fn = 'y**{o} + x**{o} + z**{o}'.format(o=order)
print >> sys.stderr, "Integrating", fn
self.integrate(fn,
(1./(order+1))*self.volume*(self.size[0]**order +
self.size[1]**order +
self.size[2]**order),
places=7, order=order+1)
# Tests for interpolation and integration on lower dimensional finite
# elements. These are most easily obtained from the boundaries of the
# mesh. The tests therefore logically belong in the boundary tests or
# after them, not here. But are there other tests before the boundary
# tests that rely on the lower dimensional elements?
class OOF_Mesh_Triangle(OOF_Mesh_Interpolate):
def setUp(self):
OOF_Mesh_Interpolate.setUp(self)
faces = self.msh.getBoundary("Zmax").faceset
self.elements = [f.element for f in faces]
self.area = self.size[0]*self.size[1]
def tearDown(self):
del self.elements
OOF_Mesh_Interpolate.tearDown(self)
def integrate(self, fn, expected, places=7, order=None):
# fn is a string containing a function of x and y, which will
# be integrated over the Zmax face of the Mesh. expected is
# the expected value of the integral.
integral = 0.0
for e in self.elements:
gpts = e.integration_points(order or self.order())
for gpt in gpts:
pt = gpt.coord()
                val = eval(fn, {'x': pt[0], 'y': pt[1]})
integral += val*gpt.weight()
self.assertAlmostEqual((expected-integral)/expected, 0.0, places=places)
def interpolate(self, fn):
# fn is a string containing a function of x and y.
self.define_field(fn)
for e in self.elements:
pts = [e.from_master(m) for m in self.mastercoords()]
expected = [eval(fn, {'x':pt[0], 'y':pt[1]}) for pt in pts]
o = e.outputFields(self.msh_obj, Temperature, self.mastercoords())
got = [oo.value() for oo in o]
for (gt, xpctd) in zip(got, expected):
self.assertAlmostEqual(gt, xpctd)
## Derivatives wrt real space coordinates are problematic for 2D
## elements in 3-space. See comments in ElementBase::Jdmasterdx
## in element.C.
# def derivative(self, fn, derivs):
# # fn is a string containing a function of x and y. derivs is
# # a 2-tuple of strings containing the x and y derivatives of fn.
# self.define_field(fn)
# for e in self.elements:
# pts = [e.from_master(m) for m in self. mastercoords()]
# for c in outputClones.SpaceComponent.values:
# if c == 'z':
# continue
# expr = derivs[c.index()]
# expected = [eval(expr, {'x':pt[0], 'y':pt[1]}) for pt in pts]
# o = e.outputFieldDerivs(self.msh_obj, Temperature, c,
# self.mastercoords())
# self.assertEqual(len(o), len(expected))
# got = [oo.value() for oo in o]
# for (gt, xpctd) in zip(got, expected):
# self.assertAlmostEqual(gt, xpctd)
class OOF_Mesh_Triangle_Linear(OOF_Mesh_Triangle):
def elementTypes(self):
return ['TET4_4', 'T3_3', 'Q4_4', 'D2_2']
def order(self):
return 1
# Points at which to test interpolation
def mastercoords(self):
return [mastercoord.masterCoord2D(0., 0.),
mastercoord.masterCoord2D(1., 0.),
mastercoord.masterCoord2D(0., 1.),
mastercoord.masterCoord2D(1/3., 1/3.),
mastercoord.masterCoord2D(0.1, 0.1),
mastercoord.masterCoord2D(0.3, 0.6)
]
@memcheck
def Interpolate_Linear(self):
self.interpolate("2*x - 3*y")
@memcheck
@unittest.expectedFailure
def Interpolate_Quadratic(self):
self.interpolate("2*x*x + 3*y*y")
@memcheck
def Integrate_Linear_X(self):
self.integrate("2*x", self.area*self.size[0])
@memcheck
@unittest.expectedFailure # polynomial order higher than gauss pt order
def Integrate_Quadratic_X(self):
self.integrate("3*x*x", self.area*self.size[0]**2)
@memcheck
def Integrate_Quadratic_X2(self):
self.integrate("3*x*x", self.area*self.size[0]**2, order=2)
@memcheck
def Integrate_Higher_X(self):
for order in range(0, 9):
fn = 'x**{o}'.format(o=order)
print >> sys.stderr, "Integrating", fn
self.integrate(fn, self.area*self.size[0]**order/(order+1),
order=order)
@memcheck
def Integrate_Linear_Y(self):
self.integrate("2*y", self.area*self.size[1])
@memcheck
@unittest.expectedFailure # polynomial order higher than gauss pt order
def Integrate_Quadratic_Y(self):
self.integrate("3*y*y", self.area*self.size[1]**2)
@memcheck
def Integrate_Quadratic_Y2(self):
self.integrate("3*y*y", self.area*self.size[1]**2, order=2)
@memcheck
def Integrate_Higher_Y(self):
for order in range(0, 9):
fn = 'y**{o}'.format(o=order)
print >> sys.stderr, "Integrating", fn
self.integrate(fn, self.area*self.size[1]**order/(order+1),
order=order)
# @memcheck
# def Derivative_Constant(self):
# self.derivative('-3.14', ('0.0', '0.0'))
# @memcheck
# def Derivative_Linear(self):
# self.derivative('x+3*y', ('1.0', '3.0'))
# @memcheck
# @unittest.expectedFailure
# def Derivative_Bilinear(self):
# self.derivative('x*y', ('y', 'x'))
# @memcheck
# @unittest.expectedFailure
# def Derivative_Quadratic(self):
# self.derivative('x**2 + y**2', ('2*x', '2*y'))
class OOF_Mesh_Triangle_Quadratic(OOF_Mesh_Triangle):
def elementTypes(self):
return ['TET4_10', 'T3_6', 'Q4_8', 'D2_3']
def order(self):
return 2 # determines no. of Gauss points
def mastercoords(self):
return [mastercoord.masterCoord2D(0., 0.),
mastercoord.masterCoord2D(1., 0.),
mastercoord.masterCoord2D(0., 1.),
mastercoord.masterCoord2D(0., 0.5),
mastercoord.masterCoord2D(0.5, 0.),
mastercoord.masterCoord2D(0.5, 0.5),
mastercoord.masterCoord2D(1/3., 1/3.),
mastercoord.masterCoord2D(0.1, 0.1),
mastercoord.masterCoord2D(0.3, 0.6)
]
@memcheck
def Interpolate_Linear(self):
self.interpolate("2*x - 3*y")
@memcheck
def Interpolate_Quadratic(self):
self.interpolate("2*x*x + 3*y*y")
# @memcheck
# def Derivative_Constant(self):
# self.derivative('-3.14', ('0.0', '0.0'))
# @memcheck
# def Derivative_Linear(self):
# self.derivative('x+3*y', ('1.0', '3.0'))
# @memcheck
# def Derivative_Bilinear(self):
# self.derivative('x*y', ('y', 'x'))
# @memcheck
# def Derivative_Quadratic(self):
# self.derivative('x**2 + y**2', ('2*x', '2*y'))
# @memcheck
# @unittest.expectedFailure
# def Derivative_Cubic(self):
# self.derivative("x**3 + y**3", ('3*x**2', '3*y**2'))
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# There is a toolbox for mesh cross-section operations, but it doesn't
# have a special menu, it just makes calls to the menu items tested
# here.
class OOF_Mesh_CrossSection(OOF_Mesh_FieldEquation):
@memcheck
def New(self):
OOF.Mesh.Cross_Section.New(\
name="testcs", mesh="meshtest:skeleton:fe_test",
cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
self.assertEqual(len(self.msh.allCrossSectionNames()),1)
cs = self.msh.cross_sections['testcs']
self.assertEqual(cs.start, Point(0.2,0.5))
self.assertEqual(cs.end, Point(12.2,4.5))
@memcheck
def Remove(self):
OOF.Mesh.Cross_Section.New(mesh="meshtest:skeleton:fe_test",
name="testcs", cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
OOF.Mesh.Cross_Section.Remove(
mesh="meshtest:skeleton:fe_test", name="testcs")
self.assertEqual(len(self.msh.allCrossSectionNames()),0)
@memcheck
def Copy(self):
OOF.Mesh.Cross_Section.New(
mesh="meshtest:skeleton:fe_test",
name="testcs", cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
OOF.Mesh.Cross_Section.Copy(
current="meshtest:skeleton:fe_test", cross_section="testcs",
mesh="meshtest:skeleton:fe_test", name="testcs_copy")
self.assertEqual(len(self.msh.allCrossSectionNames()),2)
cs1 = self.msh.cross_sections['testcs']
cs2 = self.msh.cross_sections['testcs_copy']
self.assertNotEqual(id(cs1),id(cs2))
@memcheck
def Select(self):
# Since the most recent cs is autoselected, we need at least
# two before we can meaningfully test selection.
OOF.Mesh.Cross_Section.New(
mesh="meshtest:skeleton:fe_test",
name="testcs1", cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
OOF.Mesh.Cross_Section.New(mesh="meshtest:skeleton:fe_test",
name="testcs2", cross_section=StraightCrossSection(
start=Point(4.5,12.2), end=Point(0.2,4.5)))
self.assertEqual(self.msh.cross_sections.selectedName(),
"testcs2")
OOF.Mesh.Cross_Section.Select(
mesh="meshtest:skeleton:fe_test", cross_section="testcs1")
self.assertEqual(self.msh.cross_sections.selectedName(),
"testcs1")
@memcheck
def Deselect(self):
OOF.Mesh.Cross_Section.New(
mesh="meshtest:skeleton:fe_test",
name="testcs", cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
self.assertEqual(self.msh.cross_sections.selectedName(),"testcs")
OOF.Mesh.Cross_Section.Deselect(mesh="meshtest:skeleton:fe_test")
self.assertEqual(self.msh.cross_sections.selectedName(), None)
@memcheck
def Rename(self):
OOF.Mesh.Cross_Section.New(
mesh="meshtest:skeleton:fe_test",
name="testcs", cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
cs1 = self.msh.cross_sections['testcs']
OOF.Mesh.Cross_Section.Rename(mesh="meshtest:skeleton:fe_test",
cross_section="testcs", name="bob")
cs2 = self.msh.cross_sections['bob']
self.assertEqual(len(self.msh.allCrossSectionNames()), 1)
self.assertEqual(id(cs1),id(cs2))
@memcheck
def Edit(self):
OOF.Mesh.Cross_Section.New(
mesh="meshtest:skeleton:fe_test",
name="testcs", cross_section=StraightCrossSection(
start=Point(0.2,0.5), end=Point(12.2,4.5)))
OOF.Mesh.Cross_Section.Edit(
mesh="meshtest:skeleton:fe_test", name="testcs",
cross_section=StraightCrossSection(
start=Point(0.3,0.6), end=Point(12.1,4.2)))
self.assertEqual(len(self.msh.allCrossSectionNames()), 1)
cs = self.msh.cross_sections['testcs']
self.assertEqual(cs.start, Point(0.3,0.6))
self.assertEqual(cs.end, Point(12.1, 4.2))
class OOF_Mesh_BoundaryCondition(OOF_Mesh_FieldEquation):
def setUp(self):
# global profile
# from ooflib.engine import profile
# self.all_profiles = profile.AllProfiles
OOF_Mesh_FieldEquation.setUp(self)
# Activate temp and disp fields, set up a well-posed problem.
OOF.Mesh.Field.Define(mesh="meshtest:skeleton:fe_test",
field=Temperature)
OOF.Mesh.Field.Activate(mesh="meshtest:skeleton:fe_test",
field=Temperature)
OOF.Mesh.Field.Define(mesh="meshtest:skeleton:fe_test",
field=Displacement)
OOF.Mesh.Field.Activate(mesh="meshtest:skeleton:fe_test",
field=Displacement)
OOF.Mesh.Equation.Activate(mesh="meshtest:skeleton:fe_test",
equation=Heat_Eqn)
OOF.Mesh.Equation.Activate(mesh="meshtest:skeleton:fe_test",
equation=Force_Balance)
# Still fairly basic.
@memcheck
def NewBC(self):
from ooflib.engine import bdycondition
OOF.Mesh.Boundary_Conditions.New(
name="bc_test", mesh="meshtest:skeleton:fe_test",
condition=DirichletBC(
field=Temperature,
field_component="",
equation=Heat_Eqn,
eqn_component="",
profile=ContinuumProfileXTd(
function="x",timeDerivative='0',timeDerivative2='0'),
boundary="Ymax"))
self.assertEqual(len(self.msh.allBoundaryConds()), 1)
bc = self.msh.getBdyCondition("bc_test")
self.assertEqual(bc.__class__, bdycondition.DirichletBC)
@memcheck
def DeleteBC(self):
OOF.Mesh.Boundary_Conditions.New(
name="bc_test", mesh="meshtest:skeleton:fe_test",
condition=DirichletBC(
field=Temperature,
field_component="",
equation=Heat_Eqn,
eqn_component="",
profile=ContinuumProfileXTd(
function="x",timeDerivative='0',timeDerivative2='0'),
boundary="Ymax"))
OOF.Mesh.Boundary_Conditions.Delete(
mesh="meshtest:skeleton:fe_test", name="bc_test")
self.assertEqual(len(self.msh.allBoundaryConds()), 0)
@memcheck
def RenameBC(self):
from ooflib.engine import bdycondition
OOF.Mesh.Boundary_Conditions.New(
name="bc_test", mesh="meshtest:skeleton:fe_test",
condition=DirichletBC(
field=Temperature,
field_component="",
equation=Heat_Eqn,
eqn_component="",
profile=ContinuumProfileXTd(
function="x",timeDerivative='0',timeDerivative2='0'),
boundary="Ymax"))
OOF.Mesh.Boundary_Conditions.Rename(
mesh="meshtest:skeleton:fe_test", bc="bc_test", name="bob")
self.assertEqual(len(self.msh.allBoundaryConds()), 1)
bc = self.msh.getBdyCondition("bob")
self.assertEqual(bc.__class__, bdycondition.DirichletBC)
@memcheck
def CopyBC(self):
OOF.Mesh.Boundary_Conditions.New(
name="bc_test", mesh="meshtest:skeleton:fe_test",
condition=DirichletBC(
field=Temperature,
field_component="",
equation=Heat_Eqn,
eqn_component="",
profile=ContinuumProfileXTd(
function="x",timeDerivative='0',timeDerivative2='0'),
boundary="Ymax"))
OOF.Mesh.Boundary_Conditions.Copy(
current="meshtest:skeleton:fe_test",
mesh="meshtest:skeleton:fe_test", bc="bc_test",
name="bc_test_copy", boundary="Ymin")
self.assertEqual(len(self.msh.allBoundaryConds()),2)
bc1 = self.msh.getBdyCondition("bc_test")
bc2 = self.msh.getBdyCondition("bc_test_copy")
self.assertNotEqual(id(bc1),id(bc2))
@memcheck
def Copy_AllBC(self):
    def is_living (self, id_or_person):
s = id_or_person
if type(id_or_person) is not str:
s = id_or_person.id
return s in self.people and self.people[s].death is None
def get_person (self, id1):
economy = self
if id1 in economy.people:
return economy.people[id1]
elif id1 in economy.tombs:
return economy.tombs[id1].person
return None
def die (self, persons):
economy = self
if isinstance(persons, base.Person):
persons = [persons]
for p in persons:
assert p.death is None
dt = Death()
dt.term = economy.term
p.death = dt
tomb = Tomb()
tomb.death_term = economy.term
tomb.person = p
economy.tombs[p.id] = tomb
for p in persons:
if p.dominator_position is None:
continue
p.get_dominator().resign()
for p in persons:
if p.id in economy.dominator_parameters:
economy.dominator_parameters[p.id].economy = None
del economy.dominator_parameters[p.id]
for p in persons:
spouse = None
if p.marriage is not None \
and (p.marriage.spouse == ''
or economy.is_living(p.marriage.spouse)):
spouse = p.marriage.spouse
if p.marriage is not None:
p.die_relation(p.marriage)
for a in p.adulteries:
p.die_relation(a)
            # Even if a father or mother dies, their records need no update,
            # but the child information affects the desired number of
            # children, so it must be updated.
if p.father != '' and economy.is_living(p.father):
economy.people[p.father].die_child(p.id)
if p.mother != '' and economy.is_living(p.mother):
economy.people[p.mother].die_child(p.id)
fst_heir = None
if p.supporting:
if p.supported is not None \
and economy.is_living(p.supported):
p.die_supporting(p.supported)
elif fst_heir is None or p.death.inheritance_share is None:
p.die_supporting(None)
else:
p.die_supporting(fst_heir)
p.supporting = []
if p.supported:
p.die_supported()
p.supported = None
p.supported = fst_heir
class EconomyDM (Economy0):
def new_person (self, district_num, male_rate=0.5,
age_min=18, age_max=50):
economy = self
p = Person()
p.economy = economy
p.sex = 'M' if random.random() < male_rate else 'F'
p.district = district_num
p.id = economy.id_generator.generate(str(p.district) + p.sex)
economy.people[p.id] = p
p.age = random.uniform(age_min, age_max)
p.birth_term = economy.term - int(p.age * 12)
if ARGS.init_zero:
p.prop = 0
else:
p.prop = half_normal_rand(0, ARGS.init_prop_sigma)
x = random.random()
if x < ARGS.peasant_ratio:
if ARGS.no_land:
p.land = 0
else:
p.land = negative_binominal_rand(ARGS.land_r,
ARGS.land_theta) + 1
p.consumption = p.land * ARGS.prop_value_of_land * 0.025 \
+ p.prop * 0.05
p.ambition = random.random()
p.education = random.random()
p.adult_success = np.random.geometric(0.5) - 1
p.want_child_base = random.uniform(2, 12)
p.cum_donation = (p.prop + p.land * ARGS.prop_value_of_land) \
* random.random() * p.age
if p.age < 40:
p.fertility = math.sqrt(random.random())
else:
p.fertility = random.random()
if p.fertility < 0.1:
p.fertility = 0
return p
def new_dominator (self, position, person, adder=0):
economy = self
p = person
if p.id in economy.dominator_parameters:
d = economy.dominator_parameters[p.id]
adder = 0
else:
d = Dominator()
economy.dominator_parameters[p.id] = d
d.id = p.id
d.people_trust = random.random()
d.faith_realization = random.random()
d.disaster_prophecy = random.random()
d.disaster_strategy = random.random()
d.disaster_tactics = random.random()
d.combat_prophecy = random.random()
#d.combat_strategy = random.random()
d.combat_tactics = random.random()
while adder != 0:
sgn = 0
if adder > 0:
adder -= 1
sgn = +1
elif adder < 0:
adder += 1
sgn = -1
for n in ['people_trust',
'faith_realization',
'disaster_prophecy',
'disaster_strategy',
'disaster_tactics',
'combat_prophecy',
# 'combat_strategy',
'combat_tactics']:
u = sgn * random.random() * ARGS.dominator_adder
setattr(d, n, np_clip(getattr(d, n) + u, 0, 1))
d.economy = economy
d.district = p.district
d.position = position
p.dominator_position = position
if position == 'king':
economy.nation.king = d
elif position == 'governor':
economy.nation.districts[p.district].governor = d
elif position == 'vassal':
economy.nation.vassals.append(d)
elif position == 'cavalier':
economy.nation.districts[p.district].cavaliers.append(d)
return d
def delete_dominator (self, person):
economy = self
p = person
position = p.dominator_position
if position is None:
return
if position == 'king':
economy.nation.king = None
elif position == 'governor':
economy.nation.districts[p.district].governor = None
elif position == 'vassal':
economy.nation.vassals = [d for d in economy.nation.vassals
if d.id != p.id]
elif position == 'cavalier':
economy.nation.districts[p.district].cavaliers \
= [d for d in economy.nation.districts[p.district].cavaliers
if d.id != p.id]
p.dominator_position = None
def calc_dominator_work (self, dominator1, work_func):
economy = self
d = dominator1
nation = economy.nation
dist = nation.districts[d.district]
f = work_func
a_king = f(nation.king)
vab = [f(d) for d in nation.vassals]
vht = np.mean([d.soothed_hating_to_king() for d in nation.vassals])
a_vassals = (0.5 + 0.5 * (1 - vht)) \
* ((1/3) * max(vab) + (2/3) * np.mean(vab))
a_governor = (0.75 + 0.25 * (1 - dist.governor.soothed_hating_to_king())) \
* f(dist.governor)
a_cavalier = f(d)
r_king = 0.5 + 0.5 * (1 - d.soothed_hating_to_king())
r_vassals = 3
r_governor = 0.5 + 0.5 * (1 - d.soothed_hating_to_governor())
r_cavalier = 5
p = (r_king * a_king + r_vassals * a_vassals \
+ r_governor * a_governor + r_cavalier * a_cavalier) \
/ (r_king + r_vassals + r_governor + r_cavalier)
p *= 0.75 + 0.25 \
* (1 - max([d.soothed_hating_to_king(), d.soothed_hating_to_governor()]))
p *= dist.tmp_power
return p
def add_family_political_hating (self, people, max_adder):
economy = self
fa = set()
for p in people:
if p.supported is not None:
fa.add(p.supported)
else:
fa.add(p.id)
for pid in fa:
p = economy.people[pid]
for qid in [p.id] + p.supporting:
q = economy.people[qid]
a = random.uniform(0, max_adder)
q.political_hating = np_clip(q.political_hating + a, 0, 1)
def add_political_hating (self, people, max_adder):
economy = self
fa = set()
for p in people:
a = random.uniform(0, max_adder)
p.political_hating = np_clip(p.political_hating + a, 0, 1)
def injure (self, people, max_permanent=0.5, max_temporal=0.5,
permanent_injury_rate=None):
economy = self
if permanent_injury_rate is None:
permanent_injury_rate = ARGS.permanent_injury_rate
fa = set()
for p in people:
b = random.uniform(0, max_temporal)
p.tmp_injured = np_clip(p.tmp_injured + b, 0, 1)
if random.random() < permanent_injury_rate:
a = random.uniform(0, max_permanent)
p.injured = np_clip(p.injured + a, 0, 1)
position_rank_table = {
None: 0,
'cavalier': 1,
'vassal': 2,
'governor': 3,
'king': 4
}
def position_rank (self, pos):
return type(self).position_rank_table[pos]
class Economy (EconomyBT, EconomyDT, EconomyDM):
pass
class EconomyPlot0 (Frozen):
def __init__ (self):
#plt.style.use('bmh')
fig = plt.figure(figsize=(6, 4))
#plt.tight_layout()
self.ax1 = fig.add_subplot(2, 2, 1)
self.ax2 = fig.add_subplot(2, 2, 2)
self.ax3 = fig.add_subplot(2, 2, 3)
self.ax4 = fig.add_subplot(2, 2, 4)
self.options = {}
def plot (self, economy):
ax = self.ax1
ax.clear()
view = ARGS.view_1
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_title('%s: %s' % (term_to_year_month(economy.term), t))
f(ax, economy)
ax = self.ax2
ax.clear()
view = ARGS.view_2
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_title(t)
f(ax, economy)
ax = self.ax3
ax.clear()
view = ARGS.view_3
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_xlabel(t)
f(ax, economy)
ax = self.ax4
ax.clear()
view = ARGS.view_4
if view is not None and view != 'none':
t, f = self.options[view]
ax.set_xlabel(t)
f(ax, economy)
class EconomyPlotEC (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'asset': ('Asset', self.view_asset),
'prop': ('Prop', self.view_prop),
'land': ('Land', self.view_land),
'land-vs-prop': ('Land vs Prop', self.view_land_vs_prop),
})
def view_asset (self, ax, economy):
ax.hist(list(map(lambda x: x.asset_value(),
economy.people.values())), bins=ARGS.bins)
def view_prop (self, ax, economy):
ax.hist(list(map(lambda x: x.prop,
economy.people.values())), bins=ARGS.bins)
def view_land (self, ax, economy):
ax.hist(list(map(lambda x: x.land,
economy.people.values())), bins=ARGS.bins)
def view_land_vs_prop (self, ax, economy):
ax.scatter(list(map(lambda x: x.land, economy.people.values())),
list(map(lambda x: x.prop, economy.people.values())),
c="pink", alpha=0.5)
class EconomyPlotBT (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'population': ('Population', self.view_population),
'children': ('Children', self.view_children),
'children_wanting': ('Ch Want', self.view_children_wanting),
'male-fertility': ('M Fertility', self.view_male_fertility),
'female-fertility': ('F Fertility', self.view_female_fertility)
})
def view_population (self, ax, economy):
ax.hist([x.age for x in economy.people.values() if x.death is None],
bins=ARGS.bins)
def view_children (self, ax, economy):
x = []
y = []
for p in economy.people.values():
if p.age < 12 or p.death is not None:
continue
x.append(p.age)
y.append(len(p.children))
ax.scatter(x, y, c="pink", alpha=0.5)
def view_children_wanting (self, ax, economy):
x = []
y = []
for p in economy.people.values():
if p.age < 12 or p.death is not None:
continue
x.append(p.age)
y.append(p.children_wanting())
ax.hist(y, bins=ARGS.bins)
#ax.scatter(x, y, c="pink", alpha=0.5)
def view_male_fertility (self, ax, economy):
l = [x.fertility for x in economy.people.values()
if x.sex == 'M' and x.death is None]
n0 = len([True for x in l if x == 0])
l2 = [x for x in l if x != 0]
ax.hist(l2, bins=ARGS.bins)
print("Fertility 0:", n0, "/", len(l), "Other Mean:", np.mean(l2))
def view_female_fertility (self, ax, economy):
l = [x.fertility for x in economy.people.values()
if x.sex == 'F' and x.death is None]
n0 = len([True for x in l if x == 0])
l2 = [x for x in l if x != 0]
ax.hist(l2, bins=ARGS.bins)
print("Fertility 0:", n0, "/", len(l), "Other Mean:", np.mean(l2))
class EconomyPlotAD (EconomyPlot0):
def __init__ (self):
super().__init__()
self.options.update({
'adulteries': ('Adulteries', self.view_adulteries),
'adultery-separability':
('Ad Separability', self.view_adultery_separability),
'adultery-age-vs-years':
('Adultery age vs years', self.view_adultery_age_vs_years)
})
def view_adultery_age_vs_years (self, ax, economy):
m1 = []
m2 = []
        for
'''The SimpleSystemBase is not a true concurrency environment, but it
runs in the context of the current thread and simply queues actor
sends to call each actor's handler in sequence. This system can be
used for simple actor environments where no parallelism is achieved,
but any actor doing long-running or suspending (e.g. I/O) activities
will pause or halt the entire system.
* Synchronous message delivery
* Local system only
* All execution is in the current thread (actor system only runs
when .tell() or .ask() is called).
* createActor() always creates the actor instance immediately before returning.
If used in a multi-threaded application, only the main thread will be
used for Actor functionality (and thus the main thread is required to
call ask(), tell(), or listen() (aka ATL) to enable the Actors to run).
All threads other than the main thread should use a .private() context
to call ATL; this simple system base is not re-entrant and calling ATL
on the main actor system will cause indeterminate results and
corrupted data. When a non-main thread calls a context's ATL, that
thread will block until the main thread makes an ATL call on the main
ActorSystem. A completion of a non-main thread's ATL will run in that
thread, but the main thread will not exit until the primary ATL completes.
'''
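# A minimal usage sketch of this base (hedged: the Hello actor and the
# message strings below are illustrative and not part of this module):
#
#     from thespian.actors import Actor, ActorSystem
#
#     class Hello(Actor):
#         def receiveMessage(self, message, sender):
#             self.send(sender, 'Hello, %s' % message)
#
#     asys = ActorSystem('simpleSystemBase')   # selects this system base
#     addr = asys.createActor(Hello)
#     print(asys.ask(addr, 'world', 1))        # runs synchronously in this thread
#     asys.shutdown()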
import logging, string, types, functools
from thespian.actors import *
from thespian.system.utilis import (actualActorClass, partition,
withPossibleInitArgs)
from thespian.system.timing import (timePeriodSeconds, toTimeDeltaOrNone,
ExpirationTimer, currentTime, unexpired)
try:
from logging.config import dictConfig
except ImportError:
# Old python that doesn't contain this...
from thespian.system.dictconfig import dictConfig
from thespian.system import isInternalActorSystemMessage
from thespian.system.messages.status import *
from thespian.system.sourceLoader import loadModuleFromHashSource, SourceHashFinder
import time
import traceback
import threading
try:
import queue as Queue
except ImportError:
import Queue
import weakref
class ActorRef:
"Used internally to store the actual actor instance and associated information."
def __init__(self, actorSystem, parentAddr, addr, inst, mySourceHash=None):
self._system = actorSystem
self._oldm = parentAddr
self._addr = addr
self._inst = inst # briefly None until set
self._mySourceHash = mySourceHash
self._yung = [] # actorAddress of children
# The number of current children is len(self._yung); the
# childCounter keeps track of the total number of children
# this Actor has had in its ENTIRE lifetime (i.e. it is not
# decremented when children are removed from self._yung. The
# childCounter is used when generating the unique ActorAddress
# for a new child.
self._childCounter = 0
@property
def parent(self): return self._oldm
@property
def address(self): return self._addr
@property
def instance(self): return self._inst
@instance.setter
def instance(self, newInstance): self._inst = newInstance
@property
def childCount(self): return self._childCounter
def addChild(self, childAddr):
self._yung.append(childAddr)
self._childCounter = self._childCounter + 1
def removeChild(self, childAddr):
self._yung = [c for c in self._yung if c != childAddr]
def shutdown(self):
for child in self._yung:
self._system._systemBase.actor_send(
self._system._systemBase.actorRegistry[self._system.systemAddress.actorAddressString].address,
child, ActorExitRequest())
# Functionality vectoring for the Actor this represents.
def createActor(self, actorClass, targetActorRequirements, globalName, sourceHash=None):
return self._system._systemBase.newActor(self._addr, actorClass, self._system,
targetActorRequirements, globalName,
sourceHash or self._mySourceHash)
def actor_send(self, toActorAddr, msg):
self._system._systemBase.actor_send(self._addr, toActorAddr, msg)
def wakeupAfter(self, timePeriod, payload):
self._system._systemBase.wakeupAfter(self._addr, timePeriod, payload)
def handleDeadLetters(self, address, enable):
self._system._handleDeadLetters(address, enable)
def registerSourceAuthority(self, address):
self._system._systemBase.registerSourceAuthority(address)
def notifyOnSourceAvailability(self, address, enable):
self._system._systemBase.registerSourceNotifications(address, enable)
def updateCapability(self, capabilityName, capabilityValue):
self._system.updateCapability(capabilityName, capabilityValue)
def loadActorSource(self, fname):
return self._system._systemBase.loadActorSource(fname)
def unloadActorSource(self, sourceHash):
self._system._systemBase.unloadActorSource(sourceHash)
def notifyOnSystemRegistrationChanges(self, address, enable):
pass # ignored: simple systems don't have registration
def logger(self, name=None):
return logging.LoggerAdapter(logging.getLogger(name),
{'actorAddress': self._addr})
# ----------------------------------------------------------------------
_nameValid = string.ascii_letters # what characters are valid in an ActorAddress
_nameValidLen = len(_nameValid) # precompute the size for convenience
def _namegen(v):
if v == 0: return 'a'
x,y = divmod(v, _nameValidLen)
return (_namegen(x) if x else '') + _nameValid[y]
def _newAddress(prefix, childCount):
return ActorAddress(prefix + '~' + _namegen(childCount))
def _newChildAddress(parentRef):
"Returns a new candidate ActorAddress for a child Actor about to be created."
# Note that this address is not fixed/reserved until the child
# is actually created (thereby incrementing the parentRef
# childCount), so calling this multiple times without creating
    # the child may return the same value.
return _newAddress(parentRef.address.actorAddressString, parentRef.childCount)
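# For illustration (hedged, derived from the definitions above): with
# _nameValid = string.ascii_letters (52 characters),
#     _namegen(0)  -> 'a'
#     _namegen(51) -> 'Z'
#     _namegen(52) -> 'ba'
# so _newAddress('/A', 53) yields an ActorAddress whose string form is '/A~bb'.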
# ----------------------------------------------------------------------
class PendingSend:
"used internally for marshalling pending send operations"
def __init__(self, sender, msg, toActor):
self.sender = sender
self.toActor = toActor
self.msg = msg
self.attempts = 0
def __str__(self): return 'PendingSend(#%d %s -> %s: %s)'%(self.attempts, self.sender, self.toActor, self.msg)
class BadActor(Actor): # useable as a "null" Actor which does nothing.
name = 'BadActor'
def receiveMessage(self, msg, sender):
logging.getLogger('Thespian').debug('BadActor discarding message')
pass # Throws away all messages
class External(Actor):
"""Proxy for a requester outside the system. Messages sent to this
Actor will be queued and delivered as the result of
ActorSystem().ask() and ActorSystem().listen() calls."""
def receiveMessage(self, msg, sender):
if not hasattr(self, 'responses'): self.responses = []
self.responses.append(msg)
def actor_base_receive(actorInst, msg, sender):
logging.getLogger('Thespian').debug('Actor "%s" got message "%s" from "%s"',
actorInst, msg, sender)
actorInst.receiveMessage(msg, sender)
class actorLogFilter(logging.Filter):
def filter(self, logrecord): return 'actorAddress' in logrecord.__dict__
class notActorLogFilter(logging.Filter):
def filter(self, logrecord): return 'actorAddress' not in logrecord.__dict__
import sys
defaultLoggingConfig = {
'version': 1,
'formatters': {
'defaultFmt': {
'format': '%(asctime)s %(levelname)-7s => %(message)s [%(filename)s:%(lineno)s]',
},
'actorFmt': {
'format': '%(asctime)s %(levelname)-7s %(actorAddress)s => %(message)s [%(filename)s:%(lineno)s]',
},
},
'filters': {
'isActorLog': {'()': actorLogFilter},
'notActorLog': {'()': notActorLogFilter},
},
'handlers': {
'actorLogHandler': { 'class': 'logging.StreamHandler',
'level': 'INFO',
'stream': sys.stderr,
'formatter': 'actorFmt',
'filters': [ 'isActorLog' ],
},
'regLogHandler': { 'class': 'logging.StreamHandler',
'level': 'INFO',
'stream': sys.stderr,
'formatter': 'defaultFmt',
'filters': [ 'notActorLog' ],
},
},
'root': { 'handlers': ['actorLogHandler',
'regLogHandler',
],
},
'disable_existing_loggers': False,
}
class WakeupManager(object):
def __init__(self):
# _wakeUps is a list of (targetAddress, ExpirationTimer, payload)
self._wakeUps = []
def _pop_expired_wakeups(self, ct):
exp, self._wakeUps = partition(lambda E: E[1].view(ct).expired(), self._wakeUps)
return exp
def _next_wakeup(self):
"Returns the ExpirationTimer for the next wakeup to occur"
return min([T for A,T,P in self._wakeUps]) if self._wakeUps else None
def _add_wakeup(self, from_actor, time_period, payload):
self._wakeUps.append(
(from_actor, ExpirationTimer(time_period), payload))
def add_wakeups_to_status(self, statusmsg):
# we only pass the affected actor and its associated ExpirationTimer
# but we exclude the payload as it's irrelevant for status info.
statusmsg.addWakeups([(T[0], T[1]) for T in self._wakeUps])
return statusmsg
class ActorSystemBase(WakeupManager):
def __init__(self, system, logDefs = None):
super(ActorSystemBase, self).__init__()
self.system = system
self._pendingSends = []
if logDefs is not False: dictConfig(logDefs or defaultLoggingConfig)
self._primaryActors = []
self._primaryCount = 0
self._globalNames = {}
self.procLimit = 0
self._sources = {} # key = sourcehash, value = encrypted zipfile data
self._sourceAuthority = None # ActorAddress of Source Authority
self._sourceNotifications = [] # list of actor addresses to notify of loads
asys = self._newRefAndActor(system, system.systemAddress,
system.systemAddress,
External)
extreq = self._newRefAndActor(system, system.systemAddress,
ActorAddress('System:ExternalRequester'),
External)
badActor = self._newRefAndActor(system, system.systemAddress,
ActorAddress('System:BadActor'), BadActor)
self.actorRegistry = { # key=ActorAddress string, value=ActorRef
system.systemAddress.actorAddressString: asys,
'System:ExternalRequester': extreq,
'System:BadActor': badActor,
}
self._internalAddresses = list(self.actorRegistry.keys())
self._private_lock = threading.RLock()
self._private_count = 0
system.capabilities['Python Version'] = tuple(sys.version_info)
system.capabilities['Thespian Generation'] = ThespianGeneration
system.capabilities['Thespian Version'] = str(int(time.time()*1000))
system.capabilities['Thespian ActorSystem Name'] = 'simpleSystem'
system.capabilities['Thespian ActorSystem Version'] = 2
system.capabilities['Thespian Watch Supported'] = False
system.capabilities['AllowRemoteActorSources'] = 'No'
def shutdown(self):
while self._sources:
self.unloadActorSource(list(self._sources.keys())[0])
def external_clone(self):
with self._private_lock:
self._private_count += 1
cnt = self._private_count
return ActorSystemPrivate(self, cnt)
def _runSends(self, timeout=None, stop_on_available=False):
numsends = 0
endtime = ExpirationTimer(toTimeDeltaOrNone(timeout))
for endt in unexpired(endtime):
while self._pendingSends:
numsends += 1
if self.procLimit and numsends > self.procLimit:
raise RuntimeError('Too many sends')
self._realizeWakeups()
with self._private_lock:
try:
nextmsg = self._pendingSends.pop(0)
except IndexError:
pass
else:
self._runSingleSend(nextmsg)
if stop_on_available and \
any([not isInternalActorSystemMessage(M)
for M in getattr(stop_on_available.instance,
'responses', [])]):
return
if endt.remaining(forever=-1) == -1:
return
next_wakeup = self._next_wakeup()
if next_wakeup is None or next_wakeup > endt:
return
time.sleep(max(0, timePeriodSeconds(next_wakeup.view().remaining())))
self._realizeWakeups()
def _runSingleSend(self, ps):
if ps.attempts > 4:
# discard message if PoisonMessage deliveries are also
# failing
return
elif ps.attempts > 2:
if isinstance(ps.msg, PoisonMessage):
return # no recursion on Poison
rcvr, sndr, msg = ps.sender, ps.toActor, \
PoisonMessage(ps.msg,
getattr(ps, 'fail_details', None))
else:
rcvr, sndr, msg = ps.toActor, ps.sender, ps.msg
tgt = self.actorRegistry.get(rcvr.actorAddressString, None)
if not tgt:
tgt = self.actorRegistry.get('DeadLetterBox', None)
msg = DeadEnvelope(rcvr.actorAddressString, msg)
if tgt:
if rcvr == self.system.systemAddress and isinstance(msg, ValidatedSource):
self._loadValidatedActorSource(msg.sourceHash, msg.sourceZip)
elif tgt.instance:
if isinstance(msg, Thespian_StatusReq):
self._generateStatusResponse(msg, tgt, sndr)
else:
killActor = isinstance(ps.msg, ActorExitRequest)
self._callActorWithMessage(tgt, ps, msg, sndr)
if killActor and tgt not in [self.actorRegistry[key]
for key in self._internalAddresses]:
self._killActor(tgt, ps)
else:
# This is a Dead Actor and there is no
# DeadLetterHandler. Just discard the message
pass
else:
# Target Actor no longer exists. Handle internal
            # messages
import logging
import re
import socket
import threading
import time
from collections import deque
import six
from requests import ConnectionError
from requests.exceptions import ChunkedEncodingError
from ..errors import Etcd3WatchCanceled
from ..models import EventEventType
from ..utils import check_param
from ..utils import get_ident
from ..utils import log
EventType = EventEventType
class OnceTimeout(IOError):
"""
Timeout caused by watch once
"""
pass
class KeyValue(object): # pragma: no cover
"""
Model of the key-value of the event
"""
def __init__(self, data):
self._data = data
self.key = data.get('key')
self.create_revision = data.get('create_revision')
self.mod_revision = data.get('mod_revision')
self.value = data.get('value')
self.lease = data.get('lease')
def get(self, key, default=None):
return self._data.get(key, default)
def __getitem__(self, item):
return self._data.get(item)
def __iter__(self):
return iter(self._data)
def __contains__(self, item):
return item in self._data
def __repr__(self):
return "<KeyValue of '%s'>" % self.key
class Event(KeyValue):
"""
Watch event
"""
def __init__(self, data, header=None):
"""
:param data: dict data of a etcdserverpbWatchResponse.events[<mvccpbEvent>]
:param header: the header of etcdserverpbWatchResponse
"""
super(Event, self).__init__(data.kv._data)
self.header = header
self.type = data.type or EventType.PUT # default is PUT
self._data['type'] = self.type
self.prev_kv = None
if 'prev_kv' in data:
self.prev_kv = KeyValue(data.prev_kv._data)
self._data['prev_kv'] = self.prev_kv
def __repr__(self):
return "<WatchEvent %s '%s'>" % (self.type.value, self.key)
class Watcher(object):
@check_param(at_least_one_of=['key', 'all'], at_most_one_of=['range_end', 'prefix', 'all'])
def __init__(self, client, max_retries=-1, key=None, range_end=None, start_revision=None, progress_notify=None,
prev_kv=None, prefix=None, all=None, no_put=False, no_delete=False):
"""
Initialize a watcher
:type client: BaseClient
:param client: client instance of etcd3
:type max_retries: int
:param max_retries: max retries when watch failed due to network problem, -1 means no limit [default: -1]
:type key: str or bytes
:param key: key is the key to register for watching.
:type range_end: str or bytes
:param range_end: range_end is the end of the range [key, range_end) to watch. If range_end is not given,
only the key argument is watched. If range_end is equal to '\0', all keys greater than
or equal to the key argument are watched.
If the range_end is one bit larger than the given key,
then all keys with the prefix (the given key) will be watched.
:type start_revision: int
:param start_revision: start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
:type progress_notify: bool
:param progress_notify: progress_notify is set so that the etcd server will periodically send a WatchResponse with
no events to the new watcher if there are no recent events. It is useful when clients
wish to recover a disconnected watcher starting from a recent known revision.
The etcd server may decide how often it will send notifications based on current load.
:type prev_kv: bool
:param prev_kv: If prev_kv is set, created watcher gets the previous KV before the event happens.
If the previous KV is already compacted, nothing will be returned.
:type prefix: bool
:param prefix: if the key is a prefix [default: False]
:type all: bool
:param all: all the keys [default: False]
:type no_put: bool
:param no_put: filter out the put events at server side before it sends back to the watcher. [default: False]
:type no_delete: bool
:param no_delete: filter out the delete events at server side before it sends back to the watcher. [default: False]
"""
self.client = client
self.revision = None
self.watch_id = None
self.retries = 0
self.errors = deque(maxlen=20)
if max_retries == -1:
max_retries = 9223372036854775807 # maxint
self.max_retries = max_retries
self.callbacks = []
self.callbacks_lock = threading.Lock()
self.watching = False
self.timeout = None # only meaningful for watch_once
self._thread = None
self._resp = None
self._once = False
self.key = key
self.range_end = range_end
self.start_revision = start_revision
self.progress_notify = progress_notify
self.prev_kv = prev_kv
self.prefix = prefix
self.all = all
self.no_put = no_put
self.no_delete = no_delete
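    # A minimal usage sketch (hedged: ``client`` stands for an already
    # constructed etcd3 client instance, which is an assumption here):
    #
    #     w = Watcher(client, key='/config/', prefix=True, prev_kv=True)
    #     w.onEvent(EventType.PUT, lambda e: print('put', e.key, e.value))
    #     w.onEvent('/config/feature.*', lambda e: print('feature change', e))
    #     w.runDaemon()          # watch in a background daemon thread
    #     ...
    #     w.stop()               # close the stream and join the thread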
def set_default_timeout(self, timeout):
"""
Set the default timeout of watch request
:type timeout: int
:param timeout: timeout in seconds
"""
self.timeout = timeout
def clear_revision(self):
"""
Clear the start_revision that stored in watcher
"""
self.start_revision = None
self.revision = None
def clear_callbacks(self):
"""
Remove all callbacks
"""
with self.callbacks_lock:
self.callbacks = []
def request_create(self):
"""
Start a watch request
"""
if self.revision is not None: # continue last watch
self.start_revision = self.revision + 1
return self.client.watch_create(
key=self.key, range_end=self.range_end, start_revision=self.start_revision,
progress_notify=self.progress_notify, prev_kv=self.prev_kv,
prefix=self.prefix, all=self.all, no_put=self.no_put, no_delete=self.no_delete,
timeout=self.timeout
)
def request_cancel(self): # pragma: no cover
"""
        Cancel the watcher [Not Implemented because etcd3 returns no watch_id]
"""
# once really implemented, the error handling of Etcd3WatchCanceled when manually cancel should be considered
if self.watch_id:
# return self.client.watch_cancel(watch_id=self.watch_id)
pass
@staticmethod
def get_filter(filter):
"""
Get the event filter function
:type filter: callable or regex string or EventType or None
:param filter: will generate a filter function from this param
:return: callable
"""
if callable(filter):
filter_func = filter
elif isinstance(filter, (six.string_types, bytes)):
regex = re.compile(filter)
def py2_filter_func(e):
key = e.key
return regex.match(key)
def py3_filter_func(e):
try:
key = six.text_type(e.key, encoding='utf-8')
except Exception:
return
return regex.match(key)
filter_func = py3_filter_func if six.PY3 else py2_filter_func
elif filter is None:
filter_func = lambda e: True
elif isinstance(filter, EventType):
filter_func = lambda e: e.type == filter
else:
raise TypeError('expect filter to be one of string, EventType, callable got %s' % type(filter))
return filter_func
def onEvent(self, filter_or_cb, cb=None):
"""
        Add a callback for events that match the filter.
        If only one parameter is given (filter_or_cb), it is treated as the
        callback and is called for every event.
:type filter_or_cb: callable or regex string or EventType
:param filter_or_cb: filter or callback function
:param cb: the callback function
"""
if cb:
filter = filter_or_cb
else:
filter = None
cb = filter_or_cb
if not callable(cb):
raise TypeError('callback should be a callable')
filter_func = self.get_filter(filter)
with self.callbacks_lock:
self.callbacks.append((filter_func, filter, cb))
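# Usage sketch (hypothetical watcher/callback names; EventType is assumed to
# expose PUT as in the surrounding etcd3 event model) showing the three filter
# forms accepted by onEvent:
#
#     watcher.onEvent(on_any_event)                     # no filter: every event
#     watcher.onEvent(r'/config/.*', on_config_change)  # regex matched against event keys
#     watcher.onEvent(EventType.PUT, on_put)            # only PUT events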
@check_param(at_least_one_of=['filter', 'cb'])
def unEvent(self, filter=None, cb=None): # noqa # ignore redefinition of filter
"""
Remove a callback and/or filter that was previously added via onEvent()
If both parameters are given they are ANDed together; to OR them, make two calls.
:type filter: callable or regex string or EventType
:param filter: the callable filter, regex string or EventType the event to be removed was registered with
:param cb: the callback function the event to be removed was registered with
"""
with self.callbacks_lock:
for i in reversed(range(len(self.callbacks))):
efilter, eraw_filter, ecb = self.callbacks[i]
if cb is not None and ecb != cb:
continue
if filter is not None and filter not in (efilter, eraw_filter):
continue
del self.callbacks[i]
def dispatch_event(self, event):
"""
Find the callbacks whose filter matches this event and call them
:param event: Event
"""
log.debug("dispatching event '%s'" % event)
with self.callbacks_lock:
callbacks = [cb for filtr, _, cb in self.callbacks if filtr(event)]
for cb in callbacks:
cb(event)
def _ensure_callbacks(self):
if not self.callbacks:
raise TypeError("haven't watch on any event yet, use onEvent to watch a event")
def _ensure_not_watching(self):
if self.watching is True:
raise RuntimeError("already watching")
if self._thread and self._thread.is_alive() and self._thread.ident != get_ident():
raise RuntimeError("watch thread seems running")
def _kill_response_stream(self):
if not self._resp or (self._resp and self._resp.raw.closed):
return
try:
log.debug("closing response stream")
self.request_cancel()
s = socket.fromfd(self._resp.raw._fp.fileno(), socket.AF_INET, socket.SOCK_STREAM)
s.shutdown(socket.SHUT_RDWR)
s.close()
self._resp.raw.close()
self._resp.close()
self._resp.connection.close()
except Exception:
pass
def run(self):
"""
Run the watcher and handle events via callbacks
"""
self._ensure_callbacks()
self._ensure_not_watching()
self.errors.clear()
try:
with self:
for event in self:
self.dispatch_event(event)
finally:
self._kill_response_stream()
self.watching = False
def stop(self):
"""
Stop watching, close the watch stream and exit the daemon thread
"""
log.debug("stop watching")
self.watching = False
self._kill_response_stream()
if self._thread and self._thread.is_alive() and self._thread.ident != get_ident():
self._thread.join()
cancel = stop
def runDaemon(self):
"""
Run Watcher in a daemon thread
"""
self._ensure_callbacks()
self._ensure_not_watching()
t = self._thread = threading.Thread(target=self.run)
t.setDaemon(True)
t.start()
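# Usage sketch (assumes a watcher instance with at least one callback already
# registered via onEvent): run it in a background daemon thread, then stop it
# when the surrounding application shuts down:
#
#     watcher.runDaemon()
#     ...  # do other work
#     watcher.stop()   # closes the watch stream and joins the thread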
def watch_once(self, filter=None, timeout=None):
"""
Watch for an event that matches the filter and return it as soon as one arrives
If the watch times out, return None
"""
filter = self.get_filter(filter)
old_timeout = self.timeout
self.timeout = timeout
try:
self._once = True
with self:
for event in self:
if filter(event):
return event
except OnceTimeout:
return
finally:
self.stop()
self._once = False
self.timeout = old_timeout
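# Usage sketch (EventType.DELETE is assumed from the surrounding etcd3 event
# model): block until a matching event arrives or 30 seconds pass; a return
# value of None means the watch timed out.
#
#     event = watcher.watch_once(filter=EventType.DELETE, timeout=30)
#     if event is not None:
#         handle_delete(event)   # hypothetical handler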
def __enter__(self):
self._ensure_not_watching()
self._resp = self.request_create()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def __del__(self):
self.stop()
def __iter__(self):
self.errors.clear()
retries = 0
while True:
try:
self.watching = True
| |
"""
Platformer Game
python -m arcade.examples.platform_tutorial.11_animate_character
"""
import math
import os
import arcade
# Constants
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Platformer"
# Constants used to scale our sprites from their original size
TILE_SCALING = 0.5
CHARACTER_SCALING = TILE_SCALING * 2
COIN_SCALING = TILE_SCALING
SPRITE_PIXEL_SIZE = 128
GRID_PIXEL_SIZE = SPRITE_PIXEL_SIZE * TILE_SCALING
# Movement speed of player, in pixels per frame
PLAYER_MOVEMENT_SPEED = 7
GRAVITY = 1.5
PLAYER_JUMP_SPEED = 30
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
LEFT_VIEWPORT_MARGIN = 200
RIGHT_VIEWPORT_MARGIN = 200
BOTTOM_VIEWPORT_MARGIN = 150
TOP_VIEWPORT_MARGIN = 100
PLAYER_START_X = 2
PLAYER_START_Y = 1
# Constants used to track if the player is facing left or right
RIGHT_FACING = 0
LEFT_FACING = 1
LAYER_NAME_MOVING_PLATFORMS = "Moving Platforms"
LAYER_NAME_PLATFORMS = "Platforms"
LAYER_NAME_COINS = "Coins"
LAYER_NAME_BACKGROUND = "Background"
LAYER_NAME_LADDERS = "Ladders"
LAYER_NAME_PLAYER = "Player"
LAYER_NAME_ENEMIES = "Enemies"
def load_texture_pair(filename):
"""
Load a texture pair, with the second being a mirror image.
"""
return [
arcade.load_texture(filename),
arcade.load_texture(filename, flipped_horizontally=True),
]
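# Usage sketch (the sprite variable is hypothetical): index the returned pair
# with the facing constants defined above; the texture path follows the same
# pattern built in Entity.__init__ below.
#
#     textures = load_texture_pair(
#         ":resources:images/animated_characters/robot/robot_idle.png")
#     sprite.texture = textures[RIGHT_FACING]   # original image
#     sprite.texture = textures[LEFT_FACING]    # horizontally mirrored copy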
class Entity(arcade.Sprite):
def __init__(self, name_folder, name_file):
super().__init__()
# Default to facing right
self.facing_direction = RIGHT_FACING
# Used for image sequences
self.cur_texture = 0
self.scale = CHARACTER_SCALING
main_path = f":resources:images/animated_characters/{name_folder}/{name_file}"
self.idle_texture_pair = load_texture_pair(f"{main_path}_idle.png")
self.jump_texture_pair = load_texture_pair(f"{main_path}_jump.png")
self.fall_texture_pair = load_texture_pair(f"{main_path}_fall.png")
# Load textures for walking
self.walk_textures = []
for i in range(8):
texture = load_texture_pair(f"{main_path}_walk{i}.png")
self.walk_textures.append(texture)
# Load textures for climbing
self.climbing_textures = []
texture = arcade.load_texture(f"{main_path}_climb0.png")
self.climbing_textures.append(texture)
texture = arcade.load_texture(f"{main_path}_climb1.png")
self.climbing_textures.append(texture)
# Set the initial texture
self.texture = self.idle_texture_pair[0]
# Hit box will be set based on the first image used. If you want to specify
# a different hit box, you can do it like the code below.
# self.set_hit_box([[-22, -64], [22, -64], [22, 28], [-22, 28]])
self.set_hit_box(self.texture.hit_box_points)
class Enemy(Entity):
def __init__(self, name_folder, name_file):
# Setup parent class
super().__init__(name_folder, name_file)
self.should_update_walk = 0
def update_animation(self, delta_time: float = 1 / 60):
# Figure out if we need to flip face left or right
if self.change_x < 0 and self.facing_direction == RIGHT_FACING:
self.facing_direction = LEFT_FACING
elif self.change_x > 0 and self.facing_direction == LEFT_FACING:
self.facing_direction = RIGHT_FACING
# Idle animation
if self.change_x == 0:
self.texture = self.idle_texture_pair[self.facing_direction]
return
# Walking animation
if self.should_update_walk == 3:
self.cur_texture += 1
if self.cur_texture > 7:
self.cur_texture = 0
self.texture = self.walk_textures[self.cur_texture][self.facing_direction]
self.should_update_walk = 0
return
self.should_update_walk += 1
class RobotEnemy(Enemy):
def __init__(self):
# Set up parent class
super().__init__("robot", "robot")
class ZombieEnemy(Enemy):
def __init__(self):
# Set up parent class
super().__init__("zombie", "zombie")
class PlayerCharacter(Entity):
"""Player Sprite"""
def __init__(self):
# Set up parent class
super().__init__("male_person", "malePerson")
# Track our state
self.jumping = False
self.climbing = False
self.is_on_ladder = False
def update_animation(self, delta_time: float = 1 / 60):
# Figure out if we need to flip face left or right
if self.change_x < 0 and self.facing_direction == RIGHT_FACING:
self.facing_direction = LEFT_FACING
elif self.change_x > 0 and self.facing_direction == LEFT_FACING:
self.facing_direction = RIGHT_FACING
# Climbing animation
if self.is_on_ladder:
self.climbing = True
if not self.is_on_ladder and self.climbing:
self.climbing = False
if self.climbing and abs(self.change_y) > 1:
self.cur_texture += 1
if self.cur_texture > 7:
self.cur_texture = 0
if self.climbing:
self.texture = self.climbing_textures[self.cur_texture // 4]
return
# Jumping animation
if self.change_y > 0 and not self.is_on_ladder:
self.texture = self.jump_texture_pair[self.facing_direction]
return
elif self.change_y < 0 and not self.is_on_ladder:
self.texture = self.fall_texture_pair[self.facing_direction]
return
# Idle animation
if self.change_x == 0:
self.texture = self.idle_texture_pair[self.facing_direction]
return
# Walking animation
self.cur_texture += 1
if self.cur_texture > 7:
self.cur_texture = 0
self.texture = self.walk_textures[self.cur_texture][self.facing_direction]
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self):
"""
Initializer for the game
"""
# Call the parent class and set up the window
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Set the path to start with this program
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Track the current state of what key is pressed
self.left_pressed = False
self.right_pressed = False
self.up_pressed = False
self.down_pressed = False
self.jump_needs_reset = False
# Our TileMap Object
self.tile_map = None
# Our Scene Object
self.scene = None
# Separate variable that holds the player sprite
self.player_sprite = None
# Our 'physics' engine
self.physics_engine = None
# A Camera that can be used for scrolling the screen
self.camera = None
# A Camera that can be used to draw GUI elements
self.gui_camera = None
self.end_of_map = 0
# Keep track of the score
self.score = 0
# Load sounds
self.collect_coin_sound = arcade.load_sound(":resources:sounds/coin1.wav")
self.jump_sound = arcade.load_sound(":resources:sounds/jump1.wav")
self.game_over = arcade.load_sound(":resources:sounds/gameover1.wav")
def setup(self):
"""Set up the game here. Call this function to restart the game."""
# Setup the Cameras
self.camera = arcade.Camera(self.width, self.height)
self.gui_camera = arcade.Camera(self.width, self.height)
# Map name
map_name = f":resources:tiled_maps/map_with_ladders.json"
# Layer Specific Options for the Tilemap
layer_options = {
LAYER_NAME_PLATFORMS: {
"use_spatial_hash": True,
},
LAYER_NAME_MOVING_PLATFORMS: {
"use_spatial_hash": True,
},
LAYER_NAME_LADDERS: {
"use_spatial_hash": True,
},
LAYER_NAME_COINS: {
"use_spatial_hash": True,
},
}
# Load in TileMap
self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)
# Initiate New Scene with our TileMap, this will automatically add all layers
# from the map as SpriteLists in the scene in the proper order.
self.scene = arcade.Scene.from_tilemap(self.tile_map)
# Keep track of the score
self.score = 0
# Set up the player, specifically placing it at these coordinates.
self.player_sprite = PlayerCharacter()
self.player_sprite.center_x = (
self.tile_map.tiled_map.tile_size[0] * TILE_SCALING * PLAYER_START_X
)
self.player_sprite.center_y = (
self.tile_map.tiled_map.tile_size[1] * TILE_SCALING * PLAYER_START_Y
)
self.scene.add_sprite(LAYER_NAME_PLAYER, self.player_sprite)
# Calculate the right edge of the map in pixels
self.end_of_map = self.tile_map.tiled_map.map_size.width * GRID_PIXEL_SIZE
# -- Enemies
enemies_layer = self.tile_map.object_lists[LAYER_NAME_ENEMIES]
for my_object in enemies_layer:
cartesian = self.tile_map.get_cartesian(
my_object.shape[0], my_object.shape[1]
)
enemy_type = my_object.properties["type"]
if enemy_type == "robot":
enemy = RobotEnemy()
elif enemy_type == "zombie":
enemy = ZombieEnemy()
enemy.center_x = math.floor(
cartesian[0] * TILE_SCALING * self.tile_map.tile_width
)
enemy.center_y = math.floor(
(cartesian[1] + 1) * (self.tile_map.tile_height * TILE_SCALING)
)
if "boundary_left" in my_object.properties:
enemy.boundary_left = my_object.properties["boundary_left"]
if "boundary_right" in my_object.properties:
enemy.boundary_right = my_object.properties["boundary_right"]
if "change_x" in my_object.properties:
enemy.change_x = my_object.properties["change_x"]
self.scene.add_sprite(LAYER_NAME_ENEMIES, enemy)
# --- Other stuff
# Set the background color
if self.tile_map.tiled_map.background_color:
arcade.set_background_color(self.tile_map.tiled_map.background_color)
# Create the 'physics engine'
self.physics_engine = arcade.PhysicsEnginePlatformer(
self.player_sprite,
[
self.scene.get_sprite_list(LAYER_NAME_PLATFORMS),
self.scene.get_sprite_list(LAYER_NAME_MOVING_PLATFORMS),
],
gravity_constant=GRAVITY,
ladders=self.scene.get_sprite_list(LAYER_NAME_LADDERS),
)
def on_draw(self):
"""Render the screen."""
# Clear the screen to the background color
arcade.start_render()
# Activate the game camera
self.camera.use()
# Draw our Scene
self.scene.draw()
# Activate the GUI camera before drawing GUI elements
self.gui_camera.use()
# Draw our score on the screen, scrolling it with the viewport
score_text = f"Score: {self.score}"
arcade.draw_text(
score_text,
10,
10,
arcade.csscolor.BLACK,
18,
)
# Draw hit boxes.
# for wall in self.wall_list:
# wall.draw_hit_box(arcade.color.BLACK, 3)
#
# self.player_sprite.draw_hit_box(arcade.color.RED, 3)
def process_keychange(self):
"""
Called when we change a key up/down or we move on/off a ladder.
"""
# Process up/down
if self.up_pressed and not self.down_pressed:
if self.physics_engine.is_on_ladder():
self.player_sprite.change_y = PLAYER_MOVEMENT_SPEED
elif (
self.physics_engine.can_jump(y_distance=10)
and not self.jump_needs_reset
):
self.player_sprite.change_y = PLAYER_JUMP_SPEED
self.jump_needs_reset = True
arcade.play_sound(self.jump_sound)
elif self.down_pressed and not self.up_pressed:
if self.physics_engine.is_on_ladder():
self.player_sprite.change_y = -PLAYER_MOVEMENT_SPEED
# Process up/down when on a ladder and no movement
if self.physics_engine.is_on_ladder():
if not self.up_pressed and not self.down_pressed:
self.player_sprite.change_y = 0
elif self.up_pressed and self.down_pressed:
self.player_sprite.change_y = 0
# Process left/right
if self.right_pressed and not self.left_pressed:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
elif self.left_pressed and not self.right_pressed:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
else:
self.player_sprite.change_x = 0
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed."""
if key == arcade.key.UP or key == arcade.key.W:
self.up_pressed = True
elif key == arcade.key.DOWN or key == arcade.key.S:
self.down_pressed = True
elif key == arcade.key.LEFT or key == arcade.key.A:
self.left_pressed = True
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.right_pressed = True
self.process_keychange()
def on_key_release(self, key, modifiers):
"""Called when the user releases a key."""
if key == arcade.key.UP or key == arcade.key.W:
self.up_pressed = False
self.jump_needs_reset = False
elif key == arcade.key.DOWN or key == arcade.key.S:
self.down_pressed = False
elif key == arcade.key.LEFT or key == arcade.key.A:
self.left_pressed = False
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.right_pressed = False
self.process_keychange()
def center_camera_to_player(self):
screen_center_x = self.player_sprite.center_x - (self.camera.viewport_width / 2)
screen_center_y = self.player_sprite.center_y - (
self.camera.viewport_height / 2
)
if screen_center_x < 0:
screen_center_x = 0
if screen_center_y < 0:
screen_center_y = 0
player_centered = screen_center_x, screen_center_y
self.camera.move_to(player_centered, 0.2)
def on_update(self, delta_time):
"""Movement and game logic"""
# Move the player with the physics | |
local access restriction type.
Returns:
dict: The JSON structure.
'''
for note in record_json['notes']:
if note['persistent_id'] == csv_row['persistent_id']:
if 'rights_restriction' in note:
note['rights_restriction']['local_access_restriction_type'].append(csv_row['local_type'])
else:
note['rights_restriction'] = {'local_access_restriction_type': [csv_row['local_type']]}
return record_json
<EMAIL>(logger)
def create_timebound_restriction(self, record_json, csv_row):
'''Creates a timebound restriction type and links it to a note in a descriptive
record.
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['uri']: The URI of the descriptive record.
csv_row['persistent_id']: The persistent ID of the parent note.
csv_row['begin']: The begin date of the restriction. Format YYYY-MM-DD required.
csv_row['end']: The end date of the restriction. Format YYYY-MM-DD required.
Returns:
dict: The JSON structure.
'''
for note in record_json['notes']:
if note['persistent_id'] == csv_row['persistent_id']:
#this might not work for some of the older records which
#don't have the rights restriction dictionary
note['rights_restriction']['begin'] = csv_row['begin']
note['rights_restriction']['end'] = csv_row['end']
return record_json
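# Example csv_row (hypothetical values) expected by create_timebound_restriction;
# the keys match the parameters documented above:
#   {'uri': '/repositories/2/archival_objects/1234',
#    'persistent_id': 'note_persistent_id_value',
#    'begin': '2020-01-01', 'end': '2045-12-31'}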
#<EMAIL>(logger)
def update_identifiers(self, record_json, csv_row):
'''Moves resource identifiers which are split across multiple fields into a
single field.
Parameters:
record_json: The JSON representation of the parent record.
csv_row['uri']: The URI of the parent record.
csv_row['identifier']: The new identifier.
Returns:
dict: The JSON structure.
'''
record_json['id_0'] = csv_row['identifier']
if 'id_1' in record_json:
del record_json['id_1']
if 'id_2' in record_json:
del record_json['id_2']
if 'id_3' in record_json:
del record_json['id_3']
return record_json
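# Example (hypothetical values): if csv_row['identifier'] is 'MS.100.B', the
# combined value is written to id_0 and any leftover id_1/id_2/id_3 fields are
# removed from the resource record.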
<EMAIL>(logger)
def update_container_type(self, record_json, csv_row):
'''Updates the container type of a top container record.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
csv_row['container_type']: The new container type.
Returns:
dict: The JSON structure.
'''
record_json['type'] = csv_row['container_type']
return record_json
<EMAIL>(logger)
def link_agent_to_record(self, record_json, csv_row):
'''Links an agent record to a descriptive record.
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['agent_uri']: The URI of the agent record.
csv_row['record_uri']: The URI of the descriptive record.
Returns:
dict: The JSON structure.
'''
record_json['linked_agents'].append({'ref': csv_row['agent_uri']})
return record_json
<EMAIL>(logger)
def link_event_to_record(self, record_json, csv_row):
'''Links an event record to a descriptive record.
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['record_uri']: The URI of the descriptive record.
csv_row['event_uri']: The URI of the event record.
Returns:
dict: The JSON structure.
'''
record_json['linked_events'].append({'ref': csv_row['event_uri']})
return record_json
<EMAIL>(logger)
def link_record_to_classification(self, record_json, csv_row):
'''Links a record to a classification or classification term.
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['classification_uri']: The URI of the classification term
csv_row['record_uri']: The URI of the record to link.
Returns:
dict: The JSON structure.
Todo:
check if possible to link records to other types of
records such as agents
'''
record_json['linked_records'].append({'ref': csv_row['record_uri']})
return record_json
def update_eng_finding_aid_language(self, record_json, csv_row):
'''Updates a finding aid language value to English (before v 2.8)
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['uri']: The URI of the descriptive record.
csv_row['finding_aid_language']: The new finding aid language value
Returns:
dict: The JSON structure.
'''
record_json['finding_aid_language'] = "Finding aid written in <language langcode=\"eng\" scriptcode=\"Latn\">English</language>."
return record_json
def update_indicators(self, record_json, csv_row):
'''Updates a top container record with a new indicator.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
csv_row['indicator']: The new indicator (box number) of the top container.
Returns:
dict: The JSON structure.
'''
record_json['indicator'] = csv_row['indicator']
return record_json
def update_barcodes(self, record_json, csv_row):
'''Updates a top container record with barcode.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
csv_row['barcode']: The barcode of the top container.
Returns:
dict: The JSON structure.
'''
record_json['barcode'] = csv_row['barcode']
return record_json
def update_barcodes_indicators(self, record_json, csv_row):
'''Updates a top container record with barcode and indicator.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
csv_row['barcode']: The barcode of the top container.
csv_row['indicator']: The indicator (box number) of the top container.
Returns:
dict: The JSON structure.
'''
record_json['barcode'] = csv_row['barcode']
record_json['indicator'] = csv_row['indicator']
return record_json
#abstract
<EMAIL>(logger)
def update_top_containers(self, record_json, csv_row):
'''Updates a top container record with barcode and adds a type value of 'Box'
to the record. Also adds LSF as the location.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['tc_uri']: The URI of the top container record.
csv_row['barcode']: The barcode of the top container.
Returns:
dict: The JSON structure.
'''
record_json['barcode'] = csv_row['barcode']
record_json['type'] = 'Box'
new_location = {'jsonmodel_type': 'container_location', 'ref': '/locations/9', 'status': 'current', 'start_date': '2017-03-01'}
record_json['container_locations'].append(new_location)
return record_json
def update_container_location(self, record_json, csv_row):
'''Updates a top container record with a location
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
csv_row['location_uri']: The URI of the location to link to the top container.
Returns:
dict: The JSON structure.
'''
new_location = {'jsonmodel_type': 'container_location', 'ref': csv_row['location_uri'], 'status': 'current', 'start_date': '2017-03-01'}
record_json['container_locations'].append(new_location)
return record_json
def update_title(self, record_json, csv_row):
'''Updates a record title.
Parameters:
record_json: The JSON representation of the record.
csv_row['uri']: The URI of the record.
csv_row['title']: The new title.
Returns:
dict: The JSON structure.
'''
record_json['title'] = csv_row['title']
return record_json
def update_container_type(self, record_json, csv_row):
'''Updates a container record with a type value of 'Box'.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
Returns:
dict: The JSON structure.
'''
record_json['type'] = 'Box'
return record_json
<EMAIL>(logger)
def update_date_begin(self, record_json, csv_row):
'''Updates date subrecords.
Parameters:
record_json: The JSON representation of the parent record.
csv_row['begin']: The new begin date.
Returns:
dict: The JSON structure.
'''
for date in record_json['dates']:
date['begin'] = csv_row['begin']
return record_json
def update_event_date(self, record_json, csv_row):
'''Updates an event's date subrecord with a new begin date, plus an end
date and date type when those are present in the row
Parameters:
record_json: The JSON representation of the parent record.
csv_row['uri']: The URI of the parent record
csv_row['begin']: The begin date
csv_row['end']: The end date (optional)
csv_row['date_type']: The date type (optional)
Returns:
dict: The JSON structure
'''
record_json['date']['begin'] = csv_row['begin']
if csv_row.get('date_type') != '':
record_json['date']['date_type'] = csv_row['date_type']
if csv_row.get('end') != '':
record_json['date']['end'] = csv_row['end']
return record_json
<EMAIL>(logger)
def update_date_type(self, record_json, csv_row):
'''Checks whether a date lacks an end value, or whether the begin and end values
are equal, and if either is true changes the date type to 'single'
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['uri']: The URI of the descriptive record.
Returns:
dict: The JSON structure.
'''
for date in record_json['dates']:
if 'end' not in date:
date['date_type'] = 'single'
elif date['end'] == date['begin']:
date['date_type'] = 'single'
return record_json
def update_box_numbers(self, record_json, csv_row):
'''Updates indicator numbers in top container records.
Parameters:
record_json: The JSON representation of the top container record.
csv_row['uri']: The URI of the top container record.
csv_row['old_box']: The old box number.
csv_row['new_box']: The new box number.
Returns:
dict: The JSON structure.
'''
if record_json['indicator'] == csv_row['old_box']:
record_json['indicator'] = csv_row['new_box']
return record_json
<EMAIL>(logger)
def update_folder_numbers(self, record_json, csv_row):
'''Updates indicator numbers in instance subrecords.
Parameters:
record_json: The JSON representation of the descriptive record.
csv_row['uri']: The URI of the descriptive record.
csv_row['old_folder']: The old folder number.
csv_row['new_folder']: The new folder number.
Returns:
dict: The JSON structure.
'''
for instance in record_json['instances']:
if instance['indicator_2'] == csv_row['old_folder']:
instance['indicator_2'] = csv_row['new_folder']
return record_json
<EMAIL>(logger)
def update_revision_statements(self, record_json, csv_row):
'''Updates a revision statement.
Parameters:
record_json: The JSON representation of the resource record.
csv_row['uri']: The URI of the resource record.
csv_row['revision_date']: The revision date of the resource record.
csv_row['old_text']: The old revision statement.
csv_row['new_text']: The new revision statement.
Returns:
dict: The JSON structure.
'''
for revision_statement in record_json['revision_statements']:
if revision_statement['description'] == csv_row['old_text']:
revision_statement['description'] = csv_row['new_text']
return record_json
<EMAIL>(logger)
def update_notes(self, record_json, csv_row):
'''Updates singlepart or multipart notes.
Parameters:
record_json: The JSON representation of the parent record.
csv_row['uri']: The URI of the parent record.
csv_row['persistent_id']: The persistent ID of the parent note.
csv_row['note_text']: The new note text.
Returns:
dict: The JSON structure.
'''
for note in record_json['notes']:
if note['jsonmodel_type'] == 'note_multipart':
if note['persistent_id'] == csv_row['persistent_id']:
note['subnotes'][0]['content'] = csv_row['note_text']
elif note['jsonmodel_type'] == 'note_singlepart':
if note['persistent_id'] == csv_row['persistent_id']:
note['content'] = [csv_row['note_text']]
return record_json
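# Example csv_row (hypothetical values) for update_notes; multipart notes get the
# text written into their first subnote, singlepart notes into 'content':
#   {'uri': '/repositories/2/resources/55',
#    'persistent_id': 'note_persistent_id_value',
#    'note_text': 'Materials are open for research.'}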
<EMAIL>(logger)
def update_access_notes(self, record_json, csv_row):
'''Updates existing accessrestrict notes for HM films.
Parameters:
record_json: The JSON representation of the parent record.
csv_row['uri']: The | |
'Weather Service trying to use location-based property when street_or_region_id is None')
self._weather_info[self._street_or_region_id]._next_weather_event_time = value
@property
def _forecast_time(self):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
return self._weather_info[self._street_or_region_id]._forecast_time
@_forecast_time.setter
def _forecast_time(self, value):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
self._weather_info[self._street_or_region_id]._forecast_time = value
@property
def _override_forecast(self):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
return self._weather_info[self._street_or_region_id]._override_forecast
@_override_forecast.setter
def _override_forecast(self, value):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
self._weather_info[self._street_or_region_id]._override_forecast = value
@property
def _override_forecast_season(self):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
return self._weather_info[self._street_or_region_id]._override_forecast_season
@_override_forecast_season.setter
def _override_forecast_season(self, value):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
self._weather_info[self._street_or_region_id]._override_forecast_season = value
@property
def cross_season_override(self):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
return self._weather_info[self._street_or_region_id]._cross_season_override
@cross_season_override.setter
def cross_season_override(self, value):
logger.assert_raise(self._street_or_region_id is not None, 'Weather Service trying to use location-based property when street_or_region_id is None')
self._weather_info[self._street_or_region_id]._cross_season_override = value
@classproperty
def required_packs(cls):
return (
Pack.EP05, Pack.EP10)
@classproperty
def save_error_code(cls):
return persistence_error_types.ErrorCodes.SERVICE_SAVE_FAILED_WEATHER_SERVICE
def save(self, save_slot_data=None, **kwargs):
weather_service_data = WeatherSeasons_pb2.PersistableWeatherService()
for street_or_region_id, data in self._weather_info.items():
with ProtocolBufferRollback(weather_service_data.region_weathers) as (region_weather):
region_weather.region = street_or_region_id
region_weather.weather = data._last_op
if data._current_event is not None:
region_weather.weather_event = data._current_event.guid64
else:
region_weather.weather_event = 0
region_weather.forecast_time_stamp = data._forecast_time
region_weather.next_weather_event_time = data._next_weather_event_time
for forecast in data._forecasts:
region_weather.forecasts.append(forecast.guid64 if forecast is not None else 0)
if data._override_forecast is not None:
region_weather.override_forecast = data._override_forecast.guid64
region_weather.override_forecast_season_stamp = data._override_forecast_season
save_slot_data.gameplay_data.weather_service = weather_service_data
def load(self, **_):
save_slot_data_msg = services.get_persistence_service().get_save_slot_proto_buff()
weather_service_data = save_slot_data_msg.gameplay_data.weather_service
forecast_manager = services.get_instance_manager(sims4.resources.Types.WEATHER_FORECAST)
event_manager = services.get_instance_manager(sims4.resources.Types.WEATHER_EVENT)
snippet_manager = services.get_instance_manager(sims4.resources.Types.SNIPPET)
for region_weather in weather_service_data.region_weathers:
data = WeatherService.RegionWeatherInfo()
self._weather_info[region_weather.region] = data
data._last_op.MergeFrom(region_weather.weather)
data._current_event = event_manager.get(region_weather.weather_event)
data._forecast_time = DateAndTime(region_weather.forecast_time_stamp)
data._next_weather_event_time = DateAndTime(region_weather.next_weather_event_time)
data._forecasts = [forecast_manager.get(forecast_guid) for forecast_guid in region_weather.forecasts]
if None in data._forecasts:
data._forecasts.clear()
data._override_forecast = snippet_manager.get(region_weather.override_forecast)
if data._override_forecast is not None:
if region_weather.override_forecast_season_stamp in SeasonType:
data._override_forecast_season = SeasonType(region_weather.override_forecast_season_stamp)
else:
data._override_forecast = None
data._current_event = None
data._forecasts.clear()
def load_options(self, options_proto):
self._temperature_effects_option = options_proto.temperature_effects_enabled
self._icy_conditions_option = options_proto.icy_conditions_enabled
self._thunder_snow_storms_option = options_proto.thunder_snow_storms_enabled
self._weather_option[PrecipitationType.RAIN] = WeatherOption(options_proto.rain_options)
self._weather_option[PrecipitationType.SNOW] = WeatherOption(options_proto.snow_options)
def save_options(self, options_proto):
options_proto.temperature_effects_enabled = self._temperature_effects_option
options_proto.icy_conditions_enabled = self._icy_conditions_option
options_proto.thunder_snow_storms_enabled = self._thunder_snow_storms_option
options_proto.rain_options = self._weather_option[PrecipitationType.RAIN].value
options_proto.snow_options = self._weather_option[PrecipitationType.SNOW].value
def edit_mode_setup(self):
static_max_weather_element = WeatherElementTuple(1.0, 0, 1.0, 0)
self._street_or_region_id = get_street_or_region_id_with_weather_tuning()
self._trans_info = {}
snow_behavior = get_snow_behavior()
if snow_behavior == SnowBehavior.PERMANENT:
self._trans_info[int(GroundCoverType.SNOW_ACCUMULATION)] = static_max_weather_element
self._trans_info[int(WeatherEffectType.WATER_FROZEN)] = static_max_weather_element
self._trans_info[int(WeatherEffectType.WINDOW_FROST)] = static_max_weather_element
if self._trans_info:
self._send_weather_event_op(update_keytimes=False)
self.update_weather_type(during_load=True)
def on_zone_load(self):
self._street_or_region_id = get_street_or_region_id_with_weather_tuning()
self._current_weather_types = set()
current_time = services.time_service().sim_now
if self._next_weather_event_time == DATE_AND_TIME_ZERO or current_time > self._next_weather_event_time:
self._current_event = None
if self._forecast_time != DATE_AND_TIME_ZERO:
now_days = int(current_time.absolute_days())
day_time_span = create_time_span(days=1)
while now_days > int(self._forecast_time.absolute_days()) and self._forecasts:
del self._forecasts[0]
self._forecast_time = self._forecast_time + day_time_span
elif self._current_event is None:
self._update_trans_info()
self._send_new_weather_event()
else:
self._send_existing_weather_event()
self.update_weather_type(during_load=True)
def on_zone_unload(self):
self._current_weather_types.clear()
for entry in self._weather_aware_objects.values():
entry.clear()
self._add_message_objects.clear()
self._remove_message_objects.clear()
self._key_times.clear()
self._remove_snow_drift_alarm()
self._remove_puddle_alarm()
if self._lightning_collectible_alarm is not None:
alarms.cancel_alarm(self._lightning_collectible_alarm)
self._lightning_collectible_alarm = None
def _send_new_weather_event(self):
self.populate_forecasts(1)
current_region_forecasts = self._forecasts
if current_region_forecasts:
forecast = current_region_forecasts[0]
if forecast is not None:
weather_event, duration = forecast.get_weather_event(self._weather_option, self._thunder_snow_storms_option)
if weather_event is not None:
self.start_weather_event(weather_event, duration)
def _send_existing_weather_event(self):
op = WeatherEventOp(self._last_op)
Distributor.instance().add_op_with_no_owner(op)
self._update_trans_info()
self._update_keytimes()
def _update_trans_info(self):
self._trans_info.clear()
for weather_interop in self._last_op.season_weather_interlops:
if weather_interop.start_value != 0 or weather_interop.end_value != 0:
self._trans_info[int(weather_interop.message_type)] = WeatherElementTuple(weather_interop.start_value, DateAndTime(weather_interop.start_time * date_and_time.REAL_MILLISECONDS_PER_SIM_SECOND), weather_interop.end_value, DateAndTime(weather_interop.end_time * date_and_time.REAL_MILLISECONDS_PER_SIM_SECOND))
def set_weather_option(self, precipitation_type, weather_option):
old_value = self._weather_option[precipitation_type]
if old_value == weather_option:
return
self._weather_option[precipitation_type] = weather_option
self.reset_forecasts()
def reset_forecasts(self, all_regions=True):
if all_regions:
for region_data in self._weather_info.values():
region_data.clear()
else:
self._forecasts.clear()
self._send_new_weather_event()
def set_temperature_effects_enabled(self, enabled):
if self._temperature_effects_option == enabled:
return
self._temperature_effects_option = enabled
sim_infos = services.sim_info_manager().values()
if self.TEMPERATURE_CONTROL_BUFF is None:
logger.error('TEMPERATURE_CONTROL_BUFF is None, meaning this code path has been entered outside of EP05')
return
for sim_info in sim_infos:
self.apply_weather_option_buffs(sim_info)
def set_icy_conditions_enabled(self, enabled):
if self._icy_conditions_option == enabled:
return
self._icy_conditions_option = enabled
self.reset_forecasts()
def set_thunder_snow_storms_enabled(self, enabled):
if self._thunder_snow_storms_option == enabled:
return
self._thunder_snow_storms_option = enabled
self.reset_forecasts()
def apply_weather_option_buffs(self, sim_or_sim_info):
if self.TEMPERATURE_CONTROL_BUFF is None:
logger.error('TEMPERATURE_CONTROL_BUFF is None, meaning this code path has been entered outside of EP05')
return
elif self._temperature_effects_option == False:
sim_or_sim_info.add_buff_from_op(self.TEMPERATURE_CONTROL_BUFF)
else:
sim_or_sim_info.remove_buff_by_type(self.TEMPERATURE_CONTROL_BUFF)
def force_start_weather_event(self, weather_event, duration):
for weather_element in self._trans_info:
self._trans_info[weather_element] = WeatherElementTuple(0, 0, 0, 0)
self._current_event = None
self._send_weather_event_op()
self.start_weather_event(weather_event, duration)
def start_weather_event(self, weather_event, duration):
new_trans_info, next_time = weather_event.get_transition_data(self._current_event, self._trans_info, duration)
self._check_for_static_weather_elements(new_trans_info)
if self._current_event is None:
self._next_weather_event_time = self._create_secondary_weather_elements(new_trans_info, next_time)
else:
self._next_weather_event_time = self._update_secondary_weather_elements(new_trans_info, next_time)
self._current_event = weather_event
self._trans_info = new_trans_info
self._send_weather_event_op()
def _check_for_static_weather_elements(self, new_trans_info):
snow_behavior = get_snow_behavior()
if snow_behavior == SnowBehavior.PERMANENT:
temperature_info = new_trans_info.get(int(WeatherEffectType.TEMPERATURE), None)
if temperature_info is not None:
start_value = temperature_info.start_value
end_value = temperature_info.end_value
update_value = False
if start_value > Temperature.COLD:
start_value = Temperature.COLD
update_value = True
if end_value > Temperature.COLD:
end_value = Temperature.COLD
update_value = True
if update_value:
new_trans_info[int(WeatherEffectType.TEMPERATURE)] = WeatherElementTuple(start_value, temperature_info.start_time, end_value, temperature_info.end_time)
if int(PrecipitationType.RAIN) in new_trans_info:
del new_trans_info[int(PrecipitationType.RAIN)]
def _send_weather_event_op(self, update_keytimes=True):
if self._trans_info:
messages_to_remove = []
self._last_op = WeatherSeasons_pb2.SeasonWeatherInterpolations()
op = WeatherEventOp(self._last_op)
for message_type, data in self._trans_info.items():
op.populate_op(message_type, data.start_value, data.start_time, data.end_value, data.end_time)
if data.start_value == data.end_value == 0.0:
messages_to_remove.append(message_type)
Distributor.instance().add_op_with_no_owner(op)
for message_type in messages_to_remove:
del self._trans_info[message_type]
if update_keytimes:
self._update_keytimes()
def _get_element_interpolation(self, start_time, rate, start_value, target_value):
if rate == 0:
return WeatherElementTuple(start_value, start_time, start_value, start_time)
delta = start_value - target_value
minutes = abs(delta / rate)
end_time = start_time + create_time_span(minutes=minutes)
return WeatherElementTuple(start_value, start_time, target_value, end_time)
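# Worked example (sketch): interpolating an element from 1.0 down to 0.0 at a
# rate of 0.1 per sim minute gives delta = 1.0 - 0.0 = 1.0 and
# minutes = abs(1.0 / 0.1) = 10, so the returned WeatherElementTuple runs from
# 1.0 at start_time to 0.0 ten sim minutes later (a rate of 0 holds the start value).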
def _add_secondary_element_decay(self, new_trans_info, temp, element_type, tuning, time, current_value=None):
if current_value is None:
current_value = self.get_weather_element_value(element_type, default=None)
else:
if temp == Temperature.BURNING:
rate = tuning.burning
else:
if temp == Temperature.HOT:
rate = tuning.hot
else:
if temp == Temperature.WARM:
rate = tuning.warm
else:
if temp == Temperature.COOL:
rate = tuning.cool
else:
if temp == Temperature.COLD:
rate = tuning.cold
else:
if temp == Temperature.FREEZING:
rate = tuning.freezing
else:
logger.error('No secondary element decay rate handled for this temperature: {}', Temperature(temp))
rate = 0
if rate >= 0:
if current_value is None:
return
if current_value <= 0.0 or rate == 0:
new_trans_info[int(element_type)] = WeatherElementTuple(current_value, time, current_value, time)
else:
new_trans_info[int(element_type)] = self._get_element_interpolation(time, rate, current_value, 0.0)
else:
pass
if current_value is None:
current_value = 0.0
elif current_value >= 1.0:
new_trans_info[int(element_type)] = WeatherElementTuple(current_value, time, current_value, time)
else:
new_trans_info[int(element_type)] = self._get_element_interpolation(time, rate, current_value, 1.0)
def start_icy_conditions(self, time, new_trans_info=None, send_op=False):
if new_trans_info is None:
new_trans_info = self._trans_info
key = int(WeatherEffectType.SNOW_ICINESS)
init_value = self.get_weather_element_value(key)
new_trans_info[key] = WeatherElementTuple(init_value or 0.0, time, 1.0, time + create_time_span(minutes=(self.ICY_CONDITIONS.time_to_icy)))
if send_op:
self._send_weather_event_op()
def stop_icy_conditions(self, delay_in_min, send_op=False):
key = int(WeatherEffectType.SNOW_ICINESS)
init_value = self.get_weather_element_value(key)
start_time = services.time_service().sim_now + create_time_span(minutes=delay_in_min)
self._trans_info[key] = WeatherElementTuple(init_value or 0.0, start_time, 0.0, start_time + create_time_span(minutes=(self.ICY_CONDITIONS.time_to_icy_decay)))
if send_op:
self._send_weather_event_op()
def _add_snow_accumulation_decay(self, new_trans_info, temp, time, current_value=None):
key = int(GroundCoverType.SNOW_ACCUMULATION)
if current_value is None:
current_value = self.get_weather_element_value(key)
snow_behavior = get_snow_behavior()
if temp == Temperature.FREEZING:
if snow_behavior != SnowBehavior.NO_SNOW:
rate = self.SNOW_MELT_RATE.freezing
if rate != 0 and current_value >= 0 and current_value < self.FROST_GROUND_ACCUMULATION_MAX:
new_trans_info[key] = self._get_element_interpolation(time, rate, current_value, self.FROST_GROUND_ACCUMULATION_MAX)
else:
new_trans_info[key] = WeatherElementTuple(current_value, time, current_value, time)
return
if current_value == 1.0:
current_value = -1.0
else:
if current_value > 0:
logger.warn("Melting accumulating (>0) snow that isn't at 1")
else:
if current_value == 0.0:
return
if temp == Temperature.BURNING:
rate = self.SNOW_MELT_RATE.burning
else:
if temp == Temperature.HOT:
rate = self.SNOW_MELT_RATE.hot
else:
if temp == Temperature.WARM:
rate = self.SNOW_MELT_RATE.warm
else:
if temp == Temperature.COOL:
rate = self.SNOW_MELT_RATE.cool
else:
if temp == Temperature.COLD:
rate = self.SNOW_MELT_RATE.cold
else:
if temp == Temperature.FREEZING:
rate = 0
else:
logger.error('No snow accumulation rate handled for this temperature: {}', Temperature(temp))
rate = 0
new_trans_info[key] = self._get_element_interpolation(time, rate, current_value, 0.0)
def _add_precipitation_accumulation(self, precip_key, accumulate_key, rate, new_trans_info, time, current_value=None):
data = new_trans_info.get(int(precip_key), None)
if data is None:
start_time = time
target = 0.0
else:
start_time = data.start_time
target = data.end_value
if target == 0.0:
return (
start_time, True)
rate = rate * target
if current_value is None:
current_value = self.get_weather_element_value(int(accumulate_key), time)
elif current_value < 0.0:
logger.warn('Accumulation type {} is trying to accumulate when negative, accumulating to -1 instead.', PrecipitationType(precip_key))
target_value = -1
else:
target_value = 1
new_trans_info[int(accumulate_key)] = self._get_element_interpolation(start_time, rate, current_value, target_value)
return (start_time, False)
def _get_snow_freshness_rate(self, snow_amount):
region_instance = region.get_region_instance_from_zone_id(services.current_zone_id())
| |
<reponame>SugaanthMohan/Database_Hacks<gh_stars>1-10
#! /usr/bin/python3.5
'''
Process : COMPARE SIMILAR TABLES IN DATABASES
Author : <NAME>
Created : Oct 19 2018
Last Modified Date : -
Version Control:
_______________
pandas == 0.23.0
smtplib == Default Package : Python 2.7.12
sys == Default Package : Python 2.7.12
os == Default Package : Python 2.7.12
linecache == Default Package : Python 2.7.12
inspect == Default Package : Python 2.7.12
argparse == Default Package : Python 2.7.12
re == Default Package : Python 2.7.12
ftplib == Default Package : Python 2.7.12
datetime == Default Package : Python 2.7.12
'''
# >>>>>>>>>>>>>>>>>>>>>>> IMPORT STATEMENTS <<<<<<<<<<<<<<<<<<<<<<<<<<<<
# USED FOR SENDING EMAIL
import smtplib
# USED TO PERFORM SYSTEM FUNCTIONS
import sys , os as linux
# USED FOR EXCEPTION HANDLING AND BRIEFING
import linecache
# USED IN LOGGER FOR DETAILED PRINT STATEMENT
import inspect
# USED TO RETRIEVE PARSED ARGUMENTS
import argparse
# REGEX COMPARE
import re as Regex
# READ CSV INPUT FILE
import pandas as pd
# USED FOR LOGGING DATE
import datetime
# USED TO GET DB DATA
import MySQLdb
# USED TO GET DATA FROM CONFIG FILE
import configparser
# USED TO INITIATE GARBAGE COLLECTION
import gc
# USED FOR GARBAGE COLLECTION TIME WAIT
from time import sleep
# >>>>>>>>>>>>>>>>>>>>>>> INITIALIZE GLOBAL VARIABLES <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def globalSet():
# EMAILING LIST
global From,To,now,SaveLocation,SchemaLocation
From = "<EMAIL>"
To = ["<<EMAIL>>"]
now = datetime.datetime.now()
SaveLocation = "/tmp/SaveFiles/"
SchemaLocation = "<SavedSchemaLocations>"
# DATABASE 1 LIST
global GlobDbObj_1,mysql_host_name_1,mysql_user_name_1,mysql_password_1,mysql_database_1,mysql_port_number_1
mysql_host_name_1 = mysql_user_name_1 = mysql_password_1 = mysql_database_1 = ""
GlobDbObj_1 = []
# DATABASE 2 LIST
global GlobDbObj_2,mysql_host_name_2,mysql_user_name_2,mysql_password_2,mysql_database_2,mysql_port_number_2
mysql_host_name_2 = mysql_user_name_2 = mysql_password_2 = mysql_database_2 = ""
GlobDbObj_2 = []
# GET OPTIONS
global DifferedTableNames,OneToMany,Override,Test_comp,Performance
# SET DEFAULT VALUE OF OPTIONS TO False
DifferedTableNames=OneToMany=Override=Test_comp=False
# QUERY LIST
global query
query = {}
# ORIGINAL QUERIES TO USE
query['GET_TABLES_IN_DATABASE'] = "SHOW TABLES IN ? "
query['DESC_TABLE'] = "DESC ? "
query['SELECT_TABLE'] = "SELECT * FROM ? "
query['SELECT_TABLE_LIMIT'] = "SELECT * FROM ? LIMIT ?,?"
query['CHECK_COUNT'] = "SELECT COUNT(*) FROM ? "
# TEST QUERY COMPATIBILITY FOR THE DATABASE
query['TEST_VERSION'] = """
SELECT VERSION();
"""
query['TEST_CREATE_TABLE'] = """
CREATE TEMPORARY TABLE TableCompareTest (
item_name VARCHAR(50),
sort_num INT
);
"""
query['TEST_DESC'] = """
DESC TableCompareTest;
"""
query['TEST_INSERT'] = """
INSERT INTO TableCompareTest
(item_name,sort_num)
VALUES
("Temp1",10),
("Temp2",20),
("Temp3",30);
"""
query['TEST_SELECT'] = """
SELECT * FROM TableCompareTest;
"""
query['TEST_DROP'] = """
DROP TEMPORARY TABLE TableCompareTest;
"""
global ErrorsList
ErrorsList = []
# >>>>>>>>>>>>>>>>>>>>>>> USAGE DEMO <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def demo():
print("""
USAGE :
python35 """+linux.path.abspath(sys.argv[0])+""" --configFile="""+linux.path.abspath(sys.argv[0]).replace("py","ini")+"""
SAMPLE :
python35 """+linux.path.abspath(sys.argv[0])+""" --configFile="""+linux.path.abspath(sys.argv[0]).replace("py","ini")+"""
""")
sys.exit(0)
if not len(sys.argv) > 1 :
demo()
# >>>>>>>>>>>>>>>>>>>>>>> ARGUMENT PARSING <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def Clear_Memory():
gc.collect()
Tlog('Memory Allocation Freeing.. Please wait 10 seconds. ')
sleep(9)
# >>>>>>>>>>>>>>>>>>>>>>> ARGUMENT PARSING <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def args():
parser = argparse.ArgumentParser()
parser.add_argument('-c','--configFile',type=str,help="FORMAT : INI\nCONTAINS ALL REQUIRED SCHEMA DETAILS",required=True)
args = parser.parse_args()
Tlog("PARSING ARGUMENTS COMPLETED")
return args.configFile
# >>>>>>>>>>>>>>>>>>>>>>> EMAIL MODULE USED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ADD 2 ARGUMENTS
# First One - Subject of Email to Send
# Second one - Content of the Email to Send.
def Email(Subject_,Content_):
SERVER = "localhost"
# Prepare actual message
message = 'From: {}\nTo: {}\nSubject: {}\n\n{}'.format(From," ,".join(To),Subject_, Content_)
# Send the mail
server = smtplib.SMTP(SERVER)
server.sendmail(From, To, message)
server.quit()
# >>>>>>>>>>>>>>>>>>>>>>> EXCEPTION BRIEFER USED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def ExceptionBrief():
# CREATE EXCEPTION REPORT
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
return 'EXCEPTION CAPTURED : ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
# >>>>>>>>>>>>>>>>>>>>>>> DEFINE THE USED SUB-ROUTINES <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def Tlog(printer_):
now = datetime.datetime.now()
print("\n\t[INFO] ::"+str(now).split('.')[0]+"::"+str(__file__)+"::"+str(inspect.currentframe().f_back.f_lineno)+"::"+str(printer_)+"::\n")
# >>>>>>>>>>>>>>>>>>>>>>> MYDIE MODULE USED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def mydie(exitCont_):
Tlog("*** ERROR OCCURRED *** : ROLL BACK PROCEDURES EXECUTING BELOW")
Tlog(exitCont_)
Tlog("*** ROLL BACK *** : CLOSING DB CONNECTION 1")
DbDisConnect1()
Tlog("*** ROLL BACK *** : CLOSING DB CONNECTION 2 ")
DbDisConnect2()
Email(Subject_ = __file__+" - RUN TIME ERROR AT : "+str(now), Content_ = exitCont_)
sys.exit(0)
# >>>>>>>>>>>>>>>>>>>>>>> DATABASE CONNECTION 1 CREATED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def DbConnect1():
"""
DB 1 CONNECTION IS HERE
"""
if len(mysql_port_number_1) == 0:
Database = MySQLdb.connect(host=mysql_host_name_1, user=mysql_user_name_1, passwd=<PASSWORD>, db=mysql_database_1)
else:
Database = MySQLdb.connect(host=mysql_host_name_1, user=mysql_user_name_1, passwd=<PASSWORD>, db=mysql_database_1,port=int(mysql_port_number_1))
Tlog("Connected to Database : "+mysql_database_1+" @"+mysql_host_name_1)
GlobDbObj_1.append(Database)
return Database
# >>>>>>>>>>>>>>>>>>>>>>> DATABASE CONNECTION 2 CREATED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def DbConnect2():
"""
DB 2 CONNECTION IS HERE
"""
if len(mysql_port_number_2) == 0:
Database = MySQLdb.connect(host=mysql_host_name_2, user=mysql_user_name_2, passwd=<PASSWORD>, db=mysql_database_2)
else:
Database = MySQLdb.connect(host=mysql_host_name_2, user=mysql_user_name_2, passwd=<PASSWORD>, db=mysql_database_2,port=int(mysql_port_number_2))
Tlog("Connected to Database : "+mysql_database_2+" @"+mysql_host_name_2)
GlobDbObj_2.append(Database)
return Database
# >>>>>>>>>>>>>>>>>>>>>>> DATABASE DIS-CONNECTION 1 CREATED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def DbDisConnect1():
"""
DB 1 DIS-CONNECTION IS HERE
"""
for database in GlobDbObj_1:
database.close()
Tlog("Disconnected from Database : "+mysql_database_1+" @"+mysql_host_name_1)
GlobDbObj_1.remove(database)
# >>>>>>>>>>>>>>>>>>>>>>> DATABASE DIS-CONNECTION 2 CREATED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def DbDisConnect2():
"""
DB 2 DIS-CONNECTION IS HERE
"""
for database in GlobDbObj_2:
database.close()
Tlog("Disconnected from Database : "+mysql_database_2+" @"+mysql_host_name_2)
GlobDbObj_2.remove(database)
# >>>>>>>>>>>>>>>>>>>>>>> EXECUTE QUERIES HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def sqlCursorExecute(DbObj_,Query):
# CREATE A CURSOR
cursor=DbObj_.cursor() or mydie("\t[Error] Unable to Create Cursor\n")
Tlog("[INFO] Executing Query : "+str(Query)+"")
# EXECUTE THE SELECT QUERY
cursor.execute(Query)
if cursor.rowcount != 0 :
data = cursor.fetchall()
cursor.close()
return data
# CLOSE CURSOR TO AVOID DEPENDENCY ISSUES
else:
# CLOSE CURSOR TO AVOID DEPENDENCY ISSUES
cursor.close()
# >>>>>>>>>>>>>>>>>>>>>>> EXECUTE THE TEST QUERIES HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def sqlDataFrameExecute(DbObj,Query):
df = pd.read_sql(Query, con=DbObj)
return df
# >>>>>>>>>>>>>>>>>>>>>>> DATABASE DIS-CONNECTION 2 CREATED HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def TestQueryStructure(connection_):
"""
USED TO TEST TEMPORARY QUERIES TO CHECK THE SYNTAX.
TestQueryStructure(connection_)
connection_ => Is the live connection to database
"""
# DO VERSION SELECT
sqlCursorExecute(DbObj_ = connection_,Query = query['TEST_VERSION'])
Tlog(' SELECT VERSION -> OK ')
# CREATE A TEMPORARY TABLE
sqlCursorExecute(DbObj_ = connection_,Query = query['TEST_CREATE_TABLE'])
Tlog(' CREATE TABLE QUERY -> OK ')
# DESCRIBE THE TEMPORARY TABLE
sqlCursorExecute(DbObj_ = connection_,Query = query['TEST_DESC'])
Tlog(' DESCRIBE TABLE QUERY -> OK')
# INSERT DATA TO THE TEMPORARY TABLE
sqlCursorExecute(DbObj_ = connection_,Query = query['TEST_INSERT'])
Tlog(' INSERT INTO TABLE QUERY -> OK')
# SELECT DATA FROM THE TEMPORARY TABLE
sqlCursorExecute(DbObj_ = connection_,Query = query['TEST_SELECT'])
Tlog(' SELECT FROM TABLE QUERY -> OK')
# DROP THE TEMPORARY TABLE
sqlCursorExecute(DbObj_ = connection_,Query = query['TEST_DROP'])
Tlog(' DROP TABLE QUERY -> OK')
# >>>>>>>>>>>>>>>>>>>>>>> EXECUTE THE TEST QUERIES HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def prepareQuery(*values,BaseQuery_):
# REPLACE VALUES IN THE BASE QUERY
for value in values:
BaseQuery_ = BaseQuery_.replace('?',str(value),1)
return BaseQuery_
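# Example (hypothetical table name): placeholders are replaced positionally,
# one '?' per value passed in.
#
#   prepareQuery('my_table', 0, 500, BaseQuery_=query['SELECT_TABLE_LIMIT'])
#   # -> "SELECT * FROM my_table LIMIT 0,500"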
# >>>>>>>>>>>>>>>>>>>>>>> GET THE LIST OF PARAMETERS HERE <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def get_Config_Params(ini_):
"""
PARSE THE CONFIG FILE HERE :
get_Config_Params(ini_)
ini_ => is the file name along with location to be parsed
"""
global mysql_host_name_1,mysql_user_name_1,mysql_password_1,mysql_database_1,mysql_port_number_1
global mysql_host_name_2,mysql_user_name_2,mysql_password_2,mysql_database_2,mysql_port_number_2
global DifferedTableNames,OneToMany,Override,Test_comp,Performance
# CHECK IF THE FILE EXISTS
if linux.path.isfile(ini_) is not True:
mydie("Config File : "+ini_+" does not exist!")
getconfig = configparser.ConfigParser()
getconfig.read(ini_)
getConfigInfo = getconfig['Configs']
mysql_host_name_1 = getConfigInfo['HOST_NAME_1']
mysql_user_name_1 = getConfigInfo['USER_NAME_1']
mysql_password_1 = getConfigInfo['PASSWORD_1']
mysql_database_1 = getConfigInfo['DATABASE_NAME_1']
mysql_host_name_2 = getConfigInfo['HOST_NAME_2']
mysql_user_name_2 = getConfigInfo['USER_NAME_2']
mysql_password_2 = getConfigInfo['PASSWORD_2']
mysql_database_2 = getConfigInfo['DATABASE_NAME_2']
if getconfig.has_option('Configs','PORT_NUMBER_1'):
mysql_port_number_1 = getConfigInfo['PORT_NUMBER_1']
if getconfig.has_option('Configs','PORT_NUMBER_2'):
mysql_port_number_2 = getConfigInfo['PORT_NUMBER_2']
# GET THE OPTIONS INFO
DifferedTableNames = eval(getConfigInfo['DIFFERED_TABLE_NAMES'])
OneToMany = eval(getConfigInfo['ONE_TO_MANY'])
Override = eval(getConfigInfo['OVERRIDE'])
Test_comp = eval(getConfigInfo['TEST_COMPATIBILITY'])
Performance = int(getConfigInfo['PERFORMANCE_LIMIT'])
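# Example [Configs] section (hypothetical values) matching the keys read above;
# PORT_NUMBER_1 / PORT_NUMBER_2 are optional:
#
#   [Configs]
#   HOST_NAME_1 = db1.example.com
#   USER_NAME_1 = compare_user
#   PASSWORD_1 = secret
#   DATABASE_NAME_1 = sales
#   HOST_NAME_2 = db2.example.com
#   USER_NAME_2 = compare_user
#   PASSWORD_2 = secret
#   DATABASE_NAME_2 = sales_replica
#   DIFFERED_TABLE_NAMES = False
#   ONE_TO_MANY = False
#   OVERRIDE = False
#   TEST_COMPATIBILITY = True
#   PERFORMANCE_LIMIT = 100000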
## TODO
# >>>>>>>>>>>>>>>>>>>>>>> MATCH ALL THE TABLES BELOW <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def MatchAllTables(dataframe1_, dataframe2_,connection1_,connection2_):
pass
# >>>>>>>>>>>>>>>>>>>>>>> MATCH ALL THE TABLES BELOW <<<<<<<<<<<<<<<<<<<<<<<<<<<<
def CompareTableContent(DataFrame_,connection1_,connection2_):
global Performance,ErrorsList
for TableDetails in DataFrame_.values:
firstTable = TableDetails[0].split('|')[0]
secondTable = TableDetails[0].split('|')[1]
Tlog("COMPARING TABLE CONTENTS BETWEEN : " + firstTable + " <=> " + secondTable)
QueryNow = prepareQuery(firstTable,BaseQuery_ = query['CHECK_COUNT'])
count1 = int(sqlCursorExecute(DbObj_ = connection1_ ,Query = QueryNow)[0][0])
QueryNow = prepareQuery(secondTable,BaseQuery_ = query['CHECK_COUNT'])
count2 = int(sqlCursorExecute(DbObj_ = connection2_ ,Query = QueryNow)[0][0])
Tlog(firstTable + " : " + str(count1) +" | "+ secondTable + " : " + str(count2))
# LOAD SCHEMA IF TABLE IS EMPTY
if linux.path.exists(SchemaLocation + mysql_database_1 + "_" + firstTable + "_schema.csv" ) is True :
table_features = pd.read_csv(SchemaLocation + mysql_database_1 + "_" + firstTable + "_schema.csv")
else:
QueryNow = prepareQuery(firstTable,BaseQuery_ = query['DESC_TABLE'])
table_features = pd.read_sql(QueryNow, con=connection1_)
# DO LISTINGS AGAIN
Table_Keys_List = table_features[ (table_features['Key'] != '') & (table_features['Type'] != 'timestamp') ]['Field'].values.tolist()
Table_Not_Keys_List = table_features[ (table_features['Key'] == '') & (table_features['Type'] != 'timestamp') ]['Field'].values.tolist()
if len(Table_Keys_List) == 0:
ErrorsList.append("[ERROR] TABLE " + firstTable + " HAS NO KEYS DEFINED FOR IT. SCHEMA ALSO NOT FOUND !!")
# CLEAR MEMORY OF TABLE FEATURES LIST
del table_features
# LOAD SCHEMA IF TABLE IS EMPTY
if linux.path.exists(SchemaLocation + mysql_database_2 + "_" + secondTable + "_schema.csv" ) is True :
table_features = pd.read_csv(SchemaLocation + mysql_database_2 | |
#!/usr/bin/python2.7
"""
Copyright (C) 2014 Reinventing Geospatial, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>,
or write to the Free Software Foundation, Inc., 59 Temple Place -
Suite 330, Boston, MA 02111-1307, USA.
Author: <NAME>, Reinventing Geospatial Inc (RGi)
Date: 2013-07-12
Requires: sqlite3, argparse
Optional: Python Imaging Library (PIL or Pillow)
Description: Converts a TMS folder into a geopackage with
PNGs for images with transparency and JPEGs for those
without.
Credits:
MapProxy imaging functions: http://mapproxy.org
gdal2mb on github: https://github.com/developmentseed/gdal2mb
Version:
"""
from glob import glob
from scripts.common.zoom_metadata import ZoomMetadata
from scripts.geopackage.geopackage import Geopackage, PRAGMA_MINIMUM_SQLITE_VERSION
from scripts.geopackage.nsg_geopackage import NsgGeopackage
from scripts.geopackage.srs.ellipsoidal_mercator import EllipsoidalMercator
from scripts.geopackage.srs.geodetic import Geodetic
from scripts.geopackage.srs.geodetic_nsg import GeodeticNSG
from scripts.geopackage.srs.mercator import Mercator
from scripts.geopackage.srs.scaled_world_mercator import ScaledWorldMercator
from scripts.packaging.temp_db import TempDB
try:
from cStringIO import StringIO as ioBuffer
except ImportError:
from io import BytesIO as ioBuffer
from time import sleep
from sys import stdout
from sys import version_info
if version_info[0] == 3:
xrange = range
from sqlite3 import sqlite_version
from argparse import ArgumentParser
from sqlite3 import Binary as sbinary
from os import walk
from os.path import split, join, exists
from multiprocessing import cpu_count, Pool
from distutils.version import LooseVersion
try:
from PIL.Image import open as IOPEN
except ImportError:
IOPEN = None
# JPEGs @ 75% quality provide good images with a low footprint, so use that as the default
# PNGs should be used sparingly (mixed mode) due to the high disk usage of RGBA data
# Options are mixed, jpeg, and png
IMAGE_TYPES = '.png', '.jpeg', '.jpg'
def write_geopackage_header(file_path):
"""
writes geopackage header bytes to the sqlite database at file_path
Args:
file_path:
Returns:
nothing
"""
header = 'GP10'
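    # Byte offset 68 of the SQLite file header is the application_id field;
    # writing 'GP10' there marks the database as a GeoPackage (version 1.0).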
with open(file_path, 'r+b') as file:
file.seek(68, 0)
file.write(header.encode())
def img_to_buf(img, img_type, jpeg_quality=75):
"""
Returns a buffer array with image binary data for the input image.
This code is based on logic implemented in MapProxy to convert PNG
images to JPEG then return the buffer.
Inputs:
img -- an image on the filesystem to be converted to binary
img_type -- the MIME type of the image (JPG, PNG)
"""
defaults = {}
buf = ioBuffer()
if img_type == 'jpeg':
        img = img.convert('RGB')  # convert() returns a new image; JPEG cannot store an alpha channel
# Hardcoding a default compression of 75% for JPEGs
defaults['quality'] = jpeg_quality
elif img_type == 'source':
img_type = img.format
img.save(buf, img_type, **defaults)
buf.seek(0)
return buf
def img_has_transparency(img):
"""
Returns a 0 if the input image has no transparency, 1 if it has some,
    and -1 if the image is fully transparent. Tiles should be a perfect
    square (e.g., 256x256), so it is safe to assume the first dimension
will match the second. This will ensure compatibility with different
tile sizes other than 256x256. This code is based on logic implemented
in MapProxy to check for images that have transparency.
Inputs:
img -- an Image object from the PIL library
"""
size = img.size[0]
if img.mode == 'P':
# For paletted images
if img.info.get('transparency', False):
return True
# Convert to RGBA to check alpha
img = img.convert('RGBA')
if img.mode == 'RGBA':
# Returns the number of pixels in this image that are transparent
# Assuming a tile size of 256, 65536 would be fully transparent
transparent_pixels = img.histogram()[-size]
if transparent_pixels == 0:
# No transparency
return 0
elif 0 < transparent_pixels < (size * size):
# Image has some transparency
return 1
else:
# Image is fully transparent, and can be discarded
return -1
# return img.histogram()[-size]
return False
def file_count(base_dir):
"""
A function that finds all image tiles in a base directory. The base
directory should be arranged in TMS format, i.e. z/x/y.
Inputs:
base_dir -- the name of the TMS folder containing tiles.
Returns:
A list of dictionary objects containing the full file path and TMS
coordinates of the image tile.
"""
print("Calculating number of tiles, this could take a while...")
file_list = []
# Avoiding dots (functional references) will increase performance of
# the loop because they will not be reevaluated each iteration.
for root, sub_folders, files in walk(base_dir):
temp_list = [join(root, f) for f in files if f.endswith(IMAGE_TYPES)]
file_list += temp_list
print("Found {} total tiles.".format(len(file_list)))
return [split_all(item) for item in file_list]
def split_all(path):
"""
Function that parses TMS coordinates from a full images file path.
Inputs:
path -- a full file path to an image tile.
Returns:
A dictionary containing the TMS coordinates of the tile and its full
file path.
"""
parts = []
full_path = path
# Parse out the tms coordinates
# for i in xrange(3):
# head, tail = split(path)
# parts.append(tail)
# path = head
# file_dict = dict(y=int(parts[0].split('.')[0]),
# x=int(parts[1]),
# z=int(parts[2]),
# path=full_path)
head, tail = split(path)
filename = tail.split('.')[0]
x, y = filename.split('_', 1)
z = split(head)[1]
print("x=%s, y=%s, z=%s"%(x, y, z))
file_dict = dict(y=int(y),
x=int(x),
z=int(z),
path=path)
return file_dict
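# Hedged illustration (not in the original script) of the tile layout split_all()
# expects here: <base>/<z>/<x>_<y>.<ext>; the path below is made up for the example.
def _split_all_example():
    example = join('tiles', '12', '654_1321.png')
    return split_all(example)  # -> {'y': 1321, 'x': 654, 'z': 12, 'path': example}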
def worker_map(temp_db, tile_dict, extra_args, invert_y):
"""
Function responsible for sending the correct oriented tile data to a
temporary sqlite3 database.
Inputs:
temp_db -- a temporary sqlite3 database that will hold this worker's tiles
tile_dict -- a dictionary with TMS coordinates and file path for a tile
tile_info -- a list of ZoomMetadata objects pre-generated for this tile set
imagery -- the type of image format to send to the sqlite3 database
invert_y -- a function that will flip the Y axis of the tile if present
"""
tile_info = extra_args['tile_info']
imagery = extra_args['imagery']
jpeg_quality = extra_args['jpeg_quality']
zoom = tile_dict['z']
if extra_args['renumber']:
zoom -= 1
level = next((item for item in tile_info if item.zoom == int(zoom)), None)
# fiddle with offsets based on absolute (NSG profile) vs relative row/column numbering
x_row = tile_dict['x'] if extra_args['nsg_profile'] else tile_dict['x'] - level.min_tile_row
if invert_y is not None:
y_column = invert_y(zoom, tile_dict['y'])
if not extra_args['nsg_profile']:
y_offset = invert_y(zoom, level.max_tile_col)
y_column -= y_offset
else:
y_column = tile_dict['y'] if extra_args['nsg_profile'] else tile_dict['y'] - level.min_tile_col
if IOPEN is not None:
print("IOPEN temp_db.insert_image_blob(zoom=%s, x_row=%s, y_column=%s"%(zoom, x_row, y_column))
img = IOPEN(tile_dict['path'], 'r')
data = ioBuffer()
# TODO add options for "mvt" and "GeoJson"
if imagery == 'mixed':
if img_has_transparency(img):
data = img_to_buf(img, 'png', jpeg_quality).read()
else:
data = img_to_buf(img, 'jpeg', jpeg_quality).read()
else:
data = img_to_buf(img, imagery, jpeg_quality).read()
temp_db.insert_image_blob(zoom, x_row, y_column, sbinary(data))
else:
print("NONE IOPEN temp_db.insert_image_blob(zoom=%s, x_row=%s, y_column=%s"%(zoom, x_row, y_column))
file_handle = open(tile_dict['path'], 'rb')
data = buffer(file_handle.read())
temp_db.insert_image_blob(zoom, x_row, y_column, data)
file_handle.close()
def sqlite_worker(file_list, extra_args):
"""
Worker function called by asynchronous processes. This function
iterates through a set of tiles to process them into a TempDB object.
Inputs:
file_list -- an array containing a subset of tiles that will be processed
by this function into a TempDB object
base_dir -- the directory in which the geopackage will be created,
.gpkg.part files will be generated here
metadata -- a ZoomLevelMetadata object containing information about
the tiles in the TMS directory
"""
# TODO create the tempDB by adding the table name and telling which type (tiles/vectortiles)
with TempDB(extra_args['root_dir'], extra_args['table_name']) as temp_db:
invert_y = None
if extra_args['lower_left']:
if extra_args['srs'] == 3857:
invert_y = Mercator.invert_y
elif extra_args['srs'] == 4326:
if extra_args['nsg_profile']:
invert_y = GeodeticNSG.invert_y
else:
invert_y = Geodetic.invert_y
elif extra_args['srs'] == 3395:
invert_y = EllipsoidalMercator.invert_y
elif extra_args['srs'] == 9804:
invert_y = ScaledWorldMercator.invert_y
#TODO update for retile
[worker_map(temp_db, item, extra_args, invert_y) for item in file_list]
def allocate(cores, pool, file_list, extra_args):
"""
Recursive function that fairly distributes tiles to asynchronous worker
processes. For N processes and C cores, N=C if C is divisible by 2. If
not, then N is the largest factor of 8 that is still less than C.
"""
    if cores == 1:
print("Spawning worker with {} files".format(len(file_list)))
return [pool.apply_async(sqlite_worker, [file_list, extra_args])]
else:
files = len(file_list)
head = allocate(
int(cores / 2), pool, file_list[:int(files / 2)], extra_args)
tail = allocate(
"""High-level sound and video player."""
from __future__ import print_function
from __future__ import division
from builtins import object
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import pyglet
from pyglet.media.drivers import get_audio_driver, get_silent_audio_driver
from pyglet.media.events import MediaEvent
from pyglet.media.exceptions import MediaException
from pyglet.media.sources.base import SourceGroup, StaticSource
_debug = pyglet.options['debug_media']
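# Hedged usage sketch (not part of the original module): queue one source on a
# Player and start playback. The media file name is a placeholder and playback
# assumes a decodable audio file plus a running pyglet application loop.
def _example_playback():
    player = Player()
    source = pyglet.media.load('example.ogg')
    player.queue(source)
    player.play()
    return player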
class Player(pyglet.event.EventDispatcher):
"""High-level sound and video player.
"""
_last_video_timestamp = None
_texture = None
# Spacialisation attributes, preserved between audio players
_volume = 1.0
_min_distance = 1.0
_max_distance = 100000000.
_position = (0, 0, 0)
_pitch = 1.0
_cone_orientation = (0, 0, 1)
_cone_inner_angle = 360.
_cone_outer_angle = 360.
_cone_outer_gain = 1.
def __init__(self):
# List of queued source groups
self._groups = []
self._audio_player = None
# Desired play state (not an indication of actual state).
self._playing = False
self._paused_time = 0.0
def __del__(self):
"""Release the Player resources."""
self.delete()
def queue(self, source):
"""
Queue the source on this player.
If the player has no source, the player will be paused immediately on this source.
:param pyglet.media.Source source: The source to queue.
"""
if isinstance(source, SourceGroup):
self._groups.append(source)
else:
if (self._groups and
source.audio_format == self._groups[-1].audio_format and
source.video_format == self._groups[-1].video_format):
self._groups[-1].queue(source)
else:
group = SourceGroup(source.audio_format, source.video_format)
group.queue(source)
self._groups.append(group)
self._set_playing(self._playing)
def _set_playing(self, playing):
#stopping = self._playing and not playing
#starting = not self._playing and playing
self._playing = playing
source = self.source
if playing and source:
if not self._audio_player:
self._create_audio_player()
self._audio_player.play()
if source.video_format:
if not self._texture:
self._create_texture()
if self.source.video_format.frame_rate:
period = 1. / self.source.video_format.frame_rate
else:
period = 1. / 30.
pyglet.clock.schedule_interval(self.update_texture, period)
else:
if self._audio_player:
self._audio_player.stop()
pyglet.clock.unschedule(self.update_texture)
def _get_playing(self):
"""
Read-only. Determine if the player state is playing.
The *playing* property is irrespective of whether or not there is
actually a source to play. If *playing* is ``True`` and a source is
queued, it will begin playing immediately. If *playing* is ``False``,
it is implied that the player is paused. There is no other possible
state.
"""
return self._playing
playing = property(_get_playing)
def play(self):
"""
Begin playing the current source.
This has no effect if the player is already playing.
"""
self._set_playing(True)
def pause(self):
"""
Pause playback of the current source.
This has no effect if the player is already paused.
"""
self._set_playing(False)
if self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is not None:
self._paused_time = time
def delete(self):
"""Tear down the player and any child objects."""
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
while self._groups:
del self._groups[0]
def next_source(self):
"""
Move immediately to the next queued source.
There may be a gap in playback while the audio buffer is refilled.
"""
if not self._groups:
return
group = self._groups[0]
if group.has_next():
group.next_source()
return
if self.source.video_format:
self._texture = None
pyglet.clock.unschedule(self.update_texture)
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
del self._groups[0]
if self._groups:
self._set_playing(self._playing)
return
self._set_playing(False)
self.dispatch_event('on_player_eos')
#: :deprecated: Use `next_source` instead.
next = next_source # old API, worked badly with 2to3
def seek(self, time):
"""
Seek for playback to the indicated timestamp in seconds on the current
source. If the timestamp is outside the duration of the source, it
will be clamped to the end.
"""
if not self.source:
return
if _debug:
print('Player.seek(%r)' % time)
self._paused_time = time
self.source.seek(time)
if self._audio_player:
# XXX: According to docstring in AbstractAudioPlayer this cannot be called when the
# player is not stopped
self._audio_player.clear()
if self.source.video_format:
self._last_video_timestamp = None
self.update_texture(time=time)
def _create_audio_player(self):
assert not self._audio_player
assert self._groups
group = self._groups[0]
audio_format = group.audio_format
if audio_format:
audio_driver = get_audio_driver()
else:
audio_driver = get_silent_audio_driver()
self._audio_player = audio_driver.create_audio_player(group, self)
_class = self.__class__
def _set(name):
private_name = '_' + name
value = getattr(self, private_name)
if value != getattr(_class, private_name):
getattr(self._audio_player, 'set_' + name)(value)
_set('volume')
_set('min_distance')
_set('max_distance')
_set('position')
_set('pitch')
_set('cone_orientation')
_set('cone_inner_angle')
_set('cone_outer_angle')
_set('cone_outer_gain')
def _get_source(self):
"""Read-only. The current :py:class:`Source`, or ``None``."""
if not self._groups:
return None
return self._groups[0].get_current_source()
source = property(_get_source)
def _get_time(self):
"""
Read-only. Current playback time of the current source.
The playback time is a float expressed in seconds, with 0.0 being the
beginning of the sound. The playback time returned represents the time
encoded in the source, and may not reflect actual time passed due to
pitch shifting or pausing.
"""
time = None
if self._playing and self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is None:
return self._paused_time
else:
return time
time = property(_get_time)
def _create_texture(self):
video_format = self.source.video_format
self._texture = pyglet.image.Texture.create(
video_format.width, video_format.height, rectangle=True)
self._texture = self._texture.get_transform(flip_y=True)
self._texture.anchor_y = 0
def get_texture(self):
"""
Get the texture for the current video frame.
You should call this method every time you display a frame of video,
as multiple textures might be used. The return value will be None if
there is no video in the current source.
:return: :py:class:`pyglet.image.Texture`
"""
return self._texture
def seek_next_frame(self):
"""Step forwards one video frame in the current Source.
"""
time = self._groups[0].get_next_video_timestamp()
if time is None:
return
self.seek(time)
def update_texture(self, dt=None, time=None):
"""Manually update the texture from the current source. This happens
automatically, so you shouldn't need to call this method.
"""
if time is None:
time = self._audio_player.get_time()
if time is None:
return
if (self._last_video_timestamp is not None and
time <= self._last_video_timestamp):
return
ts = self._groups[0].get_next_video_timestamp()
while ts is not None and ts < time:
self._groups[0].get_next_video_frame() # Discard frame
ts = self._groups[0].get_next_video_timestamp()
if ts is None:
self._last_video_timestamp = None
return
image = self._groups[0].get_next_video_frame()
if image is not None:
if self._texture is None:
self._create_texture()
self._texture.blit_into(image, 0, 0, 0)
self._last_video_timestamp = ts
def _player_property(name, doc=None):
private_name = '_' + name
set_name = 'set_' + name
def _player_property_set(self, value):
setattr(self, private_name, value)
if self._audio_player:
getattr(self._audio_player, set_name)(value)
def _player_property_get(self):
return getattr(self, private_name)
return property(_player_property_get, _player_property_set, doc=doc)
volume = _player_property('volume', doc="""
The volume level of sound playback.
The nominal level is 1.0, and 0.0 is silence.
The volume level is affected by the distance from the listener (if
positioned).
""")
min_distance = _player_property('min_distance', doc="""
The distance beyond which the sound volume drops by half, and within
which no attenuation is applied.
The minimum distance controls how quickly a sound is attenuated as it
moves away from the listener. The gain is clamped at the nominal value
within the min distance. By default the value is 1.0.
The unit defaults to meters, but can be modified with the listener properties.
""")
max_distance = _player_property('max_distance', doc="""
The distance at which no further attenuation is applied.
When the distance from the listener to the player is greater than this
value, attenuation is calculated as if the distance were value. By
default the maximum distance is infinity.
The unit defaults to meters, but can be modified with the listener
properties.
""")
position = _player_property('position', doc="""
    The position of the sound
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=method-hidden,C0103,E265,E303,R0914,W0621,W503
"""Module describing the weighted non-linear optimization scheme used to
determine the wavelength sensitivity of the spectrometer using a polynomial
as a model function"""
import os
import sys
import math
import logging
from datetime import datetime
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
import compute_series_para
import boltzmann_popln as bp
# ------------------------------------------------------
# ------------------------------------------------------
# RUN PARAMETERS (CHANGE THESE BEFORE RUNNING
# OPTIMIZATION
# ------------------------------------------------------
# LOAD EXPERIMENTAL BAND AREA DATA
# | band area | error |
# | value | value |
# | value | value |
# | value | value |
# without header in the following files
# Change following paths to load expt data
#xaxis = np.loadtxt("Ramanshift_axis")
# Q(J) band intensities --------------------------------
dataD2Q = np.loadtxt("BA_D2_q1.txt")
dataHDQ = np.loadtxt("BA_HD_q1.txt")
dataH2Q = np.loadtxt("BA_H2_q1.txt")
dataD2_Q2 = np.loadtxt("D2_Q2_testdata")
dataD2Q4 = np.loadtxt("BA_D2_q1_J4.txt")
dataD2OS = np.loadtxt("D2_model_O2S0")
# ------------------------------------------------------
# PARALLEL POLARIZATION
# set indices for OJ,QJ and SJ for H2, HD and D2 in the residual functions below
# ------------------------------------------------------
# ----------------------------------------
# norm type
# Do not change the variable name on the LHS
# available norm types : Frobenius, Frobenius_sq, absolute
# lower case : frobenius, frobenius_sq, absolute
# or abbreviations: F , FS , A
norm = 'Frobenius'
# if norm is not set then the default is sum of absolute values
# See readme for more details
# ----------------------------------------
print('Dimension of input data of Q bands')
print('\t', dataH2Q.shape)
print('\t', dataHDQ.shape)
print('\t', dataD2Q.shape)
print('\t', dataD2_Q2.shape)
print('\t', dataD2Q4.shape)
print('\t', dataD2OS.shape)
# ------------------------------------------------------
# SET INIT COEFS
temp_init = np.zeros((1))
temp_init[0] = 296
# initial run will be with above parameters
# ------------------------------------------------
# ------------------------------------------------------
print('\t**********************************************************')
print('\t ')
print('\t This module is for determining the temperature from ')
print('\t observed vibration-rotation Raman intensities of H2, HD and D2. ')
print('\t This module is useful for testing the accuracy of the intensity ')
print('\t calibration procedure. ')
print('\n\t >> Ratios of all observed Raman intensities are treated here as a matrix. << ')
print('\n\t >> This function deals with parallel polarized intensities. << ')
print('\n\t >> Temperature is the only fit parameter here << ')
print('\n\t This module requires edits on lines 32 to 74 to ')
print('\n\t load and set parameters for the analysis.')
print('\t ')
print('\t**********************************************************')
print('\n\t\t Checking imported data and set params')
data_error=0
if isinstance(dataH2Q, np.ndarray):
print("\t\t ", "dataH2Q found, OK")
else:
print("\t\t ", "dataH2Q not found.")
data_error=1
if isinstance(dataHDQ, np.ndarray):
print("\t\t ", "dataHDQ found, OK")
else:
print("\t\t ", "dataHDQ not found.")
data_error=1
if isinstance(dataD2Q, np.ndarray):
print("\t\t ", "dataD2Q found, OK")
else:
print("\t\t ", "dataD2Q not found.")
data_error=1
print('\n\t\t Analysis parameters:')
print("\t\t Norm (defn of residual): ", norm)
print('\t**********************************************************')
print('\n\t REQUIRED DATA')
print('\t\t\t Ramanshift = vector, the x-axis in relative wavenumbers')
print('\t\t\t band area and error = 2D (2 columns), for H2, HD and D2')
print('\n\t\t\t J_max = scalar, for H2, HD and D2 (to compute reference')
print('\t\t\t\t spectra), See residual functions')
print('\t**********************************************************')
print('\n\t\t\t Example:')
print('\t\t\t run_fit_D2_O2S0 (298 )')
print('\t**********************************************************')
# ------------------------------------------------------
# ------------------------------------------------------
# *******************************************************************
# Set logging ------------------------------------------
fileh = logging.FileHandler('./log_temperature_determination', 'w+')
formatter = logging.Formatter('%(message)s')
fileh.setFormatter(formatter)
log = logging.getLogger() # root logger
for hdlr in log.handlers[:]: # remove all old handlers
log.removeHandler(hdlr)
log.addHandler(fileh) # set the new handler
# ------------------------------------------------------
# Logging starts here
logger = logging.getLogger(os.path.basename(__file__))
log.info(logger)
logging.getLogger().setLevel(logging.INFO)
log.warning(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
log.warning('\n',)
log.error("------------ Run log ------------\n")
log.error("---Temperature determination from Raman intensities---\n")
log.error("---Parallel polarization---\n")
# ------------------------------------------------------
# ------------------------------------------------
# COMMON FUNCTIONS
# ------------------------------------------------
# *******************************************************************
def gen_intensity_mat(arr, index):
"""To obtain the intensity matrix for the numerator or denominator\
in the Intensity ratio matrix
array = 2D array of data where index column contains the intensity
data
index = corresponding to the column which has intensity data
returns => square matrix of intensity ratio : { I(v1)/I(v2) } """
spec1D = arr[:, index]
spec_mat = np.zeros((spec1D.shape[0], spec1D.shape[0]))
for i in range(spec1D.shape[0]):
spec_mat[:, i] = spec1D / spec1D[i]
return spec_mat
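# Small numeric sketch (illustrative only, not in the original module): for band
# intensities [2.0, 4.0] stored in column 1, column i of the result holds I(v)/I(v_i).
def _gen_intensity_mat_example():
    demo = np.array([[0.0, 2.0, 0.0],
                     [0.0, 4.0, 0.0]])
    return gen_intensity_mat(demo, 1)  # -> [[1.0, 0.5], [2.0, 1.0]]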
# ------------------------------------------------
def clean_mat(square_array):
"""Set the upper triangular portion of square matrix to zero
including the diagonal
input = any square array """
np.fill_diagonal(square_array, 0)
return np.tril(square_array, k=0)
# ------------------------------------------------
def gen_weight(expt_data):
"""To generate the weight matrix from the experimental data 2D array
expt_data = 2D array of expt data where
0 index column is the band area
and
1 index column is the error
"""
error_mat = np.zeros((expt_data.shape[0], expt_data.shape[0]))
for i in range(expt_data.shape[0]):
for j in range(expt_data.shape[0]):
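            # standard error propagation for a ratio:
            # sigma(I_i/I_j) = (I_i/I_j) * sqrt((sigma_i/I_i)**2 + (sigma_j/I_j)**2)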
error_mat[i, j] = (expt_data[i, 0] / expt_data[j, 0]) \
* math.sqrt((expt_data[i, 1] / expt_data[i, 0])**2
+ (expt_data[j, 1] / expt_data[j, 0])**2)
# return factor * inverse_square(error_mat)
return inverse(error_mat)
# ------------------------------------------------
def inverse_square(array):
"""return the inverse square of array, for all elements"""
return 1 / (array**2)
# ------------------------------------------------
def inverse(array):
"""return the inverse square of array, for all elements"""
return 1 / (array)
# ------------------------------------------------
# *******************************************************************
# RESIDUAL FUNCTIONS DEFINED BELOW
# These functions will require edit based on the name of the numpy
# array containing the experimental data.
# Also, the J-index for the rotational level also requires edit
# depending on the length of the data ( number of states included).
# *******************************************************************
def residual_Q_D2(param):
'''Function which computes the residual (as sum of squares) comparing the
ratio of expt with the corresponding calculated ratios. The calculated
ratios are computed for given T.
Param : T
'''
TK = param
sosD2 = bp.sumofstate_D2(TK)
QJ_D2 = 2 # max J value of analyzed Q-bands (change this value depending
# on the experimental data
# compute_series_para.D2_Q1(temperature, J-level, sumofstates)
computed_D2 = compute_series_para.D2_Q1(TK, QJ_D2, sosD2)
# ------ D2 ------
trueR_D2 = gen_intensity_mat(computed_D2, 2)
# experimental data is used in the following two lines
# modify these lines as required (make sure to edit the
# JMax level defined above as well)
expt_D2 = gen_intensity_mat(dataD2_Q2, 0)
errD2_output = gen_weight(dataD2_Q2)
#print(computed_D2.shape, dataD2Q.shape)
errorP = errD2_output
calc_D2 = clean_mat(trueR_D2)
expt_D2 = clean_mat(expt_D2)
errorP = clean_mat(errorP)
# ----------------
diffD2 = expt_D2 - calc_D2
# scale by weights
#diffD2 = (np.multiply(errorP , diffD2))
# remove redundant terms
diffD2 = clean_mat(diffD2)
#np.savetxt("diff_D2", diffD2,fmt='%2.4f')
# choosing norm ----------
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(diffD2))
elif norm.lower()=='frobenius' or norm =='F' :
E=np.sqrt(np.sum(np.square(diffD2)))
elif norm.lower()=='frobenius_square' or norm =='FS' :
E=np.sum(np.square(diffD2))
# -------------------------
return E
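# Hedged sketch (not from the original module) of minimizing a residual such as
# residual_Q_D2 over the temperature with scipy; the original file defines
# dedicated run_fit_* helpers further below that also handle logging and output.
def _example_fit_D2(T_init=296.0):
    res = opt.minimize(residual_Q_D2, np.array([T_init]), method='Nelder-Mead')
    return res.x[0]  # best-fit temperature in kelvin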
# *******************************************************************
def residual_Q_D2_234(param):
'''Function which computes the residual (as sum of squares) comparing the
ratio of expt with the corresponding calculated ratios. The calculated
ratios are computed for given T.
Param : T
'''
TK = param
sosD2 = bp.sumofstate_D2(TK)
QJ_D2 = 4 # max J value of analyzed Q-bands (change this value depending
# on the experimental data
computed_D2 = compute_series_para.D2_Q1(TK, QJ_D2, sosD2)
# ------ D2 ------
#print(computed_D2)
computed_D2=computed_D2[:-2, :]
#print(computed_D2)
# experimental data is used in the following two lines
# modify these lines as required (make sure to edit the
# JMax level defined above as well)
dataD2Q = dataD2Q4[:-2, :] # subset of datapoints included here
#print(computed_D2.shape, dataD2Q.shape)
trueR_D2 = gen_intensity_mat(computed_D2, 2)
expt_D2 = gen_intensity_mat(dataD2Q, 0)
errD2_output = gen_weight(dataD2Q)
errorP = errD2_output
#np.savetxt("exptD2",clean_mat(expt_D2),fmt='%2.4f')
#np.savetxt("errD2",clean_mat(errorP),fmt='%2.4f')
calc_D2 = clean_mat(trueR_D2)
expt_D2 = clean_mat(expt_D2)
errorP = clean_mat(errorP)
# ----------------
diffD2 = expt_D2 - calc_D2
# scale by weights
#diffD2 = (np.multiply(errorP , diffD2))
# remove redundant terms
diffD2 = clean_mat(diffD2)
#np.savetxt("diff_D2", diffD2,fmt='%2.4f')
# choosing norm ----------
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(diffD2))
elif norm.lower()=='frobenius' or norm =='F' :
E=np.sqrt(np.sum(np.square(diffD2)))
elif norm.lower()=='frobenius_square' or norm =='FS' :
E=np.sum(np.square(diffD2))
# -------------------------
return E
# *******************************************************************
def residual_Q_HD(param):
'''Function which computes the residual (as sum of squares) comparing the
ratio of expt with the corresponding calculated ratios. The calculated
ratios are computed for given T.
Param : T
'''
TK = param
sosHD = bp.sumofstate_HD(TK)
QJ_HD = 3 # max J value of analyzed Q-bands (change this value depending
# on the experimental data
computed_HD = compute_series_para.HD_Q1(TK, QJ_HD, sosHD)
# ------ HD ------
trueR_HD = gen_intensity_mat(computed_HD, 2)
# experimental data is used in the following two lines
# modify these lines as required (make sure to edit the
# JMax level defined above as well)
expt_HD = gen_intensity_mat(dataHDQ, 0)
errHD_output = gen_weight(dataHDQ)
errorP = errHD_output
# errorP = 1/(np.divide( errHD_output, expt_HD))
calc_HD = clean_mat(trueR_HD)
expt_HD = clean_mat(expt_HD)
errorP = clean_mat(errorP)
# ----------------
diffHD = expt_HD - calc_HD
# scale by weights
# diffHD = (np.multiply(errorP , diffHD))
# remove redundant terms
diffHD = clean_mat(diffHD)
# choosing norm ----------
if norm=='' or norm.lower()=='absolute' or norm =='a' or norm =='A':
E=np.sum(np.abs(diffHD))
    elif norm.lower()=='frobenius'
""" Contains basic Batch classes """
import os
import traceback
import threading
import dill
try:
import blosc
except ImportError:
pass
import numpy as np
try:
import pandas as pd
except ImportError:
pass
try:
import feather
except ImportError:
pass
try:
import dask.dataframe as dd
except ImportError:
pass
from .dsindex import DatasetIndex, FilesIndex
from .decorators import action, inbatch_parallel, any_action_failed
from .dataset import Dataset
from .batch_base import BaseBatch
from .components import MetaComponentsTuple
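# Hedged usage sketch (not part of the original module): build a Batch directly
# from an in-memory numpy array; from_data() generates the index automatically.
def _example_batch():
    data = np.arange(10)
    return Batch.from_data(None, data)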
class Batch(BaseBatch):
""" The core Batch class """
_item_class = None
components = None
def __init__(self, index, preloaded=None, *args, **kwargs):
if self.components is not None and not isinstance(self.components, tuple):
raise TypeError("components should be a tuple of strings with components names")
super().__init__(index, *args, **kwargs)
self._preloaded_lock = threading.Lock()
self._preloaded = preloaded
self._local = None
self._pipeline = None
@property
def pipeline(self):
""": Pipeline - a pipeline the batch is being used in """
if self._local is not None and hasattr(self._local, 'pipeline'):
return self._local.pipeline
else:
return self._pipeline
@pipeline.setter
def pipeline(self, val):
""" Store pipeline in a thread-local storage """
if val is None:
self._local = None
else:
if self._local is None:
self._local = threading.local()
self._local.pipeline = val
self._pipeline = val
def deepcopy(self):
""" Return a deep copy of the batch.
Constructs a new ``Batch`` instance and then recursively copies all
the objects found in the original batch, except the ``pipeline``,
which remains unchanged.
Returns
-------
Batch
"""
pipeline = self.pipeline
self.pipeline = None
dump_batch = dill.dumps(self)
self.pipeline = pipeline
restored_batch = dill.loads(dump_batch)
restored_batch.pipeline = pipeline
return restored_batch
@classmethod
def from_data(cls, index, data):
""" Create batch from a given dataset """
# this is roughly equivalent to self.data = data
if index is None:
index = np.arange(len(data))
return cls(index, preloaded=data)
@classmethod
def from_batch(cls, batch):
""" Create batch from another batch """
return cls(batch.index, preloaded=batch._data) # pylint: disable=protected-access
@classmethod
def merge(cls, batches, batch_size=None):
""" Merge several batches to form a new batch of a given size
Parameters
----------
batches : tuple of batches
batch_size : int or None
if `None`, just merge all batches into one batch (the rest will be `None`),
if `int`, then make one batch of `batch_size` and a batch with the rest of data.
Returns
-------
batch, rest : tuple of two batches
"""
def _make_index(data):
return DatasetIndex(np.arange(data.shape[0])) if data is not None and data.shape[0] > 0 else None
def _make_batch(data):
index = _make_index(data[0])
return cls(index, preloaded=tuple(data)) if index is not None else None
if batch_size is None:
break_point = len(batches)
last_batch_len = len(batches[-1])
else:
break_point = -1
last_batch_len = 0
cur_size = 0
for i, b in enumerate(batches):
cur_batch_len = len(b)
if cur_size + cur_batch_len >= batch_size:
break_point = i
last_batch_len = batch_size - cur_size
break
else:
cur_size += cur_batch_len
last_batch_len = cur_batch_len
components = batches[0].components or (None,)
new_data = list(None for _ in components)
rest_data = list(None for _ in components)
for i, comp in enumerate(components):
if batch_size is None:
new_comp = [b.get(component=comp) for b in batches[:break_point]]
else:
b = batches[break_point]
last_batch_len_ = b.get_pos(None, comp, b.indices[last_batch_len - 1])
new_comp = [b.get(component=comp) for b in batches[:break_point]] + \
[batches[break_point].get(component=comp)[:last_batch_len_ + 1]]
new_data[i] = cls.merge_component(comp, new_comp)
if batch_size is not None:
rest_comp = [batches[break_point].get(component=comp)[last_batch_len_ + 1:]] + \
[b.get(component=comp) for b in batches[break_point + 1:]]
rest_data[i] = cls.merge_component(comp, rest_comp)
new_batch = _make_batch(new_data)
rest_batch = _make_batch(rest_data)
return new_batch, rest_batch
@classmethod
def merge_component(cls, component=None, data=None):
""" Merge the same component data from several batches """
_ = component
if isinstance(data[0], np.ndarray):
return np.concatenate(data)
else:
raise TypeError("Unknown data type", type(data[0]))
def as_dataset(self, dataset=None):
""" Makes a new dataset from batch data
Parameters
----------
dataset: could be a dataset or a Dataset class
Returns
-------
an instance of a class specified by `dataset` arg, preloaded with this batch data
"""
if dataset is None:
dataset_class = Dataset
elif isinstance(dataset, Dataset):
dataset_class = dataset.__class__
elif isinstance(dataset, type):
dataset_class = dataset
else:
raise TypeError("dataset should be some Dataset class or an instance of some Dataset class or None")
return dataset_class(self.index, batch_class=type(self), preloaded=self.data)
@property
def indices(self):
""": numpy array - an array with the indices """
if isinstance(self.index, DatasetIndex):
return self.index.indices
return self.index
def __len__(self):
return len(self.index)
@property
def size(self):
""": int - number of items in the batch """
return len(self.index)
@property
def data(self):
""": tuple or named components - batch data """
if self._data is None and self._preloaded is not None:
# load data the first time it's requested
with self._preloaded_lock:
if self._data is None and self._preloaded is not None:
self.load(src=self._preloaded)
res = self._data if self.components is None else self._data_named
return res if res is not None else self._empty_data
def make_item_class(self, local=False):
""" Create a class to handle data components """
# pylint: disable=protected-access
if self.components is None:
type(self)._item_class = None
elif type(self)._item_class is None or not local:
comp_class = MetaComponentsTuple(type(self).__name__ + 'Components', components=self.components)
type(self)._item_class = comp_class
else:
comp_class = MetaComponentsTuple(type(self).__name__ + 'Components' + str(id(self)),
components=self.components)
self._item_class = comp_class
@action
def add_components(self, components, init=None):
""" Add new components
Parameters
----------
components : str or list
new component names
init : array-like
initial component data
"""
if isinstance(components, str):
components = (components,)
init = (init,)
elif isinstance(components, list):
components = tuple(components)
data = self._data
if self.components is None:
self.components = components
data = tuple()
else:
self.components = self.components + components
data = data + tuple(init)
self.make_item_class(local=True)
self._data = data
return self
def __getstate__(self):
state = self.__dict__.copy()
state.pop('_data_named')
return state
def __setstate__(self, state):
for k, v in state.items():
# this warrants that all hidden objects are reconstructed upon unpickling
setattr(self, k, v)
@property
def _empty_data(self):
return None if self.components is None else self._item_class() # pylint: disable=not-callable
def get_pos(self, data, component, index):
""" Return a position in data for a given index
Parameters
----------
data : some array or tuple of arrays
if `None`, should return a position in :attr:`self.data <.Batch.data>`
components : None, int or str
- None - data has no components (e.g. just an array or pandas.DataFrame)
- int - a position of a data component, when components names are not defined
(e.g. data is a tuple)
- str - a name of a data component
index : any
an index id
Returns
-------
int
a position in a batch data where an item with a given index is stored
Notes
-----
It is used to read / write data from / to a given component::
batch_data = data.component[pos]
data.component[pos] = new_data
if `self.data` holds a numpy array, then get_pos(None, None, index) should
just return `self.index.get_pos(index)`
if `self.data.images` contains BATCH_SIZE images as a numpy array,
then `get_pos(None, 'images', index)` should return `self.index.get_pos(index)`
if `self.data.labels` is a dict {index: label}, then `get_pos(None, 'labels', index)` should return index.
if `data` is not `None`, then you need to know in advance how to get a position for a given index.
For instance, `data` is a large numpy array, and a batch is a subset of this array and
`batch.index` holds row numbers from a large arrays.
Thus, `get_pos(data, None, index)` should just return index.
A more complicated example of data:
- batch represent small crops of large images
- `self.data.source` holds a few large images (e.g just 5 items)
- `self.data.coords` holds coordinates for crops (e.g. 100 items)
- `self.data.image_no` holds an array of image numbers for each crop (so it also contains 100 items)
then `get_pos(None, 'source', index)` should return `self.data.image_no[self.index.get_pos(index)]`.
Whilst, `get_pos(data, 'source', index)` should return `data.image_no[index]`.
"""
_ = component
if data is None:
pos = self.index.get_pos(index)
else:
pos = index
return pos
def __getattr__(self, name):
if self.components is not None and name in self.components: # pylint: disable=unsupported-membership-test
attr = getattr(self.data, name)
return attr
else:
raise AttributeError("%s not found in class %s" % (name, self.__class__.__name__))
def __setattr__(self, name, value):
if self.components is not None:
if name == "_data":
super().__setattr__(name, value)
if self._item_class is None:
self.make_item_class()
self._data_named = self._item_class(data=self._data) # pylint: disable=not-callable
elif name in self.components: # pylint: disable=unsupported-membership-test
if self._data_named is None:
_ = self.data
setattr(self._data_named, name, value)
super().__setattr__('_data', self._data_named.data)
else:
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
    def
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from jams.const import eps
def zacharias(h, clay, sand, db, params=None, thetar=False, thetas=False, lnalpha=False, n=False):
"""
Soil water content with the van Genuchten equation and
the pedotransfer functions of Zacharias et al. (2007).
Definition
----------
def zacharias(h, clay, sand, db, params=None, thetar=False, thetas=False, lnalpha=False, n=False):
Input
-----
h pressure head, scalar or array [cm], 0=saturation, 15000=wilting point
clay clay content, scalar or array [%, i.e. 0-100]
sand sand content, scalar or array [%, i.e. 0-100]
db bulk density, scalar or array [g/cm3], quartz=2.65
Optional Input
--------------
params Parameter for Zacharias et al. (2007) pedotransfer functions
If None, values from Zacharias et al. will be taken that are different
between sandy and non-sandy soil (<66.5% sand)
Options
-------
thetar If True, outputs residual water content thetar as well [m3 m-3]
thetas If True, outputs saturation water content thetas as well [m3 m-3]
lnalpha If True, outpus logarithm of shape parameter alpha as well [1/cm]
n If True, output exponent n as well
Output
------
Soil water content theta [m^3 m^-3]
Restrictions
------------
Does not check the validity of the parameter set, i.e. negative soil moistures
can occur, for example.
Use zacharias_check to check the parameter set first.
Examples
--------
>>> h = np.array([0.0000000, 0.0000000, 10.000000, 31.622777,
... 100.00000, 199.52623, 199.52623,
... 501.18723, 2511.8864, 15848.932])
>>> sand = np.array([12.800000, 61.600000, 17.200000, 85.800000,
... 16.500000, 12.800000, 61.600000,
... 17.200000, 85.800000, 16.500000])
>>> clay = np.array([30.500000, 17.200000, 25.500000, 8.9000000,
... 28.100000, 30.500000, 17.200000,
... 25.500000, 8.9000000, 28.100000])
>>> rho = np.array([1.2100000, 1.3400000, 1.4600000, 1.6300000,
... 1.3000000, 1.2100000, 1.3400000,
... 1.4600000, 1.6300000, 1.3000000])
>>> from autostring import astr
>>> print(astr(zacharias(h, clay, sand, rho),3,pp=True))
['0.500' '0.453' '0.421' '0.245' '0.393' '0.381' '0.285' '0.313' '0.039' '0.221']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2016 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Jun 2012
Modified, MC, Feb 2013 - ported to Python 3
MC, Nov 2016 - const.tiny -> const.eps
"""
#
# Check input
ih = np.where(h==0., eps, h)
if np.any(ih < 0.) | np.any(ih > 1e6):
raise ValueError('h must be >=0 and <= 1e6 (=pf6)')
iclay = np.where(clay==0., eps, clay)
if np.any(iclay < 0.) | np.any(iclay > 100.):
raise ValueError('clay must be >=0 and <= 100.')
isand = np.where(sand==0., eps, sand)
if np.any(isand < 0.) | np.any(isand > 100.):
raise ValueError('sand must be >=0 and <= 100.')
idb = np.array(db)
if np.any(idb < 0.) | np.any(idb > 2.65):
raise ValueError('db must be >=0 and <= 2.65.')
nn = np.size(isand)
if (np.size(iclay) != nn) | (np.size(idb) != nn) | (np.size(ih) != nn):
raise ValueError('h, sand, clay, and db must have the same sizes.')
if params is not None:
if np.size(params) != 15:
raise ValueError('size(params) must be 15.')
# save output shape
ns = np.shape(isand)
iclay = np.ravel(iclay)
isand = np.ravel(isand)
idb = np.ravel(idb)
# Take right params
par0 = np.empty(nn)
par1 = np.empty(nn)
par2 = np.empty(nn)
par3 = np.empty(nn)
par4 = np.empty(nn)
par5 = np.empty(nn)
par6 = np.empty(nn)
par7 = np.empty(nn)
par8 = np.empty(nn)
par9 = np.empty(nn)
par10 = np.empty(nn)
par11 = np.empty(nn)
par12 = np.empty(nn)
par13 = np.empty(nn)
par14 = np.empty(nn)
if params is not None:
# Either params given
par0[:] = params[0]
par1[:] = params[1]
par2[:] = params[2]
par3[:] = params[3]
par4[:] = params[4]
par5[:] = params[5]
par6[:] = params[6]
par7[:] = params[7]
par8[:] = params[8]
par9[:] = params[9]
par10[:] = params[10]
par11[:] = params[11]
par12[:] = params[12]
par13[:] = params[13]
par14[:] = params[14]
else:
# or take Zacharias
parclay = np.array([ 0., 0., 0.,
0.788, 0.001, -0.263,
-0.648, 0.044, -3.168, 0.023,
1.392, 1.212, -0.704, -0.418, -0.024])
parsand = np.array([ 0., 0., 0.,
0.890, -0.001, -0.322,
-4.197, 0.076, -0.276, 0.013,
-2.562, 3.750, -0.016, 7e-9, 4.004])
for i in range(nn):
if isand[i] < 66.5:
par = parclay
else:
par = parsand
par0[i] = par[0]
par1[i] = par[1]
par2[i] = par[2]
par3[i] = par[3]
par4[i] = par[4]
par5[i] = par[5]
par6[i] = par[6]
par7[i] = par[7]
par8[i] = par[8]
par9[i] = par[9]
par10[i] = par[10]
par11[i] = par[11]
par12[i] = par[12]
par13[i] = par[13]
par14[i] = par[14]
# Zacharias pedotransfer
ithetar = par0 + par1*iclay + par2*idb
ithetas = par3 + par4*iclay + par5*idb
ilna = par6 + par7*iclay + par8*idb + par9*isand
inn = par10 + par11*np.exp(par12*np.log(iclay)) + par13*np.exp(par14*np.log(isand))
imm = 1. - 1./np.where(inn != 0., inn, 1e-3)
# van Genuchten sign
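    # water retention curve: theta(h) = theta_r + (theta_s - theta_r) / (1 + (alpha*h)**n)**m,  with m = 1 - 1/n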
# limit exp to 600 and log to eps so that no over- and underflows occur
expmax = 600.
lnah = np.log(np.maximum(np.exp(ilna)*ih, eps))
ahn = np.exp(np.minimum(inn*lnah, expmax))
denom = np.maximum(np.exp(np.minimum(imm*np.log(1.+ahn), expmax)), eps)
itheta = np.where(ih <= eps, ithetas, ithetar + (ithetas-ithetar)/denom)
# Output
itheta = np.reshape(itheta, ns)
if nn==1: itheta = np.float(itheta)
if (thetar==False) & (thetas==False) & (lnalpha==False) & (n==False):
return itheta
else:
out = [itheta]
if thetar==True:
ithetar = np.reshape(ithetar, ns)
if nn==1: ithetar = np.float(ithetar)
out = out + [ithetar]
        if thetas==True:
            ithetas = np.reshape(ithetas, ns)
            if nn==1: ithetas = np.float(ithetas)
            out = out + [ithetas]
        if lnalpha==True:
            ilna = np.reshape(ilna, ns)
            if nn==1: ilna = np.float(ilna)
            out = out + [ilna]
        if n==True:
            inn = np.reshape(inn, ns)
            if nn==1: inn = np.float(inn)
            out = out + [inn]
return out
def zacharias_check(params, sand=None, clay=None):
"""
Checks if a given parameter set is valid for all possible soils with
the van Genuchten equation and the pedotransfer functions of Zacharias et al. (2007).
Definition
----------
def zacharias_check(params, sand=None, clay=None):
Input
-----
params array[15] with parameters a1, b1, ..., d4 of Zacharias et al. (2007)
Optional Input
--------------
clay If given: 1. < clay < 99. then calc sand content < clay
sand If given: 1. < sand < 99. then calc sand content > sand
Output
------
Boolean True: valid parameter set for all soils
False: not valid in at least one extreme case
Examples
--------
>>> parclay = np.array([ 0., 0., 0.,
... 0.788, 0.001, -0.263,
... -0.648, 0.044, -3.168, 0.023,
... 1.392, 1.212, -0.704, -0.418, -0.024])
>>> print(zacharias_check(parclay))
True
>>> print(zacharias_check(parclay, clay=66))
True
>>> parsand = np.array([ 0., 0., 0.,
... 0.890, -0.001, -0.322,
... -4.197, 0.076, -0.276, 0.013,
... -2.562, 3.750, -0.016, 7e-9, 4.004])
>>> print(zacharias_check(parsand))
False
>>> print(zacharias_check(parsand, sand=66))
True
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2016 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
# import cupy as cp
# import argparse
# import os
# import random
# import shutil
import time
from pprint import pprint
# import warnings
# import sys
# import logging
# import matplotlib
import logging
# from pprint import pprint
# from pprint import pformat
from config import opt
from dataset import TrainDataset, TestDataset
# import pprint
from pprint import pformat
from trainer import WaterNetTrainer
from mymodels import MyModels as mymodels
import torch
import torch.nn as nn
import torch.nn.parallel
# import torch.backends.cudnn as cudnn
# import torch.distributed as dist
import torch.optim
# import torch.multiprocessing as mp
from torch.utils import data as data_
import torch.utils.data.distributed
# import torchvision.transforms as transforms
# import torchvision.datasets as datasets
# from utils import array_tool as at
# from utils.vis_tool import visdom_bbox
# from utils.eval_tool import eval_detection_voc
# import resource
import fire
best_acc1 = 0
best_path = None
lossesnum = 100.0
def main(**kwargs):
opt._parse(kwargs)
# pprint(opt)
# set path of saving log in opt._parse()
# and save init info in opt._parse() too
logging.debug('this is a logging debug message')
main_worker()
def val_out(**kwargs):
opt._parse(kwargs)
print("===========validate & predict mode ===============")
opt.data_dir = kwargs['data_dir']
testset = TestDataset(opt)
test_dataloader = data_.DataLoader(testset,
batch_size=128,
num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
model = mymodels.__dict__[kwargs['arch']]()
trainer = WaterNetTrainer(model).cuda()
trainer.load(kwargs['load_path'], parse_opt=True)
    print('load pretrained model from %s' % kwargs['load_path'])
# if opt.multi_label > 1:
# criterion = nn.BCELoss()
# else:
criterion = nn.CrossEntropyLoss().cuda()
    validate(test_dataloader, model, criterion, seeout=True)
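# Hedged usage sketch: the script name and the assumption that these functions are
# exposed through fire.Fire() are not confirmed by this excerpt; the flags below
# mirror the kwargs actually read in main() and val_out().
#   python train_water.py main --arch=<model_name>
#   python train_water.py val_out --arch=<model_name> --load_path=checkpoints/best.pth --data_dir=./data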
def validate(val_loader, model, criterion, outfile='predict', seeout=False):
batch_time = AverageMeter()
losses = AverageMeter()
multi_label_acc = AverageMeter()
top1 = AverageMeter()
top2 = AverageMeter()
top3 = AverageMeter()
top4 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
outpath = outfile + '.txt'
outf = open(outpath, 'w')
with torch.no_grad():
end = time.time()
for i, (target, datas) in enumerate(val_loader):
# if args.gpu is not None:
# input = input.cuda(args.gpu, non_blocking=True)
# target = target.cuda(args.gpu, non_blocking=True)
# compute output
# measure accuracy and record loss
if opt.multi_label > 1:
target = target.cuda().float()
datas = datas.cuda().float()
output = model(datas)
# loss = criterion(output, target)
loss1 = nn.BCELoss()
loss = loss1(output, target)
acc, acc_list, output_list, batch_pred, batch_target = accuracy_multilabel(output, target)
if seeout:
# writepred = pred5.tolist()
# max5out = max5out.tolist()
                    for j in range(len(output_list)):  # use j to avoid shadowing the batch index i
                        outf.writelines("output:" + str(output_list[j]).strip('[').strip(']') +
                                        ',' + "pred:" + str(batch_pred[j]).strip('[').strip(']') +
                                        ',' + "target_encode:" + str(batch_target[j]).strip('[').strip(']') +
                                        ',' + "hamming acc:" + str(acc_list[j]) + '\r\n')
multi_label_acc.update(acc, 1)
losses.update(loss.item(), datas.size(0))
# if lossesnum > losses.val:
# lossesnum = losses.val
# print('====iter *{}==== * * * losses.val :{} Update ========\n'.format(ii, lossesnum))
# best_path = trainer.save(better=True)
# print("====epoch[{}]--- iter[{}] ** save params *******===".format(epoch, ii))
# # if best_acc1 < top1.val:
# # best_acc1 = top1.val
# # print('===== * * * best_acc1 :{} Update ========\n'.format(best_acc1))
# # best_path = trainer.save(better=True)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % opt.plot_every == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@hamming {multi_label_acc.val:.3f} ({multi_label_acc.avg:.3f})\t'.format(
i, len(val_loader), batch_time=batch_time, loss=losses, multi_label_acc=multi_label_acc))
else:
target = target.cuda()
datas = datas.cuda().float()
output = model(datas)
loss = criterion(output, target)
acc, pred5, max5out = accuracy(output, target, topk=(1, 2, 3, 4, 5))
if seeout:
writepred = pred5.tolist()
max5out = max5out.tolist()
                    for j, item in enumerate(writepred):  # use j to avoid shadowing the batch index i
                        outf.writelines(str(item).strip('[').strip(']') + ',' + str(max5out[j]).strip('[').strip(']') +
                                        ',' + str(target.tolist()[j]) + '\r\n')
acc1 = acc[0]
acc2 = acc[1]
acc3 = acc[2]
acc4 = acc[3]
acc5 = acc[4]
losses.update(loss.item(), datas.size(0))
top1.update(acc1[0], datas.size(0))
top2.update(acc2[0], datas.size(0))
top3.update(acc3[0], datas.size(0))
top4.update(acc4[0], datas.size(0))
top5.update(acc5[0], datas.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % opt.plot_every == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@2 {top2.val:.3f} ({top2.avg:.3f})\t'
'Acc@3 {top3.val:.3f} ({top3.avg:.3f})\t'
'Acc@4 {top4.val:.3f} ({top4.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time,
loss=losses,
top1=top1, top2=top2, top3=top3, top4=top4,
top5=top5))
if opt.multi_label > 1:
print(' * Acc@hamming {multi_label_acc.avg:.3f}'
.format(multi_label_acc=multi_label_acc))
else:
print(
' * Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top2=top2, top3=top3, top4=top4, top5=top5))
logging.info(
' validate-----* Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}\r\n'
.format(top1=top1, top2=top2, top3=top3, top4=top4, top5=top5, loss=losses))
if seeout:
if opt.multi_label > 1:
outf.writelines('* Acc@hamming {multi_label_acc.avg:.3f} Loss {loss.val:.4f}\r\n'
.format(multi_label_acc=multi_label_acc, loss=losses))
else:
outf.writelines(
'* Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}\r\n'
.format(top1=top1, top2=top2, top3=top3, top4=top4, top5=top5, loss=losses))
outf.writelines('======user config========')
outf.writelines(pformat(opt._state_dict()))
outf.close()
def main_worker():
global best_acc1
global best_path
global lossesnum
# gpu = opt.gpu
trainset = TrainDataset(opt)
print('load data')
train_dataloader = data_.DataLoader(trainset,
batch_size=128,
# pin_memory=True,
# num_workers=opt.train_num_workers,
shuffle=False)
testset = TestDataset(opt)
test_dataloader = data_.DataLoader(testset,
batch_size=64,
# num_workers=opt.test_num_workers,
shuffle=False,
pin_memory=True
)
model = mymodels.__dict__[opt.arch](opt)
model.apply(weights_init)
print('model construct completed')
trainer = WaterNetTrainer(model).cuda()
if opt.load_path:
trainer.load(opt.load_path)
print('load pretrained model from %s' % opt.load_path)
# define (criterion) for evaluation
criterion = nn.CrossEntropyLoss().cuda()
# lr_ = opt.lr
if opt.evaluate:
validate(test_dataloader, model, criterion)
return
for epoch in range(opt.epoch):
# trainer.reset_meters()
train(train_dataloader, trainer, epoch)
# validate(test_dataloader, model, criterion, opt.predict_name, seeout=False)
# evaluate on validation set
# top1avr, _ = validate(test_dataloader, model, criterion, seeout=False)
# if best_acc1 < top1avr:
# best_acc1 = top1avr
# print('===== * * * best_acc1 :{} Update ========\n'.format(best_acc1))
# best_path = trainer.save(better=True)
# scale the learning rate at fixed epochs
if epoch in (20, 40, 60, 80, 100):
# trainer.load(best_path, load_optimizer=False)
trainer.scale_lr()
# if epoch == 75:
# trainer.load(best_path, load_optimizer=False)
# trainer.scale_lr()
# if epoch == 90:
# trainer.load(best_path, load_optimizer=False)
# trainer.scale_lr()
validate(test_dataloader, model, criterion, opt.predict_name, seeout=True)
print("=====complete training & output predict =======")
# trainer.save(save_optimizer=True, better=False, save_path=opt.save_path)
# if epoch == 9:
# trainer.load(best_path)
# trainer.faster_rcnn.scale_lr(opt.lr_decay)
# lr_ = lr_ * opt.lr_decay
def train(train_loader, trainer, epoch):
global best_acc1
global best_path
global lossesnum
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top2 = AverageMeter()
top3 = AverageMeter()
top4 = AverageMeter()
top5 = AverageMeter()
multi_label_acc = AverageMeter()
# switch to train mode
# model.train()
end = time.time()
for ii, (label_, datas_) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if opt.multi_label > 1:
datas, label = datas_.cuda().float(), label_.cuda().float()
else:
datas, label = datas_.cuda().float(), label_.cuda()
# print(label)
# print(type(label))
trainloss, output = trainer.train_step(label, datas)
# print('==========output=======[{}]===='.format(output))
# measure accuracy and record loss
if opt.multi_label > 1:
acc, acc_list, output_list, batch_pred, batch_target = accuracy_multilabel(output, label)
multi_label_acc.update(acc, 1)
# print(trainloss)
losses.update(trainloss.item(), datas.size(0))
if lossesnum > losses.val:
lossesnum = losses.val
print('====iter *{}==== * * * losses.val :{} Update ========\n'.format(ii, lossesnum))
# best_path = trainer.save(better=True)
# print("====epoch[{}]--- iter[{}] ** save params *******===".format(epoch, ii))
# # if best_acc1 < top1.val:
# # best_acc1 = top1.val
# # print('===== * * * best_acc1 :{} Update ========\n'.format(best_acc1))
# # best_path = trainer.save(better=True)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (ii + 1) % opt.plot_every == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@hamming {multi_label_acc.val:.3f} ({multi_label_acc.avg:.3f})\t'.format(
epoch, ii, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, multi_label_acc=multi_label_acc))
logging.info(
' train-----* ===Epoch: [{0}][{1}/{2}]\t Acc@hamming {multi_label_acc.avg:.3f} Loss {loss.val:.4f}'
.format(epoch, ii, len(train_loader), multi_label_acc=multi_label_acc, loss=losses))
else:
acc, pred5, max5out = accuracy(output, label, topk=(1, 2, 3, 4, 5))
acc1 = acc[0]
acc2 = acc[1]
acc3 = acc[2]
acc4 = acc[3]
acc5 = acc[4]
losses.update(trainloss.item(), datas.size(0))
top1.update(acc1[0], datas.size(0))
top2.update(acc2[0], datas.size(0))
top3.update(acc3[0], datas.size(0))
top4.update(acc4[0], datas.size(0))
top5.update(acc5[0], datas.size(0))
if lossesnum > losses.val:
lossesnum = losses.val
print('====iter *{}==== * * * losses.val :{} Update ========\n'.format(ii, lossesnum))
# best_path = trainer.save(better=True)
# print("====epoch[{}]--- iter[{}] ** save params *******===".format(epoch, ii))
# if best_acc1 < top1.val:
# best_acc1 = top1.val
# print('===== * * * best_acc1 :{} Update ========\n'.format(best_acc1))
# best_path = trainer.save(better=True)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if (ii + 1) % opt.plot_every == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@2 {top2.val:.3f} ({top2.avg:.3f})\t'
'Acc@3 {top3.val:.3f} ({top3.avg:.3f})\t'
'Acc@4 {top4.val:.3f} ({top4.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, ii, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top2=top2, top3=top3, top4=top4, top5=top5))
logging.info(
' train-----* ===Epoch: [{0}][{1}/{2}]\t Acc@1 {top1.avg:.3f} Acc@2 {top2.avg:.3f} Acc@3 {top3.avg:.3f} Acc@4 {top4.avg:.3f} Acc@5 {top5.avg:.3f} Loss {loss.val:.4f}'
.format(epoch, ii, len(train_loader), top1=top1, top2=top2, top3=top3, top4=top4, top5=top5,
loss=losses))
def accuracy_multilabel(output, target):
# print("output", output)
with torch.no_grad():
batch_pred = []
batch_target = []
output_list = []
hamming_acc_list = []
hamming_acc = 0.0
origin_target = []
# for i, item_target | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
"""Polling implementation"""
import calendar
import logging
import os
import time
from datetime import datetime
from threading import Thread
import jinja2
from resilient_circuits import ResilientComponent, handler, template_functions
from resilient import SimpleHTTPException
from pkg_resources import Requirement, resource_filename
from resilient_lib import RequestsCommon
from fn_proofpoint_tap.util.proofpoint_common import get_threat_list
log = logging.getLogger(__name__)
PROOFPOINT_ID_FIELDS = [
'campaignId', # in main body
'campaignID', # in threat map
'messageID',
]
# do not rename the values in this dict - they match Proofpoint TAP values
ARTIFACT_TYPES = {
'Proofpoint Campaign ID': ['campaignId', 'campaignID'],
'Proofpoint Threat ID': ['threatID'],
}
ARTIFACT_TYPE_API_NAME = {
'Proofpoint Campaign ID': 'proofpoint_campaign_id',
'Proofpoint Threat ID': 'proofpoint_threat_id'
}
timefields = [
'threatTime',
'messageTime',
'clickTime',
]
# Map for Proofpoint threat type to Resilient incident type
TYPE_2_TYPE_ID_MAP = {
u'impostor': u'Other',
u'malware': u'Malware',
u'phish': u'Phishing',
u'spam': u'Other',
u'unknown': u'TBD / Unknown',
}
threats_info_map = {
'threat_id': 'threatID',
'threat_status': 'threatStatus',
'classification': 'classification',
'threat_url': 'threatUrl',
'threat': 'threat',
}
data_table_ids = [
'threat_info_map',
]
class PP_ThreatPolling(ResilientComponent):
"""Component that polls for new data arriving from Proofpoint"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(PP_ThreatPolling, self).__init__(opts)
current_path = os.path.dirname(os.path.realpath(__file__))
self.default_path = os.path.join(current_path, os.path.pardir, "data/templates/pp_threat_description.jinja")
self.class2typeids = self.getclass2typeids()
self.lastupdate = None
self._parseopts(opts)
self.main()
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self._parseopts(opts)
def _parseopts(self, opts):
"""Parse and process configuration options, called from __init__ and _reload"""
self.opts = opts
self.options = opts.get("fn_proofpoint_tap", {})
# Proofpoint score threshold
self.score_threshold = float(self.options.get("score_threshold")) \
if self.options.get("score_threshold") else None
# Type Filters - Set of incident type filters by name.
self.type_filter = self._get_type_filter(self.options.get("type_filter", None))
# Create a new Resilient incident from this event
# using an optional JSON (JINJA2) template file
threat_path = self.options.get("threat_template", self.default_path)
if threat_path and not os.path.exists(threat_path):
log.warning(u"Template file '%s' not found.", threat_path)
threat_path = None
if not threat_path:
# Use the template file installed by this package
threat_path = resource_filename(Requirement("fn-proofpoint_tap"), "fn_proofpoint_tap/data/templates/pp_threat_description.jinja")
if not os.path.exists(threat_path):
raise Exception(u"Template file '{}' not found".format(threat_path))
log.info(u"Template file: %s", threat_path)
with open(threat_path, "r") as threat_file:
self.threat_template = threat_file.read()
# initialize last update to startup interval if present, otherwise update interval
interval = self.options.get('startup_interval')
if interval is None or interval.strip() == "":
self.lastupdate = None
else:
self.lastupdate = 60 * int(interval)
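# Illustrative app.config snippet for the options read above (values and paths are
# assumptions for illustration; only the section and option names come from this code,
# and the units are inferred from how the values are used):
#
#   [fn_proofpoint_tap]
#   polling_interval=10         # minutes between polls; 0 or missing disables polling
#   startup_interval=1440       # optional look-back (minutes) used on the first poll
#   score_threshold=60          # optional spam/phish/malware/impostor score filter
#   type_filter=malware, phish  # optional; empty or 'all' disables type filtering
#   threat_template=/path/to/pp_threat_description.jinja  # optional override
#   cafile=/path/to/ca_bundle.pem                         # optional CA bundle for TLS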
def _get_type_filter(self, tp_filter):
"""
Get set of filter types by name for the type_filter option.
:return: Type filter set.
"""
if not tp_filter:
return None
type_filter = tp_filter.strip().lower()
if not type_filter or 'all' in type_filter:
# none or "all" specified, no filtering
return None
filters_by_name = set()
for typestring in type_filter.split(','):
t_f = typestring.strip()
if t_f in TYPE_2_TYPE_ID_MAP:
filters_by_name.add(t_f)
else:
log.info(u"Invalid incident type filter option '{}'.".format(t_f))
return filters_by_name
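# For example (illustrative): type_filter="Malware, phish" yields {'malware', 'phish'},
# while type_filter="all" (or an empty value) returns None, meaning no filtering.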
def main(self):
"""main entry point, instantiate polling thread"""
options = self.options
interval = int(options.get("polling_interval", 0))
if interval > 0:
# Create and start polling thread
thread = Thread(target=self.polling_thread)
thread.daemon = True
thread.start()
log.info("Polling for threats in Proofpoint every {0} minutes".format(interval))
else:
log.info("Polling for threats in Proofpoint not enabled")
def polling_thread(self):
"""contents of polling thread, alternately check for new data and wait"""
cafile = self.options.get('cafile')
bundle = os.path.expanduser(cafile) if cafile else False
rc = RequestsCommon(opts=self.opts, function_opts=self.options)
while True:
try:
threat_list = get_threat_list(rc, self.options, self.lastupdate, bundle)
for kind, datas in threat_list.items():
if kind == 'queryEndTime':
self.lastupdate = datas
else:
for data in datas:
incident_id = None
threat_id, idtype = self.find_id(data)
if not threat_id:
log.error("Threat ID not found for ProofPoint TAP event '%s' of kind '%s'.", data, kind)
continue
existing_incidents = self._find_resilient_incident_for_req(threat_id, idtype)
if len(existing_incidents) == 0:
# incident doesn't already exist, create Incident data
incident_payload = self.build_incident_dto(data, kind, threat_id)
if incident_payload is not None:
incident_id = self.create_incident(incident_payload)
log.debug('created Incident ID {}'.format(incident_id))
else:
log.debug('Incident filtered')
else:
# incident already exists, extract its ID
log.debug(u'incident {} {} already exists'.format(idtype, threat_id))
incident_id = existing_incidents[0]['id']
if incident_id is not None:
# created or found an Incident, attach any (possibly new) artifacts
artifact_payloads = self.build_artifacts(data)
self.update_incident(incident_id, artifact_payloads)
except Exception as err:
log.error(err)
# Amount of time (seconds) to wait to check cases again, defaults to 10 mins if not set
time.sleep(int(self.options.get("polling_interval", 10)) * 60)
def build_incident_dto(self, data, kind, threat_id):
"""build Incident data structure in Resilient DTO format"""
properties = {}
for field in PROOFPOINT_ID_FIELDS:
value = data.get(field)
if value is not None:
properties[field] = value
# pull the threat types from the data
threat_types = self._get_threat_types(data)
# map threat types to incident type ids and check to see if filtering is requested
threat_type_ids = self._filtered_threat_types(threat_types)
if threat_type_ids is None:
log.debug("no threat_types, discarding")
return None
# look for threat name and classification in main body, else in threatsInfoMap
threatsinfo = data.get('threatsInfoMap')
threatinfo = threatsinfo[0] if threatsinfo else {}
threatname = data.get('threat', threatinfo.get('threat'))
classification = self._format_set(self._get_event_classification(data))
return {
'description': self.mkdescription(data, kind, threat_id, classification),
'discovered_date': self.getdiscovereddate(data),
'incident_type_ids': threat_type_ids,
'name': u'Proofpoint TAP Event: {0} {1}'.format(threatname if threatname else "", classification),
'properties': properties,
}
def mkdescription(self, data, kind, threat_id, classification):
"""Make Incident description text"""
data[u'kind'] = kind
data[u'id'] = threat_id
data[u'classification'] = classification
try:
return {'format': 'text', 'content': template_functions.render(self.threat_template, data)}
except jinja2.exceptions.TemplateSyntaxError as err:
log.info(u'threat template is not set correctly in config file {}'.format(err))
raise err
@staticmethod
def getdiscovereddate(data):
"""Find field to use for discovered date, convert to millisecond timestamp"""
for field in timefields:
if field in data:
val = data.get(field)
ts_format = '%Y-%m-%dT%H:%M:%S.%fZ'
if not val:
continue
try:
dt = datetime.strptime(val, ts_format)
log.debug('dt is {}'.format(dt))
seconds = calendar.timegm(dt.utctimetuple())
millis = int(dt.microsecond / 1000)
combined = seconds * 1000 + millis
log.debug('seconds {} millis {} combined {}'.format(seconds, millis, combined))
return combined
except ValueError as err:
log.exception(u"{} Not in expected timestamp format {} - {}".format(val, ts_format, err))
raise err
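# Worked example (hand-computed, for illustration only):
#   data = {'threatTime': '2019-03-01T12:00:00.500Z'}
#   -> calendar.timegm(...) gives 1551441600 seconds; int(dt.microsecond / 1000) gives 500 ms
#   -> PP_ThreatPolling.getdiscovereddate(data) returns 1551441600500 (epoch milliseconds)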
@staticmethod
def _get_event_classification(data):
"""
Get the "original" TAP classification for this event
:param data:
:return:
"""
# Use set() to automatically avoid duplication and support disjoint filtering
original_threat_types = set()
# pull the threat type data from classification field
event_classification = data.get('classification')
if event_classification:
original_threat_types.add(event_classification.lower())
# examine Threat Info Map for possible threat types
threatinfos = data.get('threatsInfoMap')
if threatinfos:
for threatinfo in threatinfos: # There may be more than one threat per message.
# check info map classification field
event_classification = threatinfo.get('classification')
if event_classification:
original_threat_types.add(event_classification.lower())
return original_threat_types
@staticmethod
def _format_set(set_to_format):
"""
Format content of set and return str.
:param set_to_format:
:return:
"""
if set_to_format is None or not isinstance(set_to_format, set):
return "N/A"
if len(set_to_format) == 0:
return "None"
formatted_list = list(set_to_format)
return ', '.join(formatted_list)
def _get_threat_types(self, data):
"""
Pull the the threat types from the data.
:param data:
:return: set with threat_types
"""
# Get the TAP classification for this event
original_threat_types = self._get_event_classification(data)
log.debug(u"TAP event threat type classification is '{}'".format(self._format_set(original_threat_types)))
# score_threshold is an optional param
# if score_threshold was defined in the config file
# filter the score values and pull appropriate threat types
if self.score_threshold is not None:
# extract spam, phishing, malware and impostor scores, if no value default is -1
spamscore = float(data.get('spamScore', '-1'))
phishscore = float(data.get('phishScore', '-1'))
malwarescore = float(data.get('malwareScore', '-1'))
impostorscore = float(data.get('impostorScore', '-1'))
log.debug("spamScore {}".format(spamscore))
log.debug("phishScore {}".format(phishscore))
log.debug("malwareScore {}".format(malwarescore))
log.debug("impostorScore {}".format(impostorscore))
# create a copy of original_threat_types, keep the original values separate
score_threat_types = original_threat_types.copy()
self._check_if_score_above_threshold(spamscore, 'spam', score_threat_types)
self._check_if_score_above_threshold(phishscore, 'phishing', score_threat_types)
self._check_if_score_above_threshold(malwarescore, 'malware', score_threat_types)
self._check_if_score_above_threshold(impostorscore, 'impostor', score_threat_types)
log.debug(u"Updated threat type classification based on score values is '{}'".format(self._format_set(score_threat_types)))
# validation for irregular results
# example of an irregular result: if the TAP classification is "spam" and the score_threshold is set to 60
# and the incoming spamscore is 50 and phishscore is 70 the code will remove "spam" from the threat_types
# set (because it's lower than score_threshold) but will add "phishing" to the threat_types set making
# the result inconsistent with the TAP classification of this event. In this case we log the error.
# verify the size of the threat_types set, if it includes at least one element but it doesn't include
# elements from original_threat_types then we have found an irregularity
if len(score_threat_types) > 0:
for orig_threat in original_threat_types:
if orig_threat not in score_threat_types:
log.info(u"Irregular result. The original TAP threat type classification '{}' | |
import numpy as np
from spherical_geometry import sphere_distance
from qc import sunangle, dayinyear
import copy
import Extended_IMMA as ex
import math
"""
The trackqc module contains a set of functions for performing the tracking QC
first described in Atkinson et al. [2013]. The general procedures described
in Atkinson et al. [2013] were later revised and improved for the SST CCI 2 project.
Documentation and IDL code for the revised (and original) procedures can be found
in the CMA FCM code repository. The code in this module represents a port of the
revised IDL code into the python marine QC suite. New versions of the aground
and speed checks have also been added.
These functions perform tracking QC checks on a :class:`.Voyage`
References:
<NAME>., <NAME>, <NAME>, <NAME>, 2013:
Assessing the quality of sea surface temperature observations from
drifting buoys and ships on a platform-by-platform basis (doi:10.1002/jgrc.20257).
CMA FCM code repository:
http://fcm9/projects/ClimateMonitoringAttribution/browser/Track_QC?order=name
"""
def track_day_test(year, month, day, hour, lat, lon, elevdlim=-2.5):
"""
Given date, time, lat and lon calculate if the sun elevation is > elevdlim.
If so return daytime is True
This is the "day" test used by tracking QC to decide whether an SST measurement is night or day.
This is important because daytime diurnal heating can affect comparison with an SST background.
It uses the function sunangle to calculate the elevation of the sun. A default solar_zenith angle
of 92.5 degrees (elevation of -2.5 degrees) delimits night from day.
:param year: Year
:param month: Month
:param day: Day
:param hour: Hour expressed as decimal fraction (e.g. 20.75 = 20:45 pm)
:param lat: Latitude in degrees
:param lon: Longitude in degrees
:param elevdlim: Elevation day/night delimiter in degrees above horizon
:type year: integer
:type month: integer
:type day: integer
:type hour: float
:type lat: float
:type lon: float
:type elevdlim: float
:return: True if daytime, else False.
:rtype: boolean
"""
assert year is not None, 'year is missing'
assert month is not None, 'month is missing'
assert day is not None, 'day is missing'
assert hour is not None, 'hour is missing'
assert lat is not None, 'latitude is missing'
assert lon is not None, 'longitude is missing'
assert 1 <= month <= 12, 'month is invalid'
assert 1 <= day <= 31, 'day is invalid'
assert 0 <= hour <= 24, 'hour is invalid'
assert 90 >= lat >= -90, 'latitude is invalid'
daytime = False
year2 = year
day2 = dayinyear(year, month, day)
hour2 = math.floor(hour)
minute2 = (hour - math.floor(hour)) * 60.0
lat2 = lat
lon2 = lon
if lat == 0:
lat2 = 0.0001
if lon == 0:
lon2 = 0.0001
azimuth, elevation, rta, hra, sid, dec = \
sunangle(year2, day2, hour2, minute2, 0, 0, 0, lat2, lon2)
if elevation > elevdlim:
daytime = True
return daytime
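def _example_track_day_test():
    """Illustrative helper (not part of the original module): local noon at a
    mid-latitude site on the June solstice should be classed as day, local
    midnight as night. Expected values are approximate sanity checks only."""
    print(track_day_test(2015, 6, 21, 12.0, 50.0, 0.0))  # expected: True (sun well above horizon)
    print(track_day_test(2015, 6, 21, 0.0, 50.0, 0.0))   # expected: False (sun ~17 deg below horizon)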
def trim_mean(inarr, trim):
"""
Calculate a resistant (aka robust) mean of an input array given a trimming criteria.
:param inarr: array of numbers
:param trim: trimming criteria. A value of 10 trims one tenth of the values off each end of the sorted array before calculating the mean.
:type inarr: array of floats
:type trim: integer
:return: trimmed mean
:rtype: float
"""
arr = np.array(inarr)
if trim == 0:
return np.mean(arr)
length = len(arr)
arr.sort()
index1 = length // trim  # integer division so the slice indices are ints
return np.mean(arr[index1:length - index1])
def trim_std(inarr, trim):
"""
Calculate a resistant (aka robust) standard deviation of an input array given a trimming criteria.
:param inarr: array of numbers
:param trim: trimming criteria. A value of 10 trims one tenth of the values off each end of the sorted array before
calculating the standard deviation.
:type inarr: array of floats
:type trim: integer
:return: trimmed standard deviation
:rtype: float
"""
arr = np.array(inarr)
if trim == 0:
return np.std(arr)
length = len(arr)
arr.sort()
index1 = length // trim  # integer division so the slice indices are ints
return np.std(arr[index1:length - index1])
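def _example_trimmed_stats():
    """Illustrative helper (not part of the original module): compare plain and
    trimmed statistics on data containing a single large outlier."""
    data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 100.0]
    # trim=10 discards one tenth of the sorted values from each end, so the
    # outlier (100.0) no longer dominates the statistics.
    print(np.mean(data))        # 14.5
    print(trim_mean(data, 10))  # 5.5   (mean of 2..9)
    print(trim_std(data, 10))   # ~2.29 (population std of 2..9)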
def aground_check(reps, smooth_win=41, min_win_period=8, max_win_period=10):
"""
Check to see whether a drifter has run aground based on 1/100th degree precision positions.
A flag 'drf_agr' is set for each input report: flag=1 for reports deemed aground, else flag=0.
Positional errors introduced by lon/lat 'jitter' and data precision can be of order several km's.
Longitude and latitude timeseries are smoothed prior to assessment to reduce position 'jitter'.
Some post-smoothing position 'jitter' may remain and its expected magnitude is set within the
function by the 'tolerance' parameter. A drifter is deemed aground when, after a period of time,
the distance between reports is less than the 'tolerance'. The minimum period of time over which this
assessment is made is set by 'min_win_period'. This period must be long enough such that slow moving
drifters are not falsely flagged as aground given errors in position (e.g. a buoy drifting at around
1 cm/s will travel around 1 km/day; given 'tolerance' and precision errors of a few km's the 'min_win_period'
needs to be several days to ensure distance-travelled exceeds the error so that motion is reliably
detected and the buoy is not falsely flagged as aground). However, min_win_period should not be longer
than necessary as buoys that run aground for less than min_win_period will not be detected.
Because temporal sampling can be erratic the time period over which an assessment is made is specified
as a range (bound by 'min_win_period' and 'max_win_period') - assesment uses the longest time separation
available within this range. If a drifter is deemed aground and subsequently starts moving (e.g. if a drifter
has moved very slowly for a prolonged period) incorrectly flagged reports will be reinstated.
:param reps: a time-sorted list of drifter observations in format :class:`.Voyage`, each report must have a valid longitude, latitude and time-difference
:param smooth_win: length of window (odd number) in datapoints used for smoothing lon/lat
:param min_win_period: minimum period of time in days over which position is assessed for no movement (see description)
:param max_win_period: maximum period of time in days over which position is assessed for no movement (this should be greater than min_win_period and allow for erratic temporal sampling e.g. min_win_period+2 to allow for gaps of up to 2-days in sampling).
:type reps: a :class:`.Voyage`
:type smooth_win: integer
:type min_win_period: integer
:type max_win_period: integer
"""
tolerance = sphere_distance(0, 0, 0.01, 0.01)
# displacement resulting from 1/100th deg 'position-jitter' at equator (km)
try:
smooth_win = int(smooth_win)
min_win_period = int(min_win_period)
max_win_period = int(max_win_period)
assert smooth_win >= 1, 'smooth_win must be >= 1'
assert smooth_win % 2 != 0, 'smooth_win must be an odd number'
assert min_win_period >= 1, 'min_win_period must be >= 1'
assert max_win_period >= 1, 'max_win_period must be >= 1'
assert max_win_period >= min_win_period, 'max_win_period must be >= min_win_period'
except AssertionError as error:
raise AssertionError('invalid input parameter: ' + str(error))
half_win = (smooth_win - 1) // 2  # integer division: half-width of the smoothing window in points
min_win_period_hours = min_win_period * 24.0
max_win_period_hours = max_win_period * 24.0
nrep = len(reps)
if nrep <= smooth_win: # records shorter than smoothing-window can't be evaluated
print('Voyage too short for QC, setting flags to pass')
for rep in reps:
rep.set_qc('POS', 'drf_agr', 0)
return
# retrieve lon/lat/time_diff variables from marine reports
lon = np.empty(nrep)
lon[:] = np.nan
lat = np.empty(nrep)
lat[:] = np.nan
hrs = np.empty(nrep)
hrs[:] = np.nan
try:
for ind, rep in enumerate(reps):
lon[ind] = rep.getvar('LON') # returns None if missing
lat[ind] = rep.getvar('LAT') # returns None if missing
if ind == 0:
hrs[ind] = 0
else:
hrs[ind] = rep.getext('time_diff') # raises assertion error if 'time_diff' not found
assert not any(np.isnan(lon)), 'Nan(s) found in longitude'
assert not any(np.isnan(lat)), 'Nan(s) found in latitude'
assert not any(np.isnan(hrs)), 'Nan(s) found in time differences'
assert not any(hrs < 0), 'times are not sorted'
except AssertionError as error:
raise AssertionError('problem with report values: ' + str(error))
hrs = np.cumsum(hrs) # get time difference in hours relative to first report
# create smoothed | |
<reponame>acrellin/cesium
import numpy as np
import scipy.stats as stats
from ._lomb_scargle import lomb_scargle
def lomb_scargle_model(time, signal, error, sys_err=0.05, nharm=8, nfreq=3, tone_control=5.0):
"""Simultaneous fit of a sum of sinusoids by weighted least squares:
y(t) = Sum_k Ck*t^k + Sum_i Sum_j A_ij sin(2*pi*j*fi*(t-t0)+phi_j),
i=[1,nfreq], j=[1,nharm]
Parameters
----------
time : array_like
Array containing time values.
signal : array_like
Array containing data values.
error : array_like
Array containing measurement error values.
nharm : int
Number of harmonics to fit for each frequency.
nfreq : int
Number of frequencies to fit.
Returns
-------
dict
Dictionary containing fitted parameter values. Parameters specific to
a specific fitted frequency are stored in a list of dicts at
model_dict['freq_fits'], each of which contains the output of
fit_lomb_scargle(...)
"""
dy0 = np.sqrt(error**2 + sys_err**2)
wt = 1. / dy0**2
time = time.copy() - min(time) # speeds up lomb_scargle code to have min(time)==0
signal = signal.copy()
chi0 = np.dot(signal**2, wt)
# TODO parametrize?
f0 = 1. / max(time)
df = 0.8 / max(time) # 20120202 : 0.1/Xmax
fmax = 33. #pre 20120126: 10. # 25
numf = int((fmax - f0) / df) # TODO !!! this is off by 1 point, fix?
model_dict = {'freq_fits' : []}
lambda0_range = [-np.log10(len(time)), 8] # these numbers "fix" the strange-amplitude effect
for i in range(nfreq):
if i == 0:
fit = fit_lomb_scargle(time, signal, dy0, f0, df, numf,
tone_control=tone_control, lambda0_range=lambda0_range,
nharm=nharm, detrend_order=1)
model_dict['trend'] = fit['trend_coef'][1]
else:
fit = fit_lomb_scargle(time, signal, dy0, f0, df, numf,
tone_control=tone_control, lambda0_range=lambda0_range,
nharm=nharm, detrend_order=0)
model_dict['freq_fits'].append(fit)
signal -= fit['model']
model_dict['freq_fits'][-1]['resid'] = signal.copy()
if i == 0:
model_dict['varrat'] = np.dot(signal**2, wt) / chi0
model_dict['nfreq'] = nfreq
model_dict['nharm'] = nharm
model_dict['chi2'] = fit['chi2']
model_dict['f0'] = f0
model_dict['df'] = df
model_dict['numf'] = numf
return model_dict
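def _example_lomb_scargle_model():
    """Illustrative helper (not part of the original module): fit a noisy synthetic
    sinusoid. Assumes the compiled _lomb_scargle extension is available; the
    recovered values are approximate."""
    rng = np.random.RandomState(0)
    t = np.sort(rng.uniform(0, 30, 200))          # irregular sampling over 30 time units
    y = 2.0 * np.sin(2 * np.pi * 0.4 * t) + rng.normal(0, 0.1, t.size)
    dy = 0.1 * np.ones_like(t)                    # per-point measurement error
    model = lomb_scargle_model(t, y, dy, nharm=4, nfreq=1)
    best = model['freq_fits'][0]
    print(best['freq'])            # expected to be close to 0.4
    print(best['amplitude'][0])    # first-harmonic amplitude, roughly 2.0
    print(best['signif'])          # significance of the detection in sigmas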
def lprob2sigma(lprob):
"""Translate a log_e(probability) to units of Gaussian sigmas."""
if lprob > -36.:
sigma = stats.norm.ppf(1. - 0.5 * np.exp(lprob))
else:
sigma = np.sqrt(np.log(2. / np.pi) - 2. * np.log(8.2) - 2. * lprob)
f = 0.5 * np.log(2. / np.pi) - 0.5 * sigma**2 - np.log(sigma) - lprob
sigma += f / (sigma + 1. / sigma)
return sigma
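def _example_lprob2sigma():
    """Illustrative helper (not part of the original module): a two-sided tail
    probability of about 4.55% corresponds to roughly 2 sigma."""
    print(lprob2sigma(np.log(0.0455)))  # ~2.0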
def fit_lomb_scargle(time, signal, error, f0, df, numf, nharm=8, psdmin=6., detrend_order=0,
freq_zoom=10., tone_control=5., lambda0=1., lambda0_range=[-8,6]):
"""Calls C implementation of Lomb Scargle sinusoid fitting, which fits a
single frequency with nharm harmonics to the data. Called repeatedly by
lomb_scargle_model in order to produce a fit with multiple distinct
frequencies.
Parameters
----------
time : array_like
Array containing time values.
signal : array_like
Array containing data values.
error : array_like
Array containing measurement error values.
f0 : float
Smallest frequency value to consider.
df : float
Step size for frequency grid search.
numf : int
Number of frequencies for frequency grid search.
nharm : int
Number of harmonics to fit (also used when refining the periodogram).
detrend_order : int
Order of polynomial detrending.
psdmin : float
Refine periodogram values with larger psd using a multi-harmonic fit.
lambda0 : float
Typical value for regularization parameter
lambda0_range : [float, float]
Allowable range for log10 of regularization parameter
Returns
-------
dict
Dictionary describing various parameters of the multiharmonic fit at
the best-fit frequency
"""
ntime = len(time)
# For some reason we round this to the nearest even integer
freq_zoom = round(freq_zoom/2.)*2.
# Polynomial terms
coef = np.zeros(detrend_order + 1, dtype='float64')
norm = np.zeros(detrend_order + 1, dtype='float64')
wth0 = 1. / error
s0 = np.dot(wth0, wth0)
wth0 /= np.sqrt(s0)
cn = signal * wth0
coef[0] = np.dot(cn,wth0)
cn0 = coef[0]
norm[0] = 1.
cn -= coef[0] * wth0
vcn = 1.
# np.sin's and cosin's for later
tt = 2. * np.pi * time
sinx,cosx = np.sin(tt*f0)*wth0,np.cos(tt*f0)*wth0
sinx_step,cosx_step = np.sin(tt*df),np.cos(tt*df)
sinx_back,cosx_back = -np.sin(tt*df/2.),np.cos(tt*df/2)
sinx_smallstep,cosx_smallstep = np.sin(tt*df/freq_zoom),np.cos(tt*df/freq_zoom)
npar = 2*nharm
hat_matr = np.zeros((npar,ntime),dtype='float64')
hat0 = np.zeros((npar,detrend_order+1),dtype='float64')
hat_hat = np.zeros((npar,npar),dtype='float64')
soln = np.zeros(npar,dtype='float64')
psd = np.zeros(numf,dtype='float64')
# Detrend the data and create the orthogonal detrending basis
if detrend_order > 0:
wth = np.zeros((detrend_order + 1, ntime),dtype='float64')
wth[0,:] = wth0
else:
wth = wth0
for i in range(detrend_order):
f = wth[i,:] * tt / (2 * np.pi)
for j in range(i+1):
f -= np.dot(f, wth[j,:]) * wth[j,:]
norm[i+1] = np.sqrt(np.dot(f,f))
f /= norm[i+1]
coef[i+1] = np.dot(cn,f)
cn -= coef[i+1]*f
wth[i+1,:] = f
vcn += (f/wth0)**2
chi0 = np.dot(cn,cn)
varcn = chi0/(ntime-1-detrend_order)
psdmin *= 2*varcn
Tr = np.array(0., dtype='float64')
ifreq = np.array(0, dtype='int32')
lambda0 = np.array(lambda0 / s0, dtype='float64')
lambda0_range = 10**np.array(lambda0_range, dtype='float64') / s0
lomb_scargle(ntime, numf, nharm, detrend_order, psd, cn, wth, sinx, cosx,
sinx_step, cosx_step, sinx_back, cosx_back, sinx_smallstep,
cosx_smallstep, hat_matr, hat_hat, hat0, soln, chi0, freq_zoom,
psdmin, tone_control, lambda0, lambda0_range, Tr, ifreq)
hat_hat /= s0
ii = np.arange(nharm, dtype='int32')
soln[0:nharm] /= (1. + ii)**2
soln[nharm:] /= (1. + ii)**2
hat_matr0 = np.outer(hat0[:,0], wth0)
for i in range(detrend_order):
hat_matr0 += np.outer(hat0[:,i+1], wth[i+1,:])
modl = np.dot(hat_matr.T, soln)
coef0 = np.dot(soln, hat0)
coef -= coef0
hat_matr -= hat_matr0
out_dict = {}
out_dict['psd'] = psd
out_dict['chi0'] = chi0 * s0
if detrend_order > 0:
out_dict['trend'] = np.dot(coef,wth)/wth0
else:
out_dict['trend'] = coef[0] + 0*wth0
out_dict['model'] = modl/wth0 + out_dict['trend']
j = psd.argmax()
freq = f0 + df * j + (ifreq / freq_zoom - 1/2.) * df
tt = (time * freq) % 1.
out_dict['freq'] = freq
out_dict['s0'] = s0
out_dict['chi2'] = (chi0 - psd[j]) * s0
out_dict['psd'] = psd[j] * 0.5 / varcn
out_dict['lambda'] = lambda0 * s0
# out_dict['gcv_weight'] = (1 - 3. / ntime) / Tr
out_dict['trace'] = Tr
out_dict['nu0'] = ntime - npar
npars = (1 - Tr) * ntime / 2.
out_dict['nu'] = ntime - npars
out_dict['npars'] = npars
A0, B0 = soln[0:nharm], soln[nharm:]
hat_hat /= np.outer(np.hstack(((1.+ii)**2, (1.+ii)**2)), np.hstack(((1.+ii)**2, (1.+ii)**2)))
err2 = np.diag(hat_hat)
vA0, vB0 = err2[0:nharm], err2[nharm:]
covA0B0 = hat_hat[(ii,nharm+ii)]
vmodl = vcn/s0 + np.dot((hat_matr/wth0).T, np.dot(hat_hat, hat_matr/wth0))
vmodl0 = vcn/s0 + np.dot((hat_matr0/wth0).T, np.dot(hat_hat, hat_matr0/wth0))
out_dict['model_error'] = np.sqrt(np.diag(vmodl))
out_dict['trend_error'] = np.sqrt(np.diag(vmodl0))
amp = np.sqrt(A0**2 + B0**2)
damp = np.sqrt(A0**2 * vA0 + B0**2 * vB0 + 2. * A0 * B0 * covA0B0) / amp
phase = np.arctan2(B0, A0)
rel_phase = phase - phase[0]*(1.+ii)
rel_phase = np.arctan2(np.sin(rel_phase), np.cos(rel_phase))
dphase = 0.*rel_phase
for i in range(nharm - 1):
j = i + 1
v = np.array([-A0[0] * (1. + j) / amp[0]**2, B0[0] * (1. + j) / amp[0]**2, A0[j] / amp[j]**2, -B0[j] / amp[j]**2])
jj = np.array([0, nharm, j, j+nharm])
m = hat_hat[np.ix_(jj, jj)]
dphase[j] = np.sqrt(np.dot(np.dot(v, m), v))
out_dict['amplitude'] = amp
out_dict['amplitude_error'] = damp
out_dict['rel_phase'] = rel_phase
out_dict['rel_phase_error'] = dphase
out_dict['time0'] = -phase[0] / (2 * np.pi * freq)
ncp = norm.cumprod()
out_dict['trend_coef'] = coef / ncp
out_dict['y_offset'] = out_dict['trend_coef'][0] - cn0
prob = stats.f.sf(0.5 * (ntime - 1. - detrend_order) * (1. -out_dict['chi2'] / out_dict['chi0']), 2, ntime - 1 - detrend_order)
out_dict['signif'] = lprob2sigma(np.log(prob))
return out_dict
def get_lomb_frequency(lomb_model, i):
"""Get the ith frequency from a fitted Lomb-Scargle model."""
return lomb_model['freq_fits'][i-1]['freq']
def get_lomb_amplitude(lomb_model, i, j):
"""
Get the amplitude of the jth harmonic of the ith frequency from a fitted
Lomb-Scargle model.
"""
return lomb_model['freq_fits'][i-1]['amplitude'][j-1]
def get_lomb_rel_phase(lomb_model, i, j):
"""
Get the relative phase of the jth harmonic of the ith frequency from a
fitted Lomb-Scargle model.
"""
return lomb_model['freq_fits'][i-1]['rel_phase'][j-1]
def get_lomb_amplitude_ratio(lomb_model, i):
"""
Get the ratio of the amplitudes of the first harmonic for the ith and first
frequencies from a fitted Lomb-Scargle model.
"""
return (lomb_model['freq_fits'][i-1]['amplitude'][0] /
lomb_model['freq_fits'][0]['amplitude'][0])
def get_lomb_frequency_ratio(lomb_model, i):
"""
Get the ratio of the ith and first frequencies from a fitted Lomb-Scargle
model.
"""
return (lomb_model['freq_fits'][i-1]['freq'] /
lomb_model['freq_fits'][0]['freq'])
def get_lomb_signif_ratio(lomb_model, i):
"""
Get the ratio of the significances (in sigmas) of the ith and first
frequencies from a fitted Lomb-Scargle model.
"""
return (lomb_model['freq_fits'][i-1]['signif'] /
lomb_model['freq_fits'][0]['signif'])
def get_lomb_lambda(lomb_model):
"""Get the regularization parameter of a fitted Lomb-Scargle model."""
return lomb_model['freq_fits'][0]['lambda']
def get_lomb_signif(lomb_model):
"""
Get the significance (in sigmas) of the first frequency from a fitted
Lomb-Scargle model.
"""
return lomb_model['freq_fits'][0]['signif']
def get_lomb_varrat(lomb_model):
"""
Get the fraction of the variance explained by the first frequency of a
fitted Lomb-Scargle model.
"""
return lomb_model['varrat']
def get_lomb_trend(lomb_model):
"""Get the linear trend of a fitted Lomb-Scargle model."""
return lomb_model['trend']
<filename>tests/test_tests.py
import json
import os
import time
import unittest
from mock import (
MagicMock,
Mock,
)
class TestGetTree(unittest.TestCase):
"""Tests for the get_tree function
"""
def test_nonascii_attribute(self):
from smoketest.tests import get_tree
response = Mock()
response.text = u"""
<html \u2603 \u2604="yes">
</html>
"""
tree = get_tree(response)
self.assertIn(u'\u2603', tree.attrib)
self.assertEqual('yes', tree.attrib[u'\u2604'])
class TestTestResults(unittest.TestCase):
"""Tests for the TestResult classes
"""
def setUp(self):
self.json_schema_filename = 'test-json-schema-{0}.json'.format(
time.time(),
)
def tearDown(self):
if os.path.exists(self.json_schema_filename):
os.unlink(self.json_schema_filename)
def test_redirect_test_result_pass(self):
from smoketest.tests import RedirectTestResult
test = Mock()
response = Mock()
test.target_code = '30X'
test.target_location = 'usnews.com'
test.follow_redirects = False
response.status_code = '301'
response.headers = dict(location='usnews.com?_=987654321')
# Check test result object is truthy
test_result = RedirectTestResult(test, response)
self.assertTrue(bool(test_result))
def test_redirect_test_result_fail_url(self):
from smoketest.tests import RedirectTestResult
test = Mock()
response = Mock()
test.target_code = '30X'
test.target_location = 'usnews.com'
test.follow_redirects = False
response.status_code = '301'
response.headers = dict(location='google.com')
# Check test result object is falsey
test_result = RedirectTestResult(test, response)
self.assertFalse(bool(test_result))
def test_redirect_test_result_fail_status_code(self):
from smoketest.tests import RedirectTestResult
test = Mock()
response = Mock()
test.target_code = '30X'
response.status_code = '200'
response.headers = dict(location=test.target_location)
# Check test result object is falsey
test_result = RedirectTestResult(test, response)
self.assertFalse(bool(test_result))
def test_redirect_test_result_requires_location_if_30X(self):
from smoketest.tests import RedirectTestResult
test = Mock()
response = Mock()
test.target_code = '30X'
test.target_location = 'http://www.usnews.com'
test.follow_redirects = False
response.status_code = '301'
response.headers = {}
# Check that the test is a fail if 'location' header is missing.
test_result = RedirectTestResult(test, response)
self.assertFalse(test_result)
def test_redirect_test_result_doesnt_require_location_if_non_30X(self):
from smoketest.tests import RedirectTestResult
test = Mock()
response = Mock()
test.target_code = '30X'
response.status_code = '200'
response.headers = {}
# Check that checking for pass/fail raises exception
test_result = RedirectTestResult(test, response)
self.assertFalse(bool(test_result))
def test_html_test_result_ordinary_success(self):
from smoketest.tests import (
HTMLTest,
TextMatchingMethod,
)
html_test = HTMLTest(
'h1',
None,
TextMatchingMethod('endswith', 'ello'),
'always',
)
response = Mock()
response.text = '<h1>hello</h1>'
html_test_result = html_test.get_result(
response
)
self.assertTrue(bool(html_test_result))
self.assertEqual(
'h1 text was: hello',
html_test_result.description,
)
def test_html_test_result_with_child_tag(self):
from smoketest.tests import (
HTMLTest,
TextMatchingMethod,
)
html_test = HTMLTest(
'h1',
None,
TextMatchingMethod('endswith', 'ello'),
'always',
)
response = Mock()
response.text = '<h1><img src="example.com">hello</h1>'
html_test_result = html_test.get_result(
response
)
self.assertTrue(bool(html_test_result))
self.assertEqual(
'h1 text was: hello',
html_test_result.description,
)
def test_html_test_result_ordinary_failure(self):
from smoketest.tests import (
HTMLTest,
TextMatchingMethod,
)
html_test = HTMLTest(
'h1',
None,
TextMatchingMethod('equals', 'ello'),
'always',
)
response = Mock()
response.text = '<h1>hello</h1>'
html_test_result = html_test.get_result(
response
)
self.assertFalse(bool(html_test_result))
self.assertEqual(
'h1 text was: hello',
html_test_result.description,
)
def test_html_test_result_never_option_and_it_succeeds(self):
from smoketest.tests import (
HTMLTest,
)
html_test = HTMLTest(
'h1',
None,
None,
'never',
)
response = Mock()
response.text = ''
html_test_result = html_test.get_result(
response
)
self.assertTrue(bool(html_test_result))
self.assertEqual(
'h1 text was: None',
html_test_result.description,
)
def test_html_test_result_never_option_and_it_fails(self):
from smoketest.tests import (
HTMLTest,
)
html_test = HTMLTest(
'h1',
None,
None,
'never',
)
response = Mock()
response.text = '<h1>hello</h1>'
html_test_result = html_test.get_result(
response
)
self.assertFalse(bool(html_test_result))
self.assertEqual(
'h1 text was: hello',
html_test_result.description,
)
def test_html_test_result_empty_html(self):
from smoketest.tests import (
HTMLTest,
TextMatchingMethod,
)
html_test = HTMLTest(
'h1',
None,
TextMatchingMethod('equals', 'oodby'),
'always',
)
response = Mock()
response.text = ''
html_test_result = html_test.get_result(
response
)
self.assertFalse(bool(html_test_result))
self.assertEqual(
'h1 text was: None',
html_test_result.description,
)
def test_json_schema_test_schema_file_does_not_exist(self):
from smoketest.tests import (
JSONSchemaTest
)
if os.path.exists(self.json_schema_filename):
os.unlink(self.json_schema_filename)
response = Mock()
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertFalse(bool(json_schema_test_result))
self.assertEqual(
'Schema file {0} not found'.format(self.json_schema_filename),
json_schema_test_result.description,
)
def test_json_schema_test_schema_is_not_valid_json(self):
from smoketest.tests import (
JSONSchemaTest
)
# Write some garbage to the schema file
with open(self.json_schema_filename, 'w') as f:
f.write('GARBAGE')
response = Mock()
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertFalse(bool(json_schema_test_result))
self.assertEqual(
'Schema file {0} was not valid JSON'.format(self.json_schema_filename),
json_schema_test_result.description,
)
def test_json_schema_test_schema_is_not_valid_schema_bad_type(self):
from smoketest.tests import (
JSONSchemaTest
)
# Write some garbage to the schema file
with open(self.json_schema_filename, 'w') as f:
f.write(json.dumps(
{
'type': 'fake',
}
))
response = Mock()
response.text = '{}'
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertFalse(bool(json_schema_test_result))
self.assertTrue(
json_schema_test_result.description.startswith(
'Schema file {0} had a problem'.format(
self.json_schema_filename
)
)
)
def test_json_schema_test_schema_is_not_valid_schema_not_even_close(self):
from smoketest.tests import (
JSONSchemaTest
)
# Write some garbage to the schema file
with open(self.json_schema_filename, 'w') as f:
f.write('[]')
response = Mock()
response.text = '{}'
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertFalse(bool(json_schema_test_result))
self.assertTrue(
json_schema_test_result.description.startswith(
'Schema file {0} had a problem'.format(
self.json_schema_filename
)
)
)
def test_json_schema_test_non_json_response(self):
from smoketest.tests import (
JSONSchemaTest
)
with open(self.json_schema_filename, 'w') as f:
f.write('{}')
response = Mock()
response.text = 'GARBAGE'
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertFalse(bool(json_schema_test_result))
self.assertEqual(
'Response body was not valid JSON',
json_schema_test_result.description,
)
def test_json_schema_test_response_does_not_follow_schema(self):
from smoketest.tests import (
JSONSchemaTest
)
with open(self.json_schema_filename, 'w') as f:
f.write(json.dumps(
{
"type": "object",
"properties": {
"foo": {
"type": "string"
}
},
"required": ["foo"]
}
))
response = Mock()
response.text = '{}'
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertFalse(bool(json_schema_test_result))
self.assertEqual(
"Response did not obey {0}: {1} is a required property".format(
self.json_schema_filename,
repr(u'foo'),
),
json_schema_test_result.description,
)
def test_json_schema_test_everything_is_good(self):
from smoketest.tests import (
JSONSchemaTest
)
with open(self.json_schema_filename, 'w') as f:
f.write(json.dumps(
{
"type": "object",
"properties": {
"foo": {
"type": "string"
}
},
"required": ["foo"]
}
))
response = Mock()
response.text = json.dumps({
'foo': 'bar'
})
json_schema_test = JSONSchemaTest(self.json_schema_filename)
json_schema_test_result = json_schema_test.get_result(
response
)
self.assertTrue(bool(json_schema_test_result))
self.assertEqual(
'Response body obeyed {0}'.format(self.json_schema_filename),
json_schema_test_result.description,
)
def test_header_test(self):
from smoketest.tests import (
HeaderTest,
TextMatchingMethod,
)
header_test = HeaderTest(
'X-Some-Header',
TextMatchingMethod('equals', 'ello'),
)
response = Mock()
response.headers = {
'X-Some-Header': 'bye',
}
result = header_test.get_result(response)
self.assertFalse(result)
self.assertEqual(
'X-Some-Header header was bye',
result.description,
)
def test_header_test_missing_response_header(self):
from smoketest.tests import (
HeaderTest,
TextMatchingMethod,
)
header_test = HeaderTest(
'X-Some-Header',
TextMatchingMethod('equals', 'bye'),
)
response = Mock()
response.headers = {}
result = header_test.get_result(response)
self.assertFalse(result)
self.assertEqual(
'X-Some-Header header was not present',
result.description,
)
class TestTextMatchingMethod(unittest.TestCase):
"""Tests for the class TextMatchingMethod
"""
def test_regex(self):
from smoketest.tests import TextMatchingMethod
text_matching_method = TextMatchingMethod(
'regex',
'^hello$',
)
self.assertTrue(text_matching_method('hello'))
self.assertFalse(text_matching_method('shello'))
def test_endswith(self):
from smoketest.tests import TextMatchingMethod
text_matching_method = TextMatchingMethod(
'endswith',
'^hello$',
)
self.assertTrue(text_matching_method('asdf ^hello$'))
self.assertFalse(text_matching_method('hello'))
def test_startswith(self):
from smoketest.tests import TextMatchingMethod
text_matching_method = TextMatchingMethod(
'startswith',
'^hello$',
)
self.assertTrue(text_matching_method('^hello$ asdf'))
self.assertFalse(text_matching_method('hello'))
def test_equals(self):
from smoketest.tests import TextMatchingMethod
text_matching_method = TextMatchingMethod(
'equals',
'^hello$',
)
self.assertTrue(text_matching_method('^hello$'))
self.assertFalse(text_matching_method('hello'))
def test_contains(self):
from smoketest.tests import TextMatchingMethod
text_matching_method = TextMatchingMethod(
'contains',
'^hello$',
)
self.assertTrue(text_matching_method('a^hello$b'))
self.assertFalse(text_matching_method('hello'))
class TestParsers(unittest.TestCase):
"""Tests for the parser functions
"""
def test_status_default(self):
from smoketest.tests import (
get_status_tests,
StatusTest,
)
elem = {}
options = Mock()
tests = get_status_tests(elem, options)
self.assertIsInstance(tests[0], StatusTest)
self.assertEqual(tests[0].target_code, '200')
def test_status_explicit(self):
from smoketest.tests import (
get_status_tests,
StatusTest,
)
elem = {'status': '404'}
options = Mock()
tests = get_status_tests(elem, options)
self.assertIsInstance(tests[0], StatusTest)
self.assertEqual(tests[0].target_code, '404')
def test_environment_dependent_test(self):
from smoketest.tests import get_status_tests
# Test with explicit default
elem = {
"status": {
"live": "30X",
"other": "404",
}
}
options = Mock()
options.level = 'live'
tests = get_status_tests(elem, options)
self.assertEqual(tests[0].target_code, "30X")
options.level = 'stag'
tests = get_status_tests(elem, options)
self.assertEqual(tests[0].target_code, "404")
# Test with implicit default (200)
elem = {
"status": {
"live": "30X",
}
}
options.level = 'stag'
tests = get_status_tests(elem, options)
self.assertEqual(tests[0].target_code, "200")
def test_redirect_test(self):
from smoketest.tests import (
get_redirect_tests,
RedirectTest,
)
elem = {
"redirect": {
"status": "30X",
"location": "usnews.com",
},
}
options = Mock()
options.port = None
options.level = None
options.cachebust = True
tests = get_redirect_tests(elem, options)
self.assertIsInstance(tests[0], RedirectTest)
self.assertEqual(tests[0].target_code, "30X")
self.assertEqual(tests[0].target_location, "usnews.com")
def test_html_test_regex_attribute(self):
from smoketest.tests import (
get_html_tests,
HTMLTest,
)
elem = {
'html': [{
'selector': 'h1',
'attribute': 'attr',
'regex': 'r',
}]
}
options = Mock()
tests = get_html_tests(elem, options)
self.assertEqual(1, len(tests))
test = tests[0]
self.assertIs(HTMLTest, test.__class__)
self.assertEqual('h1', test.selector)
self.assertEqual('attr', test.attr)
self.assertEqual('regex', test.text_matching_method.methodname)
self.assertEqual('r', test.text_matching_method.text_to_match)
self.assertEqual('h1 attr matches the regex r', test.description)
self.assertEqual('always', test.when)
self.assertFalse(test.text)
def test_html_test_simple_equals(self):
from smoketest.tests import (
get_html_tests,
HTMLTest,
)
elem = {
'html': [{
'selector': 'h1',
'equals': 'r',
}]
}
options = Mock()
tests = get_html_tests(elem, options)
self.assertEqual(1, len(tests))
test = tests[0]
self.assertIs(HTMLTest, test.__class__)
self.assertEqual('h1', test.selector)
self.assertIs(None, test.attr)
self.assertEqual('equals', test.text_matching_method.methodname)
self.assertEqual('r', test.text_matching_method.text_to_match)
self.assertEqual('h1 text equals r', test.description)
self.assertEqual('always', test.when)
self.assertTrue(test.text)
def test_html_test_never_exists(self):
from smoketest.tests import (
get_html_tests,
HTMLTest,
)
elem = {
'html': [{
'selector': 'h1',
'when': 'never',
}]
}
options = Mock()
tests = get_html_tests(elem, options)
self.assertEqual(1, len(tests))
test = tests[0]
self.assertIs(HTMLTest, test.__class__)
self.assertEqual('h1', test.selector)
self.assertIs(None, test.attr)
self.assertIs(None, test.text_matching_method)
self.assertEqual('h1 is not present', test.description)
self.assertEqual('never', test.when)
def test_parser_decorator(self):
# Define a custom parser
from smoketest.tests import (
get_tests_from_element,
parser,
)
mymock = MagicMock()
@parser
| |
'remote_system_name': neighbor[3],
'remote_port': neighbor[1],
'remote_port_description': neighbor[2],
'remote_system_description': neighbor[4],
'remote_system_capab': neighbor[5],
'remote_system_enable_capab': neighbor[5]
}
lldp_neighbors[local_iface].append(neighbor_dict)
return lldp_neighbors
def __get_ntp_peers(self):
"""
Return the NTP peers configuration as dictionary.
Sample output:
{
'192.168.0.1': {},
'192.168.3.11': {},
'172.16.17.32': {},
'172.16.31.10': {}
}
"""
ntp_server = {}
# command = "display ntp session"
# output = self.device.send_command(command)
return ntp_server
def get_ntp_servers(self):
"""
Return the NTP servers configuration as dictionary.
Sample output:
{
'192.168.0.1': {},
'192.168.3.11': {},
'172.16.17.32': {},
'172.16.31.10': {}
}
"""
re_ntp_sessions = r"clock source: (\d+\.\d+\.\d+\.\d+)"
ntp_server = {}
command = "display ntp sessions"
output = self.device.send_command(command)
matches = re.findall(re_ntp_sessions, output)
if len(matches) > 0:
for match in matches:
ntp_server[match] = {}
return ntp_server
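# Minimal sketch of what the regex above extracts (the device output shown here is
# illustrative, not captured from real hardware):
#
#   sample = "clock source: 10.1.1.1\n ...\n clock source: 192.168.1.10\n"
#   re.findall(r"clock source: (\d+\.\d+\.\d+\.\d+)", sample)
#   -> ['10.1.1.1', '192.168.1.10']
#   so get_ntp_servers() would return {'10.1.1.1': {}, '192.168.1.10': {}}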
def get_ntp_stats(self):
"""
Return a list of NTP synchronization statistics.
Sample output:
[
{
'remote' : u'192.168.127.12',
'referenceid' : u'192.168.127.12',
'synchronized' : True,
'stratum' : 4,
'type' : u'-',
'when' : u'107',
'hostpoll' : 256,
'reachability' : 377,
'delay' : 164.228,
'offset' : -13.866,
'jitter' : 2.695
}
]
"""
re_ntp_sessions = r'clock source: (\d+\.\d+\.\d+\.\d+)\n.*clock stratum: (\d+)\n.*clock status: (.*)\n.*ID: (\d+\.\d+\.\d+\.\d+)\n.*reach: (\d+)\n.*poll: (\d+)\n.*now: (\d+)\n.*offset: (-?\d+.\d+).*\n.*delay: (\d+.\d+).*\n.*disper: (\d+.\d+).*'
ntp_stats = []
command = "display ntp sessions | no-more"
output = self.device.send_command(command)
try:
matches = re.findall(re_ntp_sessions, output)
for match in matches:
synchronized = False
statuses = match[2]
statuses = [x.strip() for x in statuses.split(',')]
if 'sane' in statuses and 'valid' in statuses:
synchronized = True
session = {
"remote": match[0],
"referenceid": match[3],
"synchronized": synchronized,
"stratum": int(match[1]),
"type": "",
"when": int(match[6]),
"hostpoll": int(match[5]),
"reachability": int(match[4]),
"delay": float(match[8]),
"offset": float(match[7]),
"jitter": float(match[9])
}
ntp_stats.append(session)
except Exception:
return False
return ntp_stats
def get_bgp_neighbors(self):
re_bgp_neighbors = r'(\d+\.\d+\.\d+\.\d+)\s+(\w)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\w+)\s+(\w+)\s+(\d+)'
command = 'display bgp peer | no-more'
output = self.device.send_command(command)
matches = re.findall(re_bgp_neighbors, output)
neighbors = {
"global": {
"router-id": "1.2.3.4",
"peers": {}
}
}
for match in matches:
neighbors["global"]["peers"][match[0]] = {
"local_as": "",
"remote_as": match["2"],
"remote_id": match["0"],
"is_up": True,
"is_enabled": True,
"description": "Abc",
"uptime": 123456,
"address_family": {
"ipv4": {
"sent_prefixes": 1,
"accepted_prefixes": 2,
"received_prefixes": 3
},
"ipv6": {
"sent_prefixes": 1,
"accepted_prefixes": 2,
"received_prefixes": 3
}
}
}
return neighbors
def get_bgp_neighbors_detail(self, neighbor_address=""):
"""
{
'global': {
8121: [
{
'up' : True,
'local_as' : 13335,
'remote_as' : 8121,
'local_address' : u'172.16.31.10',
'local_address_configured' : True,
'local_port' : 179,
'routing_table' : u'inet.0',
'remote_address' : u'172.16.31.10',
'remote_port' : 58380,
'multihop' : False,
'multipath' : True,
'remove_private_as' : True,
'import_policy' : u'4-NTT-TRANSIT-IN',
'export_policy' : u'4-NTT-TRANSIT-OUT',
'input_messages' : 123,
'output_messages' : 13,
'input_updates' : 123,
'output_updates' : 5,
'messages_queued_out' : 23,
'connection_state' : u'Established',
'previous_connection_state' : u'EstabSync',
'last_event' : u'RecvKeepAlive',
'suppress_4byte_as' : False,
'local_as_prepend' : False,
'holdtime' : 90,
'configured_holdtime' : 90,
'keepalive' : 30,
'configured_keepalive' : 30,
'active_prefix_count' : 132808,
'received_prefix_count' : 566739,
'accepted_prefix_count' : 566479,
'suppressed_prefix_count' : 0,
'advertised_prefix_count' : 0,
'flap_count' : 27
}
]
}
}
:param neighbor_address:
:return:
"""
peers = {
"global": {}
}
re_bgp_brief = r'(\d+\.\d+\.\d+\.\d+)\W+(\d)\W+(\d+)\W+(\d+)\W+(\d+)\W+(\d+)\W+(\w+)\W+(Established|Idle|Idle\(Admin\)|Idle\(Ovlmt\)|Connect|Active|OpenSent|OpenConfirm|No neg)\W+(\d+)'
re_bgp_brief_local_as = r'Local AS number\W+(\d+)'
command = "display bgp peer"
output = self.device.send_command(command)
matches = re.findall(re_bgp_brief, output, re.S)
local_as_matches = re.findall(re_bgp_brief_local_as, output)
local_as = local_as_matches[0] if local_as_matches else ''
# Make sure the dictionary has all key's necessary
for match in matches:
peer_as = match[2]
if not peer_as in peers["global"]:
peers["global"][peer_as] = []
for match in matches:
peer_ip = match[0]
peer_version = match[1]
peer_as = match[2]
command = "display bgp peer {} verbose | no-more".format(peer_ip)
# Expect_string needed due to large output??
output = self.device.send_command(command, expect_string="<")
re_bgp_detail_state = r'BGP current state: (\w+), (\w+) for (.*)'
re_bgp_detail_peer = r'BGP Peer is (\d+.\d+.\d+.\d+).*AS (\d+)'
re_bgp_detail_last_state = r'BGP last state: (\w+)'
re_bgp_detail_ports = r'Port: Local - (\d+)\W+Remote - (\d+)'
re_bgp_detail_received = r'Received:.*\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n'
re_bgp_detail_sent = r'Sent\W+:.*\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n.*messages\W+(\d+)\n'
re_bgp_detail_flaps = r'BGP Peer Up count: (\d+)'
re_bgp_detail_routes = r'Received total routes: (\d+)\n Received active routes total: (\d+)\n Advertised total routes: (\d+)'
re_bgp_detail_event = r'BGP current event: (\w+)'
re_bgp_detail_times = r'Configured: Active Hold Time: (\d+).*:(\d+).*\n.*\n Negotiated: Active Hold Time: (\d+).*:(\d+)'
re_bgp_detail_policies = r'Import route policy is: (.*)\n Export route policy is: (.*)'
state = re.findall(re_bgp_detail_state, output)[0]
peer = re.findall(re_bgp_detail_peer, output)[0]
last_state = re.findall(re_bgp_detail_last_state, output)[0]
ports = re.findall(re_bgp_detail_ports, output)[0]
sent = re.findall(re_bgp_detail_sent, output)[0]
received = re.findall(re_bgp_detail_received, output)[0]
flaps = re.findall(re_bgp_detail_flaps, output)[0]
routes = re.findall(re_bgp_detail_routes, output)[0]
event = re.findall(re_bgp_detail_event, output)[0]
times = re.findall(re_bgp_detail_times, output)[0]
up = False
if "UP" in state[1].upper():
up = True
policies = ["", ""]
if "No routing policy is configured" not in output:
policies = re.findall(re_bgp_detail_policies, output)[0]
peer = {
'up': up,
'local_as': local_as,
'remote_as': peer[1],
'local_address': u'',
#'local_address_configured': True,
'local_port': ports[0],
'routing_table': u'',
'remote_address': peer[0],
'remote_port': ports[1],
#'multihop': False,
#'multipath': True,
'remove_private_as': True,
'import_policy': policies[0],
'export_policy': policies[1],
'input_messages': received[0],
'output_messages': sent[0],
'input_updates': received[1],
'output_updates': sent[1],
'messages_queued_out': 0,
'connection_state': state[0],
'previous_connection_state': last_state,
'last_event': event,
#'suppress_4byte_as': False,
#'local_as_prepend': False,
'holdtime': times[2],
'configured_holdtime': times[0],
'keepalive': times[3],
'configured_keepalive': times[1],
'active_prefix_count': routes[1],
'received_prefix_count': routes[0],
#'accepted_prefix_count': 0,
#'suppressed_prefix_count': 0,
'advertised_prefix_count': routes[2],
'flap_count': flaps
}
peers["global"][peer_as].append(peer)
return peers
@staticmethod
def _separate_section(separator, content):
if content == "":
return []
# Break output into per-interface sections
interface_lines = re.split(separator, content, flags=re.M)
if len(interface_lines) == 1:
msg = "Unexpected output data:\n{}".format(interface_lines)
raise ValueError(msg)
# Get rid of the blank data at the beginning
interface_lines.pop(0)
# Must be pairs of data (the separator and section corresponding to it)
if len(interface_lines) % 2 != 0:
msg = "Unexpected output data:\n{}".format(interface_lines)
raise ValueError(msg)
# Combine the separator and section into one string
intf_iter = iter(interface_lines)
try:
new_interfaces = [line + next(intf_iter, '') for line in intf_iter]
except TypeError:
raise ValueError("Unexpected output data:\n{}".format(interface_lines))
return new_interfaces
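# Worked illustration of the pairing logic above (hypothetical separator and
# content): splitting on a capturing group keeps the separators in the result,
#     re.split(r'(^Interface \S+$)', 'Interface Eth0\n up\nInterface Eth1\n down\n', flags=re.M)
#     -> ['', 'Interface Eth0', '\n up\n', 'Interface Eth1', '\n down\n']
# so after popping the leading '' the items pair up as (separator, section) and
# are stitched back together by the iterator in the method above.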
def _delete_file(self, filename):
command = 'delete /unreserved /quiet {0}'.format(filename)
self.device.send_command(command)
def _save_config(self, filename=''):
"""Save the current running config to the given file."""
command = 'save {}'.format(filename)
save_log = self.device.send_command(command, max_loops=10, expect_string=r'Y/N')
# The search pattern will not be detected when a new hostname is set, so don't use auto_find_prompt=False
save_log += self.device.send_command('y', expect_string=r'<.+>')
search_result = re.search("successfully", save_log, re.M)
if search_result is None:
msg = "Failed to save config. Command output:{}".format(save_log)
raise CommandErrorException(msg)
def _load_config(self, config_file):
command = 'rollback configuration to file {0}'.format(config_file)
rollback_result = self.device.send_command(command, expect_string=r'Y/N')
rollback_result += self.device.send_command('y', expect_string=r'[<\[].+[>\]]')
search_result = re.search("clear the information", rollback_result, re.M)
if search_result is not None:
rollback_result += self.device.send_command('y', expect_string=r'<.+>')
search_result = re.search("succeeded|finished", rollback_result, re.M)
if search_result is None:
msg = "Failed to load config. Command output:{}".format(rollback_result)
raise CommandErrorException(msg)
def _replace_candidate(self, filename, config):
if not filename:
filename = self._create_tmp_file(config)
else:
if not os.path.isfile(filename):
raise ReplaceConfigException("File {} not found".format(filename))
self.replace_file = filename
if not self._enough_space(self.replace_file):
msg = 'Could not transfer file. Not enough space on device.'
raise ReplaceConfigException(msg)
need_transfer = True
if self._check_file_exists(self.replace_file):
if self._check_md5(self.replace_file):
need_transfer = False
if need_transfer:
dest = os.path.basename(self.replace_file)
# full_remote_path = 'flash:/{}'.format(dest)
with paramiko.SSHClient() as ssh:
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname=self.hostname, username=self.username, password=<PASSWORD>, port=self.port,
look_for_keys=False)
try:
with paramiko.SFTPClient.from_transport(ssh.get_transport()) as sftp_client:
sftp_client.put(self.replace_file, dest)
# with SCPClient(ssh.get_transport()) as scp_client:
# scp_client.put(self.replace_file, dest)
except Exception as e:
msg = 'Could not transfer file. There was an error during transfer:' + str(e)
raise ReplaceConfigException(msg)
self.config_replace = True
if config and os.path.isfile(self.replace_file):
os.remove(self.replace_file)
def _verify_remote_file_exists(self, dst, file_system='flash:'):
command = 'dir {0}/{1}'.format(file_system, dst)
output = self.device.send_command(command)
if 'No file found' in output:
raise ReplaceConfigException('Could not transfer file.')
def _check_file_exists(self, cfg_file):
command = 'dir {}'.format(cfg_file)
output = self.device.send_command(command)
if 'No file found' in output:
return False
return True
def _check_md5(self, dst):
dst_hash = self._get_remote_md5(dst)
src_hash = self._get_local_md5(dst)
if src_hash == dst_hash:
return True
return False
@staticmethod
def _get_local_md5(dst, blocksize=2**20):
md5 = hashlib.md5()
local_file = open(dst, 'rb')
buf = local_file.read(blocksize)
while buf:
md5.update(buf)
buf = local_file.read(blocksize)
local_file.close()
return md5.hexdigest()
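# Equivalent one-shot sketch for small files (assumption: the whole file fits in
# memory); the chunked loop above is preferred for large image files:
#     hashlib.md5(open(dst, 'rb').read()).hexdigest()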
def _get_remote_md5(self, dst):
command = 'display system file-md5 {0}'.format(dst)
output = self.device.send_command(command)
filename = os.path.basename(dst)
match = re.search(filename + r'\s+(?P<md5>\w+)', output, re.M)
if match is None:
msg = "Unexpected format: {}".format(output)
raise ValueError(msg)
return match.group('md5')
def _commit_merge(self):
commands = [command for command in self.merge_candidate.splitlines() if command]
output = ''
try:
output += self.device.send_command('system-view', expect_string=r'\[.+\]')
for command in commands:
output += self.device.send_command(command, expect_string=r'\[.+\]')
if self.device.check_config_mode():
check_error = re.search("error", output, re.IGNORECASE)
if check_error is not None:
return_log = self.device.send_command('return', expect_string=r'[<\[].+[>\]]')
if 'Uncommitted configurations' in return_log:
# Discard uncommitted configuration
return_log += self.device.send_command('n', expect_string=r'<.+>')
output += return_log
raise MergeConfigException('Error | |
Note
---------
The results are stored in a DataFrame for each subspace/singleton
under the "FAS" column of the main DataFrame
"""
if useSubSpaces:
self._updateOffsets() # make sure offset times are up to date
for sta in self.subspaces.keys():
# check if FAS already calculated, only recalc if recalc
fas1 = self.subspaces[sta]['FAS'][0]
if isinstance(fas1, dict) and not recalc:
msg = ('FAS for station %s already calculated, to '
'recalculate pass True to the parameter recalc' %
sta)
detex.log(__name__, msg, pri=True)
else:
self.subspaces[sta]['FAS'] = detex.fas._initFAS(
self.subspaces[sta],
conDatNum,
self.clusters,
self.cfetcher,
LTATime=LTATime,
STATime=STATime,
staltalimit=staltalimit,
numBins=numBins,
dtype=self.dtype)
if useSingles:
for sta in self.singles.keys():
for a in range(len(self.singles[sta])):
fas1 = self.singles[sta]['FAS'][a]
if isinstance(fas1, dict) and not recalc:
msg = (('FAS for singleton %d already calculated on '
'station %s, to recalculate pass True to the '
'parameter recalc') % (a, sta))
detex.log(__name__, msg, pri=True)
# skip any events that have not been trimmed
elif len(self.singles[sta]['SampleTrims'][a].keys()) < 1:
continue
else:
self.singles[sta]['FAS'][a] = detex.fas._initFAS(
self.singles[sta][a:a + 1],
conDatNum,
self.clusters,
self.cfetcher,
LTATime=LTATime,
STATime=STATime,
staltalimit=staltalimit,
numBins=numBins,
dtype=self.dtype,
issubspace=False)
def detex(self,
utcStart=None,
utcEnd=None,
subspaceDB='SubSpace.db',
trigCon=0,
triggerLTATime=5,
triggerSTATime=0,
multiprocess=False,
delOldCorrs=True,
calcHist=True,
useSubSpaces=True,
useSingles=False,
estimateMags=True,
classifyEvents=None,
eventCorFile='EventCors',
utcSaves=None,
fillZeros=False):
"""
function to run subspace detection over continuous data and store
results in SQL database subspaceDB
Parameters
------------
utcStart : str or num
An obspy.core.UTCDateTime readable object defining the start time
of the correlations if not all available data are to be used
utcEnd : str or num
An obspy.core.UTCDateTime readable object defining the end time
of the correlations
subspaceDB : str
Path to the SQLite database to store detections in. If it already
exists, the delOldCorrs parameter governs whether it will be deleted before
running new detections, or appended to.
trigCon : int
The condition for which detections should trigger. Once the
condition is set the variable minCoef is used:
0 is based on the detection statistic threshold
1 is based on the STA/LTA of the detection statistic threshold
(Only 0 is currently supported)
triggerLTATime : number
The long term average for the STA/LTA calculations in seconds.
triggerSTATime : number
The short term average for the STA/LTA calculations in seconds.
If ==0 then one sample is used.
multiprocess : bool
Determine if each station should be forked into its own process
for potential speed ups. Currently not implemented.
delOldCorrs : bool
Determines if subspaceDB should be deleted before performing
detections. If False old database is appended to.
calcHist : boolean
If True calculates the histogram for every point of the detection
statistic vectors (all hours, stations and subspaces) by keeping
a cumulative bin count. Only slows the detections down slightly
and can be useful for threshold sanity checks. The histograms are
then returned to the main DataFrame in the SubSpace instance
as the column histSubSpaces, and saved in the subspaceDB under the
ss_hist and sg_hist tables for subspaces and singletons.
useSubSpaces : bool
If True the subspaces will be used as detectors to scan
continuous data
useSingles : bool
If True the singles (events that did not cluster) will be used as
detectors to scan continuous data
estimateMags : bool
If True, magnitudes will be estimated for each detection by using
two methods. The first is using standard deviation ratios, and the
second uses projected energy ratios (see chambers et al. 2015 for
details).
classifyEvents : None, str, or DataFrame
If None subspace detectors will be run over continuous data.
Else, detex will be run over event waveforms in order to classify
events into groups based on which subspace they are most similar
to. In the latter case the classifyEvents argument must be a
str (path to template key like csv) or DataFrame (loaded template
key file). The same event DataFetcher attached to the cluster
object will be used to get the data. This feature is Experimental.
eventCorFile : str
A path to a new pickled DataFrame created when the eventDir option
is used. Records the highest detection statistic in the file
for each event, station, and subspace. Useful when trying to
characterize events.
utcSaves : None or list of obspy DateTime readable objects
Either None (not used) or an iterable of objects readable by
obspy.UTCDateTime. When the detections are run, if the continuous
data cover a time indicated in utcSaves then the continuous data
and detection statistic vectors are saved to a pickled DataFrame
of the name "UTCsaves.pkl". This can be useful for debugging, or
extracting the DS vector for a time of interest.
fillZeros : bool
If True, fill the gaps in continuous data with 0s. In that case the
STA/LTA of the detection statistic cannot be calculated, in order to
avoid dividing by 0.
Notes
----------
The same filter and decimation parameters that were used in the
ClusterStream instance will be applied.
"""
# make sure no parameters that don't work yet are selected
if multiprocess or trigCon != 0:
msg = 'multiprocessing and trigcon other than 0 not supported'
detex.log(__name__, msg, level='error')
if os.path.exists(subspaceDB):
if delOldCorrs:
os.remove(subspaceDB)
msg = 'Deleting old subspace database %s' % subspaceDB
detex.log(__name__, msg, pri=True)
else:
msg = 'Not deleting old subspace database %s' % subspaceDB
detex.log(__name__, msg, pri=True)
if useSubSpaces: # run subspaces
TRDF = self.subspaces
# determine if subspaces are defined (ie SVD has been called)
stas = self.subspaces.keys()
sv = [all(TRDF[sta].SVDdefined) for sta in stas]
if not all(sv):
msg = 'call SVD before running subspace detectors'
detex.log(__name__, msg, level='error')
Det = _SSDetex(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters,
subspaceDB, trigCon, triggerLTATime, triggerSTATime,
multiprocess, calcHist, self.dtype, estimateMags,
classifyEvents, eventCorFile, utcSaves, fillZeros)
self.histSubSpaces = Det.hist
if useSingles: # run singletons
# make sure thresholds are calculated
self.setSinglesThresholds()
TRDF = self.singles
Det = _SSDetex(TRDF, utcStart, utcEnd, self.cfetcher, self.clusters,
subspaceDB, trigCon, triggerLTATime, triggerSTATime,
multiprocess, calcHist, self.dtype, estimateMags,
classifyEvents, eventCorFile, utcSaves, fillZeros,
issubspace=False)
self.histSingles = Det.hist
# save additional info to the SQL database
if useSubSpaces or useSingles:
cols = ['FREQMIN', 'FREQMAX', 'CORNERS', 'ZEROPHASE']
dffil = pd.DataFrame([self.clusters.filt], columns=cols, index=[0])
detex.util.saveSQLite(dffil, subspaceDB, 'filt_params')
# get general info on each singleton/subspace and save
ssinfo, sginfo = self._getInfoDF()
sshists, sghists = self._getHistograms(useSubSpaces, useSingles)
if useSubSpaces and ssinfo is not None:
# save subspace info
detex.util.saveSQLite(ssinfo, subspaceDB, 'ss_info')
if useSingles and sginfo is not None:
# save singles info
detex.util.saveSQLite(sginfo, subspaceDB, 'sg_info')
if useSubSpaces and sshists is not None:
# save subspace histograms
detex.util.saveSQLite(sshists, subspaceDB, 'ss_hist')
if useSingles and sghists is not None:
# save singles histograms
detex.util.saveSQLite(sghists, subspaceDB, 'sg_hist')
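# Illustrative call sequence (hypothetical variable name `ss` for an instance of
# this class; dates and file names are examples only):
#     ss.detex(utcStart='2015-01-01', utcEnd='2015-02-01',
#              subspaceDB='SubSpace.db', useSubSpaces=True, useSingles=False)
# which scans the continuous data and writes the detections together with the
# filt_params, ss_info and ss_hist tables into SubSpace.db.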
def _getInfoDF(self):
"""
get dataframes that have info about each subspace and single
"""
sslist = [] # list in which to put DFs for each subspace/station pair
sglist = [] # list in which to put DFs for each single/station pair
for sta in self.Stations:
if sta not in self.ssStations:
msg = 'No subspaces on station %s' % sta
detex.log(__name__, msg, pri=True)
continue
for num, ss in self.subspaces[sta].iterrows(): # write ss info
name = ss.Name
station = ss.Station
events = ','.join(ss.Events)
numbasis = ss.NumBasis
thresh = ss.Threshold
if isinstance(ss.FAS, dict) and len(ss.FAS.keys()) > 1:
b1, b2 = ss.FAS['betadist'][0], ss.FAS['betadist'][1]
else:
b1, b2 = np.nan, np.nan
cols = ['Name', 'Sta', 'Events', 'Threshold', 'NumBasisUsed',
'beta1', 'beta2']
dat = [[name, station, events, thresh, numbasis, b1, b2]]
sslist.append(pd.DataFrame(dat, columns=cols))
for sta in self.Stations:
if sta not in self.singStations:
msg = 'No singletons on station %s' % sta
detex.log(__name__, msg, pri=True)
continue
for num, ss in self.singles[sta].iterrows(): # write singles info
name = ss.Name
station = ss.Station
events = ','.join(ss.Events)
thresh = ss.Threshold
if isinstance(ss.FAS, list) and len(ss.FAS[0].keys()) > 1:
b1, b2 = ss.FAS[0]['betadist'][0], ss.FAS[0]['betadist'][1]
else:
b1, b2 = np.nan, np.nan
cols = ['Name', 'Sta', 'Events', 'Threshold', 'beta1', 'beta2']
dat = [[name, station, events, thresh, b1, b2]]
sglist.append(pd.DataFrame(dat, columns=cols))
if len(sslist) > 0:
ssinfo = pd.concat(sslist, ignore_index=True)
else:
ssinfo | |
41.469, 29.027,
VERTEX, 10.225, 41.425, 29.178,
VERTEX, 10.348, 41.381, 29.330,
VERTEX, 10.470, 41.336, 29.482,
VERTEX, 10.593, 41.291, 29.633,
VERTEX, 10.716, 41.244, 29.784,
VERTEX, 10.817, 41.205, 29.908,
END,
BEGIN, LINE_LOOP,
VERTEX, 11.182, 40.607, 29.803,
VERTEX, 11.213, 40.498, 29.948,
VERTEX, 11.243, 40.388, 30.092,
VERTEX, 11.273, 40.277, 30.236,
VERTEX, 11.304, 40.166, 30.379,
VERTEX, 11.334, 40.054, 30.522,
VERTEX, 11.329, 40.035, 30.591,
VERTEX, 11.315, 39.984, 30.778,
VERTEX, 11.301, 39.933, 30.965,
VERTEX, 11.286, 39.882, 31.151,
VERTEX, 11.270, 39.832, 31.338,
VERTEX, 11.209, 39.872, 31.503,
VERTEX, 11.140, 39.919, 31.686,
VERTEX, 11.070, 39.968, 31.867,
VERTEX, 10.998, 40.019, 32.047,
VERTEX, 10.924, 40.072, 32.225,
VERTEX, 10.848, 40.129, 32.400,
VERTEX, 10.769, 40.190, 32.573,
VERTEX, 10.687, 40.254, 32.744,
VERTEX, 10.602, 40.322, 32.911,
VERTEX, 10.516, 40.393, 33.076,
VERTEX, 10.426, 40.467, 33.239,
VERTEX, 10.335, 40.544, 33.400,
VERTEX, 10.242, 40.623, 33.560,
VERTEX, 10.148, 40.704, 33.718,
VERTEX, 10.053, 40.786, 33.875,
VERTEX, 9.957, 40.870, 34.031,
VERTEX, 9.897, 40.922, 34.128,
VERTEX, 9.796, 41.101, 34.113,
VERTEX, 9.695, 41.280, 34.096,
VERTEX, 9.594, 41.459, 34.077,
VERTEX, 9.493, 41.638, 34.056,
VERTEX, 9.392, 41.816, 34.033,
VERTEX, 9.393, 41.854, 33.953,
VERTEX, 9.396, 41.935, 33.778,
VERTEX, 9.398, 42.016, 33.603,
VERTEX, 9.399, 42.097, 33.428,
VERTEX, 9.401, 42.178, 33.253,
VERTEX, 9.401, 42.259, 33.078,
VERTEX, 9.404, 42.257, 33.071,
VERTEX, 9.482, 42.215, 32.892,
VERTEX, 9.559, 42.172, 32.714,
VERTEX, 9.636, 42.128, 32.536,
VERTEX, 9.714, 42.082, 32.358,
VERTEX, 9.791, 42.035, 32.181,
VERTEX, 9.869, 41.985, 32.004,
VERTEX, 9.946, 41.933, 31.829,
VERTEX, 10.024, 41.878, 31.654,
VERTEX, 10.101, 41.820, 31.481,
VERTEX, 10.179, 41.760, 31.308,
VERTEX, 10.257, 41.697, 31.135,
VERTEX, 10.334, 41.632, 30.964,
VERTEX, 10.412, 41.566, 30.793,
VERTEX, 10.490, 41.498, 30.622,
VERTEX, 10.568, 41.430, 30.452,
VERTEX, 10.645, 41.360, 30.282,
VERTEX, 10.723, 41.290, 30.113,
VERTEX, 10.801, 41.219, 29.943,
VERTEX, 10.817, 41.205, 29.909,
VERTEX, 10.922, 41.052, 29.830,
VERTEX, 11.027, 40.899, 29.751,
VERTEX, 11.132, 40.745, 29.672,
VERTEX, 11.152, 40.715, 29.657,
END,
BEGIN, LINE_LOOP,
VERTEX, 11.412, 39.749, 31.484,
VERTEX, 11.558, 39.668, 31.624,
VERTEX, 11.706, 39.587, 31.762,
VERTEX, 11.658, 39.618, 31.831,
VERTEX, 11.552, 39.686, 31.985,
VERTEX, 11.450, 39.754, 32.141,
VERTEX, 11.351, 39.821, 32.299,
VERTEX, 11.256, 39.887, 32.459,
VERTEX, 11.165, 39.952, 32.622,
VERTEX, 11.080, 40.016, 32.787,
VERTEX, 11.000, 40.079, 32.956,
VERTEX, 10.926, 40.141, 33.128,
VERTEX, 10.856, 40.201, 33.302,
VERTEX, 10.791, 40.261, 33.479,
VERTEX, 10.730, 40.320, 33.658,
VERTEX, 10.673, 40.378, 33.839,
VERTEX, 10.617, 40.435, 34.022,
VERTEX, 10.564, 40.492, 34.205,
VERTEX, 10.527, 40.533, 34.337,
VERTEX, 10.373, 40.644, 34.420,
VERTEX, 10.219, 40.755, 34.502,
VERTEX, 10.158, 40.786, 34.431,
VERTEX, 10.027, 40.853, 34.279,
VERTEX, 9.897, 40.921, 34.128,
VERTEX, 9.968, 40.860, 34.015,
VERTEX, 10.062, 40.778, 33.861,
VERTEX, 10.156, 40.697, 33.706,
VERTEX, 10.249, 40.617, 33.550,
VERTEX, 10.340, 40.539, 33.393,
VERTEX, 10.430, 40.463, 33.234,
VERTEX, 10.518, 40.390, 33.073,
VERTEX, 10.603, 40.320, 32.911,
VERTEX, 10.687, 40.253, 32.745,
VERTEX, 10.767, 40.189, 32.578,
VERTEX, 10.845, 40.130, 32.407,
VERTEX, 10.921, 40.073, 32.234,
VERTEX, 10.994, 40.020, 32.059,
VERTEX, 11.065, 39.970, 31.882,
VERTEX, 11.134, 39.922, 31.703,
VERTEX, 11.202, 39.875, 31.523,
VERTEX, 11.269, 39.831, 31.342,
VERTEX, 11.269, 39.831, 31.342,
END,
BEGIN, LINE_LOOP,
VERTEX, 12.242, 39.829, 30.442,
VERTEX, 12.210, 39.784, 30.623,
VERTEX, 12.179, 39.739, 30.805,
VERTEX, 12.147, 39.694, 30.987,
VERTEX, 12.115, 39.649, 31.169,
VERTEX, 12.083, 39.605, 31.351,
VERTEX, 12.071, 39.604, 31.363,
VERTEX, 11.931, 39.599, 31.512,
VERTEX, 11.793, 39.592, 31.663,
VERTEX, 11.706, 39.587, 31.762,
VERTEX, 11.558, 39.668, 31.624,
VERTEX, 11.412, 39.749, 31.484,
VERTEX, 11.269, 39.831, 31.342,
VERTEX, 11.271, 39.836, 31.322,
VERTEX, 11.287, 39.886, 31.137,
VERTEX, 11.302, 39.936, 30.952,
VERTEX, 11.316, 39.987, 30.767,
VERTEX, 11.330, 40.038, 30.582,
VERTEX, 11.334, 40.054, 30.522,
VERTEX, 11.529, 40.015, 30.473,
VERTEX, 11.724, 39.977, 30.420,
VERTEX, 11.919, 39.940, 30.365,
VERTEX, 12.114, 39.903, 30.308,
VERTEX, 12.273, 39.874, 30.260,
END,
BEGIN, LINE_LOOP,
VERTEX, 12.337, 39.941, 30.129,
VERTEX, 12.273, 39.874, 30.260,
VERTEX, 12.237, 39.880, 30.271,
VERTEX, 12.057, 39.914, 30.325,
VERTEX, 11.876, 39.948, 30.377,
VERTEX, 11.696, 39.983, 30.428,
VERTEX, 11.515, 40.018, 30.476,
VERTEX, 11.334, 40.054, 30.522,
VERTEX, 11.316, 40.119, 30.440,
VERTEX, 11.283, 40.243, 30.281,
VERTEX, 11.249, 40.366, 30.121,
VERTEX, 11.215, 40.488, 29.961,
VERTEX, 11.182, 40.609, 29.799,
VERTEX, 11.152, 40.715, 29.657,
VERTEX, 11.325, 40.621, 29.704,
VERTEX, 11.498, 40.527, 29.751,
VERTEX, 11.670, 40.431, 29.798,
VERTEX, 11.841, 40.334, 29.845,
VERTEX, 12.012, 40.235, 29.891,
VERTEX, 12.183, 40.136, 29.938,
VERTEX, 12.353, 40.036, 29.984,
VERTEX, 12.402, 40.008, 29.997,
END,
BEGIN, LINE_LOOP,
VERTEX, 10.179, 40.938, 34.588,
VERTEX, 10.171, 40.975, 34.606,
VERTEX, 10.059, 41.165, 34.583,
VERTEX, 9.948, 41.355, 34.561,
VERTEX, 9.839, 41.545, 34.538,
VERTEX, 9.730, 41.736, 34.516,
VERTEX, 9.632, 41.759, 34.377,
VERTEX, 9.511, 41.788, 34.205,
VERTEX, 9.391, 41.816, 34.033,
VERTEX, 9.482, 41.658, 34.053,
VERTEX, 9.577, 41.490, 34.073,
VERTEX, 9.672, 41.321, 34.092,
VERTEX, 9.767, 41.153, 34.108,
VERTEX, 9.862, 40.984, 34.123,
VERTEX, 9.897, 40.921, 34.128,
VERTEX, 10.020, 40.857, 34.271,
VERTEX, 10.143, 40.794, 34.414,
VERTEX, 10.219, 40.755, 34.502,
END,
BEGIN, LINE_LOOP,
VERTEX, 11.754, 39.467, 31.903,
VERTEX, 11.806, 39.346, 32.042,
VERTEX, 11.863, 39.226, 32.181,
VERTEX, 11.898, 39.156, 32.261,
VERTEX, 11.897, 39.089, 32.460,
VERTEX, 11.899, 39.025, 32.659,
VERTEX, 11.901, 38.964, 32.860,
VERTEX, 11.906, 38.906, 33.062,
VERTEX, 11.911, 38.851, 33.264,
VERTEX, 11.912, 38.841, 33.303,
VERTEX, 11.811, 38.928, 33.442,
VERTEX, 11.713, 39.017, 33.582,
VERTEX, 11.618, 39.105, 33.724,
VERTEX, 11.526, 39.194, 33.868,
VERTEX, 11.424, 39.313, 33.918,
VERTEX, 11.304, 39.458, 33.976,
VERTEX, 11.187, 39.605, 34.032,
VERTEX, 11.071, 39.755, 34.087,
VERTEX, 10.958, 39.906, 34.140,
VERTEX, 10.847, 40.060, 34.191,
VERTEX, 10.738, 40.216, 34.242,
VERTEX, 10.632, 40.373, 34.290,
VERTEX, 10.527, 40.533, 34.337,
VERTEX, 10.576, 40.479, 34.162,
VERTEX, 10.631, 40.420, 33.974,
VERTEX, 10.689, 40.361, 33.787,
VERTEX, 10.749, 40.301, 33.602,
VERTEX, 10.813, 40.241, 33.419,
VERTEX, 10.881, 40.179, 33.238,
VERTEX, 10.954, 40.117, 33.059,
VERTEX, 11.033, 40.053, 32.884,
VERTEX, 11.117, 39.988, 32.713,
VERTEX, 11.207, 39.921, 32.544,
VERTEX, 11.303, 39.854, 32.378,
VERTEX, 11.403, 39.786, 32.215,
VERTEX, 11.507, 39.716, 32.054,
VERTEX, 11.613, 39.647, 31.895,
VERTEX, 11.706, 39.587, 31.762,
END,
BEGIN, LINE_LOOP,
VERTEX, 12.046, 39.511, 31.532,
VERTEX, 12.010, 39.420, 31.713,
VERTEX, 11.973, 39.330, 31.895,
VERTEX, 11.935, 39.242, 32.078,
VERTEX, 11.898, 39.156, 32.261,
VERTEX, 11.892, 39.166, 32.249,
VERTEX, 11.825, 39.306, 32.088,
VERTEX, 11.762, 39.447, 31.926,
VERTEX, 11.706, 39.587, 31.762,
VERTEX, 11.839, 39.595, 31.612,
VERTEX, 11.980, 39.601, 31.459,
VERTEX, 12.083, 39.605, 31.351,
END,
BEGIN, LINE_LOOP,
VERTEX, 10.515, 41.092, 29.114,
VERTEX, 10.643, 41.018, 29.223,
VERTEX, 10.771, 40.944, 29.332,
VERTEX, 10.898, 40.869, 29.441,
VERTEX, 11.025, 40.792, 29.549,
VERTEX, 11.152, 40.715, 29.657,
VERTEX, 11.145, 40.725, 29.662,
VERTEX, 11.036, 40.885, 29.744,
VERTEX, 10.927, 41.045, 29.826,
VERTEX, 10.817, 41.205, 29.909,
VERTEX, 10.716, 41.244, 29.784,
VERTEX, 10.593, 41.290, 29.633,
VERTEX, 10.471, 41.336, 29.482,
VERTEX, 10.348, 41.381, 29.331,
VERTEX, 10.225, 41.425, 29.179,
VERTEX, 10.102, 41.468, 29.027,
VERTEX, 10.112, 41.457, 29.024,
VERTEX, 10.270, 41.286, 29.005,
VERTEX, 10.387, 41.166, 29.004,
END,
BEGIN, LINE_LOOP,
VERTEX, 12.402, 40.008, 29.997,
VERTEX, 12.249, 40.097, 29.956,
VERTEX, 12.077, 40.198, 29.909,
VERTEX, 11.905, 40.298, 29.862,
VERTEX, 11.731, 40.396, 29.815,
VERTEX, 11.558, 40.493, 29.767,
VERTEX, 11.383, 40.590, 29.720,
VERTEX, 11.208, 40.685, 29.672,
VERTEX, 11.152, 40.715, 29.657,
VERTEX, 11.025, 40.792, 29.549,
VERTEX, 10.898, 40.868, 29.441,
VERTEX, 10.771, 40.944, 29.332,
VERTEX, 10.643, 41.019, 29.223,
VERTEX, 10.515, 41.093, 29.113,
VERTEX, 10.386, 41.166, 29.004,
VERTEX, 10.508, 41.115, 28.939,
VERTEX, 10.681, 41.040, 28.871,
VERTEX, 10.857, 40.959, 28.824,
VERTEX, 11.036, 40.874, 28.799,
VERTEX, 11.215, 40.785, 28.797,
VERTEX, 11.392, 40.693, 28.816,
VERTEX, 11.564, 40.601, 28.857,
VERTEX, 11.730, 40.508, 28.920,
VERTEX, 11.886, 40.416, 29.004,
VERTEX, 12.032, 40.326, 29.107,
VERTEX, 12.166, 40.240, 29.228,
VERTEX, 12.285, 40.158, 29.366,
VERTEX, 12.388, 40.082, 29.519,
VERTEX, 12.475, 40.012, 29.685,
VERTEX, 12.503, 39.986, 29.759,
END,
BEGIN, LINE_LOOP,
VERTEX, 9.462, 37.678, 32.637,
VERTEX, 9.644, 37.776, 32.644,
VERTEX, 9.826, 37.872, 32.649,
VERTEX, 10.009, 37.967, 32.652,
VERTEX, 10.193, 38.059, 32.652,
VERTEX, 10.378, 38.150, 32.651,
VERTEX, 10.564, 38.239, 32.647,
VERTEX, 10.750, 38.326, 32.641,
VERTEX, 10.938, 38.411, 32.633,
VERTEX, 11.126, 38.495, 32.624,
VERTEX, 11.315, 38.578, 32.613,
VERTEX, 11.504, 38.660, 32.601,
VERTEX, 11.693, 38.741, 32.589,
VERTEX, 11.813, 38.792, 32.580,
VERTEX, 11.818, 38.741, 32.777,
VERTEX, 11.824, 38.688, 32.973,
VERTEX, 11.829, 38.634, 33.169,
VERTEX, 11.656, 38.553, 33.200,
VERTEX, 11.479, 38.469, 33.232,
VERTEX, 11.304, 38.383, 33.266,
VERTEX, 11.129, 38.296, 33.301,
VERTEX, 10.956, 38.207, 33.337,
VERTEX, 10.784, 38.117, 33.375,
VERTEX, 10.613, 38.024, 33.414,
VERTEX, 10.444, 37.930, 33.456,
VERTEX, 10.276, 37.834, 33.499,
VERTEX, 10.110, 37.735, 33.543,
VERTEX, 9.945, 37.636, 33.589,
VERTEX, 9.781, 37.535, 33.636,
VERTEX, 9.618, 37.432, 33.684,
VERTEX, 9.579, 37.451, 33.565,
VERTEX, 9.520, 37.479, 33.383,
VERTEX, 9.462, 37.505, 33.201,
VERTEX, 9.404, 37.531, 33.020,
VERTEX, 9.346, 37.555, 32.838,
VERTEX, 9.289, 37.577, 32.655,
VERTEX, 9.281, 37.580, 32.630,
END,
BEGIN, LINE_LOOP,
VERTEX, 11.829, 38.634, 33.169,
VERTEX, 11.824, 38.685, 32.986,
VERTEX, 11.819, 38.739, 32.783,
VERTEX, 11.813, 38.792, 32.580,
VERTEX, 11.830, 38.866, 32.517,
VERTEX, 11.865, 39.014, 32.388,
VERTEX, 11.898, 39.156, 32.261,
VERTEX, 11.897, 39.089, 32.458,
VERTEX, 11.899, 39.026, 32.655,
VERTEX, 11.901, 38.966, 32.854,
VERTEX, 11.905, 38.908, 33.053,
VERTEX, 11.911, 38.853, 33.253,
VERTEX, 11.912, 38.841, 33.303,
END,
BEGIN, LINE_LOOP,
VERTEX, 8.054, 36.282, 31.168,
VERTEX, 8.234, 36.364, 31.131,
VERTEX, 8.415, 36.445, 31.094,
VERTEX, 8.596, 36.526, 31.058,
VERTEX, 8.776, 36.608, 31.022,
VERTEX, 8.957, 36.690, 30.988,
VERTEX, 9.137, 36.771, 30.955,
VERTEX, 9.318, 36.854, 30.923,
VERTEX, 9.498, 36.936, 30.893,
VERTEX, 9.678, 37.019, 30.865,
VERTEX, 9.858, 37.102, 30.840,
VERTEX, 10.038, 37.185, 30.818,
VERTEX, 10.218, 37.270, 30.801,
VERTEX, 10.398, 37.355, 30.788,
VERTEX, 10.577, 37.441, 30.780,
VERTEX, 10.756, 37.527, 30.777,
VERTEX, 10.935, 37.615, 30.780,
VERTEX, 11.114, 37.703, 30.787,
VERTEX, 11.292, 37.792, 30.798,
VERTEX, 11.470, 37.881, 30.812,
VERTEX, 11.475, 37.884, 30.813,
VERTEX, 11.601, 37.989, 30.918,
VERTEX, 11.726, 38.094, 31.024,
VERTEX, 11.849, 38.199, 31.133,
VERTEX, 11.973, 38.302, 31.243,
VERTEX, 11.987, 38.315, 31.255,
VERTEX, 11.964, 38.386, 31.444,
VERTEX, 11.940, 38.456, 31.632,
VERTEX, 11.916, 38.526, 31.821,
VERTEX, 11.891, 38.594, 32.010,
VERTEX, 11.865, 38.661, 32.200,
VERTEX, 11.839, 38.727, 32.390,
VERTEX, 11.813, 38.792, 32.580,
VERTEX, 11.668, 38.730, 32.590,
VERTEX, 11.483, 38.651, 32.603,
VERTEX, 11.298, 38.570, 32.614,
VERTEX, 11.113, 38.489, 32.624,
VERTEX, 10.929, 38.407, 32.634,
VERTEX, 10.745, 38.323, 32.641,
VERTEX, 10.562, 38.238, 32.647,
VERTEX, 10.380, 38.151, 32.651,
VERTEX, 10.199, 38.062, 32.652,
VERTEX, 10.019, 37.971, 32.652,
VERTEX, 9.839, 37.879, 32.649,
VERTEX, 9.661, 37.785, 32.644,
VERTEX, 9.483, 37.689, 32.638,
VERTEX, 9.305, 37.593, 32.631,
VERTEX, 9.281, 37.580, 32.630,
VERTEX, 9.159, 37.468, 32.516,
VERTEX, 9.037, 37.355, 32.401,
VERTEX, 8.917, 37.242, 32.286,
VERTEX, 8.798, 37.129, 32.171,
VERTEX, 8.679, 37.015, 32.055,
VERTEX, 8.561, 36.901, 31.939,
VERTEX, 8.444, 36.786, 31.823,
VERTEX, 8.327, 36.671, 31.706,
VERTEX, 8.210, 36.556, 31.589,
VERTEX, 8.094, 36.441, 31.472,
VERTEX, 7.978, 36.325, 31.355,
VERTEX, 7.862, 36.210, 31.238,
VERTEX, 7.866, 36.206, 31.225,
VERTEX, 7.873, 36.201, 31.206,
END,
BEGIN, LINE_LOOP,
VERTEX, 7.979, 36.411, 30.436,
VERTEX, 8.070, 36.489, 30.252,
VERTEX, 8.160, 36.569, 30.069,
VERTEX, 8.202, 36.585, 30.077,
VERTEX, 8.391, 36.655, 30.108,
VERTEX, 8.580, 36.726, 30.139,
VERTEX, 8.768, 36.797, 30.169,
VERTEX, 8.957, 36.869, 30.197,
VERTEX, 9.145, 36.941, 30.225,
VERTEX, 9.332, 37.014, 30.250,
VERTEX, 9.519, 37.089, 30.274,
VERTEX, 9.705, 37.164, 30.294,
VERTEX, 9.891, 37.241, 30.311,
VERTEX, 10.075, 37.320, 30.324,
VERTEX, 10.259, 37.402, 30.332,
VERTEX, 10.441, 37.486, 30.334,
VERTEX, 10.621, 37.572, 30.331,
VERTEX, 10.800, 37.661, 30.321,
VERTEX, 10.978, 37.753, 30.306,
VERTEX, 11.155, 37.847, 30.286,
VERTEX, 11.200, 37.872, 30.280,
VERTEX, 11.291, 37.874, 30.457,
VERTEX, 11.383, 37.877, 30.634,
VERTEX, 11.473, 37.882, 30.812,
VERTEX, 11.471, 37.881, 30.812,
VERTEX, 11.293, 37.792, 30.797,
VERTEX, 11.114, 37.703, 30.786,
VERTEX, 10.936, 37.615, 30.779,
VERTEX, 10.757, 37.527, 30.776,
VERTEX, 10.578, 37.440, 30.779,
VERTEX, 10.399, 37.354, 30.787,
VERTEX, 10.219, 37.269, 30.800,
VERTEX, 10.039, 37.185, 30.817,
VERTEX, 9.859, 37.101, 30.839,
VERTEX, 9.679, 37.018, 30.864,
VERTEX, 9.499, 36.936, | |
dict_len,
frag_min,
frag_max,
folder,
offset_threshold,
off_correction_threshold,
three_prime,
get_coverage_stats=True,
advanced=True,
conserve_frame=True,
bootstrap=False,
cov_range=(1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 35, 40, 45, 50)):
# Used for debugging
offset_dic = {}
correction_dic = {}
details = {}
debug_details = {}
for frame in range(3):
details[frame] = {}
debug_details[frame] = {}
# The following dict will contain metagene info for every gene in every fsize and frame. The metadata include the number of zeros, percent zeros, the average, and the averages at the start and end
sum_total = {}
dict_cov_info = {}
# Initialize files for all fragment size and frame combinations
for fsize in range(frag_min, frag_max + 1):
dict_cov_info[fsize] = {}
sum_total[fsize] = {}
for frame in range(3):
dict_cov_info[fsize][frame] = {}
sum_total[fsize][frame] = 0
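# Sketch of the bookkeeping containers initialized above (shapes only; values
# are accumulated later in the main loop, not shown here):
#     sum_total[fsize][frame] -> running total for that fragment size/frame pair
#     dict_cov_info[fsize][frame][gene] -> per-gene coverage details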
# MAIN ANALYSIS STARTS HERE
for fsize in range(frag_min, frag_max + 1):
# The following will be used as an index in the read dictionary for each gene
last_off = -fsize
for frame in range(3):
if frame not in offset_dic:
offset_dic[frame] = {}
correction_dic[frame] = {}
if fsize not in offset_dic[frame]:
offset_dic[frame][fsize] = {}
'''
FOLLOWING CODE FOR GENES IN EVERY FSIZE AND FRAME
'''
for gene in reads_dict[fsize][frame]:
skip_gene = False
short_utr = False
"""
*** PARSING THE READ VALUES ***
"""
# The following will be reads dictionary with nucleotide position as key and number of reads as value
dict_reads = reads_dict[fsize][frame][gene]
reads = []
try:
# 5' end
if not three_prime:
start = -fsize
end = dict_len[gene] + 1
# For 3' end, we get the reads at the CDS nucleotide positions as well as fsize length after the end of CDS
else:
start = 1
end = dict_len[gene] + fsize + 1
for k in range(start, end):
# Ignoring since there is no zero position
if k == 0:
continue
try:
reads.append(dict_reads[k])
except KeyError:
logger.warn('KeyError in reads dictionary for %s at position %d with gene length %d in main analysis.' % (gene, k, dict_len[gene]))
short_utr = True
if k > 0:
skip_gene = True
logger.warn('Skip gene %s.' % gene)
# Using break instead of continue as this will break the inner for loop and continue the outer for loop
break
except KeyError:
# Length not available for this gene
skip_gene = True
logger.warn('KeyError in length dictionary for gene %s in main analysis.' % gene)
if skip_gene:
# If gene does not have proper read values, we will not consider it for analysis and hence we remove it and move to next gene in the for loop
continue
# The extra number of nucleotides on one side of CDS
if short_utr:
extra_s = min(dict_reads)
if extra_s < 12:
continue
else:
extra_s = fsize
if three_prime:
ref = [0] * frame
for r in reads[frame::3]:
ref += [r, 0, 0]
else:
# To adjust for the len and replace zeroes for out of frame reads, we do the following. This will be followed by deletion/addition of
# additional zeroes to make length equal to original seq
ref = [0] * (extra_s % 3 + frame)
# Choose the reads in positions of multiples of 3 according to the frame
for r in reads[extra_s % 3 + frame::3]: # select the reads of the given 5' end frame
ref += [r, 0, 0]
ref = ref[:-2] # we exclude the last [0,0] we added at the end
if (len(reads) - len(ref)) > 0:
ref += (len(reads[:last_off]) - len(ref)) * [0]
# we put it back to the original length (which might have changed when multiple of 3).
if three_prime:
avg_reads = np.mean(ref[frame::3])
else:
avg_reads = np.mean(ref[extra_s % 3 + frame::3])
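# Worked illustration with made-up numbers: for the 3' branch with frame=1 and
# reads=[3, 7, 2, 9, 1, 4], reads[1::3] is [7, 1], so ref becomes
# [0, 7, 0, 0, 1, 0, 0] and avg_reads = np.mean(ref[1::3]) = mean([7, 1]) = 4.0,
# i.e. only the in-frame positions contribute to the average.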
"""
*** CALCULATING THE OFFSET FOR THE GENE ***
"""
# try:
if conserve_frame:
if three_prime:
best_start_index, best_end_index, offset, score_per_offset = put_reads_in_orf_3_end(ref, extra_s, dict_len[gene], centre_on_p_not_on_a=False,
advanced=advanced,
go_three_at_a_time=True)
else:
best_start_index, best_end_index, offset, score_per_offset = put_reads_in_orf(ref, extra_s, dict_len[gene], centre_on_p_not_on_a=False,
advanced=advanced, go_three_at_a_time=True)
else:
# we get the offset to centre on frame 1, hence +1 and -2. (-2 is because we use the length to get to the last index by start+len,
# but we gave +1 to start so we need to take out an extra one to len).
best_start_index, best_end_index, offset, score_per_offset = put_reads_in_orf(ref, extra_s + 1, dict_len[gene] - 2, centre_on_p_not_on_a=False,
advanced=advanced, go_three_at_a_time=True)
# If secondary selection criteria is to be applied, we compare the scores of top two offset and the reads near the start codon.
if advanced:
# sort the offsets based on scores. If scores are same for two or more offsets, they will be sorted according to offset values.
sorted_scores = sorted(sorted(score_per_offset), key=score_per_offset.get, reverse=True)
# Quality check to make sure the highest offset is the same as the best offset we get from our function
if sorted_scores[0] != offset:
logger.warn('Sorted offsets do not match the offset we get from put_reads_in_orf function for gene %s in fragment size %d and frame %d.' % (gene, fsize, frame))
# Difference of top two offsets
diff = score_per_offset[offset] - score_per_offset[sorted_scores[1]]
# If the difference in scores is less than the avg number of reads across the gene, we apply the secondary selection criteria
if diff < avg_reads:
# Offsets with diff less than avg will be listed here
list_offsets_to_compare = []
# Add the top two offsets to the list of offsets to compare
if score_per_offset[sorted_scores[0]] >= score_per_offset[sorted_scores[1]]:
list_offsets_to_compare.append(sorted_scores[0])
list_offsets_to_compare.append(sorted_scores[1])
# If any other offsets down the order have equal scores with second best offset, then they get added to the list as well
for i in range(2, len(sorted_scores)):
if score_per_offset[sorted_scores[i]] == score_per_offset[sorted_scores[1]]:
list_offsets_to_compare.append(sorted_scores[i])
# The offsets for which the condition is met will be added in here
off_true = []
# The dict will contain the difference between the average of R2, R3 and R4 and the reads in first codon R1
diff_dict = {}
# Check the secondary selection criteria of the listed offsets
for off in list_offsets_to_compare:
# quality check.
if off > fsize:
logger.warn('Unusual offset %d being considered for fsize %d frame %d in gene %s. Skip.' % (off, fsize, frame, gene))
continue
# quality check
if off % 3 != 0:
logger.warn('Unusual offset %d being considered for fsize %d frame %d in gene %s.' % (off, fsize, frame, gene))
# Getting the first 4 codon values in the particular offset
if three_prime:
reads_offset = reads[off:off + 12]
else:
reads_offset = reads[extra_s - off:extra_s - off + 12]
if not reads_offset:
logger.warn('Reads offset list is empty.')
# Checking the condition whether the R1 is less than one-fifth of the average of R2, R3 and R4
bool_off, diff_avg = secondary_selection_conditions(reads_offset, frame, threshold=off_correction_threshold)
# Adding this offset to the list if the condition is met
if bool_off:
off_true.append(off)
diff_dict[off] = diff_avg
# Select the offset which meets the secondary selection criteria
if len(off_true) == 1:
offset_correction = off_true[0]
# If more than one offset meets the secondary selection criteria, then choose the offset with the maximum score
elif len(off_true) > 1:
diff_compare = {}
# check if the scores are equal or not. If the scores are also equal, add the difference of avg(R2, R3, R4) and R1 to diff compare and
# choose the offset with max difference
max_score = score_per_offset[sorted_scores[0]]
for i in range(0, len(off_true)):
if score_per_offset[off_true[i]] == max_score:
diff_compare[off_true[i]] = diff_dict[off_true[i]]
if len(diff_compare) > 1:
sorted_diff = sorted(diff_compare, key=diff_compare.get, reverse=True)
offset_correction = sorted_diff[0]
else:
# check if anything changes
# offset_correction = sorted_scores[0]
offset_correction = off_true[0]
# If no offset meets the secondary selection criteria, we let the offset with the maximum score remain the best offset.
# For offsets with equal scores, the smallest offset is the optimal offset
else:
offset_correction = | |
<reponame>glcLucky/dccapp<filename>dccapp/src/utils/graph_utils.py<gh_stars>0
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from operator import itemgetter
class mgcgraph(object):
"""plot graph given a networkx graph type"""
def __init__(self, graph_input, **kwargs):
"""
Argument:
graph_input: a directed or undirected graph created with networkx
"""
# super(mgcgraph, self).__init__()
self.graph_input = graph_input
def mgcgraph_plt(self, **kwargs):
"""
Argument:
graph_in: input graph; if not provided, the graph used to initialize the object is used
axs_k: matplotlib axes
centrality_algo: string with centraility algorithm, different algorithm available for both directed and undirected graph
directed: ['degree', 'betweenness', 'eigenvector', 'harmonic', 'load', 'closeness']
undirected: ['degree', 'betweenness', 'eigenvector', 'flow_closeness', 'harmonic', 'information']
var_title: string to describe on plot tile
node_size_range: a list of 2 numeric values
node_att: node attribute available in the graph, only applicable if centrality_algo is set to None
layout_type: string represent graph layout algorithm to be used, default is 'kamada_kawai_layout', other option
['circular_layout', 'random_layout', 'shell_layout', 'spring_layout', 'spectral_layout', 'kamada_kawai_layout', 'spiral_layout']
"""
import matplotlib
# use matplotlib defaults
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
plt.style.use('default')
import warnings
warnings.filterwarnings('ignore')
from mpl_toolkits.axes_grid1 import make_axes_locatable
# default values used when argument is not provided
def_vals = {'graph_in' : self.graph_input,
'axs_k': None,
'centrality_algo': 'degree',
'var_title': None,
'node_size_range': [100, 2000],
'node_att': None,
'node_att_thresh': 0.5,
'node_att_categorical': 'normal',
'edge_att': None, # not used
'layout_type': 'kamada_kawai_layout',
'local_legend': True,
'local_colorbar': True, 'target_node': None}
for k, v in def_vals.items():
kwargs.setdefault(k, v)
graph_in = kwargs['graph_in']
axs_k = kwargs['axs_k']
centrality_algo = kwargs['centrality_algo']
var_title = kwargs['var_title']
node_size_range = kwargs['node_size_range']
node_att = kwargs['node_att']
node_att_thresh = kwargs['node_att_thresh']
node_att_categorical = kwargs['node_att_categorical']
edge_att = kwargs['edge_att'] # for future
layout_type = kwargs['layout_type']
local_legend = kwargs['local_legend']
local_colorbar = kwargs['local_colorbar']
target_node = kwargs['target_node']
if node_att_thresh is None:
node_att_thresh = 0.3
if node_att_categorical is None:
node_att_categorical = 'normal'
# function to scale a list to min-max range
def norm_list2(ls, rangex):
return [((rangex[1] - rangex[0]) * ((float(i) - min(ls))/(np.finfo(np.double).eps + max(ls) - min(ls)))) + rangex[0] for i in ls]
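# e.g. (hypothetical values) norm_list2([0, 5, 10], [100, 2000]) -> roughly
# [100.0, 1050.0, 2000.0]: the list is rescaled linearly onto the requested range.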
# print("edge {}, node {}".format(graph_in.number_of_edges(), graph_in.number_of_nodes()))
if graph_in.number_of_edges() > 0:
if edge_att is None:
# hard coded to edge_betweenness for graph edge
dict_edge_centrality = {'edge_flow': nx.edge_current_flow_betweenness_centrality,
'edge_betweenness':nx.edge_betweenness_centrality}
edge_centr_attr = 'edge_betweenness'
# Set edge attribute
edges_x, edge_color_x = zip(*dict_edge_centrality[edge_centr_attr](graph_in).items())
edge_width_x = edge_color_x
# scale data
# edge_color_x = norm_list2(edge_color_x, [1, 10])
edge_color_x = [round(i, 3) for i in edge_color_x]
edge_width_x = norm_list2(edge_width_x, [1, 2])
dict_labels = dict(zip(edges_x, edge_color_x))
elif edge_att is not None:
dict_edge_x = nx.get_edge_attributes(graph_in, edge_att)
edges_x, edge_color_x = zip(*dict_edge_x.items())
edge_width_x = edge_color_x
edge_color_x = [round(i, 3) for i in edge_color_x]
edge_width_x = norm_list2(edge_width_x, [1, 2])
dict_labels = dict(zip(edges_x, edge_color_x))
else:
edge_color_x = 1
edge_width_x = 1
dict_class_inv = {'no_issue': 0,
'access_cell_edge': 1,
'resource_limit': 2,
'high_load': 3,
'access_resource': 4,
'ho_SR': 5,
'prach_interference': 6,
'uplink_interf': 7,
'normal': 8,
'signaling_load': 9,
'signaling_SR': 10,
'others': 11,
'very_high_load': 12}
# Select node centrality option
dict_ctr = {'directed': {'degree': nx.degree_centrality,
'betweenness': nx.betweenness_centrality,
'eigenvector': nx.eigenvector_centrality_numpy,
'harmonic': nx.harmonic_centrality,
'load': nx.load_centrality,
'closeness': nx.closeness_centrality},
'undirected': {'degree': nx.degree_centrality,
'betweenness': nx.betweenness_centrality,
'eigenvector': nx.eigenvector_centrality_numpy,
'flow_closeness': nx.current_flow_closeness_centrality,
'harmonic': nx.harmonic_centrality,
'information': nx.information_centrality}}
# check if the graph is directed or undirected
if nx.is_directed(graph_in):
dict_centrality = dict_ctr.get('directed')
else:
dict_centrality = dict_ctr.get('undirected')
if centrality_algo is None:
dict_tmp = nx.get_node_attributes(graph_in, node_att)
ls_tmp = list(dict_tmp.values())
if type(ls_tmp[0]) == float: # if the node attribute is numeric (float)
sorted_node_attri = sorted(dict_tmp.items(), key=itemgetter(1), reverse=True)
ls_node1 = [i[0] for i in sorted_node_attri if i[1] > node_att_thresh]
ls_node2 = list(set(graph_in.nodes()) - set(ls_node1))
# Set plot node size and color
node_size_x1 = [dict_tmp[v] for v in ls_node1]
node_color_x1 = [dict_tmp[v] for v in ls_node1]
node_size_x2 = [dict_tmp[v] for v in ls_node2]
node_color_x2 = [dict_tmp[v] for v in ls_node2]
# Scale a list with min and max
node_size_x1 = [(size_i + 5000) for size_i in node_size_x1]
node_size_x1 = norm_list2(node_size_x1, node_size_range)
node_size_x2 = [(size_i + 50) for size_i in node_size_x2]
node_size_x2 = norm_list2(node_size_x2, [i/2 for i in node_size_range])
# replace nan with a value
node_size_x1 = [20 if np.isnan(i) else i for i in node_size_x1]
node_size_x2 = [20 if np.isnan(i) else i for i in node_size_x2]
node_color_x1 = [min(node_color_x1) if np.isnan(i) else i for i in node_color_x1]
node_color_x2 = [min(node_color_x2) if np.isnan(i) else i for i in node_color_x2]
else:
dict_class_color = dict(zip(set(ls_tmp), range(1, len(ls_tmp) + 1))) # map categorical to integer
ls_node1 = [k for k, v in dict_tmp.items() if v == node_att_categorical]
ls_node2 = list(set(graph_in.nodes()) - set(ls_node1))
node_size_x1 = [node_size_range[1]] * len(ls_node1)
node_color_x1 = [dict_class_color[dict_tmp[n]] for n in ls_node1]
node_size_x2 = [node_size_range[0]] * len(ls_node1)
node_color_x2 = [dict_class_color[dict_tmp[n]] for n in ls_node2]
# print(node_size_x1, node_size_x2)
else:
# Set node attribute with node centrality
centrality_type_dict = dict_centrality[centrality_algo](graph_in)
# Assign each to an attribute in your network
nx.set_node_attributes(graph_in, centrality_type_dict, centrality_algo)
# Set plot node size and color
node_size_x = [size_i for size_i in centrality_type_dict.values()]
node_color_x = [color_i for color_i in centrality_type_dict.values()]
# print(node_color_x)
# Scale a list with min and max
node_size_x = norm_list2(node_size_x, node_size_range)
# node_color_x = norm_list2(node_color_x, [0, 100])
# Set layout
def rescl_layout(graph_in):
# use latitude and longitude as layout position
ls_node_pos = list(nx.get_node_attributes(graph_in, 'pos').values())
if len(ls_node_pos) > 0:
posx = nx.rescale_layout(np.array(ls_node_pos))# using gps position
layout_d = dict(zip(graph_in.nodes(), posx))
else:
layout_d = nx.random_layout(graph_in)
return layout_d
def pydot_neato(graph_in):
return nx.nx_pydot.pydot_layout(graph_in, 'neato')
def pydot_dot(graph_in):
return nx.nx_pydot.pydot_layout(graph_in, 'dot')
def pydot_twopi(graph_in):
return nx.nx_pydot.pydot_layout(graph_in, 'twopi')
def pydot_fdp(graph_in):
return nx.nx_pydot.pydot_layout(graph_in, 'fdp')
def pydot_circo(graph_in):
return nx.nx_pydot.pydot_layout(graph_in, 'circo')
dict_layout = {'circular_layout': nx.circular_layout,
'random_layout': nx.random_layout,
'shell_layout': nx.shell_layout,
'spring_layout': nx.spring_layout,
'spectral_layout': nx.spectral_layout,
'kamada_kawai_layout': nx.kamada_kawai_layout,
'spiral_layout': nx.spiral_layout,
'rescale_layout':rescl_layout,
'pydot_neato': pydot_neato,
'pydot_dot': pydot_dot,
'pydot_twopi': pydot_twopi,
'pydot_fdp': pydot_fdp,
'pydot_circo': pydot_circo}
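# Note (assumption): the pydot_* entries above require Graphviz and the pydot
# bindings to be installed, while the pure-networkx layouts need no external
# tools; e.g. a reproducible spring layout could be obtained with
#     nx.spring_layout(graph_in, seed=42)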
layout_x = dict_layout[layout_type](graph_in)
node_cmapx = plt.cm.autumn
edge_cmapx = plt.cm.cool
# draw node label
if target_node is None:
labelsx = dict(zip(graph_in.nodes(), graph_in.nodes()))
else:
labelsx = {target_node: dict(zip(graph_in.nodes(), graph_in.nodes())).get(target_node)}
if axs_k is None:
fig, axs_k = plt.subplots(figsize=(18,16))
if centrality_algo is None:
if len(node_size_x2) != 0:
plt_node = nx.draw_networkx_nodes(graph_in,
pos = layout_x,
node_size = node_size_x2,
node_color = node_color_x2,
ax = axs_k,
alpha = 0.2,
with_labels = False,
cmap = node_cmapx,
nodelist = ls_node2, node_shape='o'
)
plt_node = nx.draw_networkx_nodes(graph_in,
pos = layout_x,
node_size = node_size_x1,
node_color = node_color_x1,
ax = axs_k,
alpha = 0.7,
with_labels = False,
cmap = node_cmapx,
nodelist = ls_node1, node_shape='s'
)
# nx.draw_networkx_labels(graph_in, pos = layout_x, labels = labelsx, font_color='r')
else:
plt_node = nx.draw_networkx_nodes(graph_in,
pos = layout_x,
node_size = node_size_x1,
node_color = node_color_x1,
ax = axs_k,
alpha = 0.7,
with_labels = False,
cmap = node_cmapx,
nodelist = ls_node1, node_shape='s'
)
else:
plt_node = nx.draw_networkx_nodes(graph_in,
pos = layout_x,
node_size = node_size_x,
node_color = node_color_x,
ax = axs_k,
alpha = 0.7,
with_labels = False,
cmap = node_cmapx
)
if len(graph_in) > 1:
plt_edge = nx.draw_networkx_edges(graph_in,
pos = layout_x,
ax = axs_k,
alpha = 1,
connectionstyle = 'arc3,rad=0.3',
edge_color = edge_color_x,
width = edge_width_x,
edge_cmap=edge_cmapx)
else:
plt_edge = nx.draw_networkx_edges(graph_in,
pos = layout_x,
ax = axs_k,
alpha = 0.2,
connectionstyle = 'arc3,rad=0.3',
# edge_color = edge_color_x,
# width = edge_width_x,
edge_cmap=edge_cmapx)
# show label
nx.draw_networkx_labels(graph_in, pos = layout_x, labels = labelsx, font_color='b', ax = axs_k,)
# nx.draw_networkx_edge_labels(graph_in, layout_x, ax = axs_k, edge_labels=dict_labels)
axs_k.set_title('nodeid: {0}, node count: {1} edge count: {2}'.format(var_title, graph_in.number_of_nodes(), graph_in.number_of_edges()), fontsize = 14, loc = 'right')
if local_colorbar:
# create new axes on the right and on the top of the current axes
if centrality_algo is None:
divider = make_axes_locatable(axs_k)
# below height and pad are in inches
cax2 = divider.append_axes("top", 0.2, pad=0.3) # colorbar for node
plt_node.set_clim(min(node_color_x1 + node_color_x2), max(node_color_x1 + node_color_x2))
cb_node = plt.colorbar(plt_node, cax2, orientation='horizontal').set_label(label="node " + str(node_att if centrality_algo is None else centrality_algo), size=12)
cax2.xaxis.set_ticks_position("top")
cax2.xaxis.set_label_position("top")
else:
divider = make_axes_locatable(axs_k)
# below height and pad are in inches
cax2 = divider.append_axes("top", 0.2, pad=0.3) # colorbar for node value_when_true if | |
of yesterday
edt = dtm - datetime.timedelta(seconds=1)
# Subtract one day to obtain the beginning of yesterday
bdt = dtm - datetime.timedelta(days=1)
# Attempt to count the user posts
pcount = CountUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = '{:d} posts made between ({:s}) and ({:s})'.format(pcount, str(bdt), str(edt))
# Command: count posts from a certain amount of hours
elif scmd == 'hour':
# The default number of hours to go back
hsub = 1
# Was there a specific number of hours to go back?
if sections > 3:
try:
hsub = max(min(23, int(cmd_list[3])), 1)
except:
pass
# Grab the current time
edt = datetime.datetime.now().replace(microsecond=0)
# Subtract the specified hours from the current time
bdt = edt - datetime.timedelta(hours=int(hsub))
# Attempt to count the user posts
pcount = CountUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = '{:d} posts made between ({:s}) and ({:s})'.format(pcount, str(bdt), str(edt))
else:
# Generate the reply message to warn the user
reply = 'Unknown Range: {:s}'.format(scmd)
# Now that we're here, let's execute the resulted query
try:
cursor.execute("UPDATE mybb_dvz_shoutbox SET text = '{:s}', modified = {:d} WHERE id = {:d}".format(reply, int(time.time()), shoutid))
except Exception as e:
# Display information
print('Failed to generate count: {:s}'.format(str(e)))
# Ignore the error
pass
# Move to the next command
continue
# Command: report the post count via PM
elif cmd == 'report':
# Grab the sub command, if any, or default to 'current'
scmd = cmd_list[2] if sections > 2 else 'current'
# The success and failure messages to keep code shorter
success, failure = 'Please check your private messages', 'However, an error occurred. Please get in touch with a staff member'
# The reply message to replace the command shout
reply = None
# Command: report posts from current month
if scmd == 'current':
# Grab the current time and move to the beginning of the month
dtm = datetime.datetime.now().replace(day=1,hour=0,minute=0,second=0,microsecond=0)
# Attempt to generate a report of the user posts
result = ReportUserPostsAfter(userid, dtm)
# Now generate the reply message
reply = 'Report of posts after ({:s}) was queued. {:s}'.format(str(dtm), success if result else failure)
# Command: report posts from previous month
elif scmd == 'previous':
# Subtract 1 second from the beginning of the current month so we jump to the end of previous month
edt = datetime.datetime.now().replace(day=1,hour=0,minute=0,second=0,microsecond=0) - datetime.timedelta(seconds=1)
# Now use that to obtain the beginning of the previous month as well
bdt = edt.replace(day=1,hour=0,minute=0,second=0)
# Attempt to generate a report of the user posts
result = ReportUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = 'Report of posts between ({:s}) and ({:s}) was queued. {:s}'.format(str(bdt), str(edt), success if result else failure)
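# Worked example of the boundary arithmetic above (hypothetical clock): if now
# is 2021-03-15 10:00, the start of the current month is 2021-03-01 00:00:00,
# subtracting one second gives edt = 2021-02-28 23:59:59 and the replace() call
# turns that into bdt = 2021-02-01 00:00:00.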
# Command: report posts from today
elif scmd == 'today':
# Grab the current time and move to the beginning of the day
bdt = datetime.datetime.now().replace(hour=0,minute=0,second=0,microsecond=0)
# Grab the current time and move to the end of the day
edt = datetime.datetime.now().replace(hour=23,minute=59,second=59,microsecond=0)
# Attempt to generate a report of the user posts
result = ReportUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = 'Report of posts between ({:s}) and ({:s}) was queued. {:s}'.format(str(bdt), str(edt), success if result else failure)
# Command: report posts from yesterday
elif scmd == 'yesterday':
# Grab the current time and move to the beginning of the day
dtm = datetime.datetime.now().replace(hour=0,minute=0,second=0,microsecond=0)
# Subtract one second to obtain the end of yesterday
edt = dtm - datetime.timedelta(seconds=1)
# Subtract one day to obtain the beginning of yesterday
bdt = dtm - datetime.timedelta(days=1)
# Attempt to generate a report of the user posts
result = ReportUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = 'Report of posts between ({:s}) and ({:s}) was queued. {:s}'.format(str(bdt), str(edt), success if result else failure)
# Command: report posts from a certain amount of hours
elif scmd == 'hour':
# The default number of hours to go back
hsub = 1
# Was there a specific number of hours to go back?
if sections > 3:
try:
hsub = max(min(23, int(cmd_list[3])), 1)
except:
pass
# Grab the current time
edt = datetime.datetime.now().replace(microsecond=0)
# Subtract the specified hours from the current time
bdt = edt - datetime.timedelta(hours=int(hsub))
# Attempt to generate a report of the user posts
result = ReportUserPostsBetween(userid, bdt, edt)
# Now generate the reply message
reply = 'Report of posts between ({:s}) and ({:s}) was queued. {:s}'.format(str(bdt), str(edt), success if result else failure)
else:
# Generate the reply message to warn the user
reply = 'Unknown Range: {:s}'.format(scmd)
# Now that we're here, let's execute the resulted query
try:
cursor.execute("UPDATE mybb_dvz_shoutbox SET text = '{:s}', modified = {:d} WHERE id = {:d}".format(reply, int(time.time()), shoutid))
except Exception as e:
# Display information
print('Failed to generate report: {:s}'.format(str(e)))
# Ignore the error
pass
# Move to the next command
continue
# Command: run preliminary post count for owners and warn them to complete their posts
elif cmd == 'alert':
# Default to an empty command
scmd = ''
# Default to a non silent report
silent = False
# Is there a second parameter?
if sections > 2:
# Should we run a silent report of current month?
if cmd_list[2] == 'silent':
# Enable silent report
silent = True
# Default to current month
scmd = 'current'
# Otherwise just save whatever parameter was given
else:
scmd = cmd_list[2]
# Is there a third parameter?
if sections > 3:
# Should we run a silent report of current month?
if cmd_list[3] == 'silent':
silent = True
# Just save whatever parameter was given, if still not set
elif not scmd:
scmd = cmd_list[3]
# Default to current month
else:
scmd = 'current'
# The success and failure messages to keep code shorter
success, failure = 'Please check your private messages', 'However, an error occurred'
# The reply message to replace the command shout
reply = None
# See if the user even has the privilege to use this command
if userid not in g_ManagerID:
# Generate the warning message
reply = 'You do not have the privilege to perform such action'
# Command: report posts from current month
elif scmd == 'current':
# Grab the current time, move to the 25th day, then add 10 days to jump into the next month
edt = datetime.datetime.now().replace(day=25) + datetime.timedelta(days=10)
# Now go back to the first day and subtract one second so we have the end of current month
edt = edt.replace(day=1,hour=0,minute=0,second=0,microsecond=0) - datetime.timedelta(seconds=1)
# Grab the current time and move to the beginning of the month
bdt = datetime.datetime.now().replace(day=1,hour=0,minute=0,second=0,microsecond=0)
# Attempt to generate a report of the user posts
try:
result = AdminReport(userid, bdt, edt, silent, False)
except Exception as e:
# Display information
print('Failed generate alert: {:s}'.format(str(e)))
# Specify we failed
result = False
# Ignore error
pass
# Now generate the reply message
reply = '{:s} of posts between ({:s}) and ({:s}) was queued. {:s}'.format('Alert' if not silent else 'Silent alert', str(bdt), str(edt), success if result else failure)
# Command: report posts from previous month
elif scmd == 'previous':
# Subtract 1 second from the beginning of the current month so we jump to the end of previous month
edt = datetime.datetime.now().replace(day=1,hour=0,minute=0,second=0,microsecond=0) - datetime.timedelta(seconds=1)
# Now use that to obtain the beginning of the previous month as well
bdt = edt.replace(day=1,hour=0,minute=0,second=0)
# Attempt to generate a report of the user posts
try:
result = AdminReport(userid, bdt, edt, silent, False)
except Exception as e:
# Display information
print('Failed to generate alert: {:s}'.format(str(e)))
# Specify we failed
result = False
# Ignore error
pass
# Now generate the reply message
reply = '{:s} of posts between ({:s}) and ({:s}) was queued. {:s}'.format('Alert' if not silent else 'Silent alert', str(bdt), str(edt), success if result else failure)
else:
# Generate the reply message to warn the user
# Source: gh20s/lear - legal-api/tests/unit/services/filings/validations/test_incorporation_application.py
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite to ensure the Incorporation Application is validated correctly."""
import copy
from datetime import date
from http import HTTPStatus
import datedelta
import pytest
from freezegun import freeze_time
from registry_schemas.example_data import INCORPORATION, INCORPORATION_FILING_TEMPLATE
from legal_api.models import Business
from legal_api.services.filings import validate
from legal_api.services.filings.validations.incorporation_application import validate_roles, \
validate_parties_mailing_address
from . import lists_are_equal, create_party, create_party_address
# setup
identifier = 'NR 1234567'
now = date(2020, 9, 17)
founding_date = now - datedelta.YEAR
business = Business(identifier=identifier)
effective_date = '2020-09-18T00:00:00+00:00'
@pytest.mark.parametrize(
'test_name, delivery_region, delivery_country, mailing_region, mailing_country, expected_code, expected_msg',
[
('SUCCESS', 'BC', 'CA', 'BC', 'CA', None, None),
('FAIL_NOT_BC_DELIVERY_REGION', 'AB', 'CA', 'BC', 'CA',
HTTPStatus.BAD_REQUEST, [
{'error': "Address Region must be 'BC'.",
'path':
'/filing/incorporationApplication/offices/registeredOffice/deliveryAddress/addressRegion'},
{'error': "Address Region must be 'BC'.",
'path':
'/filing/incorporationApplication/offices/recordsOffice/deliveryAddress/addressRegion'}
]),
('FAIL_NOT_BC_MAILING_REGION', 'BC', 'CA', 'AB', 'CA',
HTTPStatus.BAD_REQUEST, [
{'error': "Address Region must be 'BC'.",
'path':
'/filing/incorporationApplication/offices/registeredOffice/mailingAddress/addressRegion'},
{'error': "Address Region must be 'BC'.",
'path':
'/filing/incorporationApplication/offices/recordsOffice/mailingAddress/addressRegion'}
]),
('FAIL_ALL_ADDRESS_REGIONS', 'WA', 'CA', 'WA', 'CA',
HTTPStatus.BAD_REQUEST, [
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/deliveryAddress/addressRegion'},
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/mailingAddress/addressRegion'},
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/deliveryAddress/addressRegion'},
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/mailingAddress/addressRegion'}
]),
('FAIL_NOT_DELIVERY_COUNTRY', 'BC', 'NZ', 'BC', 'CA',
HTTPStatus.BAD_REQUEST, [
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/deliveryAddress/addressCountry'},
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/deliveryAddress/addressCountry'}
]),
('FAIL_NOT_MAILING_COUNTRY', 'BC', 'CA', 'BC', 'NZ',
HTTPStatus.BAD_REQUEST, [
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/mailingAddress/addressCountry'},
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/mailingAddress/addressCountry'}
]),
('FAIL_ALL_ADDRESS', 'AB', 'NZ', 'AB', 'NZ',
HTTPStatus.BAD_REQUEST, [
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/deliveryAddress/addressRegion'},
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/deliveryAddress/addressCountry'},
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/mailingAddress/addressRegion'},
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/registeredOffice/mailingAddress/addressCountry'},
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/deliveryAddress/addressRegion'},
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/deliveryAddress/addressCountry'},
{'error': "Address Region must be 'BC'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/mailingAddress/addressRegion'},
{'error': "Address Country must be 'CA'.",
'path': '/filing/incorporationApplication/offices/recordsOffice/mailingAddress/addressCountry'}
])
])
def test_validate_incorporation_addresses_basic(session, test_name, delivery_region, delivery_country, mailing_region,
mailing_country, expected_code, expected_msg):
"""Assert that incorporation offices can be validated."""
f = copy.deepcopy(INCORPORATION_FILING_TEMPLATE)
f['filing']['header'] = {'name': 'incorporationApplication', 'date': '2019-04-08', 'certifiedBy': 'full name',
'email': '<EMAIL>', 'filingId': 1, 'effectiveDate': effective_date}
f['filing']['incorporationApplication'] = copy.deepcopy(INCORPORATION)
f['filing']['incorporationApplication']['nameRequest']['nrNumber'] = identifier
f['filing']['incorporationApplication']['nameRequest']['legalType'] = Business.LegalTypes.BCOMP.value
f['filing']['incorporationApplication']['contactPoint']['email'] = '<EMAIL>'
f['filing']['incorporationApplication']['contactPoint']['phone'] = '123-456-7890'
regoffice = f['filing']['incorporationApplication']['offices']['registeredOffice']
regoffice['deliveryAddress']['addressRegion'] = delivery_region
regoffice['deliveryAddress']['addressCountry'] = delivery_country
regoffice['mailingAddress']['addressRegion'] = mailing_region
regoffice['mailingAddress']['addressCountry'] = mailing_country
recoffice = f['filing']['incorporationApplication']['offices']['recordsOffice']
recoffice['deliveryAddress']['addressRegion'] = delivery_region
recoffice['deliveryAddress']['addressCountry'] = delivery_country
recoffice['mailingAddress']['addressRegion'] = mailing_region
recoffice['mailingAddress']['addressCountry'] = mailing_country
# perform test
with freeze_time(now):
err = validate(business, f)
# validate outcomes
if expected_code:
assert err.code == expected_code
assert lists_are_equal(err.msg, expected_msg)
else:
assert err is None
@pytest.mark.parametrize(
'test_name, legal_type, parties, expected_code, expected_msg',
[
(
'SUCCESS', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Completing Party', 'Incorporator']},
{'partyName': 'officer2','roles': ['Incorporator', 'Director']}
],
None, None
),
(
'FAIL_NO_COMPLETING_PARTY', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director', 'Incorporator']},
{'partyName': 'officer2','roles': ['Incorporator', 'Director']}
],
HTTPStatus.BAD_REQUEST, [{'error': 'Must have a minimum of one completing party',
'path': '/filing/incorporationApplication/parties/roles'}]
),
(
'FAIL_EXCEEDING_ONE_COMPLETING_PARTY', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Completing Party', 'Director']},
{'partyName': 'officer2','roles': ['Incorporator', 'Completing Party']}
],
HTTPStatus.BAD_REQUEST, [{'error': 'Must have a maximum of one completing party',
'path': '/filing/incorporationApplication/parties/roles'}]
),
(
'SUCCESS', 'CP',
[
{'partyName': 'officer1', 'roles': ['Completing Party', 'Director']},
{'partyName': 'officer2','roles': ['Director']},
{'partyName': 'officer3','roles': ['Director']}
],
None, None
),
(
'FAIL_NO_COMPLETING_PARTY', 'CP',
[
{'partyName': 'officer1', 'roles': ['Director']},
{'partyName': 'officer2','roles': ['Director']},
{'partyName': 'officer3','roles': ['Director']}
],
HTTPStatus.BAD_REQUEST, [{'error': 'Must have a minimum of one completing party',
'path': '/filing/incorporationApplication/parties/roles'}]
),
(
'FAIL_INVALID_PARTY_ROLE', 'CP',
[
{'partyName': 'officer1', 'roles': ['Completing Party', 'Director']},
{'partyName': 'officer2','roles': ['Director']},
{'partyName': 'officer3','roles': ['Director']},
{'partyName': 'officer3','roles': ['Incorporator']}
],
HTTPStatus.BAD_REQUEST, [{'error': 'Incorporator is an invalid party role',
'path': '/filing/incorporationApplication/parties/roles'}]
),
(
'FAIL_MINIMUM_3_DIRECTORS', 'CP',
[
{'partyName': 'officer1', 'roles': ['Completing Party', 'Director']},
{'partyName': 'officer2','roles': ['Director']}
],
HTTPStatus.BAD_REQUEST, [{'error': 'Must have a minimum of three Directors',
'path': '/filing/incorporationApplication/parties/roles'}]
)
])
def test_validate_incorporation_role(session, test_name, legal_type, parties, expected_code, expected_msg):
"""Assert that incorporation parties roles can be validated."""
f = copy.deepcopy(INCORPORATION_FILING_TEMPLATE)
f['filing']['header'] = {'name': 'incorporationApplication', 'date': '2019-04-08', 'certifiedBy': 'full name',
'email': '<EMAIL>', 'filingId': 1, 'effectiveDate': effective_date}
f['filing']['incorporationApplication'] = copy.deepcopy(INCORPORATION)
f['filing']['business']['legalType'] = legal_type
f['filing']['incorporationApplication']['nameRequest']['nrNumber'] = identifier
f['filing']['incorporationApplication']['nameRequest']['legalType'] = legal_type
f['filing']['incorporationApplication']['contactPoint']['email'] = '<EMAIL>'
f['filing']['incorporationApplication']['contactPoint']['phone'] = '123-456-7890'
base_mailing_address = f['filing']['incorporationApplication']['parties'][0]['mailingAddress']
base_delivery_address = f['filing']['incorporationApplication']['parties'][0]['deliveryAddress']
f['filing']['incorporationApplication']['parties'] = []
# populate party and party role info
for index, party in enumerate(parties):
mailing_addr = create_party_address(base_address=base_mailing_address)
delivery_addr = create_party_address(base_address=base_delivery_address)
p = create_party(party['roles'], index + 1, mailing_addr, delivery_addr)
f['filing']['incorporationApplication']['parties'].append(p)
# perform test
with freeze_time(now):
err = validate(business, f)
# validate outcomes
if expected_code:
assert err.code == expected_code
assert lists_are_equal(err.msg, expected_msg)
else:
assert err is None
@pytest.mark.parametrize(
'test_name, legal_type, parties, expected_msg',
[
('SUCCESS', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}}
], None
),
('FAIL_INVALID_STREET', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director'],
'mailingAddress': {'street': None, 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}},
], [{'error': 'Person 1: Mailing address streetAddress None is invalid',
'path': '/filing/incorporationApplication/parties/1/mailingAddress/streetAddress/None/' }]
),
('FAIL_INVALID_CITY', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director'],
'mailingAddress': {'street': '123 St', 'city': None, 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}},
], [{'error': 'Person 1: Mailing address addressCity None is invalid',
'path': '/filing/incorporationApplication/parties/1/mailingAddress/addressCity/None/' }]
),
('FAIL_INVALID_COUNTRY', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director'],
'mailingAddress': {'street': '123 St', 'city': 'Vancouver', 'country': None,
'postalCode': 'h0h0h0', 'region': 'BC'}},
], [{'error': 'Person 1: Mailing address addressCountry None is invalid',
'path': '/filing/incorporationApplication/parties/1/mailingAddress/addressCountry/None/' }]
),
('FAIL_INVALID_POSTAL_CODE', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director'],
'mailingAddress': {'street': '123 St', 'city': 'Vancouver', 'country': 'CA',
'postalCode': None, 'region': 'BC'}},
], [{'error': 'Person 1: Mailing address postalCode None is invalid',
'path': '/filing/incorporationApplication/parties/1/mailingAddress/postalCode/None/' }]
),
('FAIL_INVALID_REGION', 'BEN',
[
{'partyName': 'officer1', 'roles': ['Director'],
'mailingAddress': {'street': '123 St', 'city': 'Vancouver', 'country': 'CA', 'postalCode': 'h0h0h0', 'region': None}},
], [{'error': 'Person 1: Mailing address addressRegion None is invalid',
'path': '/filing/incorporationApplication/parties/1/mailingAddress/addressRegion/None/' }]
),
('SUCCESS', 'CP',
[
{
'partyName': 'officer1', 'roles': ['Completing Party', 'Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}
},
{
'partyName': 'officer2', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'AB'}
},
{
'partyName': 'officer3', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'MB'}
},
], None
),
('FAIL_ONE_IN_REGION_BC', 'CP',
[
{
'partyName': 'officer1', 'roles': ['Completing Party', 'Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'AB'}
},
{
'partyName': 'officer2', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'AB'}
},
{
'partyName': 'officer3', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdfd', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'MB'}
},
], [{'error': 'Must have minimum of one BC mailing address',
'path': '/filing/incorporationApplication/parties/mailingAddress' }]
),
('FAIL_MAJORITY_IN_COUNTRY_CA', 'CP',
[
{
'partyName': 'officer1', 'roles': ['Completing Party', 'Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'US',
'postalCode': 'h0h0h0', 'region': 'AB'}
},
{
'partyName': 'officer2', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'JP',
'postalCode': 'h0h0h0', 'region': 'AICHI'}
},
{
'partyName': 'officer3', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}
}
], [{'error': 'Must have majority of mailing addresses in Canada',
'path': '/filing/incorporationApplication/parties/mailingAddress' }]
),
('FAIL_MAJORITY_IN_COUNTRY_CA_50_percent', 'CP',
[
{
'partyName': 'officer1', 'roles': ['Completing Party', 'Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'US',
'postalCode': 'h0h0h0', 'region': 'AB'}
},
{
'partyName': 'officer2', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'JP',
'postalCode': 'h0h0h0', 'region': 'AICHI'}
},
{
'partyName': 'officer3', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}
},
{
'partyName': 'officer4', 'roles': ['Director'],
'mailingAddress': {'street': '123 st', 'city': 'Vancouver', 'country': 'CA',
'postalCode': 'h0h0h0', 'region': 'BC'}
}
], [{'error': 'Must have majority of mailing addresses in Canada',
'path': '/filing/incorporationApplication/parties/mailingAddress' }]
),
('PASS_MAJORITY_IN_COUNTRY_CA', 'CP',
[
{
'partyName': 'officer1', 'roles': ['Completing Party', 'Director'],
'mailingAddress': {'street': '123 st', 'city': 'asdf', 'country': 'US',
'postalCode': 'h0h0h0', 'region':
from copy import deepcopy
from geochem_dataset.excel import Dataset
from geochem_dataset.excel.dataclasses import Survey, Sample, Result
from geochem_dataset.excel.exceptions import (
IntegrityError,
)
import numpy as np
import pandas as pd
import pytest
from helpers.utils import xlref, xlrowref, xlcolref
"""
|----------|-------------|-----|-------------------|-----------------|---------------|-----|---------------|
| SAMPLE | SUBSAMPLE | ... | SUB...SUBSAMPLE | METADATA_TYPE | result_type_1 | ... | result_type_y |
|----------|-------------|-----|-------------------|-----------------|---------------|-----|---------------|
| | | | | metadata_type_1 | metadata_1_1 | ... | metadata_1_y |
| | | | | ... | ... | ... | ... |
| | | | | metadata_type_z | metadata_z_1 | ... | metadata_z_y |
|----------|-------------|-----|-------------------|-----------------|---------------|-----|---------------|
| sample_1 | subsample_1 | ... | sub...subsample_1 | | result_1_1 | ... | result_1_y |
| ... | ... | ... | ... | | ... | ... | ... |
| sample_x | subsample_x | ... | sub...subsample_x | | result_x_1 | ... | result_x_y |
|----------|-------------|-----|-------------------|-----------------|---------------|-----|---------------|
"""
SAMPLES_FILE_NAME = 'SAMPLES.xlsx'
SAMPLES_SHEET_NAME = 'SAMPLES'
SURVEYS = [
Survey('2011, Till sampling survey, Hall Peninsula. Canada-Nunavut Geoscience Office', 'Canada-Nunavut Geoscience Office', 2011, 2013, '<NAME>', 'A test description', 1000),
]
SAMPLES = [
Sample(SURVEYS[0], '11TIAT001', '11TIAT001A', '11TIAT001A01', None, None, 64.010103, -67.351092, None, None, None, None, None, 'Till', None),
Sample(SURVEYS[0], '11TIAT024', '11TIAT024A', '11TIAT024A01', None, None, 64.472825, -67.721319, None, None, None, None, None, 'Till', None),
Sample(SURVEYS[0], '12TIAT138', '12TIAT138A', '12TIAT138A01', None, None, 64.209300, -67.011316, None, None, None, None, None, 'Till', None),
Sample(SURVEYS[0], '12TIAT139', '12TIAT139A', '12TIAT139A01', None, None, 64.334217, -67.087329, None, None, None, None, None, 'Till', None),
]
BULK_FILE_NAME = 'BULK.xlsx'
BULK_DATA = {
'BULK1': {
'subsamples': [
(SAMPLES[0], '11TIAT001'),
(SAMPLES[1], '11TIAT024'),
],
'result_types': [
'Soil_Munsell', 'Colour_Description', 'W_peb_bulk'
],
'metadata_types': [
'Method', 'Threshold', 'Unit', 'Fraction_min', 'Fraction_max', 'Year', 'Lab_analysis'
],
'metadata_values': [
('SP64 Series X-Rite Spectrophotometer', None, None, '0', '2mm', '2013', 'GSC Sedimentology'),
('SP64 Series X-Rite Spectrophotometer', None, None, '0', '2mm', '2013', 'GSC Sedimentology'),
('laser particle size analyzer and Camsizer & Lecotrac LT100', None, 'pct', '0', '30cm', '2013', 'GSC Sedimentology'),
],
'results': [
# subsample_idx, result_type_idx, metadata_values_idx, result
(0, 0, 0, '2.5Y 6/4'),
(0, 1, 1, 'light yellowish brown'),
(0, 2, 2, '7.256'),
(1, 0, 0, '10YR 5/4'),
(1, 1, 1, 'yellowish brown'),
(1, 2, 2, '33.538'),
]
},
'BULK2': {
'subsamples': [
(SAMPLES[2], '12TIAT138'),
(SAMPLES[3], '12TIAT139'),
],
'result_types': [
'Soil_Munsell', 'Colour_Description', 'W_peb_bulk'
],
'metadata_types': [
'Method', 'Threshold', 'Unit', 'Fraction_min', 'Fraction_max', 'Year', 'Lab_analysis'
],
'metadata_values': [
('SP64 Series X-Rite Spectrophotometer', None, None, '0', '2mm', '2013', 'GSC Sedimentology'),
('SP64 Series X-Rite Spectrophotometer', None, None, '0', '2mm', '2013', 'GSC Sedimentology'),
('laser particle size analyzer and Camsizer & Lecotrac LT100', None, 'pct', '0', '30cm', '2013', 'GSC Sedimentology'),
],
'results': [
# subsample_idx, result_type_idx, metadata_values_idx, result
(0, 0, 0, '2.5Y 6/4'),
(0, 1, 1, 'light yellowish brown'),
(0, 2, 2, '12.699'),
(1, 0, 0, '2.5Y 5/4'),
(1, 1, 1, 'light olive brown'),
(1, 2, 2, '22.173'),
]
}
}
ERROR_MESSAGES = {
'sample_heading_missing': 'Cell {cell} of worksheet {workbook}::{worksheet} must be SAMPLE',
'subsample_heading_missing': 'Cell {cell} of worksheet {workbook}::{worksheet} must be SUBSAMPLE',
'metadata_type_heading_missing': 'Cell {cell} of worksheet {workbook}::{worksheet} must be METADATA_TYPE',
'region_left_of_metadata_types_not_empty': 'Region left of metadata types in worksheet {workbook}::{worksheet} is not empty',
'metadata_type_missing': 'Metadata type is missing in cell {cell} of worksheet {workbook}::{worksheet}',
'metadata_type_duplicate': 'Metadata type in cell {cell} of worksheet {workbook}::{worksheet} is a duplicate',
'result_type_metadata_pair_duplicate': 'Result type-metadata pair in column {column} of worksheet {workbook}::{worksheet} is a duplicate',
'subsample_values_missing': 'Missing value(s) for subsample in row {row} of worksheet {workbook}::{worksheet}',
'sample_does_not_exist': 'Sample in cell {cell} of worksheet {workbook}::{worksheet} does not exist',
'subsample_duplicate': 'Subsample in row {row} of worksheet {workbook}::{worksheet} is a duplicate',
'result_type_missing': 'Result type in cell {cell} of worksheet {workbook}::{worksheet} is missing'
}
def build_expected_results(data):
expected_results = []
for sheet_name in data:
subsamples = data[sheet_name]['subsamples']
result_types = data[sheet_name]['result_types']
metadata_types = data[sheet_name]['metadata_types']
metadata_value_tuples = data[sheet_name]['metadata_values']
for subsample_idx, result_type_idx, metadata_values_idx, result_value in data[sheet_name]['results']:
sample = subsamples[subsample_idx][0]
subsample = tuple(subsamples[subsample_idx][1:])
result_type = result_types[result_type_idx]
metadata_values = [] if metadata_values_idx is None else metadata_value_tuples[metadata_values_idx]
metadata = frozenset(x for x in zip(metadata_types, metadata_values) if x[1] is not None)
result = Result(sample, subsample, result_type, metadata, result_value)
expected_results.append(result)
return expected_results
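# Example (illustrative): metadata_types ('Method', 'Unit') paired with values
# ('X-Rite', None) collapses to frozenset({('Method', 'X-Rite')}) above, since
# pairs whose value is None are dropped before the Result is built.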
class TestBulk:
def test_bulk(self, dataset_path):
# Build expected
expected_results = build_expected_results(BULK_DATA)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_with_spaces_at_end_of_subsample(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df.iloc[8, 0] = df.iloc[8, 0] + ' ' # SAMPLE column
df.iloc[8, 1] = df.iloc[8, 1] + ' ' # SUBSAMPLE column
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected
expected_results = build_expected_results(BULK_DATA)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_without_bulk(self, dataset_path):
# Modify
bulk_path = dataset_path / BULK_FILE_NAME
bulk_path.unlink()
# Assert
with Dataset(dataset_path) as dataset:
with pytest.raises(AttributeError) as excinfo:
dataset.analysis_bulk_results
# Test with no items on various combinations of axes
def test_bulk_with_no_subsamples(self, dataset_path):
# Modify file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = df.iloc[:8, :] # Omit all subsample rows
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected
new_bulk_data = deepcopy(BULK_DATA)
for sheet_name in new_bulk_data:
new_bulk_data[sheet_name]['subsamples'] = []
new_bulk_data[sheet_name]['results'] = []
expected_results = build_expected_results(new_bulk_data)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_with_no_metadata_types(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = df.iloc[[0, 8, 9], :] # Omit all metadata type rows
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected results
new_bulk_data = deepcopy(BULK_DATA)
for sheet_name in new_bulk_data:
new_bulk_data[sheet_name]['metadata_types'] = []
new_bulk_data[sheet_name]['metadata_values'] = []
for idx, result in enumerate(new_bulk_data[sheet_name]['results']):
result = list(result)
result[2] = None
new_bulk_data[sheet_name]['results'][idx] = result
expected_results = build_expected_results(new_bulk_data)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert results == expected_results
def test_bulk_with_no_result_types(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = df.iloc[:, :3] # Keep only the first three columns, omitting all result type columns
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected results
new_bulk_data = deepcopy(BULK_DATA)
for sheet_name in new_bulk_data:
new_bulk_data[sheet_name]['result_types'] = []
new_bulk_data[sheet_name]['results'] = []
expected_results = build_expected_results(new_bulk_data)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_with_no_subsamples_and_metadata_types(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = pd.DataFrame([df.iloc[0]], columns=list(range(len(df.iloc[0]))))
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected results
new_bulk_data = deepcopy(BULK_DATA)
for sheet_name in new_bulk_data:
new_bulk_data[sheet_name]['subsamples'] = []
new_bulk_data[sheet_name]['metadata_types'] = []
new_bulk_data[sheet_name]['results'] = []
expected_results = build_expected_results(new_bulk_data)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_with_no_subsamples_and_result_types(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = df.iloc[:8, :3]
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected results
new_bulk_data = deepcopy(BULK_DATA)
for sheet_name in new_bulk_data:
new_bulk_data[sheet_name]['subsamples'] = []
new_bulk_data[sheet_name]['result_types'] = []
new_bulk_data[sheet_name]['results'] = []
expected_results = build_expected_results(new_bulk_data)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_with_no_metadata_types_and_result_types(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = df.iloc[[0, 8, 9], :3]
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected results
new_bulk_data = deepcopy(BULK_DATA)
for sheet_name in new_bulk_data:
new_bulk_data[sheet_name]['metadata_types'] = []
new_bulk_data[sheet_name]['result_types'] = []
new_bulk_data[sheet_name]['results'] = []
expected_results = build_expected_results(new_bulk_data)
# Assert
with Dataset(dataset_path) as dataset:
results = list(dataset.analysis_bulk_results)
assert set(dataset.analysis_bulk_results) == set(expected_results)
def test_bulk_with_no_subsamples_metadata_types_and_result_types(self, dataset_path):
# Modify bulk file
bulk_path = dataset_path / BULK_FILE_NAME
dfs = {
sheet_name: pd.read_excel(bulk_path, header=None, sheet_name=sheet_name)
for sheet_name in BULK_DATA
}
with pd.ExcelWriter(bulk_path) as writer:
for sheet_name, df in dfs.items():
df = pd.DataFrame([df.iloc[0, :3]], columns=list(range(len(df.iloc[0, :3]))))
df.to_excel(writer, sheet_name=sheet_name, index=False, header=False)
# Build expected results
new_bulk_data = deepcopy(BULK_DATA)
zStride[3] = zSize[0] * zSize[1] * zSize[2];
groups = %(grp)s;
fdimension = dimension + (groups != 1);
// Create conv gradInput primitive
CHECK_ERR( dnnGroupsConvolutionCreateBackwardData_%(precision)s(&pConvolutionBwdData, NULL,
dnnAlgorithmConvolutionDirect, groups, dimension, imageSize,
zSize, weightSize, convStride, convPadding, dnnBorderZeros), err );
}
if (NULL == weight_internal_layout) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&weight_internal_layout,
pConvolutionBwdData, dnnResourceFilter), err );
}
if (NULL == image_internal_layout) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&image_internal_layout,
pConvolutionBwdData, dnnResourceDiffSrc), err );
}
if (NULL == pConvolutionFwd) {
// Create conv forward primitive
CHECK_ERR( dnnGroupsConvolutionCreateForward_%(precision)s(&pConvolutionFwd, NULL,
dnnAlgorithmConvolutionDirect, groups, dimension, imageSize,
zSize, weightSize, convStride, convPadding, dnnBorderZeros), err );
}
if(NULL == fwd_weight_internal_layout) {
CHECK_ERR(dnnLayoutCreateFromPrimitive_%(precision)s(&fwd_weight_internal_layout,
pConvolutionFwd, dnnResourceFilter), err );
}
if ( !(%(imagegrad)s)) {
%(imagegrad)s = (PyArrayObject*)PyArray_ZEROS(PyArray_NDIM(%(image)s),
PyArray_DIMS(%(image)s),
PyArray_TYPE(%(image)s),
0);
if (NULL == %(imagegrad)s) {
PyErr_Format(PyExc_RuntimeError,
"conv_gradInput: Failed to allocate image of %%lld x %%lld x %%lld x %%lld",
(long long)(PyArray_DIMS(%(image)s))[0], (long long)(PyArray_DIMS(%(image)s))[1],
(long long)(PyArray_DIMS(%(image)s))[2], (long long)(PyArray_DIMS(%(image)s))[3]);
%(fail)s
}
}
//weight use its own buffer
weight_buf = (%(dtype)s*)PyArray_DATA(%(weight)s);
//get internal layout for gradz from previous Op
gradz_internal_layout = ((dnnLayout_t*)PyArray_DATA(%(gradz)s))[0];
//get internal buffer for gradz from previous op
gradz_buf = ((void **)PyArray_DATA(%(gradz)s))[1];
conv_res[dnnResourceDiffDst] = gradz_buf;
#if __SUPPORT_USER_PARAMS__
if(NULL == weight_usr_layout) {
CHECK_ERR( dnnLayoutCreate_%(precision)s(&weight_usr_layout, fdimension, weightSize, weightStride), err );
}
if (weight_to_internal) {
if(NULL == weight_buf_tmp) {
CHECK_ERR( dnnAllocateBuffer_%(precision)s((void**)&weight_buf_tmp, weight_internal_layout), err );
}
CHECK_ERR( dnnConversionExecute_%(precision)s(weight_to_internal, weight_buf, weight_buf_tmp), err );
} else {
weight_buf_tmp = weight_buf;
}
#else
if (1 == first_run) {
if (!dnnLayoutCompare_%(precision)s(fwd_weight_internal_layout, weight_internal_layout)) {
if(NULL == bwdd_weight_to_bwdd_internal) {
CHECK_ERR( dnnConversionCreate_%(precision)s(&bwdd_weight_to_bwdd_internal, fwd_weight_internal_layout, weight_internal_layout), err );
}
}
}
if (bwdd_weight_to_bwdd_internal) {
if(NULL == weight_buf_tmp) {
CHECK_ERR( dnnAllocateBuffer_%(precision)s((void**)&weight_buf_tmp, weight_internal_layout), err );
}
CHECK_ERR( dnnConversionExecute_%(precision)s(bwdd_weight_to_bwdd_internal, weight_buf, weight_buf_tmp), err );
} else {
weight_buf_tmp = weight_buf;
}
#endif
conv_res[dnnResourceFilter] = weight_buf_tmp;
//Allocate internal buffer for imagegrad data
if (NULL == image_buf) {
CHECK_ERR( dnnAllocateBuffer_%(precision)s((void**)&image_buf, image_internal_layout), err );
}
conv_res[dnnResourceDiffSrc] = image_buf;
//Execute convolution gradInput pass
CHECK_ERR( dnnExecute_%(precision)s(pConvolutionBwdData, (void**)conv_res), err );
//get image_internal_layout from forward pass, pass the data buffer match previous layout.
image_internal_layout_from_previous = ((dnnLayout_t*)PyArray_DATA(%(image)s))[0];
//image int2int cvt
if (1 == first_run) {
if (!dnnLayoutCompare_%(precision)s(image_internal_layout, image_internal_layout_from_previous)) {
if (NULL == internal_to_internal_image) {
CHECK_ERR( dnnConversionCreate_%(precision)s(&internal_to_internal_image, image_internal_layout, image_internal_layout_from_previous), err );
}
}
}
if (internal_to_internal_image) {
if (NULL == image_buf_to_previous) {
CHECK_ERR( dnnAllocateBuffer_%(precision)s((void**)&image_buf_to_previous, image_internal_layout_from_previous), err );
}
CHECK_ERR( dnnConversionExecute_%(precision)s(internal_to_internal_image, image_buf, image_buf_to_previous), err );
} else {
image_buf_to_previous = image_buf;
}
((dnnLayout_t*)PyArray_DATA(%(imagegrad)s))[0] = image_internal_layout_from_previous;
((void**)PyArray_DATA(%(imagegrad)s))[1] = image_buf_to_previous;
first_run = 0;
""" % sub
return ccode
class ConvGradWeights(MKLConvBase):
__props__ = ('imshp', 'kshp', 'border_mode', 'subsample', 'filter_flip', 'filter_dilation')
def __init__(self, imshp=None, kshp=None, border_mode='valid', subsample=(1, 1), filter_flip=False, filter_dilation=(1, 1)):
super(ConvGradWeights, self).__init__(imshp=imshp, kshp=kshp, border_mode=border_mode, subsample=subsample)
self.filter_flip = filter_flip
self.filter_dilation = filter_dilation
def make_node(self, image, weight, gradz, bias=None):
image = as_tensor_variable(image)
weight = as_tensor_variable(weight)
gradz = as_tensor_variable(gradz)
if image.type.ndim != 4:
raise TypeError('image must be 4D tensor')
if weight.type.ndim not in [4, 5]:
raise TypeError('weight must be 4D or 5D tensor')
if gradz.type.ndim != 4:
raise TypeError('gradz must be 4D tensor')
if weight.type.ndim == 4:
weightbt = [gradz.type.broadcastable[1], image.type.broadcastable[1], False, False]
else:
weightbt = [False, gradz.type.broadcastable[1], image.type.broadcastable[1], False, False]
dtype = image.type.dtype
if bias is not None:
bias = as_tensor_variable(bias)
inputs = [image, weight, gradz, bias]
biasbt = [gradz.type.broadcastable[1]]
outputs = [TensorType(dtype, weightbt)(), TensorType(dtype, biasbt)()]
else:
inputs = [image, weight, gradz]
outputs = [TensorType(dtype, weightbt)()]
return Apply(self, inputs, outputs)
def c_code(self, node, name, inp, out_, sub):
if len(inp) > 3:
image, weight, gradz, bias = inp
weightgrad, biasgrad = out_
else:
image, weight, gradz = inp
bias = None
weightgrad, = out_
if self.imshp is None:
imshp = node.inputs[0].shape
else:
imshp = self.imshp
in_n, in_c, in_h, in_w = imshp
if self.kshp is None:
kshp = node.inputs[1].shape
else:
kshp = self.kshp
if node.inputs[1].type.ndim == 5:
grp, k_n, k_c, k_h, k_w = kshp
tshp = [kshp[1] * kshp[0], kshp[2] * kshp[0], kshp[3], kshp[4]]
assert in_c == k_c * grp
else:
k_n, k_c, k_h, k_w = kshp
grp = 1
tshp = [kshp[0], kshp[1], kshp[2], kshp[3]]
assert in_c == k_c
if bias is not None:
sub['bias'] = bias
sub['biasgrad'] = biasgrad
withBias = 1
else:
withBias = 0
sub['withBias'] = withBias
outshp = get_conv_output_shape(imshp, tshp, self.border_mode, self.subsample)
o_n, o_c, o_h, o_w = outshp
dH, dW = self.subsample
if self.border_mode == 'valid':
padH, padW = (0, 0)
elif self.border_mode == 'full':
padH, padW = ((k_h - 1), (k_w - 1))
elif self.border_mode == 'half':
padH, padW = ((k_h / 2), (k_w / 2))
elif isinstance(self.border_mode, tuple):
padH, padW = self.border_mode
else:
raise ValueError("border_mode must have two elements")
sub['image'] = image
sub['weight'] = weight
sub['weightgrad'] = weightgrad
sub['gradz'] = gradz
if node.inputs[0].dtype == "float32":
sub['precision'] = 'F32'
sub['dtype'] = 'float'
elif node.inputs[0].dtype == "float64":
sub['precision'] = 'F64'
sub['dtype'] = 'double'
sub.update(locals())
if bias is None:
sub['bias'] = 'NULL'
sub['biasgrad'] = 'NULL'
ccode = """
////bwdfilter related
if (NULL == pConvolutionBwdFilter) {
convStride[0] = %(dW)s;
convStride[1] = %(dH)s;
convPadding[0] = -%(padW)s;
convPadding[1] = -%(padH)s;
imageSize[0] = %(in_w)s; //w
imageSize[1] = %(in_h)s; //h
imageSize[2] = %(in_c)s; //c
imageSize[3] = %(in_n)s; //n
imageStride[0] = 1;
imageStride[1] = imageSize[0];
imageStride[2] = imageSize[0] * imageSize[1];
imageStride[3] = imageSize[0] * imageSize[1] * imageSize[2];
weightSize[0] = %(k_w)s;
weightSize[1] = %(k_h)s;
weightSize[2] = %(k_c)s;
weightSize[3] = %(k_n)s;
weightSize[4] = %(grp)s;
weightStride[0] = 1;
weightStride[1] = weightSize[0];
weightStride[2] = weightSize[0] * weightSize[1];
weightStride[3] = weightSize[0] * weightSize[1] * weightSize[2];
weightStride[4] = weightSize[0] * weightSize[1] * weightSize[2] * weightSize[3];
zSize[0] = %(o_w)s;
zSize[1] = %(o_h)s;
zSize[2] = %(o_c)s;
zSize[3] = %(o_n)s;
zStride[0] = 1;
zStride[1] = zSize[0];
zStride[2] = zSize[0] * zSize[1];
zStride[3] = zSize[0] * zSize[1] * zSize[2];
if( %(withBias)s ) {
biasSize[0] = %(o_c)s;
biasStride[0] = 1;
}
groups = %(grp)s;
fdimension = dimension + (groups != 1);
// Create conv backward primitive
CHECK_ERR( dnnGroupsConvolutionCreateBackwardFilter_%(precision)s(&pConvolutionBwdFilter, NULL,
dnnAlgorithmConvolutionDirect, groups, dimension, imageSize,
zSize, weightSize, convStride, convPadding, dnnBorderZeros), err );
}
if (NULL == bwdf_weight_internal_layout) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&bwdf_weight_internal_layout,
pConvolutionBwdFilter, dnnResourceDiffFilter), err );
}
if (NULL == image_internal_layout) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&image_internal_layout,
pConvolutionBwdFilter, dnnResourceSrc), err );
}
if (NULL == gradz_internal_layout_for_weight) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&gradz_internal_layout_for_weight,
pConvolutionBwdFilter, dnnResourceDiffDst), err );
}
// create forward primitive here to get forward internal layout
if (NULL == pConvolutionFwd) {
if ( %(withBias)s ) {
CHECK_ERR( dnnGroupsConvolutionCreateForwardBias_%(precision)s(&pConvolutionFwd, NULL,
dnnAlgorithmConvolutionDirect, groups, dimension, imageSize,
zSize, weightSize, convStride, convPadding, dnnBorderZeros), err );
} else {
CHECK_ERR( dnnGroupsConvolutionCreateForward_%(precision)s(&pConvolutionFwd, NULL,
dnnAlgorithmConvolutionDirect, groups, dimension, imageSize,
zSize, weightSize, convStride, convPadding, dnnBorderZeros), err );
}
}
if (NULL == fwd_weight_internal_layout) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&fwd_weight_internal_layout,
pConvolutionFwd, dnnResourceFilter), err );
}
//bwdbias related
if( %(withBias)s ) {
if (NULL == pConvolutionBwdBias) {
CHECK_ERR ( dnnGroupsConvolutionCreateBackwardBias_%(precision)s(&pConvolutionBwdBias, NULL,
dnnAlgorithmConvolutionDirect, groups, dimension, zSize), err );
}
if (NULL == bias_internal_layout) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&bias_internal_layout,
pConvolutionBwdBias, dnnResourceDiffBias), err );
}
if (NULL == gradz_internal_layout_for_bias) {
CHECK_ERR( dnnLayoutCreateFromPrimitive_%(precision)s(&gradz_internal_layout_for_bias,
pConvolutionBwdBias, dnnResourceDiffDst), err );
}
}
//// Prepare weightgrad array
if ( !(%(weightgrad)s) ) {
%(weightgrad)s = (PyArrayObject*)PyArray_ZEROS(PyArray_NDIM(%(weight)s),
PyArray_DIMS(%(weight)s),
PyArray_TYPE(%(weight)s),
0);
if (NULL == %(weightgrad)s) {
/*
PyErr_Format(PyExc_RuntimeError,
"conv_gradWeight: Failed to allocate weight of %%lld x %%lld x %%lld x %%lld x %%lld",
(long long)(PyArray_DIMS(%(weight)s))[0], (long long)(PyArray_DIMS(%(weight)s))[1],
(long long)(PyArray_DIMS(%(weight)s))[2], (long long)(PyArray_DIMS(%(weight)s))[3]);
*/
}
}
weight_buf = (%(dtype)s*)PyArray_DATA(%(weightgrad)s);
""" % sub
if bias is not None:
ccode += """
if (NULL == %(biasgrad)s) {
%(biasgrad)s = (PyArrayObject*)PyArray_ZEROS(PyArray_NDIM(%(bias)s),
PyArray_DIMS(%(bias)s),
PyArray_TYPE(%(bias)s),
0);
if (NULL == %(biasgrad)s) {
PyErr_Format(PyExc_RuntimeError, "conv_backward: Failed to allocate bias of %%lld",
(long long)PyArray_NDIM(%(bias)s));
%(fail)s
}
}
bias_buf = (%(dtype)s*)PyArray_DATA(%(biasgrad)s);
""" % sub
ccode += """
// get internal layout for input from previous Op
image_internal_layout_from_previous = ((dnnLayout_t*)PyArray_DATA(%(image)s))[0];
// get internal buffer for input from previous op
image_buf_from_previous = ((void **)PyArray_DATA(%(image)s))[1];
if (1 == first_run) {
if (!dnnLayoutCompare_%(precision)s(image_internal_layout_from_previous, image_internal_layout)) {
if (NULL == internal_to_internal_image) {
CHECK_ERR( dnnConversionCreate_%(precision)s(&internal_to_internal_image, image_internal_layout_from_previous, image_internal_layout), err );
}
}
}
if (internal_to_internal_image) {
if (NULL == image_buf) {
CHECK_ERR( dnnAllocateBuffer_%(precision)s((void**)&image_buf, image_internal_layout), err );
}
CHECK_ERR( dnnConversionExecute_%(precision)s(internal_to_internal_image, image_buf_from_previous, image_buf), err );
image_internal_layout_buf = &image_internal_layout;
} else {
image_internal_layout_buf = &image_internal_layout_from_previous;
image_buf = image_buf_from_previous;
}
// get internal layout for gradz from previous Op
gradz_internal_layout = ((dnnLayout_t*)PyArray_DATA(%(gradz)s))[0];
// get internal buffer for gradz from previous op
gradz_buf = ((void **)PyArray_DATA(%(gradz)s))[1];
if (1 == first_run) {
if (!dnnLayoutCompare_%(precision)s(gradz_internal_layout, gradz_internal_layout_for_weight)) {
if (NULL ==
>>> svm.getMaxIter()
100
>>> svm.setMaxIter(5)
LinearSVC...
>>> svm.getMaxIter()
5
>>> svm.getRegParam()
0.0
>>> svm.setRegParam(0.01)
LinearSVC...
>>> svm.getRegParam()
0.01
>>> model = svm.fit(df)
>>> model.setPredictionCol("newPrediction")
LinearSVCModel...
>>> model.getPredictionCol()
'newPrediction'
>>> model.setThreshold(0.5)
LinearSVCModel...
>>> model.getThreshold()
0.5
>>> model.getBlockSize()
1
>>> model.coefficients
DenseVector([0.0, -0.2792, -0.1833])
>>> model.intercept
1.0206118982229047
>>> model.numClasses
2
>>> model.numFeatures
3
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
>>> model.predict(test0.head().features)
1.0
>>> model.predictRaw(test0.head().features)
DenseVector([-1.4831, 1.4831])
>>> result = model.transform(test0).head()
>>> result.newPrediction
1.0
>>> result.rawPrediction
DenseVector([-1.4831, 1.4831])
>>> svm_path = temp_path + "/svm"
>>> svm.save(svm_path)
>>> svm2 = LinearSVC.load(svm_path)
>>> svm2.getMaxIter()
5
>>> model_path = temp_path + "/svm_model"
>>> model.save(model_path)
>>> model2 = LinearSVCModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, blockSize=1):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, blockSize=1):
"""
super(LinearSVC, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LinearSVC", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, threshold=0.0, aggregationDepth=2,
blockSize=1)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2, blockSize=1):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2, blockSize=1):
Sets params for Linear SVM Classifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearSVCModel(java_model)
@since("2.2.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.2.0")
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
@since("2.2.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("2.2.0")
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
@since("2.2.0")
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
@since("2.2.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("2.2.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.2.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
@since("3.1.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
class LinearSVCModel(_JavaClassificationModel, _LinearSVCParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by LinearSVC.
.. versionadded:: 2.2.0
"""
@since("3.0.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@property
@since("2.2.0")
def coefficients(self):
"""
Model coefficients of Linear SVM Classifier.
"""
return self._call_java("coefficients")
@property
@since("2.2.0")
def intercept(self):
"""
Model intercept of Linear SVM Classifier.
"""
return self._call_java("intercept")
@since("3.1.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
return LinearSVCTrainingSummary(super(LinearSVCModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.1.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_lsvc_summary = self._call_java("evaluate", dataset)
return LinearSVCSummary(java_lsvc_summary)
class LinearSVCSummary(_BinaryClassificationSummary):
"""
Abstraction for LinearSVC Results for a given model.
.. versionadded:: 3.1.0
"""
pass
@inherit_doc
class LinearSVCTrainingSummary(LinearSVCSummary, _TrainingSummary):
"""
Abstraction for LinearSVC Training results.
.. versionadded:: 3.1.0
"""
pass
class _LogisticRegressionParams(_ProbabilisticClassifierParams, HasRegParam,
HasElasticNetParam, HasMaxIter, HasFitIntercept, HasTol,
HasStandardization, HasWeightCol, HasAggregationDepth,
HasThreshold, HasBlockSize):
"""
Params for :py:class:`LogisticRegression` and :py:class:`LogisticRegressionModel`.
.. versionadded:: 3.0.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
lowerBoundsOnCoefficients = Param(Params._dummy(), "lowerBoundsOnCoefficients",
"The lower bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
upperBoundsOnCoefficients = Param(Params._dummy(), "upperBoundsOnCoefficients",
"The upper bounds on coefficients if fitting under bound "
"constrained optimization. The bound matrix must be "
"compatible with the shape "
"(1, number of features) for binomial regression, or "
"(number of classes, number of features) "
"for multinomial regression.",
typeConverter=TypeConverters.toMatrix)
lowerBoundsOnIntercepts = Param(Params._dummy(), "lowerBoundsOnIntercepts",
"The lower bounds on intercepts if fitting under bound "
"constrained optimization. The bounds vector size must be"
"equal with 1 for binomial regression, or the number of"
"lasses for multinomial regression.",
typeConverter=TypeConverters.toVector)
upperBoundsOnIntercepts = Param(Params._dummy(), "upperBoundsOnIntercepts",
"The upper bounds on intercepts if fitting under bound "
"constrained optimization. The bound vector size must be "
"equal with 1 for binomial regression, or the number of "
"classes for multinomial regression.",
typeConverter=TypeConverters.toVector)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self.clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
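# Illustrative example (not from the original source): with
# thresholds = [0.25, 0.75], the equivalent binary threshold returned here is
# 1.0 / (1.0 + 0.25 / 0.75) = 0.75.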
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self.clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: {0}".format(str(ts)))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getOrDefault(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
" threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
@since("2.3.0")
def getLowerBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`lowerBoundsOnCoefficients`
"""
return self.getOrDefault(self.lowerBoundsOnCoefficients)
@since("2.3.0")
def getUpperBoundsOnCoefficients(self):
"""
Gets the value of :py:attr:`upperBoundsOnCoefficients`
"""
return self.getOrDefault(self.upperBoundsOnCoefficients)
@since("2.3.0")
def getLowerBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`lowerBoundsOnIntercepts`
"""
return self.getOrDefault(self.lowerBoundsOnIntercepts)
@since("2.3.0")
def getUpperBoundsOnIntercepts(self):
"""
Gets the value of :py:attr:`upperBoundsOnIntercepts`
"""
return self.getOrDefault(self.upperBoundsOnIntercepts)
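# Shape sketch (illustrative, assuming pyspark.ml.linalg.Matrices/Vectors): for
# a binomial problem with 3 features, lowerBoundsOnCoefficients is a 1 x 3
# matrix, e.g. Matrices.dense(1, 3, [0.0, -1.0, -1.0]), and
# lowerBoundsOnIntercepts a length-1 vector; for a k-class multinomial problem
# the matrix is k x 3 and the intercept bounds vector has length k.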
@inherit_doc
class LogisticRegression(_JavaProbabilisticClassifier, _LogisticRegressionParams, JavaMLWritable,
JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
>>> blor = LogisticRegression(weightCol="weight")
>>> blor.getRegParam()
0.0
>>> blor.setRegParam(0.01)
LogisticRegression...
>>> blor.getRegParam()
0.01
>>> blor.setMaxIter(10)
LogisticRegression...
>>> blor.getMaxIter()
10
>>> blor.clear(blor.maxIter)
>>> blorModel = blor.fit(bdf)
>>> blorModel.setFeaturesCol("features")
LogisticRegressionModel...
>>> blorModel.setProbabilityCol("newProbability")
LogisticRegressionModel...
>>> blorModel.getProbabilityCol()
'newProbability'
>>> blorModel.getBlockSize()
1
>>> blorModel.setThreshold(0.1)
LogisticRegressionModel...
>>> blorModel.getThreshold()
0.1
>>> blorModel.coefficients
DenseVector([-1.080..., -0.646...])
>>> blorModel.intercept
3.112...
>>> blorModel.evaluate(bdf).accuracy == blorModel.summary.accuracy
True
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> mdf = spark.read.format("libsvm").load(data_path)
>>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> mlorModel.coefficientMatrix
SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
>>> mlorModel.interceptVector
DenseVector([0.04..., -0.42..., 0.37...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
>>> blorModel.predict(test0.head().features)
1.0
>>> blorModel.predictRaw(test0.head().features)
DenseVector([-3.54..., 3.54...])
>>> blorModel.predictProbability(test0.head().features)
DenseVector([0.028, 0.972])
>>> result = blorModel.transform(test0).head()
>>> result.prediction
1.0
>>> result.newProbability
DenseVector([0.02..., 0.97...])
>>> result.rawPrediction
DenseVector([-3.54..., 3.54...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
# Source: shaurz/ome - ome/ome_ast.py
# ome - Object Message Expressions
# Copyright (c) 2015-2016 <NAME> <<EMAIL>>
from .constants import *
from .emit import MethodCodeBuilder
from .error import OmeError
from .instructions import *
from .sexpr import format_sexpr
from .symbol import is_private_symbol
class ASTNode(object):
def __str__(self):
return format_sexpr(self.sexpr())
class Send(ASTNode):
def __init__(self, receiver, symbol, args, parse_state=None):
self.receiver = receiver
self.symbol = symbol
self.args = args
self.parse_state = parse_state
self.receiver_block = None
self.traceback_info = None
self.private_receiver_block = None
def sexpr(self):
call = 'call' if self.receiver_block else 'send'
receiver = self.receiver.sexpr() if self.receiver else '<free>'
args = tuple(arg.sexpr() for arg in self.args)
return (call, self.symbol, receiver) + args
def error(self, message):
if self.parse_state:
self.parse_state.error(message)
else:
raise OmeError(message)
def resolve_free_vars(self, parent):
self.method = parent.find_method()
for i, arg in enumerate(self.args):
self.args[i] = arg.resolve_free_vars(parent)
if self.receiver:
self.receiver = self.receiver.resolve_free_vars(parent)
if self.symbol == 'self':
return self.receiver
if is_private_symbol(self.symbol):
receiver_block = parent.lookup_receiver(self.symbol, False)
if not receiver_block:
self.error("receiver could not be resolved for '%s'" % self.symbol)
self.private_receiver_block = receiver_block
elif self.symbol:
if self.symbol == 'self':
return Self
if len(self.args) == 0:
ref = parent.lookup_var(self.symbol)
if ref:
return ref
self.receiver_block = parent.lookup_receiver(self.symbol)
if not self.receiver_block:
if self.symbol in builtin_constants:
return builtin_constants[self.symbol]
self.error("receiver could not be resolved for '%s'" % self.symbol)
return self
def resolve_block_refs(self, parent):
for i, arg in enumerate(self.args):
self.args[i] = arg.resolve_block_refs(parent)
if self.receiver:
self.receiver = self.receiver.resolve_block_refs(parent)
if is_private_symbol(self.symbol):
self.check_tag_block = parent.find_block()
elif self.receiver_block:
block = self.receiver_block
if block.is_constant and block != parent.find_block():
# No need to get block ref for constant blocks
self.receiver = block.constant_ref
else:
self.receiver = parent.get_block_ref(block)
# Direct slot access optimisation
if len(self.args) == 0 and self.symbol in block.instance_vars:
var = block.instance_vars[self.symbol]
return SlotGet(self.receiver, var.slot_index, var.mutable)
if len(self.args) == 1 and self.symbol[:-1] in block.instance_vars:
var = block.instance_vars[self.symbol[:-1]]
return SlotSet(self.receiver, var.slot_index, self.args[0])
return self
def walk(self, visitor):
visitor(self)
if self.receiver:
self.receiver.walk(visitor)
for arg in self.args:
arg.walk(visitor)
def generate_code(self, code):
receiver = self.receiver.generate_code(code)
args = [arg.generate_code(code) for arg in self.args]
dest = code.add_temp()
check_tag = None
if self.receiver_block:
label = code.make_method_label(self.receiver_block.tag_id, self.symbol)
else:
label = code.make_message_label(self.symbol)
if self.private_receiver_block:
check_tag = self.private_receiver_block.tag_id
code.add_instruction(CALL(dest, [receiver] + args, label, self.traceback_info, check_tag=check_tag))
return dest
class Concat(Send):
def __init__(self, args, parse_state=None):
super(Concat, self).__init__(None, '', args, parse_state)
def sexpr(self):
return ('concat',) + tuple(arg.sexpr() for arg in self.args)
def generate_code(self, code):
args = [arg.generate_code(code) for arg in self.args]
dest = code.add_temp()
code.add_instruction(CONCAT(dest, args, self.traceback_info))
return dest
class BlockVariable(object):
def __init__(self, name, mutable, index, init_ref=None):
self.name = name
self.mutable = mutable
self.slot_index = index
self.init_ref = init_ref
self.self_ref = SlotGet(Self, index, mutable)
def generate_code(self, code):
return self.init_ref.generate_code(code)
class Block(ASTNode):
def __init__(self, slots, methods):
self.slots = slots # list of BlockVariables for instance vars, closure vars and block references
self.methods = methods
self.instance_vars = {var.name: var for var in slots}
self.closure_vars = {}
self.block_refs = {}
self.blocks_needed = set()
self.symbols = set(self.instance_vars) # Set of all symbols this block defines
self.symbols.update(method.symbol for method in self.methods)
# Generate getter and setter methods
for var in slots:
setter = var.name + ':'
if var.mutable:
self.symbols.add(setter)
self.methods.append(Method(var.name, [], var.self_ref))
if var.mutable:
self.methods.append(Method(setter, [var.name], var.self_ref.setter(Send(None, var.name, []))))
# Generate show method
if 'show' not in self.symbols:
if not slots:
self.methods.append(Method('show', [], String('{}')))
else:
concat_args = []
concat_args.append(String('{' + slots[0].name + '='))
concat_args.append(Send(Send(None, slots[0].name, []), 'show', []))
for var in slots[1:]:
concat_args.append(String('; ' + var.name + '='))
concat_args.append(Send(Send(None, var.name, []), 'show', []))
concat_args.append(String('}'))
self.methods.append(Method('show', [], Concat(concat_args)))
def sexpr(self):
methods = tuple(method.sexpr() for method in self.methods)
slots = tuple(slot.name for slot in self.slots)
if slots:
return ('block', ('slots',) + slots) + methods
else:
return ('block',) + methods
@property
def is_constant(self):
return len(self.slots) == 0 and all(block.is_constant for block in self.blocks_needed)
def find_block(self):
return self
def resolve_free_vars(self, parent):
for var in self.slots:
var.init_ref = parent.lookup_var(var.name)
self.parent = parent
for method in self.methods:
method.resolve_free_vars(self)
return self
def resolve_block_refs(self, parent):
if self.is_constant:
self.constant_ref = ConstantBlock(self)
for method in self.methods:
method.resolve_block_refs(self)
return self
def lookup_var(self, symbol):
if symbol in self.closure_vars:
return self.closure_vars[symbol].self_ref
if symbol not in self.symbols:
ref = self.parent.lookup_var(symbol)
if ref:
var = BlockVariable(symbol, False, len(self.slots), ref)
self.closure_vars[symbol] = var
self.slots.append(var)
return var.self_ref
def lookup_receiver(self, symbol, add_blocks_needed=True):
if symbol in self.symbols:
return self
block = self.parent.lookup_receiver(symbol, add_blocks_needed)
if block and add_blocks_needed:
self.blocks_needed.add(block)
return block
def get_block_ref(self, block):
if block is self:
return Self
if block in self.block_refs:
return self.block_refs[block]
init_ref = self.parent.get_block_ref(block)
var = BlockVariable('<blockref>', False, len(self.slots), init_ref)
self.block_refs[block] = var.self_ref
self.slots.append(var)
return var.self_ref
def walk(self, visitor):
visitor(self)
for method in self.methods:
method.walk(visitor)
def generate_code(self, code):
dest = code.add_temp()
if self.is_constant:
code.add_instruction(LOAD_VALUE(dest, code.get_tag('Constant'), self.constant_id))
else:
code.add_instruction(ALLOC(dest, len(self.slots), self.tag_id))
for index, slot in enumerate(self.slots):
value = slot.generate_code(code)
code.add_instruction(SET_SLOT(dest, index, value))
return dest
class LocalVariable(ASTNode):
def __init__(self, name, expr):
self.name = name
self.expr = expr
def sexpr(self):
return ('local', self.name, self.expr.sexpr())
def resolve_free_vars(self, parent):
self.expr = self.expr.resolve_free_vars(parent)
self.local_ref = parent.add_local(self.name)
return self
def resolve_block_refs(self, parent):
self.expr = self.expr.resolve_block_refs(parent)
return self
def walk(self, visitor):
visitor(self)
self.expr.walk(visitor)
def generate_code(self, code):
local = self.local_ref.generate_code(code)
expr = self.expr.generate_code(code)
code.add_instruction(ALIAS(local, expr))
return local
class Method(ASTNode):
def __init__(self, symbol, args, expr):
self.symbol = symbol
self.locals = []
self.args = args
self.vars = {}
for index, arg in enumerate(args, 1):
ref = LocalGet(index, arg)
self.locals.append(ref)
self.vars[arg] = ref
self.expr = expr
def sexpr(self):
return ('method', (self.symbol,) + tuple(self.args), self.expr.sexpr())
def add_local(self, name):
ref = LocalGet(len(self.locals) + 1, name)
self.locals.append(ref)
return ref
def find_method(self):
return self
def find_block(self):
return self.parent.find_block()
def resolve_free_vars(self, parent):
self.parent = parent
self.expr = self.expr.resolve_free_vars(self)
return self
def resolve_block_refs(self, parent):
self.expr = self.expr.resolve_block_refs(self)
return self
def lookup_var(self, symbol):
if symbol in self.vars:
return self.vars[symbol]
return self.parent.lookup_var(symbol)
def lookup_receiver(self, symbol, add_blocks_needed=True):
return self.parent.lookup_receiver(symbol, add_blocks_needed)
def get_block_ref(self, block):
return self.parent.get_block_ref(block)
def walk(self, visitor):
visitor(self)
self.expr.walk(visitor)
def generate_code(self, program):
code = MethodCodeBuilder(len(self.args), len(self.locals) - len(self.args), program)
code.add_instruction(RETURN(self.expr.generate_code(code)))
return code.get_code()
class Sequence(ASTNode):
def __init__(self, statements):
self.statements = statements
def sexpr(self):
return ('begin',) + tuple(statement.sexpr() for statement in self.statements)
def add_local(self, name):
ref = self.method.add_local(name)
self.vars[name] = ref
return ref
def find_method(self):
return self.parent.find_method()
def find_block(self):
return self.parent.find_block()
def resolve_free_vars(self, parent):
self.parent = parent
self.method = parent.find_method()
self.vars = {}
for i, statement in enumerate(self.statements):
self.statements[i] = statement.resolve_free_vars(self)
return self
def resolve_block_refs(self, parent):
for i, statement in enumerate(self.statements):
self.statements[i] = statement.resolve_block_refs(self)
return self
def lookup_var(self, symbol):
if symbol in self.vars:
return self.vars[symbol]
return self.parent.lookup_var(symbol)
def lookup_receiver(self, symbol, add_blocks_needed=True):
return self.parent.lookup_receiver(symbol, add_blocks_needed)
def get_block_ref(self, block):
return self.parent.get_block_ref(block)
def walk(self, visitor):
visitor(self)
for statement in self.statements:
statement.walk(visitor)
def generate_code(self, code):
for statement in self.statements[:-1]:
statement.generate_code(code)
return self.statements[-1].generate_code(code)
class Array(ASTNode):
def __init__(self, elems):
self.elems = elems
def sexpr(self):
return ('array',) + tuple(elem.sexpr() for elem in self.elems)
def resolve_free_vars(self, parent):
for i, elem in enumerate(self.elems):
self.elems[i] = elem.resolve_free_vars(parent)
return self
def resolve_block_refs(self, parent):
for i, elem in enumerate(self.elems):
self.elems[i] = elem.resolve_block_refs(parent)
return self
def walk(self, visitor):
visitor(self)
for elem in self.elems:
elem.walk(visitor)
def generate_code(self, code):
dest = code.add_temp()
code.add_instruction(ARRAY(dest, len(self.elems), code.get_tag('Array')))
for index, elem in enumerate(self.elems):
value = elem.generate_code(code)
code.add_instruction(SET_ELEM(dest, index, value))
return dest
class TerminalNode(ASTNode):
def resolve_free_vars(self, parent):
return self
def resolve_block_refs(self, parent):
return self
def walk(self, visitor):
visitor(self)
class Constant(TerminalNode):
def __init__(self, constant_name):
self.constant_name = constant_name
def sexpr(self):
return self.constant_name
def generate_code(self, code):
dest = code.add_temp()
code.add_instruction(LOAD_VALUE(dest, code.get_tag('Constant'), code.get_constant(self.constant_name)))
return dest
class ConstantBlock(TerminalNode):
def __init__(self, block):
self.block = block
def sexpr(self):
if hasattr(self.block, 'constant_id'):
return ('constant', self.block.constant_id)
else:
return '<constant>'
def generate_code(self, code):
dest = code.add_temp()
code.add_instruction(LOAD_VALUE(dest, code.get_tag('Constant'), self.block.constant_id))
return dest
class BuiltInBlock(object):
is_constant = True
constant_ref = Constant('BuiltIn')
def __init__(self, methods):
self.symbols = {}
for method in methods:
if method.tag_name == 'BuiltIn':
self.symbols[method.symbol] = method
def lookup_var(self, symbol):
pass
def lookup_receiver(self, symbol, add_blocks_needed=True):
if symbol in self.symbols:
return self
def get_block_ref(self, block):
pass
class LocalGet(TerminalNode):
def __init__(self, index, name):
self.local_index = index
self.name = name
def sexpr(self):
return self.name
def generate_code(self, code):
return self.local_index
class SlotGet(ASTNode):
def __init__(self, obj_expr, slot_index, mutable):
self.obj_expr = obj_expr
self.slot_index = slot_index
self.mutable = mutable
def sexpr(self):
return ('slot-get', self.obj_expr.sexpr(), self.slot_index)
def setter(self, set_expr):
return SlotSet(self.obj_expr, self.slot_index, set_expr)
def resolve_free_vars(self, parent):
self.obj_expr = self.obj_expr.resolve_free_vars(parent)
return self
def resolve_block_refs(self, parent):
self.obj_expr = self.obj_expr.resolve_block_refs(parent)
return self
def walk(self, visitor):
visitor(self)
self.obj_expr.walk(visitor)
def generate_code(self, code):
object = self.obj_expr.generate_code(code)
dest = code.add_temp()
        code.add_instruction(GET_SLOT(dest, object, self.slot_index))
        return dest
'-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
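    # A minimal worked example (pattern and target values are made up, not from
    # any schema): the target must fully match at least one pattern from every
    # inner list for the check to succeed.
    #
    #   patterns = [[r'[A-Z]{2}'], [r'\d+', r'[A-Z]+\d+']]
    #   self.gds_validate_simple_patterns(patterns, 'AB')            # False: no match in list 2
    #   self.gds_validate_simple_patterns([[r'[A-Z]+\d+']], 'AB12')  # True: full match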
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_check_cardinality_(
self, value, input_name,
min_occurs=0, max_occurs=1, required=None):
if value is None:
length = 0
elif isinstance(value, list):
length = len(value)
else:
length = 1
        if required is not None:
if required and length < 1:
self.gds_collector_.add_message(
"Required value {}{} is missing".format(
input_name, self.gds_get_node_lineno_()))
if length < min_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is below "
"the minimum allowed, "
"expected at least {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
min_occurs, length))
elif length > max_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is above "
"the maximum allowed, "
"expected at most {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
max_occurs, length))
def gds_validate_builtin_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value, input_name=input_name)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_validate_defined_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
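    # Illustrative example (element and class names are assumptions): for an
    # element carrying xsi:type="tns:PersonType", the "tns:" prefix is stripped
    # and a global class named PersonType, if defined, replaces the default class.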
def gds_build_any(self, node, type_name=None):
# provide default value in case option --disable-xml is used.
content = ""
content = etree_.tostring(node, encoding="unicode")
return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
def excl_select_objs_(obj):
return (obj[0] != 'parent_object_' and
obj[0] != 'gds_collector_')
if type(self) != type(other):
return False
return all(x == y for x, y in zip_longest(
filter(excl_select_objs_, self.__dict__.items()),
filter(excl_select_objs_, other.__dict__.items())))
def __ne__(self, other):
return not self.__eq__(other)
# Django ETL transform hooks.
def gds_djo_etl_transform(self):
pass
def gds_djo_etl_transform_db_obj(self, dbobj):
pass
# SQLAlchemy ETL transform hooks.
def gds_sqa_etl_transform(self):
return 0, None
def gds_sqa_etl_transform_db_obj(self, dbobj):
pass
def gds_get_node_lineno_(self):
if (hasattr(self, "gds_elementtree_node_") and
self.gds_elementtree_node_ is not None):
return ' near line {}'.format(
self.gds_elementtree_node_.sourceline)
else:
return ""
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = ''
# Set this to false in order to deactivate during export, the use of
# name space prefixes captured from the input document.
UseCapturedNS_ = True
CapturedNsmap_ = {}
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
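# Illustrative example (the input string is made up): markup characters outside
# CDATA sections are escaped, while the CDATA section itself is left intact.
#
#   quote_xml('a < b <![CDATA[<raw>]]> & c')
#   # -> 'a &lt; b <![CDATA[<raw>]]> &amp; c'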
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
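# Illustrative behaviour (example values are made up): plain values are wrapped
# in double quotes; values containing a double quote are wrapped in single
# quotes; values containing both quote kinds are double-quoted with embedded
# double quotes escaped as &quot;.
#
#   quote_attrib('plain')       # -> '"plain"'
#   quote_attrib('say "hi"')    # -> "'say \"hi\"'"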
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
if prefix == 'xml':
namespace = 'http://www.w3.org/XML/1998/namespace'
else:
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
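# Illustrative examples (attribute names are made up): a plain name is looked
# up directly, while a prefixed name is resolved through the node's nsmap.
#
#   find_attr_value_('id', node)        # attrs.get('id')
#   find_attr_value_('xml:lang', node)  # attrs.get('{http://www.w3.org/XML/1998/namespace}lang')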
def encode_str_2_3(instr):
return instr
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if node is not None:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
    # Constants
a list of parameters after splitting <line> at space boundaries."""
return shlex.split(line) # Use shlex so that quoted substrings are preserved intact
def display_single_line(self, object_type):
"""
Query for objects named object_type, and display the most recent one on a single, refreshed line.
The query is reissued every N seconds, where N is the value of the 'freq' filter.
This runs in an endless loop until it's interrupted with a KeyboardInterrupt (ctrl-C).
        @param object_type: The name of the object type to query, either DebugMessage or DebugMessageExchange.
"""
column_labels = self.column_labels(object_type)
formatter = None
try:
while True:
response = MessageViewer.display_db_objects(object_type, filters=self._filters)
if type(response) == dict and len(response['results']) > 0:
data_rows, col_widths = self.data_rows_for_obj_list(response['results'],
column_labels,
formatter=formatter)
for data_row in data_rows:
if not formatter:
formatter = self.left_aligned_formatter(column_labels, col_widths)
# Write with no line feed so that the next line will overwrite it.
sys.stdout.write('\r'.rjust(80))
sys.stdout.write('{0} \r'.format(formatter.compose(data_row,
include_newline=False,
wrap_text=False)))
sys.stdout.flush()
time.sleep(float(self._filters['freq']))
elif type(response) == str:
                    sys.stdout.write(response)
except KeyboardInterrupt:
print('Execution interrupted')
def print_stats(self, stat_type, response):
"""
Fill out a stats table, format it and print it.
'response' is either a status/error message or a dictionary containing the json-style RPC response.
The response's 'stats' element contains statistics by agent -- a dictionary of dictionaries of numbers.
@param stat_type: Either 'Topic' or 'Receiving Agent'.
@param response: The RPC response.
"""
if type(response) == str:
# Got back an error/status message, so just print it
print(response)
elif response['message_count'] == 0:
print('No messages found for session')
else:
response_items = response['stats']
if len(response_items) > 0:
# Gather the data into rows and columns.
# Collect a set of all column titles (the original data may have been sparse).
all_columns_set = set()
for row, inner_dict in response_items.items():
for column in inner_dict.keys():
all_columns_set.add(column)
# Alpha-sort row and column labels.
row_labels = sorted(response_items.keys())
column_labels = sorted(list(all_columns_set))
header_row = [stat_type] # Leftmost column
header_row.extend([(c or '(No Sender Name)') for c in column_labels])
col_widths = {c: len(c) for c in header_row}
# Write data rows
data_rows = []
for r in row_labels:
if not self.exclude_by_agent(r):
data_row = [r or '(No {})'.format(stat_type)] # Leftmost column
col_widths[stat_type] = max(col_widths[stat_type], len(data_row[0]))
for label in column_labels:
c = label or '(No Sender Name)'
cell_text = str(response_items[r][c] if c in response_items[r] else '-')
data_row.append(cell_text)
col_widths[c] = max(col_widths[c], len(cell_text))
data_rows.append(data_row)
column_formats = [{'width': self.col_width(col_widths[header_row[0]]), 'margin': 2, 'alignment': LEFT}]
for label in column_labels:
header = label or '(No Sender Name)'
column_formats.append({'width': self.col_width(col_widths[header]), 'margin': 2, 'alignment': RIGHT})
formatter = self.formatter(header_row, column_formats)
for row in data_rows:
sys.stdout.write(formatter.compose(row))
else:
print('No stats in filtered output, consider relaxing filters')
def print_response_dict(self, object_type, response):
"""
Fill out a table with one row per response element, format it and print it.
@param object_type: The name of the type of data to be displayed, which defines the column layout.
@param response: An RPC response, usually a dictionary containing a list of json-formatted objects.
"""
if type(response) == str:
# Got back an error/status message, so just print it
print(response)
else:
response_list = response['results']
if len(response_list) > 0:
column_labels = self.column_labels(object_type)
data_rows, col_widths = self.data_rows_for_obj_list(response_list, column_labels)
formatter = self.left_aligned_formatter(column_labels, col_widths)
for row in data_rows:
sys.stdout.write(formatter.compose(row))
else:
print('No data in filtered output, consider relaxing filters')
def data_rows_for_obj_list(self, obj_list, column_labels, formatter=None):
"""
Given a list of dictionaries, format the data into a list of data rows.
@param obj_list: A list of objects, each of which is a dictionary of strings.
@param column_labels: Labels to print as column headers, and also to index into the obj_list dictionaries.
        @param formatter: A TextFormatter instance. If None, column widths can still be adjusted.
@return: A tuple of two lists: the data rows and their column widths.
"""
col_widths = {c: len(c) for c in column_labels}
data_rows = []
for obj_dict in obj_list:
if not self.exclude_by_agent(obj_dict):
data_row = []
for c in column_labels:
cell_text = str(obj_dict[c] or '-')
data_row.append(cell_text)
if not formatter:
# Columns haven't been formatted yet, so we can still adjust their widths.
col_widths[c] = max(col_widths[c], len(cell_text))
data_rows.append(data_row)
return data_rows, col_widths
def left_aligned_formatter(self, column_labels, col_widths):
"""
Create and return a TextFormatter for the indicated columns. Print the header row.
@param column_labels: A list of column labels/headers.
@param col_widths: A dictionary of column widths, indexed by column label/header names.
@return: The TextFormatter instance.
"""
column_formats = []
for label in column_labels:
column_formats.append({'width': self.col_width(col_widths[label]), 'margin': 2, 'alignment': LEFT})
return self.formatter(column_labels, column_formats)
def column_labels(self, object_type):
"""Return a list of column labels to display based on the object type and verbosity."""
if self._verbosity == 'high':
return ALL_COLUMNS[object_type]
else:
return INTERESTING_COLUMNS[object_type]
@staticmethod
def col_width(cwidth):
"""Return an adjusted column width that's within MIN and MAX parameters."""
return max(min(cwidth, MAX_COL_WIDTH), MIN_COL_WIDTH)
@staticmethod
def formatter(column_labels, column_formats):
"""
Create and return a TextFormatter for the indicated columns. Print the header row.
@param column_labels: A list of column labels/headers.
@param column_formats: A list of column formats defining widths, alignments, etc.
@return: The TextFormatter instance.
"""
formatter = TextFormatter(column_formats)
sys.stdout.write(formatter.compose(column_labels))
return formatter
def exclude_by_agent(self, response):
"""Return whether the dictionary contains a 'sender' or 'recipient' property that should be excluded."""
if self._verbosity == 'high':
return False # At high verbosity, report 'em all
if 'sender' in response and response['sender'] in EXCLUDED_SENDERS:
return True
if 'recipient' in response and response['recipient'] in EXCLUDED_RECIPIENTS:
return True
return False
def initialize_monitor_socket(self):
"""Initialize and return the monitor socket used by the MessageDebuggerAgent to stream messages."""
monitor_path = os.path.expandvars('$VOLTTRON_HOME/run/messageviewer')
monitor_socket = zmq.Context().socket(zmq.SUB)
monitor_socket_address = 'ipc://{}'.format('@' if sys.platform.startswith('linux') else '') + monitor_path
monitor_socket.bind(monitor_socket_address)
monitor_socket.setsockopt_string(zmq.SUBSCRIBE, "")
_log.debug('Subscribing to monitor socket {}'.format(monitor_socket_address))
return monitor_socket
class MessageViewer(object):
"""
View MessageDebugger messages by issuing RPC calls to MessageDebuggerAgent.
MessageViewer is almost entirely stateless. It consists of a set of class methods,
each of which issues a single RPC call. The only state is the connection to the
MessageDebuggerAgent. Once it has been established, it's cached and re-used throughout
the lifetime of the process.
MessageViewer methods can be called directly if desired.
MessageViewerCmd provides an interactive UI for them.
Sample MessageViewer commands:
MessageViewer.display_message_stream()
MessageViewer.enable_message_streaming(filters={})
MessageViewer.disable_message_streaming()
MessageViewer.display_db_objects('DebugMessage', filters={'freq': '1'})
MessageViewer.display_db_objects('DebugMessageExchange')
MessageViewer.display_db_objects('DebugMessageExchange', filters={'freq': '1'})
MessageViewer.display_db_objects('DebugMessageExchange', filters={'sender': 'test.agent',
'recipient': 'platform.driver'})
MessageViewer.display_db_objects('DebugMessageExchange', filters={'device': 'chargepoint1',
'point': 'stationMacAddr'})
MessageViewer.display_db_objects('DebugMessageExchange', filters={'freq': '1',
'results_only': '1',
'device': 'chargepoint1',
'sender': 'test.agent'})
MessageViewer.display_db_objects('DebugMessage', filters={'session_id': '1'})
MessageViewer.display_db_objects('DebugMessage', filters={'topic': 'heartbeat'})
MessageViewer.display_db_objects('DebugMessage', filters={'starttime': '2017-03-06 15:57:00',
'endtime': '2017-03-06 15:57:50'})
MessageViewer.display_db_objects('DebugMessageExchange', filters={'session_id': '1'})
MessageViewer.display_db_objects('DebugSession')
MessageViewer.session_details_by_agent(38)
MessageViewer.session_details_by_topic(38)
MessageViewer.message_exchange_details(8950737996372725552.272119477)
MessageViewer.set_verbosity('high')
MessageViewer.enable_message_debugging()
MessageViewer.disable_message_debugging()
MessageViewer.delete_debugging_session(22)
MessageViewer.delete_debugging_db()
"""
@classmethod
def display_message_stream(cls):
"""Display the stream of DebugMessage strings as they arrive on the monitor socket."""
monitor_path = os.path.expandvars('$VOLTTRON_HOME/run/messageviewer')
monitor_socket = zmq.Context().socket(zmq.SUB)
monitor_socket_address = 'ipc://{}'.format('@' if sys.platform.startswith('linux') else '') + monitor_path
monitor_socket.bind(monitor_socket_address)
monitor_socket.setsockopt_string(zmq.SUBSCRIBE, "")
_log.debug('Subscribing to monitor socket {}'.format(monitor_socket_address))
try:
while True:
json_string = monitor_socket.recv()
print(jsonapi.loads(json_string))
except KeyboardInterrupt:
_log.debug('Execution interrupted')
@classmethod
def start_streaming(cls, filters=None):
"""Start publishing DebugMessage strings to the monitor socket."""
return cls.issue_debugger_request('enable_message_streaming', filters=filters)
@classmethod
def stop_streaming(cls):
"""Stop publishing DebugMessage strings to the monitor socket."""
return cls.issue_debugger_request('disable_message_streaming')
@classmethod
def display_db_objects(cls, db_object_name, filters=None):
return cls.issue_debugger_request('execute_db_query', db_object_name, filters=filters)
@classmethod
def session_details_by_agent(cls, session_id):
return cls.issue_debugger_request('session_details_by_agent', session_id)
@classmethod
def session_details_by_topic(cls, session_id):
return cls.issue_debugger_request('session_details_by_topic', session_id)
@classmethod
def message_exchange_details(cls, message_id):
return cls.issue_debugger_request('message_exchange_details', message_id)
@classmethod
def set_verbosity(cls, verbosity_level):
return cls.issue_debugger_request('set_verbosity', verbosity_level)
@classmethod
def set_filters(cls, filters):
return cls.issue_debugger_request('set_filters', filters)
@classmethod
def enable_message_debugging(cls):
return cls.issue_debugger_request('enable_message_debugging')
@classmethod
def disable_message_debugging(cls):
return cls.issue_debugger_request('disable_message_debugging')
@classmethod
def delete_debugging_session(cls, session_id):
return cls.issue_debugger_request('delete_debugging_session', session_id)
@classmethod
def delete_debugging_db(cls):
return cls.issue_debugger_request('delete_debugging_db')
@classmethod
def issue_debugger_request(cls, method_name, *args, **kwargs):
_log.debug('Sending {0} to message debugger'.format(method_name))
global debugger_connection
if not debugger_connection:
debugger_connection = ViewerConnection()
return debugger_connection.call(method_name, *args, **kwargs)
class ViewerConnection(object):
"""
This is a copy of volttron.platform.control.ControlConnection.
ControlConnection could not be used directly because it has a hard-coded
identity that would conflict if it were re-used by MessageViewer.
This connection/agent authenticates using the platform's credentials.
"""
def __init__(self):
self.address = get_address()
self.peer = 'messagedebugger'
self._server = Agent(identity='message.viewer',
address=self.address,
publickey=KeyStore().public,
secretkey=KeyStore().secret,
serverkey=KnownHostsStore().serverkey(self.address),
enable_store=False,
enable_channel=True)
self._greenlet = None
@property
def server(self):
if self._greenlet is None:
event = gevent.event.Event()
self._greenlet = gevent.spawn(self._server.core.run, event)
event.wait()
return self._server
def call(self, method, *args, **kwargs):
return self.server.vip.rpc.call(
self.peer, method, *args, **kwargs).get()
def call_no_get(self, method, *args, **kwargs):
return self.server.vip.rpc.call(
self.peer, method, *args, **kwargs)
def | |
"""
Procurement Model
Copyright: 2009-2021 (c) Sahana Software Foundation
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("PROCProcurementPlansModel",
"PROCPurchaseOrdersModel",
"proc_rheader"
)
from gluon import *
from gluon.storage import Storage
from ..core import *
# =============================================================================
class PROCProcurementPlansModel(DataModel):
"""
Procurement Plans
@ToDo: Link Table to Projects
"""
names = ("proc_plan",
"proc_plan_item"
)
def model(self):
T = current.T
db = current.db
auth = current.auth
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
messages = current.messages
configure = self.configure
settings = current.deployment_settings
SITE_LABEL = settings.get_org_site_label()
# =====================================================================
# Planned Procurements
#
proc_shipping_opts = {0: messages["NONE"],
1: T("Air"),
2: T("Rail"),
3: T("Road"),
4: T("Sea")
}
tablename = "proc_plan"
define_table(tablename,
self.super_link("site_id", "org_site",
label = SITE_LABEL,
default = auth.user.site_id if auth.is_logged_in() else None,
readable = True,
writable = True,
empty = False,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Inventory"),
# messages.AUTOCOMPLETE_HELP)),
represent = self.org_site_represent,
),
s3_date("order_date",
label = T("Order Date")
),
s3_date("eta",
label = T("Date Expected"),
),
# @ToDo: Do we want more than 1 supplier per Plan?
# @ToDo: Filter to orgs of type 'supplier'
self.org_organisation_id(label = T("Supplier")),
Field("shipping", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(proc_shipping_opts)),
represent = represent_option(proc_shipping_opts),
label = T("Shipping Method"),
default = 0,
),
# @ToDo: Add estimated shipping costs
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Procurement Plan"),
title_display = T("Procurement Plan Details"),
title_list = T("Procurement Plans"),
title_update = T("Edit Procurement Plan"),
label_list_button = T("List Procurement Plans"),
label_delete_button = T("Delete Procurement Plan"),
msg_record_created = T("Procurement Plan added"),
msg_record_modified = T("Procurement Plan updated"),
msg_record_deleted = T("Procurement Plan deleted"),
msg_list_empty = T("No Procurement Plans currently registered"))
# ---------------------------------------------------------------------
# Redirect to the Items tabs after creation
plan_item_url = URL(f="plan", args=["[id]", "plan_item"])
configure(tablename,
# @ToDo: Move these to controller r.interactive?
create_next = plan_item_url,
update_next = plan_item_url,
)
proc_plan_represent = self.proc_plan_represent
plan_id = S3ReusableField("plan_id", "reference %s" % tablename,
sortby = "date",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "proc_plan.id",
proc_plan_represent,
orderby="proc_plan.date",
sort=True)),
represent = proc_plan_represent,
label = T("Procurement Plan"),
ondelete = "CASCADE",
)
# Items as a component of Plans
self.add_components(tablename,
proc_plan_item = "plan_id",
)
# =====================================================================
# Procurement Plan Items
#
tablename = "proc_plan_item"
define_table(tablename,
plan_id(),
self.supply_item_entity_id(),
self.supply_item_id(),
self.supply_item_pack_id(),
Field("quantity", "double", notnull = True,
label = T("Quantity"),
),
# @ToDo: Move this into a Currency Widget
# for the pack_value field
s3_currency(readable=False,
writable=False
),
Field("pack_value", "double",
label = T("Value per Pack"),
readable = False,
writable = False,
),
#Field("pack_quantity",
# "double",
# compute = record_pack_quantity), # defined in supply
#Field.Method("pack_quantity",
# self.supply_item_pack_quantity(tablename=tablename)),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Item to Procurement Plan"),
title_display = T("Procurement Plan Item Details"),
title_list = T("Items in Procurement Plan"),
title_update = T("Edit Procurement Plan Item"),
label_list_button = T("List Items in Procurement Plan"),
label_delete_button = T("Remove Item from Procurement Plan"),
msg_record_created = T("Item added to Procurement Plan"),
msg_record_modified = T("Procurement Plan Item updated"),
msg_record_deleted = T("Item removed from Procurement Plan"),
msg_list_empty = T("No Items currently registered in this Procurement Plan"))
# ---------------------------------------------------------------------
# Item Search Method
#
filter_widgets = [
S3TextFilter(["item_id$name",
#"item_id$category_id$name",
#"plan_id$site_id$name"
],
label = T("Search"),
comment = T("Search for an item by text."),
),
S3OptionsFilter("plan_id$organisation_id$name",
label = T("Supplier"),
comment = T("If none are selected, then all are searched."),
cols = 2,
hidden = True,
),
#S3OptionsFilter("plan_id$site_id",
# label = T("Facility"),
# represent = "%(name)s",
# comment = T("If none are selected, then all are searched."),
# cols = 2,
# hidden = True,
# ),
#S3DateFilter("plan_id$order_date",
# label=T("Order Date"),
# hidden = True,
# ),
#S3DateFilter("plan_id$eta",
# label = T("Date Expected"),
# hidden = True,
# ),
]
configure(tablename,
super_entity = "supply_item_entity",
filter_widgets = filter_widgets,
#report_groupby = db.proc_plan.site_id,
report_hide_comments = True,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return None
# -------------------------------------------------------------------------
@staticmethod
def proc_plan_represent(plan_id, row=None):
"""
Represent a Procurement Plan
"""
if row:
table = current.db.proc_plan
elif not plan_id:
return current.messages["NONE"]
else:
db = current.db
table = db.proc_plan
row = db(table.id == plan_id).select(table.site_id,
table.order_date,
limitby = (0, 1),
).first()
try:
return "%s (%s)" % (table.site_id.represent(row.site_id),
table.order_date.represent(row.order_date))
except AttributeError:
# Plan not found
return current.messages.UNKNOWN_OPT
# =============================================================================
class PROCPurchaseOrdersModel(DataModel):
"""
Purchase Orders (PO)
@ToDo: Link to inv_send
@ToDo: Link to req_req
"""
names = ("proc_order",
"proc_order_item"
"proc_order_tag"
)
def model(self):
T = current.T
db = current.db
auth = current.auth
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
#messages = current.messages
configure = self.configure
settings = current.deployment_settings
SITE_LABEL = settings.get_org_site_label()
string_represent = lambda s: s if s else current.messages["NONE"]
purchase_ref = S3ReusableField("purchase_ref",
label = T("%(PO)s Number") % \
{"PO": settings.get_proc_shortname()},
represent = string_represent,
)
# =====================================================================
# Purchase Orders
#
tablename = "proc_order"
define_table(tablename,
purchase_ref(),
self.super_link("site_id", "org_site",
label = SITE_LABEL,
default = auth.user.site_id if auth.is_logged_in() else None,
readable = True,
writable = True,
#empty = False,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Inventory"),
# messages.AUTOCOMPLETE_HELP)),
represent = self.org_site_represent,
),
s3_date(default = "now"),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Purchase Order"),
title_display = T("Purchase Order Details"),
title_list = T("Purchase Orders"),
title_update = T("Edit Purchase Order"),
label_list_button = T("List Purchase Orders"),
label_delete_button = T("Delete Purchase Order"),
msg_record_created = T("Purchase Order added"),
msg_record_modified = T("Purchase Order updated"),
msg_record_deleted = T("Purchase Order deleted"),
msg_list_empty = T("No Purchase Orders currently registered"))
# ---------------------------------------------------------------------
# Redirect to the Items tabs after creation
order_item_url = URL(f="order", args=["[id]", "order_item"])
configure(tablename,
create_onaccept = self.proc_order_onaccept,
# @ToDo: Move these to controller r.interactive?
create_next = order_item_url,
update_next = order_item_url,
)
proc_order_represent = S3Represent(lookup = tablename,
fields = ["purchase_ref"],
)
order_id = S3ReusableField("order_id", "reference %s" % tablename,
sortby = "date",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "proc_order.id",
proc_order_represent,
orderby="proc_order.date",
sort=True)),
represent = proc_order_represent,
label = T("Purchase Order"),
ondelete = "CASCADE",
)
# Items as a component of Plans
self.add_components(tablename,
proc_order_item = "order_id",
proc_order_tag = {"name": "tag",
"joinby": "order_id",
},
)
# =====================================================================
# Purchase Order Items
#
tablename = "proc_order_item"
define_table(tablename,
order_id(),
self.supply_item_entity_id(),
self.supply_item_id(),
self.supply_item_pack_id(),
Field("quantity", "double", notnull = True,
label = T("Quantity"),
),
# @ToDo: Move this into a Currency Widget
# for the pack_value field
s3_currency(readable=False,
writable=False
),
Field("pack_value", "double",
label = T("Value per Pack"),
readable = False,
writable = False,
),
#Field("pack_quantity",
# "double",
# compute = record_pack_quantity), # defined in supply
#Field.Method("pack_quantity",
# self.supply_item_pack_quantity(tablename=tablename)),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Add Item to Purchase Order"),
title_display = T("Purchase Order Item Details"),
title_list = T("Items in Purchase Order"),
title_update = T("Edit Purchase Order Item"),
label_list_button = T("List Items in Purchase Order"),
label_delete_button = T("Remove Item from Purchase Order"),
msg_record_created = T("Item added to Purchase Order"),
msg_record_modified = T("Purchase Order Item updated"),
msg_record_deleted = T("Item removed from Purchase Order"),
msg_list_empty = T("No Items currently registered in this Purchase Order"))
# ---------------------------------------------------------------------
# Item Search Method
#
filter_widgets = [
S3TextFilter(["item_id$name",
#"item_id$category_id$name",
#"order_id$site_id$name"
],
label = T("Search"),
comment = T("Search for an item by text."),
),
S3OptionsFilter("order_id$organisation_id$name",
label = T("Supplier"),
comment = T("If | |
#!/usr/bin/env python
# Rock-Paper-Scissors runner for http://www.rpscontest.com/
# Copyright (c) 2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
from multiprocessing import Pool, cpu_count
except:
Pool = None
import getopt
import glob
import os
import random
import sys
import time
import traceback
########
VERSION = "1.0.1"
MATCHES = 10
POOL_SIZE = 1
if Pool is not None:
try:
POOL_SIZE = cpu_count()
except:
pass
WINDOWS = False
try:
sys.getwindowsversion()
except:
WINDOWS = False
else:
WINDOWS = True
########
class Bot:
"""Basic bot class to wrap bot functions"""
def __init__(self, name, code=None):
"""
name should be a unique identifier and must be a readable
filename if code is not specified
"""
self.name = name
if code is None:
self.load_code()
else:
self.code = code
self.reset()
def __eq__(self, other):
return self.name == other.name
def get_move(self, input):
"""Get the next move for the bot given input
input must be "R", "P", "S" or ""
"""
if self._code is None:
self.compile_code()
self.scope["input"] = input
exec self._code in self.scope
self.output = self.scope["output"]
return self.output
def compile_code(self):
self._code = compile(self.code, '<string>', 'exec')
def reset(self):
"""Resets bot for another round. This must be called before trying
to pass the bot between workers, or you may see obscure errors from failures
to pickle the bots scope dictionary."""
self.scope = dict()
# this will hold compiled code, but it apparently can't be
# pickled? so we'll have to do it later. XXX check into this
self._code = None
def load_code(self):
"""Load bot code from the file specified by the name attribute"""
f = open(self.name, "r")
self.code = f.read()
f.close()
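# Minimal usage sketch (not part of the original runner; the inline strategy is
# an assumption): a bot can also be built from a code string, and its code must
# assign a move to 'output' on each call.
#
#   rock_bot = Bot("rock", code="output = 'R'")
#   rock_bot.get_move("")   # -> 'R'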
# used to evaluate a pair of moves
# scoring[bot1_move][bot2_move]
# 1 = bot1 won, -1 = bot2 won, 0 = tie
# TODO: move into ContestResult?
scoring = {
"R": {
"R": 0,
"P": -1,
"S": 1
},
"P": {
"R": 1,
"P": 0,
"S": -1
},
"S": {
"R": -1,
"P": 1,
"S": 0
}
}
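# Minimal usage sketch (the helper name is illustrative, not part of the
# original runner): the table is read as scoring[bot1_move][bot2_move], which
# yields the round outcome from bot1's perspective.
def _example_score_round(move1, move2):
    """Illustrative only: +1 if move1 beats move2, -1 if it loses, 0 on a tie."""
    return scoring[move1][move2]
# e.g. _example_score_round("R", "S") == 1 and _example_score_round("P", "R") == 1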
class ContestResult:
"""Used to track and report on the status of a contest. Shared values
are reported from the perspective of bot1. For example, score > 0 indicates
that bot1 won by that many points. score < 0 indicates bot 1 lost by that
many points."""
# TODO bot-specific data should be a seperate object. ContestResult
# should track two of these objects and move most of the bot-specific
# data below into them.
def __init__(self, bot1, bot2):
self.bot1 = bot1
self.bot2 = bot2
self.bot1_disqualified = False
self.bot2_disqualified = False
self.finalized = False
self.errors = False
self.error_string = ""
self.wins1 = 0
self.wins2 = 0
self.ties1 = 0
self.ties2 = 0
self.losses1 = 0
self.losses2 = 0
self.score = 0
self.played = 0
self.history1 = []
self.history2 = []
self.score_history = []
self.start_time = None
self.end_time = None
self.run_time = 0.0
self.winner = None
self.loser = None
def start(self):
self.start_time = time.time()
def score_moves(self, move1, move2):
"""This function is called to score and track each pair of moves
from a contest."""
score = 0
try:
score = scoring[move1][move2]
except KeyError:
# TODO disqualify bot and exit contest
if move1 not in "RPS":
score = -1
elif move2 not in "RPS":
score = 1
else:
raise Exception("Can't score %s and %s?!" % (move1, move2))
if score > 0:
self.wins1 += 1
self.losses2 += 1
elif score < 0:
self.losses1 += 1
self.wins2 += 1
else:
self.ties1 += 1
self.ties2 += 1
self.score += score
self.history1.append(move1)
self.history2.append(move2)
self.score_history.append(score)
self.played += 1
return score
def finalize(self, errors=False, error_string=""):
"""Called once a contest is complete to do some final bookkeeping.
This is REQUIRED if multiprocessing features are in use."""
# the bots must be reset before being passed between workers
# see comments under Bot.reset()
self.bot1.reset()
self.bot2.reset()
self.errors = errors
self.error_string = error_string
self.history1 = "".join(self.history1)
self.history2 = "".join(self.history2)
self.end_time = time.time()
self.run_time = self.end_time - self.start_time
if self.wins1 > self.wins2:
self.winner = self.bot1
self.loser = self.bot2
elif self.wins1 < self.wins2:
self.winner = self.bot2
self.loser = self.bot1
self.finalized = True
def __str__(self):
game = "%s vs %s:" % (self.bot1.name, self.bot2.name)
if self.bot1_disqualified:
return "%s bot 1 disqualified" % game
elif self.bot2_disqualified:
return "%s bot 2 disqualified" % game
elif self.finalized:
return "%s score %d, took %.2f seconds" % \
(game, self.score, self.run_time)
else:
return "%s score %d -- not final" % (game, self.score)
class Contest:
"""Contest object handles running a contest between two sets of bots."""
def __init__(self, bot1, bot2, rounds=1000):
self.bot1 = bot1
self.bot2 = bot2
self.rounds = rounds
self.result = ContestResult(bot1, bot2)
# isolate random number generator
r1 = random.random()
r2 = random.random()
base_rng = random.getstate()
random.seed(r1)
self.bot1_rng = random.getstate()
random.seed(r2)
self.bot2_rng = random.getstate()
random.setstate(base_rng)
def run(self):
"""Runs the configured contest and reports a ContestResult"""
self.result.start()
base_rng = random.getstate()
input1 = input2 = output1 = output2 = ""
errors = False
error_string = ""
for i in xrange(self.rounds):
random.setstate(self.bot1_rng)
try:
output1 = self.bot1.get_move(input1)
except KeyboardInterrupt:
raise
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
exc_string = "".join(traceback.format_exception(exc_type,
exc_value, exc_traceback))
error_string = "Error from %s\n%s" % (self.bot1.name,
exc_string)
errors = True
self.result.bot1_disqualified = True
else:
if output1 not in "RPS":
errors = True
self.result.bot1_disqualified = True
error_string = "bot1 did not make a valid move"
self.bot1_rng = random.getstate()
random.setstate(self.bot2_rng)
try:
output2 = self.bot2.get_move(input2)
except KeyboardInterrupt:
raise
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
exc_string = "".join(traceback.format_exception(exc_type,
exc_value, exc_traceback))
error_string = "Error from %s\n%s" % (self.bot1.name,
exc_string)
errors = True
self.result.bot2_disqualified = True
else:
if output2 not in "RPS":
errors = True
self.result.bot2_disqualified = True
error_string = "bot2 did not make a valid move"
self.bot2_rng = random.getstate()
if errors:
break
self.result.score_moves(output1, output2)
input1 = output2
input2 = output1
# TODO add early bail out like official contest
self.result.finalize(errors=errors, error_string=error_string)
random.setstate(base_rng)
return self.result
### Main program logic
def load_bots(names, desc=None, bot_obj=Bot):
"""Initializes several Bot objects given a list of filenames.
desc is an optional output string."""
bots = []
for name in names:
bots.append(bot_obj(name))
if desc is not None:
print "%s:" % (desc),
print "%d bots loaded" % len(bots)
return bots
def match_maker(bots, bots2=None, matches=1, rounds=1000):
"""generates matches between all the bots in bots or in the union of
bots and bots2. matches specifies the number of matches played for each
pairing. a bot will never play itself."""
if not bots:
raise Exception("Must specify bots")
if not bots2:
for i in xrange(len(bots)):
bot1 = bots[i]
for j in xrange(i+1, len(bots)):
bot2 = bots[j]
if bot1 == bot2:
continue
for k in xrange(matches):
# TODO modify contest to allow for multiple matches?
yield Contest(bot1, bot2, rounds)
else:
for bot1 in bots:
for bot2 in bots2:
if bot1 == bot2:
continue
for i in xrange(matches):
# TODO modify contest to specify multiple matches?
yield Contest(bot1, bot2, rounds)
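# Illustrative usage sketch (not part of the original harness): run a small
# round-robin tournament over a list of bot filenames supplied by the caller;
# the filenames themselves are placeholders, nothing here is a real bot path.
def _example_round_robin(filenames):
    bots = load_bots(filenames, desc="round robin")
    results = [contest.run() for contest in match_maker(bots, matches=1, rounds=100)]
    report_results(bots, results)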
def report_results(bots, results):
"""Summarizes a list of ContestResults"""
# TODO this is ugly, streamline.
botnames = [i.name for i in bots]
matches_played = dict.fromkeys(botnames, 0)
matches_won = dict.fromkeys(botnames, 0)
scores = dict.fromkeys(botnames, 0)
rounds_won = dict.fromkeys(botnames, 0)
rounds_played = dict.fromkeys(botnames, 0)
bot_results = dict(zip(botnames, [list() for i in botnames]))
for result in results:
if result.errors:
print "errors in contest:", result
print result.error_string
matches_played[ result.bot1.name ] += 1
matches_played[ result.bot2.name ] += 1
if result.winner is not None:
matches_won[ result.winner.name ] += 1
<filename>midgard/parsers/_parser_sinex.py
"""Basic functionality for parsing Sinex datafiles
Description:
------------
This module contains functions and classes for parsing Sinex datafiles.
References:
-----------
* SINEX Format: https://www.iers.org/IERS/EN/Organization/AnalysisCoordinator/SinexFormat/sinex.html
"""
# Standard library imports
from datetime import datetime, timedelta
import itertools
import pathlib
from typing import cast, Any, Callable, Dict, Iterable, List, NamedTuple, Optional, Tuple, Union
# Third party imports
import numpy as np
import pandas as pd
# Midgard imports
from midgard.dev import log
from midgard.files import files
from midgard.parsers._parser import Parser
from midgard.math.unit import Unit
# A simple structure used to define a Sinex field
class SinexField(NamedTuple):
"""A convenience class for defining the fields in a Sinex block
Args:
name: Name of field.
start_col: Starting column of field (First column is 0)
dtype: String, using numpy notation, defining type of field, use None to ignore field.
converter: Optional, name of converter to apply to field data.
"""
name: str
start_col: int
dtype: Optional[str]
converter: Optional[str] = None
# A simple structure used to define a Sinex block
class SinexBlock(NamedTuple):
"""A convenience class for defining a Sinex block
Args:
marker: Sinex marker denoting the block.
fields: Fields in Sinex block.
parser: Function used to parse the data.
"""
marker: str
fields: Tuple[SinexField, ...]
parser: Callable[[np.array, Tuple[str, ...]], Dict[str, Any]]
#
# FACTORY FUNCTIONS
#
def parsing_factory() -> Callable[..., Dict[str, Any]]:
"""Create a default parsing function for a Sinex block
The default parsing function returns a dictionary containing all fields of
the block as separated arrays. This will be stored in self.data['{marker}']
with the {marker} of the current block.
Returns:
Simple parsing function for one Sinex block.
"""
def parse_func(self: "SinexParser", data: np.array, *params: str) -> Dict[str, Any]:
"""Simple parser for Sinex data
Converts the input data to a dictionary of numpy arrays and returns it
in order to add it to self.data['{marker}']. Ignores any block title
parameters.
Args:
data: Input data, raw data for the block.
params: Tuple of strings with parameters given after the marker at the start of the block.
Returns:
Dictionary with each column in the Sinex file as a numpy array.
"""
return {n: data[n] for n in data.dtype.names}
return parse_func
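# Illustrative sketch (not part of the original module): a hypothetical Sinex
# block wired to the default parsing function. The marker name, column
# positions and dtypes below are made up for demonstration only.
def _example_default_block() -> SinexBlock:
    return SinexBlock(
        marker="EXAMPLE/SITES",
        fields=(
            SinexField("site_code", 1, "U4"),
            SinexField("description", 6, "U22"),
            SinexField("value", 29, "f8"),
        ),
        parser=parsing_factory(),
    )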
def parsing_matrix_factory(marker: str, size_marker: str) -> Callable[..., Dict[str, Any]]:
"""Create a parsing function for parsing a matrix within a Sinex block
The default parsing function converts data to a symmetric matrix and stores
it inside `self.data[marker]`.
The size of the matrix is set to equal the number of parameters in the
`size_marker`-block. If that block is not parsed/found, the size is set to
the last given row index; if trailing zero elements of the matrix were
omitted, this fallback may underestimate the true size.
Args:
marker: Marker of Sinex block.
size_marker: Marker of a different Sinex block indicating the size of the matrix.
Returns:
Simple parsing function for one Sinex block.
"""
def parse_matrix_func(self: "SinexParser", data: np.array, lower_upper: str, type: str = "") -> Dict[str, Any]:
"""Parser for {marker} data
Converts the input data to a symmetric matrix and adds it to
self.data['{marker}'].
The NEQ-Matrix Row/Column Number correspond to the Estimated Parameters
Index in the {size_marker} block. Missing elements in the matrix are
assumed to be zero (0); consequently, zero elements may be omitted to
reduce the size of this block.
Args:
data: Input data, raw data for {marker} block.
lower_upper: Either 'L' or 'U', indicating whether the matrix is given in lower or upper form.
type: Information about the type of matrix, optional.
Returns:
Dictionary with symmetric matrix as a numpy array.
"""
# Size of matrix is given by {size_marker}-block, initialize to all zeros
try:
n = len(self._sinex[size_marker])
except KeyError:
n = max(data["row_idx"])
log.warn(f"{size_marker!r}-block was not parsed. Guessing at size of normal equation matrix (n={n}).")
matrix = np.zeros((n, n))
# Loop through each line of values and put it in the correct place in the matrix (cannot simply reshape as
# elements may have been omitted)
values = np.stack((data["value_0"], data["value_1"], data["value_2"]), axis=1)
for row, col, vals in zip(data["row_idx"], data["column_idx"], values):
vals = vals[~np.isnan(vals)]
idx = slice(row - 1, row), slice(col - 1, col - 1 + len(vals))
matrix[idx] = vals
# Add symmetrical elements, depending on whether the matrix is represented in lower or upper form
if lower_upper.upper() == "L":
matrix = np.tril(matrix) + np.tril(matrix, k=-1).T
elif lower_upper.upper() == "U":
matrix = np.triu(matrix) + np.triu(matrix, k=1).T
else:
log.warn(f"'L' or 'U' not specified for {marker}. Trying to create a symmetric matrix anyway.")
matrix = matrix + matrix.T - np.diag(np.diag(matrix))
return {"matrix": matrix, "type": type}
# Add information to doc-string
if parse_matrix_func.__doc__:
parse_matrix_func.__doc__ = parse_matrix_func.__doc__.format(marker=marker, size_marker=size_marker)
return parse_matrix_func
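# Illustrative sketch (not part of the original module): wiring the matrix
# parser into a block definition. The SOLUTION/* markers follow common SINEX
# naming but are assumptions here, and the column positions are made up. The
# field names match what parse_matrix_func reads (row_idx, column_idx, value_0..2).
def _example_matrix_block() -> SinexBlock:
    return SinexBlock(
        marker="SOLUTION/NORMAL_EQUATION_MATRIX",
        fields=(
            SinexField("row_idx", 1, "i8"),
            SinexField("column_idx", 7, "i8"),
            SinexField("value_0", 13, "f8"),
            SinexField("value_1", 35, "f8"),
            SinexField("value_2", 57, "f8"),
        ),
        parser=parsing_matrix_factory(
            "SOLUTION/NORMAL_EQUATION_MATRIX", "SOLUTION/ESTIMATE"
        ),
    )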
#
# SINEXPARSER CLASS
#
class SinexParser(Parser):
"""An abstract base class that has basic methods for parsing a Sinex file
This class provides functionality for parsing a sinex file with chained
groups of information. You should inherit from this one, and at least
specify which Sinex blocks you are interested in by implementing
`setup_parser`, as well as implement methods that parse each block if
needed.
"""
_TECH = {"C": "comb", "D": "doris", "L": "slr", "M": "llr", "P": "gnss", "R": "vlbi"}
def __init__(
self, file_path: Union[str, pathlib.Path], encoding: Optional[str] = None, header: bool = True
) -> None:
"""Set up the basic information needed by the parser
Add a self._sinex dictionary for the raw Sinex data and read which
blocks to read from self.setup_parser().
Args:
file_path: Path to file that will be read.
encoding: Encoding of file that will be read.
header: Whether to parse the header.
"""
super().__init__(file_path, encoding=encoding)
self._header = header
self._sinex: Dict[str, Any] = dict()
self.sinex_blocks = cast(Iterable[SinexBlock], self.setup_parser())
def setup_parser(self) -> Any:
"""Set up information needed for the parser
Each individual Sinex-parser should at least implement this method.
If the order the blocks are parsed is not important, the information
should be returned as a set for optimal performance. If the parsing
order is important, a tuple of SinexBlock-objects may be returned
instead.
Returns:
Iterable of blocks in the Sinex file that should be parsed.
"""
raise NotImplementedError
def read_data(self) -> None:
"""Read data from a Sinex file and parse the contents
First the whole Sinex file is read and the requested blocks are stored
in self._sinex. After the file has been read, a parser is called on
each block so that self.data is properly populated.
"""
# Read raw sinex data to self._sinex from file
with files.open(self.file_path, mode="rb") as fid:
if self._header:
self.parse_header_line(next(fid)) # Header must be first line
self.parse_blocks(fid)
# Apply parsers to raw sinex data, the information returned by parsers is stored in self.data
for sinex_block in self.sinex_blocks:
if sinex_block.parser and sinex_block.marker in self._sinex:
params = self._sinex.get("__params__", dict()).get(sinex_block.marker, ())
data = sinex_block.parser(self._sinex.get(sinex_block.marker), *params)
if data is not None:
self.data[sinex_block.marker] = data
def parse_blocks(self, fid: Iterable[bytes]) -> None:
"""Parse contents of Sinex blocks
Contents of Sinex blocks are stored as separate numpy-arrays in
self._sinex
Args:
fid: Pointer to file being read.
"""
# Get set of interesting Sinex blocks, index them by marker
sinex_blocks = {b.marker: b for b in self.sinex_blocks}
# Iterate until all interesting Sinex blocks have been found or whole file is read
try:
while sinex_blocks:
# Find next block (line that starts with +)
fid = itertools.dropwhile(lambda ln: not ln.startswith(b"+"), fid)
block_header = next(fid).decode(self.file_encoding or "utf-8")
marker, *params = block_header[1:].strip().split()
if marker not in sinex_blocks:
continue
# Find lines in block, remove comments and parse lines, store parameters for later
lines = [
ln for ln in itertools.takewhile(lambda ln: not ln.startswith(b"-"), fid) if ln.startswith(b" ")
]
self._sinex[marker] = self.parse_lines(lines, sinex_blocks[marker].fields)
if params:
self._sinex.setdefault("__params__", dict())[marker] = params
del sinex_blocks[marker]
except StopIteration: # File ended without reading all sinex_blocks
missing = ", ".join(sinex_blocks)
log.warn(f"SinexParser {self.parser_name!r} did not find Sinex blocks {missing} in file {self.file_path}")
def parse_lines(self, lines: List[bytes], fields: Tuple[SinexField, ...]) -> np.array:
"""Parse lines in a Sinex file
Args:
lines: Lines to parse.
fields: Definition of sinex fields in lines.
Returns:
Data contained in lines.
"""
# Set up for np.genfromtxt to parse the Sinex block
delimiter = np.diff(np.array([0] + [f.start_col for f in fields] + [81])) # Length of each field
<reponame>JamesATrevino/cassandra-dtest<filename>upgrade_tests/paging_test.py
import itertools
import time
import uuid
import pytest
import logging
from cassandra import ConsistencyLevel as CL
from cassandra import InvalidRequest
from cassandra.query import SimpleStatement, dict_factory, named_tuple_factory
from ccmlib.common import LogPatternToVersion
from dtest import RUN_STATIC_UPGRADE_MATRIX, run_scenarios, MAJOR_VERSION_4
from tools.assertions import (assert_read_timeout_or_failure, assert_lists_equal_ignoring_order)
from tools.data import rows_to_list
from tools.datahelp import create_rows, flatten_into_set, parse_data_into_dicts
from tools.misc import add_skip
from tools.paging import PageAssertionMixin, PageFetcher
from .upgrade_base import UpgradeTester
from .upgrade_manifest import build_upgrade_pairs
since = pytest.mark.since
logger = logging.getLogger(__name__)
class BasePagingTester(UpgradeTester):
def prepare(self, *args, **kwargs):
start_on, upgrade_to = self.UPGRADE_PATH.starting_meta, self.UPGRADE_PATH.upgrade_meta
if 'protocol_version' not in list(kwargs.keys()):
# Due to CASSANDRA-10880, we need to use proto v3 (instead of v4) when it's a mixed cluster of 2.2.x and 3.0.x nodes.
if start_on.family in ('2.1.x', '2.2.x') and upgrade_to.family == '3.0.x':
logger.debug("Protocol version set to v3, due to 2.1.x/2.2.x and 3.0.x mixed version cluster.")
kwargs['protocol_version'] = 3
cursor = UpgradeTester.prepare(self, *args, row_factory=kwargs.pop('row_factory', dict_factory), **kwargs)
return cursor
@pytest.mark.upgrade_test
class TestPagingSize(BasePagingTester, PageAssertionMixin):
"""
Basic tests relating to page size (relative to results set)
and validation of page size setting.
"""
def test_with_no_results(self):
"""
No errors when a page is requested and query has no results.
"""
cursor = self.prepare()
cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
# run a query that has no results and make sure it's exhausted
future = cursor.execute_async(
SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
)
pf = PageFetcher(future)
pf.request_all()
assert [] == pf.all_data()
assert not pf.has_more_pages
def test_with_less_results_than_page_size(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
|id| value |
|1 |testing |
|2 |and more testing|
|3 |and more testing|
|4 |and more testing|
|5 |and more testing|
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
future = cursor.execute_async(
SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
)
pf = PageFetcher(future)
pf.request_all()
assert not pf.has_more_pages
assert len(expected_data) == len(pf.all_data())
def test_with_more_results_than_page_size(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
|id| value |
|1 |testing |
|2 |and more testing|
|3 |and more testing|
|4 |and more testing|
|5 |and more testing|
|6 |testing |
|7 |and more testing|
|8 |and more testing|
|9 |and more testing|
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
future = cursor.execute_async(
SimpleStatement("select * from paging_test", fetch_size=5, consistency_level=CL.ALL)
)
pf = PageFetcher(future).request_all()
assert pf.pagecount() == 2
assert pf.num_results_all() == [5, 4]
# make sure expected and actual have same data elements (ignoring order)
assert_lists_equal_ignoring_order(pf.all_data(), expected_data, sort_key='value')
def test_with_equal_results_to_page_size(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
|id| value |
|1 |testing |
|2 |and more testing|
|3 |and more testing|
|4 |and more testing|
|5 |and more testing|
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
future = cursor.execute_async(
SimpleStatement("select * from paging_test", fetch_size=5, consistency_level=CL.ALL)
)
pf = PageFetcher(future).request_all()
assert pf.num_results_all() == [5]
assert pf.pagecount() == 1
# make sure expected and actual have same data elements (ignoring order)
assert_lists_equal_ignoring_order(pf.all_data(), expected_data, sort_key='value')
def test_undefined_page_size_default(self):
"""
If the page size isn't sent then the default fetch size is used.
"""
cursor = self.prepare()
cursor.execute("CREATE TABLE paging_test ( id uuid PRIMARY KEY, value text )")
def random_txt(text):
return uuid.uuid4()
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
| id |value |
*5001| [uuid] |testing |
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': random_txt, 'value': str})
future = cursor.execute_async(
SimpleStatement("select * from paging_test", consistency_level=CL.ALL)
)
pf = PageFetcher(future).request_all()
assert pf.num_results_all() == [5000, 1]
self.maxDiff = None
# make sure expected and actual have same data elements (ignoring order)
assert_lists_equal_ignoring_order(pf.all_data(), expected_data, sort_key='value')
class TestPagingWithModifiers(BasePagingTester, PageAssertionMixin):
"""
Tests concerned with paging when CQL modifiers (such as order, limit, allow filtering) are used.
"""
def test_with_order_by(self):
""""
Paging over a single partition with ordering should work.
(Spanning multiple partitions won't though, by design. See CASSANDRA-6722).
"""
cursor = self.prepare()
cursor.execute(
"""
CREATE TABLE paging_test (
id int,
value text,
PRIMARY KEY (id, value)
) WITH CLUSTERING ORDER BY (value ASC)
""")
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
|id|value|
|1 |a |
|1 |b |
|1 |c |
|1 |d |
|1 |e |
|1 |f |
|1 |g |
|1 |h |
|1 |i |
|1 |j |
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})
future = cursor.execute_async(
SimpleStatement("select * from paging_test where id = 1 order by value asc", fetch_size=5, consistency_level=CL.ALL)
)
pf = PageFetcher(future).request_all()
assert pf.pagecount() == 2
assert pf.num_results_all() == [5, 5]
# these should be equal (in the same order)
assert pf.all_data() == expected_data
# make sure we don't allow paging over multiple partitions with order because that's weird
with pytest.raises(InvalidRequest, match='Cannot page queries with both ORDER BY and a IN restriction on the partition key'):
stmt = SimpleStatement("select * from paging_test where id in (1,2) order by value asc", consistency_level=CL.ALL)
cursor.execute(stmt)
def test_with_order_by_reversed(self):
""""
Paging over a single partition with ordering and a reversed clustering order.
"""
cursor = self.prepare()
cursor.execute(
"""
CREATE TABLE paging_test (
id int,
value text,
value2 text,
value3 text,
PRIMARY KEY (id, value)
) WITH CLUSTERING ORDER BY (value DESC)
""")
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
|id|value|value2|value3|
|1 |a |a |a |
|1 |b |b |b |
|1 |c |c |c |
|1 |d |d |d |
|1 |e |e |e |
|1 |f |f |f |
|1 |g |g |g |
|1 |h |h |h |
|1 |i |i |i |
|1 |j |j |j |
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str, 'value2': str})
future = cursor.execute_async(
SimpleStatement("select * from paging_test where id = 1 order by value asc", fetch_size=3, consistency_level=CL.ALL)
)
pf = PageFetcher(future).request_all()
print("pages:", pf.num_results_all())
assert pf.pagecount() == 4
assert pf.num_results_all() == [3, 3, 3, 1]
# these should be equal (in the same order)
assert pf.all_data() == expected_data
# drop the ORDER BY
future = cursor.execute_async(
SimpleStatement("select * from paging_test where id = 1", fetch_size=3, consistency_level=CL.ALL)
)
pf = PageFetcher(future).request_all()
assert pf.pagecount() == 4
assert pf.num_results_all() == [3, 3, 3, 1]
# these should be equal (in the same order)
assert pf.all_data() == list(reversed(expected_data))
def test_with_limit(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")
def random_txt(text):
return str(uuid.uuid4())
for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
cursor.execute("TRUNCATE paging_test")
data = """
| id | value |
*5| 1 | [random text] |
*5| 2 | [random text] |
*10| 3 | [random text] |
*10| 4 | [random text] |
*20| 5 | [random text] |
*30| 6 | [random text] |
"""
expected_data = create_rows(data, cursor, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': random_txt})
scenarios = [
# using equals clause w/single partition
{'limit': 10, 'fetch': 20, 'data_size': 30, 'whereclause': 'WHERE id = 6', 'expect_pgcount': 1, 'expect_pgsizes': [10]}, # limit < fetch < data
{'limit': 10, 'fetch': 30, 'data_size': 20, 'whereclause': 'WHERE id = 5', 'expect_pgcount': 1, 'expect_pgsizes': [10]}, # limit < data < fetch
{'limit': 20, 'fetch': 10, 'data_size': 30, 'whereclause': 'WHERE id = 6', 'expect_pgcount': 2, 'expect_pgsizes': [10, 10]}, # fetch < limit < data
{'limit': 30, 'fetch': 10, 'data_size': 20, 'whereclause': 'WHERE id = 5', 'expect_pgcount': 2, 'expect_pgsizes': [10, 10]}, # fetch < data < limit
# Send message to WebSocket
await self.send(text_data=json.dumps({"message": message}))
# Receive message from room group
async def requested_message(self, event):
message = event["message"]
# print(f'data_message: {json.dumps(message)}')
# Send message to WebSocket
await self.send(text_data=json.dumps({"message": message}))
@database_sync_to_async
def get_daq_controller(
self,
host="localhost",
parent_ns_sig="default",
name="default",
force_create=True,
):
daq_controller = None
try:
controllers = DAQController.objects.filter(name=name)
for controller in controllers:
ns = controller.get_namespace()
parent_sig = ns.get_namespace_sig(section="PARENT")
if (
parent_sig["host"] == host
and parent_sig["namespace"] == parent_ns_sig
):
return controller
except DAQController.DoesNotExist:
pass
# TODO create a controller?
# daq_server = None
# if force_create:
# daq_server = DAQServer(name=name, host=host)
# daq_server.save()
# print(f"daq_server_new: {daq_server}")
return daq_controller
@database_sync_to_async
def update_daq_controller_metadata(self, controller, metadata):
if controller:
try:
meas_meta = metadata["measurement_meta"]
controller.measurement_sets = meas_meta
controller.save()
except KeyError:
pass
# Receive message from room group
async def daq_message(self, event):
message = event["message"]
# print(f'daq_message: {json.dumps(message)}')
# Send message to WebSocket
await self.send(text_data=json.dumps({"message": message}))
class InstrumentConsumer(AsyncWebsocketConsumer):
async def connect(self):
try:
# self.daqserver_namespace = self.scope["url_route"]["kwargs"][
# "daq_namespace"
# ]
# self.controller_namespace = self.scope["url_route"]["kwargs"][
# "controller_namespace"
# ]
self.daq_host = self.scope["url_route"]["kwargs"]["daq_host"]
self.parent_namespace = self.scope["url_route"]["kwargs"][
"parent_namespace"
]
self.instrument_namespace = self.scope["url_route"]["kwargs"][
"instrument_namespace"
]
except KeyError:
# self.daqserver_namespace = "default"
# self.controller_namespace = "default"
self.daq_host = "localhost"
self.parent_namespace = "default"
self.instrument_namespace = "default"
# self.instrument_group_name = f"{self.daqserver_namespace}-{self.controller_namespace}-instrument-{self.instrument_namespace}"
self.instrument_group_name = f"{self.daq_host}-{self.parent_namespace}-instrument-{self.instrument_namespace}"
# self.instrument_id = f"{self.daqserver_namespace}-{self.controller_namespace}-{self.instrument_namespace}"
self.instrument_id = f"{self.parent_namespace}-{self.instrument_namespace}"
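# For example (illustrative values only): daq_host="localhost",
# parent_namespace="acg" and instrument_namespace="trh100" give
# instrument_group_name "localhost-acg-instrument-trh100" and
# instrument_id "acg-trh100".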
self.ui_save_base_path = "/tmp/envDataSystem/UIServer"
self.ui_save_data = False
# if "DATA_MANAGER" in settings:
if "ui_save_base_path" in settings.DATA_MANAGER:
self.ui_save_base_path = settings.DATA_MANAGER["ui_save_base_path"]
if "ui_save_data" in settings.DATA_MANAGER:
self.ui_save_data = settings.DATA_MANAGER["ui_save_data"]
self.ui_save_path = self.ui_save_base_path
# path_list = [
# # self.ui_save_base_path,
# # self.daqserver_namespace,
# # self.controller_namespace,
# # self.instrument_namespace,
# self.ui_save_base_path,
# self.parent_namespace,
# self.instrument_namespace,
# ]
# self.ui_save_path = os.path.join(*path_list)
# DataManager.open_datafile(self.ui_save_path)
self.manage_group_name = "envdaq-manage"
self.registry_group_name = "envnet-manage"
await self.channel_layer.group_add(self.manage_group_name, self.channel_name)
await self.channel_layer.group_add(self.registry_group_name, self.channel_name)
# print(f'name = {self.instrument_namespace}')
# Join room group
await self.channel_layer.group_add(
self.instrument_group_name, self.channel_name
)
self.hostname = self.scope["server"][0]
self.port = self.scope["server"][1]
await self.accept()
# TODO: request config from controller?
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.instrument_group_name, self.channel_name
)
await self.channel_layer.group_discard(
self.manage_group_name, self.channel_name
)
await self.channel_layer.group_discard(
self.registry_group_name, self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
# TODO: parse incoming message
# if data, pass along to socket
# if server request (e.g., send config) send
# message to server. Do I need to send
# to whole group? Does this break down
# as controller_req, instrument_req, etc?
# if status, pass to socket
# print(f'^^^^^ {text_data}')
# text_data_json = json.loads(text_data)
# message = text_data_json['message']
# # print(f'InstrumentConsumer.receive: {message}')
# await self.channel_layer.group_send(
# self.instrument_group_name,
# {
# 'type': 'daq_message',
# 'message': message
# }
# )
# print(text_data)
# text_data_json = json.loads(text_data)
try:
data = json.loads(text_data)
except json.JSONDecodeError as e:
print(f"InstrumentConsumer error {e}")
print(f"text_data: {text_data}")
return
message = data["message"]
# print(f'message: {message}')
if message["SUBJECT"] == "DATA":
# print(f'data message')
await self.channel_layer.group_send(
self.instrument_group_name,
{"type": "daq_message", "message": message},
)
# print(f'123123123 data: {message}')
src_id = message["SENDER_ID"]
namespace = Namespace().from_dict(message["BODY"]["namespace"])
ns_sig = namespace.get_namespace_sig()
# print(f"*** update plot: {src_id}, {data}")
await PlotManager.update_data_by_source(src_id, data, server_id=namespace)
if self.ui_save_data:
# print(f"save data")
await DataManager.send_data(self.ui_save_path, data)
# print(f'message ***111***: {message}')
if "BODY" in message and "DATA_REQUEST_LIST" in message["BODY"]:
# TODO: make this a utility function
for dr in message["BODY"]["DATA_REQUEST_LIST"]:
# print("here:1")
if dr["class"] == "CONTROLLER":
# print("here:2")
controller_ns = Namespace().from_dict(dr["namespace"])
# print("here:3")
controller_parent_ns_sig = controller_ns.get_namespace_sig(
section="PARENT"
)["namespace"]
# print("here:4")
# group_name = f'controller_{dr["alias"]["name"]}'
# group_name = f"{self.daqserver_namespace}-controller-{self.controller_namespace}"
group_name = f"{self.daq_host}-{controller_parent_ns_sig}-controller-{controller_ns.name}"
# print("here:5")
await self.channel_layer.group_send(
# group_name.replace(" ", ""),
group_name,
{"type": "daq_message", "message": message},
)
# print("here:6")
# print(f'Done with Data!!!')
# if 'alias' in message['BODY']:
# alias_name = message['BODY']['alias']['name']
# alias_name = message.BODY.alias.name
# print(f'alias: {alias_name}')
# await PlotManager.update_data_by_key(alias_name, data)
elif message["SUBJECT"] == "SETTINGS":
# print(f'settings: {message}')
await self.channel_layer.group_send(
self.instrument_group_name,
{"type": "daq_message", "message": message},
)
elif message["SUBJECT"] == "PING":
# await DAQRegistry.ping(reg_id=self.instrument_id, type="Instrument")
body = message["BODY"]
# print(f"instrument ping: {body}")
namespace = Namespace().from_dict(body["namespace"])
ns_sig = namespace.get_namespace_sig()
# await DAQRegistry.ping(reg_id=self.controller_id, type="Controller")
await DAQRegistry.ping(reg_id=ns_sig, type=Namespace.INSTRUMENT)
elif message["SUBJECT"] == "REGISTRATION":
body = message["BODY"]
if body["purpose"] == "ADD":
# daq_namespace = body["namespace"]["daq_server"]
# namespace = body["namespace"]
# print(f'namespace: {self.daqserver_namespace}, {daq_namespace}')
# registration = RegistrationManager.get(body['id'])
# registration = RegistrationManager.get(daq_namespace, type="DAQServer")
ui_reconfig_request = False
self.namespace = Namespace().from_dict(body["namespace"])
ns_sig = self.namespace.get_namespace_sig()
parent_ns_sig = self.namespace.get_namespace_sig(section="PARENT")[
"namespace"
]
instrument_host = ns_sig["host"]
instrument_name = ns_sig["name"]
instrument_sig = ns_sig["namespace"]
daq_instrument = await self.get_daq_instrument(
host=instrument_host,
parent_ns_sig=parent_ns_sig,
name=instrument_name,
)
try:
metadata = body["metadata"]
await self.update_daq_instrument_metadata(daq_instrument, metadata)
PlotManager.add_apps(metadata)
except KeyError:
pass
# open Datafile for saving data on UI side
path_list = [
self.ui_save_base_path,
instrument_host,
self.namespace.get_namespace_as_path(),
]
self.ui_save_path = os.path.join(*path_list)
DataManager.open_datafile(self.ui_save_path)
registration = await DAQRegistry.get_registration(
# reg_id=self.instrument_id, type="Instrument"
reg_id=ns_sig,
type=Namespace.INSTRUMENT,
)
# print(f"registration2-get: {registration}")
# registration2 = await DAQRegistry.register(
# namespace=self.daqserver_namespace,
# type="DAQServer",
# config=body["config"],
# )
# print(f"registration2: {registration2}")
if registration:
if body["regkey"]: # daq running (likely a reconnect)
# same: daq_server config takes precedence
if body["regkey"] == registration["regkey"]:
registration["config"] = body["config"]
# RegistrationManager.update(body['id'], registration)
# RegistrationManager.update(daq_namespace, registration, type="DAQServer")
registration = await DAQRegistry.update_registration(
# reg_id=self.instrument_id,
# namespace=namespace,
# registration=registration,
# type="Instrument",
reg_id=ns_sig,
# namespace=namespace,
namespace=self.namespace.to_dict(),
registration=registration,
type=Namespace.INSTRUMENT,
)
else: # no reg, no connection to daq since UI start
if body["regkey"]: # daq has been running
ui_reconfig_request = True
registration = {
# "regkey": body["regkey"],
"config": body["config"],
}
# RegistrationManager.update(body['id'], registration)
# RegistrationManager.update(daq_namespace, registration, type="DAQServer")
registration = await DAQRegistry.update_registration(
# reg_id=self.instrument_id,
# namespace=namespace,
# registration=registration,
# type="Instrument",
reg_id=ns_sig,
# namespace=namespace,
namespace=self.namespace.to_dict(),
registration=registration,
type=Namespace.INSTRUMENT,
)
else: # daq has started
registration = await DAQRegistry.register(
# body['id'],
# reg_id=self.instrument_id,
# namespace=namespace,
# config=body["config"],
# type="Instrument",
reg_id=ns_sig,
namespace=self.namespace.to_dict(),
config=body["config"],
type=Namespace.INSTRUMENT,
)
reply = {
"TYPE": "UI",
"SENDER_ID": "InstrumentConsumer",
"TIMESTAMP": dt_to_string(),
"SUBJECT": "REGISTRATION",
"BODY": {
"purpose": "SUCCESS",
"regkey": registration["regkey"],
"config": registration["config"],
"ui_reconfig_request": ui_reconfig_request,
},
}
# print(f"reply2: {json.dumps(reply)}")
# ui_reconfig_request = False
# registration = RegistrationManager.get(
# self.daqserver_namespace, type="DAQServer"
# )
# if registration: # reg exists - UI running, unknown daq state
# # if daq_server has key, check against current registration
# if body["regkey"]: # daq running (likely a reconnect)
# # same: daq_server config takes precedence
# if body["regkey"] == registration["regkey"]:
# registration["config"] = body["config"]
# # RegistrationManager.update(body['id'], registration)
# # RegistrationManager.update(daq_namespace, registration, type="DAQServer")
# RegistrationManager.update(
# self.daqserver_namespace, registration, type="DAQServer"
# )
# else: # no reg, no connection to daq since UI start
# if body["regkey"]: # daq has been running
# ui_reconfig_request = True
# registration = {
# "regkey": body["regkey"],
# "config": body["config"],
# }
# # RegistrationManager.update(body['id'], registration)
# # RegistrationManager.update(daq_namespace, registration, type="DAQServer")
# RegistrationManager.update(
# self.daqserver_namespace, registration, type="DAQServer"
# )
# else: # daq has started
# registration = RegistrationManager.add(
# # body['id'],
# self.daqserver_namespace,
# config=body["config"],
# type="DAQServer",
# )
# print("before reply")
# reply = {
# "TYPE": "UI",
# "SENDER_ID": "DAQServerConsumer",
# "TIMESTAMP": dt_to_string(),
# "SUBJECT": "REGISTRATION",
# "BODY": {
# "purpose": "SUCCESS",
# "regkey": registration["regkey"],
# "config": registration["config"],
# "ui_reconfig_request": ui_reconfig_request,
# },
# }
# print(f"reply: {reply}")
# print(json.dumps(reply))
await self.daq_message({"message": reply})
instrument_registration_map = await DAQRegistry.get_registry(
type=Namespace.INSTRUMENT
)
msg_body = {
"purpose": "REGISTRY",
"instrument_registry": instrument_registration_map,
# "controller_registry": controller_registration_map,
# "instrument_registry": instrument_registration_map,
}
message = Message(
msgtype="UI",
sender_id="InstrumentConsumer",
subject="DAQServerRegistry",
body=msg_body,
)
print(f"message_to_dict: {message.to_dict()}")
# await self.daq_message(message.to_dict())
await self.channel_layer.group_send(
self.manage_group_name,
{
"type": "daq_message",
"message": message.to_dict()["message"],
},
)
if body["purpose"] == "REMOVE":
print("remove")
body = message["BODY"]
namespace = Namespace().from_dict(body["namespace"])
ns_sig = namespace.get_namespace_sig()
await DAQRegistry.unregister(
# reg_id=self.controller_id,
reg_id=ns_sig,
type=Namespace.INSTRUMENT,
)
reply = {
"TYPE": "UI",
"SENDER_ID": "InstrumentConsumer",
"TIMESTAMP": dt_to_string(),
"SUBJECT": "UNREGISTRATION",
"BODY": {
"purpose": "SUCCESS",
},
}
print(f"success: {reply}")
await self.daq_message({"message": reply})
instrument_registration_map = await DAQRegistry.get_registry(
type=Namespace.INSTRUMENT
)
msg_body = {
"purpose": "REGISTRY",
"instrument_registry": instrument_registration_map,
# "controller_registry": controller_registration_map,
# "instrument_registry": instrument_registration_map,
}
message = Message(
msgtype="UI",
sender_id="InstrumentConsumer",
subject="DAQServerRegistry",
body=msg_body,
)
print(f"message_to_dict: {message.to_dict()}")
# await self.daq_message(message.to_dict())
await self.channel_layer.group_send(
self.manage_group_name,
{
"type": "daq_message",
"message": message.to_dict()["message"],
},
)
DataManager.close_datafile(self.ui_save_base_path)
# RegistrationManager.remove(body['id'])
# RegistrationManager.remove(self.daqserver_namespace, type="DAQServer")
# await DAQRegistry.unregister(
# namespace=self.instrument_id, type="Instrument"
# )
elif message["SUBJECT"] == "CONFIG":
body = message["BODY"]
# if (body['purpose'] == 'REQUEST'):
# if (body['type'] == 'ENVDAQ_CONFIG'):
# # do we ever get here?
# cfg = await ConfigurationUtility().get_config()
# reply = {
# 'TYPE': 'GUI',
# 'SENDER_ID': 'DAQServerConsumer',
# | |
# Copyright 2009-2010 by <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import users
import csv
import datetime
import logging
import re
import zipfile
from StringIO import StringIO
import kml
import setup
from feedlib.geo import point_inside_polygon
from model import *
from pakistan_data import *
SHORELAND_URL = 'http://shoreland.com/'
SHORELAND_EMAIL = '<EMAIL>'
SHORELAND_NICKNAME = '<NAME>'
SHORELAND_AFFILIATION = 'Shoreland Inc.'
PAKISTAN_URL = 'http://www.google.com/mapmaker/'
PAKISTAN_EMAIL = '<EMAIL>'
PAKISTAN_NICKNAME = 'Google MapMaker'
PAKISTAN_AFFILIATION = 'Google Inc.'
def strip_or_none(value):
"""Converts strings to their stripped values or None, while preserving
values of other types."""
if isinstance(value, db.Text):
# We have to preserve db.Text as db.Text, or the datastore might
# reject it. (Strings longer than 500 characters are not storable.)
return db.Text(value.strip()) or None
if isinstance(value, basestring):
return value.strip() or None
return value
class ValueInfo:
"""Keeps track of an attribute value and metadata."""
def __init__(self, value, observed=None, source=None, comment=None):
self.value = strip_or_none(value)
self.observed = observed
self.comment = self.combine_comment(source, comment)
def combine_comment(self, source, comment):
source = strip_or_none(source)
comment = strip_or_none(comment)
if source is not None:
source = 'Source: %s' % source
comment = comment and '%s; %s' % (source, comment) or source
return comment
def __repr__(self):
return 'ValueInfo(%r, observed=%r, comment=%r)' % (
self.value, self.observed, self.comment)
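# Illustrative example (values made up): ValueInfo('  Clinique X ', source='PAHO',
# comment='verified on site') has value 'Clinique X' and comment
# 'Source: PAHO; verified on site'.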
def convert_paho_record(record):
"""Converts a dictionary of values from one row of a PAHO CSV file
into a dictionary of ValueInfo objects for our datastore."""
title = (record['Fac_NameFr'].strip() or record['NomInstitu'].strip())
if not record.get('HealthC_ID').strip():
# TODO(shakusa) Fix this. We should be importing all facilities.
logging.warn('Skipping %r (%s): Invalid HealthC_ID: "%s"' % (
title, record.get('PCode'), record.get('HealthC_ID')))
return None, None, None
key_name = 'paho.org/HealthC_ID/' + record['HealthC_ID'].strip()
title = (record['Fac_NameFr'].strip() or record['NomInstitu'].strip())
alt_title = (title == record['Fac_NameFr'].strip() and
record['NomInstitu'].strip() or '')
try:
latitude = float(record['X_DDS'])
longitude = float(record['Y_DDS'])
except ValueError:
# TODO(shakusa) Fix this. We should be importing all facilities.
logging.warn('Skipping %r (%s): X_DDS=%r Y_DDS=%r' % (
title, key_name, record.get('X_DDS'), record.get('Y_DDS')))
return None, None, None
return key_name, None, {
'title': ValueInfo(title),
'alt_title': ValueInfo(alt_title),
'healthc_id': ValueInfo(
record['HealthC_ID'],
comment=record['AlternateHealthCIDDeleted']),
'pcode': ValueInfo(record['PCode']),
'organization': ValueInfo(record['Oorganisat']),
'department': ValueInfo(record['Departemen']),
'district': ValueInfo(record['DistrictNom']),
'commune': ValueInfo(record['Commune']),
'address': ValueInfo(record['Address']),
'location': ValueInfo(db.GeoPt(latitude, longitude),
source=record['SourceHospitalCoordinates'],
comment=record['AlternateCoordinates']),
'accuracy': ValueInfo(record['Accuracy']),
'phone': ValueInfo(record['Telephone']),
'email': ValueInfo(record['email']),
'organization_type': ValueInfo(record['Type']),
'category': ValueInfo(record['Categorie']),
'damage': ValueInfo(record['Damage'],
observed=parse_paho_date(record['DateDamage']),
source=record['SourceDamage']),
'operational_status': ValueInfo(
record['OperationalStatus'],
observed=parse_paho_date(record['DateOperationalStatus']),
source=record['SourceOperationalStatus']),
'comments': ValueInfo(db.Text(record['Comment'])),
'region_id': ValueInfo(record['RegionId']),
'district_id': ValueInfo(record['DistrictId']),
'commune_id': ValueInfo(record['CommuneId']),
'commune_code': ValueInfo(record['CodeCommun']),
'sante_id': ValueInfo(record['SanteID'])
}
def convert_shoreland_record(record):
"""Converts a dictionary of values from one row of a Shoreland CSV file
into a dictionary of ValueInfo objects for our datastore."""
title = record['facility_name'].strip()
healthc_id = record.get(
'healthc_id', record.get('facility_healthc_id', '')).strip()
pcode = record.get('pcode', record.get('facility_pcode', '')).strip()
if not healthc_id:
# Every row in a Shoreland CSV file should have a non-blank healthc_id.
logging.warn('Skipping %r (pcode %s): no HealthC_ID' % (title, pcode))
return None, None, None
subject_name = 'paho.org/HealthC_ID/' + healthc_id
try:
latitude = float(record['latitude'])
longitude = float(record['longitude'])
location = db.GeoPt(latitude, longitude)
except (KeyError, ValueError):
logging.warn('No location for %r (%s): latitude=%r longitude=%r' % (
title, healthc_id, record.get('latitude'), record.get('longitude')))
location = None
observed = None
if record.get('entry_last_updated'):
observed = parse_shoreland_datetime(record['entry_last_updated'])
# The CSV 'type' column maps to our 'category' attribute.
CATEGORY_MAP = {
None: '',
'': '',
'Clinic': 'CLINIC',
'Dispensary': 'DISPENSARY',
'Hospital': 'HOSPITAL',
'Mobile Clinic': 'MOBILE_CLINIC'
}
# The CSV 'category' column maps to our 'organization_type' attribute.
ORGANIZATION_TYPE_MAP = {
None: '',
'': '',
'Community': 'COMMUNITY',
'Faith-Based Org': 'FAITH_BASED',
'For Profit': 'FOR_PROFIT',
'Military': 'MILITARY',
'Mixed': 'MIXED',
'NGO': 'NGO',
'Public': 'PUBLIC',
'University': 'UNIVERSITY'
}
# The CSV 'operational_status' column has two possible values.
OPERATIONAL_STATUS_MAP = {
None: '',
'': '',
'Open': 'OPERATIONAL',
'Closed or Closing': 'CLOSED_OR_CLOSING'
}
# The CSV 'services' column contains space-separated abbreviations.
SERVICE_MAP = {
'GenSurg': 'GENERAL_SURGERY',
'Ortho': 'ORTHOPEDICS',
'Neuro': 'NEUROSURGERY',
'Vascular': 'VASCULAR_SURGERY',
'IntMed': 'INTERNAL_MEDICINE',
'Cardiology': 'CARDIOLOGY',
'ID': 'INFECTIOUS_DISEASE',
'Peds': 'PEDIATRICS',
'OB': 'OBSTETRICS_GYNECOLOGY',
'Dialysis': 'DIALYSIS',
'MentalHealth': 'MENTAL_HEALTH',
'Rehab': 'REHABILITATION'
}
service_list = []
for keyword in record.get('services', '').split():
service_list.append(SERVICE_MAP[keyword])
if record.get('services_last_updated'):
services = ValueInfo(service_list, parse_shoreland_datetime(
record['services_last_updated']))
else:
services = ValueInfo(service_list)
return subject_name, observed, {
'title': ValueInfo(title),
'healthc_id': ValueInfo(healthc_id),
'pcode': ValueInfo(pcode),
# <NAME> recommends (2010-06-07) ignoring the available_beds column.
'available_beds': ValueInfo(None),
# NOTE(kpy): Intentionally treating total_beds=0 as "number unknown".
'total_beds': ValueInfo(
record.get('total_beds') and int(record['total_beds']) or None,
comment=record.get('BED TRACKING COMMENTS')),
# Didn't bother to convert the 'services' field because it's empty
# in the CSV from Shoreland.
'contact_name': ValueInfo(record.get('contact_name')),
'phone': ValueInfo(record.get('contact_phone')),
'email': ValueInfo(record.get('contact_email')),
'department': ValueInfo(record.get('department')),
'district': ValueInfo(record.get('district')),
'commune': ValueInfo(record.get('commune')),
'address': ValueInfo(record.get('address')),
'location': ValueInfo(location),
'accuracy': ValueInfo(record.get('accuracy')),
'organization': ValueInfo(record.get('organization')),
# The 'type' and 'category' columns are swapped.
'organization_type':
ValueInfo(ORGANIZATION_TYPE_MAP[record.get('category')]),
'category': ValueInfo(CATEGORY_MAP[record.get('type')]),
# Didn't bother to convert the 'construction' field because it's empty
# in the CSV from Shoreland.
'damage': ValueInfo(record.get('damage')),
'operational_status':
ValueInfo(OPERATIONAL_STATUS_MAP[record.get('operational_status')]),
'services': services,
'comments': ValueInfo(db.Text(record.get('comments', ''))),
'region_id': ValueInfo(record.get('region_id')),
'district_id': ValueInfo(record.get('district_id')),
'commune_id': ValueInfo(record.get('commune_id')),
'sante_id': ValueInfo(record.get('sante_id'))
}
def convert_pakistan_record(record):
"""Converts a dictionary of values from one placemark of a Mapmaker-
exported KML file into a dictionary of ValueInfo objects for our
datastore."""
title = ''
type = ''
description = ''
comments = []
CDATA_PATTERN = re.compile(r'<b>(.*):</b> <i>(.*)</i>.*')
for line in record.get('comment', '').strip().split('\n'):
line = line.strip()
match = CDATA_PATTERN.match(line)
if match:
key = match.group(1)
value = match.group(2)
if key == 'ID':
# ID is given as a pair of colon-separated hex strings
# Only the second part is stable. It is more common externally
# to see base-10 ids, so convert. Finally, the mapmaker URL
# needs both parts.
id_parts = value.split(':')
int_id_parts = list(str(int(part, 16)) for part in id_parts)
record['id'] = int_id_parts[1]
record['maps_link'] = ('http://www.google.com/mapmaker?q=' +
':'.join(int_id_parts))
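# For example (made-up value): an ID of '10:1f' splits into ['10', '1f'],
# converts to ['16', '31'], so record['id'] becomes '31' and maps_link
# ends with '?q=16:31'.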
if key == 'NAME1':
title = value
if key == 'TYPE' or key == 'ETYPE':
type = value
elif key == 'LANGNAME':
record['alt_title'] = value
elif key == 'PHONE':
record['phone'] = value
elif key == 'MOBILE':
record['mobile'] = value
elif key == 'FAX':
record['fax'] = value
elif key == 'EMAIL':
record['email'] = value
elif key == 'ADDRESS':
record['address'] = value
elif key == 'DESCRIPTIO':
description = value
elif value in PAKISTAN_ADMIN_AREAS:
if value == 'North West Frontier':
# Officially renamed;
# See http://en.wikipedia.org/wiki/Khyber-Pakhtunkhwa
value = 'Khyber Pakhtunkhwa'
record['admin_area'] = value
elif value in PAKISTAN_DISTRICTS:
record['sub_admin_area'] = value
if key not in ['ADDRESS', 'CENTERTYPE', 'CNTRYCODE', 'DESCRIPTIO',
'EMAIL', 'FAX', 'ID', 'LANG1', 'LANGNAME', 'MOBILE',
'NAME1', 'PHONE']:
comments.append('%s: %s' % (key, value))
if not record.get('id'):
logging.warn('SKIPPING, NO ID: %r' % record)
return None, None, None
try:
latitude = float(record['location'][1])
longitude = float(record['location'][0])
location = db.GeoPt(latitude, longitude)
except (KeyError, ValueError):
logging.warn('No location for %r' % record)
location = None
if not (point_inside_polygon(
{'lat': latitude, 'lon': longitude}, PAKISTAN_FLOOD_POLYGON) or
record.get('sub_admin_area', '') in PAKISTAN_FLOOD_DISTRICTS):
return None, None, None
title = title or record.get('title', '').strip()
if not title:
logging.warn('SKIPPING, NO TITLE: %r' % record)
return None, None, None
if description:
# Make sure it's the first comment
comments.insert(0, description)
subject_name = 'mapmaker.google.com/fid/' + record['id']
title_lower = title.lower()
if type == 'TYPE_HOSPITAL' or 'hospital' in title_lower:
record['category'] = 'HOSPITAL'
elif 'clinic' in title_lower:
record['category'] = 'CLINIC'
elif 'laborator' in title_lower or 'labs ' in title_lower:
record['category'] = 'LABORATORY'
elif 'dispensary' in title_lower:
record['category'] = 'DISPENSARY'
observed = None
return subject_name, observed, {
'id': ValueInfo(record['id']),
'title': ValueInfo(title),
'alt_title': ValueInfo(record.get('alt_title', '')),
'address': ValueInfo(record.get('address', '')),
'administrative_area': ValueInfo(record.get('admin_area', '')),
'sub_administrative_area': ValueInfo(record.get('sub_admin_area', '')),
'locality': ValueInfo(record.get('locality', '')),
'location': ValueInfo(location),
'maps_link': ValueInfo(record['maps_link']),
'phone': ValueInfo(record.get('phone', '')),
'mobile': ValueInfo(record.get('mobile', '')),
'fax': ValueInfo(record.get('fax', '')),
'email': ValueInfo(record.get('email', '')),
'category': ValueInfo(record.get('category', '')),
'comments': ValueInfo(db.Text('\n'.join(comments))),
}
def load(
filename, record_reader, record_converter, subdomain, subject_type_name,
source_url, default_observed, author, author_nickname, author_affiliation,
limit=None):
"""Loads a file of records into the datastore.
Args:
filename: name of the file to load
record_reader: function that takes a file and returns a record iterator
record_converter: function that takes a parsed record and returns a
(subject_name, observed, values) triple, where observed is a datetime
and values is a dictionary of attribute names to ValueInfo objects
subdomain: name of the subdomain to load the data into
acceptable types for their
corresponding properties or a TypeError is raised.
If the value of the key property is set, it must not collide with
other key strings or a ValueError is raised.
If the value of a Link or Multilink property contains an invalid
node id, a ValueError is raised.
"""
raise NotImplementedError
def retire(self, nodeid):
"""Retire a node.
The properties on the node remain available from the get() method,
and the node's id is never reused.
Retired nodes are not returned by the find(), list(), or lookup()
methods, and other nodes may reuse the values of their key properties.
"""
raise NotImplementedError
def restore(self, nodeid):
"""Restpre a retired node.
Make node available for all operations like it was before retirement.
"""
raise NotImplementedError
def is_retired(self, nodeid):
"""Return true if the node is rerired
"""
raise NotImplementedError
def destroy(self, nodeid):
"""Destroy a node.
WARNING: this method should never be used except in extremely rare
situations where there could never be links to the node being
deleted
WARNING: use retire() instead
WARNING: the properties of this node will not be available ever again
WARNING: really, use retire() instead
Well, I think that's enough warnings. This method exists mostly to
support the session storage of the cgi interface.
The node is completely removed from the hyperdb, including all journal
entries. It will no longer be available, and will generally break code
if there are any references to the node.
"""
def history(self, nodeid, enforceperm=True, skipquiet=True):
"""Retrieve the journal of edits on a particular node.
'nodeid' must be the id of an existing node of this class or an
IndexError is raised.
The returned list contains tuples of the form
(date, tag, action, params)
'date' is a Timestamp object specifying the time of the change and
'tag' is the journaltag specified when the database was opened.
If the property to be displayed is a quiet property, it will
not be shown. This can be disabled by setting skipquiet=False.
If the user requesting the history does not have View access
to the property, the journal entry will not be shown. This can
be disabled by setting enforceperm=False.
Note that there is a check for obsolete properties and classes
resulting from history changes. These are also only checked if
enforceperm is True.
"""
if not self.do_journal:
raise ValueError('Journalling is disabled for this class')
perm = self.db.security.hasPermission
journal = []
uid=self.db.getuid() # id of the person requesting the history
# Roles of the user and the configured obsolete_history_roles
hr = set(iter_roles(self.db.config.OBSOLETE_HISTORY_ROLES))
ur = set(self.db.user.get_roles(uid))
allow_obsolete = bool(hr & ur)
for j in self.db.getjournal(self.classname, nodeid):
# hide/remove journal entry if:
# property is quiet
# property is not (viewable or editable)
# property is obsolete and not allow_obsolete
id, evt_date, user, action, args = j
if logger.isEnabledFor(logging.DEBUG):
j_repr = "%s"%(j,)
else:
j_repr=''
if args and type(args) == type({}):
for key in args.keys():
if key not in self.properties :
if enforceperm and not allow_obsolete:
del j[4][key]
continue
if skipquiet and self.properties[key].quiet:
logger.debug("skipping quiet property"
" %s::%s in %s",
self.classname, key, j_repr)
del j[4][key]
continue
if enforceperm and not ( perm("View",
uid,
self.classname,
property=key ) or perm("Edit",
uid,
self.classname,
property=key )):
logger.debug("skipping unaccessible property "
"%s::%s seen by user%s in %s",
self.classname, key, uid, j_repr)
del j[4][key]
continue
if not args:
logger.debug("Omitting journal entry for %s%s"
" all props removed in: %s",
self.classname, nodeid, j_repr)
continue
journal.append(j)
elif action in ['link', 'unlink' ] and type(args) == type(()):
# definitions:
# myself - object whose history is being filtered
# linkee - object/class whose property is changing to
# include/remove myself
# link property - property of the linkee class that is changing
#
# Remove the history item if
# linkee.link property (key) is quiet
# linkee class.link property is not (viewable or editable)
# to user
# [ should linkee object.link property is not
# (viewable or editable) to user be included?? ]
# linkee object (linkcl, linkid) is not
# (viewable or editable) to user
if len(args) == 3:
# e.g. for issue3 blockedby adds link to issue5 with:
# j = id, evt_date, user, action, args
# 3|20170528045201.484|5|link|('issue', '5', 'blockedby')
linkcl, linkid, key = args
cls = None
try:
cls = self.db.getclass(linkcl)
except KeyError:
pass
# obsolete property or class
if not cls or key not in cls.properties:
if not enforceperm or allow_obsolete:
journal.append(j)
continue
# is the updated property quiet?
if skipquiet and cls.properties[key].quiet:
logger.debug("skipping quiet property: "
"%s %sed %s%s",
j_repr, action, self.classname, nodeid)
continue
# can user view the property in linkee class
if enforceperm and not (perm("View",
uid,
linkcl,
property=key) or perm("Edit",
uid,
linkcl,
property=key)):
logger.debug("skipping unaccessible property: "
"%s with uid %s %sed %s%s",
j_repr, uid, action,
self.classname, nodeid)
continue
# check access to linkee object
if enforceperm and not (perm("View",
uid,
cls.classname,
itemid=linkid) or perm("Edit",
uid,
cls.classname,
itemid=linkid)):
logger.debug("skipping unaccessible object: "
"%s uid %s %sed %s%s",
j_repr, uid, action,
self.classname, nodeid)
continue
journal.append(j)
else:
logger.error("Invalid %s journal entry for %s%s: %s",
action, self.classname, nodeid, j)
elif action in ['create', 'retired', 'restored']:
journal.append(j)
else:
logger.warning("Possibly malformed journal for %s%s %s",
self.classname, nodeid, j)
return journal
# Locating nodes:
def hasnode(self, nodeid):
"""Determine if the given nodeid actually exists
"""
raise NotImplementedError
def setkey(self, propname):
"""Select a String property of this class to be the key property.
'propname' must be the name of a String property of this class or
None, or a TypeError is raised. The values of the key property on
all existing nodes must be unique or a ValueError is raised.
"""
raise NotImplementedError
def setlabelprop(self, labelprop):
"""Set the label property. Used for override of labelprop
resolution order.
"""
if labelprop not in self.getprops():
raise ValueError, _("Not a property name: %s") % labelprop
self._labelprop = labelprop
def setorderprop(self, orderprop):
"""Set the order property. Used for override of orderprop
resolution order
"""
if orderprop not in self.getprops():
raise ValueError, _("Not a property name: %s") % orderprop
self._orderprop = orderprop
def getkey(self):
"""Return the name of the key property for this class or None."""
raise NotImplementedError
def labelprop(self, default_to_id=0):
"""Return the property name for a label for the given node.
This method attempts to generate a consistent label for the node.
It tries the following in order:
0. self._labelprop if set
1. key property
2. "name" property
3. "title" property
4. first property from the sorted property name list
"""
if hasattr(self, '_labelprop'):
return self._labelprop
k = self.getkey()
if k:
return k
props = self.getprops()
if props.has_key('name'):
return 'name'
elif props.has_key('title'):
return 'title'
if default_to_id:
return 'id'
props = props.keys()
props.sort()
return props[0]
def orderprop(self):
"""Return the property name to use for sorting for the given node.
This method computes the property for sorting.
It tries the following in order:
0. self._orderprop if set
1. "order" property
2. self.labelprop()
"""
if hasattr(self, '_orderprop'):
return self._orderprop
props = self.getprops()
if props.has_key('order'):
return 'order'
return self.labelprop()
def lookup(self, keyvalue):
"""Locate a particular node by its key property and return its id.
If this class has no key property, a TypeError is raised. If the
'keyvalue' matches one of the values for the key property among
the nodes in this class, the matching node's id is returned;
otherwise a KeyError is raised.
"""
raise NotImplementedError
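# Illustrative example (class and property names made up): if "username" is the
# key property of a user class, db.user.lookup('admin') returns the id of the
# user whose username is 'admin', or raises KeyError if there is none.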
def find(self, **propspec):
"""Get the ids of nodes in this class which link to the given nodes.
'propspec' consists of keyword args propname={nodeid:1,}
'propname' must be the name of a property in this class, or a
KeyError is raised. That property must be a Link or Multilink
property, or a TypeError is raised.
Any node in this class whose 'propname' property links to any of the
nodeids will be returned. Used by the full text indexing, which knows
that "foo" occurs in msg1, msg3 and file7, so | |
import time, sys, collections, os
from textwrap import dedent
import psycopg2
# NOTE: instead of using the python csv writer, this directly writes tables to
# file in the pyomo .tab format. This uses tabs between columns and the standard
# line break for the system it is run on. This does the following translations (only):
# - If a value contains double quotes, they get doubled.
# - If a value contains a single quote, tab or space character, the value gets enclosed in double quotes.
# (Note that pyomo doesn't allow quoting (and therefore spaces) in column headers.)
# - null values are converted to . (the pyomo/ampl standard for missing data)
# - any other values are simply passed to str().
# NOTE: this does not use the python csv writer because it doesn't support the quoting
# or null behaviors described above.
# NOTE: ANSI SQL specifies single quotes for literal strings, and postgres conforms
# to this, so all the queries below should use single quotes around strings.
# NOTE: write_table() will automatically convert null values to '.',
# so pyomo will recognize them as missing data
# NOTE: the code below could be made more generic, e.g., a list of
# table names and queries, which are then processed at the end.
# But that would be harder to debug, and wouldn't allow for ad hoc
# calculations or writing .dat files (which are used for a few parameters)
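# The translation rules above amount to a small per-value formatter. The sketch
# below is illustrative only (the helper name is hypothetical and not part of this
# script; write_table presumably applies equivalent logic when rendering each row):
def _format_tab_value(value):
    """Render a single value for a pyomo .tab file per the rules described above."""
    if value is None:
        return '.'                          # pyomo/ampl marker for missing data
    text = str(value)                       # any other value is simply str()'ed
    text = text.replace('"', '""')          # embedded double quotes get doubled
    if "'" in text or '\t' in text or ' ' in text:
        text = '"' + text + '"'             # quote values containing quote/tab/space
    return text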
def write_tables(**args):
#########################
# timescales
write_table('periods.tab', """
WITH period_length as (
SELECT
CASE WHEN max(period) = min(period)
THEN
-- one-period model; assume length = number of days provided
sum(ts_scale_to_period) / 365
ELSE
-- multi-period model; count number of years between periods
(max(period)-min(period)) / (count(distinct period)-1)
END as length
FROM study_date WHERE time_sample = %(time_sample)s
)
SELECT period AS "INVESTMENT_PERIOD",
period as period_start,
period + length - 1 as period_end
FROM study_periods, period_length
WHERE time_sample = %(time_sample)s
ORDER by 1;
""", args)
write_table('timeseries.tab', """
SELECT study_date as "TIMESERIES", period as ts_period,
ts_duration_of_tp, ts_num_tps, ts_scale_to_period
FROM study_date
WHERE time_sample = %(time_sample)s
ORDER BY 1;
""", args)
write_table('timepoints.tab', """
SELECT h.study_hour as timepoint_id,
to_char(date_time + (period - extract(year from date_time)) * interval '1 year',
'YYYY-MM-DD-HH24:MI') as timestamp,
h.study_date as timeseries
FROM study_hour h JOIN study_date d USING (study_date, time_sample)
WHERE h.time_sample = %(time_sample)s
ORDER BY period, extract(doy from date), study_hour;
""", args)
#########################
# financials
# this just uses a dat file, not a table (and the values are not in a database for now)
write_dat_file(
'financials.dat',
['base_financial_year', 'interest_rate', 'discount_rate'],
args
)
#########################
# load_zones
# note: we don't provide the following fields in this version:
# lz_cost_multipliers, lz_ccs_distance_km, lz_dbid,
# existing_local_td, local_td_annual_cost_per_mw
write_table('load_zones.tab', """
SELECT load_zone as "LOAD_ZONE"
FROM load_zone
WHERE load_zone in %(load_zones)s
""", args)
# NOTE: we don't provide lz_peak_loads.tab (sometimes used by local_td.py) in this version.
# get system loads, scaled from the historical years to the model years
# note: 'offset' is a keyword in postgresql, so we use double-quotes to specify the column name
write_table('loads.tab', """
SELECT
l.load_zone AS "LOAD_ZONE",
study_hour AS "TIMEPOINT",
system_load * scale + "offset" AS lz_demand_mw
FROM study_date d
JOIN study_hour h USING (time_sample, study_date)
JOIN system_load l USING (date_time)
JOIN system_load_scale s ON (
s.load_zone = l.load_zone
AND s.year_hist = extract(year from l.date_time)
AND s.year_fore = d.period)
WHERE l.load_zone in %(load_zones)s
AND d.time_sample = %(time_sample)s
AND load_scen_id = %(load_scen_id)s;
""", args)
#########################
# fuels
write_table('non_fuel_energy_sources.tab', """
SELECT DISTINCT fuel AS "NON_FUEL_ENERGY_SOURCES"
FROM generator_costs
WHERE fuel NOT IN (SELECT fuel_type FROM fuel_costs)
AND min_vintage_year <= (SELECT MAX(period) FROM study_periods WHERE time_sample = %(time_sample)s)
UNION DISTINCT
SELECT aer_fuel_code AS "NON_FUEL_ENERGY_SOURCES"
FROM existing_plants
WHERE aer_fuel_code NOT IN (SELECT fuel_type FROM fuel_costs)
AND load_zone in %(load_zones)s
AND insvyear <= (SELECT MAX(period) FROM study_periods WHERE time_sample = %(time_sample)s)
AND technology NOT IN %(exclude_technologies)s;
""", args)
# gather info on fuels
write_table('fuels.tab', """
SELECT DISTINCT c.fuel_type AS fuel, co2_intensity, 0.0 AS upstream_co2_intensity, rps_eligible
FROM fuel_costs c JOIN energy_source_properties p on (p.energy_source = c.fuel_type)
WHERE load_zone in %(load_zones)s AND fuel_scen_id=%(fuel_scen_id)s;
""", args)
#########################
# rps targets
write_tab_file(
'rps_targets.tab',
headers=('year', 'rps_target'),
data=[(y, args['rps_targets'][y]) for y in sorted(args['rps_targets'].keys())],
arguments=args
)
#########################
# fuel_markets
# deflate HECO fuel scenarios to base year, and inflate EIA-based scenarios
# from 2013 (forecast base year) to model base year. (ugh)
# TODO: add a flag to fuel_costs indicating whether forecasts are real or nominal,
# and base year, and possibly inflation rate.
if args['fuel_scen_id'] in ('1', '2', '3'):
inflator = 'power(1.0+%(inflation_rate)s, %(base_financial_year)s-c.year)'
elif args['fuel_scen_id'].startswith('EIA'):
inflator = 'power(1.0+%(inflation_rate)s, %(base_financial_year)s-2013)'
else:
inflator = '1.0'
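# Worked example (illustrative numbers only): with inflation_rate = 0.025 and
# base_financial_year = 2021, an EIA price quoted in 2013 dollars is multiplied by
# 1.025**(2021-2013) ~= 1.22 to restate it in model base-year dollars.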
if args.get("use_simple_fuel_costs", False):
# simple fuel markets with no bulk LNG expansion option
# (use fuel_cost module)
# TODO: get monthly fuel costs from Karl Jandoc spreadsheet
write_table('fuel_cost.tab', """
SELECT load_zone, fuel_type as fuel, period,
price_mmbtu * {inflator} as fuel_cost
FROM fuel_costs c JOIN study_periods p ON (c.year=p.period)
WHERE load_zone in %(load_zones)s
AND fuel_scen_id = %(fuel_scen_id)s
AND p.time_sample = %(time_sample)s
AND NOT (fuel_type='LNG' AND tier='bulk')
ORDER BY 1, 2, 3;
""".format(inflator=inflator), args)
else:
# advanced fuel markets with LNG expansion options (used by forward-looking models)
# (use fuel_markets module)
write_table('regional_fuel_markets.tab', """
SELECT DISTINCT concat('Hawaii_', fuel_type) AS regional_fuel_market, fuel_type AS fuel
FROM fuel_costs
WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s;
""", args)
write_table('fuel_supply_curves.tab', """
SELECT concat('Hawaii_', fuel_type) as regional_fuel_market, fuel_type as fuel,
period, tier, price_mmbtu * {inflator} as unit_cost,
%(bulk_lng_limit)s AS max_avail_at_cost,
CASE WHEN fuel_type='LNG' AND tier='bulk' THEN %(bulk_lng_fixed_cost)s ELSE 0.0 END
AS fixed_cost
FROM fuel_costs c JOIN study_periods p ON (c.year=p.period)
WHERE load_zone in %(load_zones)s
AND fuel_scen_id = %(fuel_scen_id)s
AND p.time_sample = %(time_sample)s
ORDER BY 1, 2, 3;
""".format(inflator=inflator), args)
write_table('lz_to_regional_fuel_market.tab', """
SELECT DISTINCT load_zone, concat('Hawaii_', fuel_type) AS regional_fuel_market
FROM fuel_costs
WHERE load_zone in %(load_zones)s AND fuel_scen_id = %(fuel_scen_id)s;
""", args)
# TODO: (when multi-island) add fuel_cost_adders for each zone
#########################
# gen_tech
# TODO: provide reasonable retirement ages for existing plants (not 100+base age)
# note: this zeroes out variable_o_m for renewable projects
# TODO: find out where variable_o_m came from for renewable projects and put it in the right place
# TODO: fix baseload flag in the database
# TODO: account for multiple fuel sources for a single plant in the upstream database
# and propagate that to this table.
# TODO: make sure the heat rates are null for non-fuel projects in the upstream database,
# and remove the correction code from here
# TODO: create heat_rate and fuel columns in the existing_plants_gen_tech table and simplify the query below.
# TODO: add unit sizes for new projects to the generator_costs table (new projects) from
# Switch-Hawaii/data/HECO\ IRP\ Report/IRP-2013-App-K-Supply-Side-Resource-Assessment-062813-Filed.pdf
# and then incorporate those into unit_sizes.tab below.
# NOTE: this converts variable o&m from $/kWh to $/MWh
# NOTE: we don't provide the following in this version:
# g_min_build_capacity
# g_ccs_capture_efficiency, g_ccs_energy_load,
# g_storage_efficiency, g_store_to_release_ratio
# NOTE: for all energy sources other than 'SUN' and 'WND' (i.e., all fuels),
# We report the fuel as 'multiple' and then provide data in a multi-fuel table.
# Some of these are actually single-fuel, but this approach is simpler than sorting
# them out within each query, and it doesn't add any complexity to the model.
# TODO: maybe replace "fuel IN ('SUN', 'WND', 'MSW')" with "fuel not in (SELECT fuel FROM fuel_cost)"
# TODO: convert 'MSW' to a proper fuel, possibly with a negative cost, instead of ignoring it
write_table('generator_info.tab', """
SELECT technology as generation_technology,
technology as g_dbid,
max_age_years as g_max_age,
scheduled_outage_rate as g_scheduled_outage_rate,
forced_outage_rate as g_forced_outage_rate,
intermittent as g_is_variable,
0 as g_is_baseload,
0 as g_is_flexible_baseload,
0 as g_is_cogen,
0 as g_competes_for_space,
CASE WHEN fuel IN ('SUN', 'WND') THEN 0 ELSE variable_o_m * 1000.0 END AS g_variable_o_m,
CASE WHEN | |
import arviz as az
import warnings
from importlib import reload
from typing import List, Any
from copy import copy
import altair as alt
import numpy as np
import pandas as pd
import xarray as xr
from bayes_window import models, BayesWindow
from bayes_window import utils
from bayes_window import visualization
from bayes_window.fitting import fit_numpyro
from .visualization import plot_posterior
class BayesRegression:
b_name: str
chart_data_line: alt.Chart
chart_posterior_kde: alt.Chart
chart_zero: alt.Chart
chart_posterior_intercept: alt.Chart
chart: alt.Chart
chart_data_boxplot: alt.Chart
chart_posterior_whiskers: alt.Chart
chart_posterior_center: alt.Chart
chart_base_posterior: alt.Chart
charts_for_facet: List[Any]
chart_posterior_hdi_no_data: alt.LayerChart
add_data: bool
data_and_posterior: pd.DataFrame
posterior: dict
trace: xr.Dataset
def __init__(self, window=None, add_data=True, **kwargs):
window = copy(window) if window is not None else BayesWindow(**kwargs)
window.add_data = add_data
self.window = window
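# Minimal usage sketch (column names are hypothetical; assumes BayesWindow is built
# from a dataframe with y/treatment/condition/group columns, as the attribute access
# throughout this class implies):
#   window = BayesWindow(df=df, y='isi', treatment='stim', condition=['neuron'], group='mouse')
#   BayesRegression(window).fit(model=models.model_hierarchical).plot(x='neuron:O')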
def fit(self, model=models.model_hierarchical, do_make_change='subtract', fold_change_index_cols=None,
do_mean_over_trials=True, fit_method=fit_numpyro, add_condition_slope=True, **kwargs):
self.model_args = kwargs
if do_make_change not in ['subtract', 'divide', False]:
raise ValueError(f'do_make_change should be subtract or divide, not {do_make_change}')
if not add_condition_slope:
warnings.warn(
f'add_condition_slope is not requested. Slopes will be the same across {self.window.condition}')
# if self.b_name is not None:
# raise SyntaxError("A model is already present in this BayesWindow object. "
# "Please create a new one by calling BayesWindow(...) again")
self.window.do_make_change = do_make_change
self.model = model
if fold_change_index_cols is None:
fold_change_index_cols = self.window.levels
fold_change_index_cols = list(fold_change_index_cols)
if self.window.detail and (self.window.detail in self.window.data.columns) and (
self.window.detail not in fold_change_index_cols):
fold_change_index_cols += [self.window.detail]
if add_condition_slope:
add_condition_slope = self.window.condition[0] and (
np.unique(self.window.data['combined_condition']).size > 1)
fold_change_index_cols.append('combined_condition')
self.b_name = 'slope_per_condition' if add_condition_slope else 'slope'
if add_condition_slope and (not self.window.condition[0] in fold_change_index_cols):
for condition in self.window.condition:
if condition not in fold_change_index_cols:
fold_change_index_cols.append(condition)
# Fit
self.trace = fit_method(y=self.window.data[self.window.y].values,
treatment=self.window.data[self.window.treatment].values,
# condition=self.window.data[self.window.condition[0]].values if self.window.condition[0] else None,
condition=self.window.data['combined_condition'].values if self.window.condition[
0] else None,
group=self.window.data[self.window.group].values if self.window.group else None,
model=model,
add_condition_slope=add_condition_slope,
**kwargs)
df_data = self.window.data.copy()
if do_mean_over_trials:
df_data = df_data.groupby(fold_change_index_cols).mean().reset_index()
# Make (fold) change
if do_make_change:
try:
df_data, _ = utils.make_fold_change(df_data, y=self.window.y, index_cols=fold_change_index_cols,
treatment_name=self.window.treatment,
fold_change_method=do_make_change)
except Exception as e:
print(e)
reload(utils)
self.trace.posterior = utils.rename_posterior(self.trace.posterior, self.b_name,
posterior_index_name='combined_condition',
group_name=self.window.group, group2_name=self.window.group2)
# HDI and MAP:
self.posterior = {var: utils.get_hdi_map(self.trace.posterior[var],
prefix=f'{var} '
if (var != self.b_name) and (
var != 'slope_per_condition') else '')
for var in self.trace.posterior.data_vars}
# Fill posterior into data
self.data_and_posterior = utils.insert_posterior_into_data(posteriors=self.posterior,
data=df_data.copy(),
group=self.window.group,
group2=self.window.group2)
try:
self.posterior = utils.recode_posterior(self.posterior, self.window.levels, self.window.data,
self.window.original_data,
self.window.condition)
except Exception as e:
print(e)
self.trace.posterior = utils.recode_trace(self.trace.posterior, self.window.levels, self.window.data,
self.window.original_data,
self.window.condition)
self.default_regression_charts()
return self
def plot(self, x: str = ':O', color: str = ':N', detail: str = ':N', independent_axes=None,
add_data=None,
**kwargs):
# Set some options
if (x == '') or (x[-2] != ':'):
x = f'{x}:O'
if color[-2] != ':':
color = f'{color}:N'
if add_data is None:
add_data = self.window.add_data
if add_data or self.posterior is None: # LME
posterior = self.data_and_posterior
elif 'slope_per_condition' in self.posterior.keys():
posterior = self.posterior['slope_per_condition']
elif 'mu_intercept_per_group' in self.posterior.keys():
posterior = self.posterior['mu_intercept_per_group'] # TODO fix data_and_posterior
else:
posterior = self.data_and_posterior
if len(x) > 2 and len(posterior[x[:-2]].unique()) == 1:
add_x_axis = True
# x = f'{self.window.condition[0]}:O'
else:
add_x_axis = False
if not ((x != ':O') and (x != ':N') and x[:-2] in posterior.columns and len(posterior[x[:-2]].unique()) < 10):
# long_x_axis = False
# else:
# long_x_axis = True
x = f'{x[:-1]}Q' # Change to quantitative encoding
print(f'changing x to {x}')
# If we are only plotting posterior and not data, an independent axis does not make sense:
self.window.independent_axes = independent_axes or f'{self.window.y} diff' in posterior
self.charts = []
# 1. Plot posterior
if posterior is not None:
base_chart = alt.Chart(posterior)
# Add zero for zero line
base_chart.data['zero'] = 0
self.chart_base_posterior = base_chart
# No-data plot
(self.chart_posterior_whiskers, self.chart_posterior_whiskers75,
self.chart_posterior_center, self.chart_zero) = plot_posterior(title=f'{self.window.y}',
x=x,
base_chart=base_chart,
do_make_change=self.window.do_make_change, **kwargs)
# if no self.data_and_posterior, use self.posterior to build slope per condition:
if (self.b_name != 'lme') and isinstance(self.posterior, dict):
main_effect = (self.posterior[self.b_name] if self.posterior[self.b_name] is not None
else self.posterior['slope_per_condition'])
self.chart_posterior_hdi_no_data = alt.layer(
*plot_posterior(df=main_effect, title=f'{self.window.y}', x=x,
do_make_change=self.window.do_make_change))
self.chart_posterior_hdi = alt.layer(self.chart_posterior_whiskers, self.chart_posterior_whiskers75,
self.chart_posterior_center)
self.charts.append(self.chart_posterior_whiskers)
self.charts.append(self.chart_posterior_center)
self.charts.append(self.chart_zero)
self.charts_for_facet = self.charts.copy() # KDE cannot be faceted so don't add it
if (self.b_name != 'lme') and not add_x_axis:
# Y Axis limits to match self.chart
minmax = [float(posterior['lower interval'].min()), 0,
float(posterior['higher interval'].max())]
y_domain = [min(minmax), max(minmax)]
self.chart_posterior_kde = visualization.plot_posterior_density(base_chart, self.window.y, y_domain,
self.trace,
posterior,
self.b_name,
do_make_change=self.window.do_make_change)
self.charts.append(self.chart_posterior_kde)
# self.charts_for_facet.append(self.chart_posterior_kde) # kde cannot be faceted
else:
base_chart = alt.Chart(self.window.data)
# 2. Plot data
y = f'{self.window.y} diff'
if y in posterior:
if (detail != ':N') and (detail != ':O'):
assert detail in self.window.data
# Plot data:
y_domain = list(np.quantile(base_chart.data[y], [.05, .95]))
if x != ':O':
self.chart_data_line, chart_data_points = visualization.line_with_highlight(base_chart, x, y,
color, detail,
highlight=False)
self.charts.append(self.chart_data_line)
self.charts.append(chart_data_points)
self.charts_for_facet.append(chart_data_points)
self.charts_for_facet.append(self.chart_data_line)
self.chart_data_boxplot = base_chart.mark_boxplot(
clip=True, opacity=.3, size=9, color='black',
median=alt.MarkConfig(color='red', strokeWidth=20)
).encode(
x=x,
y=alt.Y(f'{y}:Q',
axis=alt.Axis(orient='right', title=''),
scale=alt.Scale(zero=False, domain=y_domain)
)
)
self.charts.append(self.chart_data_boxplot)
self.charts_for_facet.append(self.chart_data_boxplot)
else: # No data overlay
warnings.warn("Did you have Uneven number of entries in conditions? I can't add data overlay")
# Layer and facet:
self.chart = visualization.auto_layer_and_facet(
self.charts, self.charts_for_facet, self.window.independent_axes, **kwargs)
# self.chart_posterior_hdi_no_data = visualization.auto_layer_and_facet(
# self.chart_posterior_hdi_no_data, charts_for_facet=None, independent_axes=self.window.independent_axes, **kwargs)
# 4. Make overlay for data_detail_plot
# self.plot_slopes_shading()
return self.chart
def plot_slopes_shading(self): # TODO this method is WIP
# 0. Use
pd.concat([utils.get_hdi_map(self.trace.posterior[var], prefix=f'{var} ')
for var in self.trace.posterior.data_vars], axis=1)
# 1. intercepts for stim=1
self.data_and_posterior['mu_intercept_per_group center interval']
# 2. slopes+ intercepts
self.data_and_posterior['intercept'] * self.data_and_posterior['slope']
# 3. Overlay with
self.chart_data_detail
# 4. color by dimension of slope (condition (and group if self.window.group))
def plot_intercepts(self, x=':O', y='mu_intercept_per_group center interval', **kwargs):
"""
Plot intercepts of a regression model, mostly for a better understanding of slopes.
Parameters
----------
x : str
Altair encoding for the x axis (e.g. ':O' for a bare ordinal axis).
y : str
Name of the posterior column holding the per-group intercept estimate.
kwargs
Facet options (e.g. column/row) passed to visualization.facet().
Returns
-------
Faceted altair chart of intercept posteriors overlaid on the data boxplot.
"""
assert self.posterior is not None
if self.window.do_make_change:
# combine posterior with original data instead, not diff TODO
# Fill posterior into data
data_and_posterior = utils.insert_posterior_into_data(posteriors=self.posterior,
data=self.window.original_data.copy(),
group=self.window.group,
group2=self.window.group2)
else:
data_and_posterior = self.data_and_posterior
# Redo boxplot (no need to show):
self.window.data_box_detail(data=data_and_posterior, autofacet=False)
# Make stand-alone posterior intercept chart:
self.chart_posterior_intercept = visualization.posterior_intercept_chart(data_and_posterior,
x=x, y=y,
group=self.window.group)
# Redo chart_intercept with x=treatment for overlay with self.chart_data_box_detail:
chart_intercept = visualization.posterior_intercept_chart(data_and_posterior,
x=':O', y=y,
group=self.window.group)
chart = alt.layer(chart_intercept, self.window.chart_data_box_detail).resolve_scale(y='independent')
# Check
if len(chart.data) == 0:
raise IndexError('was layer chart from different sources?')
if ('column' in kwargs) or ('row' in kwargs):
return visualization.facet(chart, **kwargs)
else: # Auto facet
return visualization.facet(chart, **visualization.auto_facet(self.window.group, self.window.condition))
def default_regression_charts(self, **kwargs):
reload(visualization)
# Default plots:
# try:
# facet_kwargs=visualization.auto_facet(self.window.group,self,condition)
if self.window.condition[0] and len(self.window.condition) > 2:
try:
return self.plot(x=self.window.condition[0], column=self.window.condition[1],
row=self.window.condition[2],
**kwargs)
except KeyError:
return self.plot(x=self.window.condition[0], row=self.window.condition[1], **kwargs)
elif self.window.condition[0] and len(self.window.condition) > 1:
try:
return self.plot(x=self.window.condition[0], column=self.window.group, row=self.window.condition[1],
**kwargs)
except KeyError:
return self.plot(x=self.window.condition[0], row=self.window.condition[1], **kwargs)
elif self.window.condition[0] and self.b_name != 'lme':
try:
return self.plot(x=self.window.condition[0], column=self.window.group, **kwargs)
except KeyError:
return self.plot(x=self.window.condition[0], **kwargs)
else: # self.window.group:
return self.plot(x=self.window.condition[0] if self.window.condition[0] else ':O', **kwargs)
# self.regression_charts(column=self.window.group)
# except Exception as e: # In case I haven't thought of something
# print(f'Please use window.regression_charts(): {e}')
# # import traceback
# # traceback.(e)
def facet(self, **kwargs):
return BayesWindow.facet(self, **kwargs)
def explore_model_kinds(self, parallel=True, add_group_slope=True, **kwargs):
from bayes_window.model_comparison import compare_models
if self.b_name is None:
raise ValueError('Fit a model first')
elif 'slope' in self.b_name:
models = {
'full': self.model,
'no_condition': self.model,
'no_condition_or_treatment': self.model,
'no-treatment': self.model,
'no_group': self.model,
}
extra_model_args = [
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group},
{'treatment': self.window.treatment, 'condition': None},
{'treatment': None, 'condition': None},
{'treatment': None, 'condition': self.window.condition},
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': None},
]
if add_group_slope and self.window.group is not None:
models['with_group_slope'] = self.model
# add_group_slope is False by default in model_hierarchical
extra_model_args.extend([{'treatment': self.window.treatment, 'condition': self.window.condition,
'group': self.window.group,
'add_group_slope': True}])
return compare_models(
df=self.window.data,
models=models,
extra_model_args=extra_model_args,
y=self.window.y,
parallel=parallel,
dist_y=self.model_args['dist_y'] if 'dist_y' in self.model_args.keys() else None,
**kwargs
)
def explore_models(self, parallel=True, add_group_slope=False, **kwargs):
from bayes_window.model_comparison import compare_models
if self.b_name is None:
raise ValueError('Fit a model first')
elif 'slope' in self.b_name:
models = {
'full_normal': self.model,
'no_condition': self.model,
'no_condition_or_treatment': self.model,
'no-treatment': self.model
}
extra_model_args = [
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group},
{'treatment': self.window.treatment, 'condition': None},
{'treatment': None, 'condition': None},
{'treatment': None, 'condition': self.window.condition}]
if self.window.group:
models.update({
'no_group': self.model,
'full_student': self.model,
'full_lognormal': self.model,
'full_gamma': self.model,
'full_exponential': self.model,})
extra_model_args+=[
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': None},
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
'dist_y': 'student'},
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
'dist_y': 'lognormal'},
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
'dist_y': 'gamma'},
{'treatment': self.window.treatment, 'condition': self.window.condition, 'group': self.window.group,
'dist_y': 'exponential'},
]
| |
#!/usr/bin/env python
#SETMODE 777
#----------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------ HEADER --#
"""
:author:
<NAME> & <NAME>
:synopsis:
runs step three of the auto rigger, creating the rig
:description:
takes in a list of joints, and controls and then creates a rig from those. Sets up the
ik/fk arms and legs, the hands, reverse feet, head, and ribbon spine
:applications:
Maya
:see_also:
step_one
step_two
gen_utils
maya_enums
auto_rig_gui
"""
#----------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------- IMPORTS --#
# Default Python Imports
import maya.cmds as cmds
import maya.mel as mel
import math
# Imports That You Wrote
import auto_rigger.gen_utils as gu
from maya_enums import MayaCommandEnums, NamingConventionEnums
import auto_rigger.step_one as step_one
#----------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------- FUNCTIONS --#
def run_step(control_list, num_vertebrae):
"""
creates the rig from the placed joints and controls (step three of the auto rigger)
:param control_list: list of the controls in the scene
:type: list
:param num_vertebrae: number of vertebrae to help with spine creation
:type: int
"""
# loop through controls and 0 transforms then lock scale
for control in control_list:
#0 transforms
if control.find('FK') == -1:
gu.create_buffer(control)
#delete history on the curve
cmds.select(control)
cmds.DeleteHistory()
#lock channels
for cc_type in NamingConventionEnums.LOCK_CHANNLES:
if control.find(cc_type) != -1:
for channel in NamingConventionEnums.LOCK_CHANNLES[cc_type]:
gu.lock_channels(control, channel)
#add the extra attrs to the controls that have them
for cc_type in NamingConventionEnums.EXTRA_ATTRS:
if control.find(cc_type) != -1:
for attr in NamingConventionEnums.EXTRA_ATTRS[cc_type]:
cmds.addAttr(control, longName = attr, attributeType='float')
cmds.setAttr(control + '.' + attr, keyable = True)
#lock the visibility and scale of all controls
gu.lock_channels(control, 'visibility')
for channel in MayaCommandEnums.SCALE:
gu.lock_channels(control, channel)
# replicate joint structure with real joints
cmds.select(d=True)
gu.create_real_skeleton()
joint_list = cmds.listRelatives('pelvis' + NamingConventionEnums.JOINT_SUFFIX,
children=True, shapes=False, allDescendents=True)
joint_list.append('pelvis' + NamingConventionEnums.JOINT_SUFFIX)
joint_list.reverse()
cmds.parent('pelvis'+NamingConventionEnums.JOINT_SUFFIX,
'joints'+NamingConventionEnums.GROUP_SUFFIX)
# delete old structure
cmds.delete('connectors' + NamingConventionEnums.GROUP_SUFFIX)
cmds.delete('pelvis'+NamingConventionEnums.FAKE_JOINT_SUFFIX)
# loop through the joints and calls the necessary functions to make the rig
for jnt_name in joint_list:
#check if the joint needs an ik fk switch
for ik_obj in NamingConventionEnums.IK_JOINTS:
if jnt_name.find(ik_obj) != -1:
#check side of joint first
side = jnt_name.split('_')[0]
#checks what obj has the ik fk switch
switch_jnt = (side + '_' + NamingConventionEnums.IK_JOINTS[ik_obj] +
NamingConventionEnums.JOINT_SUFFIX)
#call the make_ik_fk
make_ik_fk(jnt_name, switch_jnt)
#check if its the head joint and calls the set up for it
if jnt_name.find('head') != -1 and jnt_name.find('Tip') == -1:
setup_head(jnt_name)
if jnt_name.find('clavicle') != -1:
#create the fk control for the clavicle
gu.create_fk_control(jnt_name)
#rename the clavicle bind suffix
bind_name = jnt_name.replace(NamingConventionEnums.JOINT_SUFFIX,
NamingConventionEnums.BIND_JOINT_SUFFIX)
cmds.rename(jnt_name, bind_name)
# Constrain left hand_cc_GRP to wrist_cc
if jnt_name.find('palm') != -1 and \
jnt_name.find(NamingConventionEnums.LEFT) != -1:
palm_cc_grp = NamingConventionEnums.LEFT + 'palm' \
+ NamingConventionEnums.CONTROL_CURVE_SUFFIX+ NamingConventionEnums.GROUP_SUFFIX
cmds.parent(palm_cc_grp, jnt_name)
cmds.select(clear=True)
cmds.group(name=NamingConventionEnums.LEFT+'digits'
+NamingConventionEnums.GROUP_SUFFIX, empty = True)
cmds.parent(NamingConventionEnums.LEFT + 'digits'
+NamingConventionEnums.GROUP_SUFFIX, jnt_name)
# rename the palm to bind suffix
bind_name = jnt_name.replace(NamingConventionEnums.JOINT_SUFFIX,
NamingConventionEnums.BIND_JOINT_SUFFIX)
cmds.rename(jnt_name, bind_name)
# Constrain left hand_cc_GRP to wrist_cc
if jnt_name.find('palm') != -1 and \
jnt_name.find(NamingConventionEnums.RIGHT) != -1:
palm_cc_grp = NamingConventionEnums.RIGHT + 'palm' \
+ NamingConventionEnums.CONTROL_CURVE_SUFFIX+ NamingConventionEnums.GROUP_SUFFIX
cmds.parent(palm_cc_grp, jnt_name)
cmds.select(clear=True)
cmds.group(name=NamingConventionEnums.RIGHT + 'digits'
+NamingConventionEnums.GROUP_SUFFIX, empty = True)
cmds.parent(NamingConventionEnums.RIGHT + 'digits'
+NamingConventionEnums.GROUP_SUFFIX, jnt_name)
#rename the palm to bind suffix
bind_name = jnt_name.replace(NamingConventionEnums.JOINT_SUFFIX,
NamingConventionEnums.BIND_JOINT_SUFFIX)
cmds.rename(jnt_name, bind_name)
# left or right side enum
side = ''
# Connects all of the attributes in the hand_CC to the finger rotations
for digit in NamingConventionEnums.DIGITS:
# if the digit is not a toe or tip
if jnt_name.find(digit) != -1 and jnt_name.find('Tip') == -1 \
and jnt_name.find('Toe') == -1:
# Calls setup_digits for each side to setup the hierarchy and lock values
if jnt_name.find(NamingConventionEnums.LEFT) != -1:
setup_digits(jnt_name, NamingConventionEnums.LEFT)
side = NamingConventionEnums.LEFT
if jnt_name.find(NamingConventionEnums.RIGHT)!= -1:
setup_digits(jnt_name, NamingConventionEnums.RIGHT)
side = NamingConventionEnums.RIGHT
# Connects the attributes for the index finger to the hand_CC
if jnt_name.find('index_1') != -1:
node = cmds.shadingNode('multiplyDivide',
name=jnt_name.replace('_1'+NamingConventionEnums.JOINT_SUFFIX,
'') + "_MD", asUtility=True)
cmds.connectAttr(side + 'palm_CC.indexCurl', node + '.input1Z',
force=True)
cmds.connectAttr(node + '.outputZ', jnt_name + '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '2')
+ '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '3')
+ '.rotateZ', force=True)
cmds.setAttr(node + ".input2Z", 2)
cmds.connectAttr(side + 'palm_CC.fingerSpread', node + '.input1Y',
force=True)
cmds.connectAttr(node + '.outputY', jnt_name + '.rotateY', force=True)
cmds.setAttr(node + ".input2Y", -1)
# Connects the attributes for the middle finger to the hand_CC
elif jnt_name.find('middle_1') != -1:
node = cmds.shadingNode('multiplyDivide', name=jnt_name.replace(
'_1' + NamingConventionEnums.JOINT_SUFFIX,
'') + "_MD", asUtility=True)
cmds.connectAttr(side + 'palm_CC.middleCurl', node + '.input1Z',
force=True)
cmds.connectAttr(node + '.outputZ', jnt_name + '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '2')
+ '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '3')
+ '.rotateZ', force=True)
cmds.setAttr(node+".input2Z", 2)
cmds.connectAttr(side + 'palm_CC.fingerSpread', node + '.input1Y',
force=True)
cmds.connectAttr(node + '.outputY', jnt_name + '.rotateY', force=True)
cmds.setAttr(node + ".input2Y", -.5)
# Connects the attributes for the ring finger to the hand_CC
elif jnt_name.find('ring_1') != -1:
node = cmds.shadingNode('multiplyDivide', name=jnt_name.replace(
'_1' + NamingConventionEnums.JOINT_SUFFIX,
'') + "_MD", asUtility=True)
cmds.connectAttr(side + 'palm_CC.ringCurl', node + '.input1Z',
force=True)
cmds.connectAttr(node + '.outputZ', jnt_name + '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '2')
+ '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '3')
+ '.rotateZ', force=True)
cmds.setAttr(node + ".input2Z", 2)
cmds.connectAttr(side + 'palm_CC.fingerSpread', node + '.input1Y',
force=True)
cmds.connectAttr(node + '.outputY', jnt_name + '.rotateY', force=True)
cmds.setAttr(node + ".input2Y", .5)
# Connects the attributes for the pinky finger to the hand_CC
elif jnt_name.find('pinky_1') != -1:
node = cmds.shadingNode('multiplyDivide', name=jnt_name.replace(
'_1' + NamingConventionEnums.JOINT_SUFFIX,
'') + "_MD", asUtility=True)
cmds.connectAttr(side + 'palm_CC.pinkyCurl', node + '.input1Z',
force=True)
cmds.connectAttr(node + '.outputZ', jnt_name + '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '2')
+ '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '3')
+ '.rotateZ', force=True)
cmds.setAttr(node + ".input2Z", 2)
cmds.connectAttr(side + 'palm_CC.fingerSpread', node + '.input1Y',
force=True)
cmds.connectAttr(node + '.outputY', jnt_name + '.rotateY', force=True)
cmds.setAttr(node + ".input2Y", 1)
# Connects the attributes for the thumb to the hand_CC
elif jnt_name.find('thumb_1') != -1:
node = cmds.shadingNode('multiplyDivide', name=jnt_name.replace(
'_1' + NamingConventionEnums.JOINT_SUFFIX,
'') + "_MD", asUtility=True)
cmds.connectAttr(side + 'palm_CC.thumbCurl', node + '.input1Z',
force=True)
cmds.connectAttr(node + '.outputZ', jnt_name + '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '2')
+ '.rotateZ', force=True)
cmds.connectAttr(node + '.outputZ', jnt_name.replace('1', '3')
+ '.rotateZ', force=True)
cmds.setAttr(node + ".input2Z", 2)
cmds.connectAttr(side + 'palm_CC.fingerSpread', node + '.input1Y',
force=True)
cmds.connectAttr(node + '.outputY', jnt_name + '.rotateY', force=True)
cmds.setAttr(node + ".input2Y", -1)
# if hte digit is a toe, but not a tip
elif jnt_name.find(digit) != -1 and jnt_name.find('Tip') == -1 \
and jnt_name.find('Toe') != -1:
# calls setup toes for each side
if jnt_name.find(NamingConventionEnums.LEFT) != -1:
setup_toes(jnt_name, NamingConventionEnums.LEFT)
side = NamingConventionEnums.LEFT
if jnt_name.find(NamingConventionEnums.RIGHT) != -1:
setup_toes(jnt_name, NamingConventionEnums.RIGHT)
side = NamingConventionEnums.RIGHT
#checks if its the foot joint and calls the reverse foot
if jnt_name.find('ball') != -1:
setup_reverse_foot(jnt_name)
#call the spine set up
if jnt_name.find('pelvis') != -1:
#fix the pelvis orientation
control_name = 'pelvis' + NamingConventionEnums.CONTROL_CURVE_SUFFIX
gu.unlock_all_channels(control_name)
cmds.select(control_name)
cmds.pickWalk(direction='up')
control_buffer = cmds.ls(selection=True)
cmds.makeIdentity(control_buffer, apply=True, translate=True, rotate=True,
scale=True, normal=False, preserveNormals=1)
for channel in MayaCommandEnums.SCALE:
gu.lock_channels(control_name, channel)
#call the spine setup
setup_spine(jnt_name, num_vertebrae)
#connect the arms and legs to the spine
#make a list with the arms and legs the parent them to the right spot
clavicles = cmds.ls('*clavicle*', type = 'transform')
#unlock the clavicles so they can be parented
for clav in clavicles:
gu.unlock_all_channels(clav)
cmds.parent(clav, NamingConventionEnums.SPINE_CTRLS[-1] +
NamingConventionEnums.CONTROL_CURVE_SUFFIX)
#relock the clavicles
for channel in MayaCommandEnums.SCALE:
gu.lock_channels(clav, channel)
for channel in MayaCommandEnums.TRANSLATION:
gu.lock_channels(clav, channel)
#parent the legs
legs = cmds.ls('*leg*', type='transform')
cmds.parent(legs, NamingConventionEnums.SPINE_CTRLS[0] +
NamingConventionEnums.CONTROL_CURVE_SUFFIX)
#rename all of the fingers to bind suffix
for type in NamingConventionEnums.DIGITS:
digits = cmds.ls('*' + type + '*', type = 'joint')
for digit in digits:
digit_bind_name = digit.replace(NamingConventionEnums.JOINT_SUFFIX,
NamingConventionEnums.BIND_JOINT_SUFFIX)
#skips the tips
if cmds.objExists(digit) and digit.find('Tip') == -1:
cmds.rename(digit, digit_bind_name)
#hide the extra groups in the master node
cmds.setAttr(NamingConventionEnums.RIG_HIERARCHY[9] + '.visibility', 0)
cmds.setAttr(NamingConventionEnums.RIG_HIERARCHY[10] + '.visibility', 0)
cmds.setAttr(NamingConventionEnums.RIG_HIERARCHY[11] + '.visibility', 0)
cmds.setAttr(NamingConventionEnums.RIG_HIERARCHY[12] + '.visibility', 0)
def make_ik_fk(blend_root, switch_jnt):
"""
this code takes the blend root joint and makes an ik fk switch out of it
:param blend_root: name of the root joint
:type: str
:param switch_jnt: name of the obj to have the ik and fk switch on it
:type: str
"""
# unparents the jnt at the end of the chain so it isnt duped
cmds.select(switch_jnt)
cmds.pickWalk(direction='up')
to_parent = cmds.ls(selection=True)
cmds.parent(switch_jnt, world=True)
# get the obj with the ik/fk switch
switch_obj_temp = switch_jnt.replace(NamingConventionEnums.JOINT_SUFFIX,
NamingConventionEnums.CONTROL_CURVE_SUFFIX)
switch_obj = switch_obj_temp.replace('ball', 'foot')
# duplicate IKs
ik_children = cmds.duplicate(blend_root, renameChildren=True)
# duplicate FKs
fk_children = cmds.duplicate(blend_root, renameChildren=True)
# makes a list of the 3 blend joints
blend_objs = [blend_root]
blend_children = cmds.listRelatives(blend_root, allDescendents=True)
blend_children.reverse()
blend_objs.extend(blend_children)
# get the pole | |
from __future__ import division
from __future__ import print_function
__copyright__='''
Copyright (c) 2010 Red Hat, Inc.
'''
# All rights reserved.
#
# Author: <NAME> <<EMAIL>>
#
# This software licensed under BSD license, the text of which follows:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the MontaVista Software, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import random
import socket
import sys
if sys.version_info < (3,):
from UserDict import UserDict
else:
from collections import UserDict
from cts.CTStests import *
from corosync import CpgTestAgent
###################################################################
class CoroTest(CTSTest):
'''
basic class to make sure that new configuration is applied
and old configuration is removed.
'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.config = {}
self.config['logging/logger_subsys[1]/subsys'] = 'MAIN'
self.config['logging/logger_subsys[1]/debug'] = 'on'
self.need_all_up = True
self.CM.start_cpg = True
self.cpg_name = 'cts_group'
def setup(self, node):
ret = CTSTest.setup(self, node)
# setup the authkey
localauthkey = '/tmp/authkey'
if not os.path.exists(localauthkey):
self.CM.rsh(node, 'corosync-keygen -l')
self.CM.rsh.cp("%s:%s" % (node, "/etc/corosync/authkey"), localauthkey)
for n in self.CM.Env["nodes"]:
if n is not node:
#copy key onto other nodes
self.CM.rsh.cp(localauthkey, "%s:%s" % (n, "/etc/corosync/authkey"))
# copy over any new config
for c in self.config:
self.CM.new_config[c] = self.config[c]
# apply the config
self.CM.apply_new_config(self.need_all_up)
# start/stop all corosyncs'
for n in self.CM.Env["nodes"]:
if self.need_all_up and not self.CM.StataCM(n):
self.incr("started")
self.start(n)
if self.need_all_up and self.CM.start_cpg:
self.CM.cpg_agent[n].clean_start()
self.CM.cpg_agent[n].cpg_join(self.cpg_name)
self.CM.cpg_agent[n].cfg_initialize()
if not self.need_all_up and self.CM.StataCM(n):
self.incr("stopped")
self.stop(n)
return ret
def config_valid(self, config):
return True
def teardown(self, node):
self.CM.apply_default_config()
return CTSTest.teardown(self, node)
###################################################################
class CpgContextTest(CoroTest):
def __init__(self, cm):
self.name="CpgContextTest"
CoroTest.__init__(self, cm)
self.CM.start_cpg = True
def __call__(self, node):
self.incr("calls")
res = self.CM.cpg_agent[node].context_test()
if 'OK' in res:
return self.success()
else:
return self.failure('context_test failed')
###################################################################
class CpgConfigChangeBase(CoroTest):
'''
join a cpg group on each node, and test that the following
causes a leave event:
- a call to cpg_leave()
- app exit
- node leave
- node leave (with large token timeout)
'''
def setup(self, node):
ret = CoroTest.setup(self, node)
self.listener = None
self.wobbly = None
for n in self.CM.Env["nodes"]:
if self.wobbly is None:
self.wobbly = n
elif self.listener is None:
self.listener = n
if self.wobbly in self.CM.cpg_agent:
self.wobbly_id = self.CM.cpg_agent[self.wobbly].cpg_local_get()
if self.listener in self.CM.cpg_agent:
self.CM.cpg_agent[self.listener].record_config_events(truncate=True)
return ret
def wait_for_config_change(self):
found = False
max_timeout = 60 * 15
waited = 0
printit = 0
self.CM.log("Waiting for config change on " + self.listener)
while not found:
try:
event = self.CM.cpg_agent[self.listener].read_config_event()
except:
return self.failure('connection to test cpg_agent failed.')
if event is not None:
self.CM.debug("RECEIVED: " + str(event))
if event is None:
if waited >= max_timeout:
return self.failure("timedout(" + str(waited) + " sec) == no event!")
else:
time.sleep(1)
waited = waited + 1
printit = printit + 1
if printit == 60:
print('waited ' + str(waited) + ' seconds')
printit = 0
elif str(event.node_id) in str(self.wobbly_id) and not event.is_member:
self.CM.log("Got the config change in " + str(waited) + " seconds")
found = True
else:
self.CM.debug("No match")
self.CM.debug("wobbly nodeid:" + str(self.wobbly_id))
self.CM.debug("event nodeid:" + str(event.node_id))
self.CM.debug("event.is_member:" + str(event.is_member))
if found:
return self.success()
###################################################################
class CpgCfgChgOnGroupLeave(CpgConfigChangeBase):
def __init__(self, cm):
CpgConfigChangeBase.__init__(self,cm)
self.name="CpgCfgChgOnGroupLeave"
def failure_action(self):
self.CM.log("calling cpg_leave() on " + self.wobbly)
self.CM.cpg_agent[self.wobbly].cpg_leave(self.cpg_name)
def __call__(self, node):
self.incr("calls")
self.failure_action()
return self.wait_for_config_change()
###################################################################
class CpgCfgChgOnNodeLeave(CpgConfigChangeBase):
def __init__(self, cm):
CpgConfigChangeBase.__init__(self,cm)
self.name="CpgCfgChgOnNodeLeave"
def failure_action(self):
self.CM.log("stopping corosync on " + self.wobbly)
self.stop(self.wobbly)
def __call__(self, node):
self.incr("calls")
self.failure_action()
return self.wait_for_config_change()
###################################################################
class CpgCfgChgOnLowestNodeJoin(CTSTest):
'''
1) stop all nodes
2) start all but the node with the smallest ip address
3) start recording events
4) start the last node
'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="CpgCfgChgOnLowestNodeJoin"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.config = {}
self.need_all_up = False
self.cpg_name = 'cts_group'
def config_valid(self, config):
return True
def lowest_ip_set(self):
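# NOTE: this simply records the first entry of Env["nodes"]; it assumes that list
# is ordered so the first node is the one with the lowest address.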
self.lowest = None
for n in self.CM.Env["nodes"]:
if self.lowest is None:
self.lowest = n
self.CM.log("lowest node is " + self.lowest)
def setup(self, node):
# stop all nodes
for n in self.CM.Env["nodes"]:
self.CM.StopaCM(n)
self.lowest_ip_set()
# copy over any new config
for c in self.config:
self.CM.new_config[c] = self.config[c]
# install the config
self.CM.install_all_config()
# start all but lowest
self.listener = None
for n in self.CM.Env["nodes"]:
if n is not self.lowest:
if self.listener is None:
self.listener = n
self.incr("started")
self.CM.log("starting " + n)
self.start(n)
self.CM.cpg_agent[n].clean_start()
self.CM.cpg_agent[n].cpg_join(self.cpg_name)
# start recording events
pats = []
pats.append("%s .*sync: node joined.*" % self.listener)
pats.append("%s .*sync: activate correctly.*" % self.listener)
self.sync_log = self.create_watch(pats, 60)
self.sync_log.setwatch()
self.CM.log("setup done")
return CTSTest.setup(self, node)
def __call__(self, node):
self.incr("calls")
self.start(self.lowest)
self.CM.cpg_agent[self.lowest].clean_start()
self.CM.cpg_agent[self.lowest].cpg_join(self.cpg_name)
self.wobbly_id = self.CM.cpg_agent[self.lowest].cpg_local_get()
self.CM.log("waiting for sync events")
if not self.sync_log.lookforall():
return self.failure("Patterns not found: " + repr(self.sync_log.unmatched))
else:
return self.success()
###################################################################
class CpgCfgChgOnExecCrash(CpgConfigChangeBase):
def __init__(self, cm):
CpgConfigChangeBase.__init__(self,cm)
self.name="CpgCfgChgOnExecCrash"
def failure_action(self):
self.CM.log("sending KILL to corosync on " + self.wobbly)
self.CM.rsh(self.wobbly, "killall -9 corosync")
self.CM.rsh(self.wobbly, "rm -f /var/run/corosync.pid")
self.CM.rsh(self.wobbly, "rm -f /dev/shm/qb-corosync-blackbox*")
self.CM.ShouldBeStatus[self.wobbly] = "down"
def __call__(self, node):
self.incr("calls")
self.failure_action()
return self.wait_for_config_change()
###################################################################
class CpgCfgChgOnNodeIsolate(CpgConfigChangeBase):
def __init__(self, cm):
CpgConfigChangeBase.__init__(self,cm)
self.name="CpgCfgChgOnNodeIsolate"
def config_valid(self, config):
if 'totem/rrp_mode' in config:
return False
else:
return True
def failure_action(self):
self.CM.log("isolating node " + self.wobbly)
self.CM.isolate_node(self.wobbly)
def __call__(self, node):
self.incr("calls")
self.failure_action()
return self.wait_for_config_change()
def teardown(self, node):
self.CM.unisolate_node (self.wobbly)
return CpgConfigChangeBase.teardown(self, node)
###################################################################
class CpgCfgChgOnNodeRestart(CpgConfigChangeBase):
def __init__(self, cm):
CpgConfigChangeBase.__init__(self,cm)
self.name="CpgCfgChgOnNodeRestart"
self.CM.start_cpg = False
def config_valid(self, config):
if 'totem/secauth' in config:
if config['totem/secauth'] == 'on':
return False
else:
return True
if 'totem/rrp_mode' in config:
return False
else:
return True
def failure_action(self):
self.CM.log("2: isolating node " + self.wobbly)
self.CM.isolate_node(self.wobbly)
self.CM.log("3: Killing corosync on " + self.wobbly)
self.CM.rsh(self.wobbly, "killall -9 corosync")
self.CM.rsh(self.wobbly, "rm -f /var/run/corosync.pid")
self.CM.ShouldBeStatus[self.wobbly] = "down"
self.CM.log("4: unisolating node " + self.wobbly)
self.CM.unisolate_node (self.wobbly)
self.CM.log("5: starting corosync on " + self.wobbly)
self.CM.StartaCM(self.wobbly)
time.sleep(5)
self.CM.log("6: starting cpg on all nodes")
self.CM.start_cpg = True
for node in self.CM.Env["nodes"]:
self.CM.cpg_agent[node] = CpgTestAgent(node, self.CM.Env)
self.CM.cpg_agent[node].start()
self.CM.cpg_agent[node].cpg_join(self.cpg_name)
self.wobbly_id = self.CM.cpg_agent[self.wobbly].cpg_local_get()
self.CM.cpg_agent[self.listener].record_config_events(truncate=True)
self.CM.log("7: isolating node " + self.wobbly)
self.CM.isolate_node(self.wobbly)
self.CM.log("8: Killing corosync on " + self.wobbly)
self.CM.rsh(self.wobbly, "killall -9 corosync")
self.CM.rsh(self.wobbly, "rm -f /var/run/corosync.pid")
self.CM.ShouldBeStatus[self.wobbly] = "down"
self.CM.log("9: unisolating node " + self.wobbly)
self.CM.unisolate_node (self.wobbly)
self.CM.log("10: starting corosync on " + self.wobbly)
self.CM.StartaCM(self.wobbly)
def __call__(self, node):
self.incr("calls")
self.failure_action()
return self.wait_for_config_change()
def teardown(self, node):
self.CM.unisolate_node (self.wobbly)
return CpgConfigChangeBase.teardown(self, node)
###################################################################
class CpgMsgOrderBase(CoroTest):
def __init__(self, cm):
CoroTest.__init__(self,cm)
self.num_msgs_per_node = 0
self.total_num_msgs = 0
def setup(self, node):
ret = CoroTest.setup(self, node)
for n in self.CM.Env["nodes"]:
self.CM.cpg_agent[n].clean_start()
self.CM.cpg_agent[n].cpg_join(self.cpg_name)
self.CM.cpg_agent[n].record_messages()
time.sleep(1)
return ret
def cpg_msg_blaster(self):
for n in self.CM.Env["nodes"]:
self.CM.cpg_agent[n].msg_blaster(self.num_msgs_per_node)
def wait_and_validate_order(self):
msgs = {}
self.total_num_msgs = 0
for n in self.CM.Env["nodes"]:
self.total_num_msgs = self.total_num_msgs + self.num_msgs_per_node
for n in self.CM.Env["nodes"]:
msgs[n] = []
stopped = False
waited = 0
while len(msgs[n]) < self.total_num_msgs and waited < 360:
try:
msg = self.CM.cpg_agent[n].read_messages(50)
except:
return self.failure('connection to test cpg_agent failed.')
if msg is not None:
msgl = msg.split(";")
# remove empty entries
msgl = [m for m in msgl if m != '']
msgs[n].extend(msgl)
elif msg is None:
time.sleep(2)
waited = waited + 2
if len(msgs[n]) < self.total_num_msgs:
return self.failure("expected %d messages from %s got %d" % (self.total_num_msgs, n, len(msgs[n])))
fail = False
error_message = ''
for i in range(0, self.total_num_msgs):
first = None
for n in self.CM.Env["nodes"]:
# first test | |
'xs', 10020)
>>> h2.merge(h2_2)
MAT MF MT
128 1 451 1.002000+3 1.995712+0 0 0 ...
2 151 1.002000+3 1.995712+0 0 0 ...
3 1 1.002000+3 1.995712+0 0 0 ...
2 1.002000+3 1.995712+0 0 0 ...
3 1.002000+3 1.995712+0 0 0 ...
16 1.002000+3 1.995712+0 0 0 ...
102 1.002000+3 1.995712+0 0 0 ...
4 2 1.002000+3 1.995712+0 0 1 ...
6 16 1.002000+3 1.995712+0 0 1 ...
8 102 1.002000+3 1.996800+0 0 0 ...
9 102 1.002000+3 1.996800+0 0 0 ...
12 102 1.002000+3 1.995712+0 1 0 ...
14 102 1.002000+3 1.995712+0 1 0 ...
33 1 1.002000+3 1.996800+0 0 0 ...
2 1.002000+3 1.996800+0 0 0 ...
16 1.002000+3 1.996800+0 0 0 ...
102 1.002000+3 1.996800+0 0 0 ...
dtype: object
"""
tape = reduce(lambda x, y: x.add_sections(y.data), iterable)
merged = self.add_sections(tape.data)
return merged
def filter_by(self,
listmat=range(1, 10000),
listmf=range(1, 10000),
listmt=range(1, 10000)):
"""Filter dataframe based on MAT, MF, MT lists.
Parameters
----------
listmat : `list` or `None`
list of requested MAT values (default is `None`: use all MAT)
listmf : `list` or `None`
list of requested MF values (default is `None`: use all MF)
listmt : `list` or `None`
list of requested MT values (default is `None`: use all MT)
Returns
-------
`sandy.formats.endf6.BaseFile` or derived instance
Copy of the original instance with filtered MAT, MF and MT sections
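Examples
--------
Illustrative sketch: keep only the capture cross section (MF=3, MT=102).
>>> tape = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> reduced = tape.filter_by(listmf=[3], listmt=[102])
>>> assert set(reduced.to_series().index.get_level_values("MF")) == {3}
>>> assert set(reduced.to_series().index.get_level_values("MT")) == {102}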
"""
series = self.to_series()
cond_mat = series.index.get_level_values("MAT").isin(listmat)
cond_mf = series.index.get_level_values("MF").isin(listmf)
cond_mt = series.index.get_level_values("MT").isin(listmt)
d = series.loc[cond_mat & cond_mf & cond_mt].to_dict()
return self.__class__(d, file=self.file)
def get_value(self, mat, mf, mt, line_number, pos):
return self._get_section_df(mat, mf, mt)[pos.upper()] \
.iloc[line_number - 1]
def change_value(self, val, mat, mf, mt, line_number, pos, inplace=False,
dtype=float):
items = ("C1", "C2", "L1", "L2", "N1", "N2")
positions = {y: x for x, y in enumerate(items)}
step = positions[pos]
length = 81
ibeg = length * (line_number - 1) + 11 * step
iend = length * (line_number - 1) + 11 * (step + 1)
text = self.data[(mat, mf, mt)]
new = sandy.write_int(val) if dtype is int else sandy.write_float(val)
new_text = "".join((text[:ibeg], new, text[iend:]))
new_tape = self.add_section(mat, mf, mt, new_text, inplace=False)
if inplace:
self.data = new_tape.data
else:
return new_tape
print(new_text)
@classmethod
def _from_old_format(cls, old_endf6):
"""
Convert old endf6 tape into new one!
"""
return cls(old_endf6.TEXT.to_dict())
def write_string(self, title=""):
"""
Write `_FormattedFile.data` content to string according to the ENDF-6
file rules.
Parameters
----------
title : `str`, optional, default is an empty string
first line of the file
Returns
-------
`str`
string containing the ENDF-6 information stored in this instance.
Notes
-----
.. note:: no modification is implemented to the actual content of
the `Endf6.data` object.
Examples
--------
>>> file = os.path.join(sandy.data.__path__[0], "h1.endf")
>>> string = sandy.Endf6.from_file(file).write_string()
>>> print(string[:81 * 4 - 1])
1 0 0 0
1.001000+3 9.991673-1 0 0 2 5 125 1451 1
0.000000+0 0.000000+0 0 0 0 6 125 1451 2
1.000000+0 2.000000+7 3 0 10 3 125 1451 3
if no modification is applied to the `_FormattedFile` content, the
`write_string` returns an output identical to the file ASCII content.
>>> assert string == open(file).read()
Test with `sandy.Errorr` object and title option:
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> err = endf6.get_errorr(ek=[1e-2, 1e1, 2e7], err=1)
>>> err.to_file("out.err", title="H with ERRORR")
>>> err_2 = sandy.Errorr.from_file("out.err")
>>> os.remove("out.err")
>>> assert err_2.data[(125, 1, 451)] == err.data[(125, 1, 451)]
>>> assert err_2.data[(125, 3, 102)] == err.data[(125, 3, 102)]
>>> assert err_2.data[(125, 33, 102)] == err.data[(125, 33, 102)]
.. note:: differences might appear from the way zeros were handled at
the end of the ENDF-6 section, or if a different file title is
given.
"""
string = sandy.write_line(title, 1, 0, 0, 0)
string += "\n"
for mat, dfmat in self.to_series().groupby('MAT', sort=True):
for mf, dfmf in dfmat.groupby('MF', sort=True):
for mt, text in dfmf.groupby('MT', sort=True):
string += text.squeeze()\
.encode('ascii', 'replace')\
.decode('ascii')
string += "\n"
string += sandy.write_line("", mat, mf, 0, 99999)
string += "\n"
string += sandy.write_line("", mat, 0, 0, 0)
string += "\n"
string += sandy.write_line("", 0, 0, 0, 0)
string += "\n"
string += sandy.write_line("", -1, 0, 0, 0)
return string
def to_file(self, filename, mode="w", **kwargs):
"""
Given a filename write the content of a `_FormattedFile` instance to
disk in ASCII format.
Parameters
----------
filename : `str`
The name of the file.
mode : `str`, optional
Mode while opening a file. The default is "w".
Parameters for `write_string`
-----------------------------
title : `str`, optional, default is an empty string
first line of the file
Returns
-------
None.
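Examples
--------
Minimal round-trip sketch, following the `write_string` example above
(the output file name is arbitrary).
>>> endf6 = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> endf6.to_file("h1_copy.endf")
>>> endf6_2 = sandy.Endf6.from_file("h1_copy.endf")
>>> os.remove("h1_copy.endf")
>>> assert endf6_2.data[(125, 3, 102)] == endf6.data[(125, 3, 102)]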
"""
text = self.write_string(**kwargs)
with open(filename, mode) as f:
f.write(text)
class Endf6(_FormattedFile):
"""
Container for ENDF-6 file text grouped by MAT, MF and MT numbers.
Methods
-------
get_ace
Process `Endf6` instance into an ACE file using NJOY.
get_pendf
Process `Endf6` instance into a PENDF file using NJOY.
get_errorr
Process `Endf6` instance into a Errorr file using NJOY.
get_id
Extract ID for a given MAT for a ENDF-6 file.
read_section
Parse MAT/MF/MT section.
to_file
Given a filename write the content of a `Endf6` instance to disk in
ASCII format.
to_string
Write `Endf6.data` content to string according to the ENDF-6 file
rules.
write_string
Write ENDF-6 content to string.
"""
def _get_nsub(self):
"""
Determine ENDF-6 sub-library type by reading flag "NSUB" of first MAT
in file:
* `NSUB = 10` : Incident-Neutron Data
* `NSUB = 11` : Neutron-Induced Fission Product Yields
Returns
-------
`int`
NSUB value
"""
return self.read_section(self.mat[0], 1, 451)["NSUB"]
def read_section(self, mat, mf, mt, raise_error=True):
"""
Parse MAT/MF/MT section.
Parameters
----------
`mat` : int
MAT number
`mf` : int
MF number
`mt` : int
MT number
Returns
-------
`dict`
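Examples
--------
Illustrative sketch: parse the descriptive data (MF1/MT451) of H1, which has
MAT=125 as in the examples above.
>>> tape = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> info = tape.read_section(125, 1, 451)
>>> assert int(info["ZA"]) == 1001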
"""
read_module = f"read_mf{mf}"
found = hasattr(sandy, read_module)
if not raise_error and not found:
return
foo = eval(f"sandy.{read_module}")
return foo(self, mat, mt)
def _update_info(self, descr=None):
"""Update RECORDS item (in DATA column) for MF1/MT451 of each MAT based on the content of the TEXT column.
"""
from .mf1 import write
tape = self.copy()
for mat in sorted(tape.index.get_level_values('MAT').unique()):
sec = self.read_section(mat,1,451)
records = pd.DataFrame(sec["RECORDS"], columns=["MF","MT","NC","MOD"]).set_index(["MF","MT"])
new_records = []
dfmat=tape.loc[mat]
# for (mf,mt),text in sorted(tape.loc[mat].query('MT!=451'.format(mat)).TEXT.items()):
for (mf,mt),text in sorted(dfmat[dfmat.index.get_level_values("MT")!=451].TEXT.items()):
nc = len(text.splitlines())
# when copying PENDF sections (MF2/MT152) mod is not present in the dictionary
try:
mod = records.MOD.loc[mf,mt]
except KeyError:
mod = 0
new_records.append((mf,mt,nc,mod))
if descr is not None:
sec["TEXT"] = descr
nc = 4 + len(sec["TEXT"]) + len(new_records) + 1
mod = records.MOD.loc[1,451]
new_records = [(1,451,nc,mod)] + new_records
sec["RECORDS"] = new_records
text = write(sec)
tape.loc[mat,1,451].TEXT = text
return Endf6(tape)
def parse(self):
mats = self.index.get_level_values("MAT").unique()
if len(mats) > 1:
raise NotImplementedError("file contains more than 1 MAT")
self.mat = self.endf = mats[0]
if hasattr(self, "tape"):
self.filename = os.path.basename(self.tape)
INFO = self.read_section(mats[0], 1 ,451)
del INFO["TEXT"], INFO["RECORDS"]
self.__dict__.update(**INFO)
self.SECTIONS = self.loc[INFO["MAT"]].reset_index()["MF"].unique()
self.EHRES = 0
self.THNMAX = - self.EHRES if self.EHRES != 0 else 1.0E6
def get_id(self, method="nndc"):
"""
Extract the ID for a given MAT of an ENDF-6 file.
Parameters
----------
method : `str`, optional
Method adopted to produce the ID. The default is `"nndc"`.
- if `method='aleph'` the ID is the ZAM identifier
- else, the ID is the ZA identifier according to the NNDC rules
Returns
-------
ID : `int`
ID of the ENDF-6 file.
Notes
-----
.. note:: a warning is raised if more than one MAT is found.
Only the ID corresponding to the lowest MAT will be returned.
Examples
--------
Extract ID for H1 file using NNDC and ALEPH methods
>>> tape = sandy.get_endf6_file("jeff_33", "xs", 10010)
>>> assert tape.get_id() == 1001
>>> assert tape.get_id(method="aleph") == 10010
Extract ID for Am242m file using NNDC and ALEPH methods
>>> tape2 = sandy.get_endf6_file("jeff_33", "xs", 952421)
>>> assert tape2.get_id() == 95642
>>> assert tape2.get_id(method="ALEPH") == 952421
>>> assert tape.merge(tape2).get_id() == 1001
>>> assert tape2.merge(tape).get_id() == 1001
"""
mat = self.mat[0]
if len(self.mat) != 1:
msg = "More than one MAT found, will give ID only for the lowest MAT"
logging.warning(msg)
info = self.read_section(mat, 1, 451)
meta = info["LISO"]
za = int(info["ZA"])
zam = za * 10 + meta
import os
import io
import struct
import bmesh
import bpy
import math
import mathutils
from mathutils import Matrix, Vector, Color
from bpy_extras import io_utils, node_shader_utils
import bmesh
from bpy_extras.wm_utils.progress_report import (
ProgressReport,
ProgressReportSubstep,
)
def name_compat(name):
if name is None:
return 'None'
else:
return name.replace(' ', '_')
def mesh_triangulate(me):
#import bmesh
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.triangulate(bm, faces=bm.faces)
bm.to_mesh(me)
bm.free()
def veckey2d(v):
return round(v[0], 4), round(v[1], 4)
def power_of_two(n):
return (n & (n-1) == 0) and n != 0
#def split_obj(split_objects, obj):
# print("Too many vertices on object " + obj.name + "! Splitting.")
# objcount = len(obj.vertices) % 65535
# for i in range(objcount):
# baseIndex = i * 65535
# bm = bmesh.new()
# bm.from_mesh(me)
# #new_obj.data = obj.data.copy()
# bm.vertices = obj.vertices[baseIndex : baseIndex + 65535]
#
#new_obj = src_obj.copy()
#new_obj.data = src_obj.data.copy()
def jet_str(f, str):
encoded_name = (str + '\0').encode('utf-8')
f.write(struct.pack("<I", len(encoded_name)))
f.write(encoded_name)
def texture_file(type, img_path, target_dir, is_npo2):
basename = os.path.basename(img_path).lower()
texturepath = target_dir + '\\' + os.path.splitext(basename)[0] + ".texture"
txtpath = texturepath + ".txt"
print("write to " + txtpath)
with open(txtpath, "w") as f:
f.write("Primary=" + basename + "\n")
f.write("Alpha=" + basename + "\n")
f.write("Tile=st" + "\n")
if is_npo2:
f.write("nonpoweroftwo=1\n")
if type == 8:
f.write("NormalMapHint=normalmap")
return texturepath
def chunk_ver(f, ver):
f.write(struct.pack("<I", ver))
def end_chunk(f, chunk):
f.write(struct.pack("<I", chunk.tell()))
f.write(chunk.getbuffer())
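# Illustrative sketch (not part of the exporter): how the helpers above compose.
# A chunk is a 4-byte ASCII tag followed by a little-endian uint32 payload length
# and the payload itself, starting with a version number. The "DEMO" tag, string
# and field values below are made up purely for illustration.
def _demo_chunk(f):
    f.write('DEMO'.encode('utf-8'))
    with io.BytesIO() as demo:
        chunk_ver(demo, 100)               # payload starts with a version number
        jet_str(demo, "example")           # length-prefixed, NUL-terminated string
        demo.write(struct.pack("<I", 42))  # an arbitrary uint32 field
        end_chunk(f, demo)                 # prefix the payload with its byte length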
def recursive_writebone(chnk, srcBone, bones_flat):
bones_flat.append(srcBone)
relevant_children = []
for child in srcBone.children:
if "b.r." in child.name:
relevant_children.append(child)
chnk.write('BONE'.encode('utf-8'))
with io.BytesIO() as bone:
chunk_ver(bone, 100)
#BoneName
jet_str(bone, srcBone.name)
#NumChildren
bone.write(struct.pack("<I", len(relevant_children)))
#BoneList
for child in relevant_children:
recursive_writebone(bone, child, bones_flat)
end_chunk(chnk, bone)
def write_kin(filepath, bones, armature, EXPORT_GLOBAL_MATRIX):
print("Writing .kin to " + filepath)
scene = bpy.context.scene
NumFrames = scene.frame_end - scene.frame_start + 1
#preprocess
root_bone = None
for key in bones:
bonegroup = bones[key]
bone = bonegroup[0]
if bone.parent is None:
root_bone = bone
break
if root_bone is None:
print("Could not find a root bone!")
return
with open(filepath, "wb") as f:
#JIRF, filesize
f.write('JIRF'.encode('utf-8'))
with io.BytesIO() as rf: #resource file
rf.write('ANIM'.encode('utf-8'))
rf.write('INFO'.encode('utf-8'))
with io.BytesIO() as info:
chunk_ver(info, 100)
#FileName
jet_str(info, os.path.basename(filepath).lower())
#NumFrames
info.write(struct.pack("<I", NumFrames))
#FrameRate
info.write(struct.pack("<I", 30))
#MetricScale
info.write(struct.pack("<f", 1.0))
end_chunk(rf, info)
#Events
rf.write('EVNT'.encode('utf-8'))
with io.BytesIO() as evnt:
chunk_ver(evnt, 100)
#NumEvents
evnt.write(struct.pack("<I", 0))
end_chunk(rf, evnt)
bones_flat = []
#Skeleton
rf.write('SKEL'.encode('utf-8'))
with io.BytesIO() as skel:
chunk_ver(skel, 100)
#SkeletonBlock
recursive_writebone(skel, root_bone, bones_flat)
#skel.write(struct.pack("<I", 0))
end_chunk(rf, skel)
posebones_flat = []
if armature != None:
for bone in bones_flat:
print("bone " + bone.name)
for posebone in armature.pose.bones:
if posebone.name == bone.name:
posebones_flat.append(posebone)
break
objbones_flat = []
for obj in bones_flat:
if hasattr(obj, 'type') and obj.type in ('EMPTY', 'LATTICE'):
objbones_flat.append(obj)
#pose_bone = (b for b in armature.pose.bones if b.bone is bone)
#posebones_flat.append(pose_bone)
if armature != None:
print("Found " + str(len(posebones_flat)) + "/" + str(len(armature.pose.bones)) + " pose bones")
print("Found " + str(len(objbones_flat)) + " object bones")
#FrameList
for i in range(NumFrames):
scene.frame_set(i)
rf.write('FRAM'.encode('utf-8'))
with io.BytesIO() as fram:
chunk_ver(fram, 100)
#FrameNum
fram.write(struct.pack("<I", i))
#BoneDataList
for pose_bone in posebones_flat:
#mat = pose_bone.matrix_basis
mat = EXPORT_GLOBAL_MATRIX @ armature.matrix_world @ pose_bone.matrix
position, rotation, scale = mat.decompose()
rotation = rotation.inverted()
#Position
fram.write(struct.pack("<fff", *position))
#Orientation
fram.write(struct.pack("<ffff", rotation.x, rotation.y, rotation.z, rotation.w))
#Scale
#fram.write(struct.pack("<fff", scale.x, scale.y, scale.z))
for obj_bone in objbones_flat:
#objMat = obj_bone.matrix_world
#if obj_bone.parent != None:
#objMat = obj_bone.parent.matrix_world.inverted() @ objMat
#objMat = EXPORT_GLOBAL_MATRIX @ objMat
objMat = obj_bone.matrix_world
#if obj_bone.parent is None:
#objMat = obj_bone.matrix_world
#else:
#pbMat = obj_bone.matrix_local.copy() @ obj_bone.matrix_basis
#objMat = obj_bone.parent.matrix_world.copy() @ pbMat
objMat = EXPORT_GLOBAL_MATRIX @ objMat
position, rotation, scale = objMat.decompose()
rotation = rotation.inverted()
#Position
fram.write(struct.pack("<fff", *position))
#Orientation
fram.write(struct.pack("<ffff", rotation.x, rotation.y, rotation.z, rotation.w))
#Scale
#fram.write(struct.pack("<fff", scale.x, scale.y, scale.z))
end_chunk(rf, fram)
end_chunk(f, rf)
def write_file(self, filepath, objects, depsgraph, scene,
EXPORT_APPLY_MODIFIERS=True,
EXPORT_TEXTURETXT=True,
EXPORT_TANGENTS=False,
EXPORT_KIN=True,
EXPORT_SEL_ONLY=False,
EXPORT_GLOBAL_MATRIX=None,
EXPORT_PATH_MODE='AUTO',
#progress=ProgressReport(),
):
if EXPORT_GLOBAL_MATRIX is None:
EXPORT_GLOBAL_MATRIX = Matrix()
#split objects
meshes = []
hold_meshes = [] #prevent garbage collection of bmeshes
for obj in objects:
final = obj.evaluated_get(depsgraph) if EXPORT_APPLY_MODIFIERS else obj.original
try:
me = final.to_mesh()
except RuntimeError:
me = None
if me is None:
continue
if len(me.uv_layers) == 0:
print("Object " + obj.name + " is missing UV coordinates!") # Skipping.
#continue
#uv_layer = me.uv_layers.active.data
mesh_triangulate(me)
me.transform(EXPORT_GLOBAL_MATRIX @ obj.matrix_world)
if len(me.uv_layers) == 0:
uv_layer = None
else:
uv_layer = me.uv_layers.active.data[:]
me_verts = me.vertices[:]
me_edges = me.edges[:]
me.calc_normals_split() #unsure
if EXPORT_TANGENTS:
me.calc_tangents()
#bm = bmesh.new()
#hold_meshes.append(bm)
#bm.from_mesh(me)
#bmesh.ops.triangulate(bm, faces=bm.faces)
objectParent = None #empty and lattice parents
if obj.parent != None:
if "b.r." in obj.parent.name:
objectParent = obj.parent
#split_faces = []
#idx2idxmap = {}
print("Processing mesh...")
vertgroups = obj.vertex_groups
#split into materials
materials = me.materials[:]
print("object contains " + str(len(materials)) + " materials")
#mat_count = len(materials) if len(materials) > 0 else 1
if len(materials) == 0:
materials.append(None)
#split_matblocks = [None] * len(materials)
split_matblocks = [[] for i in range(len(materials))]
for face in me.polygons:
mat_index = face.material_index
if mat_index is None:
mat_index = 0
split_matblocks[mat_index].append(face)
for i, srcobject in enumerate(split_matblocks):
#wasCopied = [None] * len(me_verts)
uv_dict = {}
uv = uv_key = uv_val = None
unique_verts = []
indices = []
normals = []
face_normals = [] #[]
tangents = []
area = 0.0
for face in srcobject:
area += face.area
#split_faces.append(face)
face_normals.append(face.normal.normalized())
#face_normals.append([0.0, 0.0, 1.0])
for uv_index, l_index in enumerate(face.loop_indices):
loop = me.loops[l_index]
vert = me_verts[loop.vertex_index]
uv = uv_layer[l_index].uv if uv_layer != None else [0, 0]
uv_key = loop.vertex_index, veckey2d(uv)
#uv_key = veckey2d(uv)
uv_val = uv_dict.get(uv_key)
#vert = loop.vert
if uv_val is None: #wasCopied[loop.vertex_index] is None or
#wasCopied[loop.vertex_index] = len(unique_verts)
uv_dict[uv_key] = len(unique_verts)
influences = []
for group in vertgroups:
try:
weight = group.weight(loop.vertex_index)
except RuntimeError:
weight = 0.0
if weight != 0.0:
influences.append([group.name, weight])
#for infl in influences:
#print("vert infl obj " + obj.name + " " + infl[0] + ": " + str(infl[1]))
unique_verts.append([vert.co[:], uv[:], influences])
#normals.append(vert.normal.normalized())
normals.append(loop.normal.normalized())
if EXPORT_TANGENTS:
tangents.append(loop.tangent.normalized())
#indices.append(wasCopied[loop.vertex_index])
indices.append(uv_dict[uv_key])
if len(unique_verts) > 65532 or len(indices) // 3 > 65535:
#apply and update
#ret = bmesh.ops.split(bm, geom=split_faces)
#split_blocks.append(ret["geom"])
meshes.append([obj, materials[i],
unique_verts.copy(),
indices.copy(),
normals.copy(),
face_normals.copy(),
tangents.copy(),
area,
uv_layer,
objectParent])
unique_verts.clear()
indices.clear()
normals.clear()
face_normals.clear()
tangents.clear()
area = 0.0
#split_faces.clear()
#idx2idxmap.clear()
#wasCopied = [None] * len(me_verts)
uv_dict.clear()
uv = uv_key = uv_val = None
print("Block split.")
#Add remaining verts
if len(unique_verts) > 0:
meshes.append([obj, materials[i],
unique_verts.copy(),
indices.copy(),
normals.copy(),
face_normals.copy(),
tangents.copy(),
area,
uv_layer,
objectParent])
print("Complete.")
#bm.free()
active_armature = None
bones = {}
root_bone = None
for ob in objects:
if ob.type != 'ARMATURE':
continue
active_armature = ob
break
if active_armature is None:
print("No armature in scene.")
#EXPORT_KIN = False
else:
for bone in active_armature.data.bones:
if "b.r." in bone.name:
#bone, chunk influences
bones[bone.name] = [bone, [[] for _ in range(len(meshes))]]
if bone.parent == None:
root_bone = bone
#legacy empty, lattice support
for ob in objects:
if "b.r." in ob.name:
print("Found bone object " + ob.name)
#ob.scale = [1.0, 1.0, 1.0]
#bpy.context.view_layer.update()
bones[ob.name] = [ob, [[] for _ in range(len(meshes))]]
if ob.parent == None:
root_bone = ob
if EXPORT_KIN:
write_kin(os.path.splitext(filepath)[0] + ".kin", bones, active_armature, EXPORT_GLOBAL_MATRIX)
#write_kin(os.path.dirname(filepath) + "\\anim.kin", bones, active_armature, EXPORT_GLOBAL_MATRIX)
#reset frame after writing kin, for object transforms
scene.frame_set(0)
#Attachment setup
attachments = []
for ob in objects:
if ob.type == 'EMPTY' and 'a.' in ob.name:
print("Attachment " + ob.name)
attachments.append(ob)
copy_set = set()
with open(filepath, "wb") as f:
#fw = f.write
source_dir = os.path.dirname(bpy.data.filepath)
dest_dir = os.path.dirname(filepath)
#JIRF, filesize
f.write('JIRF'.encode('utf-8'))
with io.BytesIO() as rf: #resource file
rf.write('IDXM'.encode('utf-8'))
rf.write('INFO'.encode('utf-8'))
with io.BytesIO() as info:
chunk_ver(info, 102)
objLocation, objRotation, objScale = EXPORT_GLOBAL_MATRIX.decompose()
#Position
info.write(struct.pack("<fff", objLocation.x, objLocation.y, objLocation.z))
#Rotation
#info.write(struct.pack("<ffff", objRotation.x, objRotation.y, objRotation.z, objRotation.w))
info.write(struct.pack("<ffff", objRotation.w, objRotation.x, objRotation.y, objRotation.z))
#NumAttributes
info.write(struct.pack("<I", len(meshes)))
#MaxInfluencePerVertex
info.write(struct.pack("<I", 0))
#MaxInfluencePerChunk
info.write(struct.pack("<I", 0))
end_chunk(rf, info)
for i, entry in enumerate(meshes):
obj = entry[0]
#mesh = entry[1]
#uv_layer = entry[2]
## [obj, materials[i]
# unique_verts.copy(),
# indices.copy(),
# normals.copy(),
# face_normals.copy(),
# area,
# uv_layer]
mat = entry[1]
verts = entry[2]
indices = entry[3]
normals = entry[4]
face_normals = entry[5]
tangents = entry[6]
area = entry[7]
uv_layer = entry[8]
objParent = entry[9]
#uv_layer = mesh.uv_layers.active.data
#mesh_triangulate(mesh)
#mat = obj.active_material
defaultMaterial = bpy.data.materials.new(obj.name)
if mat is None:
mat = defaultMaterial
rf.write('CHNK'.encode('utf-8'))
with io.BytesIO() as attr:
chunk_ver(attr, 100)
#Chunk ID
attr.write(struct.pack("<I", i))
attr.write('MATL'.encode('utf-8'))
with io.BytesIO() as matl:
chunk_ver(matl, 103)
#Name
jet_str(matl, mat.name)
#NumProperties
matl.write(struct.pack("<I", 0))
#nodes
mat_wrap = node_shader_utils.PrincipledBSDFWrapper(mat)
#TwoSided
matl.write(struct.pack("<I", int(not mat.use_backface_culling)))
#matl.write(struct.pack("<I", 0))
#Opacity
matl.write(struct.pack("<f", | |
+ ' '
def p_otros_from(t):
'otros_froms : otros_froms COMA otro_from'
t[1].append(t[2])
t[1].append(t[3])
t[0] = t[1]
def p_otros_from2(t):
'otros_froms : otro_from'
t[0] = [t[1]]
def p_opcion_select_tm(t):
'opcion_select_tm : opcion_select_lista FROM opciones_from opcion_from'
# t[3] IS A LIST
cadena = ""
for i in t[3]:
cadena += str(i)
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(cadena) + ' '+ str(t[4])
def p_opciones_from(t):
'''opciones_from : opciones_from COMA from_s'''
t[1].append(t[2])
t[1].append(t[3])
t[0] = t[1]
def p_opciones_from2(t):
'opciones_from : from_s'
t[0] = [t[1]]
def p_ins_1(t):
'opcion_select_tm : varias_funciones'
# t[1] IS A LIST
cadena = ""
for i in t[1]:
cadena+= str(i)
t[0] = ' ' + str(cadena) + ' '
def p_varias_funciones(t):
'varias_funciones : varias_funciones COMA funcion'
t[1].append(t[2])
t[1].append(t[3])
t[0] = t[1]
def p_varias_funciones1(t):
'varias_funciones : funcion'
t[0] = [t[1]]
def p_funcionSobre(t):
'funcion : funciones_select seguir_sobrenombre'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_funcion1(t):
'funcion : funciones_select'
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select_tm_op2(t):
'''otro_from : from_s '''
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select_tm_op3(t):
'otro_from : from_s opcion_from'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_opcion_s(t):
''' from_s : ID'''
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_s2(t):
' from_s : PARA'
t[0] = ' ' + str(t[1]) + ' '
def p_sobre_Nombre(t):
''' opcion_sobrenombre : ID seguir_sobrenombre'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_sobre_Nombre2(t):
' opcion_sobrenombre : ID '
t[0] = ' ' + str(t[1]) + ' '
def p_as_ID(t):
''' as_ID : ID '''
t[0] = ' ' + str(t[1]) + ' '
def p_as_ID2(t):
'as_ID : CADENA'
cadena = '\\\''+t[1]+'\\\''
t[0] = ' ' + str(cadena) + ' '
#---------------------------------------------------------
def p_alias(t):
''' seguir_sobrenombre : AS as_ID'''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_alias2(t):
'seguir_sobrenombre : ID'
t[0] = ' ' + str(t[1]) + ' '
def p_alias3(t):
'seguir_sobrenombre : PUNTO ID'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_opcion_select_tm_extract(t):
'opcion_select_tm : EXTRACT PARA extract_time FROM TIMESTAMP CADENA PARC '
cadena = '\\\''+t[6]+'\\\''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(cadena) + ' '+ str(t[7]) + ' '
def p_opcion_select_tm_date(t):
'opcion_select_tm : DATE_PART PARA CADENA COMA INTERVAL CADENA PARC '
cadena = '\\\''+t[3]+'\\\''
cadena1 = '\\\''+t[6]+'\\\''
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(cadena) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(cadena1) + ' '+ str(t[7]) + ' '
def p_opcion_select_tm_now(t):
'opcion_select_tm : NOW PARA PARC '
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_opcion_select_tm_current(t):
'opcion_select_tm : CURRENT_DATE '
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select_tm_crtm(t):
'opcion_select_tm : CURRENT_TIME '
t[0] = ' ' + str(t[1]) + ' '
def p_opcion_select_tm_timestamp(t):
'opcion_select_tm : TIMESTAMP CADENA '
cadena = '\\\''+t[2]+'\\\''
t[0] = ' ' + str(t[1]) + ' '+ str(cadena) + ' '
#?######################################################
# TODO OFFSET
#?######################################################
def p_opcion_from_0_0_1_1_1_1_1_0(t):
'opcion_from : cond_where cond_gb cond_having cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '
def p_opcion_from_0_0_0_1_1_1_1_0(t):
'opcion_from : cond_gb cond_having cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
def p_opcion_from_0_0_1_0_1_1_1_0(t):
'opcion_from : cond_where cond_having cond_ob orden cond_limit OFFSET ENTERO'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '+ str(t[7]) + ' '
def p_opcion_from_0_0_0_0_1_1_1_0(t):
'opcion_from : cond_having cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_1_1_0_1_1_0(t):
'opcion_from : cond_where cond_gb cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
def p_opcion_from_0_0_0_1_0_1_1_0(t):
'opcion_from : cond_gb cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_1_0_0_1_1_0(t):
'opcion_from : cond_where cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_0_0_0_1_1_0(t):
'opcion_from : cond_ob orden cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_1_1_1_1_1_0_ordeno(t):
'opcion_from : cond_where cond_gb cond_having cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
def p_opcion_from_0_0_0_1_1_1_1_0_ordeno(t):
'opcion_from : cond_gb cond_having cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_1_0_1_1_1_0_ordeno(t):
'opcion_from : cond_where cond_having cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_0_0_1_1_1_0_ordeno(t):
'opcion_from : cond_having cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_1_1_0_1_1_0_ordeno(t):
'opcion_from : cond_where cond_gb cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_0_1_0_1_1_0_ordeno(t):
'opcion_from : cond_gb cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_1_0_0_1_1_0_ordeno(t):
'opcion_from : cond_where cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_0_0_0_1_1_0_ordeno(t):
'opcion_from : cond_ob cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_opcion_from_0_0_1_1_1_0_1_0(t):
'opcion_from : cond_where cond_gb cond_having cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '
def p_opcion_from_0_0_0_1_1_0_1_0(t):
'opcion_from : cond_gb cond_having cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_1_0_1_0_1_0(t):
'opcion_from : cond_where cond_having cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_0_0_1_0_1_0(t):
'opcion_from : cond_having cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_opcion_from_0_0_1_1_0_0_1_0(t):
'opcion_from : cond_where cond_gb cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '
def p_opcion_from_0_0_0_1_0_0_1_0(t):
'opcion_from : cond_gb cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_opcion_from_0_0_1_0_0_0_1_0(t):
'opcion_from : cond_where cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '
def p_opcion_from_0_0_0_0_0_0_1_0(t):
'opcion_from : cond_limit cond_offset'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '
def p_opcion_from_0_0_1_1_1_1_1_0_offno(t):
'opcion_from : cond_where cond_gb cond_having cond_ob orden cond_limit'
t[0] = ' ' + str(t[1]) + ' '+ str(t[2]) + ' '+ str(t[3]) + ' '+ str(t[4]) + ' '+ str(t[5]) + ' '+ str(t[6]) + ' '
import inspect, traceback
import json
import sys
from contextlib import contextmanager
from time import perf_counter
from typing import Optional, Callable
from ._backend import Backend, BACKENDS, _DEFAULT
from ._dtype import DType
class BackendCall:
def __init__(self, start: float, stop: float, backend: 'ProfilingBackend', function_name):
self._start = start
self._stop = stop
self._backend = backend
self._function_name = function_name
self._args = {"Backend": backend.name}
def __repr__(self):
return f"{1000 * self._duration:.2f} ms {self._function_name}"
def print(self, include_parents, depth, min_duration, code_col, code_len):
if self._duration >= min_duration:
print(f"{' ' * depth}{1000 * self._duration:.2f} ms {self._backend}.{self._function_name}")
@property
def _name(self):
return repr(self)
@property
def _duration(self):
return self._stop - self._start
def trace_json_events(self, include_parents) -> list:
backend_index = self._backend._index
name = self._function_name
return [
{
'name': name,
'ph': 'X',
'pid': 1,
'tid': backend_index+1,
'ts': int(round(self._start * 1000000)),
'dur': int(round((self._stop - self._start) * 1000000)),
'args': self._args
}
]
def call_count(self) -> int:
return 1
def add_arg(self, key, value):
assert key not in self._args
self._args[key] = value
class ExtCall:
def __init__(self, parent: 'ExtCall' or None, stack: list):
self._parent = parent
if parent is None:
self._parents = ()
else:
self._parents = parent._parents + (parent,)
self._stack = stack # stack trace from inspect.stack() including parent calls
self._children = [] # BackendCalls and ExtCalls
self._converted = False
def common_call(self, stack: list):
""" Returns the deepest ExtCall in the hierarchy of this call that contains `stack`. """
if self._parent is None:
return self
if len(stack) < len(self._stack):
return self._parent.common_call(stack)
for i in range(len(self._stack)):
if self._stack[-1-i].function != stack[-1-i].function:
return self._parents[i]
return self
def add(self, child):
self._children.append(child)
@property
def _name(self):
if not self._stack:
return ""
info = self._stack[0]
fun = info.function
if 'self' in info.frame.f_locals:
if fun == '__init__':
return f"{type(info.frame.f_locals['self']).__name__}()"
return f"{type(info.frame.f_locals['self']).__name__}.{fun}"
if 'phi/math' in info.filename or 'phi\\math' in info.filename:
return f"math.{fun}"
else:
return fun
@property
def _start(self):
return self._children[0]._start
@property
def _stop(self):
return self._children[-1]._stop
@property
def _duration(self):
return sum(c._duration for c in self._children)
def call_count(self) -> int:
return sum(child.call_count() for child in self._children)
def __repr__(self):
if not self._converted:
if self._parent is None:
return "/"
return f"{self._name} ({len(self._stack)})"
else:
context = self._stack[0].code_context
return f"sum {1000 * self._duration:.2f} ms {context}"
def __len__(self):
return len(self._children)
def _empty_parent_count(self):
for i, parent in enumerate(reversed(self._parents)):
if len(parent._children) > 1:
return i
return len(self._parents)
def _eff_parent_count(self):
return len([p for p in self._parents if len(p._children) > 1])
def _closest_non_trivial_parent(self):
parent = self._parent
while parent._parent is not None:
if len(parent._children) > 1:
return parent
parent = parent._parent
return parent
def _calling_code(self, backtrack=0):
if len(self._stack) > backtrack + 1:
frame = self._stack[backtrack+1]
return frame.code_context[0].strip(), frame.filename, frame.function, frame.lineno
else:
return "", "", "", -1
def print(self, include_parents=(), depth=0, min_duration=0., code_col=80, code_len=50):
if self._duration < min_duration:
return
if len(self._children) == 1 and isinstance(self._children[0], ExtCall):
self._children[0].print(include_parents + ((self,) if self._parent is not None else ()), depth, min_duration, code_col, code_len)
else:
funcs = [par._name for par in include_parents] + [self._name]
text = f"{'. ' * depth}-> {' -> '.join(funcs)} ({1000 * self._duration:.2f} ms)"
if len(self._stack) > len(include_parents)+1:
code = self._calling_code(backtrack=len(include_parents))[0]
if len(code) > code_len:
code = code[:code_len-3] + "..."
text += " " + "." * max(0, (code_col - len(text))) + " > " + code
print(text)
for child in self._children:
child.print((), depth + 1, min_duration, code_col, code_len)
def children_to_properties(self) -> dict:
result = {}
for child in self._children:
name = f"{len(result)} {child._name}" if len(self._children) <= 10 else f"{len(result):02d} {child._name}"
while isinstance(child, ExtCall) and len(child) == 1:
child = child._children[0]
name += " -> " + child._name
result[name] = child
if isinstance(child, ExtCall):
child.children_to_properties()
# finalize
for name, child in result.items():
setattr(self, name, child)
self._converted = True
return result
def trace_json_events(self, include_parents=()) -> list:
if len(self._children) == 1:
return self._children[0].trace_json_events(include_parents + (self,))
else:
name = ' -> '.join([par._name for par in include_parents] + [self._name])
eff_parent_count = self._eff_parent_count()
calling_code, calling_filename, calling_function, lineno = self._calling_code(backtrack=self._empty_parent_count())
result = [
{
'name': name,
'ph': "X", # complete event
'pid': 0,
'tid': eff_parent_count,
'ts': int(self._start * 1000000),
'dur': int((self._stop - self._start) * 1000000),
'args': {
"Calling code snippet": calling_code,
"Called by": f"{calling_function}() in {calling_filename}, line {lineno}",
"Active time (backend calls)": f"{self._duration * 1000:.2f} ms ({round(100 * self._duration / self._closest_non_trivial_parent()._duration):.0f}% of parent, {100 * self._duration / (self._stop - self._start):.1f}% efficiency)",
"Backend calls": f"{self.call_count()} ({round(100 * self.call_count() / self._closest_non_trivial_parent().call_count()):.0f}% of parent)"
}
}
]
for child in self._children:
result.extend(child.trace_json_events(()))
return result
class Profile:
"""
Stores information about calls to backends and their timing.
Profile may be created through `profile()` or `profile_function()`.
Profiles can be printed or saved to disc.
"""
def __init__(self, trace: bool, backends: tuple or list, subtract_trace_time: bool):
self._start = perf_counter()
self._stop = None
self._root = ExtCall(None, [])
self._last_ext_call = self._root
self._messages = []
self._trace = trace
self._backend_calls = []
self._retime_index = -1
self._accumulating = False
self._backends = backends
self._subtract_trace_time = subtract_trace_time
self._total_trace_time = 0
def _add_call(self, backend_call: BackendCall, args: tuple, kwargs: dict, result):
if self._retime_index >= 0:
prev_call = self._backend_calls[self._retime_index]
assert prev_call._function_name == backend_call._function_name
if self._accumulating:
prev_call._start += backend_call._start
prev_call._stop += backend_call._stop
else:
prev_call._start = backend_call._start
prev_call._stop = backend_call._stop
self._retime_index = (self._retime_index + 1) % len(self._backend_calls)
else:
self._backend_calls.append(backend_call)
args = {i: arg for i, arg in enumerate(args)}
args.update(kwargs)
backend_call.add_arg("Inputs", _format_values(args, backend_call._backend))
if isinstance(result, (tuple, list)):
backend_call.add_arg("Outputs", _format_values({i: res for i, res in enumerate(result)}, backend_call._backend))
else:
backend_call.add_arg("Outputs", _format_values({0: result}, backend_call._backend))
if self._trace:
stack = inspect.stack()[2:]
call = self._last_ext_call.common_call(stack)
for i in range(len(call._stack), len(stack)):
sub_call = ExtCall(call, stack[len(stack) - i - 1:])
call.add(sub_call)
call = sub_call
call.add(backend_call)
self._last_ext_call = call
if self._subtract_trace_time:
delta_trace_time = perf_counter() - backend_call._stop
backend_call._start -= self._total_trace_time
backend_call._stop -= self._total_trace_time
self._total_trace_time += delta_trace_time
def _finish(self):
self._stop = perf_counter()
self._children_to_properties()
@property
def duration(self) -> float:
""" Total time passed from creation of the profile to the end of the last operation. """
return self._stop - self._start if self._stop is not None else None
def print(self, min_duration=1e-3, code_col=80, code_len=50):
"""
Prints this profile to the console.
Args:
min_duration: Hides elements with less time spent on backend calls than `min_duration` (seconds)
code_col: Formatting option for where the context code is printed.
code_len: Formatting option for cropping the context code
"""
print(f"Profile: {self.duration:.4f} seconds total. Skipping elements shorter than {1000 * min_duration:.2f} ms")
if self._messages:
print("External profiling:")
for message in self._messages:
print(f" {message}")
print()
self._root.print(min_duration=min_duration, code_col=code_col, code_len=code_len)
def save(self, json_file: str):
"""
Saves this profile to disc using the *trace event format* described at
https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit
This file can be viewed with external applications such as Google Chrome.
Args:
json_file: filename
"""
data = [
{'name': "process_name", 'ph': 'M', 'pid': 0, 'tid': 0, "args": {"name": "0 Python calls"}},
{'name': "process_name", 'ph': 'M', 'pid': 1, 'tid': 1, "args": {"name": "1 Operations"}},
] + [
{'name': "thread_name", 'ph': 'M', 'pid': 1, 'tid': i + 1, "args": {"name": backend.name}}
for i, backend in enumerate(self._backends)
]
if self._trace:
if len(self._root._children) > 0:
data.extend(self._root.trace_json_events())
else:
data.extend(sum([call.trace_json_events(()) for call in self._backend_calls], []))
with open(json_file, 'w') as file:
json.dump(data, file)
save_trace = save
def _children_to_properties(self):
children = self._root.children_to_properties()
for name, child in children.items():
setattr(self, name, child)
def add_external_message(self, message: str):
""" Stores an external message in this profile. External messages are printed in `Profile.print()`. """
self._messages.append(message)
@contextmanager
def retime(self):
"""
To be used in `with` statements, `with prof.retime(): ...`.
Updates this profile by running the same operations again but without tracing.
This gives a much better indication of the true timing.
The code within the `with` block must perform the same operations as the code that created this profile.
*Warning:* Internal caching may reduce the number of operations after the first time a function is called.
To prevent this, run the function before profiling it, see `warmup` in `profile_function()`.
"""
self._retime_index = 0
restore_data = _start_profiling(self, self._backends)
try:
yield None
finally:
_stop_profiling(self, *restore_data)
assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally {len(self._backend_calls)}, now {self._retime_index}, "
self._retime_index = -1
@contextmanager
def _accumulate_average(self, n):
self._retime_index = 0
self._accumulating = True
restore_data = _start_profiling(self, self._backends)
try:
yield None
finally:
_stop_profiling(self, *restore_data)
assert self._retime_index == 0, f"Number of calls during retime did not match original profile, originally | |
# d3pm/text/losses.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standard loss functions and utilities."""
from typing import Optional, Tuple
import chex
import jax
from jax import lax
import jax.numpy as jnp
@jax.custom_vjp
def _cross_entropy_with_logits(logits, targets):
"""Computes cross entropy loss with custom gradient support.
Computes a stabilized-gradient version of:
-jnp.sum(targets * nn.log_softmax(logits), axis=-1)
Args:
logits: [..., num_classes] float array.
targets: categorical targets [..., num_classes] float array.
Returns:
per-example cross entropy loss
"""
assert logits.shape == targets.shape, ("logits and targets must have the same"
" shape (targets must be one-hot or "
"label smoothed).")
shifted = logits - logits.max(axis=-1, keepdims=True)
exp_shifted = jnp.exp(shifted)
sum_exp = jnp.sum(exp_shifted, axis=-1, keepdims=True)
log_softmax = shifted - jnp.log(sum_exp)
loss = -jnp.sum(targets * log_softmax, axis=-1)
return loss
def _cross_entropy_with_logits_fwd(logits, targets):
shifted = logits - logits.max(axis=-1, keepdims=True)
exp_shifted = jnp.exp(shifted)
sum_exp = jnp.sum(exp_shifted, axis=-1, keepdims=True)
log_softmax = shifted - jnp.log(sum_exp)
loss = -jnp.sum(targets * log_softmax, axis=-1)
return loss, (exp_shifted, sum_exp, logits, targets)
def _cross_entropy_with_logits_bwd(res, g):
exp_shifted, sum_exp, logits, targets = res
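# gradient of -sum(targets * log_softmax(logits)) w.r.t. logits is
# softmax(logits) - targets, and exp_shifted / sum_exp is exactly softmax(logits)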
g_logits = jnp.expand_dims(g, axis=-1) * (exp_shifted / sum_exp - targets)
return jnp.asarray(g_logits, logits.dtype), jnp.asarray(g, targets.dtype)
_cross_entropy_with_logits.defvjp(_cross_entropy_with_logits_fwd,
_cross_entropy_with_logits_bwd)
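# Hedged usage sketch (not part of the library): the custom-VJP loss above matches
# the naive -sum(targets * log_softmax(logits)) formulation and is differentiable.
# The logits/targets values below are made up for illustration.
def _cross_entropy_with_logits_example():
    logits = jnp.array([[2.0, 0.5, -1.0]])
    targets = jnp.array([[1.0, 0.0, 0.0]])  # one-hot targets
    loss = _cross_entropy_with_logits(logits, targets)
    naive = -jnp.sum(targets * jax.nn.log_softmax(logits), axis=-1)
    grads = jax.grad(lambda lg: _cross_entropy_with_logits(lg, targets).sum())(logits)
    return loss, naive, grads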
def onehot(labels,
num_classes,
on_value=1.0,
off_value=0.0):
"""Returns the one-hot encoding of the labels with dimension num_classes.
Args:
labels: integer labels to be encoded.
num_classes: the dimension of the one-hot encoding.
on_value: the value to use for the "1" values.
off_value: the value to use for the "0" values.
Returns:
an array of shape labels.shape + (num_classes,) containing one-hot encodings
of labels (as a floating point array).
"""
labels = jnp.asarray(labels)
x = (labels[Ellipsis, None] == jnp.arange(num_classes))
x = lax.select(x, jnp.full(x.shape, on_value), jnp.full(x.shape, off_value))
return x.astype(jnp.float32)
def cross_entropy_with_logits(logits,
targets,
label_smoothing = 0.0):
"""Compute cross entropy and entropy for log probs and targets.
Cross entropy is taken over the last axis. Remaining axes are unchanged.
Args:
logits: [..., length, num_classes] float array.
targets: categorical targets [..., length] int array.
label_smoothing: label smoothing constant, used to determine the on and off
values.
Returns:
Array with loss taken over the last axis.
"""
assert logits.shape[:-1] == targets.shape, (
"Logits shape must agree with targets, except in the last dimension.")
chex.assert_type([logits, targets], [jnp.float32, jnp.int32])
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) +
(vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20))
soft_targets = onehot(
targets, vocab_size, on_value=confidence, off_value=low_confidence)
loss = _cross_entropy_with_logits(logits, soft_targets)
loss = loss - normalizing_constant
return loss
def cross_entropy_with_probs(probs,
targets,
label_smoothing = 0.0,
epsilon = 1e-20):
"""Compute cross entropy for a given distribution and targets.
Cross entropy is taken over the last axis. Remaining axes are unchanged.
Args:
probs: [..., length, num_classes] float array.
targets: categorical targets [..., length] int array.
label_smoothing: label smoothing constant, used to determine the on and off
values.
epsilon: small noise to add to probs when converting to log space.
Returns:
Array with loss taken over the last axis.
"""
assert probs.shape[:-1] == targets.shape, (
"Logits shape must agree with targets, except in the last dimension.")
chex.assert_type([probs, targets], [jnp.float32, jnp.int32])
vocab_size = probs.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) +
(vocab_size - 1) * low_confidence * jnp.log(low_confidence + epsilon))
soft_targets = onehot(
targets, vocab_size, on_value=confidence, off_value=low_confidence)
probs = jax.nn.relu(probs) # help with numerical stability
loss = -jnp.sum(soft_targets * jnp.log(probs + epsilon), axis=-1)
loss = loss - normalizing_constant
return loss
def kl_divergence_with_logits(p_logits = None,
q_logits = None,
temperature = 1.):
"""Compute the KL between two categorical distributions from their logits.
Args:
p_logits: [..., dim] array with logits for the first distribution.
q_logits: [..., dim] array with logits for the second distribution.
temperature: the temperature for the softmax distribution, defaults at 1.
Returns:
an array of KL divergence terms taken over the last axis.
"""
chex.assert_type([p_logits, q_logits], float)
chex.assert_equal_shape([p_logits, q_logits])
p_logits /= temperature
q_logits /= temperature
p = jax.nn.softmax(p_logits)
log_p = jax.nn.log_softmax(p_logits)
log_q = jax.nn.log_softmax(q_logits)
kl = jnp.sum(p * (log_p - log_q), axis=-1)
## KL divergence should be positive, this helps with numerical stability
loss = jax.nn.relu(kl)
return loss
def kl_divergence_with_probs(p = None,
q = None,
epsilon = 1e-20):
"""Compute the KL between two categorical distributions from their probabilities.
Args:
p: [..., dim] array with probs for the first distribution.
q: [..., dim] array with probs for the second distribution.
epsilon: a small float to normalize probabilities with.
Returns:
an array of KL divergence terms taken over the last axis.
"""
chex.assert_type([p, q], float)
chex.assert_equal_shape([p, q])
log_p = jnp.log(p + epsilon)
log_q = jnp.log(q + epsilon)
kl = jnp.sum(p * (log_p - log_q), axis=-1)
## KL divergence should be positive, this helps with numerical stability
loss = jax.nn.relu(kl)
return loss
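# Hedged usage sketch (not part of the library): the two KL helpers above agree up
# to the epsilon smoothing used by the probability version. Values are made up.
def _kl_divergence_example():
    p_logits = jnp.array([[1.0, 0.0, -1.0]])
    q_logits = jnp.array([[0.5, 0.5, 0.0]])
    kl_from_logits = kl_divergence_with_logits(p_logits, q_logits)
    kl_from_probs = kl_divergence_with_probs(
        jax.nn.softmax(p_logits), jax.nn.softmax(q_logits))
    return kl_from_logits, kl_from_probs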
def lp_norm(inputs,
targets,
p = 2,
apply_root = False):
"""Compute the weighted L^p error between inputs and targets.
Args:
inputs: the input array.
targets: the target array.
p: the norm order to use.
apply_root: if True, applies the p-norm root. Note that we always assume the
first axis is a batch axis.
Returns:
the L^p error between inputs and targets over the last axis.
"""
assert inputs.shape == targets.shape, (f"Inputs and target shapes must agree."
f" Found {inputs.shape}, "
f"{targets.shape}.")
loss = jnp.abs(inputs - targets)**p
loss = loss.sum()
if apply_root:
loss = loss**(1 / float(p))
return loss
def cosine_distance(inputs,
targets,
epsilon = 1e-20):
"""Compute the cosine distance along the last axis.
Args:
inputs: the input array.
targets: the target array.
epsilon: a small float used to normalize the denominator.
Returns:
the cosine distance between inputs and targets over the last axis.
"""
assert inputs.shape == targets.shape, (f"Inputs and target shapes must agree."
f" Found {inputs.shape}, "
f"{targets.shape}.")
inputs_norm = jnp.linalg.norm(inputs, ord=2, axis=-1)
targets_norm = jnp.linalg.norm(targets, ord=2, axis=-1)
loss = 1 - (inputs * targets).sum(axis=-1) / (
inputs_norm * targets_norm + epsilon)
return loss
def weighted_accuracy(logits,
targets,
weights=None):
"""Computes the weighted accuracy of the predicted logits.
Args:
logits: [..., num_classes] unnormalized logits.
targets: [...] categorical tensor with class labels.
weights: Optional[...] float tensor containing weights to scale targets by.
Can be used for class weights or masking.
Returns:
tuple of sum accuracy across all examples and normalizing factor. To recover
true accuracy, divide total by weights.
"""
assert logits.shape[:-1] == targets.shape, (
f"Logits shape must agree with targets, except "
f"in the last dimension. Found {logits.shape[:-1]}, {targets.shape}.")
chex.assert_type([logits, targets], [jnp.float32, jnp.int32])
if weights is not None:
assert targets.shape == weights.shape, ("labels and weights must have the "
"same shape.")
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
total_loss, weights = weighted_mean(loss, weights)
return total_loss, weights
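# Hedged usage sketch (not part of the library): `weighted_accuracy` returns a sum
# and a normalizer so results can be accumulated across batches; dividing recovers
# the masked accuracy. The values and mask below are made up for illustration.
def _weighted_accuracy_example():
    logits = jnp.array([[0.1, 2.0], [3.0, -1.0], [0.0, 1.0]])
    targets = jnp.array([1, 0, 0], dtype=jnp.int32)
    mask = jnp.array([1.0, 1.0, 0.0])  # ignore the (wrong) last position
    total, denom = weighted_accuracy(logits, targets, weights=mask)
    return total / denom  # 1.0 for this example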
def weighted_mean(array,
weights = None):
"""Computes the weighted mean of an array.
Args:
array: the array to compute the mean of.
weights: if supplied, a set of weights which are multiplied with the array
before average.
Returns:
the total loss summed over examples, and the weights to divide by.
"""
if weights is None:
weights = jnp.ones_like(array)
chex.assert_equal_rank([array, weights])
loss = (array * weights).sum()
return loss, weights.sum()
def classification_loss_fn(params,
inputs,
targets,
*,
model_apply,
rng_key,
is_eval=False,
label_smoothing=0.0):
"""Applies cross entropy loss given a batch and model_apply fn."""
del is_eval, rng_key
output = model_apply(params, inputs)
if isinstance(output, tuple):
if len(output) != 2:
raise ValueError("expected model_apply to return logits or (logits, mask)"
f", but found a tuple of length {len(output)}.")
logits, mask = output
else:
logits, mask = output, None
loss = cross_entropy_with_logits(
logits, targets, label_smoothing=label_smoothing)
loss, weights = weighted_mean(loss, mask)
acc, _ = weighted_accuracy(logits, targets, weights=mask)
metrics = {
"loss": loss,
"denominator": weights,
"accuracy": acc,
}
extras | |
import hashlib
import json
import logging
import urllib.parse
from collections import OrderedDict
import requests
from django import forms
from django.conf import settings
from django.contrib import messages
from django.core import signing
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.translation import get_language, gettext_lazy as _
from pretix_payone.models import ReferencedPayoneObject
from requests import HTTPError
from pretix.base.decimal import round_decimal
from pretix.base.forms import SecretKeySettingsField
from pretix.base.forms.questions import guess_country
from pretix.base.models import Event, InvoiceAddress, OrderPayment, OrderRefund
from pretix.base.payment import BasePaymentProvider, PaymentException
from pretix.base.settings import SettingsSandbox
from pretix.helpers.countries import CachedCountries
from pretix.helpers.urls import build_absolute_uri as build_global_uri
from pretix.multidomain.urlreverse import build_absolute_uri
logger = logging.getLogger(__name__)
class PayoneSettingsHolder(BasePaymentProvider):
identifier = "payone"
verbose_name = "PAYONE"
is_enabled = False
is_meta = True
def __init__(self, event: Event):
super().__init__(event)
self.settings = SettingsSandbox("payment", "payone", event)
@property
def settings_form_fields(self):
fields = [
(
"mid",
forms.CharField(
label=_("Merchant ID"),
required=True,
),
),
(
"aid",
forms.CharField(
label=_("Sub-Account ID"),
required=True,
),
),
(
"portalid",
forms.CharField(
label=_("Portal ID"),
required=True,
),
),
(
"key",
SecretKeySettingsField(
label=_("Key"),
required=True,
),
),
]
methods = [
("creditcard", _("Credit card")),
("paypal", _("PayPal")),
("eps", _("eps")), # ???
("sofort", _("SOFORT")),
("ideal", _("iDEAL")),
# disabled because they are untested
# ("giropay", _("giropay")),
# ("przelewy24", _("Przelewy24")),
# ("multibanco", _("Multibanco")),
# ("bancontact", _("Bancontact")),
# ("vkp", _("Verkkopankki")),
# ("mybank", _("MyBank")),
# ("alipay", _("Alipay")),
# ("paydirekt", _("paydirekt")),
# ("paysafecard", _("paysafecard")),
# ("qiwi", _("Qiwi")),
# more: https://docs.payone.com/display/public/PLATFORM/General+information
]
d = OrderedDict(
fields
+ [
(f"method_{k}", forms.BooleanField(label=v, required=False))
for k, v in methods
]
+ list(super().settings_form_fields.items())
)
d.move_to_end("_enabled", last=False)
return d
def settings_content_render(self, request):
return "<div class='alert alert-info'>%s<br /><code>%s</code></div>" % (
_(
"Please configure the TransactionStatus URL to "
"the following endpoint in order to automatically cancel orders when charges are refunded externally "
"and to process asynchronous payment methods like SOFORT."
),
build_global_uri("plugins:pretix_payone:webhook"),
)
class PayoneMethod(BasePaymentProvider):
method = ""
abort_pending_allowed = False
refunds_allowed = True
invoice_address_mandatory = False
clearingtype = None # https://docs.payone.com/display/public/PLATFORM/clearingtype+-+definition
onlinebanktransfertype = None # https://docs.payone.com/display/public/PLATFORM/onlinebanktransfertype+-+definition
onlinebanktransfer_countries = ()
consider_appointed_as_paid = True
wallettype = (
None # https://docs.payone.com/display/PLATFORM/wallettype+-+definition
)
def __init__(self, event: Event):
super().__init__(event)
self.settings = SettingsSandbox("payment", "payone", event)
@property
def settings_form_fields(self):
return {}
@property
def identifier(self):
return "payone_{}".format(self.method)
@property
def test_mode_message(self):
if self.event.testmode:
return _(
"The PAYONE plugin is operating in test mode. No money will actually be transferred."
)
return None
@property
def is_enabled(self) -> bool:
return self.settings.get("_enabled", as_type=bool) and self.settings.get(
"method_{}".format(self.method), as_type=bool
)
def payment_refund_supported(self, payment: OrderPayment) -> bool:
return self.refunds_allowed
def payment_partial_refund_supported(self, payment: OrderPayment) -> bool:
return self.refunds_allowed
def payment_prepare(self, request, payment):
return self.checkout_prepare(request, None)
def payment_is_valid_session(self, request: HttpRequest):
return True
def payment_form_render(self, request) -> str:
template = get_template("pretix_payone/checkout_payment_form.html")
if self.payment_form_fields:
form = self.payment_form(request)
else:
form = None
ctx = {"request": request, "event": self.event, "settings": self.settings, "form": form}
return template.render(ctx)
def checkout_confirm_render(self, request) -> str:
template = get_template("pretix_payone/checkout_payment_confirm.html")
ctx = {
"request": request,
"event": self.event,
"settings": self.settings,
"provider": self,
}
return template.render(ctx)
def payment_can_retry(self, payment):
return self._is_still_available(order=payment.order)
def payment_pending_render(self, request, payment) -> str:
if payment.info:
payment_info = json.loads(payment.info)
else:
payment_info = None
template = get_template("pretix_payone/pending.html")
ctx = {
"request": request,
"event": self.event,
"settings": self.settings,
"provider": self,
"order": payment.order,
"payment": payment,
"payment_info": payment_info,
}
return template.render(ctx)
def payment_control_render(self, request, payment) -> str:
if payment.info:
payment_info = json.loads(payment.info)
else:
payment_info = None
template = get_template("pretix_payone/control.html")
ctx = {
"request": request,
"event": self.event,
"settings": self.settings,
"payment_info": payment_info,
"payment": payment,
"method": self.method,
"provider": self,
}
return template.render(ctx)
@property
def _default_params(self):
from pretix_payone import __version__ as pluginver
from pretix import __version__
return {
"aid": self.settings.aid,
"mid": self.settings.mid,
"portalid": self.settings.portalid,
"key": hashlib.md5(self.settings.key.encode()).hexdigest(),
"api_version": "3.11",
"mode": "test" if self.event.testmode else "live",
"encoding": "UTF-8",
"integrator_name": "rami.io GmbH",
"integrator_version": pluginver,
"solution_name": "pretix",
"solution_version": __version__,
}
def execute_refund(self, refund: OrderRefund):
refund_params = {
'request': 'refund',
'txid': refund.payment.info_data.get('TxId'),
'sequencenumber': int(refund.payment.info_data.get('sequencenumber', "0")) + 1,
'amount': self._decimal_to_int(refund.amount) * -1,
'currency': self.event.currency,
"narrative_text": "{code} {event}".format(
code=refund.order.code,
event=str(self.event.name)[: 81 - 1 - len(refund.order.code)],
),
'transaction_param': f"{self.event.slug}-{refund.full_id}",
}
data = dict(
**refund_params, **self._default_params
)
try:
req = requests.post(
"https://api.pay1.de/post-gateway/",
data=data,
headers={"Accept": "application/json"},
)
req.raise_for_status()
except HTTPError:
logger.exception("PAYONE error: %s" % req.text)
try:
d = req.json()
except ValueError:
d = {"error": True, "detail": req.text}
refund.info_data = d
refund.state = OrderRefund.REFUND_STATE_FAILED
refund.save()
raise PaymentException(
_(
"We had trouble communicating with our payment provider. Please try again and get in touch "
"with us if this problem persists."
)
)
data = req.json()
if data['Status'] != 'ERROR':
d = refund.payment.info_data
d['sequencenumber'] = refund_params['sequencenumber']
refund.payment.info = json.dumps(d)
refund.payment.save()
refund.info = json.dumps(data)
if data["Status"] == "APPROVED":
refund.done()
elif data["Status"] == "PENDING":
refund.done() # not technically correct, but we're not sure we'd ever get an update.
elif data["Status"] == "ERROR":
refund.state = OrderRefund.REFUND_STATE_FAILED
refund.save()
raise PaymentException(
data["Error"].get(
"ErrorMessage", "Unknown error"
)
)
def _amount_to_decimal(self, cents):
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
return round_decimal(float(cents) / (10 ** places), self.event.currency)
def _decimal_to_int(self, amount):
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
return int(amount * 10 ** places)
def _get_payment_params(self, request, payment):
d = {
"request": "authorization",
"reference": "{ev}-{code}".format(
ev=self.event.slug[: 20 - 1 - len(payment.order.code)],
code=payment.full_id,
),
"amount": self._decimal_to_int(payment.amount),
"currency": self.event.currency,
"param": f"{self.event.slug}-{payment.full_id}",
"narrative_text": "{code} {event}".format(
code=payment.order.code,
event=str(self.event.name)[: 81 - 1 - len(payment.order.code)],
),
"customer_is_present": "yes",
"recurrence": "none",
"clearingtype": self.clearingtype,
}
if self.clearingtype == "sb":
d["onlinebanktransfertype"] = self.onlinebanktransfertype
d["bankcountry"] = (
self.onlinebanktransfer_countries[0]
if len(self.onlinebanktransfer_countries) == 1
else "USERSELECTED"
) # todo
if self.clearingtype == "wlt":
d["wallettype"] = self.wallettype
if self.clearingtype in ("sb", "wlt", "cc"):
d["successurl"] = build_absolute_uri(
self.event,
"plugins:pretix_payone:return",
kwargs={
"order": payment.order.code,
"payment": payment.pk,
"hash": hashlib.sha1(
payment.order.secret.lower().encode()
).hexdigest(),
"action": "success",
},
)
d["errorurl"] = build_absolute_uri(
self.event,
"plugins:pretix_payone:return",
kwargs={
"order": payment.order.code,
"payment": payment.pk,
"hash": hashlib.sha1(
payment.order.secret.lower().encode()
).hexdigest(),
"action": "error",
},
)
d["backurl"] = build_absolute_uri(
self.event,
"plugins:pretix_payone:return",
kwargs={
"order": payment.order.code,
"payment": payment.pk,
"hash": hashlib.sha1(
payment.order.secret.lower().encode()
).hexdigest(),
"action": "cancel",
},
)
try:
ia = payment.order.invoice_address
except InvoiceAddress.DoesNotExist:
ia = InvoiceAddress()
if ia.company:
d["company"] = ia.company[:50]
if ia.name_parts.get("family_name"):
d["lastname"] = ia.name_parts.get("family_name", "")[:50]
d["firstname"] = ia.name_parts.get("given_name", "")[:50]
elif ia.name:
d["lastname"] = ia.name.rsplit(" ", 1)[-1][:50]
d["firstname"] = ia.name.rsplit(" ", 1)[0][:50]
elif not ia.company:
d["lastname"] = "Unknown"
if ia.country:
d["country"] = str(ia.country)
else:
d["country"] = guess_country(self.event) or "DE"
if ia.vat_id and ia.vat_id_validated:
d["vatid"] = ia.vat_id
if self.invoice_address_mandatory:
if ia.name_parts.get("salutation"):
d["salutation"] = ia.name_parts.get("salutation", "")[:10]
if ia.name_parts.get("title"):
d["title"] = ia.name_parts.get("title", "")[:20]
if ia.address:
d["street"] = ia.address[:50]
if ia.zipcode:
d["zip"] = ia.zipcode[:10]
if ia.city:
d["city"] = ia.city[:50]
if ia.state and ia.country in (
"US",
"CA",
"CN",
"JP",
"MX",
"BR",
"AR",
"ID",
"TH",
"IN",
):
d["state"] = ia.state
d["language"] = payment.order.locale[:2]
return d
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
data = dict(
**self._get_payment_params(request, payment), **self._default_params
)
try:
req = requests.post(
"https://api.pay1.de/post-gateway/",
data=data,
headers={"Accept": "application/json"},
)
req.raise_for_status()
except HTTPError:
logger.exception("PAYONE error: %s" % req.text)
try:
d = req.json()
except ValueError:
d = {"error": True, "detail": req.text}
payment.fail(info=d)
raise PaymentException(
_(
"We had trouble communicating with our payment provider. Please try again and get in touch "
"with us if this problem persists."
)
)
data = req.json()
payment.info = json.dumps(data)
payment.state = OrderPayment.PAYMENT_STATE_CREATED
payment.save()
if "TxId" in data:
ReferencedPayoneObject.objects.get_or_create(
txid=data["TxId"],
payment=payment,
order=payment.order,
)
if data["Status"] == "APPROVED":
payment.confirm()
elif data["Status"] == "REDIRECT":
request.session["payment_payone_order_secret"] = payment.order.secret
return self.redirect(request, data["RedirectUrl"])
elif data["Status"] == "ERROR":
payment.fail()
raise PaymentException(
_("Our payment provider returned an error message: {message}").format(
message=data["Error"].get(
"CustomerMessage", data.get("ErrorMessage", "Unknown error")
)
)
)
elif data["Status"] == "PENDING":
payment.state = OrderPayment.PAYMENT_STATE_PENDING
payment.save()
def redirect(self, request, url):
if request.session.get("iframe_session", False):
signer = signing.Signer(salt="safe-redirect")
return (
build_absolute_uri(request.event, "plugins:pretix_payone:redirect")
+ "?url="
+ urllib.parse.quote(signer.sign(url))
)
else:
return str(url)
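    # Receiving side (assumption; the redirect view itself is not shown here): it
    # would undo the signature with
    #     signing.Signer(salt="safe-redirect").unsign(request.GET["url"])
    # and only redirect to URLs that carry a valid signature.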
class PayoneCC(PayoneMethod):
method = "creditcard"
verbose_name = _("Credit card via PAYONE")
public_name = _("Credit card")
clearingtype = "cc"
def _get_payment_params(self, request, payment):
d = super()._get_payment_params(request, payment)
d["pseudocardpan"] = request.session["payment_payone_pseudocardpan"]
d["cardholder"] = request.session.get("payment_payone_cardholder", "")
return d
def payment_is_valid_session(self, request):
return request.session.get("payment_payone_pseudocardpan", "") != ""
def checkout_prepare(self, request: HttpRequest, cart):
ppan = request.POST.get("payone_pseudocardpan", "")
if ppan:
request.session["payment_payone_pseudocardpan"] = ppan
for f in (
"truncatedcardpan",
"cardtypeResponse",
"cardexpiredateResponse",
"cardholder",
):
request.session[f"payment_payone_{f}"] = request.POST.get(
f"payone_{f}", ""
)
elif not request.session.get("payment_payone_pseudocardpan"):
messages.warning(
request, _("You may need to enable JavaScript for payments.")
)
return False
return True
def payment_prepare(self, request, payment):
return self.checkout_prepare(request, payment)
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
return super().execute_payment(request, payment)
finally:
request.session.pop("payment_payone_pseudocardpan", None)
request.session.pop("payment_payone_truncatedcardpan", None)
request.session.pop("payment_payone_cardtypeResponse", None)
request.session.pop("payment_payone_cardexpiredateResponse", None)
request.session.pop("payment_payone_cardholder", None)
def payment_form_render(self, request) -> str:
d = {
"request": "creditcardcheck",
"responsetype": "JSON",
"aid": self.settings.aid,
"mid": self.settings.mid,
"portalid": self.settings.portalid,
"mode": | |
from __future__ import annotations
import logging
import os
import uuid
from types import TracebackType
from typing import cast, Optional, Literal, Type, Tuple, Set
import aiohttp
import azure.core
import azure.core.exceptions
import azure.identity
import azure.identity.aio
from azure.core.credentials import TokenCredential, AzureNamedKeyCredential
from azure.data.tables.aio import TableClient
from azure.mgmt.authorization.aio import AuthorizationManagementClient
from azure.mgmt.msi.aio import ManagedServiceIdentityClient
from azure.mgmt.resource.resources.aio import ResourceManagementClient
# There are two Subscription clients, azure.mgmt.subscription.SubscriptionClient and
# azure.mgmt.resource.subscriptions. They seem pretty similar, but the first one seems
# to return a tenant_id for each subscription and the second one does not, so we just
# use the first one.
from azure.mgmt.resource.subscriptions import (
SubscriptionClient as SubscriptionClientSync,
)
from azure.mgmt.resource.subscriptions.aio import SubscriptionClient
from azure.mgmt.storage.aio import StorageManagementClient
from msgraph.core import GraphClient
from meadowrun.azure_integration.mgmt_functions.azure_instance_alloc_stub import (
LAST_USED_TABLE_NAME,
MEADOWRUN_RESOURCE_GROUP_NAME,
RESOURCE_TYPES_TYPE,
_DEFAULT_CREDENTIAL_OPTIONS,
get_credential_aio,
)
# credentials and subscriptions
class TokenCredentialWithContextManager(TokenCredential):
"""
    This is a duck-typing shim. DefaultAzureCredential implements the
    TokenCredential interface but doesn't declare it, and it also needs to be used
    as a context manager, which the TokenCredential interface doesn't provide
    (__enter__/__exit__). So we create this type just to satisfy the type checker.
"""
def __enter__(self) -> TokenCredentialWithContextManager:
pass
def __exit__(
self,
exc_typ: Type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
pass
def get_credential() -> TokenCredentialWithContextManager:
return cast(
TokenCredentialWithContextManager,
azure.identity.DefaultAzureCredential(**_DEFAULT_CREDENTIAL_OPTIONS),
)
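# Minimal usage sketch of the shim above (this is exactly the pattern used by
# get_subscription_id_sync further down):
#
#     with get_credential() as credential, SubscriptionClientSync(credential) as sc:
#         enabled = [s for s in sc.subscriptions.list() if s.state == "Enabled"]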
_SUBSCRIPTION_ID = None
_TENANT_ID = None
async def get_tenant_id() -> str:
"""Gets the tenant id corresponding to the subscription from get_subscription_id"""
# transitively can modify _SUBSCRIPTION_ID as well
global _TENANT_ID
if _TENANT_ID is not None:
return _TENANT_ID
if _SUBSCRIPTION_ID is None:
await get_subscription_id()
# it's possible that get_subscription_id populated _TENANT_ID in which case we
# don't need to do anything else
if _TENANT_ID is not None:
return _TENANT_ID
# in this case we need to look up the tenant id from the subscription id
async with get_credential_aio() as credential, SubscriptionClient(
credential
) as sub_client:
_TENANT_ID = (await sub_client.subscriptions.get(_SUBSCRIPTION_ID)).tenant_id
return _TENANT_ID
async def get_subscription_id() -> str:
"""
First, tries to get the AZURE_SUBSCRIPTION_ID environment variable. If that's not
    available, queries the available subscriptions and, if there is exactly one, returns it.
If there are 0 or 2+ available subscriptions, raises an exception. The subscription
id we choose is cached and will not change for the duration of the process.
"""
global _SUBSCRIPTION_ID
# This function MIGHT populate _TENANT_ID if it's available because we had to query
# for the available subscriptions. If we're just reading the subscription id off of
# the environment variable, then we don't populate _TENANT_ID
global _TENANT_ID
if _SUBSCRIPTION_ID is None:
specified_subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID")
if specified_subscription_id:
_SUBSCRIPTION_ID = specified_subscription_id
else:
async with get_credential_aio() as credential, SubscriptionClient(
credential
) as sub_client:
subscriptions = [
sub
async for sub in sub_client.subscriptions.list()
if sub.state == "Enabled"
]
if len(subscriptions) > 1:
raise ValueError(
"Please specify a subscription via the "
"AZURE_SUBSCRIPTION_ID environment variable from among the "
"available subscription ids: "
+ ", ".join([sub.subscription_id for sub in subscriptions])
)
elif len(subscriptions) == 0:
raise ValueError("There are no subscriptions available")
else:
_SUBSCRIPTION_ID = cast(str, subscriptions[0].subscription_id)
_TENANT_ID = cast(str, subscriptions[0].tenant_id)
return _SUBSCRIPTION_ID
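# Usage sketch: exporting the environment variable short-circuits the Azure query,
# e.g. (hypothetical id)
#
#     os.environ["AZURE_SUBSCRIPTION_ID"] = "00000000-0000-0000-0000-000000000000"
#     subscription_id = await get_subscription_id()  # returns the id above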
def get_subscription_id_sync() -> str:
"""Identical to get_subscription_id but not async"""
global _SUBSCRIPTION_ID
global _TENANT_ID
if _SUBSCRIPTION_ID is None:
specified_subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID")
if specified_subscription_id:
_SUBSCRIPTION_ID = specified_subscription_id
else:
with get_credential() as credential, SubscriptionClientSync(
credential
) as sub_client:
subscriptions = [
sub
for sub in sub_client.subscriptions.list()
if sub.state == "Enabled"
]
if len(subscriptions) > 1:
raise ValueError(
"Please specify a subscription via the "
"AZURE_SUBSCRIPTION_ID environment variable from among the "
"available subscription ids: "
+ ", ".join([sub.subscription_id for sub in subscriptions])
)
elif len(subscriptions) == 0:
raise ValueError("There are no subscriptions available")
else:
_SUBSCRIPTION_ID = cast(str, subscriptions[0].subscription_id)
_TENANT_ID = cast(str, subscriptions[0].tenant_id)
return _SUBSCRIPTION_ID
def get_current_user_id() -> str:
"""
    This function gets the id of the current user that is signed in to the Azure CLI.
In order to get this information, it looks like there are two different services,
"Microsoft Graph" (developer.microsoft.com/graph) and "Azure AD Graph"
(graph.windows.net), the latter being deprecated
(https://devblogs.microsoft.com/microsoft365dev/microsoft-graph-or-azure-ad-graph/).
I think these services correspond to two different python libraries, msal
(https://docs.microsoft.com/en-us/python/api/overview/azure/active-directory?view=azure-python)
and adal (https://docs.microsoft.com/en-us/python/api/adal/adal?view=azure-python),
but these libraries don't appear to do anything super useful on their own.
The deprecated Azure Graph API seems to correspond to a higher-level library
azure-graphrbac, which does seem to have the functionality we need:
azure.graphrbac.GraphRbacManagementClient.signed_in_user, but is deprecated along
with Azure Graph
(https://github.com/Azure/azure-sdk-for-python/issues/14022#issuecomment-752279618).
The msgraph library that we use here seems to be a not-very-high-level library
for Microsoft Graph (https://github.com/microsoftgraph/msgraph-sdk-python-core).
As a side note, another way to get this information is to use the command line to
call `az ad signed-in-user show`, but that appears to be relying on the deprecated
Azure Graph API as it gives a deprecation warning.
"""
    # the scopes parameter is crucial here, see
# https://github.com/microsoftgraph/msgraph-sdk-python-core/issues/106#issuecomment-969281260
with get_credential() as credential:
client = GraphClient(
credential=credential, scopes=["https://graph.microsoft.com"]
)
# https://docs.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http
result = client.get("/me")
return result.json()["id"]
def get_default_location() -> str:
# TODO try `az config get defaults.location`. Then our own custom config, maybe?
return "eastus"
_MEADOWRUN_RESOURCE_GROUP_ENSURED = False
async def ensure_meadowrun_resource_group(location: str) -> str:
"""
Creates the meadowrun resource group if it doesn't already exist. This resource
group will contain all meadowrun-generated resources
"""
global _MEADOWRUN_RESOURCE_GROUP_ENSURED
if not _MEADOWRUN_RESOURCE_GROUP_ENSURED:
async with get_credential_aio() as credential, ResourceManagementClient(
credential, await get_subscription_id()
) as resource_client:
try:
await resource_client.resource_groups.get(
resource_group_name=MEADOWRUN_RESOURCE_GROUP_NAME
)
except azure.core.exceptions.ResourceNotFoundError:
print(
f"The meadowrun resource group ({MEADOWRUN_RESOURCE_GROUP_NAME}) "
f"doesn't exist, creating it now in {location}"
)
await resource_client.resource_groups.create_or_update(
MEADOWRUN_RESOURCE_GROUP_NAME, {"location": location}
)
_MEADOWRUN_RESOURCE_GROUP_ENSURED = True
return MEADOWRUN_RESOURCE_GROUP_NAME
async def get_current_ip_address_on_vm() -> Optional[str]:
"""
Assuming we're running on an Azure VM, get our current public ip address. If we're
not running on an Azure VM, or we're running on an Azure VM without a public IP
address, or if we're running on an Azure VM with a Standard SKU public IP address
(see comments in _provision_nic_with_public_ip), we will return None.
"""
async with aiohttp.request(
"GET",
"http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress"
"/0/publicIpAddress?api-version=2021-02-01&format=text",
headers={"Metadata": "true"},
) as response:
if not response.ok:
return None
return await response.text()
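# The request above is the standard Azure Instance Metadata Service (IMDS) call;
# a rough shell equivalent for debugging would be:
#
#     curl -H "Metadata: true" "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2021-02-01&format=text"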
_MEADOWRUN_MANAGED_IDENTITY = "meadowrun-managed-identity"
async def _ensure_managed_identity(location: str) -> Tuple[str, str]:
"""Returns identity id, client id"""
resource_group_name = await ensure_meadowrun_resource_group(location)
async with get_credential_aio() as credential, ManagedServiceIdentityClient(
credential, await get_subscription_id()
) as client:
try:
identity = await client.user_assigned_identities.get(
resource_group_name, _MEADOWRUN_MANAGED_IDENTITY
)
return identity.id, identity.client_id
except azure.core.exceptions.ResourceNotFoundError:
print(
f"Azure managed identity {_MEADOWRUN_MANAGED_IDENTITY} does not exist, "
f"creating it now"
)
identity = await client.user_assigned_identities.create_or_update(
resource_group_name, _MEADOWRUN_MANAGED_IDENTITY, {"location": location}
)
await assign_role_to_principal(
"Contributor", identity.principal_id, location, "ServicePrincipal"
)
await assign_role_to_principal(
"Key Vault Secrets User",
identity.principal_id,
location,
"ServicePrincipal",
)
return identity.id, identity.client_id
async def assign_role_to_principal(
role_name: str,
principal_id: str,
location: str,
principal_type: Literal[None, "ServicePrincipal"] = None,
) -> None:
"""
Assigns the specified role to the specified principal (e.g. user or identity) in the
scope of the meadowrun resource group.
principal_type should be set to ServicePrincipal as per the recommendation in
https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-template#new-service-principal
if the service principal is brand new.
"""
subscription_id = await get_subscription_id()
resource_group = await ensure_meadowrun_resource_group(location)
# Bizarrely, setting principal_type to ServicePrincipal for newly created service
# identities only seems to have an effect in 2018-09-01-preview (or later according
# to the docs), but the role_definitions property is missing on 2018-09-01-preview
# (and later) API versions. So it seems like we need to create two different clients
    # with different API versions.
async with get_credential_aio() as credential, AuthorizationManagementClient(
credential, subscription_id, api_version="2018-01-01-preview"
) as client, AuthorizationManagementClient(
credential, subscription_id, api_version="2018-09-01-preview"
) as client2:
roles = [
r
async for r in client.role_definitions.list(
scope=f"/subscriptions/{subscription_id}",
filter=f"roleName eq '{role_name}'",
)
]
if len(roles) == 0:
raise ValueError(f"Role {role_name} was not found")
elif len(roles) > 1:
raise ValueError(f"More than one role {role_name} was found")
try:
# https://docs.microsoft.com/en-us/python/api/azure-mgmt-authorization/azure.mgmt.authorization.v2020_10_01_preview.operations.roleassignmentsoperations?view=azure-python#azure-mgmt-authorization-v2020-10-01-preview-operations-roleassignmentsoperations-create
parameters = {
"role_definition_id": roles[0].id,
"principal_id": principal_id,
}
if principal_type:
parameters["principal_type"] = principal_type
await client2.role_assignments.create(
f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}",
str(uuid.uuid4()),
parameters,
)
except azure.core.exceptions.ResourceExistsError:
# this means the role assignment already exists
pass
async def delete_meadowrun_resource_group() -> None:
"""
    This should delete all meadowrun-generated resources, as it deletes everything
    in the meadowrun resource group.
"""
async with get_credential_aio() as credential, ResourceManagementClient(
credential, await get_subscription_id()
) as resource_client:
try:
poller = await resource_client.resource_groups.begin_delete(
MEADOWRUN_RESOURCE_GROUP_NAME
)
await poller.result()
except azure.core.exceptions.ResourceNotFoundError:
pass
async def ensure_meadowrun_storage_account(
location: str, on_missing: Literal["raise", "create"]
) -> Tuple[str, str]:
"""Returns (storage account | |
<filename>optimal/tests/test_optimize.py
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import copy
import random
import functools
import multiprocessing
import numpy
import pytest
from optimal import optimize, common, GenAlg, Problem, problems
def simple_function(binary):
    return (float(binary[0]) + float(binary[1]) + 0.001), (binary[0] and binary[1])
SIMPLE_PROBLEM = Problem(simple_function)
#########################
# Problem
#########################
def test_Problem_copy():
problem = Problem(simple_function, fitness_args=['a'])
problem_copy = problem.copy()
assert problem_copy is not problem
assert problem_copy.__dict__ == problem.__dict__
problem_copy = problem.copy(fitness_args=['a', 'b'])
assert problem_copy._fitness_args == ['a', 'b']
assert problem._fitness_args == ['a']
###############################
# Optimizer.optimize
###############################
def test_Optimizer_optimize_parallel():
    optimizer = GenAlg(2)
    optimizer.optimize(SIMPLE_PROBLEM, n_processes=random.randint(2, 4))
    assert optimizer.solution_found
def test_Optimizer_optimize_solution_correct():
optimizer = GenAlg(2)
assert optimizer.optimize(SIMPLE_PROBLEM) == [1, 1]
def test_Optimizer_optimize_sphere_max_iterations():
optimizer = GenAlg(32, population_size=10)
optimizer.optimize(
problems.sphere_binary, max_iterations=100,
max_seconds=float('inf'))
assert optimizer.solution_found
def test_Optimizer_optimize_sphere_max_seconds():
optimizer = GenAlg(32, population_size=10)
optimizer.optimize(
problems.sphere_binary, max_iterations=float('inf'),
max_seconds=10)
assert optimizer.solution_found
###############################
# Optimizer._get_fitnesses
###############################
def test_Optimizer_get_fitnesses_no_finished():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
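    # Worked example: with weights [0.25, 0.75] and solution [1, 0],
    # fitness_func returns 0.25 * 1 + 0.75 * 0 == 0.25.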
# Test Optimizer._get_fitnesses
_check_get_fitnesses(fitness_func, lambda x: x, solution_size)
def test_Optimizer_get_fitnesses_correct_with_finished():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
# Return tuple, with finished as second value
return weights.dot(solution), False
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
lambda x: x,
solution_size,
fitness_func_returns_finished=True)
def test_Optimizer_get_fitnesses_with_fitness_func_side_effects():
"""Fitness function modifying solution should not affect fitnesses.
This could potentially be a problem when there are duplicate solutions.
"""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
for i, val in enumerate(solution):
solution[i] *= 2
return weights.dot(solution)
# Test Optimizer._get_fitnesses
problem = Problem(fitness_func)
optimizer = optimize.Optimizer()
# Use simple map of fitness function over solutions as oracle
# Repeat to test cache
for _ in range(100):
        # Create a random population, and compare values returned by _get_fitnesses to simple maps
population = common.make_population(
random.randint(1, 20), common.random_binary_solution,
solution_size)
solutions, fitnesses, finished = optimizer._get_fitnesses(
problem, copy.deepcopy(population), pool=None)
        assert fitnesses == list(map(fitness_func, population))
assert finished is False
def test_Optimizer_get_fitnesses_with_decoder():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
decode_weights = numpy.random.random(solution_size)
def decode_func(encoded_solution):
return list(decode_weights * encoded_solution)
# Test Optimizer._get_fitnesses
_check_get_fitnesses(fitness_func, decode_func, solution_size)
def test_Optimizer_get_fitnesses_unhashable_solution():
"""Should not fail when solution cannot be hashed or cached."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution.list)
class ListWrapper(object):
def __init__(self, list_):
self.list = list_
def __eq__(self, other):
return type(self) == type(other) and self.list == other.list
def __hash__(self):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
decode_weights = numpy.random.random(solution_size)
def decode_func(encoded_solution):
return ListWrapper(list(decode_weights * encoded_solution))
optimizer = optimize.Optimizer()
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
decode_func,
solution_size,
optimizer=optimizer,
cache_solution=True)
assert optimizer._Optimizer__solution_cache == {}
def test_Optimizer_get_fitnesses_cache_encoded_True_cache_solution_True():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
# Optimizer with disabled encoded cache
optimizer = optimize.Optimizer()
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
lambda x: x,
solution_size,
optimizer=optimizer,
cache_encoded=True,
cache_solution=True)
# Check caches as expected
assert optimizer._Optimizer__encoded_cache != {}
assert optimizer._Optimizer__solution_cache != {}
def test_Optimizer_get_fitnesses_cache_encoded_False_cache_solution_True():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
# Optimizer with disabled encoded cache
optimizer = optimize.Optimizer()
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
lambda x: x,
solution_size,
optimizer=optimizer,
cache_encoded=False,
cache_solution=True)
# Check caches as expected
assert optimizer._Optimizer__encoded_cache == {}
assert optimizer._Optimizer__solution_cache != {}
def test_Optimizer_get_fitnesses_cache_encoded_True_cache_solution_False():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
# Optimizer with disabled encoded cache
optimizer = optimize.Optimizer()
optimizer.cache_decoded_solution = False
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
lambda x: x,
solution_size,
optimizer=optimizer,
cache_encoded=True,
cache_solution=False)
# Check caches as expected
assert optimizer._Optimizer__encoded_cache != {}
assert optimizer._Optimizer__solution_cache == {}
def test_Optimizer_get_fitnesses_cache_encoded_False_cache_solution_False():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
# Optimizer with disabled encoded cache
optimizer = optimize.Optimizer()
optimizer.cache_decoded_solution = False
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
lambda x: x,
solution_size,
optimizer=optimizer,
cache_encoded=False,
cache_solution=False)
# Check caches as expected
assert optimizer._Optimizer__encoded_cache == {}
assert optimizer._Optimizer__solution_cache == {}
def test_Optimizer_get_fitnesses_with_pool():
"""Fitnesses should correspond to solutions."""
# Fitness function is weighted summation of bits
solution_size = random.randint(1, 50)
weights = numpy.random.random(solution_size)
def fitness_func(solution):
return weights.dot(solution)
# Test Optimizer._get_fitnesses
_check_get_fitnesses(
fitness_func,
lambda x: x,
solution_size,
n_processes=random.randint(2, 4))
def _check_get_fitnesses(fitness_func,
decode_func,
solution_size,
fitness_func_returns_finished=False,
optimizer=None,
n_processes=0,
**kwargs):
"""Assert that return values of Optimizer._get_fitnesses are correct."""
problem = Problem(fitness_func, decode_function=decode_func)
if optimizer is None:
optimizer = optimize.Optimizer()
if n_processes > 0:
pool = multiprocessing.Pool(processes=n_processes)
else:
pool = None
# Use simple map of fitness function over solutions as oracle
# Repeat to test cache
for _ in range(100):
        # Create a random population, and compare values returned by _get_fitnesses to simple maps
population = common.make_population(
random.randint(1, 20), common.random_binary_solution,
solution_size)
solutions, fitnesses, finished = optimizer._get_fitnesses(
problem, copy.deepcopy(population), pool=pool, **kwargs)
        # NOTE: _get_fitnesses will return None for solutions in the cache; this is expected and ok
assert False not in [
solution == expected
for solution, expected in zip(solutions,
map(decode_func, population))
if solution is not None
]
if fitness_func_returns_finished is False:
            assert fitnesses == list(map(fitness_func, map(decode_func, population)))
else:
# Need to strip finished from fitness_func return values
assert fitnesses == [
fitness_finished[0]
for fitness_finished in map(fitness_func,
map(decode_func, population))
]
assert finished is False
###############################
# Caching
###############################
def test_Optimizer_encoded_cache_correct():
"""Should map the correct key to fitness."""
optimizer = optimize.Optimizer()
def fitness_func(solution):
return solution[0] + 0.5 * solution[1]
problem = Problem(fitness_func)
# Test cache
optimizer._get_fitnesses(
problem, [[0, 0], [0, 1], [1, 0], [1, 1]], cache_encoded=True)
assert optimizer._Optimizer__encoded_cache == {
(0, 0): 0,
(0, 1): 0.5,
(1, 0): 1.0,
(1, 1): 1.5
}
def test_Optimizer_solution_cache_correct():
"""Should map the correct key to fitness."""
optimizer = optimize.Optimizer()
def fitness_func(solution):
return solution[0] + 0.5 * solution[1]
def decode_func(encoded_solution):
return (-encoded_solution[0], -encoded_solution[1])
problem = Problem(fitness_func, decode_function=decode_func)
# Test cache
optimizer._get_fitnesses(
problem, [[0, 0], [0, 1], [1, 0], [1, 1]], cache_solution=True)
assert optimizer._Optimizer__solution_cache == {
(0, 0): 0,
(0, -1): -0.5,
(-1, 0): -1.0,
(-1, -1): -1.5
}
def test_Optimizer_get_solution_key():
# Hashable
optimizer = optimize.Optimizer()
    assert optimizer._get_solution_key('1') == '1'
# Dict
# NOTE: This requires special treatment, otherwise,
# tuple(dict) will return a tuple of the KEYS only
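    # For example: tuple({'a': '1'}) == ('a',), whereas
    # tuple({'a': '1'}.items()) == (('a', '1'),) keeps the values as well.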
optimizer = optimize.Optimizer()
    assert optimizer._get_solution_key({'a': '1'}) == tuple([('a', '1')])
# Tupleable
optimizer = optimize.Optimizer()
    assert optimizer._get_solution_key(['1']) == tuple(['1'])
# Stringable
optimizer = optimize.Optimizer()
    assert optimizer._get_solution_key([['1']]) == str([['1']])
def test_Optimizer_optimize_cache_encoded_False_cache_solution_True():
"""Should only cache encoded solutions if True."""
# After calling Optimizer._get_fitnesses
# __encoded_cache should be empty
# __solution_cache should not
optimizer = GenAlg(2)
# Optimize
optimizer.optimize(
SIMPLE_PROBLEM,
max_iterations=1,
cache_encoded=False,
cache_solution=True,
clear_cache=False)
# Assert caches as expected
assert optimizer._Optimizer__encoded_cache == {}
assert optimizer._Optimizer__solution_cache != {}
def test_Optimizer_optimize_cache_encoded_True_cache_solution_False():
"""Should only cache decoded solutions if True."""
# After calling Optimizer._get_fitnesses
# __encoded_cache should not be empty
# __solution_cache should be empty
optimizer = GenAlg(2)
# Get fitnesses
optimizer.optimize(
SIMPLE_PROBLEM,
max_iterations=1,
cache_encoded=True,
cache_solution=False,
clear_cache=False)
# Assert caches as expected
assert optimizer._Optimizer__encoded_cache != {}
assert optimizer._Optimizer__solution_cache == {}
####################################
# Hyperparameters
####################################
def test_Optimizer_get_hyperparameters():
    pass
from .testutils import FullStackTests, Recording, Collection, BaseAccess
from mock import patch
from itertools import count
from pywb.recorder.multifilewarcwriter import MultiFileWARCWriter
from webrecorder.models.usermanager import CLIUserManager
from webrecorder.rec.storage import get_storage
from webrecorder.models.stats import Stats
from webrecorder.utils import today_str
import re
import os
import time
all_closed = False
# ============================================================================
class TestRegisterMigrate(FullStackTests):
@classmethod
def setup_class(cls):
super(TestRegisterMigrate, cls).setup_class(extra_config_file='test_no_invites_config.yaml',
storage_worker=True,
temp_worker=True)
cls.val_reg = ''
def test_anon_record_1(self):
self.set_uuids('Recording', ['abc'])
res = self.testapp.get('/_new/temp/abc/record/mp_/http://httpbin.org/get?food=bar')
        assert res.headers['Location'].endswith('/' + self.anon_user + '/temp/abc/record/mp_/http://httpbin.org/get?food=bar')
res = res.follow()
res.charset = 'utf-8'
assert '"food": "bar"' in res.text, res.text
assert self.testapp.cookies['__test_sesh'] != ''
# Add as page
page = {'title': 'Example Title', 'url': 'http://httpbin.org/get?food=bar', 'ts': '2016010203000000'}
res = self.testapp.post_json('/api/v1/recording/abc/pages?user={user}&coll=temp'.format(user=self.anon_user), params=page)
assert res.json['page_id']
user = self.anon_user
coll, rec = self.get_coll_rec(user, 'temp', 'abc')
self.assert_coll_rec_warcs(coll, rec, 1, 1)
anon_dir = os.path.join(self.warcs_dir, user)
assert len(os.listdir(anon_dir)) == 1
def _test_register(self):
res = self.testapp.get('/_register')
res.charset = 'utf-8'
assert self.testapp.cookies['__test_sesh'] != ''
assert '"toColl"' in res.text
@classmethod
def mock_send_reg_email(cls, sender, title, text):
cls.val_reg = re.search('/_valreg/([^"?]+)', text).group(1)
assert '?username=' in text
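        # Illustrative (hypothetical) body this mock expects, e.g. an e-mail
        # containing "http://localhost:80/_valreg/abc123?username=someuser",
        # from which val_reg is captured as "abc123".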
def test_register_post_success(self):
params = {'email': '<EMAIL>',
'username': 'someuser',
'password': '<PASSWORD>',
'confirmpassword': '<PASSWORD>',
'toColl': 'Test Migrate',
'moveTemp': '1',
}
with patch('cork.Mailer.send_email', self.mock_send_reg_email):
res = self.testapp.post_json('/api/v1/auth/register', params=params)
#assert res.headers['Location'] == 'http://localhost:80/'
assert res.json == {
'success': 'A confirmation e-mail has been sent to <b>someuser</b>. Please '
'check your e-mail to complete the registration!'}
def _test_val_user_reg_page(self):
res = self.testapp.get('/_valreg/' + self.val_reg)
assert self.val_reg in res.body.decode('utf-8')
def test_val_user_reg_post(self):
params = {'reg': self.val_reg}
headers = {'Cookie': '__test_sesh={0}; valreg={1}'.format(self.testapp.cookies['__test_sesh'], self.val_reg)}
def _get_storage(storage_type, redis):
time.sleep(1.1)
return get_storage(storage_type, redis)
with patch('webrecorder.models.collection.get_global_storage', _get_storage) as p:
res = self.testapp.post_json('/api/v1/auth/validate', params=params, headers=headers)
time.sleep(1.1)
assert res.json == {'first_coll_name': 'test-migrate', 'registered': 'someuser'}
user_info = self.redis.hgetall('u:someuser:info')
#user_info = self.appcont.manager._format_info(user_info)
assert user_info['max_size'] == '1000000000'
assert user_info['created_at'] != None
coll, rec = self.get_coll_rec('someuser', 'test-migrate', None)
key_prefix = 'c:{coll}'.format(coll=coll)
assert self.redis.exists(key_prefix + ':info')
coll_info = self.redis.hgetall(key_prefix + ':info')
#coll_info = self.appcont.manager._format_info(coll_info)
assert coll_info['owner'] == 'someuser'
assert coll_info['title'] == 'Test Migrate'
assert coll_info['created_at'] != None
assert user_info['size'] == coll_info['size']
def test_renamed_temp_to_perm(self):
def assert_one_dir():
coll_dir = os.path.join(self.storage_today, coll)
assert set(os.listdir(coll_dir)) == {'warcs', 'indexes'}
assert len(os.listdir(os.path.join(coll_dir, 'warcs'))) == 1
assert len(os.listdir(os.path.join(coll_dir, 'indexes'))) == 1
coll, rec = self.get_coll_rec('someuser', 'test-migrate', 'abc')
assert coll != None
self.sleep_try(0.2, 20.0, assert_one_dir)
#result = self.redis.hgetall('r:{rec}:warc'.format(rec=rec))
result = self.redis.hgetall(Recording.COLL_WARC_KEY.format(coll=coll))
assert len(result) == 1
storage_dir = self.storage_today.replace(os.path.sep, '/')
for key in result:
assert storage_dir in result[key]
def test_logged_in_user_info(self):
res = self.testapp.get('/someuser')
assert '"/someuser/test-migrate"' in res.text
assert 'Test Migrate' in res.text
# no rec replay after commit
def _test_logged_in_rec_replay_1(self):
res = self.testapp.get('/someuser/test-migrate/abc/replay/mp_/http://httpbin.org/get?food=bar')
res.charset = 'utf-8'
# no cache control setting here (only at collection replay)
assert 'Cache-Control' not in res.headers
assert '"food": "bar"' in res.text, res.text
def test_logged_in_replay_1(self):
res = self.testapp.get('/someuser/test-migrate/mp_/http://httpbin.org/get?food=bar')
res.charset = 'utf-8'
# Cache-Control private to ignore cache
assert res.headers['Cache-Control'] == 'private'
assert '"food": "bar"' in res.text, res.text
def test_logged_in_coll_info(self):
res = self.testapp.get('/someuser/test-migrate')
res.charset = 'utf-8'
assert 'Test Migrate' in res.text
assert '/someuser/test-migrate/' in res.text, res.text
assert '/http://httpbin.org/get?food=bar' in res.text
def test_logged_in_rec_info(self):
res = self.testapp.get('/someuser/test-migrate/abc')
res.charset = 'utf-8'
assert 'Test Migrate' in res.text
assert '/someuser/test-migrate/' in res.text
assert '/http://httpbin.org/get?food=bar' in res.text
def _test_logged_in_create_coll_page(self):
res = self.testapp.get('/_create')
#assert 'https://webrecorder.io/someuser/' in res.text
assert 'New Collection' in res.text
def test_logged_in_create_coll(self):
params = {'title': 'New Coll',
'public': True,
}
res = self.testapp.post_json('/api/v1/collections?user=someuser', params=params)
#res.headers['Location'] == 'http://localhost:80/'
assert res.json['collection']['slug'] == 'new-coll'
res = self.testapp.get('/someuser/new-coll')
assert 'Created collection' in res.text
assert 'New Coll' in res.text
# ensure csrf token present
#m = re.search('name="csrf" value="([^\"]+)"', res.text)
#assert m
def test_logged_in_create_coll_dupe_name_error(self):
params = {'title': 'New Coll',
'public': True,
}
res = self.testapp.post_json('/api/v1/collections?user=someuser', params=params, status=400)
assert res.json['error'] == 'duplicate_name'
def test_logged_in_create_coll_new_name(self):
params = {'title': 'New Coll 2',
'public': True
}
res = self.testapp.post_json('/api/v1/collections?user=someuser', params=params)
assert res.json['collection']['slug'] == 'new-coll-2'
res = self.testapp.get('/someuser/new-coll-2')
assert 'Created collection' in res.text
assert 'New Coll' in res.text
# ensure csrf token present
#m = re.search('name="csrf" value="([^\"]+)"', res.text)
#assert m
def test_logged_in_create_coll_and_rename_to_dupe_name(self):
params = {'title': 'Other Coll',
'public': False
}
res = self.testapp.post_json('/api/v1/collections?user=someuser', params=params)
assert res.json['collection']['slug'] == 'other-coll'
res = self.testapp.get('/someuser/other-coll')
assert 'Other Coll' in res.text
params = {'title': 'New Coll'}
res = self.testapp.post_json('/api/v1/collection/other-coll?user=someuser', params=params, status=400)
assert res.json == {'error': 'duplicate_name'}
params = {'title': 'New Coll 3'}
res = self.testapp.post_json('/api/v1/collection/other-coll?user=someuser', params=params)
assert res.json['collection']['id'] == 'new-coll-3'
assert set(self.redis.hkeys('u:someuser:colls')) == {'new-coll-3', 'new-coll', 'test-migrate', 'new-coll-2'}
def test_logged_in_user_info_2(self):
res = self.testapp.get('/someuser')
assert '"/someuser/test-migrate"' in res.text
assert 'Test Migrate' in res.text
assert '"/someuser/new-coll"' in res.text
assert 'New Coll' in res.text
def test_logged_in_record_1(self):
self.set_uuids('Recording', ['move-test'])
res = self.testapp.get('/_new/new-coll/move-test/record/mp_/http://example.com/')
assert res.status_code == 302
assert res.headers['Location'].endswith('/someuser/new-coll/move-test/record/mp_/http://example.com/')
res = res.follow()
res.charset = 'utf-8'
assert 'Example Domain' in res.text
# allow recording to be written
def assert_written():
coll, rec = self.get_coll_rec('someuser', 'new-coll', 'move-test')
assert coll
assert rec
assert self.redis.exists(Recording.CDXJ_KEY.format(rec=rec))
assert self.redis.exists(Recording.REC_WARC_KEY.format(rec=rec))
assert self.redis.exists(Recording.COLL_WARC_KEY.format(coll=coll))
self.sleep_try(0.1, 5.0, assert_written)
def test_logged_in_replay_public(self):
res = self.testapp.get('/someuser/new-coll/mp_/http://example.com/')
res.charset = 'utf-8'
# no cache-control for public collections
assert 'Cache-Control' not in res.headers
assert 'Example Domain' in res.text
def test_logged_in_download(self):
res = self.testapp.head('/someuser/new-coll/$download')
assert res.headers['Content-Disposition'].startswith("attachment; filename*=UTF-8''new-coll-")
def get_rec_titles(self, user, coll_name, num):
coll, rec = self.get_coll_rec(user, coll_name, None)
assert self.redis.hlen(Recording.COLL_WARC_KEY.format(coll=coll)) == num
collection = Collection(my_id=coll, redis=self.redis, access=BaseAccess())
return set([recording['title'] for recording in collection.get_recordings()])
#return self.redis.smembers(Collection.UNORDERED_RECS_KEY.format(coll=coll))
#return set(self.redis.hkeys(Collection.COMP_KEY.format(coll=coll)))
def test_logged_in_move_rec(self):
assert self.get_rec_titles('someuser', 'new-coll', 1) == {'move-test'}
assert self.get_rec_titles('someuser', 'new-coll-2', 0) == set()
res = self.testapp.post_json('/api/v1/recording/move-test/move/new-coll-2?user=someuser&coll=new-coll')
#assert res.json == {'coll_id': 'new-coll-2', 'rec_id': 'move-test'}
assert res.json['coll_id'] == 'new-coll-2'
rec_id = res.json['rec_id']
def assert_moved():
assert self.get_rec_titles('someuser', 'new-coll', 0) == set()
assert self.get_rec_titles('someuser', 'new-coll-2', 1) == {'move-test'}
self.sleep_try(0.2, 5.0, assert_moved)
# rec replay
res = self.testapp.get('/someuser/new-coll-2/{0}/replay/mp_/http://example.com/'.format(rec_id))
res.charset = 'utf-8'
assert 'Example Domain' in res.text
# coll replay
res = self.testapp.get('/someuser/new-coll-2/mp_/http://example.com/')
res.charset = 'utf-8'
assert 'Example Domain' in res.text
def test_logged_in_record_2(self):
self.set_uuids('Recording', ['move-test'])
res = self.testapp.get('/_new/new-coll/Move Test/record/mp_/http://httpbin.org/get?rec=test')
assert res.status_code == 302
assert res.headers['Location'].endswith('/someuser/new-coll/move-test/record/mp_/http://httpbin.org/get?rec=test')
res = res.follow()
res.charset = 'utf-8'
assert '"rec": "test"' in res.text
coll, rec = self.get_coll_rec('someuser', 'new-coll', 'move-test')
assert self.redis.exists('r:{0}:open'.format(rec))
# allow recording to be written
def assert_written():
assert coll
assert rec
assert self.redis.exists(Recording.CDXJ_KEY.format(rec=rec))
assert self.redis.exists(Recording.REC_WARC_KEY.format(rec=rec))
assert self.redis.exists(Recording.COLL_WARC_KEY.format(coll=coll))
self.sleep_try(0.1, 5.0, assert_written)
def test_logged_in_replay_2(self):
res = self.testapp.get('/someuser/new-coll/move-test/replay/mp_/http://httpbin.org/get?rec=test')
res.charset = 'utf-8'
assert '"rec": "test"' in res.text
def test_logged_in_move_rec_dupe(self):
assert self.get_rec_titles('someuser', 'new-coll', 1) == {'Move Test'}
assert self.get_rec_titles('someuser', 'new-coll-2', 1) == {'move-test'}
res = self.testapp.post_json('/api/v1/recording/move-test/move/new-coll-2?user=someuser&coll=new-coll')
#assert res.json == {'coll_id': 'new-coll-2', 'rec_id': 'move-test-2'}
assert res.json['coll_id'] == 'new-coll-2'
rec_id = res.json['rec_id']
def assert_moved():
assert self.get_rec_titles('someuser', 'new-coll', 0) == set()
assert self.get_rec_titles('someuser', 'new-coll-2', 2) == {'move-test', 'Move Test'}
self.sleep_try(0.2, 5.0, assert_moved)
# rec replay
res = self.testapp.get('/someuser/new-coll-2/{0}/replay/mp_/http://httpbin.org/get?rec=test'.format(rec_id))
res.charset = 'utf-8'
assert '"rec": "test"' in res.text
# coll replay
res = self.testapp.get('/someuser/new-coll-2/mp_/http://httpbin.org/get?rec=test')
res.charset = 'utf-8'
assert '"rec": "test"' in res.text
def test_logout_1(self):
res = self.testapp.post_json('/api/v1/auth/logout', status=200)
assert res.json['success']
assert self.testapp.cookies.get('__test_sesh', '') == ''
def test_logged_out_user_info(self):
res = self.testapp.get('/someuser')
assert '"/someuser/new-coll"' in res.text
assert 'New Coll' in res.text
def test_logged_out_coll(self):
res = self.testapp.get('/someuser/new-coll')
assert '/new-coll' in res.text, res.text
def test_logged_out_replay(self):
res = self.testapp.get('/someuser/new-coll-2/mp_/http://example.com/')
res.charset = 'utf-8'
# no cache-control for public collections
assert 'Cache-Control' not in res.headers
assert 'Example Domain' in res.text
def test_error_logged_out_download(self):
res = self.testapp.get('/someuser/new-coll/$download', status=404)
assert res.json == {'error': 'not_found'}
def test_error_logged_out_no_coll(self):
res = self.testapp.get('/someuser/test-migrate', status=404)
assert res.json == {'error': 'not_found'}
def test_error_logged_out_record(self):
res = self.testapp.get('/someuser/new-coll/move-test/record/mp_/http://example.com/', status=404)
assert res.json == {'error': 'no_such_recording'}
def test_error_logged_out_patch(self):
res = self.testapp.get('/someuser/new-coll/move-test/patch/mp_/http://example.com/', status=404)
assert res.json == {'error': 'no_such_recording'}
def test_error_logged_out_replay_coll_1(self):
res = self.testapp.get('/someuser/test-migrate/mp_/http://httpbin.org/get?food=bar', status=404)
assert res.json == {'error': 'no_such_collection'}
def test_login(self):
params = {'username': 'someuser',
'password': '<PASSWORD>'}
res = self.testapp.post_json('/api/v1/auth/login', params=params)
assert res.json['user']['anon'] == False
assert res.json['user']['num_collections'] == 4
assert res.json['user']['role'] == 'archivist'
assert res.json['user']['username'] == 'someuser'
assert 'max-age=' not in res.headers['Set-Cookie'].lower()
assert self.testapp.cookies.get('__test_sesh', '') != ''
def _test_rename_rec(self):
res = self.testapp.post_json('/api/v1/recording/abc/rename/FOOD%20BAR?user=someuser&coll=test-migrate')
assert res.json == {'rec_id': 'food-bar', 'coll_id': 'test-migrate'}
assert self.get_rec_titles('someuser', 'test-migrate', 1) == {'food-bar'}
# rec replay
res = self.testapp.get('/someuser/test-migrate/food-bar/replay/mp_/http://httpbin.org/get?food=bar')
res.charset = 'utf-8'
assert '"food": "bar"' in res.text, res.text
# coll replay
res = self.testapp.get('/someuser/test-migrate/mp_/http://httpbin.org/get?food=bar')
res.charset = 'utf-8'
assert '"food": "bar"' in res.text, res.text
def test_rename_coll_invalid_name(self):
# empty title
params = {'title': ''}
res = self.testapp.post_json('/api/v1/collection/test-migrate?user=someuser', params=params, status=400)
assert res.json['error'] == 'invalid_coll_name'
# title that results in empty slug
params = {'title': '@$%'}
res = self.testapp.post_json('/api/v1/collection/test-migrate?user=someuser', params=params, status=400)
assert res.json['error'] == 'invalid_coll_name'
assert set(self.redis.hkeys('u:someuser:colls')) == {'new-coll-3', 'new-coll', 'test-migrate', 'new-coll-2'}
def test_rename_coll(self):
params = {'title': 'Test Coll'}
res = self.testapp.post_json('/api/v1/collection/test-migrate?user=someuser', params=params)
assert res.json['collection']['id'] == 'test-coll'
assert res.json['collection']['slug'] == 'test-coll'
        assert res.json['collection']['title'] == 'Test Coll'
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for syntax_tree."""
import unittest
import idl_parser
import syntax_tree
_location = idl_parser.SourceLocation(idl_parser.File('test.idl'), 0)
def MakeType(name):
return syntax_tree.Typename(_location, {}, name)
def MakeScope(name):
return ContextMock([], [], name, None)
class TypeReferenceMock(syntax_tree.TypeReference):
def __init__(self, return_type):
syntax_tree.TypeReference.__init__(self, _location)
self.return_type = return_type
self.context = None
self.scoped = None
def GetTypeInternal(self, context, scoped):
self.context = context
self.scoped = scoped
return self.return_type
class ContextMock(syntax_tree.Definition):
defn_type = 'ContextMock'
def __init__(self, types_list, scopes_list, name, parent):
syntax_tree.Definition.__init__(self, _location, [], name)
self.is_scope = True
self.types_dict = dict([(type_defn.name, type_defn) for type_defn
in types_list])
self.scopes_list = scopes_list
for o in scopes_list + types_list:
o.parent = self
self.parent = parent
def LookUpType(self, name):
if name in self.types_dict:
return self.types_dict[name]
else:
return None
def FindScopes(self, name):
return [scope for scope in self.scopes_list if scope.name == name]
class TypeReferenceTest(unittest.TestCase):
def setUp(self):
self.type_defn = MakeType('Type')
self.context = ContextMock([], [], None, None)
def testGetTypeSuccess(self):
mock = TypeReferenceMock(self.type_defn)
return_type = mock.GetType(self.context)
self.assertEquals(return_type, self.type_defn)
self.assertEquals(mock.context, self.context)
self.assertEquals(mock.scoped, False)
def testGetTypeFailure(self):
mock = TypeReferenceMock(None)
self.assertRaises(syntax_tree.TypeNotFoundError, mock.GetType, self.context)
self.assertEquals(mock.context, self.context)
self.assertEquals(mock.scoped, False)
class NameTypeReferenceTest(unittest.TestCase):
def setUp(self):
# Context2 {
# Type1;
# Type3;
# Context1 {
# Type1;
# Type2;
# }
# }
self.type1_c1 = MakeType('Type1')
self.type2_c1 = MakeType('Type2')
self.context1 = ContextMock([self.type1_c1, self.type2_c1], [], 'Context1',
None)
self.type1_c2 = MakeType('Type1')
self.type3_c2 = MakeType('Type3')
self.context2 = ContextMock([self.type1_c2, self.type3_c2], [self.context1],
'Context2', None)
self.type1_ref = syntax_tree.NameTypeReference(_location, 'Type1')
self.type2_ref = syntax_tree.NameTypeReference(_location, 'Type2')
self.type3_ref = syntax_tree.NameTypeReference(_location, 'Type3')
def testGetTypeInScope(self):
self.assertEquals(self.type1_c1, self.type1_ref.GetType(self.context1))
self.assertEquals(self.type2_c1, self.type2_ref.GetType(self.context1))
self.assertEquals(self.type3_c2, self.type3_ref.GetType(self.context2))
def testGetTypeFromOuterScope(self):
self.assertEquals(self.type1_c2, self.type1_ref.GetType(self.context2))
self.assertRaises(syntax_tree.TypeNotFoundError,
self.type2_ref.GetType, self.context2)
def testGetTypeFromInnerScope(self):
self.assertEquals(self.type3_c2, self.type3_ref.GetType(self.context1))
class ScopedTypeReferenceTest(unittest.TestCase):
def setUp(self):
# Context3 {
# Type1;
# Type2;
# Context1 {
# Type1;
# }
# Context2 {
# Type1;
# }
# }
self.type1_c1 = MakeType('Type1')
self.context1 = ContextMock([self.type1_c1], [], 'Context1', None)
self.type1_c2 = MakeType('Type1')
self.context2 = ContextMock([self.type1_c2], [], 'Context2', None)
self.type1_c3 = MakeType('Type1')
self.type2_c3 = MakeType('Type2')
self.context3 = ContextMock([self.type1_c3, self.type2_c3],
[self.context1, self.context2], 'Context3',
None)
self.type1_ref = syntax_tree.NameTypeReference(_location, 'Type1')
self.type2_ref = syntax_tree.NameTypeReference(_location, 'Type2')
self.c1_t1_ref = syntax_tree.ScopedTypeReference(_location, 'Context1',
self.type1_ref)
self.c2_t1_ref = syntax_tree.ScopedTypeReference(_location, 'Context2',
self.type1_ref)
self.c1_t2_ref = syntax_tree.ScopedTypeReference(_location, 'Context1',
self.type2_ref)
def testGetTypeFromOuterScope(self):
self.assertEquals(self.type1_c1, self.c1_t1_ref.GetType(self.context3))
self.assertEquals(self.type1_c2, self.c2_t1_ref.GetType(self.context3))
def testGetTypeFromInnerScope(self):
self.assertEquals(self.type1_c1, self.c1_t1_ref.GetType(self.context1))
self.assertEquals(self.type1_c2, self.c2_t1_ref.GetType(self.context1))
self.assertEquals(self.type1_c1, self.c1_t1_ref.GetType(self.context2))
self.assertEquals(self.type1_c2, self.c2_t1_ref.GetType(self.context2))
def testGetInexistentType(self):
self.assertRaises(syntax_tree.TypeNotFoundError, self.c1_t2_ref.GetType,
self.context1)
self.assertRaises(syntax_tree.TypeNotFoundError, self.c1_t2_ref.GetType,
self.context2)
self.assertRaises(syntax_tree.TypeNotFoundError, self.c1_t2_ref.GetType,
self.context3)
class ArrayTypeReferenceTest(unittest.TestCase):
def setUp(self):
self.type_defn = MakeType('Type')
self.context = ContextMock([self.type_defn], [], 'Context', None)
self.type_ref = syntax_tree.NameTypeReference(_location, 'Type')
self.nonexist_type_ref = syntax_tree.NameTypeReference(_location,
'NonexistentType')
def testGetType(self):
unsized_ref = syntax_tree.ArrayTypeReference(_location, self.type_ref, None)
unsized_array = self.type_defn.GetArrayType(None)
self.assertEquals(unsized_ref.GetType(self.context), unsized_array)
sized_ref = syntax_tree.ArrayTypeReference(_location, self.type_ref, 3)
sized_array = self.type_defn.GetArrayType(3)
self.assertEquals(sized_ref.GetType(self.context), sized_array)
def testGetInexistentType(self):
unsized_ref = syntax_tree.ArrayTypeReference(_location,
self.nonexist_type_ref, None)
self.assertRaises(syntax_tree.TypeNotFoundError, unsized_ref.GetType,
self.context)
sized_ref = syntax_tree.ArrayTypeReference(_location,
self.nonexist_type_ref, 5)
self.assertRaises(syntax_tree.TypeNotFoundError, sized_ref.GetType,
self.context)
class QualifiedTypeReferenceTest(unittest.TestCase):
def setUp(self):
self.type_defn = MakeType('Type')
self.context = ContextMock([self.type_defn], [], 'Context', None)
self.type_ref = syntax_tree.NameTypeReference(_location, 'Type')
self.nonexist_type_ref = syntax_tree.NameTypeReference(_location,
'NonexistentType')
def testGetType(self):
qualified_ref = syntax_tree.QualifiedTypeReference(_location, 'const',
self.type_ref)
self.assertEquals(qualified_ref.GetType(self.context), self.type_defn)
def testGetInexistentType(self):
qualified_ref = syntax_tree.QualifiedTypeReference(_location, 'const',
self.nonexist_type_ref)
self.assertRaises(syntax_tree.TypeNotFoundError, qualified_ref.GetType,
self.context)
class DefinitionTest(unittest.TestCase):
def setUp(self):
pass
def testGetParentScopeStack(self):
definition1 = syntax_tree.Definition(_location, [], 'Definition1')
definition1.is_scope = True
definition2 = syntax_tree.Definition(_location, [], 'Definition2')
definition2.parent = definition1
definition2.is_scope = True
definition3 = syntax_tree.Definition(_location, [], 'Definition3')
definition3.parent = definition2
self.assertEquals(definition1.GetParentScopeStack(), [])
self.assertEquals(definition2.GetParentScopeStack(), [definition1])
self.assertEquals(definition3.GetParentScopeStack(), [definition1,
definition2])
def testGetDefinitionInclude(self):
definition1 = syntax_tree.Definition(_location, [], 'Definition1')
self.assertEquals(definition1.GetDefinitionInclude(), _location.file.header)
include = '/path/to/header.h'
definition2 = syntax_tree.Definition(_location, {'include': include},
'Definition2')
self.assertEquals(definition2.GetDefinitionInclude(), include)
def testGetArrayTypeFail(self):
definition = syntax_tree.Definition(_location, [], 'Definition')
definition.is_type = False
self.assertRaises(syntax_tree.ArrayOfNonTypeError, definition.GetArrayType,
None)
self.assertRaises(syntax_tree.ArrayOfNonTypeError, definition.GetArrayType,
5)
def testGetArrayType(self):
definition = syntax_tree.Definition(_location, [], 'Definition')
definition.is_type = True
unsized = definition.GetArrayType(None)
self.assertEquals(unsized.data_type, definition)
self.assertEquals(unsized.size, None)
self.assertEquals(unsized, definition.GetArrayType(None))
sized = definition.GetArrayType(3)
self.assertEquals(sized.data_type, definition)
self.assertEquals(sized.size, 3)
self.assertEquals(sized, definition.GetArrayType(3))
def testLookUpTypeRecursive(self):
type1_c1 = MakeType('Type1')
type2_c1 = MakeType('Type2')
context1 = ContextMock([type1_c1, type2_c1], [], 'Context1', None)
type1_c2 = MakeType('Type1')
context2 = ContextMock([type1_c2], [], 'Context2', context1)
self.assertEquals(context1.LookUpTypeRecursive('Type1'), type1_c1)
self.assertEquals(context1.LookUpTypeRecursive('Type2'), type2_c1)
self.assertEquals(context1.LookUpTypeRecursive('Type3'), None)
self.assertEquals(context2.LookUpTypeRecursive('Type1'), type1_c2)
self.assertEquals(context2.LookUpTypeRecursive('Type2'), type2_c1)
self.assertEquals(context2.LookUpTypeRecursive('Type3'), None)
def testFindScopesRecursive(self):
scope1_c1 = MakeScope('Scope1')
scope2_c1 = MakeScope('Scope2')
context1 = ContextMock([], [scope1_c1, scope2_c1], 'Context1', None)
scope1_c2 = MakeScope('Scope1')
context2 = ContextMock([], [scope1_c2], 'Context2', context1)
self.assertEquals(context1.FindScopesRecursive('Scope1'), [scope1_c1])
self.assertEquals(context1.FindScopesRecursive('Scope2'), [scope2_c1])
self.assertEquals(context2.FindScopesRecursive('Scope1'), [scope1_c2,
scope1_c1])
self.assertEquals(context2.FindScopesRecursive('Scope2'), [scope2_c1])
context3 = ContextMock([], [context1], 'Context3', None)
self.assertEquals(context3.FindScopesRecursive('Scope1'), [])
self.assertEquals(context3.FindScopesRecursive('Scope2'), [])
def testSetBindingModel(self):
class DefinitionMock(syntax_tree.Definition):
defn_type = 'DefinitionMock'
def __init__(self, name, binding_model_name):
syntax_tree.Definition.__init__(self, _location, [], name)
self.binding_model_name = binding_model_name
self.is_type = True
def LookUpBindingModel(self):
return self.binding_model_name
bm_binding_model = object()
unsized_array_binding_model = object()
sized_array_binding_model = object()
binding_models = {'bm': bm_binding_model,
'unsized_array': unsized_array_binding_model,
'sized_array': sized_array_binding_model}
definition1 = DefinitionMock('Definition1', 'bm')
definition1.SetBindingModel(binding_models)
self.assertEquals(definition1.binding_model, bm_binding_model)
definition2 = DefinitionMock('Definition2', 'non_bm')
self.assertRaises(syntax_tree.UnknownBindingModelError,
definition2.SetBindingModel, binding_models)
definition3 = DefinitionMock('Definition3', 'bm')
unsized_array = definition3.GetArrayType(None)
sized_array = definition3.GetArrayType(21)
definition3.SetBindingModel(binding_models)
self.assertEquals(unsized_array.binding_model, unsized_array_binding_model)
self.assertEquals(sized_array.binding_model, sized_array_binding_model)
class ClassTest(unittest.TestCase):
def setUp(self):
self.type1_c1 = MakeType('Type1')
self.type2 = MakeType('Type2')
self.scope1_c1 = MakeScope('Scope1')
self.scope2 = MakeScope('Scope2')
self.class1 = syntax_tree.Class(_location, {'binding_model': 'bm1'},
'Class1', None,
[self.type1_c1, self.type2, self.scope1_c1,
self.scope2])
self.type1_c2 = MakeType('Type1')
self.type3 = MakeType('Type3')
self.scope1_c2 = MakeScope('Scope1')
self.scope3 = MakeScope('Scope3')
self.class1_ref = TypeReferenceMock(self.class1)
self.class2 = syntax_tree.Class(_location, {}, 'Class2', self.class1_ref,
[self.type1_c2, self.type3, self.scope1_c2,
self.scope3])
self.class3 = syntax_tree.Class(_location, {},
'Class3', None, [self.class1, self.class2])
invalid_base = MakeType('Type5')
self.class4 = syntax_tree.Class(_location, {}, 'Class4',
TypeReferenceMock(invalid_base), [])
self.type1_global = MakeType('Type1')
self.type4 = MakeType('Type4')
self.context = ContextMock([self.class3, self.type1_global, self.type4],
[self.class3], 'Context', None)
def testTypeScope(self):
for c in [self.class1, self.class2, self.class3, self.class4]:
self.assertTrue(c.is_type)
self.assertTrue(c.is_scope)
def testParent(self):
self.assertEquals(self.type1_c1.parent, self.class1)
self.assertEquals(self.type2.parent, self.class1)
self.assertEquals(self.scope1_c1.parent, self.class1)
self.assertEquals(self.scope2.parent, self.class1)
self.assertEquals(self.type1_c2.parent, self.class2)
self.assertEquals(self.type3.parent, self.class2)
self.assertEquals(self.scope1_c2.parent, self.class2)
self.assertEquals(self.scope3.parent, self.class2)
self.assertEquals(self.class1.parent, self.class3)
self.assertEquals(self.class2.parent, self.class3)
def testResolveTypeReferences(self):
self.assertEquals(self.class1._types_resolved, False)
self.class1.ResolveTypeReferences()
self.assertEquals(self.class1.base_type, None)
self.assertEquals(self.class1._types_resolved, True)
self.class2.ResolveTypeReferences()
self.assertEquals(self.class2._types_resolved, True)
self.assertEquals(self.class2.base_type, self.class1)
# check that the type resolution for class2 happened in the correct scope
self.assertEquals(self.class1_ref.context, self.class3)
self.assertEquals(self.class1_ref.scoped, False)
self.assertRaises(syntax_tree.DerivingFromNonClassError,
self.class4.ResolveTypeReferences)
def testGetBaseSafe(self):
self.assertEquals(self.class1.GetBaseSafe(), None)
self.assertEquals(self.class2.GetBaseSafe(), self.class1)
self.assertEquals(self.class3.GetBaseSafe(), None)
self.assertRaises(syntax_tree.DerivingFromNonClassError,
self.class4.GetBaseSafe)
def testGetObjectsRecursive(self):
class1_list = self.class1.GetObjectsRecursive()
self.assertEquals(class1_list[0], self.class1)
class1_list.sort()
class1_list_expected = [self.class1, self.type1_c1, self.type2,
self.scope1_c1, self.scope2]
class1_list_expected.sort()
self.assertEquals(class1_list, class1_list_expected)
class2_list = self.class2.GetObjectsRecursive()
self.assertEquals(class2_list[0], self.class2)
class2_list.sort()
class2_list_expected = [self.class2, self.type1_c2, self.type3,
self.scope1_c2, self.scope3]
class2_list_expected.sort()
self.assertEquals(class2_list, class2_list_expected)
class3_list = self.class3.GetObjectsRecursive()
self.assertEquals(class3_list[0], self.class3)
class3_list.sort()
class3_list_expected = [self.class3] + class1_list + class2_list
class3_list_expected.sort()
self.assertEquals(class3_list, class3_list_expected)
def testLookUpType(self):
self.assertEquals(self.class1.LookUpType('Type1'), self.type1_c1)
self.assertEquals(self.class1.LookUpType('Type2'), self.type2)
self.assertEquals(self.class1.LookUpType('Type3'), None)
self.assertEquals(self.class1.LookUpType('Type4'), None)
self.assertEquals(self.class1.LookUpType('Class1'), None)
self.assertEquals(self.class1.LookUpType('Class2'), None)
self.assertEquals(self.class1.LookUpType('Class3'), None)
self.assertEquals(self.class1.LookUpType('Scope1'), None)
self.assertEquals(self.class1.LookUpType('Scope2'), None)
self.assertEquals(self.class1.LookUpType('Scope3'), None)
self.assertEquals(self.class2.LookUpType('Type1'), self.type1_c2)
self.assertEquals(self.class2.LookUpType('Type2'), self.type2)
self.assertEquals(self.class2.LookUpType('Type3'), self.type3)
self.assertEquals(self.class2.LookUpType('Type4'), None)
self.assertEquals(self.class2.LookUpType('Class1'), None)
self.assertEquals(self.class2.LookUpType('Class2'), None)
self.assertEquals(self.class2.LookUpType('Class3'), None)
self.assertEquals(self.class2.LookUpType('Scope1'), None)
self.assertEquals(self.class2.LookUpType('Scope2'), None)
self.assertEquals(self.class2.LookUpType('Scope3'), None)
self.assertEquals(self.class3.LookUpType('Type1'), None)
self.assertEquals(self.class3.LookUpType('Type2'), None)
self.assertEquals(self.class3.LookUpType('Type3'), None)
self.assertEquals(self.class3.LookUpType('Type4'), None)
self.assertEquals(self.class3.LookUpType('Class1'), self.class1)
self.assertEquals(self.class3.LookUpType('Class2'), self.class2)
self.assertEquals(self.class3.LookUpType('Class3'), None)
self.assertEquals(self.class3.LookUpType('Scope1'), None)
self.assertEquals(self.class3.LookUpType('Scope2'), None)
self.assertEquals(self.class3.LookUpType('Scope3'), None)
def testFindScopes(self):
self.assertEquals(self.class1.FindScopes('Class1'), [])
self.assertEquals(self.class1.FindScopes('Class2'), [])
self.assertEquals(self.class1.FindScopes('Type1'), [])
self.assertEquals(self.class1.FindScopes('Type2'), [])
self.assertEquals(self.class1.FindScopes('Scope1'), [self.scope1_c1])
self.assertEquals(self.class1.FindScopes('Scope2'), [self.scope2])
self.assertEquals(self.class1.FindScopes('Scope3'), [])
self.assertEquals(self.class2.FindScopes('Class1'), [])
self.assertEquals(self.class2.FindScopes('Class2'), [])
self.assertEquals(self.class2.FindScopes('Type1'), [])
self.assertEquals(self.class2.FindScopes('Type2'), [])
self.assertEquals(self.class2.FindScopes('Scope1'), [self.scope1_c2,
self.scope1_c1])
self.assertEquals(self.class2.FindScopes('Scope2'), [self.scope2])
self.assertEquals(self.class2.FindScopes('Scope3'), [self.scope3])
self.assertEquals(self.class3.FindScopes('Class1'), [self.class1])
self.assertEquals(self.class3.FindScopes('Class2'), [self.class2])
self.assertEquals(self.class3.FindScopes('Type1'), [])
self.assertEquals(self.class3.FindScopes('Type2'), [])
self.assertEquals(self.class3.FindScopes('Scope1'), [])
self.assertEquals(self.class3.FindScopes('Scope2'), [])
self.assertEquals(self.class3.FindScopes('Scope3'), [])
def testLookUpBindingModel(self):
self.assertEquals(self.class1.LookUpBindingModel(), 'bm1')
self.assertEquals(self.class2.LookUpBindingModel(), 'bm1')
self.assertEquals(self.class3.LookUpBindingModel(), None)
self.assertRaises(syntax_tree.DerivingFromNonClassError,
self.class4.LookUpBindingModel)
class NamespaceTest(unittest.TestCase):
def setUp(self):
self.type1_n1 = MakeType('Type1')
self.type2 = MakeType('Type2')
self.scope1_n1 = MakeScope('Scope1')
self.scope2 = MakeScope('Scope2')
self.ns1 = syntax_tree.Namespace(_location, {}, 'ns1',
[self.type1_n1, self.type2,
self.scope1_n1, self.scope2])
self.type1_n2 = MakeType('Type1')
self.type3 = MakeType('Type3')
self.scope1_n2 = MakeScope('Scope1')
self.scope3 = MakeScope('Scope3')
self.ns2 = syntax_tree.Namespace(_location, {}, 'ns2',
[self.type1_n2, self.type3,
self.scope1_n2, self.scope3])
self.type_ns1 = MakeType('ns1')
self.ns3 = syntax_tree.Namespace(_location, {}, 'ns3', [self.ns1, self.ns2,
self.type_ns1])
def testTypeScope(self):
for ns in [self.ns1, self.ns2, self.ns3]:
self.assertFalse(ns.is_type)
self.assertTrue(ns.is_scope)
def testParents(self):
self.assertEquals(self.type1_n1.parent, self.ns1)
self.assertEquals(self.type2.parent, self.ns1)
self.assertEquals(self.scope1_n1.parent, self.ns1)
self.assertEquals(self.scope2.parent, self.ns1)
self.assertEquals(self.type1_n2.parent, self.ns2)
self.assertEquals(self.type3.parent, self.ns2)
self.assertEquals(self.scope1_n2.parent, self.ns2)
self.assertEquals(self.scope3.parent, self.ns2)
self.assertEquals(self.ns1.parent, self.ns3)
self.assertEquals(self.ns2.parent, self.ns3)
self.assertEquals(self.type_ns1.parent, self.ns3)
def testGetObjectsRecursive(self):
ns1_list = self.ns1.GetObjectsRecursive()
self.assertEquals(ns1_list[0], self.ns1)
ns1_list.sort()
ns1_list_expected = [self.ns1, self.type1_n1, self.type2,
self.scope1_n1, self.scope2]
ns1_list_expected.sort()
self.assertEquals(ns1_list, ns1_list_expected)
ns2_list = self.ns2.GetObjectsRecursive()
self.assertEquals(ns2_list[0], self.ns2)
ns2_list.sort()
ns2_list_expected = [self.ns2, self.type1_n2, self.type3,
self.scope1_n2, self.scope3]
ns2_list_expected.sort()
self.assertEquals(ns2_list, ns2_list_expected)
ns3_list = self.ns3.GetObjectsRecursive()
self.assertEquals(ns3_list[0], self.ns3)
ns3_list.sort()
ns3_list_expected = [self.ns3, self.type_ns1] + ns1_list + ns2_list
ns3_list_expected.sort()
self.assertEquals(ns3_list, ns3_list_expected)
def testLookUpType(self):
self.assertEquals(self.ns1.LookUpType('Type1'), self.type1_n1)
self.assertEquals(self.ns1.LookUpType('Type2'), self.type2)
self.assertEquals(self.ns1.LookUpType('Type3'), None)
self.assertEquals(self.ns1.LookUpType('ns1'), None)
self.assertEquals(self.ns1.LookUpType('ns2'), None)
self.assertEquals(self.ns1.LookUpType('ns3'), None)
self.assertEquals(self.ns1.LookUpType('Scope1'), None)
self.assertEquals(self.ns1.LookUpType('Scope2'), None)
self.assertEquals(self.ns1.LookUpType('Scope3'), None)
self.assertEquals(self.ns2.LookUpType('Type1'), self.type1_n2)
self.assertEquals(self.ns2.LookUpType('Type2'), None)
self.assertEquals(self.ns2.LookUpType('Type3'), self.type3)
self.assertEquals(self.ns2.LookUpType('ns1'), None)
self.assertEquals(self.ns2.LookUpType('ns2'), None)
self.assertEquals(self.ns2.LookUpType('ns3'), None)
self.assertEquals(self.ns2.LookUpType('Scope1'), None)
self.assertEquals(self.ns2.LookUpType('Scope2'), None)
self.assertEquals(self.ns2.LookUpType('Scope3'), None)
self.assertEquals(self.ns3.LookUpType('Type1'), None)
self.assertEquals(self.ns3.LookUpType('Type2'), None)
| |
in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createUser_args:
"""
Attributes:
- handle
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'handle', None, None, ), # 1
)
def __init__(self, handle=None,):
self.handle = handle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.handle = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createUser_args')
if self.handle is not None:
oprot.writeFieldBegin('handle', TType.STRING, 1)
oprot.writeString(self.handle)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createUser_result:
"""
Attributes:
- existsx
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'existsx', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1
)
def __init__(self, existsx=None,):
self.existsx = existsx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.existsx = AlreadyExistsException()
self.existsx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createUser_result')
if self.existsx is not None:
oprot.writeFieldBegin('existsx', TType.STRUCT, 1)
self.existsx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
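# Note added for readability (not part of the generated output): the *_args /
# *_result classes in this module follow the standard Thrift-generated pattern --
# read() pulls fields off an input protocol (delegating to fastbinary for the
# accelerated binary protocol when available) and write() pushes them onto an
# output protocol. A minimal, hypothetical struct-level round trip:
#
#   args = createUser_args(handle='alice')
#   args.write(oprot)            # oprot: a TProtocol bound to some transport
#   ...
#   received = createUser_args()
#   received.read(iprot)         # iprot: a protocol over the same bytes
#   assert received == args      # __eq__ compares class and __dict__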
class subscribe_args:
"""
Attributes:
- handle
- theirhandle
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'handle', None, None, ), # 1
(2, TType.STRING, 'theirhandle', None, None, ), # 2
)
def __init__(self, handle=None, theirhandle=None,):
self.handle = handle
self.theirhandle = theirhandle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.handle = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.theirhandle = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('subscribe_args')
if self.handle is not None:
oprot.writeFieldBegin('handle', TType.STRING, 1)
oprot.writeString(self.handle)
oprot.writeFieldEnd()
if self.theirhandle is not None:
oprot.writeFieldBegin('theirhandle', TType.STRING, 2)
oprot.writeString(self.theirhandle)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class subscribe_result:
"""
Attributes:
- userx
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'userx', (NoSuchUserException, NoSuchUserException.thrift_spec), None, ), # 1
)
def __init__(self, userx=None,):
self.userx = userx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.userx = NoSuchUserException()
self.userx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('subscribe_result')
if self.userx is not None:
oprot.writeFieldBegin('userx', TType.STRUCT, 1)
self.userx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class unsubscribe_args:
"""
Attributes:
- handle
- theirhandle
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'handle', None, None, ), # 1
(2, TType.STRING, 'theirhandle', None, None, ), # 2
)
def __init__(self, handle=None, theirhandle=None,):
self.handle = handle
self.theirhandle = theirhandle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.handle = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.theirhandle = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('unsubscribe_args')
if self.handle is not None:
oprot.writeFieldBegin('handle', TType.STRING, 1)
oprot.writeString(self.handle)
oprot.writeFieldEnd()
if self.theirhandle is not None:
oprot.writeFieldBegin('theirhandle', TType.STRING, 2)
oprot.writeString(self.theirhandle)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class unsubscribe_result:
"""
Attributes:
- userx
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'userx', (NoSuchUserException, NoSuchUserException.thrift_spec), None, ), # 1
)
def __init__(self, userx=None,):
self.userx = userx
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.userx = NoSuchUserException()
self.userx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('unsubscribe_result')
if self.userx is not None:
oprot.writeFieldBegin('userx', TType.STRUCT, 1)
self.userx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class testSubscribe_args:
"""
Attributes:
- handle
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'handle', None, None, ), # 1
)
def __init__(self, handle=None,):
self.handle = handle
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.handle = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('testSubscribe_args')
if self.handle is not None:
oprot.writeFieldBegin('handle', TType.STRING, 1)
oprot.writeString(self.handle)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class testSubscribe_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('testSubscribe_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
| |
? 140 ALA A CB 1
ATOM 857 N N . GLN A 1 140 ? 38.584 -7.502 31.515 1.00 11.10 ? 141 GLN A N 1
ATOM 858 C CA . GLN A 1 140 ? 39.500 -7.524 32.666 1.00 11.23 ? 141 GLN A CA 1
ATOM 859 C C . GLN A 1 140 ? 40.311 -6.206 32.828 1.00 11.20 ? 141 GLN A C 1
ATOM 860 O O . GLN A 1 140 ? 40.782 -5.835 33.904 1.00 12.35 ? 141 GLN A O 1
ATOM 861 C CB . GLN A 1 140 ? 40.441 -8.739 32.505 1.00 10.84 ? 141 GLN A CB 1
ATOM 862 C CG . GLN A 1 140 ? 41.639 -8.819 33.390 1.00 11.46 ? 141 GLN A CG 1
ATOM 863 C CD . GLN A 1 140 ? 41.271 -8.841 34.856 1.00 11.55 ? 141 GLN A CD 1
ATOM 864 O OE1 . GLN A 1 140 ? 40.207 -9.363 35.206 1.00 13.36 ? 141 GLN A OE1 1
ATOM 865 N NE2 . GLN A 1 140 ? 42.130 -8.269 35.701 1.00 12.52 ? 141 GLN A NE2 1
ATOM 866 N N . LEU A 1 141 ? 40.433 -5.454 31.752 1.00 10.63 ? 142 LEU A N 1
ATOM 867 C CA . LEU A 1 141 ? 41.176 -4.188 31.738 1.00 11.42 ? 142 LEU A CA 1
ATOM 868 C C . LEU A 1 141 ? 40.269 -2.947 31.828 1.00 12.74 ? 142 LEU A C 1
ATOM 869 O O . LEU A 1 141 ? 40.763 -1.849 31.872 1.00 12.55 ? 142 LEU A O 1
ATOM 870 C CB . LEU A 1 141 ? 42.000 -4.115 30.466 1.00 12.67 ? 142 LEU A CB 1
ATOM 871 C CG . LEU A 1 141 ? 43.149 -5.143 30.362 1.00 13.00 ? 142 LEU A CG 1
ATOM 872 C CD1 . LEU A 1 141 ? 43.739 -5.137 28.944 1.00 12.91 ? 142 LEU A CD1 1
ATOM 873 C CD2 . LEU A 1 141 ? 44.221 -4.970 31.391 1.00 16.07 ? 142 LEU A CD2 1
ATOM 874 N N . GLY A 1 142 ? 38.981 -3.161 31.929 1.00 12.28 ? 143 GLY A N 1
ATOM 875 C CA . GLY A 1 142 ? 37.995 -2.101 32.148 1.00 12.31 ? 143 GLY A CA 1
ATOM 876 C C . GLY A 1 142 ? 37.486 -1.510 30.896 1.00 12.75 ? 143 GLY A C 1
ATOM 877 O O . GLY A 1 142 ? 36.971 -0.391 30.866 1.00 14.50 ? 143 GLY A O 1
ATOM 878 N N . GLY A 1 143 ? 37.534 -2.292 29.833 1.00 10.63 ? 144 GLY A N 1
ATOM 879 C CA . GLY A 1 143 ? 36.978 -1.782 28.574 1.00 11.42 ? 144 GLY A CA 1
ATOM 880 C C . GLY A 1 143 ? 38.048 -1.325 27.671 1.00 11.20 ? 144 GLY A C 1
ATOM 881 O O . GLY A 1 143 ? 39.252 -1.335 27.988 1.00 12.39 ? 144 GLY A O 1
ATOM 882 N N . PRO A 1 144 ? 37.696 -0.964 26.445 1.00 12.49 ? 145 PRO A N 1
ATOM 883 C CA . PRO A 1 144 ? 38.679 -0.538 25.474 1.00 11.30 ? 145 PRO A CA 1
ATOM 884 C C . PRO A 1 144 ? 39.585 0.596 25.970 1.00 10.88 ? 145 PRO A C 1
ATOM 885 O O . PRO A 1 144 ? 40.798 0.584 25.701 1.00 10.78 ? 145 PRO A O 1
ATOM 886 C CB . PRO A 1 144 ? 37.815 -0.158 24.257 1.00 11.03 ? 145 PRO A CB 1
ATOM 887 C CG . PRO A 1 144 ? 36.653 -1.072 24.359 1.00 10.94 ? 145 PRO A CG 1
ATOM 888 C CD . PRO A 1 144 ? 36.379 -1.198 25.842 1.00 11.59 ? 145 PRO A CD 1
ATOM 889 N N . GLY A 1 145 ? 38.968 1.545 26.675 1.00 9.97 ? 146 GLY A N 1
ATOM 890 C CA . GLY A 1 145 ? 39.659 2.617 27.417 1.00 10.63 ? 146 GLY A CA 1
ATOM 891 C C . GLY A 1 145 ? 40.814 2.172 28.263 1.00 10.50 ? 146 GLY A C 1
ATOM 892 O O . GLY A 1 145 ? 41.846 2.891 28.384 1.00 10.41 ? 146 GLY A O 1
ATOM 893 N N . GLY A 1 146 ? 40.608 1.044 28.970 1.00 11.53 ? 147 GLY A N 1
ATOM 894 C CA . GLY A 1 146 ? 41.601 0.463 29.806 1.00 12.96 ? 147 GLY A CA 1
ATOM 895 C C . GLY A 1 146 ? 42.776 -0.086 29.092 1.00 11.27 ? 147 GLY A C 1
ATOM 896 O O . GLY A 1 146 ? 43.922 0.128 29.506 1.00 13.17 ? 147 GLY A O 1
ATOM 897 N N . VAL A 1 147 ? 42.534 -0.663 27.903 1.00 12.56 ? 148 VAL A N 1
ATOM 898 C CA . VAL A 1 147 ? 43.645 -1.067 27.035 1.00 11.60 ? 148 VAL A CA 1
ATOM 899 C C . VAL A 1 147 ? 44.457 0.158 26.578 1.00 10.33 ? 148 VAL A C 1
ATOM 900 O O . VAL A 1 147 ? 45.732 0.202 26.616 1.00 10.16 ? 148 VAL A O 1
ATOM 901 C CB . VAL A 1 147 ? 43.147 -1.900 25.820 1.00 12.67 ? 148 VAL A CB 1
ATOM 902 C CG1 . VAL A 1 147 ? 44.369 -2.500 25.123 1.00 13.37 ? 148 VAL A CG1 1
ATOM 903 C CG2 . VAL A 1 147 ? 42.181 -2.992 26.309 1.00 12.87 ? 148 VAL A CG2 1
ATOM 904 N N . THR A 1 148 ? 43.725 1.190 26.124 1.00 10.66 ? 149 THR A N 1
ATOM 905 C CA . THR A 1 148 ? 44.380 2.415 25.783 1.00 10.07 ? 149 THR A CA 1
ATOM 906 C C . THR A 1 148 ? 45.178 3.010 26.919 1.00 10.23 ? 149 THR A C 1
ATOM 907 O O . THR A 1 148 ? 46.338 3.453 26.670 1.00 10.31 ? 149 THR A O 1
ATOM 908 C CB . THR A 1 148 ? 43.387 3.455 25.257 1.00 11.12 ? 149 THR A CB 1
ATOM 909 O OG1 . THR A 1 148 ? 42.657 2.872 24.124 1.00 11.18 ? 149 THR A OG1 1
ATOM 910 C CG2 . THR A 1 148 ? 44.096 4.733 24.787 1.00 11.08 ? 149 THR A CG2 1
ATOM 911 N N . ALA A 1 149 ? 44.633 2.986 28.156 1.00 10.34 ? 150 ALA A N 1
ATOM 912 C CA . ALA A 1 149 ? 45.336 3.573 29.286 1.00 11.48 ? 150 ALA A CA 1
ATOM 913 C C . ALA A 1 149 ? 46.672 2.851 29.548 1.00 11.01 ? 150 ALA A C 1
ATOM 914 O O . ALA A 1 149 ? 47.689 3.534 29.855 1.00 12.08 ? 150 ALA A O 1
ATOM 915 C CB . ALA A 1 149 ? 44.450 3.627 30.517 1.00 11.81 ? 150 ALA A CB 1
ATOM 916 N N . PHE A 1 150 ? 46.687 1.526 29.329 1.00 11.55 ? 151 PHE A N 1
ATOM 917 C CA . PHE A 1 150 ? 47.929 0.737 29.433 1.00 | |
(1.0, -100, 'Simulation pressure for hydration free energies (atm)', 'Hydration free energy using molecular dynamics', 'hydration'),
"energy_rms_override" : (0.0, 0, 'If nonzero, override the Energy RMS used to normalize the energy part of the objective function term', 'Energy matching', 'abinitio'),
"force_rms_override" : (0.0, 0, 'If nonzero, override the Force RMS used to normalize the energy part of the objective function term', 'Force matching', 'abinitio'),
"rmsd_denom" : (0.1, 0, 'RMSD normalization for optimized geometries in Angstrom', 'Binding energy targets', 'binding'),
"wavenumber_tol" : (10.0, 0, 'Frequency normalization (in wavenumber) for vibrational frequencies', 'Vibrational frequency targets', 'vibration'),
"dipole_denom" : (1.0, 0, 'Dipole normalization (Debye) ; set to 0 if a zero weight is desired', 'Monomer property targets', 'monomer'),
"quadrupole_denom" : (1.0, 0, 'Quadrupole normalization (Buckingham) ; set to 0 if a zero weight is desired', 'Monomer property targets', 'monomer'),
"polarizability_denom" : (1.0, 0, 'Dipole polarizability tensor normalization (cubic Angstrom) ; set to 0 if a zero weight is desired', 'Monomer property targets with polarizability', 'monomer'),
"liquid_timestep" : (1.0, 0, 'Time step size for the liquid simulation.', 'Condensed phase property targets', 'liquid'),
"liquid_interval" : (0.1, 0, 'Time interval for saving coordinates for the liquid production run.', 'Condensed phase property targets', 'liquid'),
"gas_timestep" : (1.0, 0, 'Time step size for the gas simulation (if zero, use default in external script.).', 'Condensed phase property targets', 'liquid'),
"gas_interval" : (0.1, 0, 'Time interval for saving coordinates for the gas production run (if zero, use default in external script.)', 'Condensed phase property targets', 'liquid'),
"lipid_timestep" : (1.0, 0, 'Time step size for the lipid simulation.', 'Lipid property targets', 'lipid'),
"lipid_interval" : (0.1, 0, 'Time interval for saving coordinates for the lipid production run.', 'Lipid property targets', 'lipid'),
"nvt_timestep" : (1.0, 0, 'Time step size for the NVT simulation.', 'Condensed phase property targets', 'liquid'),
"nvt_interval" : (0.1, 0, 'Time interval for saving coordinates for the NVT simulation production run.', 'Condensed phase property targets', 'liquid'),
"self_pol_mu0" : (0.0, -150, 'Gas-phase dipole parameter for self-polarization correction (in debye).', 'Condensed phase property targets', 'liquid'),
"self_pol_alpha" : (0.0, -150, 'Polarizability parameter for self-polarization correction (in debye).', 'Condensed phase property targets', 'liquid'),
"epsgrad" : (0.0, -150, 'Gradient below this threshold will be set to zero.', 'All targets'),
"energy_asymmetry": (1.0, -150, 'Snapshots with (E_MM - E_QM) < 0.0 will have their weights increased by this factor. Only valid if energy_mode is set to "qm_minimum".', 'Ab initio targets'),
"nonbonded_cutoff" : (None, -1, 'Cutoff for nonbonded interactions (passed to engines).', 'Condensed phase property targets', 'liquid'),
"vdw_cutoff" : (None, -2, 'Cutoff for vdW interactions if different from other nonbonded interactions', 'Condensed phase property targets', 'liquid'),
"liquid_fdiff_h" : (1e-2, 0, 'Step size for finite difference derivatives for liquid targets in pure_num_grad', 'Condensed phase property targets', 'liquid'),
"restrain_k" : (1.0, 0, 'Force constant for harmonic positional energy restraints', 'Torsion profile with MM relaxation target', 'torsionprofile'),
},
'sections': {}
}
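# Readability note (inferred from how printsection() consumes these tables
# below; not an upstream comment): each option entry above is a tuple of
# (default value, priority, one-line description, relevance text, optional
# target keyword), and this table's 'sections' bucket is empty.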
all_opts_names = list(itertools.chain(*[i.keys() for i in gen_opts_types.values()])) + list(itertools.chain(*[i.keys() for i in tgt_opts_types.values()]))
## Check for uniqueness of option names.
for i in all_opts_names:
iocc = []
for typ, dct in gen_opts_types.items():
if i in dct:
iocc.append("gen_opt_types %s" % typ)
for typ, dct in tgt_opts_types.items():
if i in dct:
iocc.append("gen_opt_types %s" % typ)
if len(iocc) != 1:
logger.error("CODING ERROR: ForceBalance option %s occurs in more than one place (%s)\n" % (i, str(iocc)))
raise RuntimeError
## Default general options - basically a collapsed version of gen_opts_types.
gen_opts_defaults = {}
for t in gen_opts_types:
subdict = {}
for i in gen_opts_types[t]:
subdict[i] = gen_opts_types[t][i][0]
gen_opts_defaults.update(subdict)
## Default target options - basically a collapsed version of tgt_opts_types.
tgt_opts_defaults = {}
for t in tgt_opts_types:
subdict = {}
for i in tgt_opts_types[t]:
subdict[i] = tgt_opts_types[t][i][0]
tgt_opts_defaults.update(subdict)
## Option maps for maintaining backward compatibility.
bkwd = {"simtype" : "type",
"masterfile" : "inter_txt",
"openmm_cuda_precision" : "openmm_precision",
"mdrun_threads" : "md_threads",
"mts_vvvr" : "mts_integrator",
"amoeba_polarization" : "amoeba_pol",
"liquid_prod_steps" : "liquid_md_steps",
"gas_prod_steps" : "gas_md_steps",
"liquid_equ_steps" : "liquid_eq_steps",
"gas_equ_steps" : "gas_eq_steps",
"lipid_prod_steps" : "lipid_md_steps",
"lipid_equ_steps" : "lipid_eq_steps",
}
## Listing of sections in the input file.
mainsections = ["SIMULATION","TARGET","OPTIONS","END","NONE"]
def read_mvals(fobj):
Answer = []
for line in fobj:
if re.match("(/read_mvals)|(^\$end)",line):
break
Answer.append(float(line.split('[', maxsplit=1)[-1].split(']', maxsplit=1)[0].split()[-1]))
return Answer
def read_pvals(fobj):
Answer = []
for line in fobj:
if re.match("(/read_pvals)|(^\$end)",line):
break
Answer.append(float(line.split('[', maxsplit=1)[-1].split(']', maxsplit=1)[0].split()[-1]))
return Answer
def read_priors(fobj):
Answer = OrderedDict()
for line in fobj:
line = line.split("#")[0]
if re.match("(/priors)|(^\$end)",line):
break
Answer[line.split()[0]] = float(line.split()[-1])
return Answer
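# Illustrative sketch (added; parameter names are made up, only the shape
# matters) of the blocks the three helpers above consume. read_mvals and
# read_pvals keep the last number inside the square brackets on each line;
# read_priors maps the first token of each non-comment line to its trailing
# float; every block ends at a line starting with its /<keyword> closer or $end.
#
#   read_mvals
#   0 [  1.0000e-01 ]  # e.g. VDW/Sigma
#   /read_mvals
#
#   priors
#   VDW/Sigma  : 1.0e-1
#   /priors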
def read_internals(fobj):
return
## ParsTab that refers to subsection parsers.
ParsTab = {"read_mvals" : read_mvals,
"read_pvals" : read_pvals,
"priors" : read_priors,
"internal" : read_internals
}
def printsection(heading,optdict,typedict):
""" Print out a section of the input file in a parser-compliant and readable format.
At the time of writing of this function, it's mainly intended to be called by MakeInputFile.py.
The heading is printed first (it is something like $options or $target). Then it loops
through the variable types (strings, allcaps, etc...) and the keys in each variable type.
The one-line description of each key is printed out as a comment, and then the key itself is
printed out along with the value provided in optdict. If optdict is None, then the default
value is printed out instead.
@param[in] heading Heading, either $options or $target
@param[in] optdict Options dictionary or None.
@param[in] typedict Option type dictionary, either gen_opts_types or tgt_opts_types specified in this file.
@return Answer List of strings for the section that we are printing out.
"""
from forcebalance.objective import Implemented_Targets
from forcebalance.optimizer import Optimizer
def FilterTargets(search):
if type(search) == str:
search = [search]
list_out = []
for key in sorted(Implemented_Targets.keys()):
if any([i.lower() in key.lower() for i in search]):
list_out.append(Implemented_Targets[key].__name__)
return ', '.join(sorted(list_out))
Answer = [heading]
firstentry = 1
Options = []
for i in ['strings','allcaps','lists','ints','bools','floats','sections']:
vartype = re.sub('s$','',i)
for j in typedict[i]:
Option = []
val = optdict[j] if optdict is not None else typedict[i][j][0]
if firstentry:
firstentry = 0
else:
Option.append("")
Priority = typedict[i][j][1]
Option.append("# (%s) %s" % (vartype, typedict[i][j][2]))
if len(typedict[i][j]) >= 4:
Relevance = typedict[i][j][3]
str2 = "# used in: %s" % Relevance
if len(typedict[i][j]) >= 5:
TargetName = FilterTargets(typedict[i][j][4])
str2 += " (%s)" % TargetName
else:
TargetName = "None"
Option.append(str2)
else:
Relevance = "None"
Option.append("%s %s" % (str(j),str(val)))
Options.append((Option, Priority, TargetName, j))
def key1(o):
return o[1]
def key2(o):
return o[2]
def key3(o):
return o[3]
Options.sort(key=key3)
Options.sort(key=key2)
Options.sort(key=key1, reverse=True)
for o in Options:
Answer += o[0]
# PriSet = sorted(list(set(Priorities)))[::-1]
# TgtSet = sorted(list(set(TargetNames)))
# RelSet = sorted(list(set(Relevances)))
# for p0 in PriSet:
# ogrp = []
# rgrp = []
# tgrp = []
# for o, p, r, t in zip(Options, Priorities, Relevances, TargetNames):
# if p == p0:
# ogrp.append(o)
# rgrp.append(r)
# tgrp.append(t)
# ogrp2 = []
# rgrp2 = []
# for t0 in TgtSet:
# for o, r, t in zip(ogrp, rgrp, tgrp):
# if t == t0:
# ogrp2.append(
Answer.append("$end")
return Answer
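# Minimal usage sketch for printsection() (hedged; MakeInputFile.py is the
# intended caller per the docstring). Passing optdict=None prints every key
# with its default value, grouped by priority and per-target relevance:
#
#   for line in printsection('$options', None, gen_opts_types):
#       print(line)
#   for line in printsection('$target', None, tgt_opts_types):
#       print(line)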
def parse_inputs(input_file=None):
""" Parse through the input file and read all user-supplied options.
This is usually the first thing that happens when an executable script is called.
Our parser first loads the default options, and then updates these options as it
encounters keywords.
Each keyword corresponds to a variable type; each variable type (e.g. string,
integer, float, boolean) is treated differently. For more elaborate inputs,
there is a 'section' variable type.
There is only one set of general options, but multiple sets of target options.
Each target has its own section delimited by the \em $target keyword,
and we build a list of target options.
@param[in] input_file The name of the input file.
@return options General options.
@return tgt_opts List of fitting target options.
@todo Implement internal coordinates.
@todo Implement sampling correction.
@todo Implement charge groups.
"""
logger.info("Reading options from file: %s\n" % input_file)
section = "NONE"
# First load in all of the default options.
options = deepcopy(gen_opts_defaults) # deepcopy to make sure options doesn't make changes to gen_opts_defaults
options['root'] = os.getcwd()
options['input_file'] = input_file
tgt_opts = []
this_tgt_opt = deepcopy(tgt_opts_defaults)
# Give back a bunch | |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'uiMainWindow.ui'
##
## Created by: Qt User Interface Compiler version 6.2.3
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QAction, QBrush, QColor, QConicalGradient,
QCursor, QFont, QFontDatabase, QGradient,
QIcon, QImage, QKeySequence, QLinearGradient,
QPainter, QPalette, QPixmap, QRadialGradient,
QTransform)
from PySide6.QtWidgets import (QAbstractSpinBox, QApplication, QCheckBox, QComboBox,
QGridLayout, QHBoxLayout, QHeaderView, QLabel,
QMainWindow, QMenu, QMenuBar, QProgressBar,
QPushButton, QSizePolicy, QSpacerItem, QSpinBox,
QStatusBar, QTabWidget, QTableWidget, QTableWidgetItem,
QWidget)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(871, 648)
icon = QIcon()
icon.addFile(u"src/img/QIcon.png", QSize(), QIcon.Normal, QIcon.Off)
MainWindow.setWindowIcon(icon)
self.action = QAction(MainWindow)
self.action.setObjectName(u"action")
self.action.setCheckable(True)
self.action.setChecked(True)
self.action_open = QAction(MainWindow)
self.action_open.setObjectName(u"action_open")
icon1 = QIcon()
icon1.addFile(u"src/img/Open.png", QSize(), QIcon.Normal, QIcon.Off)
self.action_open.setIcon(icon1)
self.action_check_update = QAction(MainWindow)
self.action_check_update.setObjectName(u"action_check_update")
icon2 = QIcon()
icon2.addFile(u"src/img/Clockwise.png", QSize(), QIcon.Normal, QIcon.Off)
self.action_check_update.setIcon(icon2)
self.action_table = QAction(MainWindow)
self.action_table.setObjectName(u"action_table")
self.action_table.setEnabled(False)
self.action_wordCloudImage = QAction(MainWindow)
self.action_wordCloudImage.setObjectName(u"action_wordCloudImage")
self.action_wordCloudImage.setEnabled(False)
self.action_about = QAction(MainWindow)
self.action_about.setObjectName(u"action_about")
icon3 = QIcon()
icon3.addFile(u"src/img/Information.png", QSize(), QIcon.Normal, QIcon.Off)
self.action_about.setIcon(icon3)
self.actionJVM = QAction(MainWindow)
self.actionJVM.setObjectName(u"actionJVM")
icon4 = QIcon()
icon4.addFile(u"src/img/JVM.png", QSize(), QIcon.Normal, QIcon.Off)
self.actionJVM.setIcon(icon4)
self.action_preference = QAction(MainWindow)
self.action_preference.setObjectName(u"action_preference")
icon5 = QIcon()
icon5.addFile(u"src/img/Setting.png", QSize(), QIcon.Normal, QIcon.Off)
self.action_preference.setIcon(icon5)
self.action_textCleaningSetting = QAction(MainWindow)
self.action_textCleaningSetting.setObjectName(u"action_textCleaningSetting")
self.action_textCleaningSetting.setCheckable(True)
self.action_textCleaningSetting.setChecked(False)
self.action_textMiningSetting = QAction(MainWindow)
self.action_textMiningSetting.setObjectName(u"action_textMiningSetting")
self.action_textMiningSetting.setCheckable(True)
self.action_cutWordSetting = QAction(MainWindow)
self.action_cutWordSetting.setObjectName(u"action_cutWordSetting")
self.action_cutWordSetting.setCheckable(True)
self.action_wordCloudSetting = QAction(MainWindow)
self.action_wordCloudSetting.setObjectName(u"action_wordCloudSetting")
self.action_wordCloudSetting.setCheckable(True)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
self.gridLayout_6 = QGridLayout(self.centralwidget)
self.gridLayout_6.setObjectName(u"gridLayout_6")
self.horizontalLayout_20 = QHBoxLayout()
self.horizontalLayout_20.setObjectName(u"horizontalLayout_20")
self.horizontalSpacer_3 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_20.addItem(self.horizontalSpacer_3)
self.pushButton_UpTop = QPushButton(self.centralwidget)
self.pushButton_UpTop.setObjectName(u"pushButton_UpTop")
self.pushButton_UpTop.setMinimumSize(QSize(30, 25))
self.pushButton_UpTop.setMaximumSize(QSize(30, 25))
icon6 = QIcon()
icon6.addFile(u"src/img/Head.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_UpTop.setIcon(icon6)
self.horizontalLayout_20.addWidget(self.pushButton_UpTop)
self.pushButton_Up = QPushButton(self.centralwidget)
self.pushButton_Up.setObjectName(u"pushButton_Up")
self.pushButton_Up.setMinimumSize(QSize(30, 25))
self.pushButton_Up.setMaximumSize(QSize(30, 25))
icon7 = QIcon()
icon7.addFile(u"src/img/Up.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_Up.setIcon(icon7)
self.pushButton_Up.setAutoRepeat(True)
self.pushButton_Up.setAutoRepeatDelay(500)
self.horizontalLayout_20.addWidget(self.pushButton_Up)
self.spinBox_pageNow = QSpinBox(self.centralwidget)
self.spinBox_pageNow.setObjectName(u"spinBox_pageNow")
self.spinBox_pageNow.setMinimumSize(QSize(30, 25))
self.spinBox_pageNow.setMaximumSize(QSize(16777215, 25))
self.spinBox_pageNow.setLayoutDirection(Qt.LeftToRight)
self.spinBox_pageNow.setStyleSheet(u"background:transparent;")
self.spinBox_pageNow.setWrapping(False)
self.spinBox_pageNow.setFrame(False)
self.spinBox_pageNow.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.spinBox_pageNow.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.spinBox_pageNow.setProperty("showGroupSeparator", False)
self.spinBox_pageNow.setMinimum(1)
self.spinBox_pageNow.setMaximum(10000)
self.horizontalLayout_20.addWidget(self.spinBox_pageNow)
self.label_pageMax = QLabel(self.centralwidget)
self.label_pageMax.setObjectName(u"label_pageMax")
sizePolicy = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_pageMax.sizePolicy().hasHeightForWidth())
self.label_pageMax.setSizePolicy(sizePolicy)
self.label_pageMax.setMinimumSize(QSize(30, 25))
self.label_pageMax.setMaximumSize(QSize(16777215, 25))
self.horizontalLayout_20.addWidget(self.label_pageMax)
self.spinBox_everyPage = QSpinBox(self.centralwidget)
self.spinBox_everyPage.setObjectName(u"spinBox_everyPage")
self.spinBox_everyPage.setMinimumSize(QSize(30, 25))
self.spinBox_everyPage.setMaximumSize(QSize(16777215, 25))
self.spinBox_everyPage.setLayoutDirection(Qt.LeftToRight)
self.spinBox_everyPage.setStyleSheet(u"background:transparent;")
self.spinBox_everyPage.setWrapping(False)
self.spinBox_everyPage.setFrame(False)
self.spinBox_everyPage.setAlignment(Qt.AlignCenter)
self.spinBox_everyPage.setButtonSymbols(QAbstractSpinBox.NoButtons)
self.spinBox_everyPage.setProperty("showGroupSeparator", False)
self.spinBox_everyPage.setMinimum(100)
self.spinBox_everyPage.setMaximum(1000)
self.spinBox_everyPage.setSingleStep(50)
self.spinBox_everyPage.setValue(100)
self.horizontalLayout_20.addWidget(self.spinBox_everyPage)
self.label_pageMax_2 = QLabel(self.centralwidget)
self.label_pageMax_2.setObjectName(u"label_pageMax_2")
sizePolicy.setHeightForWidth(self.label_pageMax_2.sizePolicy().hasHeightForWidth())
self.label_pageMax_2.setSizePolicy(sizePolicy)
self.label_pageMax_2.setMinimumSize(QSize(30, 25))
self.label_pageMax_2.setMaximumSize(QSize(16777215, 25))
self.horizontalLayout_20.addWidget(self.label_pageMax_2)
self.pushButton_Down = QPushButton(self.centralwidget)
self.pushButton_Down.setObjectName(u"pushButton_Down")
self.pushButton_Down.setMinimumSize(QSize(30, 25))
self.pushButton_Down.setMaximumSize(QSize(30, 25))
icon8 = QIcon()
icon8.addFile(u"src/img/Next.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_Down.setIcon(icon8)
self.pushButton_Down.setAutoRepeat(True)
self.pushButton_Down.setAutoRepeatDelay(500)
self.horizontalLayout_20.addWidget(self.pushButton_Down)
self.pushButton_DownTop = QPushButton(self.centralwidget)
self.pushButton_DownTop.setObjectName(u"pushButton_DownTop")
self.pushButton_DownTop.setMinimumSize(QSize(30, 25))
self.pushButton_DownTop.setMaximumSize(QSize(30, 25))
icon9 = QIcon()
icon9.addFile(u"src/img/End.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_DownTop.setIcon(icon9)
self.horizontalLayout_20.addWidget(self.pushButton_DownTop)
self.horizontalSpacer_4 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_20.addItem(self.horizontalSpacer_4)
self.gridLayout_6.addLayout(self.horizontalLayout_20, 4, 0, 1, 1)
self.tableWidget = QTableWidget(self.centralwidget)
self.tableWidget.setObjectName(u"tableWidget")
self.gridLayout_6.addWidget(self.tableWidget, 3, 0, 1, 1)
self.tabWidget = QTabWidget(self.centralwidget)
self.tabWidget.setObjectName(u"tabWidget")
self.tabWidget.setMinimumSize(QSize(0, 25))
self.tabWidget.setMaximumSize(QSize(16777215, 25))
self.tabWidget.setAutoFillBackground(True)
self.tabWidget.setStyleSheet(u"\u201cbackground:transparent;border-width:0;border-style:outset\u201d")
self.tabWidget.setTabPosition(QTabWidget.South)
self.tabWidget.setDocumentMode(True)
self.tabWidget.setTabBarAutoHide(False)
self.tab = QWidget()
self.tab.setObjectName(u"tab")
self.tabWidget.addTab(self.tab, "")
self.gridLayout_6.addWidget(self.tabWidget, 5, 0, 1, 1)
self.progressBar = QProgressBar(self.centralwidget)
self.progressBar.setObjectName(u"progressBar")
self.progressBar.setMinimumSize(QSize(0, 25))
self.progressBar.setMaximumSize(QSize(16777215, 25))
self.progressBar.setValue(0)
self.gridLayout_6.addWidget(self.progressBar, 2, 0, 1, 1)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.comboBox_selectColumn = QComboBox(self.centralwidget)
self.comboBox_selectColumn.addItem("")
self.comboBox_selectColumn.setObjectName(u"comboBox_selectColumn")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.comboBox_selectColumn.sizePolicy().hasHeightForWidth())
self.comboBox_selectColumn.setSizePolicy(sizePolicy1)
self.comboBox_selectColumn.setMinimumSize(QSize(0, 35))
self.comboBox_selectColumn.setMaximumSize(QSize(16777215, 35))
font = QFont()
font.setPointSize(9)
self.comboBox_selectColumn.setFont(font)
self.horizontalLayout.addWidget(self.comboBox_selectColumn)
self.pushButton_start = QPushButton(self.centralwidget)
self.pushButton_start.setObjectName(u"pushButton_start")
sizePolicy1.setHeightForWidth(self.pushButton_start.sizePolicy().hasHeightForWidth())
self.pushButton_start.setSizePolicy(sizePolicy1)
self.pushButton_start.setMinimumSize(QSize(90, 35))
self.pushButton_start.setMaximumSize(QSize(90, 35))
icon10 = QIcon()
icon10.addFile(u"src/img/Run.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_start.setIcon(icon10)
self.horizontalLayout.addWidget(self.pushButton_start)
self.gridLayout_6.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.tabWidget_modelChoice = QTabWidget(self.centralwidget)
self.tabWidget_modelChoice.setObjectName(u"tabWidget_modelChoice")
self.tabWidget_modelChoice.setMaximumSize(QSize(16777215, 125))
self.tabWidget_modelChoice.setFont(font)
self.tab_0 = QWidget()
self.tab_0.setObjectName(u"tab_0")
self.gridLayout = QGridLayout(self.tab_0)
self.gridLayout.setObjectName(u"gridLayout")
self.gridLayout_3 = QGridLayout()
self.gridLayout_3.setObjectName(u"gridLayout_3")
self.checkBox_Chinese = QCheckBox(self.tab_0)
self.checkBox_Chinese.setObjectName(u"checkBox_Chinese")
self.checkBox_Chinese.setFont(font)
self.checkBox_Chinese.setChecked(True)
self.gridLayout_3.addWidget(self.checkBox_Chinese, 0, 0, 1, 1)
self.checkBox_ChinesePunctuation = QCheckBox(self.tab_0)
self.checkBox_ChinesePunctuation.setObjectName(u"checkBox_ChinesePunctuation")
self.checkBox_ChinesePunctuation.setFont(font)
self.gridLayout_3.addWidget(self.checkBox_ChinesePunctuation, 0, 1, 1, 1)
self.checkBox_Number = QCheckBox(self.tab_0)
self.checkBox_Number.setObjectName(u"checkBox_Number")
self.checkBox_Number.setFont(font)
self.gridLayout_3.addWidget(self.checkBox_Number, 0, 2, 1, 1)
self.checkBox_EnglishUpper = QCheckBox(self.tab_0)
self.checkBox_EnglishUpper.setObjectName(u"checkBox_EnglishUpper")
self.checkBox_EnglishUpper.setFont(font)
self.gridLayout_3.addWidget(self.checkBox_EnglishUpper, 1, 0, 1, 1)
self.checkBox_EnglishLower = QCheckBox(self.tab_0)
self.checkBox_EnglishLower.setObjectName(u"checkBox_EnglishLower")
self.checkBox_EnglishLower.setFont(font)
self.gridLayout_3.addWidget(self.checkBox_EnglishLower, 1, 1, 1, 1)
self.checkBox_EnglishPunctuation = QCheckBox(self.tab_0)
self.checkBox_EnglishPunctuation.setObjectName(u"checkBox_EnglishPunctuation")
self.checkBox_EnglishPunctuation.setFont(font)
self.gridLayout_3.addWidget(self.checkBox_EnglishPunctuation, 1, 2, 1, 1)
self.gridLayout.addLayout(self.gridLayout_3, 0, 0, 1, 1)
self.pushButton_textCleaningSetting = QPushButton(self.tab_0)
self.pushButton_textCleaningSetting.setObjectName(u"pushButton_textCleaningSetting")
sizePolicy2 = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.pushButton_textCleaningSetting.sizePolicy().hasHeightForWidth())
self.pushButton_textCleaningSetting.setSizePolicy(sizePolicy2)
self.pushButton_textCleaningSetting.setMinimumSize(QSize(120, 35))
self.pushButton_textCleaningSetting.setMaximumSize(QSize(120, 30))
self.pushButton_textCleaningSetting.setFont(font)
self.pushButton_textCleaningSetting.setIcon(icon5)
self.gridLayout.addWidget(self.pushButton_textCleaningSetting, 0, 1, 1, 1)
self.tabWidget_modelChoice.addTab(self.tab_0, "")
self.tab_1 = QWidget()
self.tab_1.setObjectName(u"tab_1")
self.gridLayout_5 = QGridLayout(self.tab_1)
self.gridLayout_5.setObjectName(u"gridLayout_5")
self.label_2 = QLabel(self.tab_1)
self.label_2.setObjectName(u"label_2")
self.label_2.setMinimumSize(QSize(0, 40))
self.label_2.setMaximumSize(QSize(16777215, 40))
self.label_2.setFont(font)
self.gridLayout_5.addWidget(self.label_2, 0, 0, 1, 1)
self.spinBox_wordMining = QSpinBox(self.tab_1)
self.spinBox_wordMining.setObjectName(u"spinBox_wordMining")
sizePolicy3 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
sizePolicy3.setHorizontalStretch(0)
sizePolicy3.setVerticalStretch(0)
sizePolicy3.setHeightForWidth(self.spinBox_wordMining.sizePolicy().hasHeightForWidth())
self.spinBox_wordMining.setSizePolicy(sizePolicy3)
self.spinBox_wordMining.setMinimumSize(QSize(60, 35))
self.spinBox_wordMining.setMaximumSize(QSize(60, 35))
self.spinBox_wordMining.setFont(font)
self.spinBox_wordMining.setMinimum(2)
self.spinBox_wordMining.setValue(4)
self.gridLayout_5.addWidget(self.spinBox_wordMining, 0, 1, 1, 1)
self.pushButton_wordMiningSetting = QPushButton(self.tab_1)
self.pushButton_wordMiningSetting.setObjectName(u"pushButton_wordMiningSetting")
sizePolicy2.setHeightForWidth(self.pushButton_wordMiningSetting.sizePolicy().hasHeightForWidth())
self.pushButton_wordMiningSetting.setSizePolicy(sizePolicy2)
self.pushButton_wordMiningSetting.setMinimumSize(QSize(120, 35))
self.pushButton_wordMiningSetting.setMaximumSize(QSize(120, 35))
self.pushButton_wordMiningSetting.setFont(font)
self.pushButton_wordMiningSetting.setIcon(icon5)
self.gridLayout_5.addWidget(self.pushButton_wordMiningSetting, 0, 2, 1, 1)
self.horizontalSpacer = QSpacerItem(606, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.gridLayout_5.addItem(self.horizontalSpacer, 0, 3, 1, 1)
self.tabWidget_modelChoice.addTab(self.tab_1, "")
self.tab_2 = QWidget()
self.tab_2.setObjectName(u"tab_2")
self.gridLayout_4 = QGridLayout(self.tab_2)
self.gridLayout_4.setObjectName(u"gridLayout_4")
self.pushButton_cutWordSetting = QPushButton(self.tab_2)
self.pushButton_cutWordSetting.setObjectName(u"pushButton_cutWordSetting")
sizePolicy2.setHeightForWidth(self.pushButton_cutWordSetting.sizePolicy().hasHeightForWidth())
self.pushButton_cutWordSetting.setSizePolicy(sizePolicy2)
self.pushButton_cutWordSetting.setMinimumSize(QSize(120, 35))
self.pushButton_cutWordSetting.setMaximumSize(QSize(120, 35))
self.pushButton_cutWordSetting.setFont(font)
self.pushButton_cutWordSetting.setIcon(icon5)
self.gridLayout_4.addWidget(self.pushButton_cutWordSetting, 0, 5, 1, 1)
self.spinBox_cutModelExtraction = QSpinBox(self.tab_2)
self.spinBox_cutModelExtraction.setObjectName(u"spinBox_cutModelExtraction")
self.spinBox_cutModelExtraction.setEnabled(True)
sizePolicy1.setHeightForWidth(self.spinBox_cutModelExtraction.sizePolicy().hasHeightForWidth())
self.spinBox_cutModelExtraction.setSizePolicy(sizePolicy1)
self.spinBox_cutModelExtraction.setMinimumSize(QSize(70, 35))
self.spinBox_cutModelExtraction.setMaximumSize(QSize(70, 35))
self.spinBox_cutModelExtraction.setFont(font)
self.spinBox_cutModelExtraction.setMinimum(1)
self.spinBox_cutModelExtraction.setValue(5)
self.gridLayout_4.addWidget(self.spinBox_cutModelExtraction, 0, 3, 1, 1)
self.comboBox_cutWordModelSelect = QComboBox(self.tab_2)
self.comboBox_cutWordModelSelect.addItem("")
self.comboBox_cutWordModelSelect.addItem("")
self.comboBox_cutWordModelSelect.addItem("")
self.comboBox_cutWordModelSelect.addItem("")
self.comboBox_cutWordModelSelect.addItem("")
self.comboBox_cutWordModelSelect.addItem("")
self.comboBox_cutWordModelSelect.setObjectName(u"comboBox_cutWordModelSelect")
sizePolicy1.setHeightForWidth(self.comboBox_cutWordModelSelect.sizePolicy().hasHeightForWidth())
self.comboBox_cutWordModelSelect.setSizePolicy(sizePolicy1)
self.comboBox_cutWordModelSelect.setMinimumSize(QSize(0, 35))
self.comboBox_cutWordModelSelect.setMaximumSize(QSize(16777215, 35))
self.comboBox_cutWordModelSelect.setFont(font)
self.gridLayout_4.addWidget(self.comboBox_cutWordModelSelect, 0, 1, 1, 1)
self.label_cutModelExtraction = QLabel(self.tab_2)
self.label_cutModelExtraction.setObjectName(u"label_cutModelExtraction")
self.label_cutModelExtraction.setMinimumSize(QSize(90, 40))
self.label_cutModelExtraction.setMaximumSize(QSize(90, 40))
self.label_cutModelExtraction.setFont(font)
self.gridLayout_4.addWidget(self.label_cutModelExtraction, 0, 2, 1, 1)
self.label = QLabel(self.tab_2)
self.label.setObjectName(u"label")
self.label.setMinimumSize(QSize(110, 40))
self.label.setMaximumSize(QSize(110, 40))
self.label.setFont(font)
self.gridLayout_4.addWidget(self.label, 0, 0, 1, 1)
self.checkBox_wordFrequency = QCheckBox(self.tab_2)
self.checkBox_wordFrequency.setObjectName(u"checkBox_wordFrequency")
self.checkBox_wordFrequency.setMinimumSize(QSize(110, 40))
self.checkBox_wordFrequency.setMaximumSize(QSize(110, 40))
self.checkBox_wordFrequency.setChecked(True)
self.gridLayout_4.addWidget(self.checkBox_wordFrequency, 0, 4, 1, 1)
self.tabWidget_modelChoice.addTab(self.tab_2, "")
self.tab_3 = QWidget()
self.tab_3.setObjectName(u"tab_3")
self.gridLayout_2 = QGridLayout(self.tab_3)
self.gridLayout_2.setObjectName(u"gridLayout_2")
self.label_8 = QLabel(self.tab_3)
self.label_8.setObjectName(u"label_8")
self.label_8.setMinimumSize(QSize(90, 40))
self.label_8.setMaximumSize(QSize(90, 40))
self.label_8.setFont(font)
self.gridLayout_2.addWidget(self.label_8, 0, 2, 1, 1)
self.spinBox_textClusteringParam = QSpinBox(self.tab_3)
self.spinBox_textClusteringParam.setObjectName(u"spinBox_textClusteringParam")
self.spinBox_textClusteringParam.setMinimumSize(QSize(70, 35))
self.spinBox_textClusteringParam.setMaximumSize(QSize(70, 35))
self.spinBox_textClusteringParam.setMinimum(2)
self.spinBox_textClusteringParam.setMaximum(9999)
self.gridLayout_2.addWidget(self.spinBox_textClusteringParam, 0, 3, 1, 1)
self.comboBox_textClusteringModelSelect = QComboBox(self.tab_3)
self.comboBox_textClusteringModelSelect.addItem("")
self.comboBox_textClusteringModelSelect.addItem("")
self.comboBox_textClusteringModelSelect.addItem("")
self.comboBox_textClusteringModelSelect.setObjectName(u"comboBox_textClusteringModelSelect")
sizePolicy1.setHeightForWidth(self.comboBox_textClusteringModelSelect.sizePolicy().hasHeightForWidth())
self.comboBox_textClusteringModelSelect.setSizePolicy(sizePolicy1)
self.comboBox_textClusteringModelSelect.setMinimumSize(QSize(0, 35))
self.comboBox_textClusteringModelSelect.setMaximumSize(QSize(16777215, 35))
self.comboBox_textClusteringModelSelect.setFont(font)
self.gridLayout_2.addWidget(self.comboBox_textClusteringModelSelect, 0, 1, 1, 1)
self.label_7 = QLabel(self.tab_3)
self.label_7.setObjectName(u"label_7")
self.label_7.setMinimumSize(QSize(110, 40))
self.label_7.setMaximumSize(QSize(110, 40))
self.label_7.setFont(font)
self.gridLayout_2.addWidget(self.label_7, 0, 0, 1, 1)
self.horizontalSpacer_2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.gridLayout_2.addItem(self.horizontalSpacer_2, 0, 4, 1, 1)
self.tabWidget_modelChoice.addTab(self.tab_3, "")
self.tab_4 = QWidget()
self.tab_4.setObjectName(u"tab_4")
self.gridLayout_7 = QGridLayout(self.tab_4)
self.gridLayout_7.setObjectName(u"gridLayout_7")
self.label_4 = QLabel(self.tab_4)
self.label_4.setObjectName(u"label_4")
self.label_4.setMinimumSize(QSize(80, 35))
self.label_4.setMaximumSize(QSize(80, 35))
self.label_4.setFont(font)
self.gridLayout_7.addWidget(self.label_4, 0, 0, 1, 1)
self.comboBox_wordCloudText = QComboBox(self.tab_4)
self.comboBox_wordCloudText.addItem("")
self.comboBox_wordCloudText.setObjectName(u"comboBox_wordCloudText")
sizePolicy1.setHeightForWidth(self.comboBox_wordCloudText.sizePolicy().hasHeightForWidth())
self.comboBox_wordCloudText.setSizePolicy(sizePolicy1)
self.comboBox_wordCloudText.setMinimumSize(QSize(0, 35))
self.comboBox_wordCloudText.setMaximumSize(QSize(16777215, 35))
self.comboBox_wordCloudText.setFont(font)
self.gridLayout_7.addWidget(self.comboBox_wordCloudText, 0, 1, 1, 1)
self.label_6 = QLabel(self.tab_4)
self.label_6.setObjectName(u"label_6")
self.label_6.setMinimumSize(QSize(50, 35))
self.label_6.setMaximumSize(QSize(50, 35))
self.label_6.setFont(font)
self.gridLayout_7.addWidget(self.label_6, 0, 2, 1, 1)
self.spinBox_wordCloudImgWidth = QSpinBox(self.tab_4)
self.spinBox_wordCloudImgWidth.setObjectName(u"spinBox_wordCloudImgWidth")
self.spinBox_wordCloudImgWidth.setMinimumSize(QSize(80, 35))
self.spinBox_wordCloudImgWidth.setMaximumSize(QSize(80, 35))
self.spinBox_wordCloudImgWidth.setMinimum(400)
self.spinBox_wordCloudImgWidth.setMaximum(4096)
self.spinBox_wordCloudImgWidth.setValue(400)
self.gridLayout_7.addWidget(self.spinBox_wordCloudImgWidth, 0, 3, 1, 1)
self.pushButton_wordCloudSetting = QPushButton(self.tab_4)
self.pushButton_wordCloudSetting.setObjectName(u"pushButton_wordCloudSetting")
sizePolicy2.setHeightForWidth(self.pushButton_wordCloudSetting.sizePolicy().hasHeightForWidth())
self.pushButton_wordCloudSetting.setSizePolicy(sizePolicy2)
self.pushButton_wordCloudSetting.setMinimumSize(QSize(120, 35))
self.pushButton_wordCloudSetting.setMaximumSize(QSize(120, 35))
self.pushButton_wordCloudSetting.setFont(font)
self.pushButton_wordCloudSetting.setIcon(icon5)
self.gridLayout_7.addWidget(self.pushButton_wordCloudSetting, 0, 4, 1, 2)
self.label_5 = QLabel(self.tab_4)
self.label_5.setObjectName(u"label_5")
self.label_5.setMinimumSize(QSize(80, 35))
self.label_5.setMaximumSize(QSize(80, 35))
self.label_5.setFont(font)
self.gridLayout_7.addWidget(self.label_5, 1, 0, 1, 1)
self.comboBox_wordCloudRank = QComboBox(self.tab_4)
self.comboBox_wordCloudRank.addItem("")
self.comboBox_wordCloudRank.setObjectName(u"comboBox_wordCloudRank")
sizePolicy1.setHeightForWidth(self.comboBox_wordCloudRank.sizePolicy().hasHeightForWidth())
self.comboBox_wordCloudRank.setSizePolicy(sizePolicy1)
self.comboBox_wordCloudRank.setMinimumSize(QSize(0, 35))
self.comboBox_wordCloudRank.setMaximumSize(QSize(16777215, 35))
self.comboBox_wordCloudRank.setFont(font)
self.gridLayout_7.addWidget(self.comboBox_wordCloudRank, 1, 1, 1, 1)
self.label_9 = QLabel(self.tab_4)
self.label_9.setObjectName(u"label_9")
self.label_9.setMinimumSize(QSize(50, 35))
self.label_9.setMaximumSize(QSize(50, 35))
self.label_9.setFont(font)
self.gridLayout_7.addWidget(self.label_9, 1, 2, 1, 1)
self.spinBox_wordCloudImgHeight = QSpinBox(self.tab_4)
self.spinBox_wordCloudImgHeight.setObjectName(u"spinBox_wordCloudImgHeight")
self.spinBox_wordCloudImgHeight.setMinimumSize(QSize(80, 35))
self.spinBox_wordCloudImgHeight.setMaximumSize(QSize(80, 35))
self.spinBox_wordCloudImgHeight.setMinimum(300)
self.spinBox_wordCloudImgHeight.setMaximum(2160)
self.spinBox_wordCloudImgHeight.setSingleStep(1)
self.spinBox_wordCloudImgHeight.setValue(300)
self.gridLayout_7.addWidget(self.spinBox_wordCloudImgHeight, 1, 3, 1, 1)
self.pushButton_makeWordCloud = QPushButton(self.tab_4)
self.pushButton_makeWordCloud.setObjectName(u"pushButton_makeWordCloud")
self.pushButton_makeWordCloud.setMinimumSize(QSize(90, 35))
self.pushButton_makeWordCloud.setMaximumSize(QSize(90, 35))
icon11 = QIcon()
icon11.addFile(u"src/img/Pencil.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_makeWordCloud.setIcon(icon11)
self.gridLayout_7.addWidget(self.pushButton_makeWordCloud, 1, 4, 1, 1)
self.pushButton_viewWordCloud = QPushButton(self.tab_4)
self.pushButton_viewWordCloud.setObjectName(u"pushButton_viewWordCloud")
sizePolicy1.setHeightForWidth(self.pushButton_viewWordCloud.sizePolicy().hasHeightForWidth())
self.pushButton_viewWordCloud.setSizePolicy(sizePolicy1)
self.pushButton_viewWordCloud.setMinimumSize(QSize(90, 35))
self.pushButton_viewWordCloud.setMaximumSize(QSize(90, 35))
icon12 = QIcon()
icon12.addFile(u"src/img/Preview.png", QSize(), QIcon.Normal, QIcon.Off)
self.pushButton_viewWordCloud.setIcon(icon12)
self.gridLayout_7.addWidget(self.pushButton_viewWordCloud, 1, 5, 1, 1)
self.tabWidget_modelChoice.addTab(self.tab_4, "")
self.gridLayout_6.addWidget(self.tabWidget_modelChoice, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setObjectName(u"menubar")
self.menubar.setGeometry(QRect(0, 0, 871, 30))
self.file_menu = QMenu(self.menubar)
self.file_menu.setObjectName(u"file_menu")
self.menu_save = QMenu(self.file_menu)
self.menu_save.setObjectName(u"menu_save")
self.menu_save.setEnabled(True)
icon13 = QIcon()
icon13.addFile(u"src/img/Export.png", QSize(), QIcon.Normal, QIcon.Off)
self.menu_save.setIcon(icon13)
self.menu_table = QMenu(self.menu_save)
self.menu_table.setObjectName(u"menu_table")
self.menu_table.setEnabled(True)
icon14 = QIcon()
icon14.addFile(u"src/img/Table.png", QSize(), QIcon.Normal, QIcon.Off)
self.menu_table.setIcon(icon14)
self.menu_image = QMenu(self.menu_save)
self.menu_image.setObjectName(u"menu_image")
self.menu_image.setEnabled(True)
icon15 = QIcon()
icon15.addFile(u"src/img/Photos.png", QSize(), QIcon.Normal, QIcon.Off)
self.menu_image.setIcon(icon15)
self.menu_2 = QMenu(self.menubar)
self.menu_2.setObjectName(u"menu_2")
self.menu = QMenu(self.menubar)
self.menu.setObjectName(u"menu")
self.menu_3 = QMenu(self.menubar)
self.menu_3.setObjectName(u"menu_3")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName(u"statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.file_menu.menuAction())
self.menubar.addAction(self.menu_3.menuAction())
self.menubar.addAction(self.menu.menuAction())
self.menubar.addAction(self.menu_2.menuAction())
self.file_menu.addAction(self.action_open)
self.file_menu.addAction(self.menu_save.menuAction())
self.menu_save.addAction(self.menu_table.menuAction())
self.menu_save.addAction(self.menu_image.menuAction())
self.menu_table.addAction(self.action_table)
self.menu_image.addAction(self.action_wordCloudImage)
self.menu_2.addAction(self.action_check_update)
self.menu_2.addAction(self.action_about)
self.menu.addAction(self.action_preference)
self.menu.addAction(self.actionJVM)
self.menu_3.addAction(self.action_textCleaningSetting)
self.menu_3.addAction(self.action_textMiningSetting)
self.menu_3.addAction(self.action_cutWordSetting)
self.menu_3.addAction(self.action_wordCloudSetting)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.tabWidget_modelChoice.setCurrentIndex(0)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"Text Tools", None))
self.action.setText(QCoreApplication.translate("MainWindow", u"\u6587\u672c\u6e05\u6d17", None))
self.action_open.setText(QCoreApplication.translate("MainWindow", u"\u6253\u5f00", None))
self.action_check_update.setText(QCoreApplication.translate("MainWindow", u"\u68c0\u67e5\u66f4\u65b0", None))
self.action_table.setText(QCoreApplication.translate("MainWindow", u"\u65e0\u6570\u636e\u53ef\u4fdd\u5b58", None))
self.action_wordCloudImage.setText(QCoreApplication.translate("MainWindow", u"\u65e0\u56fe\u7247\u53ef\u4fdd\u5b58", None))
self.action_about.setText(QCoreApplication.translate("MainWindow", u"\u5173\u4e8e", None))
self.actionJVM.setText(QCoreApplication.translate("MainWindow", u"JVM\u8bbe\u7f6e", None))
self.action_preference.setText(QCoreApplication.translate("MainWindow", u"\u9996\u9009\u9879", None))
self.action_textCleaningSetting.setText(QCoreApplication.translate("MainWindow", u"\u6587\u672c\u6e05\u6d17\u8bbe\u7f6e", None))
self.action_textMiningSetting.setText(QCoreApplication.translate("MainWindow", u"\u8bcd\u8bed\u6316\u6398\u8bbe\u7f6e", None))
self.action_cutWordSetting.setText(QCoreApplication.translate("MainWindow", u"\u6587\u672c\u5206\u8bcd\u8bbe\u7f6e", None))
self.action_wordCloudSetting.setText(QCoreApplication.translate("MainWindow", u"\u8bcd\u4e91\u56fe\u8bbe\u7f6e", None))
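# English equivalents of the escaped Chinese UI strings above, in order: "Text cleaning",
# "Open", "Check for updates", "No data available to save", "No image available to save",
# "About", "JVM settings", "Preferences", "Text cleaning settings", "Word mining settings",
# "Text segmentation settings", "Word cloud settings". The status tips set below are the
# pager hints "First page", "Previous page", "Current page number", "Records per page",
# "Next page" and "Last page"; the tab and combo box placeholders read "No data" and
# "Please select a column of data".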
#if QT_CONFIG(statustip)
self.pushButton_UpTop.setStatusTip(QCoreApplication.translate("MainWindow", u"\u9996\u9875", None))
#endif // QT_CONFIG(statustip)
#if QT_CONFIG(whatsthis)
self.pushButton_UpTop.setWhatsThis(QCoreApplication.translate("MainWindow", u"\u9996\u9875", None))
#endif // QT_CONFIG(whatsthis)
self.pushButton_UpTop.setText("")
#if QT_CONFIG(statustip)
self.pushButton_Up.setStatusTip(QCoreApplication.translate("MainWindow", u"\u4e0a\u4e00\u9875", None))
#endif // QT_CONFIG(statustip)
#if QT_CONFIG(whatsthis)
self.pushButton_Up.setWhatsThis(QCoreApplication.translate("MainWindow", u"\u4e0a\u4e00\u9875", None))
#endif // QT_CONFIG(whatsthis)
self.pushButton_Up.setText("")
#if QT_CONFIG(statustip)
self.spinBox_pageNow.setStatusTip(QCoreApplication.translate("MainWindow", u"\u5f53\u524d\u9875\u7801", None))
#endif // QT_CONFIG(statustip)
#if QT_CONFIG(whatsthis)
self.spinBox_pageNow.setWhatsThis(QCoreApplication.translate("MainWindow", u"\u5f53\u524d\u9875\u7801", None))
#endif // QT_CONFIG(whatsthis)
self.label_pageMax.setText(QCoreApplication.translate("MainWindow", u"/1 (\u6bcf\u9875", None))
#if QT_CONFIG(statustip)
self.spinBox_everyPage.setStatusTip(QCoreApplication.translate("MainWindow", u"\u6bcf\u9875\u8bb0\u5f55", None))
#endif // QT_CONFIG(statustip)
#if QT_CONFIG(whatsthis)
self.spinBox_everyPage.setWhatsThis(QCoreApplication.translate("MainWindow", u"\u6bcf\u9875\u8bb0\u5f55", None))
#endif // QT_CONFIG(whatsthis)
self.label_pageMax_2.setText(QCoreApplication.translate("MainWindow", u"\u6761\u8bb0\u5f55)", None))
#if QT_CONFIG(statustip)
self.pushButton_Down.setStatusTip(QCoreApplication.translate("MainWindow", u"\u4e0b\u4e00\u9875", None))
#endif // QT_CONFIG(statustip)
#if QT_CONFIG(whatsthis)
self.pushButton_Down.setWhatsThis(QCoreApplication.translate("MainWindow", u"\u4e0b\u4e00\u9875", None))
#endif // QT_CONFIG(whatsthis)
self.pushButton_Down.setText("")
#if QT_CONFIG(statustip)
self.pushButton_DownTop.setStatusTip(QCoreApplication.translate("MainWindow", u"\u5c3e\u9875", None))
#endif // QT_CONFIG(statustip)
#if QT_CONFIG(whatsthis)
self.pushButton_DownTop.setWhatsThis(QCoreApplication.translate("MainWindow", u"\u5c3e\u9875", None))
#endif // QT_CONFIG(whatsthis)
self.pushButton_DownTop.setText("")
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QCoreApplication.translate("MainWindow", u"\u65e0\u6570\u636e", None))
self.comboBox_selectColumn.setItemText(0, QCoreApplication.translate("MainWindow", u"\u8bf7\u9009\u62e9\u4e00\u5217\u6570\u636e", None))
#!/usr/bin/env python
"""
Created on Mon Jun 15 21:49:32 2015
@author: <NAME>
"""
import sys, os, glob, textwrap, itertools
from optparse import OptionParser
from collections import defaultdict
from os.path import splitext
from pdbx.reader.PdbxReader import PdbxReader
from multiprocessing import Pool
DEBUG_MODE = 0  # verbosity level used below: 0 = quiet, 1 = report errors, 2 = also report details
MIN_SEQ_LEN = None
SCOP_LIBRARY = False
THREE2ONE = {
'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P',
'THR': 'T', 'PHE': 'F', 'ASN': 'N', 'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R',
'TRP': 'W', 'ALA': 'A', 'VAL': 'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'MSE': 'M',
'HYP': 'P', 'MLY': 'K', 'SEP': 'S', 'TPO': 'T', 'CSO': 'C', 'PTR': 'Y', 'KCX': 'K',
'CME': 'C', 'CSD': 'A', 'CAS': 'C', 'MLE': 'L', 'DAL': 'A', 'CGU': 'E', 'DLE': 'L',
'FME': 'M', 'DVA': 'V', 'OCS': 'C', 'DPR': 'P', 'MVA': 'V', 'TYS': 'Y', 'M3L': 'K',
'SMC': 'C', 'ALY': 'K', 'CSX': 'C', 'DCY': 'C', 'NLE': 'L', 'DGL': 'E', 'DSN': 'S',
'CSS': 'C', 'DLY': 'K', 'MLZ': 'K', 'DPN': 'F', 'DAR': 'R', 'PHI': 'F', 'IAS': 'D',
'DAS': 'D', 'HIC': 'H', 'MP8': 'P', 'DTH': 'T', 'DIL': 'I', 'MEN': 'N', 'DTY': 'Y',
'CXM': 'M', 'DGN': 'G', 'DTR': 'W', 'SAC': 'S', 'DSG': 'N', 'MME': 'M', 'MAA': 'A',
'YOF': 'Y', 'FP9': 'P', 'FVA': 'V', 'MLU': 'L', 'OMY': 'Y', 'FGA': 'E', 'MEA': 'F',
'CMH': 'C', 'DHI': 'H', 'SEC': 'C', 'OMZ': 'Y', 'SCY': 'C', 'MHO': 'M', 'MED': 'M',
'CAF': 'C', 'NIY': 'Y', 'OAS': 'S', 'SCH': 'C', 'MK8': 'L', 'SME': 'M', 'LYZ': 'K'
}
CANONICAL_RESIDUES = set(['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P',
'Q', 'R', 'S', 'T', 'V', 'W', 'Y'])
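# THREE2ONE maps three-letter residue codes, including common modified residues such as MSE,
# SEP or HYP, to the one-letter code of their parent amino acid; residues without an entry are
# emitted as 'X' when the atom records are read. CANONICAL_RESIDUES is the plain 20-letter
# amino-acid alphabet, presumably used by parse_seq() to clean the entity_poly sequences.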
class CIF2FASTA(object):
def __init__(self, cif_path):
self.cif_path = cif_path
self.block = self.open_cif()
def open_cif(self):
""" Assumes a mmCif file and returns a data block used for subsequent procedures. """
# The "usual" procedure to open a mmCIF with pdbX/mmCIF
with open(self.cif_path) as cif_fh:
data = []
reader = PdbxReader(cif_fh)
reader.read(data)
if len(data) == 0:
return None
else:
return data[0]
def is_valid(self):
return self.block is not None
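# Typical use, as a sketch: p = CIF2FASTA(path); if p.is_valid(): seqs = p.chain_to_seq()
# where seqs maps chain identifiers to cleaned one-letter sequences, or is False on failure.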
def chain_to_seq(self):
"""Extracts the sequence of the cif from entity_poly.pdbx_seq_one_letter_code"""
cif_chain_to_seq = dict()
non_polypeptide_chains = list()
try:
entity_poly = self.block.getObj('entity_poly')
except AttributeError:
if DEBUG_MODE > 0:
print ('! {pdb} Could not extract entity_poly table.'.format(
pdb = self.pdb_entry()))
return False
try:
total_rows = entity_poly.getRowCount()
except AttributeError:
print ('! {pdb} Could not extract rows from entity_poly.'.format(
pdb = self.pdb_entry()))
return False
for row in range(0, total_rows):
if entity_poly.getValue('type', row) == 'polypeptide(L)':
seq = entity_poly.getValue('pdbx_seq_one_letter_code', row)
parsed_seq = parse_seq(seq) # removes special amino acids and newlines
try:
chains = entity_poly.getValue('pdbx_strand_id', row)
except ValueError:
if total_rows == 1:
print ('! {pdb} Only one polypeptide chain, but no chain identifiers, setting it to ".".'.format(
pdb = self.pdb_entry()))
cif_chain_to_seq['.'] = parsed_seq
return cif_chain_to_seq
print ('! {pdb} Could not extract pdbx_strand_id from entity_poly table (polypeptide).'.format(
pdb = self.pdb_entry()))
return False
chain_list = chains.split(',')
for chain in chain_list:
cif_chain_to_seq[chain] = parsed_seq
else:
try:
chains = entity_poly.getValue('pdbx_strand_id', row)
except ValueError:
print ('! {pdb} Could not extract pdbx_strand_id from entity_poly table (non-polypeptide).'.format(
pdb = self.pdb_entry()))
return False
non_polypeptide_chains.append(chains)
chains = list(cif_chain_to_seq.keys())
# remove chains that contain only unknown residues
for chain in chains:
# check whether the sequence consists only of unknown ('X') residues
tmp_set = set(cif_chain_to_seq[chain])
if len(tmp_set) == 1 and 'X' in tmp_set:
print ('! Removing {pdb}_{chain} (contains only unknown residues).'.format(
pdb = self.pdb_entry(),
chain = chain))
del cif_chain_to_seq[chain]
continue
if len(cif_chain_to_seq[chain]) < MIN_SEQ_LEN:
print ('! Removing {pdb}_{chain} (sequence length < {min_len}).'.format(
pdb = self.pdb_entry(),
chain = chain,
min_len = MIN_SEQ_LEN))
del cif_chain_to_seq[chain]
if len(cif_chain_to_seq) != 0:
if DEBUG_MODE > 1:
print ('- Extracted chains of {pdb} {chains}.'.format(
pdb = self.pdb_entry(),
chains = ' '.join( str(chain) + ' (' + str(len(cif_chain_to_seq[chain])) + ')' for chain in cif_chain_to_seq.keys())))
if len(non_polypeptide_chains) != 0:
print ('- The following chains were non-polypeptide chains and were skipped: {chains}.'.format(
chains = ', '.join(non_polypeptide_chains)))
return cif_chain_to_seq
else:
if DEBUG_MODE > 0:
print ('! {pdb} No polypeptide chains were found.'.format(
pdb = self.pdb_entry()))
return False
def chain_ratios(self, chain_to_seq):
""" Tries to extract Sequence from the atom section """
# chain_to_seq = self.chain_to_seq()
if chain_to_seq is not False:
chain_ratios = dict()
# compute the lengths of sequences found in _entity_poly
entity_length = { chain : float(len(seq)) for chain, seq in chain_to_seq.items() }
entity_chains = list(entity_length.keys())  # materialize as a list so it can be indexed below
# load the atomsite and set up dictionary to keep track of sequences
atom_site = self.block.getObj('atom_site')
atom_seq = defaultdict(str)
current_residue = 0
# Iterate through the atom_site section of the cif file
for atom_row in range(0, atom_site.getRowCount()):
# NMR structures contain many conformers; only the first model is used
try:
model_num = int(atom_site.getValue('pdbx_PDB_model_num', atom_row))
except ValueError:
model_num = 1
if model_num > 1:
continue
atom_chain = atom_site.getValue('label_asym_id', atom_row)
# get the alternative chain identifier too
try:
alt_chain = atom_site.getValue('auth_asym_id', atom_row)
except ValueError:
alt_chain = None
# handle cases where there are no chains but only one structure
if atom_chain == '.' and entity_chains[0] == '.':
atom_chain = '.'
# get the residue and the residue number
try:
res_num = int(atom_site.getValue("label_seq_id", atom_row))
except ValueError:
continue
if res_num != current_residue:
residue = atom_site.getValue('label_comp_id', atom_row)
try:
residue = THREE2ONE[residue]
except KeyError:
residue = 'X'
# try to get the chain identifier from alt_chain first, if this does not work use label_asym_id
if alt_chain is not None:
atom_seq[alt_chain] += residue
# sometimes the correct chain identifier is label_asym_id rather than auth_asym_id;
# note this branch only adds to atom_chain while it has no entry in atom_seq yet
if atom_chain not in atom_seq and atom_chain is not None:
atom_seq[atom_chain] += residue
current_residue = res_num
for chain in entity_length.keys():
if chain in atom_seq.keys():
chain_ratios[chain] = len(atom_seq[chain]) / entity_length[chain]
else:
chain_ratios[chain] = 0
return chain_ratios
else:
return False
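# A ratio close to 1.0 means nearly every residue declared in entity_poly is present in the
# atom_site records (i.e. resolved in the structure); values near 0 flag chains that are
# declared but largely unmodelled.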
def pdb_entry(self):
"""Extracts the PDB entry information of a cif file."""
try:
entry = self.block.getObj('entry')
entry_id = entry.getValue('id')
return entry_id.replace('\n', ' ')
except AttributeError:
if DEBUG_MODE > 0:
# report the file path; calling self.pdb_entry() here would recurse into the same failure
print ('! {path} Could not extract id from entry.'.format(
path = self.cif_path))
def protein_description(self):
"""Extracts the protein description annotated in struct.pdbx_descriptor of the cif file."""
try:
# Get struct table which contains the protein description
struct = self.block.getObj('struct')
# Get the pdbx_descriptor and format it appropriately
protein_description = struct.getValue('pdbx_descriptor')
protein_description = protein_description.replace('\n', ' ')
protein_description = protein_description.replace(';', ' ') # to prevent parsing errors
if len(protein_description.split(' ')) >= 5:
protein_description = ' '.join(protein_description.split(' ')[0:5]) # maximum of 5 words in header
return protein_description.strip(',')
except AttributeError:
if DEBUG_MODE > 1:
print ('! {pdb} Could not extract pdbx_descriptor from struct table.'.format(
pdb = self.pdb_entry()))
return False
def compounds(self):
""" Extracts all compounds annotated in the HETATM section of the atom
struct table if the compound appears at least 10 times and is not water
(HOH)."""
atom_site = self.block.getObj('atom_site')
compounds = {}
for row in range(0, atom_site.getRowCount()):
if atom_site.getValue('group_PDB', row) == 'HETATM':
label_comp_id = atom_site.getValue('label_comp_id', row)
if label_comp_id not in compounds.keys():
compounds[label_comp_id] = 1
else:
compounds[label_comp_id] += 1
filtered_compounds = set()
for compound in compounds.keys():
if compounds[compound] >= 10 and compound != 'HOH':
filtered_compounds.add(compound)
if len(filtered_compounds) == 0:
return False
else:
return ', '.join(filtered_compounds).replace('\n', ' ')
def resolution(self):
"""Extracts the resolution of the mmCIF."""
try:
refine = self.block.getObj('refine')
resolution = refine.getValue('ls_d_res_high')
try:
resolution = float(resolution)
except ValueError:
return False
return resolution
except AttributeError:
if DEBUG_MODE > 1:
print ('! {pdb} Could not extract ls_d_res_high from refine table.'.format(
pdb = self.pdb_entry()))
try:
reflns = self.block.getObj('reflns')
# Extract the resolution of the crystal
resolution = reflns.getValue('d_resolution_high')
try:
resolution = float(resolution)
except ValueError:
return False
return resolution
except AttributeError:
if DEBUG_MODE > 1:
print ('! {pdb} Could not extract d_resolution_high from reflns table.'.format(
pdb = self.pdb_entry()))
# This is true for some Electron Microscopy structures
try:
em_3d = self.block.getObj('em_3d_reconstruction')
resolution = em_3d.getValue('resolution')
try:
resolution = float(resolution)
except ValueError:
return False
return resolution
@hybrid_property
def work(self):
return self.entity1
@hybrid_property
def work_id(self):
return self.entity1_id
class LinkReleaseGroupReleaseGroup(Base):
__tablename__ = 'l_release_group_release_group'
__table_args__ = (
Index('l_release_group_release_group_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_release_group_release_group_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_release_group_release_group_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_release_group_release_group_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_release_group_release_group_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('ReleaseGroup', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('ReleaseGroup', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def release_group0(self):
return self.entity0
@hybrid_property
def release_group0_id(self):
return self.entity0_id
@hybrid_property
def release_group1(self):
return self.entity1
@hybrid_property
def release_group1_id(self):
return self.entity1_id
class LinkReleaseGroupSeries(Base):
__tablename__ = 'l_release_group_series'
__table_args__ = (
Index('l_release_group_series_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_release_group_series_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_release_group_series_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_release_group_series_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_release_group_series_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('ReleaseGroup', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Series', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def release_group(self):
return self.entity0
@hybrid_property
def release_group_id(self):
return self.entity0_id
@hybrid_property
def series(self):
return self.entity1
@hybrid_property
def series_id(self):
return self.entity1_id
class LinkReleaseGroupURL(Base):
__tablename__ = 'l_release_group_url'
__table_args__ = (
Index('l_release_group_url_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_release_group_url_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_release_group_url_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_release_group_url_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_release_group_url_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('ReleaseGroup', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('URL', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def release_group(self):
return self.entity0
@hybrid_property
def release_group_id(self):
return self.entity0_id
@hybrid_property
def url(self):
return self.entity1
@hybrid_property
def url_id(self):
return self.entity1_id
class LinkReleaseGroupWork(Base):
__tablename__ = 'l_release_group_work'
__table_args__ = (
Index('l_release_group_work_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_release_group_work_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_release_group_work_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('release_group.id', 'musicbrainz'), name='l_release_group_work_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='l_release_group_work_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('ReleaseGroup', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Work', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def release_group(self):
return self.entity0
@hybrid_property
def release_group_id(self):
return self.entity0_id
@hybrid_property
def work(self):
return self.entity1
@hybrid_property
def work_id(self):
return self.entity1_id
class LinkSeriesSeries(Base):
__tablename__ = 'l_series_series'
__table_args__ = (
Index('l_series_series_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_series_series_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_series_series_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_series_series_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_series_series_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('Series', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Series', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def series0(self):
return self.entity0
@hybrid_property
def series0_id(self):
return self.entity0_id
@hybrid_property
def series1(self):
return self.entity1
@hybrid_property
def series1_id(self):
return self.entity1_id
class LinkSeriesURL(Base):
__tablename__ = 'l_series_url'
__table_args__ = (
Index('l_series_url_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_series_url_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_series_url_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_series_url_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_series_url_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('Series', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('URL', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def series(self):
return self.entity0
@hybrid_property
def series_id(self):
return self.entity0_id
@hybrid_property
def url(self):
return self.entity1
@hybrid_property
def url_id(self):
return self.entity1_id
class LinkSeriesWork(Base):
__tablename__ = 'l_series_work'
__table_args__ = (
Index('l_series_work_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_series_work_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_series_work_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('series.id', 'musicbrainz'), name='l_series_work_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='l_series_work_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('Series', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Work', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def series(self):
return self.entity0
@hybrid_property
def series_id(self):
return self.entity0_id
@hybrid_property
def work(self):
return self.entity1
@hybrid_property
def work_id(self):
return self.entity1_id
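# All l_<entity0>_<entity1> classes in this module share the same shape: a link foreign key,
# the two entity foreign keys, per-endpoint credit strings, a link_order column and edit
# metadata, plus hybrid_property aliases exposing entity0/entity1 under friendlier names.
# Query sketch (assumes a configured Session named `session` and that Work defines `name`):
#     session.query(LinkSeriesWork).join(LinkSeriesWork.entity1).filter(Work.name == 'Some Work')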
class LinkURLURL(Base):
__tablename__ = 'l_url_url'
__table_args__ = (
Index('l_url_url_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_url_url_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_url_url_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_url_url_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_url_url_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('URL', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('URL', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def url0(self):
return self.entity0
@hybrid_property
def url0_id(self):
return self.entity0_id
@hybrid_property
def url1(self):
return self.entity1
@hybrid_property
def url1_id(self):
return self.entity1_id
class LinkURLWork(Base):
__tablename__ = 'l_url_work'
__table_args__ = (
Index('l_url_work_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_url_work_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_url_work_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('url.id', 'musicbrainz'), name='l_url_work_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='l_url_work_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('URL', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Work', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def url(self):
return self.entity0
@hybrid_property
def url_id(self):
return self.entity0_id
@hybrid_property
def work(self):
return self.entity1
@hybrid_property
def work_id(self):
return self.entity1_id
class LinkWorkWork(Base):
__tablename__ = 'l_work_work'
__table_args__ = (
Index('l_work_work_idx_uniq', 'entity0', 'entity1', 'link', 'link_order', unique=True),
Index('l_work_work_idx_entity1', 'entity1'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
link_id = Column('link', Integer, ForeignKey(apply_schema('link.id', 'musicbrainz'), name='l_work_work_fk_link'), nullable=False)
entity0_id = Column('entity0', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='l_work_work_fk_entity0'), nullable=False)
entity1_id = Column('entity1', Integer, ForeignKey(apply_schema('work.id', 'musicbrainz'), name='l_work_work_fk_entity1'), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
link_order = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
entity0_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
entity1_credit = Column(String, default='', server_default=sql.text("''"), nullable=False)
link = relationship('Link', foreign_keys=[link_id], innerjoin=True)
entity0 = relationship('Work', foreign_keys=[entity0_id], innerjoin=True)
entity1 = relationship('Work', foreign_keys=[entity1_id], innerjoin=True)
@hybrid_property
def work0(self):
return self.entity0
@hybrid_property
def work0_id(self):
return self.entity0_id
@hybrid_property
def work1(self):
return self.entity1
@hybrid_property
def work1_id(self):
return self.entity1_id
class Label(Base):
__tablename__ = 'label'
__table_args__ = (
Index('label_idx_gid', 'gid', unique=True),
Index('label_idx_name', 'name'),
Index('label_idx_area', 'area'),
Index('label_idx_null_comment', 'name', unique=True),
Index('label_idx_uniq_name_comment', 'name', 'comment', unique=True),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
id = Column(Integer, primary_key=True)
gid = Column(UUID, nullable=False)
name = Column(String, nullable=False)
begin_date_year = Column(SMALLINT)
begin_date_month = Column(SMALLINT)
begin_date_day = Column(SMALLINT)
end_date_year = Column(SMALLINT)
end_date_month = Column(SMALLINT)
end_date_day = Column(SMALLINT)
label_code = Column(Integer)
type_id = Column('type', Integer, ForeignKey(apply_schema('label_type.id', 'musicbrainz'), name='label_fk_type'))
area_id = Column('area', Integer, ForeignKey(apply_schema('area.id', 'musicbrainz'), name='label_fk_area'))
comment = Column(String(255), default='', server_default=sql.text("''"), nullable=False)
edits_pending = Column(Integer, default=0, server_default=sql.text('0'), nullable=False)
last_updated = Column(DateTime(timezone=True), server_default=sql.func.now())
ended = Column(Boolean, default=False, server_default=sql.false(), nullable=False)
type = relationship('LabelType', foreign_keys=[type_id])
area = relationship('Area', foreign_keys=[area_id])
begin_date = composite(PartialDate, begin_date_year, begin_date_month, begin_date_day)
end_date = composite(PartialDate, end_date_year, end_date_month, end_date_day)
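# begin_date and end_date are composite PartialDate attributes assembled from the three
# *_year/_month/_day columns, so callers can read label.begin_date rather than the raw parts.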
class LabelRatingRaw(Base):
__tablename__ = 'label_rating_raw'
__table_args__ = (
Index('label_rating_raw_idx_label', 'label'),
Index('label_rating_raw_idx_editor', 'editor'),
{'schema': mbdata.config.schemas.get('musicbrainz', 'musicbrainz')}
)
label_id = Column('label', Integer, ForeignKey(apply_schema('label.id', 'musicbrainz'), name='label_rating_raw_fk_label'), primary_key=True, nullable=False)
editor_id = Column('editor', Integer, ForeignKey(apply_schema('editor.id', 'musicbrainz'), name='label_rating_raw_fk_editor'), primary_key=True, nullable=False)
rating = Column(SMALLINT, nullable=False)
label = relationship('Label', foreign_keys=[label_id], innerjoin=True)
editor = relationship('Editor', foreign_keys=[editor_id], innerjoin=True)
class LabelTagRaw(Base):
__tablename__ = 'label_tag_raw'
__table_args__ = (
Index('label_tag_raw_idx_tag', 'tag'),
roll, None, purpose, listener)['Result']
# Now get the exact scroll.
words = scroll_type.split(' ')
commonness = words[0].lower()
self.arcaneness = words[1].lower()
# Roll for the spell.
purpose = 'scroll spell'
roll = self.roll('1d100', purpose)
# Note that unlike potions, there are uncommon level 0 scrolls.
result = None
if self.arcaneness == 'arcane':
if self.spell_level == '0':
result = self.t_arcane_level_0.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '1st':
result = self.t_arcane_level_1.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '2nd':
result = self.t_arcane_level_2.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '3rd':
result = self.t_arcane_level_3.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '4th':
result = self.t_arcane_level_4.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '5th':
result = self.t_arcane_level_5.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '6th':
result = self.t_arcane_level_6.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '7th':
result = self.t_arcane_level_7.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '8th':
result = self.t_arcane_level_8.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '9th':
result = self.t_arcane_level_9.find_roll(conn, roll, commonness, purpose, listener)
elif self.arcaneness == 'divine':
if self.spell_level == '0':
result = self.t_divine_level_0.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '1st':
result = self.t_divine_level_1.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '2nd':
result = self.t_divine_level_2.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '3rd':
result = self.t_divine_level_3.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '4th':
result = self.t_divine_level_4.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '5th':
result = self.t_divine_level_5.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '6th':
result = self.t_divine_level_6.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '7th':
result = self.t_divine_level_7.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '8th':
result = self.t_divine_level_8.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '9th':
result = self.t_divine_level_9.find_roll(conn, roll, commonness, purpose, listener)
self.spell = result['Result']
# Subtype
self.subtype = 'Scroll'
# Item specifics
self.label = 'Scroll of ' + self.spell
self.label += ' (' + self.arcaneness
self.label += ', ' + self.spell_level + ' Level'
self.label += ', CL ' + self.caster_level + ')'
self.price = Price(result['Price'])
class Staff(Item):
def __init__(self):
Item.__init__(self, KEY_STAFF)
# Load tables.
self.t_staves = TABLE_STAVES
# Staff details.
self.staff = ''
self.price = ''
def __repr__(self):
result = '<Staff'
result += '>'
return result
def lookup(self, conn, listener):
# We don't do 'least minor'
if self.strength == 'least minor':
self.strength = 'lesser minor'
# Roll for a staff.
purpose = 'specific staff'
roll = self.roll('1d100', purpose)
staff = self.t_staves.find_roll(conn, roll, self.strength, purpose, listener)
# Subtype
self.subtype = 'Staff'
# Item specifics
self.label = staff['Result']
self.price = Price(staff['Price'])
class Wand(Item):
def __init__(self):
Item.__init__(self, KEY_WAND)
# Load tables.
self.t_random = TABLE_RANDOM_WANDS
self.t_type = TABLE_WAND_TYPE
self.t_wands_0 = TABLE_WAND_LEVEL_0
self.t_wands_1 = TABLE_WAND_LEVEL_1
self.t_wands_2 = TABLE_WAND_LEVEL_2
self.t_wands_3 = TABLE_WAND_LEVEL_3
self.t_wands_4 = TABLE_WAND_LEVEL_4
# Wand details.
self.spell = ''
self.spell_level = ''
self.caster_level = ''
self.price = ''
def __repr__(self):
result = '<Wand'
result += '>'
return result
def lookup(self, conn, listener):
# We don't do 'least minor'
if self.strength == 'least minor':
self.strength = 'lesser minor'
# Roll for spell level.
purpose = 'wand level'
roll = self.roll('1d100', purpose)
wand_spell = self.t_random.find_roll(conn, roll, self.strength, purpose, listener)
self.spell_level = wand_spell['Spell Level']
self.caster_level = wand_spell['Caster Level']
# Roll for type.
purpose = 'wand type'
roll = self.roll('1d100', purpose)
wand_type = self.t_type.find_roll(conn, roll, None, purpose, listener)
commonness = wand_type['Result'].lower()
# Roll for the actual wand.
purpose = 'wand spell'
roll = self.roll('1d100', purpose)
result = None
if self.spell_level == '0':
result = self.t_wands_0.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '1st':
result = self.t_wands_1.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '2nd':
result = self.t_wands_2.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '3rd':
result = self.t_wands_3.find_roll(conn, roll, commonness, purpose, listener)
elif self.spell_level == '4th':
result = self.t_wands_4.find_roll(conn, roll, commonness, purpose, listener)
self.spell = result['Result']
# Subtype
self.subtype = 'Wand'
# Item specifics
self.label = 'Wand of ' + self.spell
self.label += ' (' + self.spell_level + ' Level'
self.label += ', CL ' + self.caster_level + ')'
self.price = Price(result['Price'])
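# Note: the spell-level dispatch above (and the longer ladders in Scroll.lookup) could be
# collapsed into a table lookup; a sketch, not part of the original code:
#     level_tables = {'0': self.t_wands_0, '1st': self.t_wands_1, '2nd': self.t_wands_2,
#                     '3rd': self.t_wands_3, '4th': self.t_wands_4}
#     result = level_tables[self.spell_level].find_roll(conn, roll, commonness, purpose, listener)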
class Gem(Item):
def __init__(self):
Item.__init__(self, KEY_GEM)
# Load the table.
self.t_random = TABLE_RANDOM_GEMS
# Gem details.
self.gem = ''
self.price = ''
def __repr__(self):
result = '<Gem'
result += '>'
return result
def lookup(self, conn, listener):
# Roll for the gem.
purpose = 'gem type'
roll = self.roll('1d100', purpose)
gem_type = self.t_random.find_roll(conn, roll, self.strength, purpose, listener)
self.gem = gem_type['Result']
self.price = 'unknown'
# Compute the price
price_expr = gem_type['Price']
m = RE_GEM_PRICE.match(price_expr)
if m:
base = int(m.group(1).replace(",",""))
addl = 0
factor = 1
if m.group(3):
addl = rollers.rollDice(m.group(3))
if m.group(6):
factor = int(m.group(6).replace(",",""))
price = base + (addl[0] * factor)
self.price = Price(price)
#self.price = str(m.group(1)) + '+' + str(m.group(3)) + '*' + str(m.group(6))
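# RE_GEM_PRICE (defined elsewhere in this module) is assumed to capture the base price in
# group(1), an optional dice expression (e.g. '2d4') in group(3) and an optional multiplier
# in group(6); price expressions that do not match are left as 'unknown'.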
# Subtype
self.subtype = 'Gem'
# Item specifics
self.label = self.gem
class ArtObject(Item):
def __init__(self):
Item.__init__(self, KEY_ART_OBJECT)
# Load the table.
self.t_random = TABLE_RANDOM_ART_OBJECTS
# Art object details.
self.obj = ''
self.price = ''
def __repr__(self):
result = '<ArtObject'
result += '>'
return result
def lookup(self, conn, listener):
# Roll for the art object.
purpose = 'art object type'
roll = self.roll('1d100', purpose)
art_type = self.t_random.find_roll(conn, roll, self.strength, purpose, listener)
self.obj = art_type['Result']
self.price = Price(art_type['Price'])
# Subtype
self.subtype = 'Art object'
# Item specifics
self.label = self.obj
class WondrousItem(Item):
def __init__(self):
Item.__init__(self, KEY_WONDROUS_ITEM)
# Load tables.
self.t_random = TABLE_WONDROUS_ITEMS
self.t_belt = TABLE_WONDROUS_ITEMS_BELT
self.t_body = TABLE_WONDROUS_ITEMS_BODY
self.t_chest = TABLE_WONDROUS_ITEMS_CHEST
self.t_eyes = TABLE_WONDROUS_ITEMS_EYES
self.t_feet = TABLE_WONDROUS_ITEMS_FEET
self.t_hands = TABLE_WONDROUS_ITEMS_HANDS
self.t_head = TABLE_WONDROUS_ITEMS_HEAD
self.t_headband = TABLE_WONDROUS_ITEMS_HEADBAND
self.t_neck = TABLE_WONDROUS_ITEMS_NECK
self.t_shoulders = TABLE_WONDROUS_ITEMS_SHOULDERS
self.t_slotless = TABLE_WONDROUS_ITEMS_SLOTLESS
self.t_wrists = TABLE_WONDROUS_ITEMS_WRISTS
# Wondrous item details
self.slot = ''
self.item = ''
self.price = '0 gp'
# Unlike the other classes, we may do least minor.
# So, don't modify self.strength to "fix" that.
def __repr__(self):
result = '<WondrousItem'
result += '>'
return result
def lookup(self, conn, listener):
# If we don't have a subtype, roll for one.
if self.subtype in [None, '']:
# Roll for subtype.
purpose = 'wondrous item slot'
roll = self.roll('1d100', purpose)
self.subtype = self.t_random.find_roll(conn, roll, None, purpose, listener)['Result']
# Note that 'least minor' is only valid for slotless.
if self.subtype != 'Slotless' and self.strength == 'least minor':
self.strength = 'lesser minor'
# Roll for the item.
purpose = 'specific wondrous item'
roll = self.roll('1d100', purpose)
result = None
if self.subtype == 'Belts':
result = self.t_belt.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Body':
result = self.t_body.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Chest':
result = self.t_chest.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Eyes':
result = self.t_eyes.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Feet':
result = self.t_feet.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Hands':
result = self.t_hands.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Head':
result = self.t_head.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Headband':
result = self.t_headband.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Neck':
result = self.t_neck.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Shoulders':
result = self.t_shoulders.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Wrists':
result = self.t_wrists.find_roll(conn, roll, self.strength, purpose, listener)
elif self.subtype == 'Slotless':
result = self.t_slotless.find_roll(conn, roll, self.strength, purpose, listener)
# The table might be directing us to roll on another table.
if result is not None and result['Result'] == ROLL_LEAST_MINOR:
purpose = 'least minor wondrous item'
roll = self.roll('1d100', purpose)
# This special result only happens on the slotless table.
result = self.t_slotless.find_roll(conn, roll, 'least minor', purpose, listener)
# Perform a final check on the rolled item.
if result is None:
return
# Subtype
# (already taken care of)
# Item specifics
self.label = result['Result']
self.price = Price(result['Price'])
# A dictionary that maps from an item type string to an Item subclass
# We won't do any fancy registration stuff, just use a fixed table.
ITEM_SUBCLASSES = {
KEY_INVALID : InvalidItem,
KEY_DATABASE : DatabaseItem,
KEY_ARMOR : Armor,
KEY_WEAPON : Weapon,
KEY_POTION : Potion,
KEY_RING : Ring,
KEY_ROD : Rod,
KEY_SCROLL : Scroll,
KEY_STAFF : Staff,
KEY_WAND : Wand,
KEY_GEM : Gem,
KEY_ART_OBJECT : ArtObject,
KEY_WONDROUS_ITEM : WondrousItem,
}
56: '_', 57: ':', 58: '5', 59: 'q', 60: 'P', 61: '<',
62: 'i', 63: 'r', 64: 'j', 65: 'J', 66: 'I', 67: ']', 68: '3',
69: 'g', 70: 'u', 71: '3', 72: '@', 73: 'j', 74: 'a', 75: 'm',
76: '`', 77: 'I', 78: 'L', 79: 'i', 80: 'M', 81: 'u', 82: ']',
83: 'w', 84: '3', 85: 'u', 86: 's', 87: 'H', 88: 'w', 89: '@',
90: '?', 91: 'l', 92: 'O', 93: 'r', 94: 'O', 95: 'u', 96: 'C',
97: 'F', 98: 'P', 99: 'w', 100: 'Z', 101: '`', 102: 'z', 103: 'B',
104: 'O', 105: 'z', 106: ']', 107: 'X', 108: 'v', 109: '[', 110: 'E',
111: '?', 112: 'G', 113: '=', 114: '2', 115: 'q', 116: '\\', 117: 'F',
118: 'L', 119: 'N', 120: 'Y', 121: 'u'},
109: {48: '6', 49: ':', 50: 'r', 51: 'f', 52: 'C', 53: '@', 54: 'r',
55: '[', 56: 'E', 57: 'E', 58: 'v', 59: 's', 60: '0', 61: 'a',
62: 'r', 63: 'z', 64: '`', 65: 'z', 66: 'B', 67: 'A', 68: 'S',
69: '8', 70: 'P', 71: 's', 72: '1', 73: 'b', 74: 'M', 75: 'i',
76: '0', 77: '`', 78: '1', 79: '^', 80: '>', 81: 'O', 82: 'p',
83: 'S', 84: '7', 85: 'Z', 86: 'N', 87: 'W', 88: '_', 89: '\\',
90: 'Q', 91: 'h', 92: '7', 93: 'j', 94: 'V', 95: '7', 96: 'g',
97: 'y', 98: 'z', 99: 'o', 100: 'w', 101: 'k', 102: 'h', 103: '8',
104: 's', 105: 'J', 106: 'w', 107: 'b', 108: 'X', 109: 'N', 110: 'l',
111: 'n', 112: 'U', 113: 'R', 114: 'r', 115: ']', 116: 'm', 117: 'F',
118: 'm', 119: 'c', 120: 'R', 121: 'E'},
110: {48: 'I', 49: 't', 50: 'd', 51: 'w', 52: ']', 53: '>', 54: 'v',
55: ';', 56: 'Z', 57: 'k', 58: 'L', 59: '^', 60: 'I', 61: 'v',
62: '6', 63: 'w', 64: 'I', 65: 'D', 66: 'E', 67: '^', 68: 'O',
69: 'x', 70: '4', 71: '5', 72: ']', 73: 'q', 74: 'G', 75: 'A',
76: 'M', 77: 'I', 78: '4', 79: 'i', 80: 'X', 81: 'a', 82: '<',
83: 'j', 84: 'j', 85: 'Q', 86: 'e', 87: 'F', 88: 'r', 89: 'a',
90: 'l', 91: '_', 92: 'F', 93: '>', 94: '[', 95: 'J', 96: 'Z',
97: 'n', 98: '>', 99: 'C', 100: 'J', 101: '>', 102: '8', 103: 'k',
104: '2', 105: '<', 106: '1', 107: 'o', 108: 'P', 109: 'f', 110: 'e',
111: 'k', 112: '[', 113: 'C', 114: 'B', 115: 'h', 116: 'x', 117: ']',
118: '6', 119: '\\', 120: 'e', 121: 'p'},
111: {48: 'e', 49: '7', 50: '6', 51: 'y', 52: 'V', 53: 'X', 54: 'i',
55: 't', 56: 'D', 57: 'G', 58: 'U', 59: ':', 60: 'A', 61: 'S',
62: '0', 63: 'Z', 64: 'V', 65: 'o', 66: 'a', 67: 'z', 68: 'g',
69: 'S', 70: '=', 71: 'h', 72: 'd', 73: 'J', 74: 'I', 75: '?',
76: 'c', 77: 'd', 78: '>', 79: '`', 80: '<', 81: 'i', 82: '7',
83: 'z', 84: 'b', 85: ']', 86: 'e', 87: 'a', 88: 'D', 89: 'G',
90: 'M', 91: 'R', 92: 'a', 93: 'L', 94: 'F', 95: '4', 96: 'g',
97: 'G', 98: 'Q', 99: 'k', 100: '3', 101: 'V', 102: 'p', 103: 'C',
104: '[', 105: 'W', 106: 'z', 107: 'w', 108: 'G', 109: 'I', 110: '?',
111: '^', 112: 'e', 113: 'M', 114: ']', 115: 'a', 116: ']', 117: 'P',
118: 'C', 119: '8', 120: 'a', 121: 'z'},
112: {48: 'O', 49: 'm', 50: '_', 51: 'S', 52: 'v', 53: 'y', 54: '9',
55: 'j', 56: 'D', 57: 'o', 58: 'D', 59: '@', 60: 'B', 61: 'w',
62: 'n', 63: 'i', 64: 'm', 65: 'v', 66: 'd', 67: 'M', 68: 'a',
69: '2', 70: ';', 71: ':', 72: 'i', 73: 'Y', 74: '7', 75: 'A',
76: 'n', 77: 'y', 78: 'Y', 79: 'B', 80: 'x', 81: '=', 82: 'w',
83: 'S', 84: 'i', 85: 'S', 86: '?', 87: '3', 88: 'I', 89: 'n',
90: 'P', 91: 'r', 92: 'Z', 93: '[', 94: 'x', 95: 's', 96: '>',
97: 'I', 98: '2', 99: 'L', 100: 'E', 101: 'c', 102: 'G', 103: ']',
104: '8', 105: 'E', 106: 'l', 107: 'x', 108: 'A', 109: ']', 110: '9',
111: 'N', 112: '9', 113: '8', 114: '3', 115: '`', 116: '5', 117: 'S',
118: '=', 119: '0', 120: 'M', 121: 'Q'},
113: {48: 'g', 49: 'H', 50: '8', 51: 'T', 52: 'N', 53: 'U', 54: 'B',
55: 'Q', 56: 'j', 57: 'j', 58: 'r', 59: 'F', 60: '6', 61: 'V',
62: 'm', 63: 'j', 64: 'R', 65: '8', 66: 'J', 67: 'h', 68: '_',
69: 'p', 70: '1', 71: 'm', 72: '5', 73: '7', 74: '6', 75: 'E',
76: 'M', 77: 'B', 78: 'G', 79: 's', 80: 'w', 81: 't', 82: 'C',
83: 'C', 84: 'E', 85: 'Q', 86: '4', 87: 'q', 88: 'J', 89: 'd',
90: 'Y', 91: 'L', 92: 'Y', 93: 'q', 94: 'b', 95: 'y', 96: 'a',
97: '[', 98: 'P', 99: '@', 100: 'l', 101: 'i', 102: 'H', 103: 'S',
104: '0', 105: 'u', 106: 'f', 107: 'S', 108: 'r', 109: ';', 110: '\\',
111: 'y', 112: 'A', 113: 'd', 114: 'D', 115: 's', 116: 'o', 117: 'J',
118: 'W', 119: 'B', 120: 'l', 121: '2'},
114: {48: 'n', 49: 'z', 50: ']', 51: 'a', 52: 'e', 53: 'n', 54: 'i',
55: 'K', 56: 'E', 57: 'B', 58: 'p', 59: 'w', 60: 'a', 61: ';',
62: 'J', 63: 'P', 64: 'k', 65: 't', 66: 'v', 67: '1', 68: '5',
69: 's', 70: 'j', 71: 'W', 72: 'y', 73: 'W', 74: 'd', 75: 'q',
76: 'x', 77: 'E', 78: 'o', 79: 'l', 80: '<', 81: ':', 82: 'f',
83: '0', 84: '7', 85: 'W', 86: '3', 87: 'a', 88: ':', 89: '3',
90: 'O', 91: 'g', 92: 'O', 93: 'n', 94: '?', 95: '_', 96: '1',
97: 'k', 98: 's', 99: 'b', 100: ']', 101: 'K', 102: 'W', 103: '`',
104: 'I', 105: '0', 106: 'Z', 107: 'o', 108: ';', 109: '6', 110: 'L',
111: ':', 112: '7', 113: '`', 114: '9', 115: '8', 116: 'j', 117: '6',
118: '9', 119: 'i', 120: 'y', 121: 'Q'},
115: {48: 'W', 49: 'j', 50: 'v', 51: '?', 52: 'H', 53: 't', 54: 'k',
55: 'd', 56: 'j', 57: 'F', 58: '>', 59: 's', 60: 'T', 61: 'I',
62: 'a', 63: ';', 64: 'V', 65: ':', 66: '`', 67: 'h', 68: 'u',
69: '1', 70: 'k', 71: ']', 72: 'X', 73: 'r', 74: '?', 75: '?',
76: 'z', 77: 'd', 78: '7', 79: 'C', 80: '3', 81: 'z', 82: 'm',
83: 'J', 84: 'y', 85: '\\', 86: 'C', 87: 'y', 88: '1', 89: 'a',
90: 'U', 91: 'o', 92: '=', 93: 'M', 94: '1', 95: 'T', 96: 'u',
97: ']', 98: 'u', 99: '?', 100: 'I', 101: 'h', 102: '5', 103: 'p',
104: 'A', 105: 'O', 106: '\\', 107: '4', 108: 'W', 109: 'y', 110: 'U',
111: '^', 112: '3', 113: 'm', 114: 'M', 115: '2', 116: '9', 117: 'k',
118: 'X', 119: 'm', 120: ':', 121: 'l'},
116: {48: 'a', 49: 'B', 50: 'K', 51: 'J', 52: '=', 53: 'w', 54: 'a',
55: 'x', 56: 'h', 57: 'O', 58: 'h', 59: 'q', 60: 'c', 61: 't',
62: '^', 63: 'g', 64: 'p', 65: 'G', 66: '=', 67: '[', 68: 'L',
69: 'u', 70: 'w', 71: 'y', 72: 'F', 73: ';', 74: 'h', 75: 'j',
76: 'a', 77: 'r', 78: '`', 79: 'o', 80: '>', 81: '?', 82: 'i',
83: '[', 84: 'j', 85: '_', 86: 'v', 87: 'h', 88: 'Y', 89: 'p',
90: 's', 91: 'G', 92: 'f', 93: '2', 94: 'g', 95: 'c', 96: 'M',
97: 's', 98: 'z', 99: '9', | |
from ...base import *
from .transform import TransformMixin
class SelectionMixin:
""" GeomDataObject class mix-in """
def __setstate__(self, state):
self._poly_selection_data = {"selected": [], "unselected": []}
self._selected_subobj_ids = {"vert": [], "edge": [], "poly": [], "normal": []}
def _edit_state(self, state):
del state["_poly_selection_data"]
del state["_selected_subobj_ids"]
def __init__(self):
self._poly_selection_data = {"selected": [], "unselected": []}
self._selected_subobj_ids = {"vert": [], "edge": [], "poly": [], "normal": []}
self._sel_subobj_ids_backup = {}
self._selection_backup = {}
def update_selection(self, subobj_type, subobjs_to_select, subobjs_to_deselect,
update_verts_to_transf=True, selection_colors=None, geom=None):
selected_subobj_ids = self._selected_subobj_ids[subobj_type]
geoms = self._geoms[subobj_type]
selected_subobjs = [subobj for subobj in subobjs_to_select
if subobj.id not in selected_subobj_ids]
deselected_subobjs = [subobj for subobj in subobjs_to_deselect
if subobj.id in selected_subobj_ids]
if not (selected_subobjs or deselected_subobjs):
return False
if subobj_type == "poly":
geom_selected = geoms["selected"]
geom_unselected = geoms["unselected"]
sel_data = self._poly_selection_data
data_selected = sel_data["selected"]
data_unselected = sel_data["unselected"]
prim = geom_selected.node().modify_geom(0).modify_primitive(0)
array_sel = prim.modify_vertices()
stride = array_sel.array_format.stride
size_sel = array_sel.get_num_rows()
row_count = sum([len(poly) for poly in selected_subobjs], size_sel)
array_sel.set_num_rows(row_count)
view_sel = memoryview(array_sel).cast("B")
prim = geom_unselected.node().modify_geom(0).modify_primitive(0)
array_unsel = prim.modify_vertices()
size_unsel = array_unsel.get_num_rows()
row_count = sum([len(poly) for poly in deselected_subobjs], size_unsel)
array_unsel.set_num_rows(row_count)
view_unsel = memoryview(array_unsel).cast("B")
polys_sel = []
polys_unsel = []
row_ranges_sel_to_keep = SparseArray()
row_ranges_sel_to_keep.set_range(0, array_sel.get_num_rows())
row_ranges_unsel_to_keep = SparseArray()
row_ranges_unsel_to_keep.set_range(0, size_unsel)
row_ranges_sel_to_move = SparseArray()
row_ranges_unsel_to_move = SparseArray()
for poly in selected_subobjs:
selected_subobj_ids.append(poly.id)
start = data_unselected.index(poly[0]) * 3
polys_sel.append((start, poly))
row_ranges_unsel_to_keep.clear_range(start, len(poly))
row_ranges_unsel_to_move.set_range(start, len(poly))
for poly in deselected_subobjs:
selected_subobj_ids.remove(poly.id)
start = data_selected.index(poly[0]) * 3
polys_unsel.append((start, poly))
row_ranges_sel_to_keep.clear_range(start, len(poly))
row_ranges_sel_to_move.set_range(start, len(poly))
polys_sel.sort()
polys_unsel.sort()
for _, poly in polys_sel:
data_selected.extend(poly)
for vert_ids in poly:
data_unselected.remove(vert_ids)
for _, poly in polys_unsel:
data_unselected.extend(poly)
for vert_ids in poly:
data_selected.remove(vert_ids)
f = lambda values, stride: (v * stride for v in values)
for i in range(row_ranges_unsel_to_move.get_num_subranges()):
start = row_ranges_unsel_to_move.get_subrange_begin(i)
size = row_ranges_unsel_to_move.get_subrange_end(i) - start
offset_, start_, size_ = f((size_sel, start, size), stride)
view_sel[offset_:offset_+size_] = view_unsel[start_:start_+size_]
size_sel += size
size_unsel -= size
offset = 0
for i in range(row_ranges_unsel_to_keep.get_num_subranges()):
start = row_ranges_unsel_to_keep.get_subrange_begin(i)
size = row_ranges_unsel_to_keep.get_subrange_end(i) - start
offset_, start_, size_ = f((offset, start, size), stride)
view_unsel[offset_:offset_+size_] = view_unsel[start_:start_+size_]
offset += size
for i in range(row_ranges_sel_to_move.get_num_subranges()):
start = row_ranges_sel_to_move.get_subrange_begin(i)
size = row_ranges_sel_to_move.get_subrange_end(i) - start
offset_, start_, size_ = f((size_unsel, start, size), stride)
view_unsel[offset_:offset_+size_] = view_sel[start_:start_+size_]
size_unsel += size
size_sel -= size
offset = 0
for i in range(row_ranges_sel_to_keep.get_num_subranges()):
start = row_ranges_sel_to_keep.get_subrange_begin(i)
size = row_ranges_sel_to_keep.get_subrange_end(i) - start
offset_, start_, size_ = f((offset, start, size), stride)
view_sel[offset_:offset_+size_] = view_sel[start_:start_+size_]
offset += size
array_sel.set_num_rows(size_sel)
array_unsel.set_num_rows(size_unsel)
else:
if subobj_type == "vert":
combined_subobjs = self.merged_verts
elif subobj_type == "edge":
combined_subobjs = self.merged_edges
elif subobj_type == "normal":
combined_subobjs = self.shared_normals
selected_subobjs = set(combined_subobjs[subobj.id] for subobj in selected_subobjs)
deselected_subobjs = set(combined_subobjs[subobj.id] for subobj in deselected_subobjs)
sel_state_geom = geom if geom else geoms["sel_state"]
vertex_data = sel_state_geom.node().modify_geom(0).modify_vertex_data()
col_writer = GeomVertexWriter(vertex_data, "color")
if selection_colors:
sel_colors = selection_colors
else:
sel_colors = Mgr.get("subobj_selection_colors")[subobj_type]
color_sel = sel_colors["selected"]
color_unsel = sel_colors["unselected"]
for combined_subobj in selected_subobjs:
selected_subobj_ids.extend(combined_subobj)
for row_index in combined_subobj.row_indices:
col_writer.set_row(row_index)
col_writer.set_data4(color_sel)
for combined_subobj in deselected_subobjs:
for subobj_id in combined_subobj:
selected_subobj_ids.remove(subobj_id)
for row_index in combined_subobj.row_indices:
col_writer.set_row(row_index)
col_writer.set_data4(color_unsel)
if subobj_type == "normal":
selected_normal_ids = []
deselected_normal_ids = []
for combined_subobj in selected_subobjs:
selected_normal_ids.extend(combined_subobj)
for combined_subobj in deselected_subobjs:
deselected_normal_ids.extend(combined_subobj)
self.update_locked_normal_selection(selected_normal_ids, deselected_normal_ids)
if update_verts_to_transf:
self._update_verts_to_transform(subobj_type)
return True
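# Illustrative sketch (not part of the original module): how a caller might drive
# update_selection() above to toggle polygon selection. The names `geom_data_obj`,
# `clicked_poly` and `previously_selected` are hypothetical placeholders.
#     new_polys = [clicked_poly]
#     old_polys = [p for p in previously_selected if p is not clicked_poly]
#     if geom_data_obj.update_selection("poly", new_polys, old_polys):
#         pass  # True means the selection actually changed and dependent data was refreshed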
def is_selected(self, subobj):
return subobj.id in self._selected_subobj_ids[subobj.type]
def get_selection(self, subobj_lvl):
selected_subobj_ids = self._selected_subobj_ids[subobj_lvl]
if subobj_lvl == "poly":
polys = self._subobjs["poly"]
return [polys[poly_id] for poly_id in selected_subobj_ids]
if subobj_lvl == "vert":
combined_subobjs = self.merged_verts
elif subobj_lvl == "edge":
combined_subobjs = self.merged_edges
elif subobj_lvl == "normal":
combined_subobjs = self.shared_normals
return list(set(combined_subobjs[subobj_id] for subobj_id in selected_subobj_ids))
def create_selection_backup(self, subobj_lvl):
if subobj_lvl in self._selection_backup:
return
self._sel_subobj_ids_backup[subobj_lvl] = self._selected_subobj_ids[subobj_lvl][:]
self._selection_backup[subobj_lvl] = self.get_selection(subobj_lvl)
def restore_selection_backup(self, subobj_lvl):
sel_backup = self._selection_backup
if subobj_lvl not in sel_backup:
return
self.clear_selection(subobj_lvl, False)
self.update_selection(subobj_lvl, sel_backup[subobj_lvl], [], False)
del sel_backup[subobj_lvl]
del self._sel_subobj_ids_backup[subobj_lvl]
def remove_selection_backup(self, subobj_lvl):
sel_backup = self._selection_backup
if subobj_lvl in sel_backup:
del sel_backup[subobj_lvl]
del self._sel_subobj_ids_backup[subobj_lvl]
def clear_selection(self, subobj_lvl, update_verts_to_transf=True, force=False):
if not (force or self._selected_subobj_ids[subobj_lvl]):
return
geoms = self._geoms[subobj_lvl]
if subobj_lvl == "poly":
geom_selected = geoms["selected"]
geom_unselected = geoms["unselected"]
sel_data = self._poly_selection_data
sel_data["unselected"].extend(sel_data["selected"])
sel_data["selected"] = []
from_array = geom_selected.node().modify_geom(0).modify_primitive(0).modify_vertices()
from_size = from_array.data_size_bytes
from_view = memoryview(from_array).cast("B")
to_array = geom_unselected.node().modify_geom(0).modify_primitive(0).modify_vertices()
to_size = to_array.data_size_bytes
to_array.set_num_rows(to_array.get_num_rows() + from_array.get_num_rows())
to_view = memoryview(to_array).cast("B")
to_view[to_size:to_size+from_size] = from_view
from_array.clear_rows()
elif subobj_lvl == "normal":
color = Mgr.get("subobj_selection_colors")["normal"]["unselected"]
color_locked = Mgr.get("subobj_selection_colors")["normal"]["locked_unsel"]
vertex_data = geoms["sel_state"].node().modify_geom(0).modify_vertex_data()
col_writer = GeomVertexWriter(vertex_data, "color")
verts = self._subobjs["vert"]
for vert_id in self._selected_subobj_ids["normal"]:
vert = verts[vert_id]
row = vert.row_index
col = color_locked if vert.has_locked_normal() else color
col_writer.set_row(row)
col_writer.set_data4(col)
else:
vertex_data = geoms["sel_state"].node().modify_geom(0).modify_vertex_data()
color = Mgr.get("subobj_selection_colors")[subobj_lvl]["unselected"]
new_data = vertex_data.set_color(color)
vertex_data.set_array(1, new_data.arrays[1])
self._selected_subobj_ids[subobj_lvl] = []
if update_verts_to_transf:
self._verts_to_transf[subobj_lvl] = {}
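# Minimal sketch of the memoryview append pattern used in the "poly" branch above,
# assuming `src` and `dst` are Panda3D GeomVertexArrayData objects with the same format:
#     src_view = memoryview(src).cast("B")
#     old_size = dst.data_size_bytes
#     dst.set_num_rows(dst.get_num_rows() + src.get_num_rows())
#     memoryview(dst).cast("B")[old_size:old_size + src.data_size_bytes] = src_view
#     src.clear_rows()  # the source rows now live in `dst`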
def delete_selection(self, subobj_lvl, unregister_globally=True, unregister_locally=True):
subobjs = self._subobjs
verts = subobjs["vert"]
edges = subobjs["edge"]
polys = subobjs["poly"]
selected_subobj_ids = self._selected_subobj_ids
selected_vert_ids = selected_subobj_ids["vert"]
selected_edge_ids = selected_subobj_ids["edge"]
selected_poly_ids = selected_subobj_ids["poly"]
if subobj_lvl == "vert":
polys_to_delete = set()
for vert in (verts[v_id] for v_id in selected_vert_ids):
polys_to_delete.add(polys[vert.polygon_id])
elif subobj_lvl == "edge":
polys_to_delete = set()
for edge in (edges[e_id] for e_id in selected_edge_ids):
polys_to_delete.add(polys[edge.polygon_id])
elif subobj_lvl == "poly":
polys_to_delete = [polys[poly_id] for poly_id in selected_poly_ids]
self.delete_polygons(polys_to_delete, unregister_globally, unregister_locally)
def _restore_subobj_selection(self, time_id):
obj_id = self.toplevel_obj.id
prop_id = self._unique_prop_ids["subobj_selection"]
data = Mgr.do("load_last_from_history", obj_id, prop_id, time_id)
verts = self._subobjs["vert"]
normal_ids = data["normal"]
old_sel_normal_ids = set(self._selected_subobj_ids["normal"])
new_sel_normal_ids = set(normal_ids)
sel_normal_ids = new_sel_normal_ids - old_sel_normal_ids
unsel_normal_ids = old_sel_normal_ids - new_sel_normal_ids
unsel_normal_ids.intersection_update(verts)
shared_normals = self.shared_normals
original_shared_normals = {}
if unsel_normal_ids:
tmp_shared_normal = Mgr.do("create_shared_normal", self, unsel_normal_ids)
unsel_id = tmp_shared_normal.id
original_shared_normals[unsel_id] = shared_normals[unsel_id]
shared_normals[unsel_id] = tmp_shared_normal
unsel_normals = [tmp_shared_normal]
else:
unsel_normals = []
if sel_normal_ids:
tmp_shared_normal = Mgr.do("create_shared_normal", self, sel_normal_ids)
sel_id = tmp_shared_normal.id
original_shared_normals[sel_id] = shared_normals[sel_id]
shared_normals[sel_id] = tmp_shared_normal
sel_normals = [tmp_shared_normal]
else:
sel_normals = []
self.update_selection("normal", sel_normals, unsel_normals, False)
if unsel_normals:
shared_normals[unsel_id] = original_shared_normals[unsel_id]
if sel_normals:
shared_normals[sel_id] = original_shared_normals[sel_id]
self._update_verts_to_transform("normal")
for subobj_type in ("vert", "edge", "poly"):
subobjs = self._subobjs[subobj_type]
subobj_ids = data[subobj_type]
old_sel_subobj_ids = set(self._selected_subobj_ids[subobj_type])
new_sel_subobj_ids = set(subobj_ids)
sel_subobj_ids = new_sel_subobj_ids - old_sel_subobj_ids
unsel_subobj_ids = old_sel_subobj_ids - new_sel_subobj_ids
unsel_subobj_ids.intersection_update(subobjs)
unsel_subobjs = [subobjs[i] for i in unsel_subobj_ids]
sel_subobjs = [subobjs[i] for i in sel_subobj_ids]
if subobj_type in ("vert", "edge"):
merged_subobjs = self.merged_verts if subobj_type == "vert" else self.merged_edges
original_merged_subobjs = {}
if unsel_subobjs:
tmp_merged_subobj = Mgr.do(f"create_merged_{subobj_type}", self)
for subobj_id in unsel_subobj_ids:
tmp_merged_subobj.append(subobj_id)
unsel_id = tmp_merged_subobj.id
original_merged_subobjs[unsel_id] = merged_subobjs[unsel_id]
merged_subobjs[unsel_id] = tmp_merged_subobj
unsel_subobjs = [subobjs[unsel_id]]
if sel_subobjs:
tmp_merged_subobj = Mgr.do(f"create_merged_{subobj_type}", self)
for subobj_id in sel_subobj_ids:
tmp_merged_subobj.append(subobj_id)
sel_id = tmp_merged_subobj.id
original_merged_subobjs[sel_id] = merged_subobjs[sel_id]
merged_subobjs[sel_id] = tmp_merged_subobj
sel_subobjs = [subobjs[sel_id]]
self.update_selection(subobj_type, sel_subobjs, unsel_subobjs, False)
if subobj_type in ("vert", "edge"):
if unsel_subobjs:
merged_subobjs[unsel_id] = original_merged_subobjs[unsel_id]
if sel_subobjs:
merged_subobjs[sel_id] = original_merged_subobjs[sel_id]
self._update_verts_to_transform(subobj_type)
class Selection(TransformMixin):
def __init__(self, obj_level, subobjs):
TransformMixin.__init__(self)
self._objs = subobjs
self._obj_level = obj_level
self._groups = {}
for obj in subobjs:
self._groups.setdefault(obj.geom_data_obj, []).append(obj)
def __getitem__(self, index):
try:
return self._objs[index]
except IndexError:
raise IndexError("Index out of range.")
except TypeError:
raise TypeError("Index must be an integer value.")
def __len__(self):
return len(self._objs)
def get_geom_data_objects(self):
return list(self._groups)
def get_toplevel_objects(self, get_group=False):
return [geom_data_obj.get_toplevel_object(get_group) for geom_data_obj in self._groups]
def get_toplevel_object(self, get_group=False):
""" Return a random top-level object """
if self._groups:
return list(self._groups.keys())[0].get_toplevel_object(get_group)
@property
def toplevel_obj(self):
return self.get_toplevel_object()
def get_subobjects(self, geom_data_obj):
return self._groups.get(geom_data_obj, [])
def update(self, hide_sets=False):
self.update_center_pos()
self.update_ui()
if hide_sets:
Mgr.update_remotely("selection_set", "hide_name")
def add(self, subobjs, add_to_hist=True):
sel = self._objs
old_sel = set(sel)
sel_to_add = set(subobjs)
common = old_sel & sel_to_add
if common:
sel_to_add -= common
if not sel_to_add:
return False
geom_data_objs = {}
groups = self._groups
for obj in sel_to_add:
geom_data_obj = obj.geom_data_obj
geom_data_objs.setdefault(geom_data_obj, []).append(obj)
groups.setdefault(geom_data_obj, []).append(obj)
for geom_data_obj, objs in geom_data_objs.items():
geom_data_obj.update_selection(self._obj_level, objs, [])
sel.extend(sel_to_add)
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
if add_to_hist:
subobj_descr = {"vert": "vertex", "edge": "edge", "poly": "polygon", "normal": "normal"}
event_descr = f'Add to {subobj_descr[self._obj_level]} selection'
obj_data = {}
event_data = {"objects": obj_data}
for geom_data_obj in geom_data_objs:
obj = geom_data_obj.toplevel_obj
obj_data[obj.id] = geom_data_obj.get_data_to_store("prop_change", "subobj_selection")
# make undo/redoable
Mgr.do("add_history", event_descr, event_data)
return True
def remove(self, subobjs, add_to_hist=True):
sel = self._objs
old_sel = set(sel)
sel_to_remove = set(subobjs)
common = old_sel & sel_to_remove
if not common:
return False
geom_data_objs = {}
groups = self._groups
for obj in common:
sel.remove(obj)
geom_data_obj = obj.geom_data_obj
geom_data_objs.setdefault(geom_data_obj, []).append(obj)
groups[geom_data_obj].remove(obj)
if not groups[geom_data_obj]:
del groups[geom_data_obj]
for geom_data_obj, objs in geom_data_objs.items():
geom_data_obj.update_selection(self._obj_level, [], objs)
task = lambda: Mgr.get("selection").update()
PendingTasks.add(task, "update_selection", "ui")
| |
# baiduspider/core/__init__.py
"""BaiduSpider,爬取百度的利器
:Author: <NAME>
:Licence: MIT
:GitHub: https://github.com/samzhangjy
:GitLab: https://gitlab.com/samzhangjy
TODO: finish the documentation
TODO: add more spiders
"""
import json
import os
import re
from html import unescape
from pprint import pprint
from urllib.parse import quote, urlparse
import requests
from bs4 import BeautifulSoup
from baiduspider.core._spider import BaseSpider
from baiduspider.core.parser import Parser
from baiduspider.errors import ParseError, UnknownError
__all__ = ['BaiduSpider']
class BaiduSpider(BaseSpider):
def __init__(self) -> None:
"""爬取百度的搜索结果
本类的所有成员方法都遵循下列格式:
{
'results': <一个列表,表示搜索结果,内部的字典会因为不同的成员方法而改变>,
'total': <一个正整数,表示搜索结果的最大页数,可能会因为搜索结果页码的变化而变化,因为百度不提供总共的搜索结果页数>
}
目前支持百度搜索,百度图片,百度知道,百度视频,百度资讯,百度文库,百度经验和百度百科,并且返回的搜索结果无广告。继承自``BaseSpider``。
BaiduSpider.`search_web(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度网页搜索
BaiduSpider.`search_pic(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度图片搜索
BaiduSpider.`search_zhidao(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度知道搜索
BaiduSpider.`search_video(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度视频搜索
BaiduSpider.`search_news(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度资讯搜索
BaiduSpider.`search_wenku(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度文库搜索
BaiduSpider.`search_jingyan(self: BaiduSpider, query: str, pn: int = 1) -> dict`: 百度经验搜索
BaiduSpider.`search_baike(self: BaiduSpider, query: str) -> dict`: 百度百科搜索
"""
super().__init__()
# Spider name (only used for identification, not sent with requests)
self.spider_name = 'BaiduSpider'
# Set the request headers
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
'Referer': 'https://www.baidu.com',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
'Cookie': 'BAIDUID=BB66E815C068DD2911DB67F3F84E9AA5:FG=1; BIDUPSID=BB66E815C068DD2911DB67F3F84E9AA5; PSTM=1592390872; BD_UPN=123253; BDUSS=RQa2c4eEdKMkIySjJ0dng1ZDBLTDZEbVNHbmpBLU1rcFJkcVViaTM5NUdNaDFmRVFBQUFBJCQAAAAAAAAAAAEAAAAPCkwAZGF5ZGF5dXAwNgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEal9V5GpfVebD; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BD_HOME=1; delPer=0; BD_CK_SAM=1; PSINO=2; COOKIE_SESSION=99799_0_5_2_8_0_1_0_5_0_0_0_99652_0_3_0_1593609921_0_1593609918%7C9%230_0_1593609918%7C1; H_PS_PSSID=1457_31326_32139_31660_32046_32231_32091_32109_31640; sug=3; sugstore=0; ORIGIN=0; bdime=0; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; H_PS_645EC=1375sSQTgv84OSzYM3CN5w5Whp9Oy7MkdGdBcw5umqOIFr%2FeFZO4D952XrS0pC1kVwPI; BDSVRTM=223'
}
self.parser = Parser()
def search_web(self, proxies, query: str, pn: int = 1) -> dict:
"""百度网页搜索
- 简单搜索:
>>> BaiduSpider().search_web('搜索词')
{
'results': [
{
'result': int, 总计搜索结果数,
'type': 'total' # type用来区分不同类别的搜索结果
},
{
'results': [
'str, 相关搜索建议',
'...',
'...',
'...',
...
],
'type': 'related'
},
{
'process': 'str, 算数过程',
'result': 'str, 运算结果',
'type': 'calc'
# 这类搜索结果仅会在搜索词涉及运算时出现,不一定每个搜索结果都会出现的
},
{
'results': [
{
'author': 'str, 新闻来源',
'time': 'str, 新闻发布时间',
'title': 'str, 新闻标题',
'url': 'str, 新闻链接',
'des': 'str, 新闻简介,大部分情况为None'
},
{ ... },
{ ... },
{ ... },
...
],
'type': 'news'
# 这类搜索结果仅会在搜索词有相关新闻时出现,不一定每个搜索结果都会出现的
},
{
'results': [
{
'cover': 'str, 视频封面图片链接',
'origin': 'str, 视频来源',
'length': 'str, 视频时长',
'title': 'str, 视频标题',
'url': 'str, 视频链接'
},
{ ... },
{ ... },
{ ... },
...
],
'type': 'video'
# 这类搜索结果仅会在搜索词有相关视频时出现,不一定每个搜索结果都会出现的
},
{
'result': {
'cover': 'str, 百科封面图片/视频链接',
'cover-type': 'str, 百科封面类别,图片是image,视频是video',
'des': 'str, 百科简介',
'title': 'str, 百科标题',
'url': 'str, 百科链接'
},
'type': 'baike'
# 这类搜索结果仅会在搜索词有相关百科时出现,不一定每个搜索结果都会出现的
},
{
'des': 'str, 搜索结果简介',
'origin': 'str, 搜索结果的来源,可能是域名,也可能是名称',
'time': 'str, 搜索结果的发布时间',
'title': 'str, 搜索结果标题',
'type': 'result', # 正经的搜索结果
'url': 'str, 搜索结果链接'
},
{ ... },
{ ... },
{ ... },
...
],
'total': int, 总计的搜索结果页数,可能会因为当前页数的变化而随之变化
}
- 带页码:
>>> BaiduSpider().search_web('搜索词', pn=2)
{
'results': [ ... ],
'total': ...
}
Args:
query (str): 要爬取的query
pn (int, optional): 爬取的页码. Defaults to 1.
Returns:
dict: 爬取的返回值和搜索结果
"""
error = None
try:
text = quote(query, 'utf-8')
url = 'https://www.baidu.com/s?wd=%s&pn=%d' % (text, (pn - 1) * 10)
print('proxies: ', proxies)
content = self._get_response(url, proxies = proxies)
results = self.parser.parse_web(content)
except Exception as err:
error = err
finally:
self._handle_error(error)
return {
'results': results['results'],
'total': results['pages']
}
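# Example call (hypothetical proxy address): note that search_web() takes the proxies
# mapping as its first argument, unlike the other search_* methods, and simply forwards
# it to the underlying HTTP request.
#     spider = BaiduSpider()
#     result = spider.search_web({'https': 'http://127.0.0.1:8080'}, 'python', pn=1)
#     print(result['total'], len(result['results']))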
def search_pic(self, query: str, pn: int = 1) -> dict:
"""百度图片搜索
- 实例:
>>> BaiduSpider().search_pic('搜索词')
{
'results': [
{
'host': 'str, 图片来源域名',
'title': 'str, 图片标题',
'url': 'str, 图片链接'
},
{ ... },
{ ... },
{ ... },
...
],
'total': int, 搜索结果总计页码,可能会变化
}
- 带页码的搜索:
>>> BaiduSpider().search_pic('搜索词', pn=2)
{
'results': [ ... ],
'total': ...
}
Args:
query (str): 要爬取的query
pn (int, optional): 爬取的页码. Defaults to 1.
Returns:
dict: 爬取的搜索结果
"""
error = None
try:
url = 'http://image.baidu.com/search/flip?tn=baiduimage&word=%s&pn=%d' % (
quote(query), (pn - 1) * 20)
source = requests.get(url, headers=self.headers)
content = source.text
result = self.parser.parse_pic(content)
except Exception as err:
error = err
finally:
self._handle_error(error)
return {
'results': result['results'],
'total': result['pages']
}
def search_zhidao(self, query: str, pn: int = 1) -> dict:
"""百度知道搜索
- 普通搜索:
>>> BaiduSpider().search_zhidao('搜索词')
{
'results': [
{
'count': int, 回答总数,
'date': 'str, 发布日期',
'des': 'str, 简介',
'title': 'str, 标题',
'url': 'str, 链接'
},
{ ... },
{ ... },
{ ... },
...
],
'total': int, 搜索结果最大页数,可能会变化
}
- 带页码的搜索:
>>> BaiduSpider().search_zhidao('搜索词', pn=2) # `pn` !!
{
'results': [ ... ],
'total': ...
}
Args:
query (str): 要搜索的query
pn (int, optional): 搜索结果的页码. Defaults to 1.
Returns:
dict: 搜索结果以及总页码
"""
url = 'https://zhidao.baidu.com/search?pn=%d&tn=ikaslis&word=%s' % (
(pn - 1) * 10, quote(query))
source = requests.get(url, headers=self.headers)
# Convert the response encoding
source.encoding = 'gb2312'
code = source.text
bs = BeautifulSoup(self._minify(code), 'html.parser')
# All search results
list_ = bs.find('div', class_='list').findAll('dl')
results = []
for item in list_:
# Skip sponsored business answers
if 'ec-oad' in item['class']:
continue
# Title
title = item.find('dt').text
# Link
url = item.find('dt').find('a')['href']
# Description
des = item.find('dd').text.strip('答:')
tmp = item.find('dd', class_='explain').findAll(
'span', class_='mr-8')
# Publish date
date = tmp[0].text
# Total number of answers
count = int(str(tmp[-1].text).strip('个回答'))
# Build the result
result = {
'title': title,
'des': des,
'date': date,
'count': count,
'url': url
}
results.append(result)  # Append the result
# Get the pagination links
wrap = bs.find('div', class_='pager')
pages_ = wrap.findAll('a')[:-2]
pages = []
for _ in pages_:
# Brute force: just try the int conversion
try:
pages.append(int(_.text))
except ValueError:
pass
return {
'results': results,
# Take the largest page number
'total': max(pages)
}
def search_video(self, query: str, pn: int = 1) -> dict:
"""百度视频搜索
- 普通搜索:
>>> BaiduSpider().search_video('搜索词')
{
'results': [
{
'img': 'str, 视频封面图片链接',
'time': 'str, 视频时长',
'title': 'str, 视频标题',
'url': 'str, 视频链接'
},
{ ... },
{ ... },
{ ... },
...
'total': int, 搜索结果最大页数,可能因搜索页数改变而改变
}
- 带页码:
>>> BaiduSpider().search_video('搜索词', pn=2) # <=== `pn`
{
'results': [ ... ],
'total': ...
}
Args:
query (str): 要搜索的query
pn (int, optional): 搜索结果的页码. Defaults to 1.
Returns:
dict: 搜索结果及总页码
"""
url = 'http://v.baidu.com/v?no_al=1&word=%s&pn=%d' % (
quote(query), (60 if pn == 2 else (pn - 1) * 20))
# Fetch the page source
source = requests.get(url, headers=self.headers)
code = self._minify(source.text)
bs = BeautifulSoup(code, 'html.parser')
# Locate the result containers
data = bs.findAll('li', class_='result')
results = []
for res in data:
# Title
title = res.find('a')['title']
# Link
url = 'https://v.baidu.com' + res.find('a')['href']
# Cover image link
img = res.find('img', class_='img-normal-layer')['src']
# Duration
time = res.find('span', class_='info').text
# Build the result
result = {
'title': title,
'url': url,
'img': img,
'time': time
}
results.append(result)  # Append the result
# Pagination
wrap = bs.find('div', class_='page-wrap')
pages_ = wrap.findAll('a', class_='filter-item')[:-1]
pages = []
for _ in pages_:
pages.append(int(_.text))
return {
'results': results,
# Take the maximum page number
'total': max(pages)
}
def search_news(self, query: str, pn: int = 1) -> dict:
"""百度资讯搜索
- 获取资讯搜索结果:
>>> BaiduSpider().search_news('搜索词')
{
'results': [
{
'author': 'str, 资讯来源(作者)',
'date': 'str, 资讯发布时间',
'des': 'str, 资讯简介',
'title': 'str, 资讯标题',
'url': 'str, 资讯链接'
},
{ ... },
{ ... },
{ ... },
...
],
'total': int, 搜索结果最大页码,可能会因为当前页数变化而变化
}
- 带页码:
>>> BaiduSpider().search_news('搜索词', pn=2)
{
'results': [ ... ],
'total': ...
}
Args:
query (str): 搜索query
pn (int, optional): 搜索结果的页码. Defaults to 1.
Returns:
dict: 爬取的搜索结果与总页码。
"""
url = 'https://www.baidu.com/s?rtt=1&bsst=1&tn=news&word=%s&pn=%d' % (
quote(query), (pn - 1) * 10)
# Page source
source = requests.get(url, headers=self.headers)
# Minify the HTML
code = self._minify(source.text)
bs = BeautifulSoup(self._format(code), 'html.parser')
# Search result containers
data = bs.find('div', id='content_left').findAll('div')[1].findAll('div', class_='result-op')
# print(len(data))
results = []
for res in data:
# Title
title = self._format(
res.find('h3').find('a').text)
# Link
url = res.find('h3').find('a')['href']
# Summary
des = res.find('div', class_='c-span-last').find('span', class_='c-color-text').text
# Author
author = res.find('div', class_='c-span-last').find('div', class_='news-source').find('span', class_='c-gap-right').text
# Publish date
date = res.find('div', class_='c-span-last').find('div', class_='news-source').find('span', class_='c-color-gray2').text
# Build the result
result = {
'title': title,
'author': author,
'date': date,
'des': des,
'url': url
}
results.append(result)  # Append the result
# Collect all page links
pages_ = bs.find('div', id='page').findAll('a')
# Filter out the previous/next page links
if '< 上一页' in pages_[0].text:
pages_ = pages_[1:]
if '下一页 >' in pages_[-1].text:
pages_ = pages_[:-1]
pages = []
for _ in pages_:
pages.append(int(_.find('span', class_='pc').text))
return {
'results': results,
# Maximum page number
'total': max(pages)
}
def search_wenku(self, query: str, pn: int = 1) -> dict:
"""百度文库搜索
- 普通搜索:
>>> BaiduSpider().search_wenku('搜索词')
{
'results': [
{
'date': 'str, 文章发布日期',
'des': 'str, 文章简介',
'downloads': int, 文章下载量,
'pages': int, 文章页数,
'title': 'str, 文章标题',
'type': 'str, 文章格式,为全部大写字母',
'url': 'str, 文章链接'
},
{ ... },
{ ... },
{ ... },
...
],
'total': int, 总计搜索结果的页数
}
- 带页码的搜索:
>>> BaiduSpider().search_wenku('搜索词', pn=2)
{
'results': [ ... ],
'total': ...
}
Args:
query (str): 要搜索的query
pn (int, optional): 搜索的页码. Defaults to 1.
Returns:
dict: 搜索结果和总计页数
"""
url = 'https://wenku.baidu.com/search?word=%s&pn=%d' % (
quote(query), (pn - 1) * 10)
source = requests.get(url, headers=self.headers)
source.encoding = | |
int
"""
num = c_int()
self._call_fmod("FMOD_Sound_GetMusicNumChannels", byref(num))
return num.value
@property
def name(self):
"""The name of a sound.
:type: str
"""
name = create_string_buffer(256)
self._call_fmod("FMOD_Sound_GetName", byref(name), 256)
return name.value
@property
def num_subsounds(self):
"""The number of subsounds stored within a sound.
:type: int
"""
num = c_int()
self._call_fmod("FMOD_Sound_GetNumSubSounds", byref(num))
return num.value
@property
def num_sync_points(self):
"""The number of sync points stored within a sound.
:type: int
"""
num = c_int()
self._call_fmod("FMOD_Sound_GetNumSyncPoints", byref(num))
return num.value
@property
def num_tags(self):
"""The number of metadata tags.
:type: int
"""
num = c_int()
updated = c_int()
self._call_fmod("FMOD_Sound_GetNumTags", byref(num), byref(updated))
return so(tags=num.value, updated_tags=updated.value)
@property
def open_state(self):
"""The state a sound is in after being opened with the non blocking
flag, or the current state of the streaming buffer.
When a sound is opened with
:py:attr:`~pyfmodex.enums.MODE.NONBLOCKING`, it is opened and prepared
in the background, or asynchronously. This allows the main application
to execute without stalling on audio loads. This property describes the
state of the asynchronous load routine i.e. whether it has succeeded,
failed or is still in progress.
If 'starving' is true, then you will most likely hear a
stuttering/repeating sound as the decode buffer loops on itself and
replays old data. With the ability to detect stream starvation, muting
the sound with :py:attr:`~pyfmodex.channel_control.ChannelControl.mute`
will keep the stream quiet until it is not starving any more.
Note: Always check 'open_state' to determine the state of the sound. Do
not assume the sound has finished loading.
:type: Structobject with the following members:
state (:py:class:`~pyfmodex.enums.OPENSTATE`)
Open state of a sound.
percentbuffered (int)
Filled percentage of a stream's file buffer.
starving (bool)
Starving state. True if a stream has decoded more than the stream
file buffer has ready.
diskbusy (bool)
Disk is currently being accessed for this sound.
"""
state = c_int()
percentbuffered = c_uint()
starving = c_bool()
diskbusy = c_bool()
self._call_fmod(
"FMOD_Sound_GetOpenState",
byref(state),
byref(percentbuffered),
byref(starving),
byref(diskbusy),
)
return so(
state=OPENSTATE(state.value),
percent_buffered=percentbuffered.value,
starving=starving.value,
disk_busy=diskbusy.value,
)
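# Illustrative sketch (not part of the original module): polling open_state for a sound
# that was opened elsewhere with the NONBLOCKING flag, until loading finishes. OPENSTATE
# here is the same enum this module already uses above.
#     import time
#     while sound.open_state.state not in (OPENSTATE.READY, OPENSTATE.ERROR):
#         time.sleep(0.01)  # FMOD keeps loading in the background; just wait briefly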
@property
def sound_group(self):
"""The sound's current sound group.
By default, a sound is located in the 'master sound group'. This can be
retrieved with :py:attr:`~pyfmodex.system.System.master_sound_group`.
:type: SoundGroup
"""
grp_ptr = c_void_p()
self._call_fmod("FMOD_Sound_GetSoundGroup", byref(grp_ptr))
return get_class("SoundGroup")(grp_ptr)
@sound_group.setter
def sound_group(self, group):
check_type(group, get_class("SoundGroup"))
self._call_fmod("FMOD_Sound_SetSoundGroup", group._ptr)
def get_subsound(self, index):
"""A Sound object that is contained within the parent sound.
If the sound is a stream and
:py:attr:`~pyfmodex.flags.MODE.NONBLOCKING` was not used, then this
call will perform a blocking seek/flush to the specified subsound.
If :py:attr:`~pyfmodex.flags.MODE.NONBLOCKING` was used to open this
sound and the sound is a stream, FMOD will do a non blocking seek/flush
and set the state of the subsound to
:py:attr:`~pyfmodex.enums.OPENSTATE.SEEKING`.
The sound won't be ready to be used when
:py:attr:`~pyfmodex.flags.MODE.NONBLOCKING` is used, until the state of
the sound becomes :py:attr:`~pyfmodex.enums.OPENSTATE.READY` or
:py:attr:`~pyfmodex.enums.OPENSTATE.ERROR`.
:param int index: Index of the subsound.
:rtype: Sound
"""
sh_ptr = c_void_p()
self._call_fmod("FMOD_Sound_GetSubSound", index, byref(sh_ptr))
return Sound(sh_ptr)
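# Illustrative sketch: iterating over every subsound of a container sound by combining
# the num_subsounds property with get_subsound().
#     subsounds = [sound.get_subsound(i) for i in range(sound.num_subsounds)]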
@property
def subsound_parent(self):
"""The parent Sound object that contains this subsound.
None if this sound is not a subsound.
:type: Sound
"""
sh_ptr = c_void_p()
self._call_fmod("FMOD_Sound_GetSubSoundParent", byref(sh_ptr))
return Sound(sh_ptr)
def get_sync_point(self, index):
"""Retrieve a sync point.
:param int index: Index of the sync point.
:rtype: Sync point.
"""
syncpoint = c_void_p()
self._call_fmod("FMOD_Sound_GetSyncPoint", index, byref(syncpoint))
return syncpoint.value
def get_sync_point_info(self, point, offset_type):
"""Retrieve information on an embedded sync point.
:param point: Sync point.
:param offset_type: The unit in which the point's offset should be expressed.
:rtype: Structobject with the following members:
- name: Name of the syncpoint (str)
- offset: Offset of the syncpoint, expressed in the given offset_type (int)
"""
name = create_string_buffer(256)
offset = c_uint()
self._call_fmod(
"FMOD_Sound_GetSyncPointInfo",
c_void_p(point),
byref(name),
256,
byref(offset),
offset_type.value,
)
return so(
name=name.value, offset=offset.value
)
@property
def system_object(self):
"""The parent System object.
:type: System
"""
sptr = c_void_p()
self._call_fmod("FMOD_Sound_GetSystemObject", byref(sptr))
return get_class("System")(sptr)
def play(self, channel_group=None, paused=False):
"""Instruct the parent System object to play the sound.
See :py:meth:`~pyfmodex.system.System.play_sound`.
:param ChannelGroup channel_group: Group to output to instead of the
master.
:param bool paused: Whether to start in the paused state.
:returns: Newly playing channel.
:rtype: Channel
"""
return self.system_object.play_sound(self, channel_group, paused)
def get_tag(self, index, name=None):
"""Retrieve a metadata tag.
'Tags' are metadata stored within a sound file. These can be things
like a song's name, composer etc...
The number of tags available can be found with :py:attr:`num_tags`.
The way to display or retrieve tags can be done in three different ways:
- All tags can be continuously retrieved by looping from 0 to the
numtags value in :py:attr:`num_tags` - 1. Updated tags will
refresh automatically, and the 'updated' member of the
:py:class:`~pyfmodex.structures.TAG` structure will be set to
True if a tag has been updated, due to something like a netstream
changing the song name for example.
- Tags can be retrieved by specifying -1 as the index and only
updating tags that are returned. If all tags are retrieved and
this method is called it will raise an
:py:exc:`~pyfmodex.exceptions.FmodError` with code
:py:attr:`~pyfmodex.enums.RESULT.TAGNOTFOUND`.
- Specific tags can be retrieved by specifying a name parameter.
The index can be 0 based or -1 in the same fashion as described
previously.
Note with netstreams an important consideration must be made between
songs, a tag may occur that changes the playback rate of the song. It
is up to the user to catch this and reset the playback rate with
:py:attr:`~pyfmodex.channel.Channel.frequency`.
A sample rate change will be signalled with a tag of type
:py:attr:`~pyfmodex.enums.TAGTYPE.FMOD`.
:param int index: Index into the tag list as restricted by name.
:param str name: Name of a type of tag to retrieve. Specify None to
retrieve all types of tags.
"""
name = prepare_str(name, "ascii")
tag = TAG()
ckresult(_dll.FMOD_Sound_GetTag(self._ptr, name, index, byref(tag)))
return tag
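# Illustrative sketch: the first retrieval pattern described in the docstring above,
# looping over every tag index reported by num_tags. The fields read from `tag` are
# those of the ctypes TAG structure.
#     tag_info = sound.num_tags
#     for index in range(tag_info.tags):
#         tag = sound.get_tag(index)
#         # inspect e.g. tag.name and tag.data here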
def lock(self, offset, length):
"""Give access to a portion or all the sample data of a sound for
direct manipulation.
You must always unlock the data again after you have finished with it,
using :py:meth:`unlock`.
With this method you get access to the raw audio data. If the data is
8, 16, 24 or 32bit PCM data, mono or stereo data, you must take this
into consideration when processing the data.
If the sound is created with
:py:attr:`~pyfmodex.flags.MODE.CREATECOMPRESSEDSAMPLE` the data
retrieved will be the compressed bitstream.
It is not possible to lock the following:
- A parent sound containing subsounds. A parent sound has no audio
data and :py:exc:`~pyfmodex.exceptions.FmodError` will be
raised with code :py:attr:`~pyfmodex.enums.RESULT.SUBSOUNDS`
- A stream / sound created with
:py:attr:`~pyfmodex.flags.MODE.CREATESTREAM`.
An :py:exc:`~pyfmodex.exceptions.FmodError` will be
raised with code
:py:attr:`~pyfmodex.enums.RESULT.BADCOMMAND` in this case.
The names 'lock'/'unlock' are a legacy reference to older Operating
System APIs that used to cause a mutex lock on the data, so that it
could not be written to while the 'lock' was in place. This is no
longer the case with FMOD and data can be 'locked' multiple times from
different places/threads at once.
:param int offset: Offset into the sound's buffer to be retrieved.
:param int length: Length of the data required to be retrieved. If
offset + length exceeds the length of the sample buffer, ptr2 and
len2 will be valid.
:rtype: two-tuple of two-tuples ((ptr1, len1), (ptr2, len2)) with:
- ptr1: First part of the locked data
- len1: Length of ptr1
- ptr2: Second part of the locked data if the offset + length has
exceeded the length of the sample buffer
- len2: Length of ptr2
"""
ptr1 = c_void_p()
len1 = c_uint()
ptr2 = c_void_p()
len2 = c_uint()
ckresult(
_dll.FMOD_Sound_Lock(
self._ptr,
offset,
length,
byref(ptr1),
byref(ptr2),
byref(len1),
byref(len2),
)
)
return ((ptr1, len1), (ptr2, len2))
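# Illustrative sketch of a lock/unlock round trip. `length_bytes` is assumed to have
# been computed from the sound's format beforehand, and the exact unlock() signature is
# an assumption, since unlock() is only referenced, not shown, in this file.
#     (ptr1, len1), (ptr2, len2) = sound.lock(0, length_bytes)
#     try:
#         pass  # read or write the raw bytes behind ptr1/ptr2 here
#     finally:
#         sound.unlock((ptr1, len1), (ptr2, len2))  # hand the locked regions back to FMOD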
def release(self):
"""Free this sound object.
This will stop any instances of this sound, and free the sound object
and its children if it is a multi-sound object.
If the sound was opened with
:py:attr:`~pyfmodex.flags.MODE.NONBLOCKING` and hasn't finished opening
yet, it will block. Using :py:attr:`open_state` and checking the open
state for :py:attr:`~pyfmodex.enums.OPENSTATE.READY` and
:py:attr:`~pyfmodex.enums.OPENSTATE.ERROR` is a | |
which to freeze
boundary condition regions, or the SymbolicConstant MODEL, used with
*freezeBoundaryConditionRegions*. The default value is MODEL.
geometricRestrictionEvaluationFrequency
A SymbolicConstant specifying the frequency of evaluating geometric restrictions during
mesh smoothing. Possible values are LOW, MEDIUM, and HIGH. The default value is LOW.
growthScaleFactor
A Float specifying the scale factor to apply to optimization displacements for nodes
with growth. The default value is 1.0.
haltUponViolation
A Boolean specifying whether to halt the optimization if quality criteria are not
satisfied. The default value is OFF.
layerReferenceRegion
None or a Region object specifying the region specifying the first node layer for mesh
smoothing, used when *meshSmoothingRegionMethod* is TASK_REGION_LAYERS. The default
value is None.
meshSmoothingRegionMethod
A SymbolicConstant specifying the method used to determine the mesh smoothing region.
The REGION value uses the *smoothingRegion*. The NUMBER_OF_LAYERS value uses the
*layerReferenceRegion*. The TASK_REGION_LAYERS value will smooth six layers using the
task region. Possible values are TASK_REGION_LAYERS, REGION, and NUMBER_OF_LAYERS. The
default value is TASK_REGION_LAYERS.
meshSmoothingStrategy
A SymbolicConstant specifying the method smoothing strategy. Possible values are
CONSTRAINED_LAPLACIAN and LOCAL_GRADIENT. The default value is CONSTRAINED_LAPLACIAN.
midsideInterpolation
A SymbolicConstant specifying the approach used when treating midside node positions
during optimization. POSITIONS indicates midside node positions are interpolated
linearly by position. OPTIMIZATION_DISPLACEMENT indicates they are interpolated by
optimization displacement of corner nodes. Possible values are POSITIONS and
OPTIMIZATION_DISPLACEMENT. The default value is POSITIONS.
numFreeNodeLayers
The SymbolicConstant FIX_NONE or an Int specifying the number of node layers adjoining
the task region to remain free during mesh smoothing. A value of 0 indicates that no
layers are free and all layers are fixed. The default value is 0.
numSmoothedElementLayers
None or an Int specifying the number of layers for mesh smoothing when
*meshSmoothingRegionMethod* is NUMBER_OF_LAYERS. The default value is None.
presumeFeasibleBCRegionAtStart
A Boolean specifying whether to ignore automatically frozen boundary condition regions
in the first design cycle. This is used with *freezeBoundaryConditionRegions*. The
default value is ON.
quadMaxAngle
A Float specifying the maximum angle for quad elements during mesh smoothing. The
default value is 160.0.
quadMinAngle
A Float specifying the minimum angle for quad elements during mesh smoothing. The
default value is 20.0.
quadSkew
A Float specifying the skew angle for quad elements during mesh smoothing, used with
*reportQualityViolation*. The default value is 30.0.
quadTaper
A Float specifying the taper for quad elements during mesh smoothing, used with
*reportQualityViolation*. The default value is 0.5.
region
The SymbolicConstant MODEL or a Region object specifying the region to which the
optimization task is applied. The default value is MODEL.
reportPoorQualityElements
A Boolean specifying whether to report poor quality elements during mesh smoothing. The
default value is OFF.
reportQualityViolation
A Boolean specifying whether to report a quality criteria violation during mesh
smoothing. The default value is OFF.
shrinkScaleFactor
A Float specifying the scale factor to apply to optimization displacements for nodes
with shrinkage. The default value is 1.0.
smoothingRegion
None or a Region object specifying the mesh smoothing region, used when
*meshSmoothingRegionMethod* is REGION. The default value is None.
targetMeshQuality
A SymbolicConstant specifying the target mesh quality for mesh smoothing. Possible
values are NONE, LOW, MEDIUM, and HIGH. The default value is LOW.
tetAspectRatio
A Float specifying the tet element aspect ratio during mesh smoothing. The default value
is 100.0.
tetMaxAspect
A Float specifying the maximum tet element aspect ratio during mesh smoothing. The
default value is 8.0.
tetMinAspect
A Float specifying the minimum tet element aspect ratio during mesh smoothing. The
default value is 0.222.
tetSkew
A Float specifying the tet element skew value during mesh smoothing. The default value
is 100.0.
triMaxAngle
A Float specifying the tri element maximum angle during mesh smoothing. The default
value is 140.0.
triMinAngle
A Float specifying the tri element minimum angle during mesh smoothing. The default
value is 20.0.
updateShapeBasisVectors
A SymbolicConstant specifying whether to update shape basis vectors in the first design
cycle or every design cycle. Possible values are EVERY_CYCLE and FIRST_CYCLE. The
default value is EVERY_CYCLE.
groupOperator
A Boolean specifying whether the group in the design response will be evaluated using
the existing algorithm or a new algorithm based on Abaqus sensitivities. The default
value of False means that the existing algorithm will be used.
Returns
-------
A ShapeTask object.
"""
self.optimizationTasks[name] = optimizationTask = ShapeTask(name, abaqusSensitivities, absoluteStepSizeControl,
activateDurability, additionalDurabilityFiles,
algorithm, constrainedLaplacianConvergenceLevel,
curvatureSmoothingEdgeLength, durabilityInputfile,
durabilitySolver, equalityConstraintTolerance,
featureRecognitionAngle, filterExponent,
filterMaxRadius, filterRadiusReduction,
firstCycleDeletedVolumeTechnique,
freezeBoundaryConditionRegions,
frozenBoundaryConditionRegion,
geometricRestrictionEvaluationFrequency,
growthScaleFactor, haltUponViolation,
layerReferenceRegion, meshSmoothingRegionMethod,
meshSmoothingStrategy, midsideInterpolation,
numFreeNodeLayers, numSmoothedElementLayers,
presumeFeasibleBCRegionAtStart, quadMaxAngle,
quadMinAngle, quadSkew, quadTaper, region,
reportPoorQualityElements, reportQualityViolation,
shrinkScaleFactor, smoothingRegion,
targetMeshQuality, tetAspectRatio, tetMaxAspect,
tetMinAspect, tetSkew, triMaxAngle, triMinAngle,
updateShapeBasisVectors, groupOperator)
return optimizationTask
def SizingTask(self, name: str, abaqusSensitivities: Boolean = True,
elementThicknessDeltaStopCriteria: float = 0,
freezeBoundaryConditionRegions: Boolean = OFF, freezeLoadRegions: Boolean = ON,
modeTrackingRegion: str = MODEL, numFulfilledStopCriteria: int = 2,
numTrackedModes: int = 5, objectiveFunctionDeltaStopCriteria: float = 0,
stopCriteriaDesignCycle: int = 4, thicknessMoveLimit: float = 0,
thicknessUpdateStrategy: SymbolicConstant = NORMAL, groupOperator: Boolean = OFF) -> SizingTask:
"""This method creates a SizingTask object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].SizingTask
Parameters
----------
name
A String specifying the optimization task repository key.
abaqusSensitivities
A Boolean specifying whether to use Abaqus to compute the design responses and their
sensitivities. The default value is True.
elementThicknessDeltaStopCriteria
A Float specifying the stop criteria based on the change in element thickness. The
default value is 0.5 × 10⁻².
freezeBoundaryConditionRegions
A Boolean specifying whether to exclude elements with boundary conditions from the
optimization. The default value is OFF.
freezeLoadRegions
A Boolean specifying whether to exclude elements with loads and elements with loaded
nodes from the optimization. The default value is ON.
modeTrackingRegion
The SymbolicConstant MODEL or a Region object specifying the region to use for mode
tracking. The default value is MODEL.
numFulfilledStopCriteria
An Int specifying the number of stop criteria. The default value is 2.
numTrackedModes
An Int specifying the number of modes included in mode tracking. The default value is 5.
objectiveFunctionDeltaStopCriteria
A Float specifying the stop criteria based on the change in objective function. The
default value is 0.001.
stopCriteriaDesignCycle
An Int specifying the first design cycle used to evaluate convergence criteria. The
default value is 4.
thicknessMoveLimit
A Float specifying the maximum change in thickness per design cycle. The default value
is 0.25.
thicknessUpdateStrategy
A SymbolicConstant specifying the strategy for how the thickness is updated in the
method of moving asymptotes. Possible values are NORMAL, CONSERVATIVE, and AGGRESSIVE.
The default value is NORMAL.
groupOperator
A Boolean specifying whether the group in the design response will be evaluated using
the existing algorithm or a new algorithm based on Abaqus sensitivities. The default
value of False means that the existing algorithm will be used.
Returns
-------
A SizingTask object.
"""
self.optimizationTasks[name] = optimizationTask = SizingTask(name, abaqusSensitivities,
elementThicknessDeltaStopCriteria,
freezeBoundaryConditionRegions, freezeLoadRegions,
modeTrackingRegion, numFulfilledStopCriteria,
numTrackedModes,
objectiveFunctionDeltaStopCriteria,
stopCriteriaDesignCycle, thicknessMoveLimit,
thicknessUpdateStrategy, groupOperator)
return optimizationTask
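# Illustrative sketch (hypothetical model and task names): creating a sizing task via
# the documented access path and checking the repository entry it registers.
#     task = mdb.models['Model-1'].SizingTask(name='shell-sizing', thicknessMoveLimit=0.25)
#     assert mdb.models['Model-1'].optimizationTasks['shell-sizing'] is task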
def TopologyTask(self, name: str, abaqusSensitivities: Boolean = True,
algorithm: SymbolicConstant = GENERAL_OPTIMIZATION, densityMoveLimit: float = 0,
densityUpdateStrategy: SymbolicConstant = NORMAL,
elementDensityDeltaStopCriteria: float = 0, filterRadius: float = None,
firstCycleDeletedVolume: float = 5,
firstCycleDeletedVolumeTechnique: SymbolicConstant = OFF,
freezeBoundaryConditionRegions: Boolean = OFF, freezeLoadRegions: Boolean = ON,
frequencySpectrumWeight: float = 6, initialDensity: SymbolicConstant = DEFAULT,
materialInterpolationPenalty: float = 3,
materialInterpolationTechnique: SymbolicConstant = DEFAULT, maxDensity: float = 1,
minDensity: float = None, modeTrackingRegion: SymbolicConstant = MODEL,
numDesignCycles: int = 15, numFulfilledStopCriteria: int = 2, numTrackedModes: int = 5,
objectiveFunctionDeltaStopCriteria: float = None, region: SymbolicConstant = MODEL,
softDeletionMethod: SymbolicConstant = STANDARD, softDeletionRadius: float = 0,
softDeletionRegion: str = None, softDeletionThreshold: float = None,
stepSize: SymbolicConstant = MEDIUM,
stiffnessMassDamping: typing.Union[SymbolicConstant, float] = AVERAGE_EDGE_LENGTH,
stopCriteriaDesignCycle: int = 4, structuralMassDamping: float = None,
viscousMassDamping: float = None, viscousStiffnessDamping: float = None,
groupOperator: Boolean = OFF) -> TopologyTask:
"""This method creates a TopologyTask object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].TopologyTask
| |
# Repository: gtfarng/Odoo_migrade
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import uuid
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class AccountCashboxLine(models.Model):
_inherit = 'account.cashbox.line'
default_pos_id = fields.Many2one('pos.config', string='This cashbox line is used by default when opening or closing a balance for this point of sale')
class AccountBankStmtCashWizard(models.Model):
_inherit = 'account.bank.statement.cashbox'
@api.model
def default_get(self, fields):
vals = super(AccountBankStmtCashWizard, self).default_get(fields)
config_id = self.env.context.get('default_pos_id')
if config_id:
lines = self.env['account.cashbox.line'].search([('default_pos_id', '=', config_id)])
if self.env.context.get('balance', False) == 'start':
vals['cashbox_lines_ids'] = [[0, 0, {'coin_value': line.coin_value, 'number': line.number, 'subtotal': line.subtotal}] for line in lines]
else:
vals['cashbox_lines_ids'] = [[0, 0, {'coin_value': line.coin_value, 'number': 0, 'subtotal': 0.0}] for line in lines]
return vals
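# Illustrative sketch (hypothetical record): the context keys this default_get override
# relies on when the cashbox wizard is opened for a point of sale.
#     wizard = self.env['account.bank.statement.cashbox'].with_context(
#         default_pos_id=pos_config.id, balance='start').create({})
# With balance='start' the default lines are prefilled with the configured coin counts;
# any other value keeps the coin values but resets the counts to zero.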
class PosConfig(models.Model):
_name = 'pos.config'
def _default_sale_journal(self):
journal = self.env.ref('point_of_sale.pos_sale_journal', raise_if_not_found=False)
if journal and journal.sudo().company_id == self.env.user.company_id:
return journal
return self._default_invoice_journal()
def _default_invoice_journal(self):
return self.env['account.journal'].search([('type', '=', 'sale'), ('company_id', '=', self.env.user.company_id.id)], limit=1)
def _default_pricelist(self):
return self.env['product.pricelist'].search([], limit=1)
def _get_default_location(self):
return self.env['stock.warehouse'].search([('company_id', '=', self.env.user.company_id.id)], limit=1).lot_stock_id
def _get_default_nomenclature(self):
return self.env['barcode.nomenclature'].search([], limit=1)
def _get_group_pos_manager(self):
return self.env.ref('point_of_sale.group_pos_manager')
def _get_group_pos_user(self):
return self.env.ref('point_of_sale.group_pos_user')
name = fields.Char(string='Point of Sale Name', index=True, required=True, help="An internal identification of the point of sale")
journal_ids = fields.Many2many(
'account.journal', 'pos_config_journal_rel',
'pos_config_id', 'journal_id', string='Available Payment Methods',
domain="[('journal_user', '=', True ), ('type', 'in', ['bank', 'cash'])]",)
picking_type_id = fields.Many2one('stock.picking.type', string='Picking Type')
stock_location_id = fields.Many2one(
'stock.location', string='Stock Location',
domain=[('usage', '=', 'internal')], required=True, default=_get_default_location)
journal_id = fields.Many2one(
'account.journal', string='Sale Journal',
domain=[('type', '=', 'sale')],
help="Accounting journal used to post sales entries.",
default=_default_sale_journal)
invoice_journal_id = fields.Many2one(
'account.journal', string='Invoice Journal',
domain=[('type', '=', 'sale')],
help="Accounting journal used to create invoices.",
default=_default_invoice_journal)
currency_id = fields.Many2one('res.currency', compute='_compute_currency', string="Currency")
iface_cashdrawer = fields.Boolean(string='Cashdrawer', help="Automatically open the cashdrawer")
iface_payment_terminal = fields.Boolean(string='Payment Terminal', help="Enables Payment Terminal integration")
iface_electronic_scale = fields.Boolean(string='Electronic Scale', help="Enables Electronic Scale integration")
iface_vkeyboard = fields.Boolean(string='Virtual KeyBoard', help="Enables an integrated Virtual Keyboard")
iface_print_via_proxy = fields.Boolean(string='Print via Proxy', help="Bypass browser printing and prints via the hardware proxy")
iface_scan_via_proxy = fields.Boolean(string='Scan via Proxy', help="Enable barcode scanning with a remotely connected barcode scanner")
iface_invoicing = fields.Boolean(string='Invoicing', help='Enables invoice generation from the Point of Sale', default=True)
iface_big_scrollbars = fields.Boolean('Large Scrollbars', help='For imprecise industrial touchscreens')
iface_print_auto = fields.Boolean(string='Automatic Receipt Printing', default=False,
help='The receipt will automatically be printed at the end of each order')
iface_print_skip_screen = fields.Boolean(string='Skip Receipt Screen', default=True,
help='The receipt screen will be skipped if the receipt can be printed automatically.')
iface_precompute_cash = fields.Boolean(string='Prefill Cash Payment',
help='The payment input will behave similarly to bank payment input, and will be prefilled with the exact due amount')
iface_tax_included = fields.Boolean(string='Include Taxes in Prices',
help='The displayed prices will always include all taxes, even if the taxes have been setup differently')
iface_start_categ_id = fields.Many2one('pos.category', string='Start Category',
help='The point of sale will display this product category by default. If no category is specified, all available products will be shown')
iface_display_categ_images = fields.Boolean(string='Display Category Pictures',
help="The product categories will be displayed with pictures.")
cash_control = fields.Boolean(string='Cash Control', help="Check the amount of the cashbox at opening and closing.")
receipt_header = fields.Text(string='Receipt Header', help="A short text that will be inserted as a header in the printed receipt")
receipt_footer = fields.Text(string='Receipt Footer', help="A short text that will be inserted as a footer in the printed receipt")
proxy_ip = fields.Char(string='IP Address', size=45,
help='The hostname or ip address of the hardware proxy, Will be autodetected if left empty')
active = fields.Boolean(default=True)
uuid = fields.Char(readonly=True, default=lambda self: str(uuid.uuid4()),
help='A globally unique identifier for this pos configuration, used to prevent conflicts in client-generated data')
sequence_id = fields.Many2one('ir.sequence', string='Order IDs Sequence', readonly=True,
help="This sequence is automatically created by Odoo but you can change it "
"to customize the reference numbers of your orders.", copy=False)
session_ids = fields.One2many('pos.session', 'config_id', string='Sessions')
current_session_id = fields.Many2one('pos.session', compute='_compute_current_session', string="Current Session")
current_session_state = fields.Char(compute='_compute_current_session')
last_session_closing_cash = fields.Float(compute='_compute_last_session')
last_session_closing_date = fields.Date(compute='_compute_last_session')
pos_session_username = fields.Char(compute='_compute_current_session_user')
group_by = fields.Boolean(string='Group Journal Items', default=True,
help="Check this if you want to group the Journal Items by Product while closing a Session")
pricelist_id = fields.Many2one('product.pricelist', string='Pricelist', required=True, default=_default_pricelist)
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
barcode_nomenclature_id = fields.Many2one('barcode.nomenclature', string='Barcodes', required=True, default=_get_default_nomenclature,
help='Defines what kind of barcodes are available and how they are assigned to products, customers and cashiers')
group_pos_manager_id = fields.Many2one('res.groups', string='Point of Sale Manager Group', default=_get_group_pos_manager,
help='This field is there to pass the id of the pos manager group to the point of sale client')
group_pos_user_id = fields.Many2one('res.groups', string='Point of Sale User Group', default=_get_group_pos_user,
help='This field is there to pass the id of the pos user group to the point of sale client')
tip_product_id = fields.Many2one('product.product', string='Tip Product',
help="The product used to encode the customer tip. Leave empty if you do not accept tips.")
fiscal_position_ids = fields.Many2many('account.fiscal.position', string='Fiscal Positions')
default_fiscal_position_id = fields.Many2one('account.fiscal.position', string='Default Fiscal Position')
default_cashbox_lines_ids = fields.One2many('account.cashbox.line', 'default_pos_id', string='Default Balance')
@api.depends('journal_id.currency_id', 'journal_id.company_id.currency_id')
def _compute_currency(self):
for pos_config in self:
if pos_config.journal_id:
pos_config.currency_id = pos_config.journal_id.currency_id.id or pos_config.journal_id.company_id.currency_id.id
else:
pos_config.currency_id = self.env.user.company_id.currency_id.id
@api.depends('session_ids')
def _compute_current_session(self):
for pos_config in self:
session = pos_config.session_ids.filtered(lambda r: r.user_id.id == self.env.uid and \
not r.state == 'closed' and \
'(RESCUE FOR' not in r.name)
# sessions ordered by id desc
pos_config.current_session_id = session and session[0].id or False
pos_config.current_session_state = session and session[0].state or False
@api.depends('session_ids')
def _compute_last_session(self):
PosSession = self.env['pos.session']
for pos_config in self:
session = PosSession.search_read(
[('config_id', '=', pos_config.id), ('state', '=', 'closed')],
['cash_register_balance_end_real', 'stop_at'],
order="stop_at desc", limit=1)
if session:
pos_config.last_session_closing_cash = session[0]['cash_register_balance_end_real']
pos_config.last_session_closing_date = session[0]['stop_at']
else:
pos_config.last_session_closing_cash = 0
pos_config.last_session_closing_date = False
@api.depends('session_ids')
def _compute_current_session_user(self):
for pos_config in self:
session = pos_config.session_ids.filtered(lambda s: s.state == 'opened' and '(RESCUE FOR' not in s.name)
pos_config.pos_session_username = session and session[0].user_id.name or False
@api.constrains('company_id', 'stock_location_id')
def _check_company_location(self):
if self.stock_location_id.company_id and self.stock_location_id.company_id.id != self.company_id.id:
raise UserError(_("The company of the stock location is different than the one of point of sale"))
@api.constrains('company_id', 'journal_id')
def _check_company_journal(self):
if self.journal_id and self.journal_id.company_id.id != self.company_id.id:
raise UserError(_("The company of the sale journal is different than the one of point of sale"))
@api.constrains('company_id', 'invoice_journal_id')
def _check_company_invoice_journal(self):
if self.invoice_journal_id and self.invoice_journal_id.company_id.id != self.company_id.id:
raise UserError(_("The invoice journal and the point of sale must belong to the same company"))
@api.constrains('company_id', 'journal_ids')
def _check_company_payment(self):
if self.env['account.journal'].search_count([('id', 'in', self.journal_ids.ids), ('company_id', '!=', self.company_id.id)]):
raise UserError(_("The company of a payment method is different than the one of point of sale"))
@api.constrains('fiscal_position_ids', 'default_fiscal_position_id')
def _check_default_fiscal_position(self):
if self.default_fiscal_position_id and self.default_fiscal_position_id not in self.fiscal_position_ids:
raise UserError(_("The default fiscal position must be included in the available fiscal positions of the point of sale"))
@api.onchange('iface_print_via_proxy')
def _onchange_iface_print_via_proxy(self):
self.iface_print_auto = self.iface_print_via_proxy
@api.onchange('picking_type_id')
def _onchange_picking_type_id(self):
if self.picking_type_id.default_location_src_id.usage == 'internal' and self.picking_type_id.default_location_dest_id.usage == 'customer':
self.stock_location_id = self.picking_type_id.default_location_src_id.id
@api.multi
def name_get(self):
result = []
for config in self:
if (not config.session_ids) or (config.session_ids[0].state == 'closed'):
result.append((config.id, config.name + ' (' + _('not used') + ')'))
continue
result.append((config.id, config.name + ' (' + config.session_ids[0].user_id.name + ')'))
return result
@api.model
def create(self, values):
IrSequence = self.env['ir.sequence'].sudo()
val = {
'name': _('POS Order %s') % values['name'],
'padding': 4,
'prefix': "%s/" % values['name'],
'code': "pos.order",
'company_id': values.get('company_id', False),
}
# force sequence_id field to new pos.order sequence
values['sequence_id'] = IrSequence.create(val).id
# TODO master: add field sequence_line_id on model
# this makes sure we always have one available per company
val.update(name=_('POS order line %s') % values['name'], code='pos.order.line')
IrSequence.create(val)
return super(PosConfig, self).create(values)
@api.multi
def unlink(self):
for pos_config in self.filtered(lambda pos_config: pos_config.sequence_id):
pos_config.sequence_id.unlink()
return super(PosConfig, self).unlink()
# Methods to open the POS
@api.multi
def open_ui(self):
assert len(self.ids) == 1, "you can open only one session at a time"
return {
'type': 'ir.actions.act_url',
'url': '/pos/web/',
'target': 'self',
}
@api.multi
def open_existing_session_cb_close(self):
assert len(self.ids) == 1, "you can open only one session at a time"
if self.current_session_id.cash_control:
self.current_session_id.action_pos_session_closing_control()
return self.open_session_cb()
@api.multi
def open_session_cb(self):
assert len(self.ids) == 1, "you can open only one session at a time"
if not self.current_session_id:
self.current_session_id = self.env['pos.session'].create({
'user_id': self.env.uid,
'config_id': self.id
})
if self.current_session_id.state == 'opened':
return self.open_ui()
return self._open_session(self.current_session_id.id)
return self._open_session(self.current_session_id.id)
@api.multi
def open_existing_session_cb(self):
assert len(self.ids) == 1, "you can open only one session at a time"
return self._open_session(self.current_session_id.id)
def _open_session(self, session_id):
return {
'name': _('Session'),
'view_type': 'form',
'view_mode': 'form,tree',
#!/usr/bin/env python
# Copyright (c) 2012, <NAME> and <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by the authors.
# 4. Neither the name of the authors nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
import helpers
DOT11_FRAME_CONTROL_SIZE = 2
DOT11_MANAGEMENT_FRAME_FIELDS_SIZE = 24
DOT11_DATA_FRAME_FIELDS_SIZE = 24
DOT11_PROBE_REQUEST_FRAME_FIELDS_SIZE = 24
DOT11_BEACON_FRAME_FIELDS_SIZE = 36
FCS_SIZE = 4
IE_SSID = "SSID"
IE_SUPPORTED_RATES = "Supported Rates"
IE_DS_PARAMETER_SET = "DS Parameter Set"
IE_RSN = "RSN"
IE_WPA = "WPA"
IE_EXTENDED_SUPPORTED_RATES = "Extended Supported Rates"
IE_VENDOR_SPECIFIC = "Vendor Specific"
# OUI
OUI_SIZE = 3
OUI_RSN = '\x00\x0F\xAC'
OUI_MS = '\x00\x50\xF2'
# Frame Control Capabilities
CAP_ESS = int('0000000000000001', 2)
CAP_IBSS = int('0000000000000010', 2)
CAP_CF_POLL = int('0000000000000100', 2)
CAP_CF_POLL_REQ = int('0000000000001000', 2)
CAP_PRIVACY = int('0000000000010000', 2)
CAP_SHORT_PREAMBLE = int('0000000000100000', 2)
CAP_PBCC = int('0000000001000000', 2)
CAP_CH_AGILITY = int('0000000010000000', 2)
CAP_SHORT_SLOT_TIME = int('0000010000000000', 2)
CAP_DSSS_OFDM = int('0010000000000000', 2)
TYPE_MANAGEMENT = 0
TYPE_CONTROL = 1
TYPE_DATA = 2
SUBTYPE_MANAGEMENT_ASSOCIATION_REQ = 0
SUBTYPE_MANAGEMENT_ASSOCIATION_RES = 1
SUBTYPE_MANAGEMENT_REASSOCIATION_REQ = 2
SUBTYPE_MANAGEMENT_REASSOCIATION_RES = 3
SUBTYPE_MANAGEMENT_PROBE_REQ = 4
SUBTYPE_MANAGEMENT_PROBE_RES = 5
SUBTYPE_MANAGEMENT_BEACON = 8
SUBTYPE_MANAGEMENT_ATIM = 9
SUBTYPE_MANAGEMENT_DISASSOCIATION = 10
SUBTYPE_MANAGEMENT_AUTHENTICATION = 11
SUBTYPE_MANAGEMENT_DEAUTHENTICATION = 12
SUBTYPE_MANAGEMENT_ACTION = 13
frame_type = {0: "Management",
1: "Control",
2: "Data"}
management_subtype = {0: "Association Request",
1: "Association Response",
2: "Reassociation Request",
3: "Reassociation Response",
4: "Probe Request",
5: "Probe Response",
8: "Beacon",
9: "Announcement Traffic Indication Message",
10: "Disassociation",
11: "Authentication",
12: "Deauthentication",
13: "Action"}
control_subtype = {8: "Block Acknowledgment Request",
9: "Block Acknowledgment",
10: "Power Save-Poll",
11: "RTS",
12: "CTS",
13: "ACK",
14: "Contention-Free-End",
15: "CF-End+CF-ACK"}
data_subtype = {0: "Data",
1: "Data+CF-ACK",
2: "Data+CF-Poll",
3: "Data+CF-ACK+CF-Poll",
4: "Null Data",
5: "CF-ACK",
6: "CF-Poll",
7: "CF-ACK+CF-Poll",
8: "QoS Data",
9: "QoS Data + CF-ACK",
10: "QoS Data + CF-Poll",
11: "QoS Data + CF-ACK + CF-Poll",
12: "QoS Null Data",
13: "QoS CF-ACK",
14: "QoS CF-Poll",
15: "QoS CF-ACK + CF-Poll"}
frame_control_flags = {"ToDS": 1,
"FromDS": 2,
"MoreFrag": 4,
"Retry": 8,
"PowerManagement": 16,
"More Data": 32,
"Protected": 64,
"Order": 128}
information_elements_id = {0x00: IE_SSID,
0x01: IE_SUPPORTED_RATES,
0x03: IE_DS_PARAMETER_SET,
0x30: IE_RSN,
0x32: IE_EXTENDED_SUPPORTED_RATES,
0xdd: IE_VENDOR_SPECIFIC}
rsn_cipher_suite_id = {0: "Same as Group Cipher Suite",
1: "WEP-40",
2: "TKIP",
4: "CCMP",
5: "WEP-104"}
rsn_authentication_suite_id = {1: "PMK", 2: "PSK"}
class InvalidInformationElement(Exception):
pass
class FrameControl(object):
def __init__(self, data):
if len(data) < DOT11_FRAME_CONTROL_SIZE:
raise IndexError("Frame too short")
self._frameControl = data[:DOT11_FRAME_CONTROL_SIZE]
self._protocol = 0
self._type = 0
self._subtype = 0
self._toDs = False
self._fromDs = False
self._moreFrag = False
self._retry = False
self._powerManagement = False
self._moreData = False
self._protectedFrame = False
self._order = False
self._processFrame()
def _processFlags(self, flags):
'''Process Frame Control Flags.'''
if (flags & frame_control_flags["ToDS"]) > 0:
self._toDs = True
if (flags & frame_control_flags["FromDS"]) > 0:
self._fromDs = True
if (flags & frame_control_flags["MoreFrag"]) > 0:
self._moreFrag = True
if (flags & frame_control_flags["Retry"]) > 0:
self._retry = True
if (flags & frame_control_flags["PowerManagement"]) > 0:
self._powerManagement = True
if (flags & frame_control_flags["More Data"]) > 0:
self._moreData = True
if (flags & frame_control_flags["Protected"]) > 0:
self._protectedFrame = True
if (flags & frame_control_flags["Order"]) > 0:
self._order = True
def _processFrame(self):
'''Process Frame Control.'''
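# Frame Control field layout (16 bits): bits 0-1 protocol version, bits 2-3 type,
# bits 4-7 subtype, bits 8-15 flags (ToDS, FromDS, MoreFrag, Retry, PowerManagement,
# MoreData, Protected, Order). The masks below extract each group from the unpacked value.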
frameControl = struct.unpack("H", self._frameControl)[0]
self._protocol = frameControl & 0x0003
self._type = (frameControl & 0x000C) >> 2
self._subtype = (frameControl & 0x00F0) >> 4
flags = (frameControl & 0xFF00) >> 8
self._processFlags(flags)
def getProtocol(self):
'''Return frame control protocol.'''
return self._protocol
def getType(self):
'''Return frame control type.'''
return self._type
def getSubtype(self):
'''Return frame control subtype.'''
return self._subtype
def getToDs(self):
'''Return frame control to DS.'''
return self._toDs
def getFromDs(self):
'''Return frame control from DS.'''
return self._fromDs
def getMoreFrag(self):
'''Return frame control more frag.'''
return self._moreFrag
def getRetry(self):
'''Return frame control retry.'''
return self._retry
def getPowerManagement(self):
'''Return frame control power management flag.'''
return self._powerManagement
def getMoreData(self):
'''Return frame control more data flag.'''
return self._moreData
def getProtectedFrame(self):
'''Return frame control protected flag.'''
return self._protectedFrame
def getOrder(self):
'''Return frame control order flag.'''
return self._order
class ManagementFrame(object):
def __init__(self, data):
self._frame_size = len(data)
# Essential fields on the data frame
# Field ----------- Size
# frame control --- 2 B
# duration -------- 2 B
# destination ----- 6 B
# source ---------- 6 B
# bssid ----------- 6 B
# sequence ctrl --- 2 B
if self._frame_size < DOT11_DATA_FRAME_FIELDS_SIZE:
raise IndexError("Frame too short.")
index = 0
self._fc = FrameControl(data)
index += 2
self._duration = data[index:index + 2]
index += 2
# Addresses
self._destination = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
self._source = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
self._bssid = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
seqctrl = struct.unpack("H", data[index:index + 2])[0]
self._fragment = (seqctrl & 0x000F)
self._sequence = (seqctrl & 0xFFF0) >> 4
def getBssid(self):
'''Return the bssid of the data frame.'''
return self._bssid
def getSourceAddress(self):
'''Return the source address of the data frame.'''
return self._source
def getDestinationAddress(self):
'''Return the destination address of the data frame.'''
return self._destination
class DataFrame(object):
def __init__(self, data):
self._frame_size = len(data)
# Essential fields on the data frame
# Field ----------- Size
# frame control --- 2 B
# duration -------- 2 B
# address1 -------- 6 B
# address2 -------- 6 B
# address3 -------- 6 B
# sequence ctrl --- 2 B
# address4 -------- 6 B (optional)
if self._frame_size < DOT11_DATA_FRAME_FIELDS_SIZE:
raise IndexError("Frame too short.")
index = 0
self._fc = FrameControl(data)
index += 2
self._duration = data[index:index + 2]
index += 2
self._ibss = False
self._infrastructure = False
self._wds = False
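# Address interpretation depends on the ToDS/FromDS flags, as handled below:
#   ToDS=0, FromDS=0 (IBSS):  addr1=destination, addr2=source, addr3=BSSID
#   ToDS=1, FromDS=0:         addr1=BSSID, addr2=source, addr3=destination
#   ToDS=0, FromDS=1:         addr1=destination, addr2=BSSID, addr3=source
#   ToDS=1, FromDS=1 (WDS):   addr1 treated as BSSID, addr3=destination, addr4=source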
# Addresses
self._address1 = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
self._address2 = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
self._address3 = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
to_ds = self._fc.getToDs()
from_ds = self._fc.getFromDs()
# IBSS
if not to_ds and not from_ds:
self._ibss = True
self._destination = self._address1
self._source = self._address2
self._bssid = self._address3
# Infrastructure
if (to_ds and not from_ds) or (not to_ds and from_ds):
self._infrastructure = True
if (to_ds and not from_ds):
self._bssid = self._address1
self._source = self._address2
self._destination = self._address3
else:
self._destination = self._address1
self._bssid = self._address2
self._source = self._address3
# WDS
if to_ds and from_ds:
self._address4 = helpers.bytes_to_mac_address(data[index:index + 6])
index += 6
self._wds = True
self._bssid = self._address1
self._destination = self._address3
self._source = self._address4
seqctrl = struct.unpack("H", data[index:index + 2])[0]
self._fragment = (seqctrl & 0x000F)
self._sequence = (seqctrl & 0xFFF0) >> 4
def isIbss(self):
'''Returns True if frame is from a IBSS network.'''
return self._ibss
def isInfrastructure(self):
'''Returns True if frame is from a Infrastructure network.'''
return self._infrastructure
def isWds(self):
'''Returns True if frame is from a WDS network.'''
return self._wds
def getBssid(self):
'''Return the bssid of the data frame.'''
return self._bssid
def getSourceAddress(self):
'''Return the source address of the data frame.'''
return self._source
def getDestinationAddress(self):
'''Return the destination address of the data frame.'''
return self._destination
class ProbeRequest(object):
def __init__(self, data):
self._frame_size = len(data)
# Essential fields on the beacon frame
# Field ----------- Size
# frame control --- 2 B
# duration -------- 2 B
# destination ----- 6 B
# source ---------- 6 B
# bssid ----------- 6 B
# sequence ctrl --- 2 B
incoming_bids:
if bid_txid == bid["txid"]:
tokenid = bid["tokenid"]
fill_sum = bid["totalrequired"]
fillbid_hex = rpc_connection.tokenfillbid(tokenid, bid_txid, str(fill_sum))
try:
fillbid_txid = rpc_connection.sendrawtransaction(fillbid_hex["hex"])
except Exception as e:
print(e)
print(fillbid_hex)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Warrior successfully sold. Txid is: " + fillbid_txid, "green"))
input("Press [Enter] to continue...")
break
if want_to_sell == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def find_warriors_asks(rpc_connection):
warriors_list = warriors_scanner_for_dex(rpc_connection)
warriors_asks = []
for player in warriors_list:
orders = rpc_connection.tokenorders(player)
if len(orders) > 0:
for order in orders:
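# In the tokenorders output, funcid "s" appears to mark sell (ask) orders,
# so only those are collected here.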
if order["funcid"] == "s":
warriors_asks.append(order)
for ask in warriors_asks:
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("Order info: \n", "red"))
print("Ask txid: " + ask["txid"])
print("Price: " + str(ask["price"]) + "\n")
while True:
want_to_buy = input("Do you want to buy any warrior? [y/n]: ")
if want_to_buy == "y":
ask_txid = input("Input asktxid which you want to fill: ")
for ask in warriors_asks:
if ask_txid == ask["txid"]:
tokenid = ask["tokenid"]
try:
fillask_raw = rpc_connection.tokenfillask(tokenid, ask_txid, "1")
except Exception as e:
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
try:
fillask_txid = rpc_connection.sendrawtransaction(fillask_raw["hex"])
except Exception as e:
print(e)
print(fillask_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Warrior successfully bought. Txid is: " + fillask_txid, "green"))
input("Press [Enter] to continue...")
break
if want_to_buy == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def warriors_orders_check(rpc_connection):
my_orders_list = rpc_connection.mytokenorders("17")
warriors_orders = {}
for order in my_orders_list:
player_info = rogue_player_info(rpc_connection, order["tokenid"])
if "status" in player_info and player_info["status"] == "error":
pass
else:
warriors_orders[order["tokenid"]] = order
bids_list = []
asks_list = []
for order in warriors_orders:
if warriors_orders[order]["funcid"] == "s":
asks_list.append(warriors_orders[order])
else:
bids_list.append(order)
print(colorize("\nYour asks:\n", "blue"))
print(colorize("\n********************************\n", "red"))
for ask in asks_list:
print("txid: " + ask["txid"])
print("Price: " + ask["price"])
print("Warrior tokenid: " + ask["tokenid"])
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "green"))
print(colorize("\nYour bids:\n", "blue"))
print(colorize("\n********************************\n", "red"))
for bid in bids_list:
print("txid: " + bid["txid"])
print("Price: " + bid["price"])
print("Warrior tokenid: " + bid["tokenid"])
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + bid["tokenid"])
player_data = rogue_player_info(rpc_connection, bid["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "green"))
while True:
need_order_change = input("Do you want to cancel any of your orders? [y/n]: ")
if need_order_change == "y":
while True:
ask_or_bid = input("Do you want cancel ask or bid? [a/b]: ")
if ask_or_bid == "a":
ask_txid = input("Input txid of ask you want to cancel: ")
warrior_tokenid = input("Input warrior token id for this ask: ")
try:
ask_cancellation_hex = rpc_connection.tokencancelask(warrior_tokenid, ask_txid)
ask_cancellation_txid = rpc_connection.sendrawtransaction(ask_cancellation_hex["hex"])
except Exception as e:
print(colorize("Please re-check your input!", "red"))
print(colorize("Ask successfully cancelled. Cancellation txid: " + ask_cancellation_txid, "green"))
break
if ask_or_bid == "b":
bid_txid = input("Input txid of bid you want to cancel: ")
warrior_tokenid = input("Input warrior token id for this bid: ")
try:
bid_cancellation_hex = rpc_connection.tokencancelbid(warrior_tokenid, bid_txid)
bid_cancellation_txid = rpc_connection.sendrawtransaction(bid_cancellation_hex["hex"])
except Exception as e:
print(colorize("Please re-check your input!", "red"))
print(colorize("Bid successfully cancelled. Cancellation txid: " + bid_cancellation_txid, "green"))
break
else:
print(colorize("Choose a or b!", "red"))
input("Press [Enter] to continue...")
break
if need_order_change == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def set_warriors_name(rpc_connection):
warriors_name = input("What warrior name do you want for legends and tales about your brave adventures?: ")
warrior_name_arg = '"' + "[%22" + warriors_name + "%22]" + '"'
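# "%22" is the URL-encoded double quote, so the argument handed to cclib is
# effectively a JSON-style array: ["<warrior name>"].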
set_name_status = rpc_connection.cclib("setname", "17", warrior_name_arg)
print(colorize("Warrior name successfully set", "green"))
print("Result: " + set_name_status["result"])
print("Name: " + set_name_status["pname"])
input("Press [Enter] to continue...")
def top_warriors_rating(rpc_connection):
start_time = time.time()
warriors_list = warriors_scanner_for_rating(rpc_connection)
warriors_exp = {}
for warrior in warriors_list:
warriors_exp[warrior] = warriors_list[warrior]["experience"]
warriors_exp_sorted = {}
temp = [(k, warriors_exp[k]) for k in sorted(warriors_exp, key=warriors_exp.get, reverse=True)]
for k,v in temp:
warriors_exp_sorted[k] = v
counter = 0
for experienced_warrior in warriors_exp_sorted:
if counter < 20:
counter = counter + 1
print("\n" + str(counter) + " place.")
print(colorize("\n================================\n", "blue"))
player_data = rogue_player_info(rpc_connection, experienced_warrior)["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print("--- %s seconds ---" % (time.time() - start_time))
input("Press [Enter] to continue...")
def exit():
sys.exit()
def exit_main():
return 'back to Antara modules menu'
def warrior_trasnfer(rpc_connection):
print(colorize("Your brave warriors: \n", "blue"))
print_players_list(rpc_connection)
print("\n")
while True:
need_transfer = input("Do you want to transfer any warrior? [y/n]: ")
if need_transfer == "y":
warrior_tokenid = input("Input warrior tokenid: ")
recipient_pubkey = input("Input recipient pubkey: ")
try:
token_transfer_hex = rpc_connection.tokentransfer(warrior_tokenid, recipient_pubkey, "1")
token_transfer_txid = rpc_connection.sendrawtransaction(token_transfer_hex["hex"])
except Exception as e:
print(e)
print("Something went wrong. Please be careful with your input next time!")
input("Press [Enter] to continue...")
break
print(colorize("Warrior successfully transferred! Transfer txid: " + token_transfer_txid, "green"))
input("Press [Enter] to continue...")
break
if need_transfer == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def check_if_config_is_here(rpc_connection, assetchain_name):
config_name = assetchain_name + ".conf"
if os.path.exists(config_name):
print(colorize("Config is already in daemon folder", "green"))
else:
if operating_system == 'Darwin':
path_to_config = os.environ['HOME'] + '/Library/Application Support/Komodo/' + assetchain_name + '/' + config_name
elif operating_system == 'Linux':
path_to_config = os.environ['HOME'] + '/.komodo/' + assetchain_name + '/' + config_name
elif operating_system == 'Win64' or operating_system == 'Windows':
path_to_config = '%s/komodo/%s/%s' % (os.environ['APPDATA'], assetchain_name, config_name)
try:
copy(path_to_config, os.getcwd())
except Exception as e:
print(e)
print("Can't copy config to the current daemon directory automatically for some reason.")
print("Please copy it manually. It is located here: " + path_to_config)
def find_game_keystrokes_in_log(gametxid):
operating_system = platform.system()
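# Roughly equivalent to the shell pipeline `grep <gametxid> keystrokes.log`
# (type/findstr are used on Windows); matching lines are returned as a list of strings.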
if operating_system == 'Win64' or operating_system == 'Windows':
p1 = subprocess.Popen(["type", "keystrokes.log"], stdout=subprocess.PIPE, shell=True)
p2 = subprocess.Popen(["findstr", gametxid], stdin=p1.stdout, stdout=subprocess.PIPE, shell=True)
else:
p1 = subprocess.Popen(["cat", "keystrokes.log"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", gametxid], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
keystrokes_log_for_game = bytes.decode(output).split("\n")
return keystrokes_log_for_game
def check_if_tx_in_mempool(rpc_connection, txid):
while True:
mempool = rpc_connection.getrawmempool()
if txid in mempool:
print(colorize("Waiting for " + txid + " transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Transaction is mined", "green"))
break
def gateway_info_tui(rpc_connection, gw_index=''):
if gw_index == '':
while True:
print(colorize("\nGateways created on this assetchain: \n", "blue"))
gateways_list = rpc_connection.gatewayslist()
if len(gateways_list) == 0:
print("Seems like no gateways have been created on this assetchain yet!\n")
input("Press [Enter] to continue...")
break
else:
i = 1
for gateway in gateways_list:
print("["+str(i)+"] "+gateway)
i += 1
print(colorize('_' * 65, "blue"))
print("\n")
gw_selected = input("Select Gateway Bind TXID: ")
gw_index = int(gw_selected)-1
try:
bind_txid = gateways_list[gw_index]
break
except:
print("Invalid selection, must be a number between 1 and "+str(len(gateways_list)))
pass
else:
while True:
gateways_list = rpc_connection.gatewayslist()
try:
bind_txid = gateways_list[gw_index]
break
except:
print("Invalid gateway index, select manually...")
gateway_info_tui(rpc_connection)
pass
try:
info = rpc_connection.gatewaysinfo(bind_txid)
print(colorize("Gateways Bind TXID ["+str(bind_txid)+"]", 'green'))
print(colorize("Gateways Oracle TXID ["+str(info['oracletxid'])+"]", 'green'))
print(colorize("Gateways Token TXID ["+str(info['tokenid'])+"]", 'green'))
print(colorize("Gateways Coin ["+str(info['coin'])+"]", 'green'))
print(colorize("Gateways Pubkeys ["+str(info['pubkeys'])+"]", 'green'))
print(colorize("Gateways Deposit Address ["+str(info['deposit'])+"]", 'green'))
print(colorize("Gateways Total Supply | |
# NG-RO/osm_ng_ro/ns.py
# -*- coding: utf-8 -*-
##
# Copyright 2020 Telefonica Investigacion y Desarrollo, S.A.U.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import logging
# import yaml
from traceback import format_exc as traceback_format_exc
from osm_ng_ro.ns_thread import NsWorker
from osm_ng_ro.validation import validate_input, deploy_schema
from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version
from osm_common.dbbase import DbException
from osm_common.fsbase import FsException
from osm_common.msgbase import MsgException
from http import HTTPStatus
from uuid import uuid4
from threading import Lock
from random import choice as random_choice
from time import time
from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
__author__ = "<NAME> <<EMAIL>>"
min_common_version = "0.1.16"
class NsException(Exception):
def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
self.http_code = http_code
super(Exception, self).__init__(message)
def get_process_id():
"""
Obtain a unique ID for this process. If running from inside docker, it will get the docker ID. If not, it
will provide a random one
:return: Obtained ID
"""
# Try getting docker id. If fails, get pid
try:
with open("/proc/self/cgroup", "r") as f:
text_id_ = f.readline()
_, _, text_id = text_id_.rpartition("/")
text_id = text_id.replace("\n", "")[:12]
if text_id:
return text_id
except Exception:
pass
# Return a random id
return "".join(random_choice("0123456789abcdef") for _ in range(12))
def versiontuple(v):
"""utility for compare dot separate versions. Fills with zeros to proper number comparison"""
filled = []
for point in v.split("."):
filled.append(point.zfill(8))
return tuple(filled)
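# Illustrative example (not from the original module): zero-padding makes tuple
# comparison behave numerically, e.g. versiontuple("0.9.0") < versiontuple("0.10.0")
# is True, whereas the plain string comparison "0.9.0" < "0.10.0" is False.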
class Ns(object):
def __init__(self):
self.db = None
self.fs = None
self.msg = None
self.config = None
# self.operations = None
self.logger = logging.getLogger("ro.ns")
self.map_topic = {}
self.write_lock = None
self.assignment = {}
self.next_worker = 0
self.plugins = {}
self.workers = []
def init_db(self, target_version):
pass
def start(self, config):
"""
Connect to database, filesystem storage, and messaging
:param config: two-level dictionary with configuration. Top level should contain 'database', 'storage'
and 'message' sections, each selecting a driver (see the checks below)
:return: None
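A minimal sketch of the expected shape (field names follow the checks below;
the concrete values are only illustrative):
{"database": {"driver": "mongo", ...}, "storage": {"driver": "local", ...}, "message": {"driver": "kafka", ...}}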
"""
self.config = config
self.config["process_id"] = get_process_id() # used for HA identity
# check right version of common
if versiontuple(common_version) < versiontuple(min_common_version):
raise NsException("Not compatible osm/common version '{}'. Needed '{}' or higher".format(
common_version, min_common_version))
try:
if not self.db:
if config["database"]["driver"] == "mongo":
self.db = dbmongo.DbMongo()
self.db.db_connect(config["database"])
elif config["database"]["driver"] == "memory":
self.db = dbmemory.DbMemory()
self.db.db_connect(config["database"])
else:
raise NsException("Invalid configuration param '{}' at '[database]':'driver'".format(
config["database"]["driver"]))
if not self.fs:
if config["storage"]["driver"] == "local":
self.fs = fslocal.FsLocal()
self.fs.fs_connect(config["storage"])
elif config["storage"]["driver"] == "mongo":
self.fs = fsmongo.FsMongo()
self.fs.fs_connect(config["storage"])
else:
raise NsException("Invalid configuration param '{}' at '[storage]':'driver'".format(
config["storage"]["driver"]))
if not self.msg:
if config["message"]["driver"] == "local":
self.msg = msglocal.MsgLocal()
self.msg.connect(config["message"])
elif config["message"]["driver"] == "kafka":
self.msg = msgkafka.MsgKafka()
self.msg.connect(config["message"])
else:
raise NsException("Invalid configuration param '{}' at '[message]':'driver'".format(
config["message"]["driver"]))
# TODO load workers to deal with existing database tasks
self.write_lock = Lock()
except (DbException, FsException, MsgException) as e:
raise NsException(str(e), http_code=e.http_code)
def stop(self):
try:
if self.db:
self.db.db_disconnect()
if self.fs:
self.fs.fs_disconnect()
if self.msg:
self.msg.disconnect()
self.write_lock = None
except (DbException, FsException, MsgException) as e:
raise NsException(str(e), http_code=e.http_code)
for worker in self.workers:
worker.insert_task(("terminate",))
def _create_worker(self, vim_account_id):
# TODO make use of the limit self.config["global"]["server.ns_threads"]
worker_id = next((i for i in range(len(self.workers)) if not self.workers[i].is_alive()), None)
if worker_id is None:
worker_id = len(self.workers)
self.workers.append(NsWorker(worker_id, self.config, self.plugins, self.db))
self.workers[worker_id].start()
self.workers[worker_id].insert_task(("load_vim", vim_account_id))
return worker_id
def _assign_vim(self, vim_account_id):
if vim_account_id not in self.assignment:
self.assignment[vim_account_id] = self._create_worker(vim_account_id)
def _get_cloud_init(self, where):
"""
:param where: can be 'vnfd_id:file:file_name' or 'vnfd_id:vdu:vdu_index'
:return:
"""
vnfd_id, _, other = where.partition(":")
_type, _, name = other.partition(":")
vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
if _type == "file":
base_folder = vnfd["_admin"]["storage"]
cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], name)
with self.fs.file_open(cloud_init_file, "r") as ci_file:
cloud_init_content = ci_file.read()
elif _type == "vdu":
cloud_init_content = vnfd["vdu"][int(name)]["cloud-init"]
else:
raise NsException("Mismatch descriptor for cloud init: {}".format(where))
return cloud_init_content
def _parse_jinja2(self, cloud_init_content, params, context):
try:
env = Environment()
ast = env.parse(cloud_init_content)
mandatory_vars = meta.find_undeclared_variables(ast)
if mandatory_vars:
for var in mandatory_vars:
if not params or var not in params:
raise NsException(
"Variable '{}' defined at vnfd='{}' must be provided in the instantiation parameters "
"inside the 'additionalParamsForVnf' block".format(var, context))
template = Template(cloud_init_content)
return template.render(params or {})
except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
raise NsException("Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(context, e))
def _create_db_ro_nsrs(self, nsr_id, now):
try:
key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=2048
)
private_key = key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.PKCS8,
crypto_serialization.NoEncryption())
public_key = key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
)
private_key = private_key.decode('utf8')
public_key = public_key.decode('utf8')
except Exception as e:
raise NsException("Cannot create ssh-keys: {}".format(e))
schema_version = "1.1"
private_key_encrypted = self.db.encrypt(private_key, schema_version=schema_version, salt=nsr_id)
db_content = {
"_id": nsr_id,
"_admin": {
"created": now,
"modified": now,
"schema_version": schema_version
},
"public_key": public_key,
"private_key": private_key_encrypted,
"actions": [],
}
self.db.create("ro_nsrs", db_content)
return db_content
def deploy(self, session, indata, version, nsr_id, *args, **kwargs):
print("ns.deploy session={} indata={} version={} nsr_id={}".format(session, indata, version, nsr_id))
validate_input(indata, deploy_schema)
action_id = indata.get("action_id", str(uuid4()))
task_index = 0
# get current deployment
db_nsr = None
# db_nslcmop = None
db_nsr_update = {} # update operation on nsrs
db_vnfrs_update = {}
# db_nslcmop_update = {} # update operation on nslcmops
db_vnfrs = {} # vnf's info indexed by _id
vdu2cloud_init = {}
step = ''
logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
self.logger.debug(logging_text + "Enter")
try:
step = "Getting ns and vnfr record from db"
# db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
db_ro_tasks = []
db_new_tasks = []
# read from db: vnf's of this ns
step = "Getting vnfrs from db"
db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
if not db_vnfrs_list:
raise NsException("Cannot obtain associated VNF for ns")
for vnfr in db_vnfrs_list:
db_vnfrs[vnfr["_id"]] = vnfr
db_vnfrs_update[vnfr["_id"]] = {}
now = time()
db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)
if not db_ro_nsr:
db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now)
ro_nsr_public_key = db_ro_nsr["public_key"]
# check that action_id is not in the list of actions. Suffixed with :index
if action_id in db_ro_nsr["actions"]:
index = 1
while True:
new_action_id = "{}:{}".format(action_id, index)
if new_action_id not in db_ro_nsr["actions"]:
action_id = new_action_id
self.logger.debug(logging_text + "Changing action_id in use to {}".format(action_id))
break
index += 1
def _create_task(item, action, target_record, target_record_id, extra_dict=None):
nonlocal task_index
nonlocal action_id
nonlocal nsr_id
task = {
"action_id": action_id,
"nsr_id": nsr_id,
"task_id": "{}:{}".format(action_id, task_index),
"status": "SCHEDULED",
"action": action,
"item": item,
"target_record": target_record,
"target_record_id": target_record_id,
}
if extra_dict:
task.update(extra_dict) # params, find_params, depends_on
task_index += 1
return task
def _create_ro_task(vim_account_id, item, action, target_record, target_record_id, extra_dict=None):
nonlocal action_id
nonlocal task_index
nonlocal now
_id = action_id + ":" + str(task_index)
db_ro_task = {
"_id": _id,
"locked_by": None,
"locked_at": 0.0,
"target_id": "vim:" + vim_account_id,
"vim_info": {
"created": False,
"created_items": None,
"vim_id": None,
"vim_name": None,
"vim_status": None,
"vim_details": None,
"refresh_at": None,
},
"modified_at": now,
"created_at": now,
"to_check_at": now,
"tasks": [_create_task(item, action, target_record, target_record_id, extra_dict)],
}
return db_ro_task
def _process_image_params(target_image, vim_info):
find_params = {}
if target_image.get("image"):
find_params["filter_dict"] = {"name": target_image.get("image")}
if target_image.get("vim_image_id"):
find_params["filter_dict"] = {"id": target_image.get("vim_image_id")}
if target_image.get("image_checksum"):
find_params["filter_dict"] = {"checksum": target_image.get("image_checksum")}
return {"find_params": find_params}
def _process_flavor_params(target_flavor, vim_info):
def _get_resource_allocation_params(quota_descriptor):
"""
read the quota_descriptor from vnfd and fetch the resource allocation properties from the
descriptor object
:param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
:return: quota params for limit, reserve, shares from the descriptor object
"""
quota = {}
if quota_descriptor.get("limit"):
quota["limit"] = int(quota_descriptor["limit"])
if quota_descriptor.get("reserve"):
quota["reserve"] = int(quota_descriptor["reserve"])
if quota_descriptor.get("shares"):
quota["shares"] = int(quota_descriptor["shares"])
return quota
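# For example, a quota descriptor like {"limit": "10000", "reserve": "5000"} would
# yield {"limit": 10000, "reserve": 5000} (illustrative values, not from an actual descriptor).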
flavor_data = {
"disk": int(target_flavor["storage-gb"]),
# "ram": max(int(target_flavor["memory-mb"]) // 1024, 1),
# ^ TODO manage at vim_connectors MB instead of GB
"ram": int(target_flavor["memory-mb"]),
"vcpus": target_flavor["vcpu-count"],
}
if target_flavor.get("guest-epa"):
extended = {}
numa = {}
epa_vcpu_set = False
if target_flavor["guest-epa"].get("numa-node-policy"):
numa_node_policy = target_flavor["guest-epa"].get("numa-node-policy")
if numa_node_policy.get("node"):
numa_node = numa_node_policy["node"][0]
if numa_node.get("num-cores"):
numa["cores"] = numa_node["num-cores"]
epa_vcpu_set = True
if numa_node.get("paired-threads"):
if numa_node["paired-threads"].get("num-paired-threads"):
numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
epa_vcpu_set = True
if len(numa_node["paired-threads"].get("paired-thread-ids")):
numa["paired-threads-id"] = []
for pair in numa_node["paired-threads"]["paired-thread-ids"]:
numa["paired-threads-id"].append(
(str(pair["thread-a"]), str(pair["thread-b"]))
)
if numa_node.get("num-threads"):
numa["threads"] = int(numa_node["num-threads"])
epa_vcpu_set = True
if numa_node.get("memory-mb"):
| |
from contextlib import contextmanager
from datetime import datetime
from textwrap import dedent
from typing import Dict, List, Optional, ContextManager, Set
from zkay.compiler.privacy.circuit_generation.circuit_helper import CircuitHelper, HybridArgumentIdf
from zkay.config import cfg
from zkay.utils.multiline_formatter import MultiLineFormatter
from zkay.zkay_ast.ast import ContractDefinition, SourceUnit, ConstructorOrFunctionDefinition, \
indent, FunctionCallExpr, IdentifierExpr, BuiltinFunction, \
StateVariableDeclaration, MemberAccessExpr, IndexExpr, Parameter, TypeName, AnnotatedTypeName, Identifier, \
ReturnStatement, EncryptionExpression, MeExpr, Expression, CipherText, Array, \
AddressTypeName, StructTypeName, HybridArgType, CircuitInputStatement, AddressPayableTypeName, \
CircuitComputationStatement, VariableDeclaration, Block, KeyLiteralExpr, VariableDeclarationStatement, LocationExpr, \
PrimitiveCastExpr, EnumDefinition, EnumTypeName, UintTypeName, \
StatementList, StructDefinition, NumberTypeName, EnterPrivateKeyStatement, ArrayLiteralExpr, NumberLiteralExpr, \
BoolTypeName
from zkay.zkay_ast.visitor.python_visitor import PythonCodeVisitor
def api(name: str, invoker: str = 'self') -> str:
from zkay.transaction.offchain import ApiWrapper
assert name in dir(ApiWrapper)
return f'{invoker}.api.{name}'
PRIV_VALUES_NAME = f'{cfg.reserved_name_prefix}priv'
IS_EXTERNAL_CALL = f'{cfg.reserved_name_prefix}is_ext'
SCALAR_FIELD_NAME = 'bn128_scalar_field'
class PythonOffchainVisitor(PythonCodeVisitor):
"""
This visitor generates python code which is able to deploy, connect to and issue transactions for the specified contract.
The generated code includes both a class corresponding to the contract, as well as a main function for interactive use.
The class has the following two static methods:
* deploy: Compile all necessary contracts (main contract + libraries), deploy them onto a test chain and return a contract handle.
* connect: Get a handle for an already deployed contract (by specifying the on-chain address of the contract). This method automatically verifies the integrity of the remote contract.
If the visited AST contains only a single contract, global deploy and connect functions for that contract are also added to the python
code.
For every zkay function, the class has a corresponding instance method with matching name and (untransformed) signature.
To issue a zkay transaction, simply call one of these functions.
All private parameters will be encrypted automatically. The function will then simulate solidity execution and circuit computations
to obtain all required public circuit inputs. Finally it automatically generates the zero knowledge proof and issues a
transformed transaction (encrypted arguments, additional circuit output and proof arguments added).
If a require statement fails during simulation, a RequireException is raised.
When a state variable is read before it is written in a transaction, its initial value is pulled from the blockchain.
Required foreign public keys are also downloaded from the PKI contract on the block chain.
The main function simply loads the zkay configuration from the circuit's manifest, generates encryption keys if necessary
and enters an interactive python shell.
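Hypothetical usage of a generated module (the contract is assumed to take no
constructor arguments; the address string is a placeholder):
me = create_dummy_accounts(1)
c = deploy(user=me) # compile + deploy, returns a contract handle
c2 = connect('<deployed contract address>', user=me) # attach to an existing instance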
"""
def __init__(self, circuits: List[CircuitHelper]):
super().__init__(False)
self.circuits: Dict[ConstructorOrFunctionDefinition, CircuitHelper] = {cg.fct: cg for cg in circuits}
self.current_f: Optional[ConstructorOrFunctionDefinition] = None
self.current_params: Optional[List[Parameter]] = None
self.current_circ: Optional[CircuitHelper] = None
self.current_index: List[Expression] = []
self.current_index_t: Optional[AnnotatedTypeName] = None
self.inside_circuit: bool = False
self.flatten_hybrid_args: bool = False
@property
def _get_forbidden_words(self) -> Set[str]:
return super()._get_forbidden_words.union({kw for kw in [
# predefined objects
'connect', 'deploy', 'me', 'wei_amount',
# base class member variables
'api', 'locals', 'state',
# base class functions
'_scope', '_function_ctx', 'default_address', 'initialize_keys_for', 'use_config_from_manifest', 'create_dummy_accounts',
# Globals
'os', 'IntEnum', 'Dict', 'List', 'Tuple', 'Optional', 'Union', 'Any',
'my_logging', 'CipherValue', 'AddressValue', 'RandomnessValue', 'PublicKeyValue',
'ContractSimulator', 'RequireException', 'help', 'annotations'
]})
def _get_type_constr(self, t: TypeName):
if isinstance(t, BoolTypeName):
constr = 'bool'
elif isinstance(t, EnumTypeName):
constr = self.visit_list(t.target.qualified_name, sep='.')
elif isinstance(t, NumberTypeName):
if self.inside_circuit and t.elem_bitwidth == 256:
constr = f'uint'
else:
constr = f'int{t.elem_bitwidth}'
if not t.signed:
constr = f'u{constr}'
elif isinstance(t, (AddressTypeName, AddressPayableTypeName)):
constr = 'AddressValue'
else:
raise NotImplementedError(f'No python constructor for type {t}')
return constr
def get_constructor_args_and_params(self, ast: ContractDefinition):
if not ast.constructor_definitions:
return '', ''
with self.circuit_ctx(ast.constructor_definitions[0]):
a, p = '', ''
for param in self.current_params:
a += f'{self.visit(param.idf)}, '
p += f'{self.visit(param)}, '
return a, p
def visitSourceUnit(self, ast: SourceUnit):
contracts = self.visit_list(ast.contracts)
is_payable = ast.contracts[0].constructor_definitions and ast.contracts[0].constructor_definitions[0].is_payable
val_param = ', wei_amount=0' if is_payable else ''
val_arg = ', wei_amount=wei_amount' if is_payable else ''
c_name = self.visit(ast.contracts[0].idf)
c_args, c_params = self.get_constructor_args_and_params(ast.contracts[0])
# Create skeleton with global functions and main method
return dedent(f'''\
###########################################
## THIS CODE WAS GENERATED AUTOMATICALLY ##
## Creation Time: {datetime.now().strftime('%H:%M:%S %d-%b-%Y')} ##
###########################################
from __future__ import annotations
import os
from enum import IntEnum
from typing import Dict, List, Tuple, Optional, Union, Any
from zkay import my_logging
from zkay.transaction.types import CipherValue, AddressValue, RandomnessValue, PublicKeyValue
from zkay.transaction.offchain import {SCALAR_FIELD_NAME}, ContractSimulator, RequireException
from zkay.transaction.int_casts import *
me = None
''') + contracts + (dedent(f'''
def deploy({c_params}*, user: Union[None, bytes, str] = None{val_param}) -> {c_name}:
user = me if user is None else user
return {c_name}.deploy({c_args}user=user{val_arg})
def connect(address: Union[bytes, str], user: Union[None, bytes, str] = None) -> {c_name}:
user = me if user is None else user
return {c_name}.connect(address, user=user)
def create_dummy_accounts(count: int) -> Union[str, Tuple[str, ...]]:
return ContractSimulator.create_dummy_accounts(count)
def help(val=None):
if val is None:
import sys
ContractSimulator.help(sys.modules[__name__], {c_name}, '{c_name}')
else:
import builtins
builtins.help(val)
''') if len(ast.contracts) == 1 else '') + dedent('''
if __name__ == '__main__':
ContractSimulator.use_config_from_manifest(os.path.dirname(os.path.realpath(__file__)))
me = ContractSimulator.default_address()
if me is not None:
me = me.val
import code
code.interact(local=globals())
''')
def generate_constructors(self, ast: ContractDefinition) -> str:
"""Generate class constructor (!= contract constructor) and static connect/deploy methods."""
# Priv values: private function args plaintext, locally decrypted plaintexts, encryption randomness
# State values: if key not in dict -> pull value from chain on read, otherwise retrieve cached value
name = self.visit(ast.idf)
is_payable = ast.constructor_definitions and ast.constructor_definitions[0].is_payable
val_param = ', wei_amount=0' if is_payable else ''
val_arg = 'wei_amount=wei_amount' if is_payable else ''
c_args, c_params = self.get_constructor_args_and_params(ast)
if not ast.constructor_definitions:
deploy_cmd = f'{api("deploy", "c")}([], []{val_arg})'
else:
deploy_cmd = f'c.constructor({c_args}{val_arg})'
sv_constr = []
for svd in [sv for sv in ast.state_variable_declarations if isinstance(sv, StateVariableDeclaration) and not sv.idf.name.startswith(cfg.reserved_name_prefix)]:
t = svd.annotated_type.type_name
while not isinstance(t, CipherText) and hasattr(t, 'value_type'):
t = t.value_type.type_name
if isinstance(t, CipherText):
constr = f', {self._get_type_constr(t.plain_type.type_name)}, cipher=True'
else:
constr = f', {self._get_type_constr(t)}'
sv_constr.append(f'self.state.decl("{svd.idf.name}"{constr})')
mf = MultiLineFormatter() * \
'def __init__(self, project_dir: str, user_addr: AddressValue):' /\
f"super().__init__(project_dir, user_addr, '{ast.idf.name}')" * sv_constr // f'''\
@staticmethod
def connect(address: Union[bytes, str], user: Union[str, bytes], project_dir: str = os.path.dirname(os.path.realpath(__file__))) -> {name}:
c = {name}(project_dir, AddressValue(user))
{api("connect", "c")}(AddressValue(address))
if not {api("keystore", "c")}.has_initialized_keys_for(AddressValue(user)):
ContractSimulator.initialize_keys_for(user)
return c
@staticmethod
def deploy({c_params}*, user: Union[str, bytes]{val_param}, project_dir: str = os.path.dirname(os.path.realpath(__file__))) -> {name}:
c = {name}(project_dir, AddressValue(user))
if not {api("keystore", "c")}.has_initialized_keys_for(AddressValue(user)):
ContractSimulator.initialize_keys_for(user)
{deploy_cmd}
return c
'''
return indent(f'{mf}\n')
@staticmethod
def is_special_var(idf: Identifier):
return idf.name.startswith(cfg.reserved_name_prefix) or idf.name in ['msg', 'block', 'tx', '_tmp_key', 'now']
@staticmethod
def get_priv_value(idf: str):
"""Retrieve value of private circuit variable from private-value dictionary"""
return f'{PRIV_VALUES_NAME}["{idf}"]'
def get_loc_value(self, arr: Identifier, indices: List[str]) -> str:
"""Get the location of the given identifier/array element."""
if isinstance(arr, HybridArgumentIdf) and arr.arg_type == HybridArgType.PRIV_CIRCUIT_VAL and not arr.name.startswith('tmp'):
# Private circuit values are located in private value dictionary
return self.get_priv_value(arr.name)
elif isinstance(arr, HybridArgumentIdf) and arr.arg_type == HybridArgType.PUB_CIRCUIT_ARG:
# Public circuit inputs are in the zk_data dict
return self.visit(arr.get_loc_expr())
else:
idxvals = ''.join([f'[{idx}]' for idx in indices])
return f'{self.visit(arr)}{idxvals}'
def get_value(self, idf: IdentifierExpr, indices: List[str]):
"""
Get code corresponding to the rvalue location of an identifier or index expression.
e.g. idf = x and indices = [some_addr, 5] corresponds to x[some_addr][5]
State variable values are downloaded from the chain if their value is not yet present in the local state variable dict.
"""
if self.is_special_var(idf.idf):
return self.get_loc_value(idf.idf, indices)
elif isinstance(idf.target, StateVariableDeclaration):
# If a state variable appears as an rvalue, the value may need to be requested from the blockchain
indices = f', {", ".join(indices)}' if indices else ''
return f'self.state["{idf.idf.name}"{indices}]'
else:
name = idf.idf
if isinstance(idf.target, VariableDeclaration) and not self.inside_circuit:
# Local variables are stored in locals dict
name = Identifier(f'self.locals["{idf.idf.name}"]')
return self.get_loc_value(name, indices)
def visitContractDefinition(self, ast: ContractDefinition):
"""Generate a python class with methods for each function and constructor definition and nested classes for each enum definition."""
enums = self.visit_list(ast.enum_definitions, '\n\n')
constr = self.visit_list(ast.constructor_definitions, '\n\n')
fcts = self.visit_list(ast.function_definitions, '\n\n')
return f'class {self.visit(ast.idf)}(ContractSimulator):\n' + \
(f'{indent(enums)}\n\n' if enums else '') + \
f'{self.generate_constructors(ast)}' + \
(f'{indent(constr)}\n\n' if constr else '') + \
(f'{indent(fcts)}\n' if fcts else '')
def visitConstructorOrFunctionDefinition(self, ast: ConstructorOrFunctionDefinition):
with self.circuit_ctx(ast):
return super().visitConstructorOrFunctionDefinition(ast)
def visitParameter(self, ast: Parameter):
if ast.parent.is_external:
ot = ast.annotated_type.zkay_type
if ot is None:
t = 'Any'
elif ot.is_address():
t = 'str'
else:
t = self.visit(ot.type_name)
elif ast.annotated_type is None:
| |
self._success_with_angle_requirement = success_with_angle_requirement
if not additional_observation_list:
additional_observation_list = self._object_list
self._additional_observation_list = additional_observation_list
self._pos_list = list(
itertools.product(
range(-self._max_play_ground_size, self._max_play_ground_size),
range(-self._max_play_ground_size,
self._max_play_ground_size)))
self._pos_list.remove((0, 0))
self._polar_coord = polar_coord
self._use_egocentric_states = use_egocentric_states
self._egocentric_perception_range = egocentric_perception_range
if self.should_use_curriculum_training():
self._orig_random_range = random_range
self._random_range = start_range
self._max_reward_q_length = max_reward_q_length
self._q = deque(maxlen=max_reward_q_length)
self._reward_thresh_to_increase_range = reward_thresh_to_increase_range
self._increase_range_by_percent = increase_range_by_percent
self._percent_full_range_in_curriculum = percent_full_range_in_curriculum
angle_str = ""
if curriculum_target_angle:
angle_str = ", start_angle {}".format(self._random_angle)
logging.info(
"start_range %f%s, reward_thresh_to_increase_range %f",
self._start_range, angle_str,
self._reward_thresh_to_increase_range)
else:
self._random_range = random_range
self.task_vocab += self._object_list
self._env.insert_model_list(self._object_list)
def should_use_curriculum_training(self):
return (self._use_curriculum_training
and self._start_range >= self._success_distance_thresh * 1.2)
def _push_reward_queue(self, value):
if (not self.should_use_curriculum_training()
) or self._is_full_range_in_curriculum:
return
self._q.append(value)
if (value > 0 and len(self._q) == self._max_reward_q_length
and sum(self._q) >= self._max_reward_q_length *
self._reward_thresh_to_increase_range):
if self._curriculum_target_angle:
self._random_angle += 20
logging.info("Raising random_angle to %d", self._random_angle)
if (not self._curriculum_target_angle or self._random_angle > 360):
self._random_angle = 60
new_range = min((1. + self._increase_range_by_percent) *
self._random_range, self._orig_random_range)
if self._random_range < self._orig_random_range:
logging.info("Raising random_range to %f", new_range)
self._random_range = new_range
self._q.clear()
def get_random_range(self):
return self._random_range
def pick_goal(self):
if self._random_goal:
random_id = random.randrange(len(self._goals))
self.set_goal_name(self._goals[random_id])
def _get_agent_loc(self):
loc, agent_dir = self._agent.get_pose()
if self._agent.type.find('icub') != -1:
# For agent icub, we need to use the average pos here
loc = ICubAuxiliaryTask.get_icub_extra_obs(self._agent)[:3]
loc = np.array(loc)
return loc, agent_dir
def run(self):
""" Start a teaching episode for this task. """
agent_sentence = yield
self._agent.reset()
if self._random_agent_orientation:
loc, agent_dir = self._agent.get_pose()
self._agent.set_pose((loc, (agent_dir[0], agent_dir[1],
2 * math.pi * random.random())))
loc, agent_dir = self._agent.get_pose()
loc = np.array(loc)
self._random_move_objects()
self.pick_goal()
goal = self._world.get_model(self._goal_name)
self._move_goal(goal, loc, agent_dir)
steps_since_last_reward = 0
prev_min_dist_to_distraction = 100
while steps_since_last_reward < self._max_steps:
steps_since_last_reward += 1
loc, agent_dir = self._get_agent_loc()
goal_loc, _ = goal.get_pose()
goal_loc = np.array(goal_loc)
dist = np.linalg.norm(loc - goal_loc)
# dir from get_pose is (roll, pitch, yaw)
dir = np.array([math.cos(agent_dir[2]), math.sin(agent_dir[2])])
goal_dir = (goal_loc[0:2] - loc[0:2]) / dist
dot = sum(dir * goal_dir)
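# dot is the cosine of the angle between the agent's heading and the direction to
# the goal; the 0.707 threshold below corresponds to roughly a 45 degree cone.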
distraction_penalty, prev_min_dist_to_distraction = (
self._get_distraction_penalty(loc, dot,
prev_min_dist_to_distraction))
if dist < self._success_distance_thresh and (
not self._success_with_angle_requirement or dot > 0.707):
# within 45 degrees of the agent direction
reward = 1.0 - distraction_penalty
self._push_reward_queue(max(reward, 0))
logging.debug("yielding reward: " + str(reward))
agent_sentence = yield TeacherAction(
reward=reward, sentence="well done",
done=self._end_episode_after_success,
success=True)
steps_since_last_reward = 0
if self._switch_goal_within_episode:
self.pick_goal()
goal = self._world.get_agent(self._goal_name)
if self._move_goal_during_episode:
self._agent.reset()
loc, agent_dir = self._get_agent_loc()
self._move_goal(goal, loc, agent_dir)
elif dist > self._initial_dist + self._fail_distance_thresh:
reward = -1.0 - distraction_penalty
self._push_reward_queue(0)
logging.debug(
"yielding reward: {}, farther than {} from goal".format(
str(reward), str(self._fail_distance_thresh)))
yield TeacherAction(
reward=reward, sentence="failed", done=True)
else:
if self._sparse_reward:
reward = 0
else:
reward = (self._prev_dist - dist) / self._initial_dist
reward = reward - distraction_penalty
if distraction_penalty > 0:
logging.debug("yielding reward: " + str(reward))
self._push_reward_queue(0)
self._prev_dist = dist
agent_sentence = yield TeacherAction(
reward=reward, sentence=self._goal_name)
reward = -1.0
logging.debug("yielding reward: {}, took more than {} steps".format(
str(reward), str(self._max_steps)))
self._push_reward_queue(0)
if self.should_use_curriculum_training():
logging.debug("reward queue len: {}, sum: {}".format(
str(len(self._q)), str(sum(self._q))))
yield TeacherAction(reward=reward, sentence="failed", done=True)
def _get_distraction_penalty(self, agent_loc, dot,
prev_min_dist_to_distraction):
"""
Calculate penalty for hitting/getting close to distraction objects
"""
distraction_penalty = 0
if (self._distraction_penalty_distance_thresh > 0
and self._distraction_list):
curr_min_dist = 100
for obj_name in self._distraction_list:
obj = self._world.get_model(obj_name)
if not obj:
continue
obj_loc, _ = obj.get_pose()
obj_loc = np.array(obj_loc)
distraction_dist = np.linalg.norm(agent_loc - obj_loc)
if (distraction_dist >=
self._distraction_penalty_distance_thresh):
continue
if obj_name == self._goal_name and dot > 0.707:
continue # correctly getting to goal, no penalty
if distraction_dist < curr_min_dist:
curr_min_dist = distraction_dist
if (prev_min_dist_to_distraction >
self._distraction_penalty_distance_thresh):
logging.debug("hitting object: " + obj_name)
distraction_penalty += self._distraction_penalty
prev_min_dist_to_distraction = curr_min_dist
return distraction_penalty, prev_min_dist_to_distraction
def _move_goal(self, goal, agent_loc, agent_dir):
"""
Move goal as well as all distraction objects to a random location.
"""
avoid_locations = [agent_loc]
loc = self._move_obj(
obj=goal,
agent_loc=agent_loc,
agent_dir=agent_dir,
is_goal=True,
avoid_locations=avoid_locations)
avoid_locations.append(loc)
distractions = OrderedDict()
for item in self._distraction_list:
if item != self._goal_name:
distractions[item] = 1
if len(distractions) and self._curriculum_distractions:
for item, _ in distractions.items():
distraction = self._world.get_agent(item)
loc = self._move_obj(
obj=distraction,
agent_loc=agent_loc,
agent_dir=agent_dir,
is_goal=False,
avoid_locations=avoid_locations)
avoid_locations.append(loc)
def _move_obj(self,
obj,
agent_loc,
agent_dir,
is_goal=True,
avoid_locations=[]):
if (self.should_use_curriculum_training()
and self._percent_full_range_in_curriculum > 0
and random.random() < self._percent_full_range_in_curriculum):
range = self._orig_random_range
self._is_full_range_in_curriculum = is_goal
else:
range = self._random_range
self._is_full_range_in_curriculum = False
attempts = 0
while True:
attempts += 1
dist = random.random() * range
if self._curriculum_target_angle:
angle_range = self._random_angle
else:
angle_range = 360
angle = math.radians(
math.degrees(agent_dir[2]) + random.random() * angle_range -
angle_range / 2)
loc = (dist * math.cos(angle), dist * math.sin(angle),
0) + agent_loc
if not self._polar_coord:
loc = np.asarray((random.random() * range - range / 2,
random.random() * range - range / 2, 0))
self._initial_dist = np.linalg.norm(loc - agent_loc)
satisfied = True
if (abs(loc[0]) > self._max_play_ground_size or abs(loc[1]) >
self._max_play_ground_size): # not within walls
satisfied = False
for avoid_loc in avoid_locations:
dist = np.linalg.norm(loc - avoid_loc)
if dist < self._success_distance_thresh:
satisfied = False
break
if satisfied or attempts > 10000:
if not satisfied:
logging.warning(
"Took forever to find satisfying " +
"object location. " +
"agent_loc: {}, range: {}, max_size: {}.".format(
str(agent_loc), str(range),
str(self._max_play_ground_size)))
break
self._prev_dist = self._initial_dist
obj.reset()
obj.set_pose((loc, (0, 0, 0)))
return loc
def _random_move_objects(self, random_range=10.0):
obj_num = len(self._object_list)
obj_pos_list = random.sample(self._pos_list, obj_num)
for obj_id in range(obj_num):
model_name = self._object_list[obj_id]
loc = (obj_pos_list[obj_id][0], obj_pos_list[obj_id][1], 0)
pose = (np.array(loc), (0, 0, 0))
self._world.get_model(model_name).set_pose(pose)
def get_goal_name(self):
"""
Args:
None
Returns:
Goal's name at this episode
"""
return self._goal_name
def set_goal_name(self, goal_name):
"""
Args:
Goal's name
Returns:
None
"""
logging.debug('Setting Goal to %s', goal_name)
self._goal_name = goal_name
def task_specific_observation(self, agent):
"""
Args:
agent (GazeboAgent): the agent
Returns:
np.array, the observations of the task for non-image case
"""
goal = self._world.get_model(self._goal_name)
goal_first = not agent._with_language
if goal_first: # put goal first
pose = np.array(goal.get_pose()[0]).flatten()
else: # has language input, don't put goal first
pose = None
for name in self._additional_observation_list:
if goal_first and name == self._goal_name:
continue
obj = self._world.get_model(name)
obj_pos = np.array(obj.get_pose()[0]).flatten()
if pose is None:
pose = obj_pos
else:
pose = np.concatenate((pose, obj_pos), axis=0)
agent_pose = np.array(agent.get_pose()).flatten()
if self._use_egocentric_states:
yaw = agent_pose[5]
# adds egocentric velocity input
vx, vy, vz, a1, a2, a3 = np.array(agent.get_velocities()).flatten()
rvx, rvy = agent.get_egocentric_cord_2d(vx, vy, yaw)
obs = [rvx, rvy, vz, a1, a2, a3]
# adds objects' (goal's as well as distractions') egocentric
# coordinates to observation
while len(pose) > 1:
x = pose[0] - agent_pose[0]
y = pose[1] - agent_pose[1]
rotated_x, rotated_y = agent.get_egocentric_cord_2d(x, y, yaw)
if self._egocentric_perception_range > 0:
dist = math.sqrt(rotated_x * rotated_x +
rotated_y * rotated_y)
rotated_x /= dist
rotated_y /= dist
magnitude = 1. / dist
if rotated_x < np.cos(
self._egocentric_perception_range / 180. * np.pi):
rotated_x = 0.
rotated_y = 0.
magnitude = 0.
obs.extend([rotated_x, rotated_y, magnitude])
else:
obs.extend([rotated_x, rotated_y])
pose = pose[3:]
obs = np.array(obs)
else:
agent_vel = np.array(agent.get_velocities()).flatten()
joints_states = agent.get_internal_states()
obs = np.concatenate((pose, agent_pose, agent_vel, joints_states),
axis=0)
return obs
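# Hedged sketch (not part of the original file): `get_egocentric_cord_2d(x, y, yaw)`
# used above is assumed to rotate a world-frame 2-D offset into the agent's frame.
# Under that assumption, a minimal stand-alone version would look like:
#
#     def egocentric_2d(x, y, yaw):
#         """Rotate a world-frame (x, y) offset by -yaw into the agent frame."""
#         rx = math.cos(yaw) * x + math.sin(yaw) * y
#         ry = -math.sin(yaw) * x + math.cos(yaw) * y
#         return rx, ry
#
# The actual GazeboAgent implementation may differ in sign conventions.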
@gin.configurable
class ICubAuxiliaryTask(Task):
"""
An auxiliary task specified for iCub, to keep the agent from falling down
and to encourage the agent to walk
"""
def __init__(self,
env,
max_steps,
target=None,
agent_init_pos=(0, 0),
agent_pos_random_range=0,
reward_weight=1.0):
"""
Args:
env (gym.Env): an instance of Environment
max_steps (int): episode will end in so many steps
reward_weight (float): the weight of the reward, should be tuned
according to the reward range of other tasks
target (string): the target iCub should face towards, since
you may want the agent to interact with something
agent_init_pos (tuple): the expected initial position of the agent
agent_pos_random_range (float): random range of the initial position
"""
super().__init__(
env=env, max_steps=max_steps, reward_weight=reward_weight)
self.task_vocab = ['icub']
self._target_name = target
self._pre_agent_pos = np.array([0, 0, 0], dtype=np.float32)
self._agent_init_pos = agent_init_pos
self._random_range = agent_pos_random_range
if self._target_name:
self._target = self._world.get_model(self._target_name)
with open(
os.path.join(social_bot.get_model_dir(), "agent_cfg.json"),
'r') as cfg_file:
agent_cfgs = json.load(cfg_file)
self._joints = agent_cfgs[self._agent.type]['control_joints']
def run(self):
""" Start a teaching episode for this task. """
self._pre_agent_pos = self.get_icub_extra_obs(self._agent)[:3]
agent_sentence = yield
done = False
# set icub random initial pose
x = self._agent_init_pos[0] + random.random() * self._random_range
y = self._agent_init_pos[1] + random.random() * self._random_range
orient = (random.random() - 0.5) * np.pi
if self._target_name and random.randint(0, 1) == 0:
# a trick from roboschool humanoid flag run, important to learn | |
<filename>libc/tools/gensyscalls.py
#!/usr/bin/python
#
# this tool is used to generate the syscall assembler templates
# to be placed into arch-x86/syscalls, as well as the content
# of arch-x86/linux/_syscalls.h
#
import sys, os.path, glob, re, commands, filecmp, shutil
from bionic_utils import *
if sys.version_info.major != 2:
print "error: please use Python 2 with this script. Your version is"
print "%s" % (sys.version)
sys.exit(1)
# set this to 1 if you want to generate thumb stubs
gen_thumb_stubs = 0
# set this to 1 if you want to generate ARM EABI stubs
gen_eabi_stubs = 1
# get the root Bionic directory, simply this script's dirname
#
bionic_root = find_bionic_root()
if not bionic_root:
print "could not find the Bionic root directory. aborting"
sys.exit(1)
if bionic_root[-1] != '/':
bionic_root += "/"
#print "bionic_root is %s" % bionic_root
#print "syscalls.stamp is '%s'" % syscalls_stamp
# temp directory where we store all intermediate files
bionic_temp = ""
# all architectures, update as you see fit
all_archs = [ "arm", "x86" ]
def make_dir( path ):
if not os.path.exists(path):
parent = os.path.dirname(path)
if parent:
make_dir(parent)
os.mkdir(path)
def create_file( relpath ):
abspath = bionic_temp + "/" + relpath
dir = os.path.dirname( abspath )
make_dir(dir)
return open( abspath, "w" )
# x86 assembler templates for each syscall stub
#
x86_header = """/* autogenerated by gensyscalls.py */
#include <linux/err.h>
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(%(fname)s)
"""
x86_registers = [ "%ebx", "%ecx", "%edx", "%esi", "%edi", "%ebp" ]
x86_call = """ movl $%(idname)s, %%eax
int $0x80
cmpl $-MAX_ERRNO, %%eax
jb 1f
negl %%eax
pushl %%eax
call __set_errno
addl $4, %%esp
orl $-1, %%eax
1:
"""
x86_return = """ ret
END(%(fname)s)
"""
# ARM assembler templates for each syscall stub
#
arm_header = """/* autogenerated by gensyscalls.py */
#include <machine/asm.h>
#include <sys/linux-syscalls.h>
ENTRY(%(fname)s)
"""
arm_footer = """\
END(%(fname)s)
"""
arm_call_default = arm_header + """\
swi #%(idname)s
movs r0, r0
bxpl lr
b __set_syscall_errno
""" + arm_footer
arm_call_long = arm_header + """\
.save {r4, r5, lr}
stmfd sp!, {r4, r5, lr}
ldr r4, [sp, #12]
ldr r5, [sp, #16]
swi #%(idname)s
ldmfd sp!, {r4, r5, lr}
movs r0, r0
bxpl lr
b __set_syscall_errno
""" + arm_footer
arm_eabi_call_default = arm_header + """\
.save {r4, r7}
stmfd sp!, {r4, r7}
ldr r7, =%(idname)s
swi #0
ldmfd sp!, {r4, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
""" + arm_footer
arm_eabi_call_long = arm_header + """\
mov ip, sp
.save {r4, r5, r6, r7}
stmfd sp!, {r4, r5, r6, r7}
ldmfd ip, {r4, r5, r6}
ldr r7, =%(idname)s
swi #0
ldmfd sp!, {r4, r5, r6, r7}
movs r0, r0
bxpl lr
b __set_syscall_errno
""" + arm_footer
# ARM thumb assembler templates for each syscall stub
#
thumb_header = """/* autogenerated by gensyscalls.py */
.text
.type %(fname)s, #function
.globl %(fname)s
.align 4
.thumb_func
.fnstart
.syntax unified
#ifndef __thumb__
#define __thumb__
#endif
#include <sys/linux-syscalls.h>
%(fname)s:
"""
thumb_call_default = thumb_header + """\
.save {r7,lr}
push {r7,lr}
ldr r7, =%(idname)s
swi #0
tst r0, r0
bmi 1f
pop {r7,pc}
1:
rsbs r0, r0, #0
ldr r1, =__set_errno
blx r1
pop {r7,pc}
.fnend
"""
thumb_call_long = thumb_header + """\
.save {r4,r5,r7,lr}
push {r4,r5,r7,lr}
ldr r4, [sp,#16]
ldr r5, [sp,#20]
ldr r7, =%(idname)s
swi #0
tst r0, r0
bmi 1f
pop {r4,r5,r7,pc}
1:
rsbs r0, r0, #0
ldr r1, =__set_errno
blx r1
pop {r4,r5,r7,pc}
.fnend
"""
def param_uses_64bits(param):
"""Returns True iff a syscall parameter description corresponds
to a 64-bit type."""
param = param.strip()
# First, check that the param type begins with one of the known
# 64-bit types.
if not ( \
param.startswith("int64_t") or param.startswith("uint64_t") or \
param.startswith("loff_t") or param.startswith("off64_t") or \
param.startswith("long long") or param.startswith("unsigned long long") or
param.startswith("signed long long") ):
return False
# Second, check that there is no pointer type here
if param.find("*") >= 0:
return False
# Ok
return True
def count_arm_param_registers(params):
"""This function is used to count the number of register used
to pass parameters when invoking a thumb or ARM system call.
This is because the ARM EABI mandates that 64-bit quantities
must be passed in an even+odd register pair. So, for example,
something like:
foo(int fd, off64_t pos)
would actually need 4 registers:
r0 -> int
r1 -> unused
r2-r3 -> pos
"""
count = 0
for param in params:
if param_uses_64bits(param):
if (count & 1) != 0:
count += 1
count += 2
else:
count += 1
return count
def count_generic_param_registers(params):
count = 0
for param in params:
if param_uses_64bits(param):
count += 2
else:
count += 1
return count
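# Illustrative check of the two counters above (the parameter list is
# hypothetical, not taken from SYSCALLS.TXT): for
# params = ["int a", "loff_t b", "int c"],
# count_arm_param_registers(params) returns 5, because the 64-bit value must
# land in an even/odd pair (r1 is skipped, b goes to r2/r3, c takes a fifth
# register), while count_generic_param_registers(params) returns 4.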
class State:
def __init__(self):
self.old_stubs = []
self.new_stubs = []
self.other_files = []
self.syscalls = []
def x86_genstub(self, fname, numparams, idname):
t = { "fname" : fname,
"idname" : idname }
result = x86_header % t
stack_bias = 4
for r in range(numparams):
result += " pushl " + x86_registers[r] + "\n"
stack_bias += 4
for r in range(numparams):
result += " mov %d(%%esp), %s" % (stack_bias+r*4, x86_registers[r]) + "\n"
result += x86_call % t
for r in range(numparams):
result += " popl " + x86_registers[numparams-r-1] + "\n"
result += x86_return % t
return result
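# For orientation, x86_genstub("foo", 2, "__NR_foo"), where "foo" is a
# hypothetical syscall name and not one from SYSCALLS.TXT, expands the
# templates above into roughly the following stub (whitespace approximate):
#
#     /* autogenerated by gensyscalls.py */
#     #include <linux/err.h>
#     #include <machine/asm.h>
#     #include <sys/linux-syscalls.h>
#     ENTRY(foo)
#         pushl %ebx
#         pushl %ecx
#         mov 12(%esp), %ebx
#         mov 16(%esp), %ecx
#         movl $__NR_foo, %eax
#         int $0x80
#         cmpl $-MAX_ERRNO, %eax
#         jb 1f
#         negl %eax
#         pushl %eax
#         call __set_errno
#         addl $4, %esp
#         orl $-1, %eax
#     1:
#         popl %ecx
#         popl %ebx
#         ret
#     END(foo)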
def x86_genstub_cid(self, fname, numparams, idname, cid):
# We'll ignore numparams here because in reality, if there is a
# dispatch call (like a socketcall syscall) there are actually
# only 2 arguments to the syscall and 2 regs we have to save:
# %ebx <--- Argument 1 - The call id of the needed vectored
# syscall (socket, bind, recv, etc)
# %ecx <--- Argument 2 - Pointer to the rest of the arguments
# from the original function called (socket())
t = { "fname" : fname,
"idname" : idname }
result = x86_header % t
stack_bias = 4
# save the regs we need
result += " pushl %ebx" + "\n"
stack_bias += 4
result += " pushl %ecx" + "\n"
stack_bias += 4
# set the call id (%ebx)
result += " mov $%d, %%ebx" % (cid) + "\n"
# set the pointer to the rest of the args into %ecx
result += " mov %esp, %ecx" + "\n"
result += " addl $%d, %%ecx" % (stack_bias) + "\n"
# now do the syscall code itself
result += x86_call % t
# now restore the saved regs
result += " popl %ecx" + "\n"
result += " popl %ebx" + "\n"
# epilog
result += x86_return % t
return result
def arm_genstub(self,fname, flags, idname):
t = { "fname" : fname,
"idname" : idname }
if flags:
numargs = int(flags)
if numargs > 4:
return arm_call_long % t
return arm_call_default % t
def arm_eabi_genstub(self,fname, flags, idname):
t = { "fname" : fname,
"idname" : idname }
if flags:
numargs = int(flags)
if numargs > 4:
return arm_eabi_call_long % t
return arm_eabi_call_default % t
def thumb_genstub(self,fname, flags, idname):
t = { "fname" : fname,
"idname" : idname }
if flags:
numargs = int(flags)
if numargs > 4:
return thumb_call_long % t
return thumb_call_default % t
def superh_genstub(self, fname, flags, idname):
numargs = int(flags)
t = { "fname" : fname,
"idname" : idname,
"numargs" : numargs }
superh_call = superh_header
if flags:
if numargs == 5:
superh_call += superh_5args_header
if numargs == 6:
superh_call += superh_6args_header
if numargs == 7:
superh_call += superh_7args_header
superh_call += superh_call_default
return superh_call % t
def process_file(self,input):
parser = SysCallsTxtParser()
parser.parse_file(input)
self.syscalls = parser.syscalls
parser = None
for t in self.syscalls:
syscall_func = t["func"]
syscall_params = t["params"]
syscall_name = t["name"]
if t["id"] >= 0:
num_regs = count_arm_param_registers(syscall_params)
if gen_thumb_stubs:
t["asm-thumb"] = self.thumb_genstub(syscall_func,num_regs,"__NR_"+syscall_name)
else:
if gen_eabi_stubs:
t["asm-arm"] = self.arm_eabi_genstub(syscall_func,num_regs,"__NR_"+syscall_name)
else:
t["asm-arm"] = self.arm_genstub(syscall_func,num_regs,"__NR_"+syscall_name)
if t["id2"] >= 0:
num_regs = count_generic_param_registers(syscall_params)
if t["cid"] >= 0:
t["asm-x86"] = self.x86_genstub_cid(syscall_func, num_regs, "__NR_"+syscall_name, t["cid"])
else:
t["asm-x86"] = self.x86_genstub(syscall_func, num_regs, "__NR_"+syscall_name)
elif t["cid"] >= 0:
E("cid for dispatch syscalls is only supported for x86 in "
"'%s'" % syscall_name)
return
def gen_NR_syscall(self,fp,name,id):
fp.write( "#define __NR_%-25s (__NR_SYSCALL_BASE + %d)\n" % (name,id) )
# now dump the content of linux/_syscalls.h
def gen_linux_syscalls_h(self,odir):
path = "libc/include/sys/linux-syscalls.h"
D( "generating "+path )
fp = create_file( path )
fp.write( "/* auto-generated by gensyscalls.py, do not touch */\n" )
fp.write( "#ifndef _BIONIC_LINUX_SYSCALLS_H_\n\n" )
fp.write( "#if !defined __ASM_ARM_UNISTD_H && !defined __ASM_I386_UNISTD_H\n" )
fp.write( "#if defined __arm__ && !defined __ARM_EABI__ && !defined __thumb__\n" )
fp.write( " # define __NR_SYSCALL_BASE 0x900000\n" )
fp.write( " #else\n" )
fp.write( " # define | |
r"""Implementation of games in extensive form.
The most important class of the module is ``ExtensiveFormGame'', which
provides support for n-player extensive form games, including chance moves.
It also provides support for a graphical representation of the game tree and
implementation for the backward induction algorithm, which is used to compute
subgame-perfect equilibrium strategies and equilibrium paths that are expected
to be played by perfectly rational agents.
References
----------
<NAME>, <NAME>, <NAME>, An Introductory
Course on Mathematical Game Theory, American Mathematical Society and Real
Sociedad Matemática Española, 2010. https://doi.org/10.1016/j.geb.2010.12.006.
"""
import networkx as nx
from networkx.algorithms.simple_paths import all_simple_edge_paths
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import random
from itertools import combinations
from copy import deepcopy
from typing import Any, Dict, List, Set, Tuple
class ExtensiveFormGame:
r"""Implementation of a game in extensive form.
The game is initialized 'empty', meaning with minimal attribute
assignments. Attributes are then set through the various methods. The
extensive form game is modelled as described in the reference, see the
chapter on extensive games.
Parameters
----------
**kwargs
Additional keyword arguments.
Attributes
----------
game_tree : networkx.DiGraph
Game tree, directed graph. Other than the methods and attributes of the
class, two additional attributes are set:
* root : Any
The root node, initialized to None.
* terminal_nodes : List[Any]
The list of terminal nodes, initialized to an empty list.
The game tree is initialized as empty.
information_partition : Dict[Any, List[Set[Any]]]
For every player (key), it maps it to the list of the information
sets (values).
is_perfect_information : bool, `True`
The game is initialized as being of perfect information.
players : List[Any]
List of players in the game. It is initialized empty.
probability : Dict[Any, Dict[Tuple[Any, Any], float]]
Probability distributions over the outgoing edges at every node where
chance takes an action. The keys are the nodes where chance acts. The
values are dictionaries mapping every outgoing edge from that node to
its probability.
turn_function : Dict[Any, Any]
Function that maps every non-terminal node to the player whose turn it
is to take an action at the node.
utility : Dict[Any, Dict[Any, float]]
For every terminal node, it maps the utility that the various players
(excluding chance) assign to it.
See Also
--------
networkx.DiGraph
"""
def __init__(self, **kwargs) -> None:
# players
self.players = []
# game tree
self.game_tree = nx.DiGraph()
self.game_tree.root = None
self.game_tree.terminal_nodes = []
# turn function
self.turn_function = {}
# information partition
self.information_partition = {}
self.is_perfect_information = True
# probability distribution over chance edges
self.probability = {}
# utility function
self.utility = {}
# additional info
for k, v in kwargs.items():
setattr(self, k, v)
def __check_player_in_game(self, player_id: Any) -> None:
r"""Check that the given player is actually in the game.
Parameters
----------
player_id : Any
Raises
------
ValueError
If the player is not in the game.
"""
if player_id not in self.players:
raise ValueError("player {} not in game".format(player_id))
def __check_nonterminal_node(self, node_id: Any) -> None:
r"""Check that a node is in the game tree.
Parameters
----------
node_id : Any
Raises
------
ValueError
If the node is not in the game tree.
"""
if node_id not in self.get_nonterminal_nodes():
raise ValueError("node {} is a terminal node".format(node_id))
def __check_terminal_node(self, node_id: Any) -> None:
r"""Check that a node is terminal.
Parameters
----------
node_id : Any
Raises
------
ValueError
If the node is not terminal.
"""
if node_id not in self.game_tree.terminal_nodes:
raise ValueError("node {} is not a terminal node".format(node_id))
def add_players(self, *players_id: Any) -> None:
r"""Add a lists of players to the game, encoded in any data structure.
Parameters
----------
players_id : List[Any]
Players to be added to the game. Exclude 'chance'.
Raises
------
ValueError
If 'chance' is among the players to be added.
"""
for p in players_id:
if p == 'chance':
raise ValueError("player 'chance' should not added to the \
game")
if p not in self.players:
self.players.append(p)
self.information_partition[p] = []
def add_node(self, node_id: Any, player_turn: Any = None,
is_root: bool = False) -> None:
r"""Add a node the game tree.
If the node is non-terminal and it is not a chance node, perfect
information is assumed. A set containing the single node is added to
the information partition of the player playing at the node.
Also, if the node is non-terminal (regardless of whether it is a
chance node or not), it is added to `turn_function` and its player is
assigned.
Parameters
----------
node_id : Any
Node to be added.
player_turn : Any, optional
Whose player has the turn at the node. If None is given, it is
assumed that the node is terminal. The default is None.
is_root : bool, optional
Whether the node is the root of the game tree. The default is False.
"""
self.game_tree.add_node(node_id)
# if player turn given
if player_turn:
self.turn_function[node_id] = player_turn
# add player to game if not already there
if player_turn not in self.players and player_turn != 'chance':
self.players.append(player_turn)
# if not a chance node, assume perfect information
if player_turn != 'chance':
self.__check_player_in_game(player_turn)
self.information_partition[player_turn].append({node_id})
# if player turn not given, it is a terminal node
else:
self.game_tree.terminal_nodes.append(node_id)
# assign as root if indicated
if is_root:
self.game_tree.root = node_id
def set_node_player(self, node_id: Any, player_turn: Any) -> None:
r"""Set the player at a node after it has been added to the game tree.
If the node had been designated as a terminal, remove it from that
list.
Parameters
----------
node_id : Any
The node whose player changes.
player_turn : Any
The new player that takes turn at the node.
"""
self.turn_function[node_id] = player_turn
# add player to game if not already there
if player_turn not in self.players and player_turn != 'chance':
self.players.append(player_turn)
# delete node from terminal nodes if there
if node_id in self.game_tree.terminal_nodes:
self.game_tree.terminal_nodes.remove(node_id)
def add_edge(self, from_node: Any, to_node: Any, label: Any) -> None:
r"""Add an edge to the game tree between two nodes.
Parameters
----------
from_node : Any
Origin node of the edge.
to_node : Any
Destination node of the edge.
label : Any
The edge label corresponding to the action being taken.
"""
self.game_tree.add_edge(from_node, to_node, action=label)
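# Minimal usage sketch for the tree-building methods above (node, player and
# action labels are hypothetical, not taken from any example in this module):
#
#     g = ExtensiveFormGame()
#     g.add_players('P1', 'P2')
#     g.add_node('root', player_turn='P1', is_root=True)
#     g.add_node('n1', player_turn='P2')
#     g.add_node('t1')                     # no player_turn -> terminal node
#     g.add_edge('root', 'n1', label='L')
#     g.add_edge('n1', 't1', label='l')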
def get_nonterminal_nodes(self) -> List[Any]:
r"""Obtain the list of non-terminal nodes in the game tree.
Returns
-------
List[Any]
List of non-terminal nodes.
"""
nonterminal_nodes = []
for n in self.game_tree.nodes:
if n not in self.game_tree.terminal_nodes:
nonterminal_nodes.append(n)
return nonterminal_nodes
def get_theta_partition(self) -> Dict[Any, Set[Any]]:
r"""Get the turns partition.
The turns partition (or :math:`\Theta` partition) splits the
non-terminal nodes into disjoint sets, according to whose turn it is
to play at the node (including the 'chance' player).
Returns
-------
Dict[Any, Set[Any]]
For every player in the game, including 'chance', the set of nodes
where it is that player's turn to play.
"""
# initialize partitions to empty set
theta_partition = {}
for p in self.players:
theta_partition[p] = set()
theta_partition['chance'] = set()
# add nodes to their corresponding partition
for n in self.get_nonterminal_nodes():
node_turn = self.turn_function[n]
theta_partition[node_turn].add(n)
return theta_partition
def get_player_utility(self, player_id: Any) -> Dict[Any, float]:
r"""Return the utility function for the given player.
Parameters
----------
player_id : Any
Returns
-------
Dict[Any, float]
A map from every terminal node to the utility assigned to it by
the given player.
"""
self.__check_player_in_game(player_id)
utility_i = {}
for n in self.game_tree.terminal_nodes:
utility_i[n] = self.utility[n][player_id]
return utility_i
def get_available_actions(self, node: Any) -> Set[Any]:
r"""Get what actions are available at the given node.
Parameters
----------
node : Any
Returns
-------
Set[Any]
Set of available actions according to the game tree.
"""
actions = set()
for e in self.game_tree.out_edges(node):
a = self.game_tree.get_edge_data(*e)['action']
actions.add(a)
return actions
def get_choice_set(self, player_id: Any, information_set: Set[Any]) \
-> Set[Any]:
r"""Get the choice set for some player at some information set.
Parameters
----------
player_id : Any
information_set : Set[Any]
The information set for which the choice set is to be retrieved.
Returns
-------
List[Tuple[Any]]
List of edges outgoing from every node in the information set.
"""
self.__check_player_in_game(player_id)
assert information_set in self.information_partition[player_id], \
"information set {} does not belong to player {}'s information \
partition".format(information_set, player_id)
choice_set | |
2*m.b150*m.b971 + 2*m.b151*m.b153
- 2*m.b151 + 2*m.b151*m.b265 - 2*m.b265 - 2*m.b151*m.b566 + 4*m.b566 + 2*m.b151*m.b716 + 2*
m.b152*m.b154 + 2*m.b154 + 2*m.b152*m.b933 - 2*m.b152*m.b951 + 2*m.b153*m.b910 + 2*m.b153*m.b951
- 2*m.b154*m.b466 + 2*m.b466 - 2*m.b154*m.b648 - 2*m.b154*m.b788 + 2*m.b155*m.b156 - 2*m.b156 +
2*m.b156*m.b208 + 2*m.b156*m.b308 - 4*m.b308 - 2*m.b156*m.b630 - 2*m.b630 + 2*m.b157*m.b778 + 2*
m.b157*m.b843 + 2*m.b158*m.b235 - 2*m.b158 + 2*m.b158*m.b475 + 2*m.b475 + 2*m.b158*m.b634 - 4*
m.b634 - 2*m.b158*m.b912 + 2*m.b159*m.b160 - 2*m.b160 + 2*m.b159*m.b273 - 2*m.b273 + 2*m.b160*
m.b209 - 4*m.b209 + 2*m.b160*m.b314 - 2*m.b314 - 2*m.b160*m.b844 + 2*m.b161*m.b162 - 2*m.b162 + 2
*m.b161*m.b479 - 4*m.b479 + 2*m.b162*m.b314 + 2*m.b163*m.b165 - 2*m.b163 - 4*m.b165 + 2*m.b163*
m.b719 + 2*m.b164*m.b585 + 2*m.b585 + 2*m.b164*m.b913 - 2*m.b164*m.b915 + 2*m.b165*m.b531 - 2*
m.b531 + 2*m.b165*m.b695 + 2*m.b165*m.b915 + 2*m.b166*m.b167 - 2*m.b167 - 2*m.b166*m.b917 + 2*
m.b167*m.b169 + 2*m.b167*m.b362 - 4*m.b362 - 2*m.b167*m.b401 - 2*m.b168*m.b218 - 2*m.b218 - 2*
m.b168*m.b722 + 2*m.b168*m.b904 + 2*m.b169*m.b218 - 2*m.b169*m.b249 + 2*m.b249 + 2*m.b170*m.b172
- 2*m.b172 - 2*m.b170*m.b292 + 2*m.b292 + 2*m.b171*m.b290 + 2*m.b171*m.b330 - 2*m.b330 - 2*
m.b171*m.b833 - 2*m.b172*m.b662 + 2*m.b172*m.b833 + 2*m.b172*m.b969 - 2*m.b173*m.b405 + 4*m.b173
- 2*m.b405 - 2*m.b173*m.b735 - 2*m.b173*m.b737 - 2*m.b173*m.b890 + 2*m.b174*m.b724 - 4*m.b174 +
2*m.b174*m.b808 + 2*m.b174*m.b890 + 2*m.b174*m.b959 + 2*m.b175*m.b202 - 4*m.b175 - 4*m.b202 + 2*
m.b175*m.b332 - 4*m.b332 + 2*m.b175*m.b610 - 2*m.b610 + 2*m.b175*m.b796 + 2*m.b176*m.b177 - 4*
m.b176 + 2*m.b176*m.b506 - 4*m.b506 + 2*m.b176*m.b712 + 2*m.b176*m.b825 + 2*m.b177*m.b335 - 2*
m.b335 - 2*m.b177*m.b977 + 2*m.b178*m.b299 - 4*m.b178 - 2*m.b299 + 2*m.b178*m.b335 + 2*m.b178*
m.b458 + 2*m.b458 + 2*m.b178*m.b699 - 2*m.b179*m.b180 + 2*m.b180 + 2*m.b179*m.b727 + 2*m.b179*
m.b775 + 2*m.b180*m.b423 + 2*m.b423 - 2*m.b180*m.b626 - 2*m.b180*m.b979 + 2*m.b181*m.b182 - 2*
m.b182 + 2*m.b182*m.b234 + 2*m.b182*m.b346 - 4*m.b346 - 2*m.b182*m.b572 - 2*m.b572 + 2*m.b183*
m.b766 + 2*m.b183*m.b829 + 2*m.b184*m.b185 - 4*m.b185 + 2*m.b184*m.b311 - 2*m.b311 + 2*m.b184*
m.b900 + 2*m.b185*m.b240 - 4*m.b240 + 2*m.b185*m.b351 - 2*m.b351 + 2*m.b185*m.b844 + 2*m.b186*
m.b187 - 2*m.b187 + 2*m.b186*m.b528 - 4*m.b528 + 2*m.b187*m.b351 + 2*m.b188*m.b681 - 2*m.b188 + 2
*m.b188*m.b805 + 2*m.b188*m.b902 - 2*m.b188*m.b916 + 2*m.b189*m.b279 - 2*m.b189 + 2*m.b189*m.b437
- 2*m.b437 - 2*m.b189*m.b831 + 2*m.b189*m.b916 + 2*m.b190*m.b191 - 2*m.b191 + 2*m.b190*m.b929 +
2*m.b191*m.b193 + 2*m.b191*m.b398 - 4*m.b398 - 2*m.b191*m.b444 - 2*m.b192*m.b253 - 2*m.b253 - 2*
m.b192*m.b733 + 2*m.b192*m.b917 - 2*m.b193*m.b215 + 2*m.b215 + 2*m.b193*m.b253 + 2*m.b194*m.b196
- 2*m.b196 - 2*m.b194*m.b256 + 2*m.b256 + 2*m.b195*m.b196 - 2*m.b195 + 2*m.b195*m.b253 - 2*
m.b195*m.b686 + 2*m.b195*m.b968 + 2*m.b196*m.b197 - 2*m.b197 - 2*m.b196*m.b656 - 2*m.b197*m.b198
- 2*m.b198 + 2*m.b197*m.b849 + 2*m.b197*m.b950 + 2*m.b198*m.b200 - 4*m.b200 + 2*m.b198*m.b670 +
2*m.b198*m.b821 - 2*m.b199*m.b446 - 2*m.b446 - 2*m.b199*m.b750 - 2*m.b199*m.b884 + 2*m.b200*
m.b737 + 2*m.b200*m.b884 + 2*m.b200*m.b950 + 2*m.b201*m.b228 - 4*m.b201 - 4*m.b228 + 2*m.b201*
m.b369 - 4*m.b369 + 2*m.b201*m.b642 + 2*m.b201*m.b783 + 2*m.b202*m.b451 + 2*m.b202*m.b759 + 2*
m.b202*m.b876 + 2*m.b203*m.b204 - 4*m.b203 + 2*m.b203*m.b557 - 4*m.b557 + 2*m.b203*m.b700 + 2*
m.b203*m.b812 + 2*m.b204*m.b372 - 2*m.b372 - 2*m.b204*m.b985 - 2*m.b205*m.b764 + 2*m.b205 + 2*
m.b205*m.b790 - 2*m.b205*m.b842 - 2*m.b205*m.b987 + 2*m.b206*m.b207 - 2*m.b207 - 2*m.b206*m.b869
+ 2*m.b207*m.b269 + 2*m.b207*m.b384 - 4*m.b384 - 2*m.b207*m.b521 - 2*m.b521 + 2*m.b208*m.b270 -
2*m.b270 + 2*m.b208*m.b817 + 2*m.b209*m.b210 - 4*m.b210 + 2*m.b209*m.b349 - 2*m.b349 + 2*m.b209*
m.b912 + 2*m.b210*m.b274 - 4*m.b274 + 2*m.b210*m.b389 - 2*m.b389 + 2*m.b210*m.b857 + 2*m.b211*
m.b212 - 2*m.b212 + 2*m.b211*m.b583 - 4*m.b583 + 2*m.b212*m.b389 + 2*m.b213*m.b486 - 2*m.b213 - 2
*m.b486 - 2*m.b213*m.b819 + 2*m.b213*m.b928 + 2*m.b213*m.b990 + 2*m.b214*m.b216 - 2*m.b216 + 2*
m.b214*m.b917 + 2*m.b214*m.b939 + 2*m.b215*m.b217 - 2*m.b215*m.b595 - 2*m.b595 - 2*m.b215*m.b661
+ 2*m.b216*m.b217 + 2*m.b216*m.b442 - 4*m.b442 - 2*m.b216*m.b862 + 2*m.b217*m.b289 - 2*m.b289 +
2*m.b218*m.b220 - 2*m.b220 + 2*m.b218*m.b969 - 2*m.b219*m.b221 + 2*m.b221 + 2*m.b219*m.b222 - 4*
m.b222 + 2*m.b220*m.b222 + 2*m.b220*m.b289 - 2*m.b220*m.b600 + 2*m.b221*m.b223 - 2*m.b223 - 2*
m.b221*m.b599 - 2*m.b599 - 2*m.b221*m.b958 + 2*m.b222*m.b223 + 2*m.b222*m.b656 - 2*m.b223*m.b808
+ 2*m.b223*m.b941 + 2*m.b224*m.b225 - 2*m.b224 - 4*m.b225 + 2*m.b224*m.b685 - 2*m.b224*m.b833 +
2*m.b224*m.b834 + 2*m.b225*m.b750 + 2*m.b225*m.b874 + 2*m.b225*m.b941 - 2*m.b226*m.b609 + 2*
m.b609 + 2*m.b226*m.b687 + 2*m.b226*m.b884 + 2*m.b227*m.b609 + 2*m.b227*m.b698 + 2*m.b227*m.b874
+ 2*m.b228*m.b770 + 2*m.b228*m.b853 + 2*m.b228*m.b865 + 2*m.b229*m.b230 + 2*m.b229*m.b612 - 4*
m.b612 + 2*m.b230*m.b412 - 2*m.b412 - 2*m.b230*m.b453 + 2*m.b453 - 2*m.b231*m.b755 + 2*m.b231 + 2
*m.b231*m.b801 - 2*m.b231*m.b855 - 2*m.b231*m.b995 + 2*m.b232*m.b233 - 2*m.b233 + 2*m.b232*m.b573
+ 2*m.b233*m.b308 + 2*m.b233*m.b428 - 4*m.b428 - 2*m.b233*m.b470 - 2*m.b470 + 2*m.b234*m.b236 -
4*m.b236 + 2*m.b234*m.b803 + 2*m.b235*m.b238 - 4*m.b238 + 2*m.b235*m.b576 - 4*m.b576 + 2*m.b236*
m.b238 + 2*m.b236*m.b428 + 2*m.b236*m.b777 - 2*m.b237*m.b239 + 2*m.b237 + 2*m.b237*m.b474 - 4*
m.b474 - 2*m.b237*m.b650 - 2*m.b237*m.b856 + 2*m.b238*m.b239 + 2*m.b238*m.b650 + 2*m.b239*m.b312
- 4*m.b312 + 2*m.b240*m.b241 + 2*m.b240*m.b387 - 2*m.b387 + 2*m.b240*m.b924 + 2*m.b241*m.b312 +
2*m.b241*m.b434 - 4*m.b434 + 2*m.b242*m.b243 - 2*m.b243 + 2*m.b242*m.b639 - 4*m.b639 + 2*m.b243*
m.b434 + 2*m.b244*m.b356 - 2*m.b244 - 2*m.b356 + 2*m.b244*m.b533 - 2*m.b533 - 2*m.b244*m.b806 + 2
*m.b244*m.b938 + 2*m.b245*m.b731 - 2*m.b245*m.b883 + 2*m.b246*m.b320 + 2*m.b246 - 4*m.b320 - 2*
m.b246*m.b745 - 2*m.b246*m.b781 - 2*m.b246*m.b975 + 2*m.b247*m.b320 + 2*m.b247*m.b683 + 2*m.b247*
m.b883 + 2*m.b248*m.b250 - 4*m.b248 - 2*m.b250 + 2*m.b248*m.b904 + 2*m.b248*m.b929 + 2*m.b248*
m.b947 + 2*m.b249*m.b252 - 2*m.b249*m.b542 - 2*m.b542 - 2*m.b249*m.b655 + 2*m.b250*m.b252 + 2*
m.b250*m.b492 - 4*m.b492 - 2*m.b250*m.b709 - 2*m.b251*m.b325 - 2*m.b325 + 2*m.b251*m.b544 - 4*
m.b544 - 2*m.b251*m.b917 + 2*m.b252*m.b325 + 2*m.b253*m.b255 - 2*m.b255 + 2*m.b254*m.b257 - 4*
m.b257 - 2*m.b254*m.b849 + 2*m.b255*m.b257 + 2*m.b255*m.b325 - 2*m.b255*m.b546 - 2*m.b546 + 2*
m.b256*m.b258 - 2*m.b258 - 2*m.b256*m.b545 - 2*m.b545 - 2*m.b256*m.b949 + 2*m.b257*m.b258 + 2*
m.b257*m.b662 + 2*m.b258*m.b603 - 2*m.b258*m.b794 + 2*m.b259*m.b260 + 2*m.b259 - 2*m.b259*m.b550
- 2*m.b259*m.b822 - 2*m.b259*m.b997 + 2*m.b260*m.b554 + 2*m.b554 + 2*m.b260*m.b710 + 2*m.b261*
m.b782 - 2*m.b261 + 2*m.b261*m.b852 + 2*m.b261*m.b866 - 2*m.b261*m.b897 + 2*m.b262*m.b263 - 2*
m.b262*m.b641 + 2*m.b263*m.b455 - 2*m.b455 - 2*m.b263*m.b507 + 2*m.b507 + 2*m.b264*m.b337 - 2*
m.b264 + 2*m.b337 + 2*m.b264*m.b455 - 2*m.b264*m.b616 + 2*m.b616 + 2*m.b264*m.b961 + 2*m.b265*
m.b338 - 2*m.b338 - 2*m.b265*m.b463 + 2*m.b463 + 2*m.b265*m.b787 - 2*m.b266*m.b742 + 2*m.b266 + 2
*m.b266*m.b815 - 2*m.b266*m.b867 - 2*m.b266*m.b1000 + 2*m.b267*m.b268 - 2*m.b268 + 2*m.b267*
m.b869 + 2*m.b268*m.b346 - 2*m.b268*m.b426 - 2*m.b426 + 2*m.b268*m.b472 - 4*m.b472 + 2*m.b269*
m.b271 - 4*m.b271 + 2*m.b269*m.b791 + 2*m.b270*m.b384 + 2*m.b270*m.b765 - 2*m.b270*m.b988 + 2*
m.b271*m.b431 + 2*m.b431 + 2*m.b271*m.b472 + 2*m.b271*m.b988 - 2*m.b272*m.b273 + 2*m.b272 + 2*
m.b272*m.b430 - 4*m.b430 - 2*m.b272*m.b579 - 2*m.b272*m.b843 + 2*m.b273*m.b350 - 4*m.b350 + 2*
m.b273*m.b988 + 2*m.b274*m.b275 + 2*m.b274*m.b432 - 2*m.b432 + 2*m.b274*m.b935 + 2*m.b275*m.b350
+ 2*m.b275*m.b480 - 4*m.b480 + 2*m.b276*m.b277 - 2*m.b277 - 2*m.b276*m.b678 + 2*m.b277*m.b480 +
2*m.b278*m.b393 - 2*m.b278 - 2*m.b393 + 2*m.b278*m.b586 - 4*m.b586 - 2*m.b278*m.b792 + 2*m.b278*
m.b945 + 2*m.b279*m.b654 - 2*m.b279*m.b982 + 2*m.b280*m.b720 + 2*m.b280*m.b831 - 2*m.b280*m.b873
- 2*m.b281*m.b651 + 4*m.b281 - 2*m.b281*m.b747 - 2*m.b281*m.b991 - 2*m.b281*m.b992 + 2*m.b282*
m.b361 + 2*m.b282 - 4*m.b361 - 2*m.b282*m.b731 - 2*m.b282*m.b793 - 2*m.b282*m.b966 + 2*m.b283*
m.b442 - 2*m.b283 + 2*m.b283*m.b541 - 4*m.b541 + 2*m.b283*m.b769 - 2*m.b283*m.b992 + 2*m.b284*
m.b286 - 4*m.b284 - 4*m.b286 + 2*m.b284*m.b893 + 2*m.b284*m.b939 + 2*m.b284*m.b956 + 2*m.b285*
m.b288 - 2*m.b285*m.b493 - 2*m.b493 - 2*m.b285*m.b652 + 2*m.b286*m.b288 + 2*m.b286*m.b541 + 2*
m.b286*m.b862 - 2*m.b287*m.b367 - 2*m.b367 + 2*m.b287*m.b543 - 2*m.b543 - 2*m.b287*m.b904 + 2*
m.b288*m.b367 + 2*m.b289*m.b291 - 2*m.b291 - 2*m.b289*m.b748 + 2*m.b290*m.b293 - 4*m.b293 - 2*
m.b290*m.b598 - 2*m.b598 + 2*m.b291*m.b293 + 2*m.b291*m.b367 - 2*m.b291*m.b930 + 2*m.b292*m.b294
- 2*m.b292*m.b495 - 2*m.b495 - 2*m.b292*m.b940 + 2*m.b293*m.b294 + 2*m.b293*m.b670 + 2*m.b294*
m.b548 + 2*m.b295*m.b296 + 2*m.b295 - 2*m.b295*m.b499 - 2*m.b295*m.b835 - 2*m.b295*m.b1002 + 2*
m.b296*m.b503 + 2*m.b503 + 2*m.b296*m.b724 + 2*m.b297*m.b795 - 2*m.b297 + 2*m.b297*m.b837 + 2*
m.b297*m.b877 - 2*m.b297*m.b908 + 2*m.b298*m.b813 - 2*m.b298 - 2*m.b298*m.b898 + 2*m.b298*m.b971
+ 2*m.b298*m.b1004 + 2*m.b299*m.b300 - 2*m.b299*m.b373 - 2*m.b373 + 2*m.b299*m.b417 - 2*m.b417
+ 2*m.b300*m.b302 - 2*m.b302 + 2*m.b300*m.b813 + 2*m.b301*m.b375 - 2*m.b375 - 2*m.b301*m.b420 +
2*m.b420 + 2*m.b301*m.b774 + 2*m.b302*m.b420 + 2*m.b302*m.b619 - 2*m.b619 - 2*m.b302*m.b933 - 2*
m.b303*m.b304 + 2*m.b303 + 2*m.b304 - 2*m.b303*m.b622 + 2*m.b622 - 2*m.b303*m.b691 | |
the PSU. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ApiResponseOfPaymentResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'consent',
'payment_request',
'x_yapily_api_version',
'psu_id',
'psu_corporate_id',
'psu_ip_address'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_payment_with_sort_code_using_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'consent' is set
if self.api_client.client_side_validation and ('consent' not in local_var_params or # noqa: E501
local_var_params['consent'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `consent` when calling `create_payment_with_sort_code_using_post`") # noqa: E501
# verify the required parameter 'payment_request' is set
if self.api_client.client_side_validation and ('payment_request' not in local_var_params or # noqa: E501
local_var_params['payment_request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `payment_request` when calling `create_payment_with_sort_code_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'x_yapily_api_version' in local_var_params:
header_params['x-yapily-api-version'] = local_var_params['x_yapily_api_version'] # noqa: E501
if 'consent' in local_var_params:
header_params['consent'] = local_var_params['consent'] # noqa: E501
if 'psu_id' in local_var_params:
header_params['psu-id'] = local_var_params['psu_id'] # noqa: E501
if 'psu_corporate_id' in local_var_params:
header_params['psu-corporate-id'] = local_var_params['psu_corporate_id'] # noqa: E501
if 'psu_ip_address' in local_var_params:
header_params['psu-ip-address'] = local_var_params['psu_ip_address'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'payment_request' in local_var_params:
body_params = local_var_params['payment_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json;charset=UTF-8']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'tokenAuth'] # noqa: E501
return self.api_client.call_api(
'/payment-sortcode', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiResponseOfPaymentResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_payment_status_using_get(self, payment_id, consent, **kwargs): # noqa: E501
"""Get status of a payment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_payment_status_using_get(payment_id, consent, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str payment_id: __Mandatory__. The payment Id of the payment. (required)
:param str consent: __Mandatory__. The `consent-token` containing the user's authorisation to make the request. (required)
:param str x_yapily_api_version: __Optional__. Determines the API version to use. Valid values are `1.0` or `2.0-ALPHA`. Defaults to `1.0`
:param str psu_id: __Conditional__. Represents the user's login ID for the `Institution` to a personal account. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param str psu_corporate_id: __Conditional__. Represents the user's login ID for the `Institution` to a business account. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param str psu_ip_address: __Conditional__. The IP address of the PSU. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ApiResponseOfPaymentResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_payment_status_using_get_with_http_info(payment_id, consent, **kwargs) # noqa: E501
def get_payment_status_using_get_with_http_info(self, payment_id, consent, **kwargs): # noqa: E501
"""Get status of a payment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_payment_status_using_get_with_http_info(payment_id, consent, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str payment_id: __Mandatory__. The payment Id of the payment. (required)
:param str consent: __Mandatory__. The `consent-token` containing the user's authorisation to make the request. (required)
:param str x_yapily_api_version: __Optional__. Determines the API version to use. Valid values are `1.0` or `2.0-ALPHA`. Defaults to `1.0`
:param str psu_id: __Conditional__. Represents the user's login ID for the `Institution` to a personal account. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param str psu_corporate_id: __Conditional__. Represents the user's login ID for the `Institution` to a business account. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param str psu_ip_address: __Conditional__. The IP address of the PSU. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(ApiResponseOfPaymentResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'payment_id',
'consent',
'x_yapily_api_version',
'psu_id',
'psu_corporate_id',
'psu_ip_address'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_payment_status_using_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'payment_id' is set
if self.api_client.client_side_validation and ('payment_id' not in local_var_params or # noqa: E501
local_var_params['payment_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `payment_id` when calling `get_payment_status_using_get`") # noqa: E501
# verify the required parameter 'consent' is set
if self.api_client.client_side_validation and ('consent' not in local_var_params or # noqa: E501
local_var_params['consent'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `consent` when calling `get_payment_status_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'payment_id' in local_var_params:
path_params['paymentId'] = local_var_params['payment_id'] # noqa: E501
query_params = []
header_params = {}
if 'x_yapily_api_version' in local_var_params:
header_params['x-yapily-api-version'] = local_var_params['x_yapily_api_version'] # noqa: E501
if 'consent' in local_var_params:
header_params['consent'] = local_var_params['consent'] # noqa: E501
if 'psu_id' in local_var_params:
header_params['psu-id'] = local_var_params['psu_id'] # noqa: E501
if 'psu_corporate_id' in local_var_params:
header_params['psu-corporate-id'] = local_var_params['psu_corporate_id'] # noqa: E501
if 'psu_ip_address' in local_var_params:
header_params['psu-ip-address'] = local_var_params['psu_ip_address'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth', 'tokenAuth'] # noqa: E501
return self.api_client.call_api(
'/payments/{paymentId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApiResponseOfPaymentResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_payments_using_get(self, payment_id, consent, **kwargs): # noqa: E501
"""Get payments details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_payments_using_get(payment_id, consent, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str payment_id: __Mandatory__. The payment Id of the payment. (required)
:param str consent: __Mandatory__. The `consent-token` containing the user's authorisation to make the request. (required)
:param str x_yapily_api_version: __Optional__. Determines the API version to use. Valid values are `1.0` or `2.0-ALPHA`. Defaults to `1.0`
:param str psu_id: __Conditional__. Represents the user's login ID for the `Institution` to a personal account. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param str psu_corporate_id: __Conditional__. Represents the user's login ID for the `Institution` to a business account. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param str psu_ip_address: __Conditional__. The IP address of the PSU. <br><br>See [PSU identifiers](https://docs.yapily.com/knowledge/psu_identifiers/) to see if this header is required.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: ApiResponseOfPaymentResponses
If | |
colors=inner_colors,
shadow=False, wedgeprops=dict(width=size, edgecolor='#FFFFFF'))
ax5.set(aspect="equal", title='Luminance modulated cells summary`')
if self.save_fig:
if os.path.exists(self.save_dir):
fig2.savefig(f'{self.save_dir}{os.sep}{decode_what}_modulation_summary.{self.fig_format}')
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
# make modulation index histograms
smi = [statistics_dict[cluster]['luminance_modulation_index'] for cluster in statistics_dict.keys()]
smi_neg = [statistics_dict[cluster]['luminance_modulation_index'] for cluster in statistics_dict.keys()
if (statistics_dict[cluster]['luminance_modulation_index'] < 0 and statistics_dict[cluster]['p_value'] < self.critical_p_value)]
smi_pos = [statistics_dict[cluster]['luminance_modulation_index'] for cluster in statistics_dict.keys()
if (statistics_dict[cluster]['luminance_modulation_index'] > 0 and statistics_dict[cluster]['p_value'] < self.critical_p_value)]
fig3 = plt.figure(figsize=(8, 6), dpi=300)
bins = np.linspace(-1, 1, 20)
ax6 = fig3.add_subplot(111, label='6')
ax6.hist(smi, bins=bins, color='#DEDEDE', alpha=.6, edgecolor='#000000')
ax6.hist(smi_neg, bins=bins, color='#00008B', alpha=.6)
ax6.hist(smi_pos, bins=bins, color='#EEC900', alpha=.6)
ax6.set_xlabel('Luminance modulation index')
ax6.set_ylabel('Number of cells')
for side in ['right', 'top']:
ax6.spines[side].set_visible(False)
if self.save_fig:
if os.path.exists(self.save_dir):
fig3.savefig(f'{self.save_dir}{os.sep}{decode_what}_modulation_distribution.{self.fig_format}')
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
def decoding_summary(self, **kwargs):
"""
Description
----------
This method plots the event (sound, luminance, etc.) decoding accuracy separately
for each animal. Each animal's line shows the mean decoding accuracy across the
10 obtained runs for each number of cells on the x-axis (by default,
[5, 10, 20, 50, 100]). The vertical bars represent z_value_sem*SEM (2.58*SEM by
default) at each of these points. The grey shaded area represents the results
for 99% of the shuffled data.
----------
Parameters
----------
**kwargs (dictionary)
x_values_arr (np.ndarray)
An array of numbers of cells to decode with; defaults to np.array([5, 10, 20, 50, 100]).
decoding_event (str)
Decoding event for figure title; defaults to 'sound stimulation'.
z_value_sem (float)
The z-value for the SEM calculation; defaults to 2.58 (~99% confidence).
areas (list)
The brain areas decoding was performed on; defaults to ['A', 'V'].
animals_1 (list)
Animals for the first brain area; defaults to ['kavorka', 'frank', 'johnjohn']
animals_2 (list)
Animals for the second brain area; defaults to ['kavorka', 'frank', 'johnjohn']
----------
Returns
----------
decoding_accuracy (fig)
A plot of decoding accuracy across A and V cortices for a particular event.
----------
"""
x_values_arr = kwargs['x_values_arr'] if 'x_values_arr' in kwargs.keys() and type(kwargs['x_values_arr']) == np.ndarray else np.array([5, 10, 20, 50, 100])
decoding_event = kwargs['decoding_event'] if 'decoding_event' in kwargs.keys() and type(kwargs['decoding_event']) == str else 'sound stimulation'
z_value_sem = kwargs['z_value_sem'] if 'z_value_sem' in kwargs.keys() and type(kwargs['z_value_sem']) == float else 2.58
areas = kwargs['areas'] if 'areas' in kwargs.keys() and type(kwargs['areas']) == list else ['A', 'V']
animals_1 = kwargs['animals_1'] if 'animals_1' in kwargs.keys() and type(kwargs['animals_1']) == list else ['kavorka', 'frank', 'johnjohn']
animals_2 = kwargs['animals_2'] if 'animals_2' in kwargs.keys() and type(kwargs['animals_2']) == list else ['kavorka', 'frank', 'johnjohn']
file_dict = {'data': {areas[0]: [], areas[1]: []}, 'shuffled': {areas[0]: [], areas[1]: []}}
if not os.path.exists(self.decoding_dir):
print(f"Invalid location for directory {self.decoding_dir}. Please try again.")
sys.exit()
else:
for decoding_file_name in os.listdir(self.decoding_dir):
if 'shuffled' in decoding_file_name:
if areas[0] in decoding_file_name:
file_dict['shuffled'][areas[0]].append(decoding_file_name)
else:
file_dict['shuffled'][areas[1]].append(decoding_file_name)
else:
if areas[0] in decoding_file_name:
file_dict['data'][areas[0]].append(decoding_file_name)
else:
file_dict['data'][areas[1]].append(decoding_file_name)
# sort dict by file name
for data_type in file_dict.keys():
for data_area in file_dict[data_type].keys():
file_dict[data_type][data_area].sort()
# load the data
decoding_data = {'data': {areas[0]: {}, areas[1]: {}}, 'shuffled': {areas[0]: {}, areas[1]: {}}}
for data_type in decoding_data.keys():
for data_area in decoding_data[data_type].keys():
for one_file in file_dict[data_type][data_area]:
animal_name = [animal for animal in self.animal_ids.keys() if animal in one_file][0]
decoding_data[data_type][data_area][animal_name] = np.load(f'{self.decoding_dir}{os.sep}{one_file}')
# get data to plot
plot_data = {areas[0]: {'decoding_accuracy': {'mean': {}, 'sem': {}}, 'shuffled': np.array([[1000., 0.]] * 5)},
areas[1]: {'decoding_accuracy': {'mean': {}, 'sem': {}}, 'shuffled': np.array([[1000., 0.]] * 5)}}
for area in decoding_data['data']:
for animal in decoding_data['data'][area].keys():
plot_data[area]['decoding_accuracy']['mean'][animal] = decoding_data['data'][area][animal].mean(axis=1)
plot_data[area]['decoding_accuracy']['sem'][animal] = sem(decoding_data['data'][area][animal], axis=1)
down_percentiles = np.percentile(decoding_data['shuffled'][area][animal], q=.5, axis=1)
for d_idx, d_per in enumerate(down_percentiles):
if d_per < plot_data[area]['shuffled'][d_idx, 0]:
plot_data[area]['shuffled'][d_idx, 0] = d_per
up_percentiles = np.percentile(decoding_data['shuffled'][area][animal], q=99.5, axis=1)
for u_idx, u_per in enumerate(up_percentiles):
if u_per > plot_data[area]['shuffled'][u_idx, 1]:
plot_data[area]['shuffled'][u_idx, 1] = u_per
# plot
x_values = x_values_arr
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5), dpi=300, tight_layout=True)
ax[0].errorbar(x=x_values, y=plot_data[areas[0]]['decoding_accuracy']['mean'][animals_1[0]], yerr=plot_data[areas[0]]['decoding_accuracy']['sem'][animals_1[0]] * z_value_sem,
color='#000000', fmt='-o', label=f"#{self.animal_ids[animals_1[0]]}")
ax[0].errorbar(x=x_values, y=plot_data[areas[0]]['decoding_accuracy']['mean'][animals_1[1]], yerr=plot_data[areas[0]]['decoding_accuracy']['sem'][animals_1[1]] * z_value_sem,
color='#000000', fmt='-^', label=f"#{self.animal_ids[animals_1[1]]}")
ax[0].errorbar(x=x_values, y=plot_data[areas[0]]['decoding_accuracy']['mean'][animals_1[2]], yerr=plot_data[areas[0]]['decoding_accuracy']['sem'][animals_1[2]] * z_value_sem,
color='#000000', fmt='-s', label=f"#{self.animal_ids[animals_1[2]]}")
ax[0].fill_between(x=x_values, y1=plot_data[areas[0]]['shuffled'][:, 0], y2=plot_data[areas[0]]['shuffled'][:, 1], color='grey', alpha=.25)
ax[0].set_ylim(.3, 1)
ax[0].set_xlim(0)
ax[0].legend()
ax[0].set_title(f'{areas[0]} units')
ax[0].set_xlabel('Number of units')
ax[0].set_ylabel('Decoding accuracy')
ax[1].errorbar(x=x_values, y=plot_data[areas[1]]['decoding_accuracy']['mean'][animals_2[0]], yerr=plot_data[areas[1]]['decoding_accuracy']['sem'][animals_2[0]] * z_value_sem,
color='#000000', fmt='-o', label=f"#{self.animal_ids[animals_2[0]]}")
ax[1].errorbar(x=x_values, y=plot_data[areas[1]]['decoding_accuracy']['mean'][animals_2[1]], yerr=plot_data[areas[1]]['decoding_accuracy']['sem'][animals_2[1]] * z_value_sem,
color='#000000', fmt='-^', label=f"#{self.animal_ids[animals_2[1]]}")
ax[1].errorbar(x=x_values, y=plot_data[areas[1]]['decoding_accuracy']['mean'][animals_2[2]], yerr=plot_data[areas[1]]['decoding_accuracy']['sem'][animals_2[2]] * z_value_sem,
color='#000000', fmt='-s', label=f"#{self.animal_ids[animals_2[2]]}")
ax[1].fill_between(x=x_values, y1=plot_data[areas[1]]['shuffled'][:, 0], y2=plot_data[areas[1]]['shuffled'][:, 1], color='#808080', alpha=.25)
ax[1].set_ylim(.3, 1)
ax[1].set_xlim(0)
ax[1].legend()
ax[1].set_title(f'{areas[1]} units')
ax[1].set_xlabel('Number of units')
ax[1].set_ylabel('Decoding accuracy')
if self.save_fig:
if os.path.exists(self.save_dir):
fig.savefig(f'{self.save_dir}{os.sep}{decoding_event}_decoding_accuracy.{self.fig_format}')
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
def modulation_along_probe(self, **kwargs):
"""
Description
----------
This method plots sound and luminance modulation significant units with respect
to their position along the probe. It sums all the significantly modulated units
(suppressed or excited) at their respective peak channels and normalizes their
counts by the maximum number of units at any channel.
----------
Parameters
----------
**kwargs (dictionary)
cmap_smi (str)
The colormap for SMI; defaults to 'Blues'.
cmap_lmi (str)
The colormap for LMI; defaults to 'Reds'.
----------
Returns
----------
modulation_along_probe (fig)
A plot of SMI and LMI significant unit concentration along probe.
----------
"""
cmap_smi = kwargs['cmap_smi'] if 'cmap_smi' in kwargs.keys() and type(kwargs['cmap_smi']) == str else 'Blues'
cmap_lmi = kwargs['cmap_lmi'] if 'cmap_lmi' in kwargs.keys() and type(kwargs['cmap_lmi']) == str else 'Reds'
data = {}
for file in os.listdir(self.modulation_indices_dir):
with open(f'{self.modulation_indices_dir}{os.sep}{file}') as json_file:
temp_data = json.load(json_file)
index_type = 'smi' if 'smi' in file else 'lmi'
brain_area = 'V' if 'V' in file else 'A'
data[f'{index_type}_{brain_area}'] = temp_data
for animal in ['frank', 'johnjohn', 'kavorka']:
plot_modulation_data = {'smi': list(data['smi_A'][animal].keys()) + list(data['smi_V'][animal].keys()),
'lmi_distal': list(data['lmi_A'][animal]['distal'].keys()) + list(data['lmi_V'][animal]['distal'].keys()),
'lmi_intermediate': list(data['lmi_V'][animal]['intermediate'].keys())}
plot_modulation_arrays = {'smi_probe_arr': np.zeros((384, 2)),
'lmi_probe_arr': np.zeros((384, 2))}
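# Map each significant unit to its probe site: the peak channel (offset by 384 for
# the intermediate bank) is folded into the 384 x 2 probe layout, with even channels
# in column 0 and odd channels in column 1.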
for data_type in plot_modulation_data.keys():
index_type = 'smi' if 'smi' in data_type else 'lmi'
bank = 'intermediate' if 'intermediate' in data_type else 'distal'
for item in plot_modulation_data[data_type]:
if bank == 'distal':
ch = int(item[item.index('ch') + 2:])
else:
ch = int(item[item.index('ch') + 2:]) + 384
modulo = ch % 2
row = ch // 2
if modulo == 0:
col = 0
else:
col = 1
plot_modulation_arrays[f'{index_type}_probe_arr'][row, col] += 1
reduction_factor = 2
reduced_plot_modulation_arrays = {'smi_probe_arr': np.zeros((384 // reduction_factor, 1)),
'lmi_probe_arr': np.zeros((384 // reduction_factor, 1))}
for arr_name in plot_modulation_arrays:
for rr_idx, reduced_row in enumerate(range(0, 384, reduction_factor)):
reduced_plot_modulation_arrays[arr_name][rr_idx, :] = plot_modulation_arrays[arr_name][reduced_row:reduced_row + reduction_factor, :].sum()
for arr_name in reduced_plot_modulation_arrays:
smoothed_arr = neural_activity.gaussian_smoothing(array=reduced_plot_modulation_arrays[arr_name],
sigma=3,
axis=0)
reduced_plot_modulation_arrays[arr_name] = smoothed_arr / smoothed_arr.max()
fig = plt.figure(figsize=(2, 8))
ax = fig.add_subplot(121)
im = ax.imshow(reduced_plot_modulation_arrays['smi_probe_arr'], aspect='auto', vmin=0, vmax=1, cmap=cmap_smi, alpha=1, origin='lower')
ax2 = fig.add_subplot(122)
im2 = ax2.imshow(reduced_plot_modulation_arrays['lmi_probe_arr'], aspect='auto', vmin=0, vmax=1, cmap=cmap_lmi, alpha=1, origin='lower')
"""cbar = fig.colorbar(im, orientation='vertical', shrink=.3)
cbar.ax.tick_params(size=0)"""
cbar2 = fig.colorbar(im2, orientation='vertical', shrink=.3)
cbar2.ax.tick_params(size=0)
if self.save_fig:
if os.path.exists(self.save_dir):
fig.savefig(f'{self.save_dir}{os.sep}{animal}_modulation_along_probe.{self.fig_format}', dpi=300)
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
def light_dark_fr_correlations(self, **kwargs):
"""
Description
----------
This method plots the firing rate distribution changes across three different
sessions and the correlation distribution of population vectors from session 3
to the population averages of session 1 and session 2.
----------
Parameters
----------
**kwargs (dictionary)
get_cl_profiles (bool)
Get profiles (RS / FS) of clusters; defaults to False.
total_fr_correlations (int)
Total number of frames to correlate with; defaults to 1e4.
----------
Returns
----------
spike_count_distributions (fig)
A plot of spike count distributions for the specified 3 files.
----------
"""
get_cl_profiles = kwargs['get_cl_profiles'] if 'get_cl_profiles' in kwargs.keys() and type(kwargs['get_cl_profiles']) == bool else False
total_fr_correlations = kwargs['total_fr_correlations'] if 'total_fr_correlations' in kwargs.keys() and type(kwargs['total_fr_correlations']) == int else 10000
clusters_across_sessions = {}
all_common_clusters = {}
for animal in self.all_animals_012.keys():
clusters_across_sessions[animal] = {0: [], 1: [], 2: []}
for session_id, session in enumerate(self.all_animals_012[animal]):
clusters_across_sessions[animal][session_id] = select_clusters.ClusterFinder(session=session,
cluster_groups_dir=self.cluster_groups_dir,
sp_profiles_csv=self.sp_profiles_csv).get_desired_clusters(
filter_by_cluster_type=self.relevant_cluster_types,
filter_by_area=self.relevant_areas)
all_common_clusters[animal] = list(set(clusters_across_sessions[animal][0]).intersection(clusters_across_sessions[animal][1], clusters_across_sessions[animal][2]))
print(len(all_common_clusters['kavorka']), len(all_common_clusters['frank']), len(all_common_clusters['johnjohn']))
activity_across_sessions = {}
for animal in self.all_animals_012.keys():
activity_across_sessions[animal] = {0: {}, 1: {}, 2: {}}
for session_id, session in enumerate(self.all_animals_012[animal]):
the_session, activity_dictionary, purged_spikes_dict = neural_activity.Spikes(input_file=session).convert_activity_to_frames_with_shuffles(get_clusters=all_common_clusters[animal],
to_shuffle=False,
condense_arr=True)
activity_across_sessions[animal][session_id] = activity_dictionary
if get_cl_profiles:
cluster_profiles = {}
for animal in self.all_animals_012.keys():
file_bank = [bank for bank in ['distal', 'intermediate'] if bank in self.all_animals_012[animal][0]][0]
get_date_idx = [date.start() for date in re.finditer('20', self.all_animals_012[animal][0])][-1]
file_date = self.all_animals_012[animal][0][get_date_idx - 4:get_date_idx + 2]
cluster_profiles[animal] = define_spiking_profile.get_cluster_spiking_profiles(cluster_list=all_common_clusters[animal],
recording_day=f'{animal}_{file_date}_{file_bank}',
sp_profiles_csv=self.sp_profiles_csv)
activity_arrays = {}
for animal in self.all_animals_012.keys():
zero_ses_name, zero_extracted_frame_info = sessions2load.Session(session=self.all_animals_012[animal][0]).data_loader(extract_variables=['total_frame_num'])
first_ses_name, first_extracted_frame_info = sessions2load.Session(session=self.all_animals_012[animal][1]).data_loader(extract_variables=['total_frame_num'])
second_ses_name, second_extracted_frame_info = sessions2load.Session(session=self.all_animals_012[animal][2]).data_loader(extract_variables=['total_frame_num'])
min_total_frame_num = np.array([zero_extracted_frame_info['total_frame_num'],
| |
<filename>python/ggtk/term_set_sim.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.40
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# This file is compatible with both classic and new-style classes.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_term_set_sim', [dirname(__file__)])
except ImportError:
import _term_set_sim
return _term_set_sim
if fp is not None:
try:
_mod = imp.load_module('_term_set_sim', fp, pathname, description)
finally:
fp.close()
return _mod
_term_set_sim = swig_import_helper()
del swig_import_helper
else:
import _term_set_sim
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _term_set_sim.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _term_set_sim.SwigPyIterator_value(self)
def incr(self, n = 1): return _term_set_sim.SwigPyIterator_incr(self, n)
def decr(self, n = 1): return _term_set_sim.SwigPyIterator_decr(self, n)
def distance(self, *args): return _term_set_sim.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _term_set_sim.SwigPyIterator_equal(self, *args)
def copy(self): return _term_set_sim.SwigPyIterator_copy(self)
def next(self): return _term_set_sim.SwigPyIterator_next(self)
def __next__(self): return _term_set_sim.SwigPyIterator___next__(self)
def previous(self): return _term_set_sim.SwigPyIterator_previous(self)
def advance(self, *args): return _term_set_sim.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _term_set_sim.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _term_set_sim.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _term_set_sim.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _term_set_sim.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _term_set_sim.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _term_set_sim.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _term_set_sim.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class SizeVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SizeVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SizeVector, name)
__repr__ = _swig_repr
def iterator(self): return _term_set_sim.SizeVector_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _term_set_sim.SizeVector___nonzero__(self)
def __bool__(self): return _term_set_sim.SizeVector___bool__(self)
def __len__(self): return _term_set_sim.SizeVector___len__(self)
def pop(self): return _term_set_sim.SizeVector_pop(self)
def __getslice__(self, *args): return _term_set_sim.SizeVector___getslice__(self, *args)
def __setslice__(self, *args): return _term_set_sim.SizeVector___setslice__(self, *args)
def __delslice__(self, *args): return _term_set_sim.SizeVector___delslice__(self, *args)
def __delitem__(self, *args): return _term_set_sim.SizeVector___delitem__(self, *args)
def __getitem__(self, *args): return _term_set_sim.SizeVector___getitem__(self, *args)
def __setitem__(self, *args): return _term_set_sim.SizeVector___setitem__(self, *args)
def append(self, *args): return _term_set_sim.SizeVector_append(self, *args)
def empty(self): return _term_set_sim.SizeVector_empty(self)
def size(self): return _term_set_sim.SizeVector_size(self)
def clear(self): return _term_set_sim.SizeVector_clear(self)
def swap(self, *args): return _term_set_sim.SizeVector_swap(self, *args)
def get_allocator(self): return _term_set_sim.SizeVector_get_allocator(self)
def begin(self): return _term_set_sim.SizeVector_begin(self)
def end(self): return _term_set_sim.SizeVector_end(self)
def rbegin(self): return _term_set_sim.SizeVector_rbegin(self)
def rend(self): return _term_set_sim.SizeVector_rend(self)
def pop_back(self): return _term_set_sim.SizeVector_pop_back(self)
def erase(self, *args): return _term_set_sim.SizeVector_erase(self, *args)
def __init__(self, *args):
this = _term_set_sim.new_SizeVector(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _term_set_sim.SizeVector_push_back(self, *args)
def front(self): return _term_set_sim.SizeVector_front(self)
def back(self): return _term_set_sim.SizeVector_back(self)
def assign(self, *args): return _term_set_sim.SizeVector_assign(self, *args)
def resize(self, *args): return _term_set_sim.SizeVector_resize(self, *args)
def insert(self, *args): return _term_set_sim.SizeVector_insert(self, *args)
def reserve(self, *args): return _term_set_sim.SizeVector_reserve(self, *args)
def capacity(self): return _term_set_sim.SizeVector_capacity(self)
__swig_destroy__ = _term_set_sim.delete_SizeVector
__del__ = lambda self : None;
SizeVector_swigregister = _term_set_sim.SizeVector_swigregister
SizeVector_swigregister(SizeVector)
class StringArray(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StringArray, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StringArray, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _term_set_sim.new_StringArray(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _term_set_sim.delete_StringArray
__del__ = lambda self : None;
def __getitem__(self, *args): return _term_set_sim.StringArray___getitem__(self, *args)
def __setitem__(self, *args): return _term_set_sim.StringArray___setitem__(self, *args)
def cast(self): return _term_set_sim.StringArray_cast(self)
__swig_getmethods__["frompointer"] = lambda x: _term_set_sim.StringArray_frompointer
if _newclass:frompointer = staticmethod(_term_set_sim.StringArray_frompointer)
StringArray_swigregister = _term_set_sim.StringArray_swigregister
StringArray_swigregister(StringArray)
def StringArray_frompointer(*args):
return _term_set_sim.StringArray_frompointer(*args)
StringArray_frompointer = _term_set_sim.StringArray_frompointer
class StringVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StringVector, name)
__repr__ = _swig_repr
def iterator(self): return _term_set_sim.StringVector_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _term_set_sim.StringVector___nonzero__(self)
def __bool__(self): return _term_set_sim.StringVector___bool__(self)
def __len__(self): return _term_set_sim.StringVector___len__(self)
def pop(self): return _term_set_sim.StringVector_pop(self)
def __getslice__(self, *args): return _term_set_sim.StringVector___getslice__(self, *args)
def __setslice__(self, *args): return _term_set_sim.StringVector___setslice__(self, *args)
def __delslice__(self, *args): return _term_set_sim.StringVector___delslice__(self, *args)
def __delitem__(self, *args): return _term_set_sim.StringVector___delitem__(self, *args)
def __getitem__(self, *args): return _term_set_sim.StringVector___getitem__(self, *args)
def __setitem__(self, *args): return _term_set_sim.StringVector___setitem__(self, *args)
def append(self, *args): return _term_set_sim.StringVector_append(self, *args)
def empty(self): return _term_set_sim.StringVector_empty(self)
def size(self): return _term_set_sim.StringVector_size(self)
def clear(self): return _term_set_sim.StringVector_clear(self)
def swap(self, *args): return _term_set_sim.StringVector_swap(self, *args)
def get_allocator(self): return _term_set_sim.StringVector_get_allocator(self)
def begin(self): return _term_set_sim.StringVector_begin(self)
def end(self): return _term_set_sim.StringVector_end(self)
def rbegin(self): return _term_set_sim.StringVector_rbegin(self)
def rend(self): return _term_set_sim.StringVector_rend(self)
def pop_back(self): return _term_set_sim.StringVector_pop_back(self)
def erase(self, *args): return _term_set_sim.StringVector_erase(self, *args)
def __init__(self, *args):
this = _term_set_sim.new_StringVector(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _term_set_sim.StringVector_push_back(self, *args)
def front(self): return _term_set_sim.StringVector_front(self)
def back(self): return _term_set_sim.StringVector_back(self)
def assign(self, *args): return _term_set_sim.StringVector_assign(self, *args)
def resize(self, *args): return _term_set_sim.StringVector_resize(self, *args)
def insert(self, *args): return _term_set_sim.StringVector_insert(self, *args)
def reserve(self, *args): return _term_set_sim.StringVector_reserve(self, *args)
def capacity(self): return _term_set_sim.StringVector_capacity(self)
__swig_destroy__ = _term_set_sim.delete_StringVector
__del__ = lambda self : None;
StringVector_swigregister = _term_set_sim.StringVector_swigregister
StringVector_swigregister(StringVector)
class DoubleVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
__repr__ = _swig_repr
def iterator(self): return _term_set_sim.DoubleVector_iterator(self)
def __iter__(self): return self.iterator()
def __nonzero__(self): return _term_set_sim.DoubleVector___nonzero__(self)
def __bool__(self): return _term_set_sim.DoubleVector___bool__(self)
def __len__(self): return _term_set_sim.DoubleVector___len__(self)
def pop(self): return _term_set_sim.DoubleVector_pop(self)
def __getslice__(self, *args): return _term_set_sim.DoubleVector___getslice__(self, *args)
def __setslice__(self, *args): return _term_set_sim.DoubleVector___setslice__(self, *args)
def __delslice__(self, *args): return _term_set_sim.DoubleVector___delslice__(self, *args)
def __delitem__(self, *args): return _term_set_sim.DoubleVector___delitem__(self, *args)
def __getitem__(self, *args): return _term_set_sim.DoubleVector___getitem__(self, *args)
def __setitem__(self, *args): return _term_set_sim.DoubleVector___setitem__(self, *args)
def append(self, *args): return _term_set_sim.DoubleVector_append(self, *args)
def empty(self): return _term_set_sim.DoubleVector_empty(self)
def size(self): return _term_set_sim.DoubleVector_size(self)
def clear(self): return _term_set_sim.DoubleVector_clear(self)
def swap(self, *args): return _term_set_sim.DoubleVector_swap(self, *args)
def get_allocator(self): return _term_set_sim.DoubleVector_get_allocator(self)
def begin(self): return _term_set_sim.DoubleVector_begin(self)
def end(self): return _term_set_sim.DoubleVector_end(self)
def rbegin(self): return _term_set_sim.DoubleVector_rbegin(self)
def rend(self): return _term_set_sim.DoubleVector_rend(self)
def pop_back(self): return _term_set_sim.DoubleVector_pop_back(self)
def erase(self, *args): return _term_set_sim.DoubleVector_erase(self, *args)
def __init__(self, *args):
this = _term_set_sim.new_DoubleVector(*args)
try: self.this.append(this)
except: self.this = this
def push_back(self, *args): return _term_set_sim.DoubleVector_push_back(self, *args)
def front(self): return _term_set_sim.DoubleVector_front(self)
def back(self): return _term_set_sim.DoubleVector_back(self)
def assign(self, *args): return _term_set_sim.DoubleVector_assign(self, *args)
def resize(self, *args): return _term_set_sim.DoubleVector_resize(self, *args)
def insert(self, *args): return _term_set_sim.DoubleVector_insert(self, *args)
def reserve(self, *args): return _term_set_sim.DoubleVector_reserve(self, *args)
def capacity(self): return _term_set_sim.DoubleVector_capacity(self)
__swig_destroy__ = _term_set_sim.delete_DoubleVector
__del__ = lambda self : None;
DoubleVector_swigregister = _term_set_sim.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)
class BoostSet(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BoostSet, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BoostSet, name)
__repr__ = _swig_repr
def __init__(self):
this = _term_set_sim.new_BoostSet()
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _term_set_sim.delete_BoostSet
__del__ = lambda self : None;
BoostSet_swigregister = _term_set_sim.BoostSet_swigregister
BoostSet_swigregister(BoostSet)
class TermSimilarityInterface(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TermSimilarityInterface, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TermSimilarityInterface, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def calculateTermSimilarity(self, *args): return _term_set_sim.TermSimilarityInterface_calculateTermSimilarity(self, *args)
def calculateNormalizedTermSimilarity(self, *args): return _term_set_sim.TermSimilarityInterface_calculateNormalizedTermSimilarity(self, *args)
__swig_destroy__ = _term_set_sim.delete_TermSimilarityInterface
__del__ = lambda self : None;
TermSimilarityInterface_swigregister = _term_set_sim.TermSimilarityInterface_swigregister
TermSimilarityInterface_swigregister(TermSimilarityInterface)
class TermSetSimilarityInterface(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TermSetSimilarityInterface, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TermSetSimilarityInterface, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def calculateSimilarity(self, *args): return _term_set_sim.TermSetSimilarityInterface_calculateSimilarity(self, *args)
__swig_destroy__ = _term_set_sim.delete_TermSetSimilarityInterface
__del__ = lambda self : None;
TermSetSimilarityInterface_swigregister = _term_set_sim.TermSetSimilarityInterface_swigregister
TermSetSimilarityInterface_swigregister(TermSetSimilarityInterface)
class AllPairsAverageSetSimilarity(TermSetSimilarityInterface):
__swig_setmethods__ = {}
for _s in [TermSetSimilarityInterface]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, AllPairsAverageSetSimilarity, name, value)
__swig_getmethods__ = {}
for _s in [TermSetSimilarityInterface]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, AllPairsAverageSetSimilarity, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _term_set_sim.new_AllPairsAverageSetSimilarity(*args)
try: self.this.append(this)
except: self.this = this
def calculateSimilarity(self, *args): | |
+ ": " + str(para_back[2 + i * 4])+"\n"
info = info + "Sigma " + str(i + 1) + ": " + str(para_back[3 + i * 4])+"\n"
info = info + "Step " + str(i + 1) + ": " + str(para_back[4 + i * 4])+"\n"
info_area = info_area + "Area " + str(i + 1) + ": " + str(np.sum(peak_gauss[i][0]))+"\n"
info = info + "============\n"
info = info + info_area
info = info + "R2: "+str(R2)+"\n"
self.info_out(info)
self.cmd_out("DO: Finish peak fitting")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_fit_semi_do(self):
try:
if len(self.let_fit_sci_left_2.text())==0 or len(self.let_fit_sci_right_2.text())==0 or len(self.let_fit_sci_bg_2.text())==0:
QMessageBox.warning(self,
"Error!",
"Missing parameters!")
return
# roi parameters
left = int(self.let_fit_sci_left_2.text())
right = int(self.let_fit_sci_right_2.text())
# bg parameters
bg = float(self.let_fit_sci_bg_2.text())
if self.table_fit_semi_p.rowCount() == 0:
QMessageBox.warning(self,
"Error!",
"Missing parameters")
return
peak_paras = []
for i in range(self.table_fit_semi_p.rowCount()):
if self.table_fit_semi_p.item(i, 0) == None and self.table_fit_semi_p.item(i, 1) == None and \
self.table_fit_semi_p.item(i, 2) == None and self.table_fit_semi_p.item(i, 3) == None \
and self.table_fit_semi_p.item(i, 4) == None and self.table_fit_semi_p.item(i, 5) == None \
and self.table_fit_semi_p.item(i, 6) == None and self.table_fit_semi_p.item(i, 7) == None:
pass
elif self.table_fit_semi_p.item(i, 0) != None and self.table_fit_semi_p.item(i, 1) != None and \
self.table_fit_semi_p.item(i, 2) != None and self.table_fit_semi_p.item(i, 3) != None \
and self.table_fit_semi_p.item(i, 4) != None and self.table_fit_semi_p.item(i, 5) != None \
and self.table_fit_semi_p.item(i, 6) != None and self.table_fit_semi_p.item(i, 7) != None:
peak_paras.append([float(self.table_fit_semi_p.item(i, 0).text()),
float(self.table_fit_semi_p.item(i, 1).text()),
float(self.table_fit_semi_p.item(i, 2).text()),
float(self.table_fit_semi_p.item(i, 3).text()),
float(self.table_fit_semi_p.item(i, 4).text()),
float(self.table_fit_semi_p.item(i, 5).text()),
float(self.table_fit_semi_p.item(i, 6).text()),
float(self.table_fit_semi_p.item(i, 7).text())])
else:
QMessageBox.warning(self,
"Error!",
"data error, row: " + str(i + 1))
if len(peak_paras)==0:
QMessageBox.warning(self,
"Error!",
"Missing parameters 2")
return
x_list = self.energy[left:right + 1]
y_list = self.spec_now[left:right + 1]
# print("???????",len(peak_paras))
para_back, x_list_back, bg_back, peak_gauss, R2 = geFit().semi_peak_fit(x_list, y_list, bg, peak_paras,
self.cbx_fit_sci_ef_2.isChecked(),False)
# print("!!!!!!!", len(para_back), len(peak_gauss))
# print("here2")
bg_for_pic = bg_back
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.scatter(x_list, y_list, color="black", label="points")
peaks = []
all = np.zeros_like(x_list_back)
if True:
for i in range(len(peak_gauss)):
self.ax.fill_between(x_list_back, 0, peak_gauss[i][0]+peak_gauss[i][1], label="peak " + str(i + 1), alpha=0.5)
bg_for_pic = bg_for_pic + peak_gauss[i][2] + peak_gauss[i][3]
peaks = peak_gauss[i][0]+peak_gauss[i][1]
all = all + peak_gauss[i][0] + peak_gauss[i][1]
# print("here3")
self.ax.plot(x_list_back, bg_for_pic, color="darkblue", label="background+steps+tails")
self.ax.plot(x_list_back, bg_for_pic+all, color="darkgreen", label="fit")
self.ax.legend()
self.canvas.draw()
info = "============\n"
info = info + "the parameters are:\n"
info = info + "Bg: " + str(para_back[0]) + "\n"
# print(2)
info_area = ""
if True:
for i in range((int((len(para_back) - 1) / 8))):
info = info + "------------\n"
info = info + "E " + str(i + 1) + ": " + str(para_back[1 + i * 8]) + "\n"
info = info + "Amp " + str(i + 1) + ": " + str(para_back[2 + i * 8]) + "\n"
info = info + "Sigma " + str(i + 1) + ": " + str(para_back[3 + i * 8]) + "\n"
info = info + "A l-skew " + str(i + 1) + ": " + str(para_back[4 + i * 8]) + "\n"
info = info + "B l-skew " + str(i + 1) + ": " + str(para_back[5 + i * 8]) + "\n"
info = info + "A tail " + str(i + 1) + ": " + str(para_back[6 + i * 8]) + "\n"
info = info + "B tail " + str(i + 1) + ": " + str(para_back[7 + i * 8]) + "\n"
info = info + "A step " + str(i + 1) + ": " + str(para_back[8 + i * 8]) + "\n"
info_area = info_area + "Area " + str(i + 1) + ": " + str(np.sum(peak_gauss[i][0]+peak_gauss[i][1])) + "\n"
# print(3)
info = info + "============\n"
info = info + info_area
info = info + "R2: " + str(R2) + "\n"
# print(4)
self.info_out(info)
self.cmd_out("DO: Finish peak fitting")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
# ===================================================================================
# 10 SLOT Simulation
# simulation-broaden
def slot_rbn_simu_edep(self):
try:
print("excu slot_rbn_simu_edep")
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
"Open the energy deposition file",
"./", # 起始路径
"Text Files (*.txt);;All Files (*)") # 设置文件扩展名过滤,用双分号间隔
if fileName_choose == "":
return
self.simu_bro_edep_fp = fileName_choose
self.flas_simu_broaden = 0
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_rbn_simu_spec(self):
try:
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
"Open the energy deposition file",
"./", # 起始路径
"Text Files (*.txt);;All Files (*)") # 设置文件扩展名过滤,用双分号间隔
if fileName_choose == "":
self.radioButton_2.setChecked(False)
return
self.spec_now = np.loadtxt(fileName_choose)
self.flas_simu_broaden = 0
self.canvas_update()
self.cmd_out("DO: Read the energy deposition spectrum")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_rbn_simu_spec_conv(self):
try:
print("excu slot_rbn_simu_spec")
fileName_choose, filetype = QFileDialog.getOpenFileName(self,
"Open the energy deposition file",
"./", # 起始路径
"Text Files (*.txt);;All Files (*)") # 设置文件扩展名过滤,用双分号间隔
if fileName_choose == "":
self.rbt_simu_broaden_conv.setChecked(False)
return
self.spec_now = np.loadtxt(fileName_choose)
self.flas_simu_broaden = 1
self.canvas_update()
self.cmd_out("DO: Read the energy deposition spectrum")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_bro_mc_draw(self):
try:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
return
a = float(self.let_simu_bro_mc_a.text())
b = float(self.let_simu_bro_mc_b.text())
c = float(self.let_simu_bro_mc_c.text())
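# FWHM model used for this tab (the 'MCNP model' in the log message below):
# FWHM(E) = a + sqrt(b * (E + c * E**2)), evaluated over the whole energy axis.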
self.fwhm_curve = a + np.sqrt(b * (self.energy + c * np.power(self.energy, 2)))
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.plot(self.energy, self.fwhm_curve, color="red", label="FWHM")
self.ax.legend()
self.canvas.draw()
self.cmd_out("DO: Draw the FWHM curve: MCNP model")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_bro_mc_do(self):
try:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
return
a = float(self.let_simu_bro_mc_a.text())
b = float(self.let_simu_bro_mc_b.text())
c = float(self.let_simu_bro_mc_c.text())
self.fwhm_curve = a + np.sqrt(b * (self.energy + c * np.power(self.energy, 2)))
if self.radioButton.isChecked(): # edep
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_sampling(self.simu_bro_edep_fp, self.energy, self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
elif self.radioButton_2.isChecked(): # spec
if len(self.spec_now)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the spectrum")
return
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_sampling_spec(self.spec_now,self.energy,self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_bro_cr_draw(self):
try:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
return
d = float(self.let_simu_bro_cr_a.text())
e = float(self.let_simu_bro_cr_e.text())
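# Power-law FWHM model (the 'CEAR model' in the log message below):
# FWHM(E) = d * E**e, evaluated over the whole energy axis.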
self.fwhm_curve = np.dot(d,np.power(self.energy,e))+0
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.plot(self.energy, self.fwhm_curve, color="red", label="FWHM")
self.ax.legend()
self.canvas.draw()
self.cmd_out("DO: Draw the FWHM curve: CEAR model")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_bro_cr_do(self):
try:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
return
d = float(self.let_simu_bro_cr_a.text())
e = float(self.let_simu_bro_cr_e.text())
self.fwhm_curve = np.dot(d,np.power(self.energy,e))+0
if self.radioButton.isChecked(): # edep
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_sampling(self.simu_bro_edep_fp, self.energy, self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
elif self.radioButton_2.isChecked(): # spec
if len(self.spec_now)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the spectrum")
return
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_sampling_spec(self.spec_now,self.energy,self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_bro_ln_draw(self):
try:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
return
k = float(self.let_simu_bro_ln_k.text())
b = float(self.let_simu_bro_ln_b.text())
self.fwhm_curve = np.dot(k, self.energy) + b
self.ax.cla()
self.ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
self.ax.plot(self.energy, self.fwhm_curve, color="red", label="FWHM")
self.ax.legend()
self.canvas.draw()
self.cmd_out("DO: Draw the FWHM curve: LINEAR model")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_bro_ln_do(self):
try:
if len(self.energy)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the energy list")
return
k = float(self.let_simu_bro_ln_k.text())
b = float(self.let_simu_bro_ln_b.text())
self.fwhm_curve = np.dot(k, self.energy) + b
if self.flas_simu_broaden==0:
if self.radioButton.isChecked(): # edep
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_sampling(self.simu_bro_edep_fp, self.energy, self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
elif self.radioButton_2.isChecked(): # spec
if len(self.spec_now)==0:
QMessageBox.warning(self,
"Error!",
"Please choose the spectrum")
return
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_sampling_spec(self.spec_now,self.energy,self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
else:
if len(self.spec_now) == 0:
QMessageBox.warning(self,
"Error!",
"Please choose the spectrum")
return
self.cmd_out("DO: Broaden...")
spec_broaden = geBroaden().broaden_conv_spec(self.spec_now, self.energy, self.fwhm_curve)
self.spec_last = self.spec_now + 0
self.spec_now = spec_broaden
self.canvas_update()
self.cmd_out("DO: Finish the spectrum broaden")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_mcr_open(self):
try:
txtNames, filetype = QFileDialog.getOpenFileNames(self,
"Open the MCNP output file",
"./", # 起始路径
"All Files (*);;Text Files (*.txt)") # 设置文件扩展名过滤,用双分号间隔
if len(txtNames)==0:
# print("\n取消选择")
return
self.geMCR = geMCReader()
self.geMCR.open(txtNames,self.procB_simu_mcr)
self.pbt_simu_mcr_save.setEnabled(True)
self.cmd_out("DO: Finish read "+str(len(txtNames))+" MCNP output files.")
except BaseException as e:
QMessageBox.warning(self,
"Error!",
e.__str__())
def slot_btn_simu_mcr_save(self):
try:
file_path,file_type = QFileDialog.getSaveFileName(self, "Save | |
# -*- coding: utf-8 -*-
"""
User class for the locust scenario runner
"""
from __future__ import print_function, unicode_literals
import json
import random
import requests
import sys
import time
from datetime import datetime as dt
from locust import TaskSet
def get_pk(node):
# Old versions of Kolibri use 'id' instead of 'pk' as the node's primary-key field.
pk = 'pk'
if 'pk' not in node:
pk = 'id'
return pk
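# Illustrative sketch (not part of the original script): get_pk() returns the field
# name used to address a content node, falling back to 'id' for older Kolibri
# payloads. The sample node below is hypothetical.
def _demo_get_pk():
    old_style_node = {'id': 'abc123', 'kind': 'topic'}
    pk = get_pk(old_style_node)  # -> 'id', since the node has no 'pk' field
    return old_style_node[pk]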
class AdminUser(object):
USERNAME = 'admin'
PASSWORD = '<PASSWORD>'
def __init__(self, base_url):
self.base_url = base_url
self.headers = None
self.download_url = 'storage_url'
def login_admin(self):
data = {'username': AdminUser.USERNAME, 'password': AdminUser.PASSWORD}
r = requests.get('{base_url}/user/'.format(base_url=self.base_url))
session_identifier = 'kolibri' if 'kolibri' in r.cookies else 'sessionid'
r = requests.get('{base_url}/api/auth/session/current/?active=false'.format(base_url=self.base_url))
csrf_token = r.cookies['csrftoken'] if 'csrftoken' in r.cookies else ''
session_id = r.cookies['csrftoken']
cookie_header = '{session_identifier}={session_id}; csrftoken={csrf_token}'.format(
session_identifier=session_identifier, session_id=session_id, csrf_token=csrf_token)
headers = {'X-CSRFToken': csrf_token, 'Cookie': cookie_header}
r = requests.post('{base_url}/api/session/'.format(base_url=self.base_url), data=data, headers=headers)
if r.status_code == 404: # Kolibri version > v0.11
r = requests.post('{base_url}/api/auth/session/'.format(base_url=self.base_url), data=data, headers=headers)
# update headers with the new set of entries
self.headers = {'X-CSRFToken': r.cookies['csrftoken'],
'Cookie': '{session_identifier}={session_id}; csrftoken={csrf_token}'.format(
session_identifier=session_identifier, session_id=r.cookies[session_identifier],
csrf_token=r.cookies['csrftoken'])}
def get_users(self):
if not self.headers:
self.login_admin()
r = requests.get('{base_url}/api/facilityuser/'.format(base_url=self.base_url), headers=self.headers)
if r.status_code != 200: # Kolibri version > v0.11
r = requests.get('{base_url}/api/auth/facilityuser/'.format(base_url=self.base_url), headers=self.headers)
if r.status_code != 200:
return []
return [{'username': u['username'], 'id':u['id'], 'facility':u['facility']}
for u in json.loads(r.content) if u['roles'] == []]
def get_content_resources(self, contents, kind):
resources = []
if contents:
pk = get_pk(contents[0])
try:
resources = [{'content_id': content[pk],
'files':[file[self.download_url] for file in content['files'] if 'files' in content.keys()],
'channel_id': content['channel_id'],
'assessment_item_ids': None if kind != 'exercise' else
[content['assessment_item_ids'] for content in content['assessmentmetadata']]}
for content in contents if content['kind'] == kind]
except KeyError:
# old api format
self.download_url = 'download_url'
return resources
def get_resources(self):
resources = {'video': [], 'html5': [], 'document': [], 'exercise': []}
if not self.headers:
self.login_admin()
# get available channels:
r = requests.get('{base_url}/api/contentnode/all_content/'.format(base_url=self.base_url),
headers=self.headers)
if r.status_code == 404:
r = requests.get('{base_url}/api/content/contentnode_slim/'.format(base_url=self.base_url),
headers=self.headers)
if r.status_code != 200:
return resources
else:
try:
contents = json.loads(r.content)
for kind in resources.keys():
resources[kind] = resources[kind] + self.get_content_resources(contents, kind)
except ValueError:
# bad response from the server
pass
finally:
return resources
class KolibriUserBehavior(TaskSet):
KOLIBRI_USERS = []
RESOURCES = {'video': [], 'html5': [], 'document': [], 'exercise': []}
TIMEOUT = (60, 60)
RANDOMIZE = True
def on_start(self):
# retrieve headers for the current user
self.headers = self.get_headers()
self.current_user = None
self.logs_ids = self.get_logs_ids_dict()
self.kolibri_new_version = False
self.url_auth_prefix = ""
self.url_logger_prefix = ""
self.url_content_prefix = "contentnode/"
if KolibriUserBehavior.KOLIBRI_USERS:
self.current_user = random.choice(KolibriUserBehavior.KOLIBRI_USERS)
print('Current user: {}'.format(self.current_user['username']))
self.log_in(self.current_user['username'], facility=self.current_user['facility'])
else:
# TODO: add appropriate logging
print('No learners to run the tests. At least 1 admin + 1 coach + 1 learner are needed')
sys.exit(1)
def log_in(self, username, password=None, headers=None, facility=None):
data = {'username': username}
if password:
data['password'] = password
if facility:
data['facility'] = facility
if not headers:
headers = self.headers
r = self.client.post('/api/session/', data=data, headers=headers,
timeout=KolibriUserBehavior.TIMEOUT)
if r.status_code == 404: # Kolibri version > v0.11
self.kolibri_new_version = True
self.url_auth_prefix = "auth/"
self.url_logger_prefix = "logger/"
self.url_content_prefix = "content/contentnode_slim/"
r = self.client.post('/api/{}session/'.format(self.url_auth_prefix), data=data, headers=headers,
timeout=KolibriUserBehavior.TIMEOUT)
# update headers with the new set of entries
session_identifier = 'kolibri' if 'kolibri' in r.cookies else 'sessionid'
self.set_headers({'X-CSRFToken': r.cookies['csrftoken'],
'Cookie': '{session_identifier}={session_id}; csrftoken={csrf_token}'.format(
session_identifier=session_identifier, session_id=r.cookies[session_identifier],
csrf_token=r.cookies['csrftoken'])})
return r.status_code == 200
def log_out(self, headers=None):
if not headers:
headers = self.headers
r = self.client.delete('/api/{}session/current/'.format(self.url_auth_prefix), headers=headers)
return r.status_code == 200
def get_headers(self):
r = self.client.get('/user/')
r = self.client.get('/api/auth/session/current/?active=false')
self.csrf_token = r.cookies['csrftoken']
session_identifier = 'kolibri' if 'kolibri' in r.cookies else 'sessionid'
self.session_id = r.cookies['csrftoken']
cookie_header = '{session_identifier}={session_id}; csrftoken={csrf_token}'.format(
session_identifier=session_identifier, session_id=self.session_id, csrf_token=self.csrf_token)
return {'X-CSRFToken': self.csrf_token, 'Cookie': cookie_header}
def set_headers(self, headers):
self.headers = headers
def get_kolibri_users(self):
r = self.client.get('/api/facilityuser/')
# users with coach or admin role need password to login:
return [{'username': u['username'], 'id':u['id'], 'facility':u['facility']}
for u in json.loads(r.content) if u['roles'] == []]
def browse_resource(self, parent_node=None):
if not parent_node:
self.get_facility()
# select a channel
channels = self.get_available_channels()
if not channels:
return
channel = random.choice(channels) if KolibriUserBehavior.RANDOMIZE else channels[0]
# get the channel data
parent_node = self.get_content_node(channel['id'])
if not parent_node:
return
# "click" on the node
pk = get_pk(parent_node)
self.get_content_node_ancestors(parent_node[pk])
child_nodes = self.get_content_nodes_by_parent(parent_node[pk])
if not child_nodes:
return
# "click" on a content node item
child_node = random.choice(child_nodes) if KolibriUserBehavior.RANDOMIZE else child_nodes[0]
kind = child_node['kind']
# if the child node item is topic, do another round
if kind == 'topic':
self.browse_resource(parent_node=child_node)
# fetch the full data for the "final" node and set `skip_node_endpoint` to True
# when calling `do_resource` so that we don't fire that request for the 2nd time
final_node = self.get_content_node(child_node[pk])
if self.kolibri_new_version:
resource = {'content_id': final_node[pk],
'channel_id': final_node['channel_id'],
'assessment_item_ids': None,
'files': [file['storage_url'] for file in final_node['files']]}
else:
resource = {'content_id': final_node[pk],
'channel_id': final_node['channel_id'],
'assessment_item_ids': None,
'files': [file['download_url'] for file in final_node['files']]}
if kind == 'exercise' and 'assessmentmetadata' in final_node:
assessment_item_ids = [assessment_item['assessment_item_ids']
for assessment_item in final_node['assessmentmetadata']]
resource['assessment_item_ids'] = assessment_item_ids
self.do_resource(resource, kind, skip_node_endpoint=True)
def load_resource(self, kind):
resources_per_kind = KolibriUserBehavior.KOLIBRI_RESOURCES[kind]
if resources_per_kind:
if KolibriUserBehavior.RANDOMIZE:
resource = random.choice(resources_per_kind)
else:
resource = resources_per_kind[0]
self.do_resource(resource, kind)
def do_resource(self, resource, kind, skip_node_endpoint=False):
"""
This method simulates a realistic usage scenario observed while interacting
with Kolibri directly in the browser.
If `skip_node_endpoint` is passed as True, the call to `get_content_node` is
skipped, since that request was already fired during the content-browsing
simulation and we want the test to remain as realistic as possible.
"""
content_id = resource['content_id']
channel_id = resource['channel_id']
self.get_next_content_node(content_id)
self.do_contentsessionlog(content_id, channel_id, kind)
self.do_contentsummarylog(content_id, channel_id, kind)
self.get_content_node_ancestors(content_id)
self.do_userprogress()
self.do_usersessionlog()
self.fetch_resource_files(resource, kind)
# fetch content node details only if we haven't already done that while browsing content
if not skip_node_endpoint:
self.get_content_node(content_id)
# log masterylog only if contentsummarylog has been logged and masterylog hasn't been yet
if self.logs_ids.get('contentsummarylog_id') and not self.logs_ids.get('masterylog_id'):
self.do_masterylog(content_id, channel_id, kind)
# log attemptlog only if content type is exercise
if kind == 'exercise':
self.do_attemptlog(resource)
def fetch_resource_files(self, resource, kind):
if kind == 'exercise':
exercise_base = resource['files'][0]
for assessment_item_id in resource['assessment_item_ids'][0]:
self.client.get("{base}{assessment}.json".format(base=exercise_base, assessment=assessment_item_id),
timeout=KolibriUserBehavior.TIMEOUT)
else:
for file_url in resource['files']:
if file_url:
self.client.get(file_url, timeout=KolibriUserBehavior.TIMEOUT)
def get_logs_ids_dict(self):
return {
'contentsessionlog_id': None,
'masterylog_id': None,
'contentsummarylog_id': None,
'attemptlog_id': None
}
def do_contentsessionlog(self, content_id, channel_id, kind):
log_url = '/api/{}contentsessionlog/'.format(self.url_logger_prefix)
# create POST request to get the log id
timestamp = dt.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
data = {
'channel_id': channel_id,
'content_id': content_id,
'end_timestamp': timestamp,
'extra_fields': '{}',
'kind': kind,
'progress': 0,
'start_timestamp': timestamp,
'time_spent': 0,
'user': self.current_user['id']
}
contentsessionlog_url = '/api/{}contentsessionlog/'.format(self.url_logger_prefix)
r = self.client.post(log_url, data=data, headers=self.headers,
timeout=KolibriUserBehavior.TIMEOUT, name=contentsessionlog_url)
if not r.status_code == 201:
return False
# create PATCH request to update the log
pk = get_pk(json.loads(r.content))
data['pk'] = json.loads(r.content)[pk]
log_url_patch = '{log_url}{log_id}/'.format(log_url=log_url, log_id=data['pk'])
r = self.client.patch(log_url_patch, data=data, headers=self.headers, timeout=KolibriUserBehavior.TIMEOUT,
name='{}{}'.format(contentsessionlog_url, self.current_user['username']))
# set log id for other log methods to use if necessary
self.logs_ids['contentsessionlog_id'] = data['pk']
return r.status_code == 200
def do_contentsummarylog(self, content_id, channel_id, kind):
log_url = '/api/{}contentsummarylog/'.format(self.url_logger_prefix)
# set general data object (for PATCH and optionally POST requests)
timestamp = dt.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
data = {
'channel_id': channel_id,
'content_id': content_id,
'end_timestamp': timestamp,
'extra_fields': '{}',
'kind': kind,
'progress': 0,
'start_timestamp': timestamp,
'time_spent': 0,
'user': self.current_user['id'],
'completion_timestamp': None,
'currentmasterylog': None
}
# create a GET request to check if the log already exists
log_url_get = '{log_url}?content_id={content_id}&user_id={user_id}'.format(
log_url=log_url, content_id=content_id, user_id=self.current_user['id'])
r = self.client.get(log_url_get, timeout=KolibriUserBehavior.TIMEOUT,
name='{}{}'.format(log_url, self.current_user['username']))
if not r.status_code == 200:
return False
contents = json.loads(r.content)
if len(contents) > 0:
# log exists, extract the log id from the GET response
pk = get_pk(contents[0])
log_id = contents[0][pk]
else:
# create summarylog if it doesn't exists yet
r = self.client.post(log_url, data=data, headers=self.headers, timeout=KolibriUserBehavior.TIMEOUT,
name='{}{}'.format(log_url, self.current_user['username']))
if not r.status_code == 201:
return False
pk = get_pk(json.loads(r.content))
log_id = json.loads(r.content)[pk]
# create PATCH request to update the log
data['pk'] = log_id
log_url_patch = '{log_url}{log_id}/'.format(log_url=log_url, log_id=log_id)
r = self.client.patch(log_url_patch, data=data, headers=self.headers, timeout=KolibriUserBehavior.TIMEOUT,
name='{}{}'.format(log_url, self.current_user['username']))
# set log id for other log methods to use if necessary
self.logs_ids['contentsummarylog_id'] = log_id
return r.status_code == 200
def do_masterylog(self, content_id, channel_id, kind):
log_url = '/api/{}masterylog/'.format(self.url_logger_prefix)
timestamp = dt.now().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
data = {
'user': self.current_user['id'],
'summarylog': self.logs_ids.get('contentsummarylog_id'),
'start_timestamp': timestamp,
'completion_timestamp': None,
'end_timestamp': None,
'mastery_level': 1,
'complete': False,
'responsehistory': [],
'pastattempts': [],
'totalattempts': 0,
'mastery_criterion': '{}'
}
r = self.client.post(log_url, data=data, headers=self.headers, timeout=KolibriUserBehavior.TIMEOUT,
name='{}{}'.format(log_url, self.current_user['username']))
if not r.status_code == 201:
return False
log_id = json.loads(r.content)['id']
# set log id for other | |
# Auxiliary functions
#
# <NAME>, 2020
# <EMAIL>
import math
import torch
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import copy
import networkx
from tqdm import tqdm
from sklearn import metrics
from scipy import stats
import scipy.special as special
import icenet.tools.prints as prints
import numba
def split(a, n):
"""
Generator which returns approx equally sized chunks.
Args:
a : Array (or list) to be split
n : Number of chunks
Example:
list(split(list(range(10)), 3))
"""
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
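# Minimal usage sketch (illustrative, not part of the original module): splitting a
# list of 10 indices into 3 chunks; the extra element goes to the first chunk.
def _demo_split():
    chunks = list(split(list(range(10)), 3))
    # chunks == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
    return chunks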
def split_start_end(a, n):
"""
Returns approx equally sized chunks.
Args:
a : Array (or list) to be split
n : Number of chunks
Example:
split_start_end(list(range(10)), 3)
"""
ll = list(split(a,n))
out = []
for i in range(len(ll)):
out.append([ll[i][0], ll[i][-1]])
return out
def apply_cutflow(cut, names, xcorr_flow=True):
""" Apply cutflow
Args:
cut : list of pre-calculated cuts, each is a boolean array
names : list of names (description of each cut, for printout only)
xcorr_flow : compute full N-point correlations
Returns:
ind : list of indices, 1 = pass, 0 = fail
"""
print(__name__ + '.apply_cutflow: \n')
# Print out "serial flow"
N = len(cut[0])
ind = np.ones(N, dtype=np.uint8)
for i in range(len(cut)):
ind = np.logical_and(ind, cut[i])
print(f'cut[{i}][{names[i]:>25}]: pass {np.sum(cut[i]):>10}/{N} = {np.sum(cut[i])/N:.4f} | total = {np.sum(ind):>10}/{N} = {np.sum(ind)/N:0.4f}')
# Print out "parallel flow"
if xcorr_flow:
print('\n')
print(__name__ + '.apply_cutflow: Computing N-point correlations <xcorr_flow = True>')
vec = np.zeros((len(cut[0]), len(cut)))
for j in range(vec.shape[1]):
vec[:,j] = np.array(cut[j])
intmat = binaryvec2int(vec)
BMAT = generatebinary(vec.shape[1])
print(f'Boolean combinations for {names}: \n')
for i in range(BMAT.shape[0]):
print(f'{BMAT[i,:]} : {np.sum(intmat == i):>10} ({np.sum(intmat == i) / len(intmat):.4f})')
print('\n')
return ind
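# Minimal usage sketch (illustrative): two pre-computed boolean cuts over 4 events;
# the returned index array is the logical AND of all cuts.
def _demo_apply_cutflow():
    cut = [np.array([True, True, False, True]),
           np.array([True, False, False, True])]
    ind = apply_cutflow(cut, names=['cut_A', 'cut_B'], xcorr_flow=False)
    # ind == [True, False, False, True]
    return ind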
def count_targets(events, names, entrystart=0, entrystop=None):
""" Targets statistics printout
Args:
events : uproot object
names : list of branch names
entrystart : uproot starting point
entrystop : uproot ending point
Returns:
Printout on stdout
"""
K = len(names)
vec = np.array([events.array(name, entrystart=entrystart, entrystop=entrystop) for name in names])
vec = vec.T
intmat = binaryvec2int(vec)
BMAT = generatebinary(K)
print(__name__ + f'.count_targets: {names}')
for i in range(BMAT.shape[0]):
print(f'{BMAT[i,:]} : {np.sum(intmat == i):>10} ({np.sum(intmat == i) / len(intmat):.4f})')
return
def longvec2matrix(X, M, D, order='F'):
""" A matrix representation / dimension converter function.
Args:
X: Input matrix
M: Number of set elements
D: Feature dimension
order: Reshape direction
Returns:
Y: Output matrix
Examples:
X = [# number of samples N ] x [# M x D long feature vectors]
-->
Y = [# number of samples N ] x [# number of set elements M] x [# vector dimension D]
"""
Y = np.zeros((X.shape[0], M, D))
for i in range(X.shape[0]):
Y[i,:,:] = np.reshape(X[i,:], (M,D), order)
return Y
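# Minimal usage sketch (illustrative): two samples, each a flattened M=3 x D=2
# feature vector, reshaped back into an (N, M, D) array.
def _demo_longvec2matrix():
    X = np.arange(12).reshape(2, 6)
    Y = longvec2matrix(X, M=3, D=2, order='C')
    # Y.shape == (2, 3, 2)
    return Y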
@numba.njit
def number_of_set_bits(i):
""" Return how many bits are active of an integer in a standard binary representation.
"""
i = i - ((i >> 1) & 0x55555555)
i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
return (((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) & 0xffffffff) >> 24
@numba.njit
def binvec_are_equal(a,b):
""" Compare equality of two binary vectors a and b.
Args:
a,b : binary vectors
Returns
true or false
"""
if (np.sum(np.abs(a - b)) == 0):
return True
else:
return False
@numba.njit
def binvec2powersetindex(X, B):
"""
Binary vector to powerset index.
Args:
X : matrix of binary vectors [# number of vectors x dimension]
B : the powerset matrix
Returns:
y : array of powerset indices
"""
y = np.zeros(X.shape[0])
# Over all vectors
for i in range(X.shape[0]):
# Find corresponding powerset index
for j in range(B.shape[0]):
if binvec_are_equal(X[i,:], B[j,:]):
y[i] = j
break
return y
def to_graph(l):
""" Turn the list into a graph.
"""
G = networkx.Graph()
for part in l:
# Each sublist is a set of nodes
G.add_nodes_from(part)
# It also gives the number of edges
G.add_edges_from(to_edges(part))
return G
def to_edges(l):
""" treat `l` as a Graph and returns it's edges
Examples:
to_edges(['a','b','c','d']) -> [(a,b), (b,c),(c,d)]
"""
it = iter(l)
last = next(it)
for current in it:
yield last, current
last = current
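# Minimal usage sketch (illustrative): consecutive elements of the node list become
# the edges of a path, matching the example in the docstring above.
def _demo_to_edges():
    edges = list(to_edges(['a', 'b', 'c', 'd']))
    # edges == [('a', 'b'), ('b', 'c'), ('c', 'd')]
    return edges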
def merge_connected(lists):
""" Merge sets with common elements (find connected graphs problem).
Examples:
Input: [{0, 1}, {0, 1}, {2, 3}, {2, 3}, {4, 5}, {4, 5}, {6, 7}, {6, 7}, {8, 9}, {8, 9}, {10}, {11}]
Output: [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10}, {11}]
"""
sets = [set(lst) for lst in lists if lst]
merged = True
while merged:
merged = False
results = []
while sets:
common, rest = sets[0], sets[1:]
sets = []
for x in rest:
# Two sets are said to be disjoint sets if they have no common elements
if x.isdisjoint(common):
sets.append(x)
else:
merged = True
common |= x
results.append(common)
sets = results
return sets
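# Minimal usage sketch (illustrative): sets sharing elements are merged into single
# connected groups.
def _demo_merge_connected():
    merged = merge_connected([[0, 1], [1, 2], [3, 4], [5]])
    # merged == [{0, 1, 2}, {3, 4}, {5}]
    return merged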
def los2lol(listOsets):
""" Convert a list of sets [{},{},..,{}] to a list of of lists [[], [], ..., []].
"""
lists = []
for i in listOsets:
lists.append(list(i))
return lists
def bin_array(num, N):
""" Convert a positive integer num into an N-bit bit vector.
"""
return np.array(list(np.binary_repr(num).zfill(N))).astype(dtype=np.uint8)
def binomial(n,k):
""" Binomial coefficient C(n,k).
"""
return np.int64(math.factorial(n) / (math.factorial(k) * math.factorial(n-k)))
def generatebinary_fixed(n,k):
""" Generate all combinations of n bits with fixed k ones.
"""
# Initialize
c = [0] * (n - k) + [1] * k
X = np.zeros(shape=(binomial(n,k), n), dtype=np.uint8)
X[0,:] = c
z = 1
while True:
# Find the right-most [0,1] AND keep count of ones
i = n - 2
ones = 0
while i >= 0 and c[i:i+2] != [0,1]:
if c[i+1] == 1:
ones += 1
i -= 1
if i < 0:
break
# Change the 01 to 10 and reset the suffix to the smallest
# lexicographic string with the right number of ones and zeros
c[i:] = [1] + [0] * (n - i - ones - 1) + [1] * ones
# Save it
X[z,:] = c
z += 1
return X
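# Minimal usage sketch (illustrative): all C(4,2) = 6 four-bit vectors with exactly
# two bits set; the first row is the lexicographically smallest, [0, 0, 1, 1].
def _demo_generatebinary_fixed():
    X = generatebinary_fixed(4, 2)
    # X.shape == (6, 4) and every row sums to 2
    return X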
def generatebinary(N, M=None, verbose=False):
""" Function to generate all 2**N binary vectors (as boolean matrix rows)
with 1 <= M <= N number of ones (hot bits) (default N)
"""
if M is None: M = N
if (M < 1) | (M > N):
raise Exception(f'generatebinary: M = {M} cannot be less than 1 or greater than N = {N}')
# Count the number of vectors (rows) needed using binomial coefficients
K = 1
for k in range(1,M+1):
K += binomial(N,k)
if verbose:
print(__name__ + f'.generatebinary: Binary matrix dimension {K} x {N}')
X = np.zeros((K, N), dtype=np.uint8)
ivals = np.zeros(K, dtype = np.double)
# Generate up to each m separately here, then sort
i = 0
for m in range(0,M+1):
Y = generatebinary_fixed(N,m)
for z in range(Y.shape[0]):
X[i,:] = Y[z,:]
ivals[i] = bin2int(X[i,:])
i += 1
# Sort them to lexicographic order
lexind = np.argsort(ivals)
return X[lexind,:]
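# Hedged illustration (not part of the original module): for N=3, M=2 the matrix holds
# 1 + C(3,1) + C(3,2) = 7 rows, i.e. every 3-bit vector with at most two ones, sorted
# lexicographically.
def _generatebinary_demo():
    X = generatebinary(3, 2)
    return X.shape  # expected: (7, 3)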
def bin2int(b):
""" Binary vector to integer.
"""
base = int(2)
if len(b) > 63: # Doubles for large number of bits
base = np.double(base)
return b.dot(base**np.arange(b.size)[::-1])
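# Hedged sanity check: the first element is treated as the most significant bit.
def _bin2int_demo():
    return bin2int(np.array([1, 0, 1]))  # 1*4 + 0*2 + 1*1 = 5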
def binom_coeff_all(N, MAX = None):
""" Sum all all binomial coefficients up to MAX.
"""
B = generatebinary(N, MAX)
s = np.sum(B, axis=1)
c = np.zeros(N+1, dtype=np.int64)
for i in range(N+1):
c[i] = np.sum(s == i)
return c
def binaryvec2int(X):
""" Turn a matrix of binary vectors row-by-row into integer reps.
"""
if X.shape[1] > 63:
# double because we may have over 63 bits
Y = np.zeros(X.shape[0], dtype=np.double)
else:
Y = np.zeros(X.shape[0], dtype=np.int64)  # np.int is removed in recent NumPy versions
for i in range(len(Y)):
Y[i] = bin2int(X[i,:])
return Y
def weight2onehot(weights, Y, N_classes):
"""
Weights into one-hot encoding.
Args:
weights : array of weights
Y : targets
N_classes : number of classes
"""
one_hot_weights = np.zeros((len(weights), N_classes))
for i in range(N_classes):
try:
one_hot_weights[Y == i, i] = weights[Y == i]
except Exception:
print(__name__ + f'.weight2onehot: Failed with class = {i} (zero samples)')
return one_hot_weights
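# Hedged example (illustrative addition): each class column picks up the weights of the
# samples belonging to that class.
def _weight2onehot_demo():
    weights = np.array([0.5, 2.0, 1.0])
    Y = np.array([0, 1, 0])
    return weight2onehot(weights, Y, N_classes=2)  # [[0.5, 0.0], [0.0, 2.0], [1.0, 0.0]]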
def int2onehot(Y, N_classes):
""" Integer class vector to class "one-hot encoding"
Args:
Y: Class indices (# samples)
N_classes: Number of classes
Returns:
onehot: Onehot representation
import turing
import turing.batch
import turing.batch.config
import turing.router.config.router_config
from turing.router.config.route import Route
from turing.router.config.router_config import RouterConfig
from turing.router.config.router_version import RouterStatus
from turing.router.config.resource_request import ResourceRequest
from turing.router.config.log_config import LogConfig, ResultLoggerType
from turing.router.config.traffic_rule import TrafficRule, HeaderTrafficRuleCondition, PayloadTrafficRuleCondition
from turing.router.config.enricher import Enricher
from turing.router.config.router_ensembler_config import DockerRouterEnsemblerConfig
from turing.router.config.common.env_var import EnvVar
from turing.router.config.experiment_config import ExperimentConfig
def main(turing_api: str, project: str):
# Initialize Turing client
turing.set_url(turing_api)
turing.set_project(project)
# Build a router config in order to create a router
# Note: When constructing a `RouterConfig` object from scratch, it is **highly recommended** that you construct each
# individual component using the Turing SDK classes provided instead of using `dict` objects which do not perform
# any schema validation.
# Create some routes
routes = [
Route(
id='meow',
endpoint='http://fox-says.meow',
timeout='20ms'
),
Route(
id='woof',
endpoint='http://fox-says.woof',
timeout='20ms'
),
Route(
id='baaa',
endpoint='http://fox-says.baa',
timeout='20ms'
),
Route(
id='oink',
endpoint='http://fox-says.oink',
timeout='20ms'
),
Route(
id='ring-ding-ding',
endpoint='http://fox-says.ring-ding-ding',
timeout='20ms'
),
Route(
id='control',
endpoint='http://fox-says.control',
timeout='20ms'
)
]
# Create some traffic rules
# Note: Each traffic rule is defined by at least one `TrafficRuleCondition` and one route. Routes are essentially
# the `id`s of `Route` objects that you intend to specify for the entire `TrafficRule`.
#
# When defining a traffic rule, one would need to decide between using a `HeaderTrafficRuleCondition` or a
# `PayloadTrafficRuleCondition`. These subclasses can be used to build a `TrafficRuleCondition` without having to
# manually set attributes such as `field_source` or `operator`.
rules = [
TrafficRule(
conditions=[
HeaderTrafficRuleCondition(
field='name',
values=['cat']
)
],
routes=[
'meow'
]
),
TrafficRule(
conditions=[
HeaderTrafficRuleCondition(
field='name',
values=['dog']
)
],
routes=[
'woof'
]
),
TrafficRule(
conditions=[
HeaderTrafficRuleCondition(
field='name',
values=['sheep']
)
],
routes=[
'baaa'
]
),
TrafficRule(
conditions=[
HeaderTrafficRuleCondition(
field='name',
values=['pig']
)
],
routes=[
'oink'
]
),
TrafficRule(
conditions=[
PayloadTrafficRuleCondition(
field='body',
values=['sus']
)
],
routes=[
'meow',
'woof',
'baaa',
'oink',
'ring-ding-ding'
]
)
]
# Create an experiment config
# The `ExperimentConfig` class is a simple container to carry configuration related to an experiment to be used by a
# Turing Router. Note that as Turing does not create experiments automatically, you would need to create your
# experiments separately prior to specifying their configuration here.
#
# Also, notice that `ExperimentConfig` does not contain any fixed schema as it simply carries configuration for
# experiment engines, which are used as plug-ins for Turing. When building an `ExperimentConfig` from scratch, you
# would need to consider the underlying schema for the `config` attribute as well as the appropriate `type` that
# corresponds to your selected experiment engine.
experiment_config = ExperimentConfig(
type="test-exp",
config={
'variables':
[
{'name': 'farm_id', 'field': 'farm_id', 'field_source': 'header'},
{'name': 'country_code', 'field': 'country', 'field_source': 'header'},
{'name': 'latitude', 'field': 'farm_lat', 'field_source': 'header'},
{'name': 'longitude', 'field': 'farm_long', 'field_source': 'header'}
],
'project_id': 102
}
)
# Create a resource request config for the router
# Note: The units for CPU and memory requests are measured in cpu units and bytes respectively. You may wish to
# read more about how these are measured here:
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/.
resource_request = ResourceRequest(
min_replica=0,
max_replica=2,
cpu_request="500m",
memory_request="512Mi"
)
# Create a log config for the router
# Note: Logging for Turing Routers is done through BigQuery or Kafka, and its configuration is managed by the
# `LogConfig` class. Two helper classes (child classes of `LogConfig`) have been created to assist you in
# constructing these objects - `BigQueryLogConfig` and `KafkaLogConfig`.
#
# If you do not intend to use any logging, simply create a regular `LogConfig` object with `result_logger_type` set
# as `ResultLoggerType.NOP`, without defining the other arguments.
log_config = LogConfig(
result_logger_type=ResultLoggerType.NOP
)
# Create an enricher for the router
enricher = Enricher(
image="docker.io/ealen/echo-server:0.5.1",
resource_request=ResourceRequest(
min_replica=0,
max_replica=2,
cpu_request="500m",
memory_request="512Mi"
),
endpoint="/echo",
timeout="60ms",
port=3000,
env=[
EnvVar(
name="humans",
value="farmer-joe"
)
]
)
# Create an ensembler for the router
# Note: Ensembling for Turing Routers is done through Nop, Standard, Docker or Pyfunc ensemblers, and its configuration
# is managed by the `RouterEnsemblerConfig` class. Helper classes (child classes of `RouterEnsemblerConfig`)
# have been created to assist you in constructing these objects - `NopRouterEnsemblerConfig`,
# `StandardRouterEnsemblerConfig`, `DockerRouterEnsemblerConfig` and `PyfuncRouterEnsemblerConfig`.
ensembler = DockerRouterEnsemblerConfig(
image="docker.io/ealen/echo-server:0.5.1",
resource_request=ResourceRequest(
min_replica=1,
max_replica=3,
cpu_request="500m",
memory_request="512Mi"
),
endpoint="/echo",
timeout="60ms",
port=3000,
env=[],
)
# Create the RouterConfig instance
router_config = RouterConfig(
environment_name="id-dev",
name="what-does-the-fox-say",
routes=routes,
rules=rules,
default_route_id="control",
experiment_engine=experiment_config,
resource_request=resource_request,
timeout="100ms",
log_config=log_config,
enricher=enricher,
ensembler=ensembler
)
# 1. Create a new router using the RouterConfig object
# Note: A `Router` object represents a router that is created on Turing API. It does not (and should not) ever be
# created manually by using its constructor directly. Instead, you should only work with `Router`
# instances that get returned as a result of using the various `Router` class and instance methods that interact
# with Turing API, such as the one below.
new_router = turing.Router.create(router_config)
print(f"1. You have created a router with id: {new_router.id}")
# 2. List all routers
routers = turing.Router.list()
print(f"2. You have just retrieved a list of {len(routers)} routers:")
for r in routers:
if r.name == new_router.name:
my_router = r
print(r)
# Wait for the router to get deployed
try:
my_router.wait_for_status(RouterStatus.DEPLOYED)
except TimeoutError:
raise Exception(f"Turing API is taking too long for router {my_router.id} to get deployed.")
# 3. Get the router you just created using the router_id obtained
my_router = turing.Router.get(my_router.id)
print(f"3. You have retrieved the router with name: {my_router.name}")
# Access the router config from the returned Router object directly
my_router_config = my_router.config
# Modify something in the router config
my_router_config.routes.append(
Route(
id='fee-fi-fo-fum',
endpoint='http://fox-says.fee-fi-fo-fum',
timeout='20ms'
)
)
# 4. Update the router with the new router config
my_router.update(my_router_config)
print(f"4. You have just updated your router with a new config.")
# 5. List all the router config versions of your router
# Note: A `RouterVersion` represents a single version (and configuration) of a Turing Router. Just as `Router`
# objects, they should almost never be created manually by using their constructor.
#
# Besides accessing attributes of a `RouterVersion` object directly, which will allow you to access basic
# attributes, you may also consider retrieving the entire router configuration from a specific `RouterVersion`
# object as a `RouterConfig` for further manipulation by performing something like:
#
# `my_config = router_version.get_config()`
my_router_versions = my_router.list_versions()
print(f"5. You have just retrieved a list of {len(my_router_versions)} versions for your router:")
for ver in my_router_versions:
print(ver)
# Sort the versions returned by version number
my_router_versions.sort(key=lambda x: x.version)
# Get the version number of the first version returned
first_ver_no = my_router_versions[0].version
# Get the version number of the latest version returned
latest_ver_no = my_router_versions[-1].version
# Wait for the latest version to get deployed
try:
my_router.wait_for_version_status(RouterStatus.DEPLOYED, latest_ver_no)
except TimeoutError:
raise Exception(f"Turing API is taking too long for router {my_router.id} with version {latest_ver_no} to get "
f"deployed.")
# 6. Deploy a specific router config version (the first one we created)
response = my_router.deploy_version(first_ver_no)
print(f"6. You have deployed version {response['version']} of router {response['router_id']}.")
# Wait for the first version to get deployed
try:
my_router.wait_for_version_status(RouterStatus.DEPLOYED, first_ver_no)
except TimeoutError:
raise Exception(f"Turing API is taking too long for router {my_router.id} with version {first_ver_no} to get "
f"deployed.")
# 7. Undeploy the current active router configuration
response = my_router.undeploy()
print(f"7. You have undeployed router {response['router_id']}.")
# Wait for the router to get undeployed
try:
my_router.wait_for_status(RouterStatus.UNDEPLOYED)
except TimeoutError:
raise Exception(f"Turing API is taking too long for router {my_router.id} to get undeployed.")
# 8. Deploy the router's *current* configuration (notice how it still deploys the *first* version)
response = my_router.deploy()
print(f"8. You have deployed version {response['version']} of router {response['router_id']}.")
# Wait for the router to get deployed
try:
my_router.wait_for_status(RouterStatus.DEPLOYED)
except TimeoutError:
raise Exception(f"Turing API is taking too long for router {my_router.id} to get deployed.")
# Undeploy the router
response = my_router.undeploy()
print(f"You have undeployed router {response['router_id']}.")
# 9. Get a specific router version of the router
my_router_ver = my_router.get_version(first_ver_no)
print(f"9. You have just retrieved version {my_router_ver.version} of your router.")
# 10. Delete a specific router version of the router
response = my_router.delete_version(latest_ver_no)
print(f"10. You have deleted version {response['version']} of | |
coeff includes
parity due to sorting. opa and opb are integer arrays
"""
n_a = len(opa)
n_b = len(opb)
coeff *= (-1)**(n_a * (n_a - 1) // 2 + n_b * (n_b - 1) // 2)
amap = set()
bmap = set()
amask = reverse_integer_index(opa)
bmask = reverse_integer_index(opb)
for index in range(self.lena()):
current = self._core.string_alpha(index)
if (~current) & amask == 0:
amap.add(index)
for index in range(self.lenb()):
current = self._core.string_beta(index)
if (~current) & bmask == 0:
bmap.add(index)
factor = numpy.exp(-time * numpy.real(coeff) * 2.j)
lamap = list(amap)
lbmap = list(bmap)
if len(lamap) != 0 and len(lbmap) != 0:
xi, yi = numpy.meshgrid(lamap, lbmap, indexing='ij')
self.coeff[xi, yi] *= factor
def evolve_inplace_individual_nbody_nontrivial(
self, time: float, coeff: complex, daga: List[int],
undaga: List[int], dagb: List[int], undagb: List[int]) -> None:
"""
This code time-evolves a wave function with an individual n-body
generator which is spin-conserving. It is assumed that hat{T}^2 = 0.
Using :math:`TT = 0` and the fact that :math:`TT^\\dagger` is diagonal in the
determinant space, one can evaluate
.. math::
\\exp(-i(T+T^\\dagger)t)
&= 1 + i(T+T^\\dagger)t - \\frac{1}{2}(TT^\\dagger + T^\\dagger T)t^2
- i\\frac{1}{6}(TT^\\dagger T + T^\\dagger TT^\\dagger)t^3 + \\cdots \\\\
&= -1 + \\cos(t\\sqrt{TT^\\dagger}) + \\cos(t\\sqrt{T^\\dagger T})
- iT\\frac{\\sin(t\\sqrt{T^\\dagger T})}{\\sqrt{T^\\dagger T}}
- iT^\\dagger\\frac{\\sin(t\\sqrt{TT^\\dagger})}{\\sqrt{TT^\\dagger}}
"""
def isolate_number_operators(dag: List[int], undag: List[int],
dagwork: List[int], undagwork: List[int],
number: List[int]) -> int:
"""
Pair-up daggered and undaggered operators that correspond to the
same spin-orbital and isolate them, because they have to be treated
differently.
"""
par = 0
for current in dag:
if current in undag:
index1 = dagwork.index(current)
index2 = undagwork.index(current)
par += len(dagwork) - (index1 + 1) + index2
dagwork.remove(current)
undagwork.remove(current)
number.append(current)
return par
dagworka = copy.deepcopy(daga)
dagworkb = copy.deepcopy(dagb)
undagworka = copy.deepcopy(undaga)
undagworkb = copy.deepcopy(undagb)
numbera: List[int] = []
numberb: List[int] = []
parity = 0
parity += isolate_number_operators(daga, undaga, dagworka, undagworka,
numbera)
parity += isolate_number_operators(dagb, undagb, dagworkb, undagworkb,
numberb)
ncoeff = coeff * (-1)**parity
# code for (TTd)
phase = (-1)**((len(daga) + len(undaga)) * (len(dagb) + len(undagb)))
(cosdata1,
sindata1) = self.apply_cos_sin(time, ncoeff, numbera + dagworka,
undagworka, numberb + dagworkb,
undagworkb)
work_cof = numpy.conj(coeff) * phase
cosdata1.ax_plus_y(
-1.0j,
sindata1.apply_individual_nbody(work_cof, undaga, daga, undagb,
dagb))
# code for (TdT)
(cosdata2,
sindata2) = self.apply_cos_sin(time, ncoeff, numbera + undagworka,
dagworka, numberb + undagworkb,
dagworkb)
cosdata2.ax_plus_y(
-1.0j,
sindata2.apply_individual_nbody(coeff, daga, undaga, dagb, undagb))
self.coeff = cosdata1.coeff + cosdata2.coeff - self.coeff
def apply_cos_sin(self, time: float, ncoeff: complex, opa: List[int],
oha: List[int], opb: List[int],
ohb: List[int]) -> Tuple['FqeData', 'FqeData']:
"""
Utility internal function that performs part of the operations in
evolve_inplace_individual_nbody_nontrivial. Isolated because it is
also used in the counterpart in FqeDataSet.
"""
amap = set()
bmap = set()
apmask = reverse_integer_index(opa)
ahmask = reverse_integer_index(oha)
bpmask = reverse_integer_index(opb)
bhmask = reverse_integer_index(ohb)
for index in range(self.lena()):
current = self._core.string_alpha(index)
if ((~current) & apmask) == 0 and (current & ahmask) == 0:
amap.add(index)
for index in range(self.lenb()):
current = self._core.string_beta(index)
if ((~current) & bpmask) == 0 and (current & bhmask) == 0:
bmap.add(index)
absol = numpy.absolute(ncoeff)
cosfactor = numpy.cos(time * absol)
sinfactor = numpy.sin(time * absol) / absol
cosdata = copy.deepcopy(self)
sindata = copy.deepcopy(self)
sindata.coeff.fill(0.0)
lamap = list(amap)
lbmap = list(bmap)
if len(lamap) == 0 or len(lbmap) == 0:
return (cosdata, sindata)
else:
xi, yi = numpy.meshgrid(lamap, lbmap, indexing='ij')
cosdata.coeff[xi, yi] *= cosfactor
sindata.coeff[xi, yi] = self.coeff[xi, yi] * sinfactor
return (cosdata, sindata)
def alpha_map(self, iorb: int, jorb: int) -> List[Tuple[int, int, int]]:
"""Access the mapping for a singlet excitation from the current
sector for alpha orbitals
"""
return self._core.alpha_map(iorb, jorb)
def beta_map(self, iorb: int, jorb: int) -> List[Tuple[int, int, int]]:
"""Access the mapping for a singlet excitation from the current
sector for beta orbitals
"""
return self._core.beta_map(iorb, jorb)
def ax_plus_y(self, sval: complex, other: 'FqeData') -> 'FqeData':
"""Scale and add the data in the fqedata structure
= sval*coeff + other
"""
assert hash(self) == hash(other)
self.coeff += other.coeff * sval
return self
def __hash__(self):
"""Fqedata sructures are unqiue in nele, s_z and the dimension.
"""
return hash((self._nele, self._m_s))
def conj(self) -> None:
"""Conjugate the coefficients
"""
numpy.conjugate(self.coeff, self.coeff)
def lena(self) -> int:
"""Length of the alpha configuration space
"""
return self._core.lena()
def lenb(self) -> int:
"""Length of the beta configuration space
"""
return self._core.lenb()
def nalpha(self) -> int:
"""Number of alpha electrons
"""
return self._core.nalpha()
def nbeta(self) -> int:
"""Number of beta electrons
"""
return self._core.nbeta()
def n_electrons(self) -> int:
"""Particle number getter
"""
return self._nele
def generator(self):
"""Iterate over the elements of the sector as alpha string, beta string
coefficient
"""
for inda in range(self._core.lena()):
alpha_str = self._core.string_alpha(inda)
for indb in range(self._core.lenb()):
beta_str = self._core.string_beta(indb)
yield alpha_str, beta_str, self.coeff[inda, indb]
def norb(self) -> int:
"""Number of beta electrons
"""
return self._core.norb()
def norm(self) -> float:
"""Return the norm of the the sector wavefunction
"""
return numpy.linalg.norm(self.coeff)
def print_sector(self, pformat=None, threshold=0.0001):
"""Iterate over the strings and coefficients and print then
using the print format
"""
if pformat is None:
def print_format(astr, bstr):
return '{0:b}:{1:b}'.format(astr, bstr)
pformat = print_format
print('Sector N = {} : S_z = {}'.format(self._nele, self._m_s))
for inda in range(self._core.lena()):
alpha_str = self._core.string_alpha(inda)
for indb in range(self._core.lenb()):
beta_str = self._core.string_beta(indb)
if numpy.abs(self.coeff[inda, indb]) > threshold:
print('{} {}'.format(pformat(alpha_str, beta_str),
self.coeff[inda, indb]))
def beta_inversion(self):
"""Return the coefficients with an inversion of the beta strings.
"""
return numpy.flip(self.coeff, 1)
def scale(self, sval: complex):
""" Scale the wavefunction by the value sval
Args:
sval (complex) - value to scale by
Returns:
nothing - Modifies the wavefunction in place
"""
self.coeff = self.coeff.astype(numpy.complex128) * sval
def fill(self, value: complex):
""" Fills the wavefunction with the value specified
"""
self.coeff.fill(value)
def set_wfn(self,
strategy: Optional[str] = None,
raw_data: 'Nparray' = numpy.empty(0)) -> None:
"""Set the values of the fqedata wavefunction based on a strategy
Args:
strategy (string) - the procedure to follow to set the coeffs
raw_data (numpy.array(dim(self.lena(), self.lenb()), \
dtype=numpy.complex128)) - the values to use
if setting from data. If vrange is supplied, the first column
in data will correspond to the first index in vrange
Returns:
nothing - modifies the wavefunction in place
"""
strategy_args = ['ones', 'zero', 'random', 'from_data', 'hartree-fock']
if strategy is None and raw_data.shape == (0,):
raise ValueError('No strategy and no data passed.'
' Cannot initialize')
if strategy == 'from_data' and raw_data.shape == (0,):
raise ValueError('No data passed to initialize from')
if raw_data.shape != (0,) and strategy not in ['from_data', None]:
raise ValueError('Inconsistent strategy for set_vec passed with'
' data')
if strategy not in strategy_args:
raise ValueError('Unknown Argument passed to set_vec')
if strategy == 'from_data':
chkdim = raw_data.shape
if chkdim[0] != self.lena() or chkdim[1] != self.lenb():
raise ValueError('Dim of data passed {},{} is not compatible' \
' with {},{}'.format(chkdim[0],
chkdim[1],
self.lena(),
self.lenb()))
if strategy == 'ones':
self.coeff.fill(1. + .0j)
elif strategy == 'zero':
self.coeff.fill(0. + .0j)
elif strategy == 'random':
self.coeff[:, :] = rand_wfn(self.lena(), self.lenb())
elif strategy == 'from_data':
self.coeff = numpy.copy(raw_data)
elif strategy == 'hartree-fock':
self.coeff.fill(0 + .0j)
self.coeff[0, 0] = 1.
def __copy__(self):
# FCIGraph is passed by reference
new_data = FqeData(nalpha=self._core.nalpha(),
nbeta=self._core.nbeta(),
norb=self._core.norb(),
fcigraph=self._core,
dtype=self._dtype)
new_data._low_thresh = self._low_thresh
new_data.coeff[:, :] = self.coeff[:, :]
return new_data
def __deepcopy__(self, memodict={}): # pylint: disable=dangerous-default-value
# FCIGraph is passed by reference
new_data = FqeData(nalpha=self._core.nalpha(),
nbeta=self._core.nbeta(),
norb=self._core.norb(),
fcigraph=self._core,
dtype=self._dtype)
new_data._low_thresh = self._low_thresh
# NOTE: numpy.copy only okay for numeric type self.coeff
# NOTE: Otherwise implement copy.deepcopy(self.coeff)
new_data.coeff[:, :] = self.coeff[:, :]
return new_data
def get_spin_opdm(self):
"""estimate the alpha-alpha and beta-beta block of the 1-RDM"""
dveca, dvecb = self.calculate_dvec_spin()
alpha_opdm = numpy.einsum('ijkl,kl->ij', dveca, self.coeff.conj())
beta_opdm = numpy.einsum('ijkl,kl->ij', dvecb, self.coeff.conj())
return alpha_opdm, beta_opdm
def get_ab_tpdm(self):
"""Get the alpha-beta block of the 2-RDM
tensor[i, j, k, l] = <ia^ jb^ kb la>
"""
dveca, dvecb = self.calculate_dvec_spin()
tpdm_ab = numpy.einsum('liab,jkab->ijkl', dveca.conj(), dvecb)
return tpdm_ab
def get_aa_tpdm(self):
"""Get the alpha-alpha block of the 2-RDM
tensor[i, j, k, l] = <ia^ ja^ ka la>
"""
dveca, _ = self.calculate_dvec_spin()
alpha_opdm = numpy.einsum('ijkl,kl->ij', dveca, self.coeff.conj())
nik_njl_aa = numpy.einsum('kiab,jlab->ikjl', dveca.conj(), dveca)
tensor_aa = numpy.einsum('il,jk->ikjl', alpha_opdm,
numpy.eye(alpha_opdm.shape[0]))
return alpha_opdm,
2*m.b57*m.b214 - 4*m.b57 - 4*m.b214 + 2*m.b57*m.b280 + 2*m.b57*
m.b536 + 2*m.b57*m.b674 + 2*m.b58*m.b353 - 2*m.b58 - 4*m.b353 + 2*m.b58*m.b355 - 2*m.b58*m.b498
+ 2*m.b498 + 2*m.b58*m.b591 + 2*m.b59*m.b60 - 2*m.b59 - 2*m.b60 + 2*m.b59*m.b116 - 2*m.b116 + 2*
m.b59*m.b220 - 4*m.b220 - 2*m.b59*m.b612 + 2*m.b60*m.b95 - 2*m.b95 + 2*m.b60*m.b140 - 2*m.b140 -
2*m.b60*m.b288 - 2*m.b288 + 2*m.b61*m.b119 - 2*m.b119 - 2*m.b61*m.b529 - 2*m.b61*m.b781 + 2*m.b62
*m.b262 - 4*m.b62 + 2*m.b62*m.b368 - 4*m.b368 + 2*m.b62*m.b410 - 4*m.b410 + 2*m.b62*m.b716 + 2*
m.b63*m.b101 - 4*m.b63 + 2*m.b63*m.b164 - 4*m.b164 + 2*m.b63*m.b368 + 2*m.b63*m.b682 + 2*m.b64*
m.b190 - 4*m.b190 + 2*m.b64*m.b695 + 2*m.b64*m.b727 - 2*m.b65*m.b66 - 2*m.b65 + 2*m.b66 + 2*m.b65
*m.b532 + 2*m.b65*m.b547 + 2*m.b65*m.b728 + 2*m.b66*m.b103 - 4*m.b103 - 2*m.b66*m.b749 - 2*m.b66*
m.b782 + 2*m.b67*m.b68 - 2*m.b67 - 2*m.b68 + 2*m.b67*m.b751 - 2*m.b67*m.b761 + 2*m.b67*m.b762 - 2
*m.b68*m.b69 - 2*m.b69 + 2*m.b68*m.b103 + 2*m.b68*m.b168 - 4*m.b168 + 2*m.b69*m.b70 - 2*m.b70 + 2
*m.b69*m.b470 - 2*m.b470 + 2*m.b69*m.b736 + 2*m.b70*m.b168 + 2*m.b71*m.b72 - 2*m.b71 - 2*m.b72 +
2*m.b71*m.b423 - 2*m.b423 + 2*m.b72*m.b170 - 2*m.b170 - 2*m.b72*m.b570 + 2*m.b72*m.b752 + 2*m.b73
*m.b538 - 2*m.b73 + 2*m.b73*m.b641 + 2*m.b73*m.b722 - 2*m.b73*m.b784 + 2*m.b74*m.b484 - 4*m.b74
- 2*m.b484 + 2*m.b74*m.b584 + 2*m.b74*m.b740 + 2*m.b74*m.b784 - 2*m.b75*m.b76 + 2*m.b75*m.b176
- 2*m.b176 + 2*m.b75*m.b583 - 2*m.b76*m.b175 - 2*m.b175 + 2*m.b76*m.b786 + 2*m.b77*m.b588 - 2*
m.b77 + 2*m.b77*m.b645 + 2*m.b77*m.b779 - 2*m.b77*m.b787 + 2*m.b78*m.b251 - 4*m.b78 - 4*m.b251 +
2*m.b78*m.b314 + 2*m.b78*m.b531 + 2*m.b78*m.b661 + 2*m.b79*m.b317 - 2*m.b79 + 2*m.b79*m.b399 - 4*
m.b399 + 2*m.b79*m.b601 - 2*m.b79*m.b724 + 2*m.b80*m.b292 - 4*m.b80 + 2*m.b80*m.b412 - 2*m.b412
+ 2*m.b80*m.b458 - 4*m.b458 + 2*m.b80*m.b706 + 2*m.b81*m.b122 - 4*m.b81 + 2*m.b81*m.b189 - 4*
m.b189 + 2*m.b81*m.b412 + 2*m.b81*m.b694 + 2*m.b82*m.b683 + 2*m.b82*m.b731 + 2*m.b83*m.b84 + 2*
m.b83*m.b761 + 2*m.b83*m.b773 + 2*m.b84*m.b123 - 4*m.b123 + 2*m.b84*m.b197 - 2*m.b197 + 2*m.b85*
m.b86 - 2*m.b86 + 2*m.b85*m.b418 - 2*m.b418 + 2*m.b86*m.b197 + 2*m.b87*m.b88 - 2*m.b87 - 2*m.b88
+ 2*m.b87*m.b473 + 2*m.b88*m.b199 - 2*m.b199 + 2*m.b88*m.b474 + 2*m.b474 - 2*m.b88*m.b560 - 2*
m.b89*m.b438 + 2*m.b438 - 2*m.b89*m.b562 - 2*m.b89*m.b768 + 2*m.b90*m.b598 - 2*m.b90 + 2*m.b90*
m.b660 + 2*m.b90*m.b768 - 2*m.b90*m.b795 + 2*m.b91*m.b92 - 4*m.b91 - 2*m.b92 + 2*m.b91*m.b279 - 4
*m.b279 + 2*m.b91*m.b351 + 2*m.b91*m.b648 + 2*m.b92*m.b352 + 2*m.b352 + 2*m.b92*m.b535 - 2*m.b92*
m.b796 + 2*m.b93*m.b94 - 2*m.b93 + 2*m.b93*m.b283 + 2*m.b93*m.b445 - 4*m.b445 - 2*m.b93*m.b729 -
2*m.b94*m.b797 + 2*m.b94*m.b798 + 2*m.b95*m.b97 - 4*m.b97 + 2*m.b95*m.b529 - 2*m.b95*m.b680 + 2*
m.b96*m.b97 - 4*m.b96 + 2*m.b96*m.b602 + 2*m.b96*m.b653 + 2*m.b96*m.b760 + 2*m.b97*m.b457 + 2*
m.b457 + 2*m.b97*m.b528 + 2*m.b98*m.b332 - 4*m.b98 + 2*m.b98*m.b460 - 2*m.b460 + 2*m.b98*m.b513
- 4*m.b513 + 2*m.b98*m.b692 + 2*m.b99*m.b100 - 4*m.b100 + 2*m.b99*m.b667 + 2*m.b100*m.b144 + 2*
m.b100*m.b227 - 4*m.b227 + 2*m.b100*m.b460 + 2*m.b101*m.b670 + 2*m.b101*m.b728 + 2*m.b102*m.b190
- 2*m.b102 + 2*m.b102*m.b467 + 2*m.b467 + 2*m.b102*m.b520 - 4*m.b520 - 2*m.b102*m.b762 + 2*
m.b103*m.b104 + 2*m.b103*m.b783 + 2*m.b104*m.b146 - 4*m.b146 + 2*m.b104*m.b234 - 2*m.b234 + 2*
m.b105*m.b106 - 2*m.b106 + 2*m.b105*m.b377 - 4*m.b377 - 2*m.b105*m.b751 + 2*m.b106*m.b234 + 2*
m.b107*m.b108 - 2*m.b107 - 2*m.b108 + 2*m.b107*m.b606 + 2*m.b108*m.b236 - 2*m.b236 + 2*m.b108*
m.b424 + 2*m.b424 - 2*m.b108*m.b549 + 2*m.b109*m.b431 - 4*m.b431 + 2*m.b109*m.b766 + 2*m.b109*
m.b804 + 2*m.b110*m.b573 - 2*m.b110*m.b767 - 2*m.b111*m.b574 - 2*m.b111*m.b757 - 2*m.b111*m.b758
+ 2*m.b112*m.b310 - 4*m.b112 - 4*m.b310 + 2*m.b112*m.b609 + 2*m.b112*m.b673 + 2*m.b112*m.b758 -
2*m.b113*m.b315 + 4*m.b315 + 2*m.b113*m.b530 + 2*m.b113*m.b621 + 2*m.b114*m.b115 - 2*m.b114 + 2*
m.b114*m.b499 - 4*m.b499 + 2*m.b114*m.b622 - 2*m.b114*m.b734 - 2*m.b115*m.b354 + 2*m.b354 + 2*
m.b115*m.b806 + 2*m.b116*m.b118 - 4*m.b118 + 2*m.b116*m.b221 - 2*m.b221 - 2*m.b116*m.b329 + 4*
m.b329 + 2*m.b117*m.b592 - 2*m.b117 + 2*m.b117*m.b636 + 2*m.b117*m.b771 - 2*m.b117*m.b789 + 2*
m.b118*m.b666 + 2*m.b118*m.b748 + 2*m.b118*m.b789 + 2*m.b119*m.b368 + 2*m.b119*m.b515 - 2*m.b515
- 2*m.b119*m.b528 + 2*m.b120*m.b121 - 4*m.b121 + 2*m.b121*m.b164 + 2*m.b121*m.b264 - 4*m.b264 +
2*m.b121*m.b515 + 2*m.b122*m.b656 + 2*m.b122*m.b718 + 2*m.b123*m.b124 - 2*m.b124 + 2*m.b123*
m.b750 + 2*m.b123*m.b792 + 2*m.b124*m.b166 - 4*m.b166 + 2*m.b124*m.b269 - 2*m.b269 - 2*m.b124*
m.b719 + 2*m.b125*m.b126 - 2*m.b126 + 2*m.b125*m.b419 - 4*m.b419 + 2*m.b125*m.b751 + 2*m.b126*
m.b269 + 2*m.b127*m.b128 - 2*m.b127 - 4*m.b128 + 2*m.b127*m.b595 + 2*m.b128*m.b383 + 2*m.b383 + 2
*m.b128*m.b560 + 2*m.b128*m.b811 + 2*m.b129*m.b173 - 2*m.b129 - 2*m.b173 + 2*m.b129*m.b483 - 4*
m.b483 - 2*m.b129*m.b659 + 2*m.b129*m.b756 + 2*m.b130*m.b131 - 2*m.b131 - 2*m.b130*m.b390 + 2*
m.b390 + 2*m.b131*m.b391 - 2*m.b391 - 2*m.b131*m.b561 + 2*m.b131*m.b805 + 2*m.b132*m.b134 - 2*
m.b132 - 4*m.b134 - 2*m.b132*m.b248 - 2*m.b248 + 2*m.b132*m.b586 + 2*m.b132*m.b686 - 2*m.b133*
m.b588 + 2*m.b133 + 2*m.b133*m.b597 - 2*m.b133*m.b744 - 2*m.b133*m.b745 + 2*m.b134*m.b620 + 2*
m.b134*m.b745 + 2*m.b134*m.b795 + 2*m.b135*m.b137 - 2*m.b135 - 2*m.b135*m.b535 + 2*m.b135*m.b610
+ 2*m.b135*m.b734 - 2*m.b136*m.b284 - 2*m.b284 - 2*m.b136*m.b576 - 2*m.b136*m.b770 + 2*m.b137*
m.b284 - 2*m.b137*m.b400 + 2*m.b400 + 2*m.b138*m.b219 - 4*m.b138 - 2*m.b219 + 2*m.b138*m.b284 + 2
*m.b138*m.b322 + 2*m.b322 + 2*m.b138*m.b576 + 2*m.b139*m.b141 - 2*m.b139 - 4*m.b141 + 2*m.b139*
m.b184 - 2*m.b184 - 2*m.b139*m.b365 + 4*m.b365 + 2*m.b139*m.b603 + 2*m.b140*m.b579 + 2*m.b140*
m.b625 - 2*m.b140*m.b781 + 2*m.b141*m.b529 + 2*m.b141*m.b739 + 2*m.b141*m.b781 + 2*m.b142*m.b143
- 2*m.b143 - 2*m.b142*m.b727 + 2*m.b143*m.b189 + 2*m.b143*m.b295 - 4*m.b295 - 2*m.b143*m.b516 -
2*m.b516 + 2*m.b144*m.b638 + 2*m.b144*m.b708 + 2*m.b145*m.b415 - 2*m.b415 - 2*m.b145*m.b728 - 2*
m.b145*m.b783 + 2*m.b146*m.b147 - 2*m.b147 + 2*m.b146*m.b194 - 2*m.b194 + 2*m.b146*m.b762 + 2*
m.b147*m.b195 - 4*m.b195 + 2*m.b147*m.b299 - 2*m.b299 - 2*m.b147*m.b709 + 2*m.b148*m.b149 - 2*
m.b149 + 2*m.b148*m.b471 - 4*m.b471 + 2*m.b149*m.b299 + 2*m.b150*m.b151 - 4*m.b151 + 2*m.b151*
m.b341 + 2*m.b341 + 2*m.b151*m.b570 + 2*m.b151*m.b816 + 2*m.b152*m.b238 - 2*m.b152 - 4*m.b238 + 2
*m.b152*m.b428 - 2*m.b428 + 2*m.b152*m.b740 - 2*m.b152*m.b817 + 2*m.b153*m.b155 - 2*m.b155 - 2*
m.b153*m.b346 + 2*m.b346 + 2*m.b153*m.b643 + 2*m.b154*m.b155 - 2*m.b154 + 2*m.b154*m.b204 - 2*
m.b204 - 2*m.b154*m.b587 + 2*m.b154*m.b804 + 2*m.b155*m.b436 - 2*m.b436 - 2*m.b155*m.b553 + 2*
m.b156*m.b158 - 4*m.b158 - 2*m.b156*m.b210 - 2*m.b210 + 2*m.b156*m.b700 - 2*m.b157*m.b311 + 4*
m.b157 - 2*m.b311 - 2*m.b157*m.b597 - 2*m.b157*m.b598 - 2*m.b157*m.b738 + 2*m.b158*m.b632 + 2*
m.b158*m.b738 + 2*m.b158*m.b787 + 2*m.b159*m.b160 - 2*m.b159 - 2*m.b159*m.b530 + 2*m.b159*m.b600
+ 2*m.b159*m.b729 + 2*m.b160*m.b318 - 2*m.b318 - 2*m.b160*m.b446 + 2*m.b446 - 2*m.b161*m.b614 +
2*m.b161 - 2*m.b161*m.b692 + 2*m.b161*m.b693 - 2*m.b161*m.b819 + 2*m.b162*m.b163 - 2*m.b163 + 2*
m.b162*m.b462 + 2*m.b163*m.b227 + 2*m.b163*m.b334 - 4*m.b334 - 2*m.b163*m.b461 - 2*m.b461 + 2*
m.b164*m.b228 - 2*m.b228 + 2*m.b164*m.b695 + 2*m.b165*m.b373 + 2*m.b165 - 4*m.b373 - 2*m.b165*
m.b522 - 2*m.b165*m.b718 - 2*m.b165*m.b792 + 2*m.b166*m.b167 - 4*m.b167 + 2*m.b166*m.b231 - 2*
m.b231 + 2*m.b166*m.b773 + 2*m.b167*m.b232 - 4*m.b232 + 2*m.b167*m.b339 - 4*m.b339 + 2*m.b167*
m.b709 + 2*m.b168*m.b169 - 2*m.b169 + 2*m.b168*m.b526 - 4*m.b526 + 2*m.b169*m.b339 + 2*m.b170*
m.b171 - 4*m.b171 + 2*m.b171*m.b300 + 2*m.b300 + 2*m.b171*m.b582 + 2*m.b171*m.b821 + 2*m.b172*
m.b480 - 2*m.b172 - 2*m.b480 - 2*m.b172*m.b584 + 2*m.b172*m.b630 + 2*m.b172*m.b753 + 2*m.b173*
m.b175 - 2*m.b173*m.b484 + 2*m.b173*m.b805 + 2*m.b174*m.b177 - 4*m.b177 - 2*m.b174*m.b307 + 2*
m.b307 + 2*m.b174*m.b659 + 2*m.b175*m.b177 + 2*m.b175*m.b243 - 2*m.b243 + 2*m.b176*m.b209 - 2*
m.b209 + 2*m.b176*m.b305 - 2*m.b176*m.b489 - 2*m.b489 + 2*m.b177*m.b489 + 2*m.b177*m.b553 - 2*
m.b178*m.b179 + 2*m.b179 - 2*m.b178*m.b348 - 2*m.b348 - 2*m.b178*m.b609 + 2*m.b179*m.b181 - 2*
m.b179*m.b661 - 2*m.b179*m.b823 + 2*m.b180*m.b588 + 2*m.b180*m.b738 - 2*m.b180*m.b824 + 2*m.b181*
m.b598 + 2*m.b181*m.b824 + 2*m.b182*m.b183 - 4*m.b182 + 2*m.b182*m.b535 + 2*m.b182*m.b590 + 2*
m.b182*m.b724 + 2*m.b183*m.b356 - 2*m.b356 - 2*m.b183*m.b500 + 2*m.b500 + 2*m.b184*m.b324 - 2*
m.b324 - 2*m.b184*m.b453 + 2*m.b453 + 2*m.b184*m.b679 - 2*m.b185*m.b186 + 2*m.b185 + 2*m.b186 - 2
*m.b185*m.b566 + 2*m.b185*m.b604 - 2*m.b185*m.b760 - 2*m.b186*m.b706 + 2*m.b186*m.b707 - 2*m.b186
*m.b827 + 2*m.b187*m.b188 - 2*m.b188 + 2*m.b187*m.b727 + 2*m.b188*m.b264 + 2*m.b188*m.b371 - 4*
m.b371 - 2*m.b188*m.b413 - 2*m.b413 + 2*m.b189*m.b191 - 4*m.b191 + 2*m.b189*m.b683 + 2*m.b190*
m.b193 + 2*m.b190*m.b465 - 4*m.b465 + 2*m.b191*m.b193 + 2*m.b191*m.b371 + 2*m.b191*m.b655 - 2*
m.b192*m.b194 + 2*m.b192 + 2*m.b192*m.b336 - 4*m.b336 - 2*m.b192*m.b467 - 2*m.b192*m.b708 + 2*
m.b193*m.b194 + 2*m.b194*m.b268 - 4*m.b268 + 2*m.b195*m.b196 - 4*m.b196 + 2*m.b195*m.b267 - 2*
m.b267 + 2*m.b195*m.b783 + 2*m.b196*m.b268 + 2*m.b196*m.b378 - 4*m.b378 + 2*m.b196*m.b719 + 2*
m.b197*m.b198 - 2*m.b198
# Copyright (C) 2021-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
from typing import Callable, Iterable, Iterator, Optional, Tuple, Union
import os
import os.path as osp
import shutil
import weakref
import cv2
import numpy as np
from datumaro.util.image import (
_image_loading_errors, decode_image, lazy_image, save_image,
)
class MediaElement:
def __init__(self, path: str) -> None:
self._path = path
@property
def path(self) -> str:
"""Path to the media file"""
return self._path
@property
def ext(self) -> str:
"""Media file extension (with the leading dot)"""
return osp.splitext(osp.basename(self.path))[1]
def __eq__(self, other: object) -> bool:
# We need to compare exactly with this type
if type(other) is not __class__: # pylint: disable=unidiomatic-typecheck
return False
return self._path == other._path
class Image(MediaElement):
def __init__(self,
data: Union[np.ndarray, Callable[[str], np.ndarray], None] = None,
*,
path: Optional[str] = None,
ext: Optional[str] = None,
size: Optional[Tuple[int, int]] = None) -> None:
"""
Creates an image.
Any combination of the `data`, `path` and `size` is possible,
but at least one of these arguments must be provided.
The `ext` parameter cannot be used as a single argument for
construction.
Args:
data - Image pixels or a function to retrieve them. The expected
image shape is (H, W [, C]). If a function is provided,
it must accept image path as the first argument.
path - Image path
ext - Image extension. Cannot be used together with `path`. It can
be used for saving with a custom extension - in that case,
the image needs to have the `data` and `ext` fields defined.
size - A pair (H, W), which represents image size.
"""
assert size is None or len(size) == 2, size
if size is not None:
assert len(size) == 2 and 0 < size[0] and 0 < size[1], size
size = tuple(map(int, size))
self._size = size # (H, W)
if path is None:
path = ''
elif path:
path = path.replace('\\', '/')
self._path = path
if ext:
assert not path, "Can't specify both 'path' and 'ext' for image"
if not ext.startswith('.'):
ext = '.' + ext
ext = ext.lower()
else:
ext = None
self._ext = ext
if not isinstance(data, np.ndarray):
assert path or callable(data) or size, "Image can not be empty"
assert data is None or callable(data)
if data or path and osp.isfile(path):
data = lazy_image(path, loader=data)
self._data = data
@property
def data(self) -> np.ndarray:
"""Image data in BGR HWC [0; 255] (float) format"""
if callable(self._data):
data = self._data()
else:
data = self._data
if self._size is None and data is not None:
self._size = tuple(map(int, data.shape[:2]))
return data
@property
def has_data(self) -> bool:
return self._data is not None
@property
def has_size(self) -> bool:
"""Indicates that size info is cached and won't require image loading"""
return self._size is not None or isinstance(self._data, np.ndarray)
@property
def size(self) -> Optional[Tuple[int, int]]:
"""Returns (H, W)"""
if self._size is None:
try:
data = self.data
except _image_loading_errors:
return None
if data is not None:
self._size = tuple(map(int, data.shape[:2]))
return self._size
@property
def ext(self) -> str:
"""Media file extension"""
if self._ext is not None:
return self._ext
else:
return osp.splitext(osp.basename(self.path))[1]
def __eq__(self, other):
if not isinstance(other, __class__):
return False
return \
(np.array_equal(self.size, other.size)) and \
(self.has_data == other.has_data) and \
(self.has_data and np.array_equal(self.data, other.data) or \
not self.has_data)
def save(self, path):
cur_path = osp.abspath(self.path)
path = osp.abspath(path)
cur_ext = self.ext.lower()
new_ext = osp.splitext(osp.basename(path))[1].lower()
os.makedirs(osp.dirname(path), exist_ok=True)
if cur_ext == new_ext and osp.isfile(cur_path):
if cur_path != path:
shutil.copyfile(cur_path, path)
else:
save_image(path, self.data)
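# Hedged usage sketch (illustrative file name, not from the upstream module): an Image
# may be declared by path, by in-memory data, or by size alone; pixel loading is
# deferred until `.data` is accessed.
#   img = Image(path='frame_000.jpg')          # lazily loaded from disk when read
#   img = Image(data=np.zeros((480, 640, 3)))  # in-memory image, size inferred as (480, 640)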
class ByteImage(Image):
_FORMAT_MAGICS = (
(b'\x89PNG\r\n\x1a\n', '.png'),
(b'\xff\xd8\xff', '.jpg'),
(b'BM', '.bmp'),
)
def __init__(self,
data: Union[bytes, Callable[[str], bytes], None] = None,
*,
path: Optional[str] = None,
ext: Optional[str] = None,
size: Optional[Tuple[int, int]] = None):
if not isinstance(data, bytes):
assert path or callable(data), "Image can not be empty"
assert data is None or callable(data)
if path and osp.isfile(path) or data:
data = lazy_image(path, loader=data)
self._bytes_data = data
if ext is None and path is None and isinstance(data, bytes):
ext = self._guess_ext(data)
super().__init__(path=path, ext=ext, size=size,
data=lambda _: decode_image(self.get_bytes()))
if data is None:
# We don't expect decoder to produce images from nothing,
# otherwise using this class makes no sense. We undefine
# data to avoid using default image loader for loading binaries
# from the path, when no data is provided.
self._data = None
@classmethod
def _guess_ext(cls, data: bytes) -> Optional[str]:
return next(
(ext for magic, ext in cls._FORMAT_MAGICS
if data.startswith(magic)),
None,
)
def get_bytes(self):
if callable(self._bytes_data):
return self._bytes_data()
return self._bytes_data
def save(self, path):
cur_path = osp.abspath(self.path)
path = osp.abspath(path)
cur_ext = self.ext.lower()
new_ext = osp.splitext(osp.basename(path))[1].lower()
os.makedirs(osp.dirname(path), exist_ok=True)
if cur_ext == new_ext and osp.isfile(cur_path):
if cur_path != path:
shutil.copyfile(cur_path, path)
elif cur_ext == new_ext:
with open(path, 'wb') as f:
f.write(self.get_bytes())
else:
save_image(path, self.data)
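# Hedged usage sketch (illustrative, not from the upstream module): constructing a
# ByteImage from raw encoded bytes lets the extension be guessed from the magic prefix
# and the pixels be decoded lazily on first access.
#   img = ByteImage(data=png_bytes)  # png_bytes is assumed to hold a valid PNG payload
#   img.ext                          # -> '.png', via _guess_ext()
#   img.data                         # -> decoded array, via decode_image(img.get_bytes())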
class VideoFrame(Image):
def __init__(self, video: Video, index: int):
self._video = video
self._index = index
super().__init__(lambda _: self._video.get_frame_data(self._index))
@property
def size(self) -> Tuple[int, int]:
return self._video.frame_size
@property
def index(self) -> int:
return self._index
@property
def video(self) -> Video:
return self._video
class _VideoFrameIterator(Iterator[VideoFrame]):
"""
Provides sequential access to the video frames.
"""
_video: Video
_iterator: Iterator[VideoFrame]
_pos: int
_current_frame_data: Optional[np.ndarray]
def __init__(self, video: Video):
self._video = video
self._reset()
def _reset(self):
self._video._reset_reader()
self._iterator = self._decode(self._video._get_reader())
self._pos = -1
self._current_frame_data = None
def _decode(self, cap) -> Iterator[VideoFrame]:
"""
Decodes video frames using opencv
"""
self._pos = -1
success, frame = cap.read()
while success:
self._pos += 1
if self._video._includes_frame(self._pos):
self._current_frame_data = frame.astype(float)
yield self._make_frame(index=self._pos)
success, frame = cap.read()
if self._video._frame_count is None:
self._video._frame_count = self._pos + 1
def _make_frame(self, index) -> VideoFrame:
return VideoFrame(self._video, index=index)
def __next__(self):
return next(self._iterator)
def __getitem__(self, idx: int) -> VideoFrame:
if not self._video._includes_frame(idx):
raise IndexError(f"Video doesn't contain frame #{idx}.")
return self._navigate_to(idx)
def get_frame_data(self, idx: int) -> np.ndarray:
self._navigate_to(idx)
return self._current_frame_data
def _navigate_to(self, idx: int) -> VideoFrame:
"""
Iterates over frames to the required position.
"""
if idx < 0:
raise IndexError()
if idx < self._pos:
self._reset()
if self._pos < idx:
try:
while self._pos < idx:
v = self.__next__()
except StopIteration as e:
raise IndexError() from e
else:
v = self._make_frame(index=self._pos)
return v
class Video(MediaElement, Iterable[VideoFrame]):
"""
Provides random access to the video frames.
"""
def __init__(self, path: str, *,
step: int = 1, start_frame: int = 0,
end_frame: Optional[int] = None) -> None:
super().__init__(path)
if end_frame:
assert start_frame < end_frame
assert 0 < step
self._step = step
self._start_frame = start_frame
self._end_frame = end_frame or None
self._reader = None
self._iterator: Optional[_VideoFrameIterator] = None
self._frame_size: Optional[Tuple[int, int]] = None
# We don't provide frame count unless we have a reliable source of
# this information.
# - Not all videos provide length / duration metainfo
# - We can get an estimation based on metadata, but it
# can be invalid or inaccurate due to variable frame rate
# or fractional values rounded up. Relying on the value will give
# errors when requesting frames.
# https://stackoverflow.com/a/47796468
self._frame_count = None
self._length = None
from .media_manager import MediaManager
MediaManager.get_instance().push(weakref.ref(self), self)
def close(self):
self._iterator = None
if self._reader is not None:
self._reader.release()
self._reader = None
def __getitem__(self, idx: int) -> VideoFrame:
if not self._includes_frame(idx):
raise IndexError(f"Video doesn't contain frame #{idx}.")
return self._get_iterator()[idx]
def get_frame_data(self, idx: int) -> VideoFrame:
if not self._includes_frame(idx):
raise IndexError(f"Video doesn't contain frame #{idx}.")
return self._get_iterator().get_frame_data(idx)
def __iter__(self) -> Iterator[VideoFrame]:
"""
Iterates over frames lazily, if possible.
"""
if self._frame_count is not None:
# Decoding is not necessary to get frame pointers
# However, it can be inaccurate
end_frame = self._get_end_frame()
for index in range(self._start_frame, end_frame, self._step):
yield VideoFrame(video=self, index=index)
else:
# Need to decode to iterate over frames
yield from self._get_iterator()
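# Hedged usage sketch (illustrative path): frames can be read sequentially without
# knowing the total frame count in advance.
#   video = Video('/path/to/clip.avi', step=2)  # keep every second frame
#   for frame in video:                         # decoded lazily, honouring step
#       arr = frame.data                        # BGR HWC float array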
@property
def length(self) -> Optional[int]:
"""
Returns frame count, if video provides such information.
Note that not all videos provide length / duration metainfo, so the
result may be undefined.
Also note, that information may be inaccurate because of variable
FPS in video or incorrect metainfo. The count is only guaranteed to
be valid after video is completely read once.
The count is affected by the frame filtering options of the object,
i.e. start_frame, end_frame and step.
<gh_stars>0
# -*- coding: utf-8 -*-
"""local_inference_test.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1DoufA_ZZTQ4Cbkrwxll2eoD-ek4rkO7B
## Run inference test
Test with images in repository `object_detection_demo/test` directory.
"""
import os
import glob
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = './models/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = './models/label_map.pbtxt'
# If you want to test the code with your images, just add images files to the PATH_TO_TEST_IMAGES_DIR (./test).
PATH_TO_TEST_IMAGES_DIR = './test'
assert os.path.isfile(PATH_TO_CKPT)
assert os.path.isfile(PATH_TO_LABELS)
TEST_IMAGE_PATHS = glob.glob(os.path.join(PATH_TO_TEST_IMAGES_DIR, "*.*"))
assert len(TEST_IMAGE_PATHS) > 0, 'No image found in `{}`.'.format(PATH_TO_TEST_IMAGES_DIR)
print(TEST_IMAGE_PATHS)
# %cd /content/models/research/object_detection
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
# This is needed to display the images.
# %matplotlib inline
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
import visualization_utils as local_vis_util
NUM_CLASSES = 2
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {
output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(
tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(
tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(
tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [
real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [
real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(
output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
i=1
import time
for image_path in TEST_IMAGE_PATHS:
image = Image.open(image_path)
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
print('###############')
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
print('the type should be:' + str(type(image_np)))
print('the shape should be ' + str(image_np.shape))
# Actual detection.
start = time.time()
output_dict = run_inference_for_single_image(image_np, detection_graph)
end = time.time()
print('the time is: ' +str(end - start))
# Visualization of the results of a detection.
print('the type should be:' + str(type(image_np)))
print('the shape should be ' + str(image_np.shape))
print(output_dict)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
plt.figure(figsize=IMAGE_SIZE)
plt.imshow(image_np)
plt.savefig('test' +str(i) +'.jpg')
i+=1
"""### Benchmark TensorFlow inference speed"""
import time
def run_inference_benchmark(image, graph, trial=20, gpu=True):
"""Run TensorFlow inference benchmark.
Arguments:
image {np.array} -- Input image as an Numpy array.
graph {tf.Graph} -- TensorFlow graph object.
Keyword Arguments:
trial {int} -- Number of inference to run for averaging. (default: {20})
gpu {bool} -- Use Nvidia GPU when available. (default: {True})
Returns:
int -- Frame per seconds benchmark result.
"""
with graph.as_default():
if gpu:
config = tf.ConfigProto()
else:
config = tf.ConfigProto(device_count={"GPU": 0})
with tf.Session(config=config) as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
"num_detections",
"detection_boxes",
"detection_scores",
"detection_classes",
"detection_masks",
]:
tensor_name = key + ":0"
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name
)
if "detection_masks" in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict["detection_boxes"], [0])
detection_masks = tf.squeeze(tensor_dict["detection_masks"], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(
tensor_dict["num_detections"][0], tf.int32
)
detection_boxes = tf.slice(
detection_boxes, [0, 0], [real_num_detection, -1]
)
detection_masks = tf.slice(
detection_masks, [0, 0, 0], [real_num_detection, -1, -1]
)
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1]
)
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8
)
# Follow the convention by adding back the batch dimension
tensor_dict["detection_masks"] = tf.expand_dims(
detection_masks_reframed, 0
)
image_tensor = tf.get_default_graph().get_tensor_by_name(
"image_tensor:0"
)
# Run inference
times = []
# Warm up with the first inference, which takes longer than the following ones.
output_dict = sess.run(
tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)}
)
# for i in range(trial):
# start_time = time.time()
# output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
# vis_util.visualize_boxes_and_labels_on_image_array(
# image,
# output_dict['detection_boxes'],
# output_dict['detection_classes'],
# output_dict['detection_scores'],
# category_index,
# instance_masks=output_dict.get('detection_masks'),
# use_normalized_coordinates=True,
# line_thickness=8)
#print(output_dict['detection_boxes'][0])
# delta = time.time() - start_time
# times.append(delta)
#mean_delta = np.array(times).mean()
#fps = 1 / mean_delta
#print("average(sec):{:.3f},fps:{:.2f}".format(mean_delta, fps))
return(0, image)
def test():
image = Image.open(TEST_IMAGE_PATHS[-1])
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection benchmark.
fps = run_inference_benchmark(image_np, detection_graph, trial=201, gpu=False)
def camera_test():
import time
import cv2
camera = cv2.VideoCapture()
time.sleep(1)
camera.open(0)
time.sleep(1)
opened = camera.isOpened()
time.sleep(1)
if not opened:
print("Camera not open")
return
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920/4)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080/4)
while True:
ret, frame = camera.read()
reduction = 2.5
frame = cv2.resize(frame, (int(320/reduction), int(240/reduction)))
print(type(frame))
print(frame.shape)
#
# frame = frame.astype(np.uint8)
#
# print(type(frame))
# print('shape test ')
# print((frame.shape))
# #print(frame)
#
# #(im_width, im_height) = frame.size
#
# #frame = np.array(frame.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
#
#
# # the array based representation of the image will be used later in order to prepare the
# # result image with boxes and labels on it.
# #image_np = load_image_into_numpy_array(frame)
# image_np = frame
# # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
# image_np_expanded = np.expand_dims(image_np, axis=0)
# # Actual detection benchmark.
fps,image_np = run_inference_benchmark(frame, detection_graph, trial=1, gpu=True)
cv2.imshow("Camera", frame)
if not ret:
break
k = cv2.waitKey(1)
if k % 256 == 27:
print("Done")
break
camera.release()
cv2.destroyAllWindows()
def camera_test_speed():
graph = detection_graph
gpu = True
with graph.as_default():
if gpu:
config = tf.ConfigProto()
else:
config = tf.ConfigProto(device_count={"GPU": 0})
with tf.Session(config=config) as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
"num_detections",
"detection_boxes",
"detection_scores",
"detection_classes",
"detection_masks",
]:
tensor_name = key + ":0"
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name
)
if "detection_masks" in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict["detection_boxes"], [0])
detection_masks = tf.squeeze(tensor_dict["detection_masks"], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(
tensor_dict["num_detections"][0], tf.int32
)
detection_boxes = tf.slice(
detection_boxes, [0, 0], [real_num_detection, -1]
)
detection_masks = tf.slice(
detection_masks, [0, 0, 0], [real_num_detection, -1, -1]
)
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1]
)
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8
)
# Follow the convention by adding back the batch dimension
tensor_dict["detection_masks"] = tf.expand_dims(
detection_masks_reframed, 0
)
image_tensor = tf.get_default_graph().get_tensor_by_name(
"image_tensor:0"
)
import time
import cv2
cv2.namedWindow("preview")
vc = cv2.VideoCapture(1)
ret, frame = vc.read()
while True:
ret, frame = vc.read()
reduction = 1.5
frame = cv2.resize(frame, (int(320/reduction), int(240/reduction)))
#print(type(frame))
#print(frame.shape)
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(frame, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(
| |
= 0
checks = 20
x_lis = []
previousStart = 0
forceAv = 0
forceAbsAv = 0
x = f
force = np.zeros(periods)
else:
# R=i/100
t0 = time.time() #Calling computer clock
x0 = [1,0,0,0,0,0] #Initial values. Change here.
totPoints = 1000000*factor
periods = 1000*factor
pointPerPeroid = int(totPoints/periods)
stepSize = (2*math.pi)/om/pointPerPeroid
force = []
forceAbsolute = 0
checks = 20
x_lis = []
previousStart = 0
# for i in range(checks):
# k = i+1
for i in range(periods):
g = g + random.uniform(-0.0005, 0.0005)
# gam = gam + random.uniform(-0.0005, 0.0005)
start = i*(2*math.pi)/om
end = (i+1)*(2*math.pi)/om
# print((end-previousStart)/pointPerPeroid)
t = np.linspace(start, end, pointPerPeroid)
z = odeint(Harvest, x0, t) #(Function, initial condition, time)
z_list = z.tolist()
# print(z_list[0],z_list[1],z_list[3])
for j in range(z.__len__()-1):
x_lis.append(z_list[j])
# print(z)
t = np.linspace(end-stepSize, end+stepSize, 2)
y = odeint(Harvest, z[pointPerPeroid-1], t)
# print(x_lis[x_lis.__len__()-1])
# print(y)
y_list = y.tolist()
qY = y_list[0][0]
pY = y_list[0][1]
vY = y_list[0][2]
if i+1 < periods:
qActNext = f[(i+1)*pointPerPeroid][0]
pActNext = f[(i+1)*pointPerPeroid][1]
vActNext = f[(i+1)*pointPerPeroid][3]
else:
qActNext = f[(i+1)*pointPerPeroid-1][0]
pActNext = f[(i+1)*pointPerPeroid-1][1]
vActNext = f[(i+1)*pointPerPeroid-1][3]
dq = qActNext-qY
dp = pActNext-pY
dv = vActNext-vY
x_lis.append([qActNext, pActNext, vActNext, y[0][3], y[0][4], y[0][5]])
# print(y)
# print(z)
# print(y[1])
# print(dq, dp, stepSize)
force.append(dp/stepSize)
# forceTot = forceTot+(dp/stepSize+dq/(stepSize**2))**2
forceAbsolute = forceAbsolute + abs(dp/stepSize)
x0 = x_lis[x_lis.__len__()-1]
# print(force)
# print(x_lis[i*pointPerPeroid-1], f[i*pointPerPeroid-1])
x = np.array(x_lis)
# forceAv = forceTot/periods
forceAbsAv = forceAbsolute/periods
# print(x)
# print(x.__len__())
print(forceAbsAv)
# np.save('GCorrectiveDATAxi{:.2f}g{:.4f}r{:.2f}om{:.3f}param.png'.format(xi,g,R,om), x)
#Going from 0 to 50000, with 100000 points of data.
#print(x)
#Going to produce five columns of results, first colum is q, then p , then v
numOfPoints = 980000*factor #This is the transient number of points to be rejected right here
q = x[:,0][numOfPoints:] #Starting FROM numOfPoints
p = x[:,1][numOfPoints:]
v = x[:,2][numOfPoints:]
# print(v)
Edrive = x[:,3][numOfPoints:]
Ediss = x[:,4][numOfPoints:]
# Ecap = x[:,5][600000:]
ER = x[:,5][numOfPoints:]
# EOsc = x[:,7][600000:]
# for i in range(periods):
# start = i*(2*math.pi)/om
# end = (i+1)*(2*math.pi)/om
# # print((end-previousStart)/pointPerPeroid)
# t = np.linspace(start, end, pointPerPeroid)
#
# z = odeint(Harvest, x0, t) #(Function, initial condition, time)
# z_list = z.tolist()
# # print(z_list[0],z_list[1],z_list[3])
# if i%checkEveryXPeriods==0:
# for j in range(z.__len__()-1):
# x_lis.append(z_list[j])
# # print(z)
#
# t = np.linspace(end-stepSize, end+stepSize, 2)
#
# y = Correct(z, pointPerPeroid, i, end, stepSize)
# # print(type(y))
# x_lis.append(y)
# # print(y)
# # print(z)
# # print(y[1])
# x0 = y
# else:
# for j in range(z.__len__()):
# x_lis.append(z_list[j])
# print(i)
# for i in range(periods):
# periodsAcheck = int(periods/checks)
# start = i*(2*math.pi)/om
# end = (i+1)*(2*math.pi)/om
# # print((end-previousStart)/pointPerPeroid)
# t = np.linspace(start, end, pointPerPeroid)
#
# z = odeint(Harvest, x0, t) #(Function, initial condition, time)
# z_list = z.tolist()
# # print(z_list[0],z_list[1],z_list[3])
# for j in range(z.__len__()-1):
# x_lis.append(z_list[j])
# # print(z)
#
# t = np.linspace(end-stepSize, end+stepSize, 2)
#
# y = Correct(z, pointPerPeroid, i, end, stepSize)
# # print(type(y))
# x_lis.append(y)
# # print(y)
# # print(z)
# # print(y[1])
# x0 = y
# print(i)
# # print(y)
# for i in range(checks):
#
# start = i*periodsAcheck*(2*math.pi)/om
# end = (i+1)*periodsAcheck*(2*math.pi)/om
# # print((end-previousStart)/pointPerPeroid)
# t = np.linspace(start, end, pointPerPeroid*periodsAcheck)
#
# z = odeint(Harvest, x0, t) #(Function, initial condition, time)
# # print(f[i*periodsAcheck+10][0])
# z_list = z.tolist()
# # print(z_list[0],z_list[1],z_list[3])
# for j in range(z.__len__()-1):
# x_lis.append(z_list[j])
# # print(z)
# # print(x_lis[i*periodsAcheck+10][0])
# t = np.linspace(end-stepSize, end+stepSize, 2)
#
# y = Correct(z, pointPerPeroid, (i+1)*periodsAcheck-1, end, stepSize) # need to adjust periodsize x value for periods a check
# # print(type(y))
# x_lis.append(y)
# # print(y)
# # print(z)
# # print(y[1])
# # print(x_lis[i*periodsAcheck+10][0])
# x0 = y
# print(i)
# # print(y)
#Utility function being defined on the fly for averaging energy throughput
# def Average(lst):
# return sum(lst) / len(lst)
#Where do we use this?
# HEnergyinonedrive = (ER[-1]-ER[-(totPoints-(numOfPoints+1))])/((totPoints-numOfPoints)/pointPerPeroid)
# #160 because 200-40, Harvested energy in one drive (takes the last value subtracts before transient,
# #then divides by number of periods)
# Energyinonedrive = (Edrive[-1]-Edrive[-(totPoints-(numOfPoints+1))])/((totPoints-numOfPoints)/pointPerPeroid) #Driven energy
# DissEnergyinonedrive = (Ediss[-1]-Ediss[-(totPoints-(numOfPoints+1))])/((totPoints-numOfPoints)/pointPerPeroid)
# enEffNum = HEnergyinonedrive/Energyinonedrive
#
#
# dissList.append(DissEnergyinonedrive)
# driveList.append(Energyinonedrive)
# harvestList.append(HEnergyinonedrive)
# effList.append(enEffNum)
# Data saved
# Nice plotting set up
col = 4
row = 3
phase = plt.subplot(col,row,2)
plt.scatter(q,p, s=1)
#Translating to Poincare
xs = []
ys = []
xs = [x[int(totPoints/periods)*i,0] for i in range(periods)]
ys = [x[int(totPoints/periods)*i,1] for i in range(periods)]
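# Editor's note (added): xs/ys sample the trajectory once per drive period
# (every pointPerPeroid-th point), i.e. a stroboscopic Poincare section at the
# drive frequency om; only the post-transient samples (990*factor onward) are drawn in red below.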
plt.scatter(xs[990*factor:], ys[990*factor:], color="red")
#Poincare
# averagePoincare.append([statistics.mean(xs[9900:]), statistics.mean(ys[9900:])])
# What the heck is this ? Oh something we were doing earlier, not to worry
# print(qmin, qmax, pmin, pmax)
plt.xlabel('q')
# plt.xlim(qmin-5, qmax+5)
plt.ylabel('p')
# plt.ylim(pmin-5, pmax+5)
plt.axis([np.amin(q)-1, np.amax(q)+1, np.amin(p)-1, np.amax(p)+1])
# plt.axis([-1.75,1.75,-1, 1])
phase.set_title('b) Phase Plot for 10,000 Oscillations', loc = 'left')
# phase.set_title('Phase Space')
# EHistplt = plt.subplot(col,row,2)
# plt.hexbin(q,p, extent=[np.amin(q)-1, np.amax(q)+1, np.amin(p)-1, np.amax(p)+1])
# # plt.hexbin(q,p, extent=[-1.75,1.75, -1, 1])
# plt.xlabel('q')
# plt.ylabel('p')
# EHistplt.set_title('b)', loc = 'left')
# # EHistplt.set_title("Histogram of Phase Space")
#
#
# # Histogram = plt.subplot(col,row,3)
# # plt.hist(p, bins=500, density = True)
# # # fig = plt.figure()
# # # ax = fig.add_axes([0,0,1,1])
# # # langs = ['C', 'C++', 'Java', 'Python', 'PHP']
# # # students = [23,17,35,29,12]
# # Histogram.bar(["Average Force Squared"],[forceAv])
# # # plt.show()
# # # plt.xlabel('p')
# # # plt.ylabel(r'$P_p$')
# # Histogram.set_title('c)', loc = 'left')
# # # Histogram.set_title("Histogram of p")
#
# Histogram = plt.subplot(col,row,3)
# plt.hist(p, bins=500, density = True)
# # fig = plt.figure()
# # ax = fig.add_axes([0,0,1,1])
# # langs = ['C', 'C++', 'Java', 'Python', 'PHP']
# # students = [23,17,35,29,12]
# Histogram.bar(["Magnitude of Average Force "], [forceAbsAv])
# # plt.show()
# # plt.xlabel('p')
# # plt.ylabel(r'$P_p$')
# Histogram.set_title('c)', loc = 'left')
# # Histogram.set_title("Histogram of p")
#
# capacitor = [x * xi for x in v]
#
# CapSpace = plt.subplot(col, row, 4)
# plt.scatter(q, v,s=0.5, )
# plt.xlabel('q')
# plt.ylabel('v')
# plt.axis([np.amin(q)-1, np.amax(q)+1, np.amin(v)-1, np.amax(v)+1])
# # plt.axis([-1.5,1.5, -0.2,0.2])
# CapSpace.set_title('d)', loc = 'left')
# # CapSpace.set_title("Capacitor Space")
#
# HistCapacitor = plt.subplot(col,row,5)
# # plt.hexbin(q,capacitor, extent=[-3.5,3.5, -0.1,0.1])
# # plt.hexbin(p,v, extent=[-1,1, -0.2,0.2])
# plt.hexbin(v, p, extent=[np.amin(v)-1, np.amax(v)+1, np.amin(p)-1, np.amax(p)+1])
# plt.xlabel('v')
# plt.xlim(np.amin(v)-1, np.amax(v)+1)
# plt.ylabel('p')
# # plt.axis([-1.8,1.8, -0.4,0.4])
# HistCapacitor.set_title('e)', loc = 'left')
# # HistCapacitor.set_title("Histogram of Capacitor Space")
#
LineGraphF = plt.subplot(col,row,3)
plt.plot(np.linspace(0,periods*2*math.pi/om, periods),force)
plt.xlabel('Time')
plt.ylabel('Force')
# plt.xlim(0,periods*2*math.pi/om)
# plt.hist(v, bins=500, density = True)
# plt.xlabel('v')
# plt.ylabel(r'$P_v$')
LineGraphF.set_title('c) Force v. Time for 10,000 Oscillations', loc = 'left')
# HistogramV.set_title("Histogram of v")
# damping = [x * -2 * gam for x in p]
# # What the heck is this ?
# # drove = [g * math.cos(om * t) for t in np.linspace(0, 100*(2*math.pi)/om, (totPoints-numOfPoints))]
# drove = [g * math.cos(om * t) for t in np.linspace(0, (totPoints-numOfPoints)/pointPerPeroid*(2*math.pi)/om, (totPoints-numOfPoints))]
# driving = plt.subplot(col, row, 7)
# driving = plt.axes(projection='3d')
# driving.scatter3D(p,q,v)
# driving.set_xlabel('p')
# driving.set_ylabel('q')
# driving.set_zlabel('v')
# # plt.zlabel("z")
# # driving.xlim(np.amin(p)-1,np.amax(p)+1)
# # driving.ylim(np.amin(q)-1,np.amax(q)+1)
# # driving.zlim(np.amin(v)-1,np.amax(v)+1)
# # plt.axis([-1.75,1.75, -1,1])
# driving.set_title('g)', loc = 'left')
# driving.set_title('3d Projection of Phase Space')
# HistDrive = plt.subplot(col,row,8)
# plt.hexbin(p,drove, extent=[np.amin(p)-1,np.amax(p)+1, np.amin(drove)-1, np.amax(drove)+1])
# # plt.hexbin(p,drove, extent=[-1.75,1.75, -1,1])
# plt.xlabel('p')
# plt.ylabel(r'$g\mathrm{cos}(\omega t)$')
# HistDrive.set_title('h)', loc = 'left')
# HistDrive.set_title("Histogram of Driving Space")
# Histogramdrive = plt.subplot(col,row,9)
# labels = [r'$E_R$',r'$E_{Drive}$',r'$E_{Diss}$']
# barNum = [round(HEnergyinonedrive,3),round(Energyinonedrive,3),round(DissEnergyinonedrive,3)]
# x = np.arange(len(labels))
# width = 0.35
# Histogramdrive.bar(x, barNum, width)
# Histogramdrive.set_xticks(x)
# Histogramdrive.set_xticklabels(labels)
# plt.ylim(top=barNum[0]+2)
# plt.ylabel('Average Energy per Period')
# Histogramdrive.set_title('i)', loc = 'left')
t1 = time.time()
print(t1-t0)
plt.subplots_adjust(hspace=0.4, wspace=0.4)
# plt.suptitle("Om = {}, xi = {}, C = {}, R = {}, g = {}, gam = {}".format(om, xi, C, R, g, gam), fontsize = 25)
# plt.savefig('GCorrectionxi{:.2f}g{:.4f}r{:.2f}om{:.3f}param.png'.format(xi,g,R,om),bbox_inches='tight', dpi = 100)
# plt.show()
# plt.close('all')
averageForces.append(forceAbsAv)
plt.savefig('Fig.8.png',bbox_inches='tight', dpi = 100)
plt.close('all')
# plt.title("Force against g values")
# plt.xlabel("g")
# set of 20 colors, dark then light
nesr_colors = {0 : 'blue', 1: 'gold', 2: 'green', 3: 'yellow', 4: 'purple', 5: 'white',
6 : 'red', 7: 'bisque', 8: 'maroon', 9: 'aqua', 10: 'black', 11: 'lime',
12: 'indigo', 13: 'fuchsia', 14: 'darkcyan', 15: 'gold', 16: 'navy',
17: 'khaki', 18: 'saddlebrown', 19: 'lightsteelblue'}
# set of dark colors only
nesr_colors_dark = {}
for i in range(0, 20, 2):
nesr_colors_dark[i / 2] = nesr_colors[i]
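# Editor's note (added): the loop above keeps the even-indexed entries
# (0, 2, ..., 18) of nesr_colors -- the ten "dark" colors -- re-keyed as 0-9.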
num_inputs_original = self.source_data.num_inputs
num_outputs_original = self.source_data.num_outputs
print '...Now drawing graphs',
# figure_page.append(plt.figure(figure_number))
plt.rcParams["figure.figsize"] = [20, 12]
# x = range(10)
# y = x
# plt.plot(x,y,'r.')
# plt.grid(True)
# plt.close
elif section == 1:
# Displaying errors and -ve contribution chart
# $^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$
# ===============================================================
# Graphing the training error
# cur_plt = 0
try:
figure_page.append(plt.figure(figure_number))
# cur_plt = 1
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
draw_cost_function() # =-.-=-.-=-.-=-.-=
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
print figure_number,
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not self.display_graph_windows:
plt.close()
except Exception, err:
print "\nError graphing the training error, panel 1."
print err
elif section == 2:
# $^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$
try:
figure_number += 1
figure_page.append(plt.figure(figure_number))
# Graphing the artificial neural network diagram
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
w12, w23, b12, b23, structure = draw_full_ann() # =-.-=-.-=-.-=-.-=
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
print figure_number,
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not self.display_graph_windows:
plt.close()
except Exception, err:
print "\nError graphing the artificial neural network diagram, panel 2."
print err
elif section == 3:
# $^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$
try:
figure_number += 1
figure_page.append(plt.figure(figure_number))
# ===============================================================
# Graphing the artificial BRIEF neural network diagram
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
draw_brief_ann(w12, w23, b12, b23, structure) # =-.-=-.-=-.-=-.-=
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
print figure_number,
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not self.display_graph_windows:
plt.close()
except Exception, err:
print "\nError graphing the BRIEF neural network diagram, panel 3."
print err
elif section == 4:
# ===============================================================
# =========================
# The next page of plots.
# =========================
# $^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$
try:
figure_number += 1
figure_page.append(plt.figure(figure_number, facecolor='white'))
# ===============================================================
# Graphing relative importance (-ve)
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
draw_relative_importance() # =-.-=-.-=-.-=-.-=
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
print figure_number,
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not self.display_graph_windows:
plt.close()
except Exception, err:
print "\nError graphing the relative_importance diagram, panel 4."
print err
elif section == 5:
# ===============================================================
# =========================
# The next page of plots.
# =========================
# $^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$
try:
figure_number += 1
figure_page.append(plt.figure(figure_number))
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
out_graph_o, graph_grid_sub, graph_grid2, scatter_categoric = draw_prediction_cloud()
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
print figure_number,
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not self.display_graph_windows:
plt.close()
except Exception, err:
print "\nError graphing the prediction clouds, panel 5."
print err
elif section == 6:
# $^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$^$
try:
figure_number += 1
figure_page.append(plt.figure(figure_number))
# =-.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-=
draw_real_vs_forecasted() # =-.-=-.-=-.-=-.-=
# =-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=-.-=
print figure_number,
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
if not self.display_graph_windows:
plt.close()
except Exception, err:
print "\nError graphing the real vs. forecasted graphs, panel 6."
print err
elif section == 7:
try:
# =-.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-=
draw_parametric_graphs(figure_number, self)
# =-.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-= -.-=
except Exception, err:
print "\nError graphing one of the parametric graphs, panel 7+"
print err
pass
if initial_time != 0:
print "\nElapsed time throughout the study: \n **Execluding time of 'showing' the graphs**\n", \
elapsed_time(initial_time, time.time())
# def shorten_time_stamp(time_stamp_text):
# short_stamp = ""
# split_stamp = map(lambda x: int(x), re.findall('.{1,2}', time_stamp_text))
# short_stamp += chr(split_stamp[0] + 50)
# short_stamp += str(split_stamp[1]) if split_stamp[1] < 10 else chr(split_stamp[1] + 55)
# short_stamp += str(split_stamp[2]) if split_stamp[2] < 10 else chr(split_stamp[2] + 55)
# short_stamp += str(split_stamp[3]) if split_stamp[3] < 10 else chr(split_stamp[3] + 55)
# short_stamp += str(split_stamp[4])
# short_stamp += str(split_stamp[5])
# return short_stamp
# Closing The log file of the console.
self.log_file.close()
# Converting the text file to pdf
time_stamp = self.time_stamp
console_pdf_name = 'NrCh_PdfLog_' + time_stamp[-4:] + '.pdf'
# console_pdf_file = self.convert_text_to_pdf("NesrLog.txt", console_pdf_name)
self.convert_text_to_pdf(self.new_folder_path + '\\' + "NrChLog.txt", console_pdf_name)
# Making the pdf of all charts
pdf_name = 'NrCh_OutputCharts_' + time_stamp[-4:] + '.pdf'
# # Create a folder and put all files in:
# current_folder = os.getcwd()
# directory_name = 'NrCh' + time_stamp
# new_folder_path = current_folder + "\\" + directory_name
# if not os.path.exists(new_folder_path):
# os.makedirs(new_folder_path)
with PdfPages(pdf_name) as pdf:
for page_num, page in enumerate(figure_page):
pdf.attach_note(pages_titles[page_num])
pdf.savefig(page)
# We can also set the file's metadata via the PdfPages object:
d = pdf.infodict()
d['Title'] = 'NeuroCharter, A python open source neural networks simulator'
d['Author'] = 'Dr. <NAME>' # u'<NAME>\xe4nen'
d['Subject'] = 'NeuroCharter Simulation Results'
d['Keywords'] = 'Neural networks AWC-KSU King Saud University Alamoudi Water Chair'
d['CreationDate'] = dt(2016, 4, 13)
d['ModDate'] = dt.today()
# Moving files
def move_file_to_folder(old_name, new_name, rename=False):
destination = self.new_folder_path
source = self.new_folder_path if rename else self.current_folder
try:
os.rename(source + "\\" + old_name, destination + "\\" + new_name)
except:
# rename if exist with a random number
rand_num = str(int(random.random() * 100000))
os.rename(source + "\\" + old_name, destination + "\\" + rand_num + new_name)
# Saving Graph Data
move_file_to_folder(console_pdf_name, console_pdf_name)
move_file_to_folder(pdf_name, pdf_name)
# Rename Other files
sub_stamp = time_stamp[-4:]
move_file_to_folder("NrChLog.txt", "NrCh__Log__" + sub_stamp + ".txt", rename=True)
move_file_to_folder("PredictionClouds.csv", "NrCh_Clouds_" + sub_stamp + ".csv", rename=True)
move_file_to_folder("NeuroCharterNet.nsr", "NrCh_StoredANN_" + sub_stamp + ".nsr", rename=True)
move_file_to_folder("Outputs.txt", "NrCh_Outputs_" + sub_stamp + ".txt", rename=True)
move_file_to_folder("Weights.csv", "NrCh_Weights_" + sub_stamp + ".csv", rename=True)
move_file_to_folder("NormalizedData.csv", "NrCh_NormData_" + sub_stamp + ".csv", rename=True)
charts_data_files = [("Grf1_Costs.csv", "NrCh_C1_Costs_"),
("Grf2_Importance.csv", "NrCh_C2_Importance_"),
("Grf3_Clouds.csv", "NrCh_C3_Clouds_"),
("Grf4_Predictions.csv", "NrCh_C4_Predictions_"),
("Grf5_Relationships.csv", "NrCh_C5_Studies_")]
for data_file in charts_data_files:
try:
move_file_to_folder(data_file[0], data_file[1] + sub_stamp + ".csv", rename=True)
except:
pass
if self.display_graph_pdf:
Popen(self.new_folder_path + "\\" + pdf_name, shell=True)
if self.display_graph_windows:
plt.show()
pass
@staticmethod
def convert_text_to_pdf(text_file_name, pdf_file_name, maximum_text_width=85):
"""
A function to convert a text file to a pdf file
@param text_file_name: the text file you want to convert to pdf
@param pdf_file_name: the name of the pdf file that will be created
@param maximum_text_width: maximum number of characters allowed in a line.
@return: None
"""
# from reportlab.pdfbase import pdfmetrics
# from reportlab.pdfbase.ttfonts import TTFont
#
# pdfmetrics.registerFont(TTFont('Courier New Regular', 'COUR_0.ttf'))
# pdfmetrics.registerFont(TTFont('Courier New Italic', 'COURI_0.ttf'))
# pdfmetrics.registerFont(TTFont('Courier New Bold', 'COURBD_0.ttf'))
# pdfmetrics.registerFont(TTFont('Courier New Bold Italic', 'COURBI_0.ttf'))
temp_file_read = open(text_file_name, "r") # text file I need to convert
lines = temp_file_read.readlines()
temp_file_read.close()
i = 750
line_number = 0
# Open an empty pdf file
pdf_file = canvas.Canvas(pdf_file_name)
# pdf_file.setFont('Courier New Regular', 12)
# finding actual number of lines
actual_lines = []
for line in lines:
line = line.replace('\n', '')
if len(line) < maximum_text_width:
actual_lines.append(line)
else:
line_list = line.split(",")
temp_line = ""
for r in range(len(line_list)):
if len(temp_line) + len(str(line_list[r]) + ",") < maximum_text_width:
temp_line += str(line_list[r])
if r != len(line_list) - 1:
temp_line += ","
else:
actual_lines.append(temp_line)
temp_line = " " + str(line_list[r])
if r != len(line_list) - 1:
temp_line += ","
else:
actual_lines.append(temp_line)
# Start printing lines to the pdf file
while line_number < len(actual_lines):
if line_number - len(actual_lines) < 60:  # to finish the page every 60 lines
i = 750 # the initial Y location
for sub_line in actual_lines[line_number:line_number + 60]:
pdf_file.drawString(15, i, sub_line.strip())
line_number += 1
i -= 12
pdf_file.showPage()
else:
i = 750 # the initial Y location
for sub_line in actual_lines[line_number:]:
pdf_file.drawString(15, i, sub_line.strip())
line_number += 1
i -= 12
pdf_file.showPage()
pdf_file.save()
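# Editor's note (added, illustrative): lines shorter than maximum_text_width are
# passed through unchanged; longer comma-separated lines are split at commas so
# each emitted chunk stays under the limit, with continuation chunks prefixed by
# a single space, and the result is drawn 60 lines per PDF page.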
@staticmethod
def find_suitable_grid(num):
"""
A lookup to find the suitable grid of graphs in each figure
@param num: the number of charts needed to be plotted
@return: a tuple of (tuple of maximum grid, tuple of current location)
example: if num = 11, then it returns ((3, 4), (2, 3))
"""
suit = {0 : (0, 0), 1: (0, 1), 2: (1, 0), 3: (1, 1), 4: (0, 2), 5: (1, 2), 6: (2, 0),
7 : (2, 1), 8: (2, 2), 9: (0, 3), 10: (1, 3), 11: (2, 3), 12: (3, 0), 13: (3, 1),
14: (3, 2), 15: (3, 3), 16: (0, 4), 17: (1, 4), 18: (2, 4), 19: (3, 4), 20: (4, 0),
21: (4, 1), 22: (4, 2), 23: (4, 3), 24: (4, 4)}
grid_dict = {0 : (1, 1), 1: (1, 2), 2: | |
tand = 0.02
print("\n\nCalculating for FR4 patch.")
W, L, h, Er = DesignPatch(Er, h, freq)
eff = CalculatePatchEff(Er, W, L, h, tand, sigma, freq, VSWR)
CalcDirectivity(eff, PatchFunction, freq, W, L, h, Er)
# Rogers RO4350
print("\n\nCalculating for RO4350 patch.")
Er = 3.48
tand = 0.004
W, L, h, Er = DesignPatch(Er, h, freq)
eff = CalculatePatchEff(Er, W, L, h, tand, sigma, freq, VSWR)
CalcDirectivity(eff, PatchFunction, freq, W, L, h, Er)
# end def
# ======================================== #
"""
Function to calculate peak directivity.
Also includes some examples that are used to check the result.
"""
def SqrtSinPattern(Theta, Phi, *args):
"""
See Fig1 @ http://www.antenna-theory.com/basics/directivity.php
Expect Directivity to be 1.05dB.
"""
return sqrt(sin(radians(Theta)))
def SinPowerPattern(Theta, Phi, *args):
"""
See Fig1 @ http://www.antenna-theory.com/basics/directivity.php
Expect Directivity to be 2.707dB.
"""
return sin(radians(Theta)) ** 5
def IsotropicPattern(Theta, Phi, *args):
"""
Isotropic directional pattern. i.e. radiation is same in all directions.
Expect directivity to be 0dB.
"""
return 1
def xfrange(start, stop, step):
"""
Creates a range of float values.
"""
i = 0
while start + i * step < stop:
yield start + i * step
i += 1
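# Editor's note (added): e.g. list(xfrange(0, 10, 2.5)) -> [0.0, 2.5, 5.0, 7.5];
# like the built-in range, the stop value itself is excluded.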
def CalcDirectivity(Efficiency, RadPatternFunction, *args):
"""
Based on calc_directivity.m from ArrayCalc.
Calculates peak directivity in dBi using numerical integration.
If the array efficiency is set to below 100% then the returned value is referred to as Gain (dB).
Usage: ThetaMax, PhiMax = CalcDirectivity(Efficiency, RadPatternFunction)
RadPatternFunction - antenna's radiation pattern function, F(Theta, Phi)
Efficiency - Efficiency of antenna in %. Default 100%.
Returned values:
ThetaMax - Theta value for direction of maximum directivity (Deg)
PhiMax - Phi value for direction of maximum directivity (Deg)
Integration is of the form :
%
% 360 180
% Int{ Int{ (E(theta,phi)*conj(E(theta,phi))*sin(theta) d(theta) d(phi)
% 0 0
%
% z
% |-theta (theta 0-180 measured from z-axis)
% |/
% |_____ y
% /\
% /-phi (phi 0-360 measured from x-axis)
% x
%
"""
print("Calculating Directivity for " + RadPatternFunction.__name__)
deltheta = 2 # Step value of theta (Deg)
delphi = 2 # Step value for phi (Deg)
dth = radians(deltheta)
dph = radians(delphi)
Psum = 0
Pmax = 0
Thmax = 0
Phmax = 0
for phi in xfrange(0, 360, delphi): # Phi Integration Loop 0-360 degrees
for theta in xfrange(0, 180, deltheta): # Theta Integration Loop 0-180 degrees
eField = RadPatternFunction(theta, phi, *args) # Total E-field at point
Pthph = eField * np.conjugate(eField) # Convert to power
if Pthph > Pmax:
Pmax = Pthph # Store peak value
Thmax = theta # Store theta value for the maximum
Phmax = phi # Store phi value for the maximum
# print(str(theta) + "," + str(phi) + ": " + str(Pthph))
Psum = Psum + Pthph * sin(radians(theta)) * dth * dph # Summation
Pmax = Pmax * (Efficiency / 100) # Apply antenna efficiency
directivity_lin = Pmax / (Psum / (4 * pi)) # Directivity (linear ratio)
directivity_dBi = 10 * log10(directivity_lin) # Directivity (dB wrt isotropic)
if Efficiency < 100: # Gain case
dBdiff = 10 * log10(abs(100 / Efficiency)) # Difference between gain and directivity
print("Directivity = " + str(directivity_dBi + dBdiff) + "dBi") # Display what directivity would be for ref.
print("Efficiency = " + str(Efficiency) + "%")
print("Gain = " + str(directivity_dBi) + "dB")
else: # Directivity case
print("Directivity = " + str(directivity_dBi) + "dBi")
print("At Theta = " + str(Thmax) + ", Phi = " + str(Phmax))
return Thmax, Phmax
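# Editor's sketch (added, not part of the original ArrayCalc port): the loop above
# implements D = 4*pi*Pmax / sum( P(theta,phi) * sin(theta) * dth * dph ).  As a
# quick sanity check, the same discrete sum for the isotropic pattern (P = 1,
# Pmax = 1) should give ~0 dBi.  The helper below is never called and only uses
# names already defined/imported in this module (radians, sin, pi, log10,
# xfrange, IsotropicPattern).
def _isotropic_directivity_sketch(deltheta=2, delphi=2):
    dth, dph = radians(deltheta), radians(delphi)
    psum = 0.0
    for phi in xfrange(0, 360, delphi):
        for theta in xfrange(0, 180, deltheta):
            e = IsotropicPattern(theta, phi)
            psum += (e * e) * sin(radians(theta)) * dth * dph
    directivity_lin = 1.0 / (psum / (4 * pi))   # Pmax = 1 for the isotropic case
    return 10 * log10(directivity_lin)          # expect a value very close to 0 dBi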
def exampleDirectivity():
CalcDirectivity(100, SqrtSinPattern)
print("\n\n")
CalcDirectivity(90, SinPowerPattern)
print("\n\n")
CalcDirectivity(100, IsotropicPattern)
print("\n\n")
freq = 14e9
Er = 3.66 # RO4350B
h = 0.101e-3
W, L, h, Er = DesignPatch(Er, h, freq)
CalcDirectivity(100, PatchFunction, freq, W, L, h, Er)
fields = PatchEHPlanePlot(freq, W, L, h, Er)
SurfacePlot(fields, freq, W, L, h, Er)
W = 10.7e-3
L = 10.47e-3
h = 3e-3
Er = 2.5
print("\n\n")
CalcDirectivity(100, PatchFunction, freq, W, L, h, Er)
fields = PatchEHPlanePlot(freq, W, L, h, Er)
SurfacePlot(fields, freq, W, L, h, Er)
# end def
# ======================================== #
def PatchFunction(thetaInDeg, phiInDeg, Freq, W, L, h, Er):
"""
Taken from Design_patchr
Calculates total E-field pattern for patch as a function of theta and phi
Patch is assumed to be resonating in the (TMx 010) mode.
E-field is parallel to x-axis
W......Width of patch (m)
L......Length of patch (m)
h......Substrate thickness (m)
Er.....Dielectric constant of substrate
Reference: C.<NAME>, 2nd Edition, Page 745
"""
lamba = light_velocity / Freq
theta_in = math.radians(thetaInDeg)
phi_in = math.radians(phiInDeg)
ko = 2 * math.pi / lamba
xff, yff, zff = sph2cart1(999, theta_in, phi_in) # Rotate coords 90 deg about x-axis to match array_utils coord system with coord system used in the model.
xffd = zff
yffd = xff
zffd = yff
r, thp, php = cart2sph1(xffd, yffd, zffd)
phi = php
theta = thp
if theta == 0:
theta = 1e-9 # Trap potential division by zero warning
if phi == 0:
phi = 1e-9
Ereff = ((Er + 1) / 2) + ((Er - 1) / 2) * (1 + 12 * (h / W)) ** -0.5 # Calculate effective dielectric constant for microstrip line of width W on dielectric material of constant Er
F1 = (Ereff + 0.3) * (W / h + 0.264) # Calculate the increase in length dL of patch length L due to fringing fields at each end, giving total effective length Leff = L + 2*dL
F2 = (Ereff - 0.258) * (W / h + 0.8)
dL = h * 0.412 * (F1 / F2)
Leff = L + 2 * dL
Weff = W # Calculate effective width Weff for patch, uses standard Er value.
heff = h * sqrt(Er)
# Patch pattern function of theta and phi, note the theta and phi for the function are defined differently to theta_in and phi_in
Numtr2 = sin(ko * heff * cos(phi) / 2)
Demtr2 = (ko * heff * cos(phi)) / 2
Fphi = (Numtr2 / Demtr2) * cos((ko * Leff / 2) * sin(phi))
Numtr1 = sin((ko * heff / 2) * sin(theta))
Demtr1 = ((ko * heff / 2) * sin(theta))
Numtr1a = sin((ko * Weff / 2) * cos(theta))
Demtr1a = ((ko * Weff / 2) * cos(theta))
Ftheta = ((Numtr1 * Numtr1a) / (Demtr1 * Demtr1a)) * sin(theta)
# Due to groundplane, function is only valid for theta values : 0 < theta < 90 for all phi
# Modify pattern for theta values close to 90 to give smooth roll-off, standard model truncates H-plane at theta=90.
# PatEdgeSF has value=1 except at theta close to 90 where it drops (proportional to 1/x^2) to 0
rolloff_factor = 0.5 # 1=sharp, 0=softer
theta_in_deg = theta_in * 180 / math.pi # theta_in in Deg
F1 = 1 / (((rolloff_factor * (abs(theta_in_deg) - 90)) ** 2) + 0.001) # intermediate calc
PatEdgeSF = 1 / (F1 + 1) # Pattern scaling factor
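# Editor's note (worked values from the two lines above): at theta_in = 90 deg,
# F1 = 1/0.001 = 1000 so PatEdgeSF ~ 1/1001 ~ 0.001 (pattern forced towards zero);
# at theta_in = 60 deg, F1 = 1/(15**2 + 0.001) ~ 0.0044 so PatEdgeSF ~ 0.996.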
UNF = 1.0006 # Unity normalisation factor for element pattern
if theta_in <= math.pi / 2:
Etot = Ftheta * Fphi * PatEdgeSF * UNF # Total pattern by pattern multiplication
else:
Etot = 0
return Etot
def sph2cart1(r, th, phi):
x = r * cos(phi) * sin(th)
y = r * sin(phi) * sin(th)
z = r * cos(th)
return x, y, z
def cart2sph1(x, y, z):
r = sqrt(x**2 + y**2 + z**2) + 1e-15
th = acos(z / r)
phi = atan2(y, x)
return r, th, phi
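# Editor's sketch (added): a round-trip check of the coordinate helpers above,
# illustrating that th is measured from the +z axis and phi from the +x axis
# (matching the diagram in CalcDirectivity).  Defined but never called; it only
# uses radians from this module and the two helpers directly above.
def _sph_roundtrip_sketch():
    r0, th0, phi0 = 2.0, radians(60), radians(30)
    x, y, z = sph2cart1(r0, th0, phi0)
    r1, th1, phi1 = cart2sph1(x, y, z)
    # the differences should be ~0 (up to the 1e-15 guard added to r in cart2sph1)
    return r1 - r0, th1 - th0, phi1 - phi0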
# ========================================================================= #
# ========================================================================= #
class Result:
def __init__(self):
self.frequency = None
self.patch_width = None
self.patch_length = None
self.feeder_width = None
self.feeder_length = None
self.inset_gap_width = None
self.inset_length = None
self.ground_length = None
self.ground_width = None
self.input_edge_impedance = None
def design_string(resonant_frequency, dielectric_constant, thickness):
return json.dumps(design_result(resonant_frequency, dielectric_constant, thickness).__dict__, indent=4)
def design_result(resonant_frequency, dielectric_constant, thickness):
return design(resonant_frequency, dielectric_constant, thickness).get_result()
def design(resonant_frequency, dielectric_constant, thickness):
"""calculates length and width of patch antenna from dielectric constant, thickness and resonant frequency"""
return PatchDesigner(resonant_frequency, dielectric_constant, thickness)
class PatchDesigner:
"""All parameter calculations"""
freq = | |
= "data_0924"
file_ext = "sff"
def sniff(self, filename):
# The first 4 bytes of any sff file is '.sff', and the file is binary. For details
# about the format, see http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=format
try:
header = open(filename, 'rb').read(4)
if header == b'.sff':
return True
return False
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary sff file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary sff file (%s)" % (nice_size(dataset.get_size()))
class BigWig(Binary):
"""
Accessing binary BigWig files from UCSC.
The supplemental info in the paper has the binary details:
http://bioinformatics.oxfordjournals.org/cgi/content/abstract/btq351v1
"""
edam_format = "format_3006"
edam_data = "data_3002"
file_ext = "bigwig"
track_type = "LineTrack"
data_sources = {"data_standalone": "bigwig"}
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
self._magic = 0x888FFC26
self._name = "BigWig"
def _unpack(self, pattern, handle):
return struct.unpack(pattern, handle.read(struct.calcsize(pattern)))
def sniff(self, filename):
try:
magic = self._unpack("I", open(filename, 'rb'))
return magic[0] == self._magic
except Exception:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary UCSC %s file" % self._name
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary UCSC %s file (%s)" % (self._name, nice_size(dataset.get_size()))
class BigBed(BigWig):
"""BigBed support from UCSC."""
edam_format = "format_3004"
edam_data = "data_3002"
file_ext = "bigbed"
data_sources = {"data_standalone": "bigbed"}
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
self._magic = 0x8789F2EB
self._name = "BigBed"
class TwoBit(Binary):
"""Class describing a TwoBit format nucleotide file"""
edam_format = "format_3009"
edam_data = "data_0848"
file_ext = "twobit"
def sniff(self, filename):
try:
# All twobit files start with a 16-byte header. If the file is smaller than 16 bytes, it's obviously not a valid twobit file.
if os.path.getsize(filename) < 16:
return False
header = open(filename, 'rb').read(TWOBIT_MAGIC_SIZE)
magic = struct.unpack(">L", header)[0]
if magic == TWOBIT_MAGIC_NUMBER or magic == TWOBIT_MAGIC_NUMBER_SWAP:
return True
except IOError:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary TwoBit format nucleotide file"
dataset.blurb = nice_size(dataset.get_size())
else:
return super(TwoBit, self).set_peek(dataset)
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Binary TwoBit format nucleotide file (%s)" % (nice_size(dataset.get_size()))
@dataproviders.decorators.has_dataproviders
class SQlite(Binary):
"""Class describing a Sqlite database """
MetadataElement(name="tables", default=[], param=ListParameter, desc="Database Tables", readonly=True, visible=True, no_value=[])
MetadataElement(name="table_columns", default={}, param=DictParameter, desc="Database Table Columns", readonly=True, visible=True, no_value={})
MetadataElement(name="table_row_count", default={}, param=DictParameter, desc="Database Table Row Count", readonly=True, visible=True, no_value={})
file_ext = "sqlite"
edam_format = "format_3621"
def init_meta(self, dataset, copy_from=None):
Binary.init_meta(self, dataset, copy_from=copy_from)
def set_meta(self, dataset, overwrite=True, **kwd):
try:
tables = []
columns = dict()
rowcounts = dict()
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT name,sql FROM sqlite_master WHERE type='table' ORDER BY name"
rslt = c.execute(tables_query).fetchall()
for table, _ in rslt:
tables.append(table)
try:
col_query = 'SELECT * FROM %s LIMIT 0' % table
cur = conn.cursor().execute(col_query)
cols = [col[0] for col in cur.description]
columns[table] = cols
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
for table in tables:
try:
row_query = "SELECT count(*) FROM %s" % table
rowcounts[table] = c.execute(row_query).fetchone()[0]
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
dataset.metadata.tables = tables
dataset.metadata.table_columns = columns
dataset.metadata.table_row_count = rowcounts
except Exception as exc:
log.warning('%s, set_meta Exception: %s', self, exc)
def sniff(self, filename):
# The first 16 bytes of any SQLite3 database file is 'SQLite format 3\0', and the file is binary. For details
# about the format, see http://www.sqlite.org/fileformat.html
try:
header = open(filename, 'rb').read(16)
if header == b'SQLite format 3\0':
return True
return False
except Exception:
return False
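# Editor's note (added): the 16-byte magic tested above is the ASCII string
# "SQLite format 3" followed by a NUL byte, i.e.
# 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 in hex.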
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "SQLite Database"
lines = ['SQLite Database']
if dataset.metadata.tables:
for table in dataset.metadata.tables:
try:
lines.append('%s [%s]' % (table, dataset.metadata.table_row_count[table]))
except Exception:
continue
dataset.peek = '\n'.join(lines)
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "SQLite Database (%s)" % (nice_size(dataset.get_size()))
@dataproviders.decorators.dataprovider_factory('sqlite', dataproviders.dataset.SQliteDataProvider.settings)
def sqlite_dataprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory('sqlite-table', dataproviders.dataset.SQliteDataTableProvider.settings)
def sqlite_datatableprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataTableProvider(dataset_source, **settings)
@dataproviders.decorators.dataprovider_factory('sqlite-dict', dataproviders.dataset.SQliteDataDictProvider.settings)
def sqlite_datadictprovider(self, dataset, **settings):
dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
return dataproviders.dataset.SQliteDataDictProvider(dataset_source, **settings)
class GeminiSQLite(SQlite):
"""Class describing a Gemini Sqlite database """
MetadataElement(name="gemini_version", default='0.10.0', param=MetadataParameter, desc="Gemini Version",
readonly=True, visible=True, no_value='0.10.0')
file_ext = "gemini.sqlite"
edam_format = "format_3622"
edam_data = "data_3498"
def set_meta(self, dataset, overwrite=True, **kwd):
super(GeminiSQLite, self).set_meta(dataset, overwrite=overwrite, **kwd)
try:
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT version FROM version"
result = c.execute(tables_query).fetchall()
for version, in result:
dataset.metadata.gemini_version = version
# TODO: Can/should we detect even more attributes, such as use of PED file, what was input annotation type, etc.
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super(GeminiSQLite, self).sniff(filename):
gemini_table_names = ["gene_detailed", "gene_summary", "resources", "sample_genotype_counts", "sample_genotypes", "samples",
"variant_impacts", "variants", "version"]
try:
conn = sqlite.connect(filename)
c = conn.cursor()
tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
result = c.execute(tables_query).fetchall()
result = [_[0] for _ in result]
for table_name in gemini_table_names:
if table_name not in result:
return False
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Gemini SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "Gemini SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
class CuffDiffSQlite(SQlite):
"""Class describing a CuffDiff SQLite database """
MetadataElement(name="cuffdiff_version", default='2.2.1', param=MetadataParameter, desc="CuffDiff Version",
readonly=True, visible=True, no_value='2.2.1')
MetadataElement(name="genes", default=[], param=MetadataParameter, desc="Genes",
readonly=True, visible=True, no_value=[])
MetadataElement(name="samples", default=[], param=MetadataParameter, desc="Samples",
readonly=True, visible=True, no_value=[])
file_ext = "cuffdiff.sqlite"
# TODO: Update this when/if there is a specific EDAM format for CuffDiff SQLite data.
edam_format = "format_3621"
def set_meta(self, dataset, overwrite=True, **kwd):
super(CuffDiffSQlite, self).set_meta(dataset, overwrite=overwrite, **kwd)
try:
genes = []
samples = []
conn = sqlite.connect(dataset.file_name)
c = conn.cursor()
tables_query = "SELECT value FROM runInfo where param = 'version'"
result = c.execute(tables_query).fetchall()
for version, in result:
dataset.metadata.cuffdiff_version = version
genes_query = 'SELECT gene_id, gene_short_name FROM genes ORDER BY gene_short_name'
result = c.execute(genes_query).fetchall()
for gene_id, gene_name in result:
if gene_name is None:
continue
gene = '%s: %s' % (gene_id, gene_name)
if gene not in genes:
genes.append(gene)
samples_query = 'SELECT DISTINCT(sample_name) as sample_name FROM samples ORDER BY sample_name'
result = c.execute(samples_query).fetchall()
for sample_name, in result:
if sample_name not in samples:
samples.append(sample_name)
dataset.metadata.genes = genes
dataset.metadata.samples = samples
except Exception as e:
log.warning('%s, set_meta Exception: %s', self, e)
def sniff(self, filename):
if super(CuffDiffSQlite, self).sniff(filename):
# These tables should be in any CuffDiff SQLite output.
cuffdiff_table_names = ['CDS', 'genes', 'isoforms', 'replicates',
'runInfo', 'samples', 'TSS']
try:
conn = sqlite.connect(filename)
c = conn.cursor()
tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
result = c.execute(tables_query).fetchall()
result = [_[0] for _ in result]
for table_name in cuffdiff_table_names:
if table_name not in result:
return False
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "CuffDiff SQLite Database, version %s" % (dataset.metadata.cuffdiff_version or 'unknown')
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek(self, dataset):
try:
return dataset.peek
except Exception:
return "CuffDiff SQLite Database, version %s" % (dataset.metadata.gemini_version or 'unknown')
class MzSQlite(SQlite):
"""Class describing a Proteomics Sqlite database """
file_ext = "mz.sqlite"
def set_meta(self, dataset, overwrite=True, **kwd):
super(MzSQlite, self).set_meta(dataset, overwrite=overwrite, **kwd)
def sniff(self, filename):
if super(MzSQlite, self).sniff(filename):
mz_table_names = ["DBSequence", "Modification", "Peaks", "Peptide", "PeptideEvidence", "Score", "SearchDatabase", "Source", "SpectraData", "Spectrum", "SpectrumIdentification"]
try:
conn = sqlite.connect(filename)
c = conn.cursor()
tables_query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
result = c.execute(tables_query).fetchall()
result = [_[0] for _ in result]
for table_name in mz_table_names:
if table_name not in result:
return False
return True
except Exception as e:
log.warning('%s, sniff Exception: %s', self, e)
return False
class BlibSQlite(SQlite):
"""Class describing a Proteomics Spectral Library Sqlite database """
MetadataElement(name="blib_version", default='1.8', param=MetadataParameter, desc="Blib | |
<filename>test/functional/abc-block-sigchecks-activation.py<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test activation of block sigchecks limits
"""
from test_framework.blocktools import (
create_block,
create_coinbase,
make_conform_to_ctor,
)
from test_framework.cdefs import (
BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO
)
from test_framework.messages import (
CBlock,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
FromHex,
)
from test_framework.mininode import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKDATASIG,
OP_CHECKDATASIGVERIFY,
OP_3DUP,
OP_RETURN,
OP_TRUE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal
from collections import deque
# Set test to run with sigops deactivation far in the future.
SIGCHECKS_ACTIVATION_TIME = 2000000000
# If we don't do this, autoreplay protection will activate before graviton and
# all our sigs will mysteriously fail.
REPLAY_PROTECTION_START_TIME = SIGCHECKS_ACTIVATION_TIME * 2
# We are going to use a tiny block size so we don't need to waste too much
# time with making transactions. (note -- minimum block size is 1000000)
# (just below a multiple, to test edge case)
MAXBLOCKSIZE = 8000 * BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO - 1
assert MAXBLOCKSIZE == 1127999
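# Editor's note (added): the assert above pins BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO
# at 141 bytes per sigcheck for this test, since 8000 * 141 - 1 = 1127999.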
# Blocks with too many sigchecks from cache give this error in log file:
BLOCK_SIGCHECKS_CACHED_ERROR = "blk-bad-inputs, CheckInputs exceeded SigChecks limit"
# Blocks with too many sigchecks discovered during parallel checks give
# this error in log file:
BLOCK_SIGCHECKS_PARALLEL_ERROR = "blk-bad-inputs, parallel script check failed"
MAX_TX_SIGCHECK = 3000
def create_transaction(spendfrom, custom_script, amount=None):
# Fund and sign a transaction to a given output.
# spendfrom should be a CTransaction with first output to OP_TRUE.
# custom output will go on position 1, after position 0 which will be
# OP_TRUE (so it can be reused).
customout = CTxOut(0, bytes(custom_script))
# set output amount to required dust if not given
customout.nValue = amount or (len(customout.serialize()) + 148) * 3
ctx = CTransaction()
ctx.vin.append(CTxIn(COutPoint(spendfrom.sha256, 0), b''))
ctx.vout.append(
CTxOut(0, bytes([OP_TRUE])))
ctx.vout.append(customout)
pad_tx(ctx)
fee = len(ctx.serialize())
ctx.vout[0].nValue = spendfrom.vout[0].nValue - customout.nValue - fee
ctx.rehash()
return ctx
def check_for_ban_on_rejected_tx(node, tx, reject_reason=None):
"""Check we are disconnected when sending a txn that the node rejects,
then reconnect after.
(Can't actually get banned, since bitcoind won't ban local peers.)"""
node.p2p.send_txs_and_test(
[tx], node, success=False, expect_disconnect=True, reject_reason=reject_reason)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
def check_for_ban_on_rejected_block(node, block, reject_reason=None):
"""Check we are disconnected when sending a block that the node rejects,
then reconnect after.
(Can't actually get banned, since bitcoind won't ban local peers.)"""
node.p2p.send_blocks_and_test(
[block], node, success=False, reject_reason=reject_reason, expect_disconnect=True)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
def check_for_no_ban_on_rejected_tx(node, tx, reject_reason=None):
"""Check we are not disconnected when sending a txn that the node rejects."""
node.p2p.send_txs_and_test(
[tx], node, success=False, reject_reason=reject_reason)
class BlockSigChecksActivationTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.block_heights = {}
self.extra_args = [["-phononactivationtime={}".format(
SIGCHECKS_ACTIVATION_TIME),
"-replayprotectionactivationtime={}".format(
REPLAY_PROTECTION_START_TIME),
"-excessiveblocksize={}".format(MAXBLOCKSIZE),
"-blockmaxsize={}".format(MAXBLOCKSIZE)]]
def getbestblock(self, node):
"""Get the best block. Register its height so we can use build_block."""
block_height = node.getblockcount()
blockhash = node.getblockhash(block_height)
block = FromHex(CBlock(), node.getblock(blockhash, 0))
block.calc_sha256()
self.block_heights[block.sha256] = block_height
return block
def build_block(self, parent, transactions=(),
nTime=None, cbextrascript=None):
"""Make a new block with an OP_1 coinbase output.
Requires parent to have its height registered."""
parent.calc_sha256()
block_height = self.block_heights[parent.sha256] + 1
block_time = (parent.nTime + 1) if nTime is None else nTime
block = create_block(
parent.sha256, create_coinbase(block_height), block_time)
if cbextrascript is not None:
block.vtx[0].vout.append(CTxOut(0, cbextrascript))
block.vtx[0].rehash()
block.vtx.extend(transactions)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.block_heights[block.sha256] = block_height
return block
def run_test(self):
[node] = self.nodes
node.add_p2p_connection(P2PDataStore())
# Get out of IBD
node.generatetoaddress(1, node.get_deterministic_priv_key().address)
tip = self.getbestblock(node)
self.log.info("Create some blocks with OP_1 coinbase for spending.")
blocks = []
for _ in range(20):
tip = self.build_block(tip)
blocks.append(tip)
node.p2p.send_blocks_and_test(blocks, node, success=True)
self.spendable_outputs = deque(block.vtx[0] for block in blocks)
self.log.info("Mature the blocks.")
node.generatetoaddress(100, node.get_deterministic_priv_key().address)
tip = self.getbestblock(node)
# To make compact and fast-to-verify transactions, we'll use
# CHECKDATASIG over and over with the same data.
# (Using the same stuff over and over again means we get to hit the
# node's signature cache and don't need to make new signatures every
# time.)
cds_message = b''
# r=1 and s=1 ecdsa, the minimum values.
cds_signature = bytes.fromhex('3006020101020101')
# Recovered pubkey
cds_pubkey = bytes.fromhex(
'03089b476b570d66fad5a20ae6188ebbaf793a4c2a228c65f3d79ee8111d56c932')
def minefunding2(n):
""" Mine a block with a bunch of outputs that are very dense
sigchecks when spent (2 sigchecks each); return the inputs that can
be used to spend. """
cds_scriptpubkey = CScript(
[cds_message, cds_pubkey, OP_3DUP, OP_CHECKDATASIGVERIFY, OP_CHECKDATASIGVERIFY])
# The scriptsig is carefully padded to have size 26, which is the
# shortest allowed for 2 sigchecks for mempool admission.
# The resulting inputs have size 67 bytes, 33.5 bytes/sigcheck.
cds_scriptsig = CScript([b'x' * 16, cds_signature])
assert_equal(len(cds_scriptsig), 26)
self.log.debug("Gen {} with locking script {} unlocking script {} .".format(
n, cds_scriptpubkey.hex(), cds_scriptsig.hex()))
tx = self.spendable_outputs.popleft()
usable_inputs = []
txes = []
for _ in range(n):
tx = create_transaction(tx, cds_scriptpubkey)
txes.append(tx)
usable_inputs.append(
CTxIn(COutPoint(tx.sha256, 1), cds_scriptsig))
newtip = self.build_block(tip, txes)
node.p2p.send_blocks_and_test([newtip], node)
return usable_inputs, newtip
self.log.info("Funding special coins that have high sigchecks")
# mine 5000 funded outputs (10000 sigchecks)
# will be used pre-activation and post-activation
usable_inputs, tip = minefunding2(5000)
# assemble them into 50 txes with 100 inputs each (200 sigchecks)
submittxes_1 = []
while len(usable_inputs) >= 100:
tx = CTransaction()
tx.vin = [usable_inputs.pop() for _ in range(100)]
tx.vout = [CTxOut(0, CScript([OP_RETURN]))]
tx.rehash()
submittxes_1.append(tx)
# mine 5000 funded outputs (10000 sigchecks)
# will be used post-activation
usable_inputs, tip = minefunding2(5000)
# assemble them into 50 txes with 100 inputs each (200 sigchecks)
submittxes_2 = []
while len(usable_inputs) >= 100:
tx = CTransaction()
tx.vin = [usable_inputs.pop() for _ in range(100)]
tx.vout = [CTxOut(0, CScript([OP_RETURN]))]
tx.rehash()
submittxes_2.append(tx)
# Check high sigcheck transactions
self.log.info("Create transaction that have high sigchecks")
fundings = []
def make_spend(sigcheckcount):
# Add a funding tx to fundings, and return a tx spending that using
# scriptsig.
self.log.debug(
"Gen tx with {} sigchecks.".format(sigcheckcount))
def get_script_with_sigcheck(count):
return CScript([cds_message,
cds_pubkey] + (count - 1) * [OP_3DUP, OP_CHECKDATASIGVERIFY] + [OP_CHECKDATASIG])
# get funds locked with OP_1
sourcetx = self.spendable_outputs.popleft()
# make funding that forwards to scriptpubkey
last_sigcheck_count = ((sigcheckcount - 1) % 30) + 1
fundtx = create_transaction(
sourcetx, get_script_with_sigcheck(last_sigcheck_count))
fill_sigcheck_script = get_script_with_sigcheck(30)
remaining_sigcheck = sigcheckcount
while remaining_sigcheck > 30:
fundtx.vout[0].nValue -= 1000
fundtx.vout.append(CTxOut(100, bytes(fill_sigcheck_script)))
remaining_sigcheck -= 30
fundtx.rehash()
fundings.append(fundtx)
# make the spending
scriptsig = CScript([cds_signature])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(fundtx.sha256, 1), scriptsig))
input_index = 2
remaining_sigcheck = sigcheckcount
while remaining_sigcheck > 30:
tx.vin.append(
CTxIn(
COutPoint(
fundtx.sha256,
input_index),
scriptsig))
remaining_sigcheck -= 30
input_index += 1
tx.vout.append(CTxOut(0, CScript([OP_RETURN])))
pad_tx(tx)
tx.rehash()
return tx
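# Editor's note (worked example, added): make_spend(65) would fund one
# 5-sigcheck output (last_sigcheck_count = ((65 - 1) % 30) + 1 = 5) plus two
# 30-sigcheck "fill" outputs, and the spending tx consumes all three inputs:
# 5 + 30 + 30 = 65 sigchecks.  make_spend(MAX_TX_SIGCHECK) below works the same
# way, spreading 3000 sigchecks over 100 inputs of 30 each.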
# Create transactions with many sigchecks.
good_tx = make_spend(MAX_TX_SIGCHECK)
bad_tx = make_spend(MAX_TX_SIGCHECK + 1)
tip = self.build_block(tip, fundings)
node.p2p.send_blocks_and_test([tip], node)
# Both tx are accepted before the activation.
pre_activation_sigcheck_block = self.build_block(
tip, [good_tx, bad_tx])
node.p2p.send_blocks_and_test([pre_activation_sigcheck_block], node)
node.invalidateblock(pre_activation_sigcheck_block.hash)
# Activation tests
self.log.info("Approach to just before upgrade activation")
# Move our clock to the upgrade time so we will accept such
# future-timestamped blocks.
node.setmocktime(SIGCHECKS_ACTIVATION_TIME + 10)
# Mine six blocks with timestamp starting at
# SIGCHECKS_ACTIVATION_TIME-1
blocks = []
for i in range(-1, 5):
tip = self.build_block(tip, nTime=SIGCHECKS_ACTIVATION_TIME + i)
blocks.append(tip)
node.p2p.send_blocks_and_test(blocks, node)
assert_equal(node.getblockchaininfo()[
'mediantime'], SIGCHECKS_ACTIVATION_TIME - 1)
self.log.info(
"The next block will activate, but the activation block itself must follow old rules")
# Send the 50 txes and get the node to mine as many as possible (it should do all)
# The node is happy mining and validating a 10000 sigcheck block before
# activation.
node.p2p.send_txs_and_test(submittxes_1, node)
[blockhash] = node.generatetoaddress(
1, node.get_deterministic_priv_key().address)
assert_equal(set(node.getblock(blockhash, 1)["tx"][1:]), {
t.hash for t in submittxes_1})
# We have activated, but let's invalidate that.
assert_equal(node.getblockchaininfo()[
'mediantime'], SIGCHECKS_ACTIVATION_TIME)
node.invalidateblock(blockhash)
# Try again manually and invalidate that too
goodblock = self.build_block(tip, submittxes_1)
node.p2p.send_blocks_and_test([goodblock], node)
node.invalidateblock(goodblock.hash)
# All transactions should be back in mempool.
assert_equal(set(node.getrawmempool()), {t.hash for t in submittxes_1})
self.log.info("Mine the activation block itself")
tip = self.build_block(tip)
node.p2p.send_blocks_and_test([tip], node)
self.log.info("We have activated!")
assert_equal(node.getblockchaininfo()[
'mediantime'], SIGCHECKS_ACTIVATION_TIME)
self.log.info(
"Try a block with a transaction going over the limit (limit: {})".format(MAX_TX_SIGCHECK))
bad_tx_block = self.build_block(tip, [bad_tx])
check_for_ban_on_rejected_block(
node, bad_tx_block, reject_reason=BLOCK_SIGCHECKS_PARALLEL_ERROR)
self.log.info(
"Try a block with a transaction just under the limit (limit: {})".format(MAX_TX_SIGCHECK))
good_tx_block = self.build_block(tip, [good_tx])
node.p2p.send_blocks_and_test([good_tx_block], node)
node.invalidateblock(good_tx_block.hash)
# save this tip for later
# ~ upgrade_block = tip
# Transactions still in pool:
assert_equal(set(node.getrawmempool()), {t.hash for t in submittxes_1})
self.log.info("Try sending 10000-sigcheck blocks after activation (limit: {})".format(
MAXBLOCKSIZE | |
default='', nargs='?',
help="Input filename of 2D spectrum")
parser_ext.add_argument("-o", "--output", type=str, default='',
help="Output filename of 1D spectrum (FITS Table)")
parser_ext.add_argument("--axis", type=int, default=1,
help="Dispersion axis: 1 horizontal, 2: vertical")
parser_ext.add_argument('--auto', action='store_true',
help="Use automatic extraction instead of interactive GUI")
# Define parameters based on default values:
set_default_pars(parser_ext, section='extract', default_type=int,
ignore_pars=['interactive'])
# Spectral Redux:
parser_redux = tasks.add_parser('spex', formatter_class=set_help_width(30),
help="Run the full spectroscopic pipeline")
parser_redux.add_argument("params", type=str,
help="Input filename of pipeline configuration in YAML format")
parser_redux.add_argument('-O', "--object", type=str, nargs='+',
help="Object name of targets to reduce. Must match OBJECT keyword in the FITS header")
parser_redux.add_argument("-s", "--silent", action="store_false",
help="Minimze the output to terminal")
parser_redux.add_argument("-i", "--interactive", action="store_true",
help="Use interactive interface throughout")
parser_break = tasks.add_parser('', help="")
# Imaging Redux:
parser_phot = tasks.add_parser('phot', formatter_class=set_help_width(30),
help="Run the full imaging pipeline")
parser_phot.add_argument("params", type=str,
help="Input filename of pipeline configuration in YAML format")
parser_phot.add_argument("-s", "--silent", action="store_false",
help="Minimze the output to terminal")
parser_imtrim = tasks.add_parser('imtrim', formatter_class=set_help_width(30),
help="Trim images")
parser_imtrim.add_argument("input", type=str,
help="List of filenames to trim")
parser_imtrim.add_argument("--dir", type=str, default='',
help="Output directory")
parser_imtrim.add_argument("--flat", type=str, default='',
help="Flat field image to use for edge detection")
parser_imtrim.add_argument('-e', "--edges", type=int, nargs=4,
help="Trim edges [left right bottom top]")
parser_imcomb = tasks.add_parser('imcombine', formatter_class=set_help_width(44),
help="Combine images")
parser_imcomb.add_argument("input", type=str,
help="List of filenames to combine")
parser_imcomb.add_argument("output", type=str,
help="Filename of combined image")
parser_imcomb.add_argument("--log", type=str, default='',
help="Filename of image combination log")
parser_imcomb.add_argument("--fringe", type=str, default='',
help="Filename of normalized fringe image")
set_default_pars(parser_imcomb, section='combine', default_type=int, mode='img')
parser_fringe = tasks.add_parser('fringe', formatter_class=set_help_width(30),
help="Create average fringe images")
parser_fringe.add_argument("input", type=str,
help="List of filenames to combine")
parser_fringe.add_argument("output", type=str,
help="Filename of normalized fringe image")
parser_fringe.add_argument("--fig", type=str, default='',
help="Filename of figure showing fringe image")
parser_fringe.add_argument("--sigma", type=float, default=3,
help="Masking threshold (default = 3.0)")
parser_sep = tasks.add_parser('sep', formatter_class=set_help_width(40),
help="Perform source extraction using SEP (SExtractor)")
parser_sep.add_argument("input", type=str,
help="Input image to analyse")
parser_sep.add_argument('-z', "--zero", type=float, default=0.,
help="Magnitude zeropoint, default is 0 (instrument mags)")
set_default_pars(parser_sep, section='sep-background', default_type=int, mode='img')
set_default_pars(parser_sep, section='sep-extract', default_type=int, mode='img')
parser_wcs = tasks.add_parser('wcs', formatter_class=set_help_width(30),
help="Perform WCS calibration")
parser_wcs.add_argument("input", type=str,
help="Input image to analyse")
parser_wcs.add_argument("table", type=str,
help="Source identification table from SEP (_phot.fits)")
parser_wcs.add_argument('-o', "--output", type=str, default='',
help="Filename of WCS calibrated image (.fits)")
parser_wcs.add_argument("--fig", type=str, default='',
help="Filename of diagnostic figure (.pdf)")
set_default_pars(parser_wcs, section='wcs', default_type=int, mode='img')
parser_autozp = tasks.add_parser('autozp', formatter_class=set_help_width(30),
help="Perform auto-calibration of magnitude zero point using SDSS data")
parser_autozp.add_argument("input", type=str,
help="Input WCS calibrated image to analyse")
parser_autozp.add_argument("table", type=str,
help="Source identification table from SEP (_phot.fits)")
parser_autozp.add_argument("--fig", type=str, default='',
help="Filename of diagnostic figure (.pdf), autogenerated by default")
set_default_pars(parser_autozp, section='sdss_flux', default_type=int, mode='img')
parser_findnew = tasks.add_parser('findnew', formatter_class=set_help_width(30),
help="Identify transient sources compared to Gaia")
parser_findnew.add_argument("input", type=str,
help="Input WCS calibrated image to analyse")
parser_findnew.add_argument("table", type=str,
help="Source identification table from SEP (_phot.fits)")
parser_findnew.add_argument("--bat", type=float, nargs=3,
help="Localisation constraint from SWIFT/BAT (ra [deg] dec [deg] radius [arcmin])")
parser_findnew.add_argument("--xrt", type=float, nargs=3,
help="Localisation constraint from SWIFT/XRT (ra [deg] dec [deg] radius [arcsec])")
parser_findnew.add_argument("--limit", type=float, default=20.1,
help="Magnitude limit (default = 20.1 mag to match Gaia depth)")
parser_findnew.add_argument('-z', "--zp", type=float,
help="Magnitude zero point in case the source catalog has not been flux calibrated")
args = parser.parse_args()
# -- Define Workflow
task = args.task
log = ""
if task == 'init':
initialize(args.path, args.mode, pfc_fname=args.output, pars_fname=args.pars, verbose=args.silent)
elif task == 'spex':
from pynot.redux import run_pipeline
# print_credits()
run_pipeline(options_fname=args.params,
object_id=args.object,
verbose=args.silent,
interactive=args.interactive)
elif task == 'bias':
from pynot.calibs import combine_bias_frames
print("Running task: Bias combination")
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
_, log = combine_bias_frames(input_list, args.output, kappa=args.kappa, method=args.method)
elif task == 'sflat':
from pynot.calibs import combine_flat_frames, normalize_spectral_flat
print("Running task: Spectral flat field combination and normalization")
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
flatcombine, log = combine_flat_frames(input_list, output='', mbias=args.bias, mode='spec',
dispaxis=args.axis, kappa=args.kappa, method=args.method)
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'output', 'axis', 'bias', 'kappa']
for varname in vars_to_remove:
options.pop(varname)
_, log = normalize_spectral_flat(flatcombine, args.output, dispaxis=args.axis, **options)
elif task == 'corr':
from pynot.scired import correct_raw_file
import glob
print("Running task: Bias subtraction and flat field correction")
if '.fits' in args.input:
# Load image or wildcard list:
if '?' in args.input or '*' in args.input:
input_list = glob.glob(args.input)
else:
input_list = [args.input]
else:
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
# Mode determines the header keywords to update (CDELT or CD-matrix)
if args.img:
mode = 'img'
else:
mode = 'spec'
if args.dir != '' and not os.path.exists(args.dir):
os.mkdir(args.dir)
for fname in input_list:
basename = os.path.basename(fname)
output = 'corr_%s' % basename
if args.dir != '':
output = os.path.join(args.dir, output)
_ = correct_raw_file(fname, output=output, bias_fname=args.bias, flat_fname=args.flat,
overscan=50, overwrite=True, mode=mode)
print(" - Image: %s -> %s" % (fname, output))
elif task == 'identify':
from PyQt5 import QtWidgets
from pynot.identify_gui import GraphicInterface
# Launch App:
app = QtWidgets.QApplication(sys.argv)
gui = GraphicInterface(args.arc,
linelist_fname=args.lines,
dispaxis=args.axis,
air=args.air,
loc=args.loc,
output=args.output)
gui.show()
app.exit(app.exec_())
elif task == 'response':
from PyQt5 import QtWidgets
from pynot.response_gui import ResponseGUI
# Launch App:
app = QtWidgets.QApplication(sys.argv)
gui = ResponseGUI(args.input, output_fname=args.output)
gui.show()
app.exit(app.exec_())
elif task == 'wave1d':
from pynot.wavecal import wavecal_1d
print("Running task: 1D Wavelength Calibration")
log = wavecal_1d(args.input, args.table, output=args.output, order_wl=args.order_wl,
log=args.log, N_out=args.npix, linearize=args.no_int)
elif task == 'wave2d':
from pynot.wavecal import rectify
print("Running task: 2D Rectification and Wavelength Calibration")
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'arc', 'table', 'output', 'axis']
for varname in vars_to_remove:
options.pop(varname)
log = rectify(args.input, args.arc, args.table, output=args.output, fig_dir='./',
dispaxis=args.axis, **options)
elif task == 'skysub':
from pynot.scired import auto_fit_background
print("Running task: Background Subtraction")
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'output', 'axis', 'auto']
for varname in vars_to_remove:
options.pop(varname)
log = auto_fit_background(args.input, args.output, dispaxis=args.axis,
plot_fname="skysub_diagnostics.pdf",
**options)
elif task == 'crr':
from pynot.scired import correct_cosmics
print("Running task: Cosmic Ray Rejection")
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'output']
for varname in vars_to_remove:
options.pop(varname)
log = correct_cosmics(args.input, args.output, **options)
elif task == 'flux1d':
from pynot.response import flux_calibrate_1d
print("Running task: Flux Calibration of 1D Spectrum")
log = flux_calibrate_1d(args.input, output=args.output, response=args.response)
elif task == 'flux2d':
from pynot.response import flux_calibrate
print("Running task: Flux Calibration of 2D Image")
log = flux_calibrate(args.input, output=args.output, response=args.response)
elif task == 'extract':
from PyQt5 import QtWidgets
from pynot.extract_gui import ExtractGUI
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'output', 'axis']
for varname in vars_to_remove:
options.pop(varname)
if args.auto:
from pynot.extraction import auto_extract
print("Running task: 1D Extraction")
log = auto_extract(args.input, args.output, dispaxis=args.axis,
pdf_fname="extract_diagnostics.pdf", **options)
else:
app = QtWidgets.QApplication(sys.argv)
gui = ExtractGUI(args.input, output_fname=args.output, dispaxis=args.axis, **options)
gui.show()
app.exit(app.exec_())
elif task == 'phot':
from pynot.phot_redux import run_pipeline
run_pipeline(options_fname=args.params,
verbose=args.silent)
elif task == 'imflat':
print("Running task: Combination of Imaging Flat Fields")
from pynot.calibs import combine_flat_frames
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
_, log = combine_flat_frames(input_list, output=args.output, mbias=args.bias, mode='img',
kappa=args.kappa, method=args.method)
elif task == 'imtrim':
print("Running task: Image Trimming")
from pynot.scired import detect_filter_edge, trim_filter_edge
if args.edges is not None:
image_region = args.edges
elif args.flat is not None:
image_region = detect_filter_edge(args.flat)
print(" Automatically detected image edges:")
print(" left=%i right=%i bottom=%i top=%i" % image_region)
print("")
else:
print(" Invalid input! Either '--flat' or '--edges' must be set!")
return
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
for fname in input_list:
print(" Trimming image: %s" % fname)
trim_filter_edge(fname, *image_region, output_dir=args.dir)
elif task == 'imcombine':
print("Running task: Image Combination")
from pynot.phot import image_combine
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'output', 'log', 'fringe']
for varname in vars_to_remove:
options.pop(varname)
log = image_combine(input_list, output=args.output, log_name=args.log, fringe_image=args.fringe, **options)
elif task == 'fringe':
print("Running task: Creating Average Fringe Image")
from pynot.phot import create_fringe_image
input_list = np.loadtxt(args.input, dtype=str, usecols=(0,))
log = create_fringe_image(input_list, output=args.output, fig_fname=args.fig, threshold=args.sigma)
elif task == 'sep':
print("Running task: Extracting Sources and Measuring Aperture Fluxes")
from pynot.phot import source_detection
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'zero']
bg_options = {}
ext_options = {}
for varname, value in options.items():
if varname in vars_to_remove:
continue
elif varname in ['bw', 'bh', 'fw', 'fh', 'fthresh']:
bg_options[varname] = value
else:
ext_options[varname] = value
_, _, log = source_detection(args.input, zeropoint=args.zero,
kwargs_bg=bg_options, kwargs_ext=ext_options)
elif task == 'wcs':
print("Running task: WCS calibration")
from pynot.wcs import correct_wcs
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'output', 'fig', 'table']
for varname in vars_to_remove:
options.pop(varname)
log = correct_wcs(args.input, args.table, output_fname=args.output, fig_fname=args.fig, **options)
elif task == 'autozp':
print("Running task: Zero point auto-calibration (SDSS)")
from pynot.phot import flux_calibration_sdss
options = copy(vars(args))
vars_to_remove = ['task', 'input', 'table', 'fig']
for varname in vars_to_remove:
options.pop(varname)
log = flux_calibration_sdss(args.input, args.table, fig_fname=args.fig, **options)
elif task == 'findnew':
print("Running task: Transient identification")
from pynot.transients import find_new_sources
if args.bat | |
# Copyright (c) 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import email
import email.errors
import imp
import os
import re
import sysconfig
import tempfile
import textwrap
import fixtures
import mock
import pkg_resources
import six
import testscenarios
import testtools
from testtools import matchers
import virtualenv
from wheel import wheelfile
from pbr import git
from pbr import packaging
from pbr.tests import base
PBR_ROOT = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
class TestRepo(fixtures.Fixture):
"""A git repo for testing with.
    Use of TempHomeDir with this fixture is strongly recommended: due to the
    lack of `git config --local` in older versions of git, the fixture would
    otherwise write to the user's global configuration.
"""
def __init__(self, basedir):
super(TestRepo, self).__init__()
self._basedir = basedir
def setUp(self):
super(TestRepo, self).setUp()
base._run_cmd(['git', 'init', '.'], self._basedir)
base._config_git()
base._run_cmd(['git', 'add', '.'], self._basedir)
def commit(self, message_content='test commit'):
files = len(os.listdir(self._basedir))
path = self._basedir + '/%d' % files
open(path, 'wt').close()
base._run_cmd(['git', 'add', path], self._basedir)
base._run_cmd(['git', 'commit', '-m', message_content], self._basedir)
def uncommit(self):
base._run_cmd(['git', 'reset', '--hard', 'HEAD^'], self._basedir)
def tag(self, version):
base._run_cmd(
['git', 'tag', '-sm', 'test tag', version], self._basedir)
class GPGKeyFixture(fixtures.Fixture):
"""Creates a GPG key for testing.
It's recommended that this be used in concert with a unique home
directory.
"""
def setUp(self):
super(GPGKeyFixture, self).setUp()
tempdir = self.useFixture(fixtures.TempDir())
        gnupg_version_re = re.compile(r'^gpg\s.*\s(\d+)\.(\d+)\.(\d+)')
gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path)
for line in gnupg_version[0].split('\n'):
gnupg_version = gnupg_version_re.match(line)
if gnupg_version:
gnupg_version = (int(gnupg_version.group(1)),
int(gnupg_version.group(2)),
int(gnupg_version.group(3)))
break
else:
if gnupg_version is None:
gnupg_version = (0, 0, 0)
config_file = tempdir.path + '/key-config'
f = open(config_file, 'wt')
try:
if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
f.write("""
%no-protection
%transient-key
""")
f.write("""
%no-ask-passphrase
Key-Type: RSA
Name-Real: Example Key
Name-Comment: N/A
Name-Email: <EMAIL>
Expire-Date: 2d
Preferences: (setpref)
%commit
""")
finally:
f.close()
# Note that --quick-random (--debug-quick-random in GnuPG 2.x)
# does not have a corresponding preferences file setting and
# must be passed explicitly on the command line instead
if gnupg_version[0] == 1:
gnupg_random = '--quick-random'
elif gnupg_version[0] >= 2:
gnupg_random = '--debug-quick-random'
else:
gnupg_random = ''
base._run_cmd(
['gpg', '--gen-key', '--batch', gnupg_random, config_file],
tempdir.path)
class Venv(fixtures.Fixture):
"""Create a virtual environment for testing with.
:attr path: The path to the environment root.
:attr python: The path to the python binary in the environment.
"""
def __init__(self, reason, modules=(), pip_cmd=None):
"""Create a Venv fixture.
:param reason: A human readable string to bake into the venv
file path to aid diagnostics in the case of failures.
:param modules: A list of modules to install, defaults to latest
pip, wheel, and the working copy of PBR.
:attr pip_cmd: A list to override the default pip_cmd passed to
python for installing base packages.
"""
self._reason = reason
if modules == ():
pbr = 'file://%s#egg=pbr' % PBR_ROOT
modules = ['pip', 'wheel', pbr]
self.modules = modules
if pip_cmd is None:
self.pip_cmd = ['-m', 'pip', 'install']
else:
self.pip_cmd = pip_cmd
def _setUp(self):
path = self.useFixture(fixtures.TempDir()).path
virtualenv.create_environment(path, clear=True)
python = os.path.join(path, 'bin', 'python')
command = [python] + self.pip_cmd + ['-U']
if self.modules and len(self.modules) > 0:
command.extend(self.modules)
self.useFixture(base.CapturedSubprocess(
'mkvenv-' + self._reason, command))
self.addCleanup(delattr, self, 'path')
self.addCleanup(delattr, self, 'python')
self.path = path
self.python = python
return path, python
class CreatePackages(fixtures.Fixture):
"""Creates packages from dict with defaults
:param package_dirs: A dict of package name to directory strings
{'pkg_a': '/tmp/path/to/tmp/pkg_a', 'pkg_b': '/tmp/path/to/tmp/pkg_b'}
"""
defaults = {
'setup.py': textwrap.dedent(six.u("""\
#!/usr/bin/env python
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
)
""")),
'setup.cfg': textwrap.dedent(six.u("""\
[metadata]
name = {pkg_name}
"""))
}
def __init__(self, packages):
"""Creates packages from dict with defaults
        :param packages: a dict mapping each package name to a second dict
            (which may be empty) that maps filenames to the string contents
            of those files, e.g.
            {'package-a': {'requirements.txt': 'string', 'setup.cfg': 'string'}}
"""
self.packages = packages
def _writeFile(self, directory, file_name, contents):
path = os.path.abspath(os.path.join(directory, file_name))
path_dir = os.path.dirname(path)
if not os.path.exists(path_dir):
if path_dir.startswith(directory):
os.makedirs(path_dir)
else:
raise ValueError
with open(path, 'wt') as f:
f.write(contents)
def _setUp(self):
tmpdir = self.useFixture(fixtures.TempDir()).path
package_dirs = {}
for pkg_name in self.packages:
pkg_path = os.path.join(tmpdir, pkg_name)
package_dirs[pkg_name] = pkg_path
os.mkdir(pkg_path)
for cf in ['setup.py', 'setup.cfg']:
if cf in self.packages[pkg_name]:
contents = self.packages[pkg_name].pop(cf)
else:
contents = self.defaults[cf].format(pkg_name=pkg_name)
self._writeFile(pkg_path, cf, contents)
for cf in self.packages[pkg_name]:
self._writeFile(pkg_path, cf, self.packages[pkg_name][cf])
self.useFixture(TestRepo(pkg_path)).commit()
self.addCleanup(delattr, self, 'package_dirs')
self.package_dirs = package_dirs
return package_dirs
class TestPackagingInGitRepoWithCommit(base.BaseTestCase):
scenarios = [
('preversioned', dict(preversioned=True)),
('postversioned', dict(preversioned=False)),
]
def setUp(self):
super(TestPackagingInGitRepoWithCommit, self).setUp()
self.repo = self.useFixture(TestRepo(self.package_dir))
self.repo.commit()
def test_authors(self):
self.run_setup('sdist', allow_fail=False)
# One commit, something should be in the authors list
with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
body = f.read()
self.assertNotEqual(body, '')
def test_changelog(self):
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
# One commit, something should be in the ChangeLog list
self.assertNotEqual(body, '')
def test_changelog_handles_astrisk(self):
self.repo.commit(message_content="Allow *.openstack.org to work")
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
self.assertIn('\*', body)
def test_changelog_handles_dead_links_in_commit(self):
self.repo.commit(message_content="See os_ for to_do about qemu_.")
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
self.assertIn('os\_', body)
self.assertIn('to\_do', body)
self.assertIn('qemu\_', body)
def test_changelog_handles_backticks(self):
self.repo.commit(message_content="Allow `openstack.org` to `work")
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
self.assertIn('\`', body)
def test_manifest_exclude_honoured(self):
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(
self.package_dir,
'pbr_testpackage.egg-info/SOURCES.txt'), 'r') as f:
body = f.read()
self.assertThat(
body, matchers.Not(matchers.Contains('pbr_testpackage/extra.py')))
self.assertThat(body, matchers.Contains('pbr_testpackage/__init__.py'))
def test_install_writes_changelog(self):
stdout, _, _ = self.run_setup(
'install', '--root', self.temp_dir + 'installed',
allow_fail=False)
self.expectThat(stdout, matchers.Contains('Generating ChangeLog'))
class TestExtrafileInstallation(base.BaseTestCase):
def test_install_glob(self):
stdout, _, _ = self.run_setup(
'install', '--root', self.temp_dir + 'installed',
allow_fail=False)
self.expectThat(
stdout, matchers.Contains('copying data_files/a.txt'))
self.expectThat(
stdout, matchers.Contains('copying data_files/b.txt'))
class TestPackagingInGitRepoWithoutCommit(base.BaseTestCase):
def setUp(self):
super(TestPackagingInGitRepoWithoutCommit, self).setUp()
self.useFixture(TestRepo(self.package_dir))
self.run_setup('sdist', allow_fail=False)
def test_authors(self):
# No commits, no authors in list
with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
body = f.read()
self.assertEqual('\n', body)
def test_changelog(self):
# No commits, nothing should be in the ChangeLog list
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
self.assertEqual('CHANGES\n=======\n\n', body)
class TestPackagingWheels(base.BaseTestCase):
def setUp(self):
super(TestPackagingWheels, self).setUp()
self.useFixture(TestRepo(self.package_dir))
# Build the wheel
self.run_setup('bdist_wheel', allow_fail=False)
# Slowly construct the path to the generated whl
dist_dir = os.path.join(self.package_dir, 'dist')
relative_wheel_filename = os.listdir(dist_dir)[0]
absolute_wheel_filename = os.path.join(
dist_dir, relative_wheel_filename)
wheel_file = wheelfile.WheelFile(absolute_wheel_filename)
wheel_name = wheel_file.parsed_filename.group('namever')
# Create a directory path to unpack the wheel to
self.extracted_wheel_dir = os.path.join(dist_dir, wheel_name)
# Extract the wheel contents to the directory we just created
wheel_file.extractall(self.extracted_wheel_dir)
wheel_file.close()
def test_data_directory_has_wsgi_scripts(self):
# Build the path to the scripts directory
scripts_dir = os.path.join(
self.extracted_wheel_dir, 'pbr_testpackage-0.0.data/scripts')
self.assertTrue(os.path.exists(scripts_dir))
scripts = os.listdir(scripts_dir)
self.assertIn('pbr_test_wsgi', scripts)
self.assertIn('pbr_test_wsgi_with_class', scripts)
self.assertNotIn('pbr_test_cmd', scripts)
self.assertNotIn('pbr_test_cmd_with_class', scripts)
def test_generates_c_extensions(self):
built_package_dir = os.path.join(
self.extracted_wheel_dir, 'pbr_testpackage')
static_object_filename = 'testext.so'
soabi = get_soabi()
if soabi:
static_object_filename = 'testext.{0}.so'.format(soabi)
static_object_path = os.path.join(
built_package_dir, static_object_filename)
self.assertTrue(os.path.exists(built_package_dir))
self.assertTrue(os.path.exists(static_object_path))
class TestPackagingHelpers(testtools.TestCase):
def test_generate_script(self):
group = 'console_scripts'
entry_point = pkg_resources.EntryPoint(
name='test-ep',
module_name='pbr.packaging',
attrs=('LocalInstallScripts',))
header = '#!/usr/bin/env fake-header\n'
template = ('%(group)s %(module_name)s %(import_target)s '
'%(invoke_target)s')
generated_script = packaging.generate_script(
group, | |
0), # East Asian ideograph
0x213157: (0x4FE1, 0), # East Asian ideograph
0x223158: (0x6357, 0), # East Asian ideograph
0x213159: (0x4FB5, 0), # East Asian ideograph
0x22315A: (0x633C, 0), # East Asian ideograph
0x22315B: (0x6358, 0), # East Asian ideograph
0x21315C: (0x4FDE, 0), # East Asian ideograph
0x27315D: (0x4FA0, 0), # East Asian ideograph
0x21315E: (0x4FCF, 0), # East Asian ideograph
0x22315F: (0x6354, 0), # East Asian ideograph
0x213160: (0x4FDA, 0), # East Asian ideograph
0x213161: (0x4FDD, 0), # East Asian ideograph
0x213162: (0x4FC3, 0), # East Asian ideograph
0x213163: (0x4FD8, 0), # East Asian ideograph
0x233164: (0x89F7, 0), # East Asian ideograph
0x213165: (0x4FCA, 0), # East Asian ideograph
0x213166: (0x4FAE, 0), # East Asian ideograph
0x213167: (0x4FD0, 0), # East Asian ideograph
0x223168: (0x637D, 0), # East Asian ideograph
0x273169: (0x7CFB, 0), # East Asian ideograph (duplicate simplified)
0x22316A: (0x63B6, 0), # East Asian ideograph
0x22316B: (0x6382, 0), # East Asian ideograph
0x27316C: (0x4ED3, 0), # East Asian ideograph
0x23316D: (0x8A07, 0), # East Asian ideograph
0x22316E: (0x639F, 0), # East Asian ideograph
0x21483D: (0x6E9D, 0), # East Asian ideograph
0x233170: (0x8A0F, 0), # East Asian ideograph
0x233171: (0x8A11, 0), # East Asian ideograph
0x233172: (0x8A12, 0), # East Asian ideograph
0x233173: (0x8A0D, 0), # East Asian ideograph
0x213174: (0x4FF8, 0), # East Asian ideograph
0x213175: (0x5028, 0), # East Asian ideograph
0x213176: (0x5014, 0), # East Asian ideograph
0x213177: (0x5016, 0), # East Asian ideograph
0x213178: (0x5029, 0), # East Asian ideograph
0x223179: (0x6381, 0), # East Asian ideograph
0x23317A: (0x8A27, 0), # East Asian ideograph
0x22317B: (0x6397, 0), # East Asian ideograph
0x21317C: (0x503C, 0), # East Asian ideograph
0x23317D: (0x8A29, 0), # East Asian ideograph
0x21317E: (0x4FFA, 0), # East Asian ideograph
0x234840: (0x9445, 0), # East Asian ideograph
0x274841: (0x6C85, 0), # East Asian ideograph
0x6F5B6C: (0xD30C, 0), # Korean hangul
0x234842: (0x9450, 0), # East Asian ideograph
0x273266: (0x4F18, 0), # East Asian ideograph
0x4B513B: (0x7CF8, 0), # East Asian ideograph
0x6F4B6D: (0xB0EC, 0), # Korean hangul
0x274844: (0x6E7F, 0), # East Asian ideograph
0x2D4845: (0x6E29, 0), # East Asian ideograph
0x4B4846: (0x78C6, 0), # East Asian ideograph
0x226F69: (0x7CC5, 0), # East Asian ideograph
0x274848: (0x6CA7, 0), # East Asian ideograph
0x4B3622: (0x8C18, 0), # East Asian ideograph
0x6F4A61: (0xAED0, 0), # Korean hangul
0x273733: (0x5578, 0), # East Asian ideograph
0x23484A: (0x944A, 0), # East Asian ideograph
0x27484B: (0x51C6, 0), # East Asian ideograph
0x6F5B6E: (0xD30E, 0), # Korean hangul
0x2F3639: (0x8C7C, 0), # East Asian ideograph
0x4B484C: (0x6F91, 0), # East Asian ideograph
0x22484D: (0x6D26, 0), # East Asian ideograph
0x22484E: (0x6D27, 0), # East Asian ideograph
0x294375: (0x9514, 0), # East Asian ideograph
0x22484F: (0x6D0F, 0), # East Asian ideograph
0x224850: (0x6D0A, 0), # East Asian ideograph
0x2D4466: (0x6973, 0), # East Asian ideograph
0x224851: (0x6D3F, 0), # East Asian ideograph
0x226329: (0x77BE, 0), # East Asian ideograph
0x234853: (0x9466, 0), # East Asian ideograph
0x47594E: (0x9C3A, 0), # East Asian ideograph
0x274854: (0x6E0D, 0), # East Asian ideograph
0x514E5B: (0x9271, 0), # East Asian ideograph
0x274855: (0x6DA8, 0), # East Asian ideograph
0x6F5B70: (0xD314, 0), # Korean hangul
0x274842: (0x706D, 0), # East Asian ideograph
0x6F572D: (0xC7BF, 0), # Korean hangul
0x284C41: (0x6CA4, 0), # East Asian ideograph
0x234560: (0x93BE, 0), # East Asian ideograph (not in Unicode)
0x29243A: (0x83BC, 0), # East Asian ideograph
0x274857: (0x6C49, 0), # East Asian ideograph
0x273B79: (0x5C9B, 0), # East Asian ideograph
0x234858: (0x9462, 0), # East Asian ideograph
0x2F252D: (0x6A22, 0), # East Asian ideograph
0x6F575D: (0xC8A8, 0), # Korean hangul
0x3F4621: (0x9A69, 0), # East Asian ideograph
0x274859: (0x6D9F, 0), # East Asian ideograph
0x286622: (0x7857, 0), # East Asian ideograph
0x22485A: (0x6D07, 0), # East Asian ideograph
0x4B4D7B: (
0x77D7,
0,
), # East Asian ideograph (variant of 214D7B which maps to 77D7)
0x6F5B71: (0xD31C, 0), # Korean hangul
0x213221: (0x5018, 0), # East Asian ideograph
0x213222: (0x4FF1, 0), # East Asian ideograph
0x22485B: (0x6D04, 0), # East Asian ideograph
0x273224: (0x4E2A, 0), # East Asian ideograph
0x213225: (0x5019, 0), # East Asian ideograph
0x273226: (0x4F25, 0), # East Asian ideograph
0x223227: (0x638E, 0), # East Asian ideograph
0x233228: (0x8A4A, 0), # East Asian ideograph
0x22485C: (0x6CDA, 0), # East Asian ideograph
0x23322A: (0x8A4E, 0), # East Asian ideograph
0x21322B: (0x4FFE, 0), # East Asian ideograph
0x21322C: (0x502A, 0), # East Asian ideograph
0x27322D: (0x4F26, 0), # East Asian ideograph
0x27322E: (0x4EC3, 0), # East Asian ideograph (duplicate simplified)
0x22322F: (0x6375, 0), # East Asian ideograph
0x223230: (0x63AF, 0), # East Asian ideograph
0x213231: (0x5047, 0), # East Asian ideograph
0x213232: (0x505A, 0), # East Asian ideograph
0x273233: (0x4F1F, 0), # East Asian ideograph
0x213234: (0x5043, 0), # East Asian ideograph
0x23485E: (0x945E, 0), # East Asian ideograph
0x213236: (0x5076, 0), # East Asian ideograph
0x213237: (0x504E, 0), # East Asian ideograph
0x223238: (0x63B0, 0), # East Asian ideograph
0x223239: (0x63AE, 0), # East Asian ideograph
0x22323A: (0x637C, 0), # East Asian ideograph
0x27485F: (0x6EDE, 0), # East Asian ideograph
0x21323C: (0x5077, 0), # East Asian ideograph
0x22323D: (0x63AD, 0), # East Asian ideograph
0x27323E: (0x5BB6, 0), # East Asian ideograph
0x21323F: (0x5085, 0), # East Asian ideograph
0x273240: (0x5907, 0), # East Asian ideograph
0x224860: (0x6D2E, 0), # East Asian ideograph
0x233242: (0x8A45, 0), # East Asian ideograph
0x273243: (0x4F27, 0), # East Asian ideograph
0x273244: (0x4F1E, 0), # East Asian ideograph
0x213245: (0x50AD, 0), # East Asian ideograph
0x273246: (0x4F20, 0), # East Asian ideograph
0x224861: (0x6D35, 0), # East Asian ideograph
0x213248: (0x50B2, 0), # East Asian ideograph
0x273249: (0x4EC5, 0), # East Asian ideograph
0x27324A: (0x503E, 0), # East Asian ideograph
0x21324B: (0x50AC, 0), # East Asian ideograph
0x27324C: (0x4F24, 0), # East Asian ideograph
0x224862: (0x6D3A, 0), # East Asian ideograph
0x21324E: (0x50E7, 0), # East Asian ideograph
0x22324F: (0x63BD, 0), # East Asian ideograph
0x223250: (0x63C3, 0), # East Asian ideograph
0x273251: (0x4FA5, 0), # East Asian ideograph
0x223252: (0x63F5, 0), # East Asian ideograph
0x213253: (0x50ED, 0), # East Asian ideograph
0x213254: (0x50DA, 0), # East Asian ideograph
0x273255: (0x4EC6, 0), # East Asian ideograph
0x273256: (0x4F2A, 0), # East Asian ideograph
0x273257: (0x8C61, 0), # East Asian ideograph
0x273258: (0x4FA8, 0), # East Asian ideograph
0x233259: (0x8A82, 0), # East Asian ideograph
0x27325A: (0x4EBF, 0), # East Asian ideograph
0x22325B: (0x63E0, 0), # East Asian ideograph
0x22325C: (0x63D5, 0), # East Asian ideograph
0x23325D: (0x8A84, 0), # East Asian ideograph
0x23325E: (0x8A75, 0), # East Asian ideograph
0x274865: (0x6E14, 0), # East Asian ideograph
0x273260: (0x4FA9, 0), # East Asian ideograph
0x273261: (0x4FED, 0), # East Asian ideograph
0x273262: (0x50A7, 0), # East Asian ideograph
0x273263: (0x5C3D, 0), # East Asian ideograph (duplicate simplified)
0x213264: (0x5112, 0), # East Asian ideograph
0x273265: (0x4FE6, 0), # East Asian ideograph
0x223266: (0x63C5, 0), # East Asian ideograph (not in Unicode)
0x213267: (0x511F, 0), # East Asian ideograph
0x213268: (0x5121, 0), # East Asian ideograph
0x273269: (0x50A8, 0), # East Asian ideograph
0x21326A: (0x5137, 0), # East Asian ideograph
0x27326B: (0x4FE8, 0), # East Asian ideograph
0x21326C: (0x5140, 0), # East Asian ideograph
0x21326D: (0x5143, 0), # East Asian ideograph
0x21326E: (0x5141, 0), # East Asian ideograph
0x23326F: (0x8A96, 0), # East Asian ideograph
0x213270: (0x5144, 0), # East Asian ideograph
0x233271: (0x8A9A, 0), # East Asian ideograph
0x213272: (0x5149, 0), # East Asian ideograph
0x273273: (0x51F6, 0), # East Asian ideograph
0x213274: (0x5148, 0), # East Asian ideograph
0x274C3C: (0x8FED, 0), # East Asian ideograph
0x223276: (0x63D1, 0), # East Asian ideograph (not in Unicode)
0x234869: (0x946D, 0), # East Asian ideograph
0x213278: (0x5155, 0), # East Asian ideograph
0x223279: (0x63C4, 0), # East Asian ideograph
0x27327A: (0x513F, 0), # East Asian | |
<reponame>RicoloveFeng/KDraw<filename>src/CLI/Kdraw.py
'''
KDraw v0.0.1
1. setColor is written into the opcodes in instruction.py
2. for the resetCanvas command, create a new img object and clear the op list
3. for draw commands, append the opcode to the op list
4. for adjust commands, find the opcode in the list by id and adjust its points
5. for the saveCanvas command, run the queued drawing commands and save the canvas
'''
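# Illustrative shape of an entry in opQueue (hypothetical example; the actual
# opcodes are produced elsewhere, e.g. by instruction.py, and the "command"
# value shown here is assumed):
#   {"id": 1, "command": "DL", "algorithm": "DDA",
#    "color": (255, 0, 0), "points": [(10, 10), (100, 50)]}
# "id" and "command" are used by getOpc/translate/rotate/scale below;
# "points", "algorithm" and "color" are consumed by the draw* functions.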
from PIL import Image
from scipy.special import comb
import math
img = Image.new("RGB", (256, 256))
now_width = 256
now_height = 256
opQueue = []
save = ""
def getOpc(opid):
'''
return opid dict
always succeed
'''
global opQueue
for i in opQueue:
if i["id"] == opid: return i
def drawPoint(x, y, color):
global img
pixels = img.load()
width = img.width
height = img.height
x = int(round(x))
y = int(round(y))
try:
if (0 <= x < width) and (0 <= y < height):
pixels[x, height - y - 1] = color
except IndexError:
print(x, y, width, height)
def resetCanvas(opcode):
global img, opQueue, now_height, now_width
now_width = int(opcode["width"])
now_height = int(opcode["height"])
img = Image.new("RGB", (now_width, now_height), 0xffffff)
opQueue = []
def drawLine(opcode):
x1 = round(opcode["points"][0][0])
y1 = round(opcode["points"][0][1])
x2 = round(opcode["points"][1][0])
y2 = round(opcode["points"][1][1])
if opcode["algorithm"] == "N/A": # in case the line has been cliped
pass
elif opcode["algorithm"] == "DDA":
dx = x2 - x1
dy = y2 - y1
step = abs(dx) if abs(dx) >= abs(dy) else abs(dy)
dx = dx / step
dy = dy / step
x = x1
y = y1
for _ in range(int(step)):
drawPoint(x, y, opcode["color"])
x = x + dx
y = y + dy
else: #bresenham
def drawLow(a1, b1, a2, b2):
dx = a2-a1
dy = b2-b1
yi = 1
if dy < 0:
yi = -1
dy = -dy
ddy = 2*dy
ddx = 2*dx
D = ddy - dx
y = b1
for x in range(a1, a2 + 1):
drawPoint(x, y, opcode["color"])
if D >= 0:
y = y + yi
D = D - ddx
D = D + ddy
def drawHigh(a1, b1, a2, b2):
dx = a2-a1
dy = b2-b1
xi = 1
if dx < 0:
xi = -1
dx = -dx
ddy = 2*dy
ddx = 2*dx
D = ddx - dy
x = a1
for y in range(b1, b2 + 1):
drawPoint(x, y, opcode["color"])
if D >= 0:
x = x + xi
D = D - ddy
D = D + ddx
if abs(y2 - y1) < abs(x2 - x1):
drawLow(x2,y2,x1,y1) if x1 > x2 else drawLow(x1,y1,x2,y2)
else:
drawHigh(x2,y2,x1,y1) if y1 > y2 else drawHigh(x1,y1,x2,y2)
def drawPolygon(opcode):
pointsCount = len(opcode["points"])
if opcode["algorithm"] == "DDA":
i = 0
while i + 1 < pointsCount:
p1 = opcode["points"][i]
p2 = opcode["points"][i+1]
drawLine({
"points":[p1, p2],
"algorithm": "DDA",
"color": opcode["color"]
})
i += 1
drawLine({
"points":[opcode["points"][-1], opcode["points"][0]],
"algorithm": "DDA",
"color": opcode["color"]
})
else: #bresenham
i = 0
while i + 1 < pointsCount:
p1 = opcode["points"][i]
p2 = opcode["points"][i+1]
drawLine({
"points":[p1, p2],
"algorithm": "Brehensam",
"color": opcode["color"]
})
i += 1
drawLine({
"points":[opcode["points"][-1], opcode["points"][0]],
"algorithm": "Brehensam",
"color": opcode["color"]
})
def drawEllipse(opcode):
cx = round(opcode["points"][0][0])
cy = round(opcode["points"][0][1])
def drawEllipsePoint(x, y): # refer center at above
color = opcode["color"]
drawPoint(cx+x, cy+y, color)
drawPoint(cx-x, cy+y, color)
drawPoint(cx+x, cy-y, color)
drawPoint(cx-x, cy-y, color)
rx = round(opcode["rx"])
ry = round(opcode["ry"])
rx2 = rx * rx
ry2 = ry * ry
trx2 = 2 * rx2
try2 = 2 * ry2
x = 0
y = ry
px = 0
py = trx2 * y
#initial
drawEllipsePoint(x, y)
# region 1
p = round(ry2 - rx2 * ry + 0.25 * rx2)
while px < py:
x += 1
px += try2
if p < 0:
p += ry2 + px
else:
y -= 1
py -= trx2
p += ry2 + px - py
drawEllipsePoint(x, y)
# region 2
p = round(ry2 * (x + 0.5) ** 2 + rx2 * (y - 1) ** 2 - rx2 * ry2)
while y > 0:
y -= 1
py -= trx2
if p > 0:
p += rx2 - py
else:
x += 1
px += try2
p += rx2 - py + px
drawEllipsePoint(x, y)
def drawCurve(opcode):
global img
if opcode["algorithm"] == "Bezier":
#18 sample points
points = opcode["points"]
pointsCount = len(points)
samples = 18
cof = []
for i in range(pointsCount):
cof.append(comb(pointsCount - 1, i))
res = []
for s in range(samples):
t = s / (samples - 1)
tinv = 1 - t
x = 0.0
y = 0.0
for i in range(pointsCount):
fac = cof[i] * t ** i * tinv ** (pointsCount- i - 1)
x += points[i][0] * fac
y += points[i][1] * fac
res.append((int(round(x)), int(round(y))))
for i in range(samples - 1):
drawLine({
"points":[res[i], res[i+1]],
"algorithm": "Brehensam",
"color": opcode["color"]
})
else:
#Using Clamped B-spline curve
points = opcode["points"]
n = len(points) - 1 # n+1 control points, n intervals
k = 3 # order 3
samples = 200
res = []
#calculate knot vector, n + k + 1 in total
u = []
for _ in range(k - 1): # 0 ~ k-2, k-1 in total
u.append(0.0)
for i in range(n - k + 3): # k-1 ~ n+1, n - k + 3 in total
u.append( i / (n - k + 2))
for _ in range(k - 1): # n+2 ~ n+k, k - 1 in total
u.append(1.0)
        # basis function, using the de Boor-Cox recursion
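        # The recursion implemented by B() below:
        #   B_{i,1}(t) = 1 if u_i <= t <= u_{i+1} else 0
        #   B_{i,k}(t) = (t - u_i) / (u_{i+k-1} - u_i) * B_{i,k-1}(t)
        #             + (u_{i+k} - t) / (u_{i+k} - u_{i+1}) * B_{i+1,k-1}(t)
        # (terms with a zero denominator are treated as zero)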
def B(i, k, t):
if k == 1:
return 1.0 if u[i] <= t <= u[i+1] else 0 # assuming 0 / 0 = 0
else:
ret = 0
fac1 = B(i, k - 1, t)
fac2 = B(i + 1, k - 1, t)
if fac1 and t - u[i]:
ret += (t - u[i]) / (u[i+k-1] - u[i]) * fac1
if fac2 and u[i+k] - t:
ret += (u[i+k] - t) / (u[i+k] - u[i+1]) * fac2
return ret
for s in range(samples):
t = s / (samples - 1) # u[k-1] is 0 and u[n+1] is 1
x = 0.0
y = 0.0
for i in range(n + 1):
fac = B(i, k, t)
x += points[i][0] * fac
y += points[i][1] * fac
res.append((int(round(x)), int(round(y))))
for i in range(samples - 1):
drawLine({
"points":[res[i], res[i+1]],
"algorithm": "Brehensam",
"color": opcode["color"]
})
def translate(opcode):
obj = getOpc(opcode["id"])
dx = opcode["dx"]
dy = opcode["dy"]
for i in range(len(obj["points"])):
p = list(obj["points"][i])
p[0] = p[0] + dx
p[1] = p[1] + dy
obj["points"][i] = tuple(p) #保存结果
def rotate(opcode):
obj = getOpc(opcode["id"])
xr = opcode["x"]
yr = opcode["y"]
r = opcode["r"] / 180 * math.pi * -1
for i in range(len(obj["points"])):
p = list(obj["points"][i])
x = p[0]
y = p[1]
p[0] = xr + (x - xr) * math.cos(r) - (y - yr) * math.sin(r)
p[1] = yr + (x - xr) * math.sin(r) + (y - yr) * math.cos(r)
obj["points"][i] = tuple(p) #保存结果
def scale(opcode):
obj = getOpc(opcode["id"])
xf = opcode["x"]
yf = opcode["y"]
s = opcode["s"]
for i in range(len(obj["points"])):
p = list(obj["points"][i])
p[0] = p[0] * s + xf * (1 - s)
p[1] = p[1] * s + yf * (1 - s)
if obj["command"] == "DE": # 椭圆的特殊处理
obj["rx"] = obj["rx"] * s
obj["ry"] = obj["ry"] * s
obj["points"][i] = tuple(p) # 保存结果
def clip(opcode):
print("start:",opQueue)
obj = getOpc(opcode["id"])
algorithm = opcode["algorithm"]
top = opcode["top"]
left = opcode["left"]
bottom = opcode["bottom"]
right = opcode["right"]
x1 = obj["points"][0][0]
y1 = obj["points"][0][1]
x2 = obj["points"][1][0]
y2 = obj["points"][1][1]
if algorithm == "Cohen-Sutherland":
L = 1
R = 2
B = 4
T = 8
def nodeCode(x, y):
code = 0
if x < left: code += L
elif x > right: code += R
if y > top: code += T
elif y < bottom: code += B
return code
code1 = nodeCode(x1, y1)
code2 = nodeCode(x2, y2)
while True:
            if (code1 | code2) == 0:  # both endpoints have code 0000 (fully inside)
| |
range(len(enum_hypothesis_list))[::-1]:
hypothesis_syns = set(
chain(
*[
[
lemma.name()
for lemma in synset.lemmas()
if lemma.name().find("_") < 0
]
for synset in wordnet.synsets(enum_hypothesis_list[i][1])
]
)
).union({enum_hypothesis_list[i][1]})
for j in range(len(enum_reference_list))[::-1]:
if enum_reference_list[j][1] in hypothesis_syns:
word_match.append(
(enum_hypothesis_list[i][0], enum_reference_list[j][0])
)
enum_hypothesis_list.pop(i), enum_reference_list.pop(j)
break
return word_match, enum_hypothesis_list, enum_reference_list
def wordnetsyn_match(hypothesis, reference, wordnet=wordnet):
"""
Matches each word in reference to a word in hypothesis if any synonym
of a hypothesis word is the exact match to the reference word.
:param hypothesis: hypothesis string
:param reference: reference string
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: list of mapped tuples
:rtype: list of tuples
"""
enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
return _enum_wordnetsyn_match(
enum_hypothesis_list, enum_reference_list, wordnet=wordnet
)
def _enum_allign_words(
enum_hypothesis_list, enum_reference_list, stemmer=PorterStemmer(), wordnet=wordnet
):
"""
    Aligns/matches words in the hypothesis to the reference by sequentially
    applying exact match, stemmed match and wordnet-based synonym match.
    In case there are multiple matches, the match with the fewest crossings
    is chosen. Takes enumerated lists as input instead of strings.
:param enum_hypothesis_list: enumerated hypothesis list
:param enum_reference_list: enumerated reference list
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: sorted list of matched tuples, unmatched hypothesis list,
unmatched reference list
:rtype: list of tuples, list of tuples, list of tuples
"""
exact_matches, enum_hypothesis_list, enum_reference_list = _match_enums(
enum_hypothesis_list, enum_reference_list
)
stem_matches, enum_hypothesis_list, enum_reference_list = _enum_stem_match(
enum_hypothesis_list, enum_reference_list, stemmer=stemmer
)
wns_matches, enum_hypothesis_list, enum_reference_list = _enum_wordnetsyn_match(
enum_hypothesis_list, enum_reference_list, wordnet=wordnet
)
return (
sorted(
exact_matches + stem_matches + wns_matches, key=lambda wordpair: wordpair[0]
),
enum_hypothesis_list,
enum_reference_list,
)
def allign_words(hypothesis, reference, stemmer=PorterStemmer(), wordnet=wordnet):
"""
Aligns/matches words in the hypothesis to reference by sequentially
applying exact match, stemmed match and wordnet based synonym match.
    In case there are multiple matches, the match with the fewest crossings
    is chosen.
:param hypothesis: hypothesis string
:param reference: reference string
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:return: sorted list of matched tuples, unmatched hypothesis list, unmatched reference list
:rtype: list of tuples, list of tuples, list of tuples
"""
enum_hypothesis_list, enum_reference_list = _generate_enums(hypothesis, reference)
return _enum_allign_words(
enum_hypothesis_list, enum_reference_list, stemmer=stemmer, wordnet=wordnet
)
def _count_chunks(matches):
"""
Counts the fewest possible number of chunks such that matched unigrams
    of each chunk are adjacent to each other. This is used to calculate the
fragmentation part of the metric.
:param matches: list containing a mapping of matched words (output of allign_words)
    :return: Number of chunks a sentence is divided into after alignment
:rtype: int
"""
i = 0
chunks = 1
while i < len(matches) - 1:
if (matches[i + 1][0] == matches[i][0] + 1) and (
matches[i + 1][1] == matches[i][1] + 1
):
i += 1
continue
i += 1
chunks += 1
return chunks
def single_meteor_score(
reference,
hypothesis,
preprocess=str.lower,
stemmer=PorterStemmer(),
wordnet=wordnet,
alpha=0.9,
beta=3,
gamma=0.5,
):
"""
Calculates METEOR score for single hypothesis and reference as per
"Meteor: An Automatic Metric for MT Evaluation with HighLevels of
Correlation with Human Judgments" by <NAME> and <NAME>,
in Proceedings of ACL.
http://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf
>>> hypothesis1 = 'It is a guide to action which ensures that the military always obeys the commands of the party'
>>> reference1 = 'It is a guide to action that ensures that the military will forever heed Party commands'
>>> round(single_meteor_score(reference1, hypothesis1),4)
0.7398
    If no words match during the alignment, the method returns a
    score of 0. We can safely return a zero instead of raising a
division by zero error as no match usually implies a bad translation.
>>> round(meteor_score('this is a cat', 'non matching hypothesis'),4)
0.0
:param references: reference sentences
:type references: list(str)
:param hypothesis: a hypothesis sentence
:type hypothesis: str
:param preprocess: preprocessing function (default str.lower)
:type preprocess: method
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:param alpha: parameter for controlling relative weights of precision and recall.
:type alpha: float
    :param beta: parameter for controlling the shape of the penalty as a
        function of fragmentation.
:type beta: float
    :param gamma: relative weight assigned to the fragmentation penalty.
:type gamma: float
:return: The sentence-level METEOR score.
:rtype: float
"""
enum_hypothesis, enum_reference = _generate_enums(
hypothesis, reference, preprocess=preprocess
)
translation_length = len(enum_hypothesis)
reference_length = len(enum_reference)
matches, _, _ = _enum_allign_words(enum_hypothesis, enum_reference, stemmer=stemmer)
matches_count = len(matches)
try:
precision = float(matches_count) / translation_length
recall = float(matches_count) / reference_length
fmean = (precision * recall) / (alpha * precision + (1 - alpha) * recall)
chunk_count = float(_count_chunks(matches))
frag_frac = chunk_count / matches_count
except ZeroDivisionError:
return 0.0
penalty = gamma * frag_frac ** beta
return (1 - penalty) * fmean
def meteor_score(
references,
hypothesis,
preprocess=str.lower,
stemmer=PorterStemmer(),
wordnet=wordnet,
alpha=0.9,
beta=3,
gamma=0.5,
):
"""
Calculates METEOR score for hypothesis with multiple references as
described in "Meteor: An Automatic Metric for MT Evaluation with
    High Levels of Correlation with Human Judgments" by <NAME> and
<NAME>, in Proceedings of ACL.
http://www.cs.cmu.edu/~alavie/METEOR/pdf/Lavie-Agarwal-2007-METEOR.pdf
In case of multiple references the best score is chosen. This method
iterates over single_meteor_score and picks the best pair among all
the references for a given hypothesis
>>> hypothesis1 = 'It is a guide to action which ensures that the military always obeys the commands of the party'
>>> hypothesis2 = 'It is to insure the troops forever hearing the activity guidebook that party direct'
>>> reference1 = 'It is a guide to action that ensures that the military will forever heed Party commands'
>>> reference2 = 'It is the guiding principle which guarantees the military forces always being under the command of the Party'
>>> reference3 = 'It is the practical guide for the army always to heed the directions of the party'
>>> round(meteor_score([reference1, reference2, reference3], hypothesis1),4)
0.7398
    If no words match during the alignment, the method returns a
    score of 0. We can safely return a zero instead of raising a
division by zero error as no match usually implies a bad translation.
>>> round(meteor_score(['this is a cat'], 'non matching hypothesis'),4)
0.0
:param references: reference sentences
:type references: list(str)
:param hypothesis: a hypothesis sentence
:type hypothesis: str
:param preprocess: preprocessing function (default str.lower)
:type preprocess: method
:param stemmer: nltk.stem.api.StemmerI object (default PorterStemmer())
:type stemmer: nltk.stem.api.StemmerI or any class that implements a stem method
:param wordnet: a wordnet corpus reader object (default nltk.corpus.wordnet)
:type wordnet: WordNetCorpusReader
:param alpha: parameter for controlling relative weights of precision and recall.
:type alpha: float
    :param beta: parameter for controlling the shape of the penalty as a
        function of fragmentation.
:type beta: float
    :param gamma: relative weight assigned to the fragmentation penalty.
:type gamma: float
:return: The sentence-level METEOR score.
:rtype: float
"""
return max(
[
single_meteor_score(
reference,
hypothesis,
stemmer=stemmer,
wordnet=wordnet,
alpha=alpha,
beta=beta,
gamma=gamma,
)
for reference in references
]
)
# Creating a reverse dictionary word_tokenizer.sequences_to_texts(word_train_inputs)
reverse_word_map = dict(map(reversed, word_tokenizer.word_index.items()))
# Function takes a tokenized sentence and returns the words
def sequence_to_text(list_of_indices):
# Looking up words in dictionary
words = [reverse_word_map.get(letter) for letter in list_of_indices if letter != 0]
return(words)
# Creating texts
#list(map(sequence_to_text, word_train_inputs))
from sklearn.metrics import f1_score
from sklearn_crfsuite.metrics import flat_classification_report, flat_f1_score
import nltk
nltk.download('wordnet')
class SeqF1Callback(tf.keras.callbacks.Callback):
def __init__(self, model, inputs, targets, filename, patience):
self.model = model
self.inputs = inputs
self.targets = targets
self.filename = filename
self.patience = patience
self.best_score = 0
self.bad_epoch = 0
def on_epoch_end(self, epoch, logs):
pred = self.model.predict(self.inputs)
#print(pred.argmax(-1), self.targets)
score = flat_f1_score(self.targets, pred.argmax(-1), average='macro')
if score > self.best_score:
self.best_score = score
self.model.save_weights(self.filename)
print ("\nScore {}. Model saved in {}.".format(score, self.filename))
self.bad_epoch = 0
else:
print ("\nScore {}. Model not saved.".format(score))
self.bad_epoch += 1
if self.bad_epoch >= self.patience:
print ("\nEpoch {}: early stopping.".format(epoch))
self.model.stop_training = True
"""## Loss and metrics
Since the target sequences are | |
numpify(msg)
# roll = torch.full(sz, np.nan, dtype=torch.float32)
# pitch = torch.full(sz, np.nan, dtype=torch.float32)
# z = torch.full(sz, np.nan, dtype=torch.float32)
# for i, yaw in enumerate(range(0, 360, 45)):
# roll[i, ...] = torch.tensor(cloud['roll_%i' % yaw], dtype=torch.float32).reshape(sz[1:])
# pitch[i, ...] = torch.tensor(cloud['pitch_%i' % yaw], dtype=torch.float32).reshape(sz[1:])
# z[i, ...] = torch.tensor(cloud['z_%i' % yaw], dtype=torch.float32).reshape(sz[1:])
# rospy.loginfo(roll.shape)
# with self.rpz_lock:
# self.rpz_msg = msg
# self.roll = roll
# self.pitch = pitch
# self.z = z
# roll, pitch, z = msg_to_rpz_tensor(msg)
rpz, cloud = cloud_msg_to_rpz_tensor(msg, self.order)
cloud_to_grid = cloud_to_grid_transform(cloud)
grid_to_cloud = torch.inverse(cloud_to_grid)
rospy.logdebug('Map to grid:\n%s', cloud_to_grid)
rospy.logdebug('Grid to map:\n%s', grid_to_cloud)
with self.rpz_lock:
self.rpz_msg = msg
# self.roll = roll
# self.pitch = pitch
# self.z = z
self.rpz_cloud = cloud
# Yaw offset
# p0 = np.array([self.rpz_cloud['x'][0, 0], self.rpz_cloud['y'][0, 0]])
# p1 = np.array([self.rpz_cloud['x'][0, 1], self.rpz_cloud['y'][0, 1]])
# p0 = torch.tensor([self.rpz_cloud['x'][0, 0], self.rpz_cloud['y'][0, 0]])
# p1 = torch.tensor([self.rpz_cloud['x'][0, 1], self.rpz_cloud['y'][0, 1]])
# x = (p1 - p0).norm()
# yaw_offset = torch.atan2(self.rpz_cloud['y'][0, 1] - self.rpz_cloud['y'][0, 0],
# self.rpz_cloud['x'][0, 1] - self.rpz_cloud['x'][0, 0])
self.rpz_all = rpz
self.map_to_grid = cloud_to_grid
self.grid_to_map = grid_to_cloud
rospy.logdebug('RPZ processed and stored (%.3f s).', (timer() - t))
@timing
def get_available_cameras(self):
"""
Get available cameras:
camera-to-robot transforms, frustum planes and K-matrixes (intrinsics)
"""
def K_from_msg(msg):
k = torch.as_tensor(msg.K).view(3, 3)
K = torch.eye(4)
K[:3, :3] = k
return K
with self.cam_info_lock:
cam_to_robot = [tf for tf in self.cam_to_robot if tf is not None]
if not cam_to_robot:
return None, None, None
frustums = [f for f in self.cam_frustums if f is not None]
intrins = {'Ks': [K_from_msg(msg) for msg in self.cam_infos if msg is not None],
'hw': [torch.tensor([msg.height, msg.width]) for msg in self.cam_infos if msg is not None]}
assert len(cam_to_robot) == len(frustums)
cam_to_robot = torch.stack(cam_to_robot)
# n_cams, 4, 4
assert cam_to_robot.dim() == 3
assert cam_to_robot.shape[1:] == (4, 4)
frustums = torch.stack(frustums)
# n_cams, 4, n_planes
assert frustums.dim() == 3
assert frustums.shape[-2] == 4
intrins['Ks'] = torch.stack(intrins['Ks'])
# n_cams, 4, 4
assert intrins['Ks'].dim() == 3
assert intrins['Ks'].shape[-2] == 4
intrins['hw'] = torch.stack(intrins['hw'])
# n_cams, 2
assert intrins['hw'].dim() == 2
assert intrins['hw'].shape[-1] == 2
return cam_to_robot, frustums, intrins
def path_reward(self, xyzrpy, map, cam_to_robot, frustums, vis_cams=False):
assert isinstance(xyzrpy, torch.Tensor)
# (..., N, 6)
assert xyzrpy.shape[-1] == 6
assert xyzrpy.dim() >= 2
# n_cams, 4, n_planes
n_cams, _, n_planes = frustums.shape
assert cam_to_robot.shape == (n_cams, 4, 4)
assert xyzrpy.shape[1] == 6
n_poses = xyzrpy.shape[0]
t = timer()
cam_to_robot = cam_to_robot.to(self.device)
frustums = frustums.to(self.device)
map = map.to(self.device)
assert map.shape[0] == 4
n_pts = map.shape[-1]
xyzrpy = xyzrpy.to(self.device)
rospy.logdebug('Moving to %s: %.3f s', self.device, timer() - t)
# Keep map coordinates, convert to grid just for RPZ interpolation.
        # Assume the map-to-grid transform is a 2D similarity with an optional z offset.
# Optimize xy pairs, with yaw defined by xy steps.
# Allow start and/or end xy fixed.
# For interpolation we'll have: rpz(to_grid(xyyaw)).
# Optionally, offset z if needed.
# Prepare reward cloud for visualization.
reward_cloud = np.zeros((n_pts,), dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('visibility', 'f4'), ('fov', 'f4'), ('distance', 'f4'),
('reward', 'f4')])
for i, f in enumerate(['x', 'y', 'z']):
reward_cloud[f] = map[i].detach().cpu().numpy()
# Get camera to map transforms.
t = timer()
robot_to_map = xyzrpy_matrix(xyzrpy)
assert robot_to_map.shape == (n_poses, 4, 4)
cam_to_map = robot_to_map[:, None].matmul(cam_to_robot[None])
assert cam_to_map.shape == (n_poses, n_cams, 4, 4)
map_to_cam = isometry_inverse(cam_to_map)
assert map_to_cam.shape == (n_poses, n_cams, 4, 4)
rospy.logdebug('Camera to map transforms: %.3f s', timer() - t)
# Visibility / occlusion mask.
t = timer()
vis_mask = compute_vis_mask(map, cam_to_map, param=0.01)
with torch.no_grad():
reward_cloud['visibility'] = reduce_rewards(vis_mask).detach().cpu().numpy()
rospy.logdebug('Point visibility computation took: %.3f s.', timer() - t)
# compute smooth version of FOV mask
# fov_mask = compute_fov_mask_smooth(map, intrins, map_to_cam)
fov_mask = compute_fov_mask(map, frustums, map_to_cam)
assert fov_mask.shape == (n_poses, n_cams, n_pts)
with torch.no_grad():
reward_cloud['fov'] = reduce_rewards(fov_mask).detach().cpu().numpy()
# Compute point to sensor distances.
# TODO: Optimize, avoid exhaustive distance computation.
dist_mask = compute_dist_mask(map, cam_to_map)
with torch.no_grad():
reward_cloud['distance'] = reduce_rewards(dist_mask).detach().cpu().numpy()
# Compute rewards
rewards = vis_mask * fov_mask * dist_mask
assert rewards.shape == (n_poses, n_cams, n_pts)
# share and sum rewards over multiple sensors and view points
# rewards = log_odds_conversion(rewards)
# instead of log odds: max of rewards over all sensors and wps poses
rewards = reduce_rewards(rewards)
reward = rewards.sum()
assert isinstance(reward, torch.Tensor)
assert reward.shape == ()
# reward cloud for visualization
reward_cloud['reward'] = rewards.detach().cpu().numpy()
# Visualize cameras (first, middle and last).
if vis_cams:
t = timer()
self.visualize_cams(robot_to_map[0].detach(), id=0)
self.visualize_cams(robot_to_map[n_poses // 2].detach(), id=1)
self.visualize_cams(robot_to_map[-1].detach(), id=2)
rospy.logdebug('Cameras visualized for %i poses (%.3f s).', 3, timer() - t)
return reward, reward_cloud
def path_cost(self, xyzrpy):
assert isinstance(xyzrpy, torch.Tensor)
# (..., N, 6)
assert xyzrpy.shape[-1] == 6
assert xyzrpy.dim() >= 2
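        # Cost model implemented below (summary of this function):
        #   dist_cost = sum(||xy_{i+1} - xy_i||) / linear_speed
        #   turn_cost = sum(min(|dyaw_i|, 2*pi - |dyaw_i|)) / angular_speed
        #   trav_cost = sum(|roll_i| * edge_i) / max_roll + sum(|pitch_i| * edge_i) / max_pitch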
# Distance cost.
# xy_diff = torch.diff(xyzrpy[..., :2], dim=-2)
xy_diff = xyzrpy[..., 1:, :2] - xyzrpy[..., :-1, :2]
edges = xy_diff.norm(dim=-1, keepdim=True)
dist_cost = edges.sum() / self.linear_speed
# rospy.logdebug('Distance cost: %.1f s.', dist_cost.item())
# Turning cost.
# yaw_diff = torch.diff(xyzrpy[..., -1:], dim=-2).abs()
yaw_diff = (xyzrpy[..., 1:, -1:] - xyzrpy[..., :-1, -1:]).abs()
# yaw_diff = torch.remainder(torch.diff(xyzrpy[..., -1:], dim=-2), 2 * np.pi)
yaw_diff = torch.min(yaw_diff, 2 * np.pi - yaw_diff)
turn_cost = yaw_diff.sum() / self.angular_speed
# rospy.logdebug('Turning cost: %.1f s.', turn_cost.item())
# Traversability cost, penalty for roll and pitch.
# TODO: Convert to time cost using control parameters.
rp = xyzrpy[..., 1:, 3:5]
        # Use edge lengths to scale roll and pitch penalties.
rp = rp * edges
# trav_cost = rp.abs().mean()
trav_cost = (rp.abs().sum(dim=0) / torch.tensor([self.max_roll, self.max_pitch]).to(xyzrpy.device)).sum()
return dist_cost, turn_cost, trav_cost
@timing
def path_received(self, msg):
assert isinstance(msg, Path)
# # Discard old messages.
# age = (rospy.Time.now() - msg.header.stamp).to_sec()
# if age > self.max_age:
# rospy.logwarn('Discarding path %.1f s > %.1f s old.', age, self.max_age)
# return
# Subsample input path to reduce computation.
if self.path_step > 1:
msg.poses = msg.poses[::self.path_step]
# Check compatibility of path and map frames.
if not msg.header.frame_id:
rospy.logwarn_once('Map frame %s will be used instead of empty path frame.',
self.map_frame)
msg.header.frame_id = self.map_frame
elif not self.map_frame:
self.map_frame = msg.header.frame_id
elif self.map_frame and msg.header.frame_id != self.map_frame:
rospy.logwarn_once('Map frame %s will be used instead of path frame %s.',
self.map_frame, msg.header.frame_id)
with self.path_lock:
self.path_msg = msg
self.path_xyzrpy = path_msg_to_xyzrpy(self.path_msg)
# Get RPZ subspace and map for optimization.
with self.rpz_lock:
if self.rpz_msg is None:
rospy.logwarn('Skipping path. RPZ cloud not yet received.')
return
assert isinstance(self.rpz_msg, PointCloud2)
assert self.rpz_msg.header.frame_id == self.path_msg.header.frame_id
rpz_all = self.rpz_all
map_to_grid = self.map_to_grid
grid_to_map = self.grid_to_map
assert map_to_grid.shape == (3, 3)
assert grid_to_map.shape == (3, 3)
# rospy.loginfo('Map to grid:\n%s', map_to_grid.detach().numpy())
rospy.logdebug('Grid to map:\n%s', grid_to_map.detach().numpy())
assert self.path_xyzrpy.dim() == 2
assert self.path_xyzrpy.shape[1] == 6
n_poses = self.path_xyzrpy.shape[0]
# yaw = xy_to_azimuth(xy)
xy = self.path_xyzrpy[:, :2]
yaw_tail = xy_to_azimuth(xy[1:, :] - xy[:-1, :])
# TODO: Add starting yaw.
yaw = torch.cat((yaw_tail[:1, :], yaw_tail), dim=-2)
# rospy.loginfo('Yaw:\n%s', np.degrees(yaw.detach().numpy()))
if self.order == DimOrder.X_Y_YAW:
xyyaw = torch.cat((xy, yaw), dim=-1)
elif self.order == DimOrder.YAW_X_Y:
xyyaw = torch.cat((yaw, xy), dim=-1)
assert xyyaw.shape == (n_poses, 3)
xyyaw_grid = transform_xyyaw_tensor(map_to_grid, xyyaw, order=self.order)
rpz = interpolate_rpz(rpz_all, xyyaw_grid, order=self.order)
# Transform z?
assert rpz.shape == (n_poses, 3)
assert xyyaw.shape == (n_poses, 3)
if self.order == DimOrder.X_Y_YAW:
# Fix yaw from XY.
self.path_xyzrpy = torch.cat((xyyaw[:, :2], rpz[:, 2:], rpz[:, :2], xyyaw[:, 2:]), dim=-1)
elif self.order == DimOrder.YAW_X_Y:
self.path_xyzrpy = torch.cat((xyyaw[:, 1:], rpz[:, 2:], rpz[:, :2], xyyaw[:, :1]), dim=-1)
assert self.path_xyzrpy.dim() == 2
assert self.path_xyzrpy.shape[1] == 6
def run(self, event):
with self.path_lock:
if self.path_msg is None:
rospy.logwarn('Path is not yet received.')
return
with self.map_lock:
if self.map_msg is None:
rospy.logwarn('Map cloud is not yet received.')
return
assert isinstance(self.map_msg, PointCloud2)
assert self.map_msg.header.frame_id == self.path_msg.header.frame_id
# Get frustums and intrinsics of available cameras.
cam_to_robot, frustums, _ = self.get_available_cameras()
if cam_to_robot is None:
rospy.logwarn('No cameras available.')
return
map = self.map
path_xyzrpy = self.path_xyzrpy
path_msg = self.path_msg
if torch.isnan(path_xyzrpy).any():
rospy.logwarn("Path contains NANs. Evaluation will not be performed this time")
return
xyzrpy = self.get_robot_xyzrpy(path_msg.header.frame_id)
if xyzrpy is None:
rospy.logwarn('Unable to find robot pose on the map')
return
# construct actual path with ~the same dists between waypoints
prev_pose = xyzrpy[:3]
actual_path_xyzrpy = [xyzrpy.unsqueeze(0)]
|