#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Methods commonly shared by the tool scripts
#
import codecs
import operator # only for sortedDict()
import sys # only for raw_encoded_input()
import locale # only for raw_encoded_input()
import json # for loadJsonConfig
def loadJsonConfig(filename='config.json'):
'''
Load and return a json config file as a dict.
Looks in the local directory first.
If the file isn't there, looks in the user's home directory.
If the file is in neither location, an error is raised.
'''
try:
f = open(filename, 'r')
config = json.load(f)
f.close()
except IOError as e:
if e.errno == 2: # file not found
import os
path = os.getenv("HOME")
f = open(os.path.join(path, filename), 'r')
config = json.load(f)
f.close()
else:
raise
return config
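# Usage sketch (illustrative only; 'some_key' is a hypothetical config entry):
#   config = loadJsonConfig()              # reads config.json locally, else from $HOME
#   config = loadJsonConfig('other.json')  # or name the file explicitly
#   value = config['some_key']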
def openFile(filename):
'''opens a given file (utf-8) and returns the lines'''
fin = codecs.open(filename, 'r', 'utf8')
txt = fin.read()
fin.close()
lines = txt.split('\n')
lines.pop()
return lines
def sortedDict(ddict):
'''turns a dict into a sorted list'''
sorted_ddict = sorted(ddict.iteritems(),
key=operator.itemgetter(1),
reverse=True)
return sorted_ddict
def is_number(s):
try:
float(s)
return True
except (ValueError, TypeError):
return False
def is_int(s):
try:
int(s)
return True
except (ValueError, TypeError):
return False
def is_iso_date(s):
'''
Checks if a string is a valid YYYY-MM-DD date
NOTE: Does not check that the day is valid for the given month (e.g. 2013-02-31 is accepted)
'''
if not isinstance(s, (str, unicode)):
return False
if not len(s) == len('YYYY-MM-DD'):
return False
if not (s[4:5] == '-' and s[7:8] == '-'):
return False
y, m, d = s[:4], s[5:7], s[8:]
if not is_int(y):
return False
if not (is_int(m) and int(m) in range(1, 12 + 1)):
return False
if not (is_int(d) and int(d) in range(1, 31 + 1)):
return False
return True
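# Examples (per the note above, the day is not checked against the month):
#   is_iso_date(u'2013-02-28')  -> True
#   is_iso_date(u'2013-02-31')  -> True   (accepted despite February having no 31st)
#   is_iso_date(u'2013-2-28')   -> False  (wrong length)
#   is_iso_date(20130228)       -> False  (not a string)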
def raw_encoded_input(txt):
'''query for input and deal with the encoding, whatever it is'''
return raw_input(txt).decode(sys.stdin.encoding or
locale.getpreferredencoding(True))
def list_diff(a, b):
'''subtract list 2 from list 1'''
b = set(b)
return [aa for aa in a if aa not in b]
def extractName(entry):
'''
If the field includes square brackets then any part of the name
field lying outside of them is ignored.
If the field contains semicolons then the parts are treated as
separate objects.
'''
if u'[' in entry:
pos1 = entry.find(u'[')
pos2 = entry.find(u']')
entry = entry[pos1 + 1:pos2]
return entry.split(u';')
def extractNameParts(name):
'''Tries to separate a name into first and second name.'''
# Algorithm: if there is one "," in the name then assume "lName, fName".
# Else assume the last name is the last word of the name.
# The plain name is returned if all else fails.
if u',' in name:
parts = name.split(u',')
if len(parts) == 2:
return u'%s;%s' % (parts[1].strip(), parts[0].strip())
if u' ' in name:
parts = name.split(u' ')
lName = parts[-1].strip()
fName = name[:-len(lName)].strip()
return u'%s;%s' % (fName, lName)
return name
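# Examples of the returned 'fName;lName' format (names are illustrative):
#   extractNameParts(u'Doe, John')  -> u'John;Doe'
#   extractNameParts(u'John Doe')   -> u'John;Doe'
#   extractNameParts(u'Prince')     -> u'Prince'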
def findUnit(contents, start, end, brackets=None):
'''
Method for isolating an object in a string. Will not work if either
start or end contains the ¤ symbol.
@input:
* content: the string to look at
* start: the substring indicating the start of the object
* end: the substring indicating the end of the object
if end is not found then the rest of the string is returned
if explicitly set to None then it is assumed that start-string
also marks the end of an object. In this case the end-string
is returned as part of the remainder
* brackets: a dict of brackets used which must match within the object
@output:
the-object, the-remainder-of-the-string, lead-in-to-object
OR None,None if an error
OR '','' if no object is found
'''
if start in contents:
# If end is left blank
if end is None:
noEnd = True
end = start
else:
noEnd = False
uStart = contents.find(start) + len(start)
uEnd = contents.find(end, uStart)
if uEnd < 0: # process till end of string
uEnd = None
if brackets:
for bStart, bEnd in brackets.iteritems():
dummy = u'¤' * len(bEnd)
diff = contents[uStart:uEnd].count(bStart) - contents[uStart:uEnd].count(bEnd)
if diff < 0:
print 'Negative bracket mismatch for: %s <--> %s' % (bStart, bEnd)
return None, None, None
# two cases either end is one of these brackets or not
if end in bEnd: # end is part of endBracket
i = 0
while(diff > 0):
i += 1
uEnd = contents.replace(bEnd, dummy, i).find(end, uStart)
if uEnd < 0:
print 'Positive (final) bracket mismatch for: %s <--> %s' % (bStart, bEnd)
return None, None, None
diff = contents[uStart:uEnd].count(bStart) - contents[uStart:uEnd].count(bEnd)
else: # end is different from endBracket (e.g. a '|')
i = 0
while(diff > 0):
i += 1
uEnd = contents.find(end, uEnd + len(end))
if uEnd < 0:
diff = contents[uStart:].count(bStart) - contents[uStart:].count(bEnd)
if diff > 0:
print 'Positive (final) bracket mismatch for: %s <--> %s' % (bStart, bEnd)
return None, None, None
else:
diff = contents[uStart:uEnd].count(bStart) - contents[uStart:uEnd].count(bEnd)
unit = contents[uStart:uEnd]
lead_in = contents[:uStart - len(start)]
if uEnd: # i.e. if not until end of string
if noEnd:
remainder = contents[uEnd:]
else:
remainder = contents[uEnd + len(end):]
else:
remainder = ''
return (unit, remainder, lead_in)
else:
return '', '', ''
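# Usage sketch showing the (unit, remainder, lead_in) return values
# (the string below is illustrative only):
#   findUnit(u'name = {{birth date|1981|1|1}} rest', u'{{', u'}}')
#     -> (u'birth date|1981|1|1', u' rest', u'name = ')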
def extractLink(text, kill_tags=False):
'''
Given wikitext this checks for the first wikilink
Limitations: Only identifies the first wikilink
kill_tags also strips out (but doesn't keep) any tags
(i.e. <bla> something </bla>)
@output: (plain_text, link)
'''
if kill_tags:
while '<' in text and '>' in text:
tag, dummy, dummy = findUnit(text, u'<', u'>')
endtag = u'</' + tag + u'>'
tag = u'<' + tag + u'>'
if endtag in text:
dummy, remainder, lead_in = findUnit(text, tag, endtag)
else:
dummy, remainder, lead_in = findUnit(text, u'<', u'>')
text = lead_in.strip() + ' ' + remainder.strip()
if u'[[' not in text:
return (text.strip(), '')
interior, dummy, dummy = findUnit(text, u'[[', u']]')
wikilink = u'[[' + interior + u']]'
pos = text.find(wikilink)
pre = text[:pos]
post = text[pos + len(wikilink):]
center = ''
link = ''
# determine which type of link we are dealing with see meta:Help:Links#Wikilinks for details
if u'|' not in interior: # [[ab]] -> ('ab', 'ab')
center = interior
link = interior.strip()
else:
pos = interior.find(u'|')
link = interior[:pos]
center = interior[pos + 1:]
if len(link) == 0: # [[|ab]] -> ('ab', 'ab')
link = center
elif len(center) > 0: # [[a|b]] -> ('b', 'a')
pass
else:
center = link
if u':' in center: # [[a:b|]] -> ('b', 'a:b')
center = center[center.find(u':') + 1:]
if u', ' in center: # [[a, b|]] -> ('a', 'a, b')
center = center.split(u', ')[0]
if u'(' in center: # [[a (b)|]] -> ('a', 'a (b)')
pos = center.find(u'(')
if u')' in center[pos:]:
center = center[:pos]
if center.endswith(' '): # the first space separating text and bracket is ignored
center = center[:-1]
return ((pre + center + post).strip(), link.strip())
def extractAllLinks(text, kill_tags=False):
'''
Given wikitext this checks for any wikilinks
@output: (plain_text, list of link)
'''
wikilinks = []
text, link = extractLink(text, kill_tags=kill_tags)
while link:
wikilinks.append(link)
text, link = extractLink(text, kill_tags=kill_tags)
return text, wikilinks
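# Usage sketch (wikitext strings are illustrative only):
#   extractLink(u'born in [[Paris|the French capital]] in 1900')
#     -> (u'born in the French capital in 1900', u'Paris')
#   extractAllLinks(u'[[A]] and [[B]]')
#     -> (u'A and B', [u'A', u'B'])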
def latLonFromCoord(coord):
'''
returns lat, lon as decimals based on string using the Coord-template
Does not deal with {{coord|12.123|12.123|...}}
@output (lat,lon) as float
'''
prefixes = (u'{{coord|', u'{{coord |', u'{{coor|', u'{{coor |', u'{{location|', u'{{location |')
if not coord.lower().startswith(prefixes):
print 'incorrectly formatted coordinate (prefix): %s' % coord
return None
p = coord.split('|')
if is_number(p[1]): #
try:
if is_number(p[2].rstrip('}')): #
if len(p) == 3: # {{coord|12.123|12.123}} implicitly N, E
lat = float(p[1].strip())
lon = float(p[2].strip().rstrip('}'))
lat_sign = u'N'
lon_sign = u'E'
elif is_number(p[3]): # {{coord|12|12|12.123|N|12|12|12.123|E|...}}
lat_d, lat_m, lat_s, lat_sign = float(p[1].strip()), float(p[2].strip()), float(p[3].strip()), p[4].strip().rstrip('}')
lon_d, lon_m, lon_s, lon_sign = float(p[5].strip()), float(p[6].strip()), float(p[7].strip()), p[8].strip().rstrip('}')
lat = lat_d + lat_m / 60 + lat_s / 3600
lon = lon_d + lon_m / 60 + lon_s / 3600
else: # {{coord|12|12.123|N|12|12.123|E|...}}
lat_d, lat_m, lat_sign = float(p[1].strip()), float(p[2].strip()), p[3].strip().rstrip('}')
lon_d, lon_m, lon_sign = float(p[4].strip()), float(p[5].strip()), p[6].strip().rstrip('}')
lat = lat_d + lat_m / 60
lon = lon_d + lon_m / 60
else: # {{coord|12.123|N|12.123|E|...}}
lat, lat_sign = float(p[1].strip()), p[2].strip().rstrip('}')
lon, lon_sign = float(p[3].strip()), p[4].strip().rstrip('}')
if lat_sign == u'N':
lat_sign = 1
elif lat_sign == u'S':
lat_sign = -1
else:
print 'incorrectly formatted
= 1269
_strings['device name'] = 1271
_strings['use password protection'] = 1272
_strings['airplay'] = 1273
_strings['airtunes'] = 1274
_strings['filter %s'] = 1275
_strings['custom audio device'] = 1300
_strings['custom passthrough device'] = 1301
_strings['temperature'] = 1375
_strings['pressure'] = 1376
_strings['proximity'] = 1377
_strings['intensity'] = 1378
_strings['ragged'] = 1379
_strings['very'] = 1380
_strings['extreme'] = 1381
_strings['whirls'] = 1382
_strings['broken'] = 1384
_strings['tornado'] = 1385
_strings['tropical'] = 1386
_strings['hurricane'] = 1387
_strings['cold'] = 1388
_strings['windy'] = 1389
_strings['settings'] = 1390
_strings['breeze'] = 1391
_strings['gentle'] = 1392
_strings['high wind, near gale'] = 1393
_strings['severe'] = 1394
_strings['violent'] = 1395
_strings['drifting'] = 1396
_strings['and'] = 1397
_strings['freezing'] = 1398
_strings['late'] = 1399
_strings['isolated'] = 1400
_strings['thundershowers'] = 1401
_strings['thunder'] = 1402
_strings['sun'] = 1403
_strings['heavy'] = 1404
_strings['in'] = 1405
_strings['the'] = 1406
_strings['vicinity'] = 1407
_strings['ice'] = 1408
_strings['crystals'] = 1409
_strings['calm'] = 1410
_strings['with'] = 1411
_strings['windy'] = 1412
_strings['patches'] = 1413
_strings['thunderstorm'] = 1414
_strings['drizzle'] = 1415
_strings['foggy'] = 1416
_strings['grains'] = 1417
_strings['thunderstorms'] = 1418
_strings['shallow'] = 1419
_strings['moderate'] = 1420
_strings['windy'] = 1422
_strings['mist'] = 1423
_strings['overcast'] = 1424
_strings['pellets'] = 1425
_strings['hail'] = 1426
_strings['smoke'] = 1427
_strings['volcanic'] = 1428
_strings['ash'] = 1429
_strings['widespread'] = 1430
_strings['dust'] = 1431
_strings['sand'] = 1432
_strings['spray'] = 1433
_strings['whirls'] = 1434
_strings['sandstorm'] = 1435
_strings['blowing'] = 1436
_strings['pellet'] = 1437
_strings['small'] = 1438
_strings['and'] = 1439
_strings['sleet'] = 1440
_strings['with'] = 1441
_strings['chance'] = 1442
_strings['of'] = 1443
_strings['funnel'] = 1444
_strings['cloud'] = 1445
_strings['unknown'] = 1446
_strings['precipitation'] = 1448
_strings['partial'] = 1449
_strings['put display to sleep when idle'] = 1450
_strings['runtime'] = 2050
_strings['empty list'] = 2080
_strings['went back to parent list because the active list has been emptied'] = 2081
_strings['home'] = 10000
_strings['programs'] = 10001
_strings['pictures'] = 10002
_strings['file manager'] = 10003
_strings['settings'] = 10004
_strings['music'] = 10005
_strings['videos'] = 10006
_strings['system information'] = 10007
_strings['settings - general'] = 10008
_strings['settings - screen'] = 10009
_strings['settings - pictures'] = 10012
_strings['settings - programs'] = 10013
_strings['settings - weather'] = 10014
_strings['settings - music'] = 10015
_strings['settings - system'] = 10016
_strings['settings - videos'] = 10017
_strings['settings - network'] = 10018
_strings['settings - appearance'] = 10019
_strings['scripts'] = 10020
_strings['videos'] = 10025
_strings['login screen'] = 10029
_strings['settings - profiles'] = 10034
_strings['reset'] = 10035
_strings['basic'] = 10036
_strings['standard'] = 10037
_strings['advanced'] = 10038
_strings['expert'] = 10039
_strings['add-on browser'] = 10040
_strings['reset above settings to default'] = 10041
_strings['are you sure you want to reset the settings in this category?'] = 10042
_strings['help'] = 10043
_strings['no help available'] = 10044
_strings['resets all the visible settings to their default values.'] = 10045
_strings['no categories available'] = 10046
_strings['try changing the setting level to see additional categories and settings.'] = 10047
_strings['progress dialogue'] = 10101
_strings['file browser'] = 10126
_strings['network setup'] = 10128
_strings['media source'] = 10129
_strings['profile settings'] = 10130
_strings['lock settings'] = 10131
_strings['content settings'] = 10132
_strings['favourites'] = 10134
_strings['smart playlist editor'] = 10136
_strings['smart playlist rule editor'] = 10137
_strings['add-on settings'] = 10140
_strings['looking for subtitles...'] = 10210
_strings['looking for or caching subtitles...'] = 10211
_strings['terminating'] = 10212
_strings['buffering'] = 10213
_strings['opening stream'] = 10214
_strings['playlist editor'] = 10503
_strings['top 100 songs'] = 10504
_strings['top 100 albums'] = 10505
_strings['programs'] = 10506
_strings['configuration'] = 10507
_strings['weather forecast'] = 10508
_strings['network gaming'] = 10509
_strings['extensions'] = 10510
_strings['system info'] = 10511
_strings['music - library'] = 10516
_strings['select dialogue'] = 12000
_strings['dialogue ok'] = 12002
_strings['fullscreen video'] = 12005
_strings['audio visualisation'] = 12006
_strings['file stacking dialogue'] = 12008
_strings['rebuild index...'] = 12009
_strings['return to music window'] = 12010
_strings['return to videos window'] = 12011
_strings['resume from %s'] = 12022
_strings['0'] = 12310
_strings['1'] = 12311
_strings['2'] = 12312
_strings['3'] = 12313
_strings['4'] = 12314
_strings['5'] = 12315
_strings['6'] = 12316
_strings['7'] = 12317
_strings['8'] = 12318
_strings['9'] = 12319
_strings['c'] = 12320
_strings['*'] = 12322
_strings['locked! enter code...'] = 12325
_strings['enter password'] = <PASSWORD>
_strings['enter master code'] = 12327
_strings['enter unlock code'] = 12328
_strings['enter gamepad button combo and'] = 12330
_strings['set lock'] = 12332
_strings['unlock'] = 12333
_strings['reset lock'] = 12334
_strings['remove lock'] = 12335
_strings['numeric password'] = <PASSWORD>
_strings['gamepad button combo'] = 12338
_strings['full-text password'] = <PASSWORD>
_strings['enter new password'] = <PASSWORD>
_strings['incorrect password,'] = <PASSWORD>
_strings['retries left '] = 12343
_strings['passwords entered did not match.'] = 12344
_strings['access denied'] = 12345
_strings['password retry limit exceeded.'] = 12346
_strings['the system will now power down.'] = 12347
_strings['item locked'] = 12348
_strings['reactivate lock'] = 12353
_strings['change lock'] = 12356
_strings['source lock'] = 12357
_strings['password entry was blank. try again.'] = 12358
_strings['master lock'] = 12360
_strings['settings & file manager'] = 12373
_strings['amount of time to display each image'] = 12378
_strings['use pan and zoom effects'] = 12379
_strings['system uptime'] = 12390
_strings['minutes'] = 12391
_strings['hours'] = 12392
_strings['days'] = 12393
_strings['total uptime'] = 12394
_strings['battery level'] = 12395
_strings['weather'] = 12600
_strings['screensaver'] = 12900
_strings['fullscreen osd'] = 12901
_strings['system'] = 13000
_strings['video only'] = 13002
_strings['- delay'] = 13003
_strings['- minimum file duration'] = 13004
_strings['shutdown'] = 13005
_strings['shutdown function'] = 13008
_strings['quit'] = 13009
_strings['hibernate'] = 13010
_strings['suspend'] = 13011
_strings['exit'] = 13012
_strings['reboot'] = 13013
_strings['minimise'] = 13014
_strings['power button action'] = 13015
_strings['inhibit idle shutdown'] = 13017
_strings['allow idle shutdown'] = 13018
_strings['is another session active, perhaps over ssh?'] = 13020
_strings['joystick plugged'] = 13024
_strings['joystick unplugged'] = 13025
_strings['waiting for network to connect...'] = 13028
_strings['waiting for server to wake up...'] = 13030
_strings['extended wait for server to wake up...'] = 13031
_strings['waiting for services to launch...'] = 13032
_strings['updated for %s'] = 13034
_strings['found for %s'] = 13035
_strings['failed for %s'] = 13036
_strings['running low on battery'] = 13050
_strings['flicker filter'] = 13100
_strings['let driver choose (requires restart)'] = 13101
_strings['disabled'] = 13106
_strings['enabled during video playback'] = 13107
_strings['always enabled'] = 13108
_strings['would you like to keep this change?'] = 13111
_strings['high quality upscaling'] = 13112
_strings['disabled'] = 13113
_strings['enabled for sd content'] = 13114
_strings['always enabled'] = 13115
_strings['upscaling method'] = 13116
_strings['bicubic'] = 13117
_strings['lanczos'] = 13118
_strings['sinc'] = 13119
_strings['vdpau'] = 13120
_strings['keep skin?'] = 13123
_strings['blank other displays'] = 13130
_strings['disabled'] = 13131
_strings['blank displays'] = 13132
_strings['active connections detected!'] = 13140
_strings['change apple remote mode?'] = 13144
_strings['subnet mask'] = 13159
_strings['gateway'] = 13160
_strings['primary dns'] = 13161
_strings['initialise failed'] = 13162
_strings['never'] = 13170
_strings['immediately'] = 13171
_strings['after %i secs'] = 13172
_strings['hdd install date:'] = 13173
_strings['hdd power cycle count:'] = 13174
_strings['profiles'] = 13200
_strings['delete profile "%s"?'] = 13201
_strings['last loaded profile:'] = 13204
_strings['unknown'] = 13205
_strings['overwrite'] = 13206
_strings['alarm clock'] = 13208
_strings['alarm clock interval (in minutes)'] = 13209
_strings['started, alarm in %im'] = 13210
_strings['alarm!'] = 13211
_strings['cancelled with %im%is left'] = 13212
_strings['%2.0fm'] = 13213
_strings['%2.0fs'] = 13214
_strings['search for subtitles in rars'] = 13249
_strings['browse for subtitle...'] = 13250
_strings['move item'] = 13251
_strings['move item here'] = 13252
_strings['cancel move'] = 13253
_strings['hardware:'] = 13270
_strings['connected, but no dns is available.'] = 13274
_strings['dvd-rom'] = 13276
_strings['storage'] = 13277
_strings['default'] = 13278
_strings['network'] = 13279
_strings['video'] = 13280
_strings['hardware'] = 13281
_strings['operating system:'] = 13283
_strings['cpu speed:'] = 13284
_strings['video encoder:'] = 13286
_strings['screen resolution:'] = 13287
_strings['a/v cable:'] = 13292
_strings['dvd region:'] = 13294
_strings['internet:'] = 13295
_strings['connected'] = 13296
_strings['not connected. check network settings.'] = 13297
_strings['target temperature'] = 13299
_strings['fan speed'] = 13300
_strings['auto temperature control'] = 13301
_strings['fan speed override'] = 13302
_strings['fonts'] = 13303
_strings['enable flipping bi-directional strings'] = 13304
_strings['show rss news feeds'] = 13305
_strings['show parent folder items'] = 13306
_strings['track naming template'] = 13307
_strings['zoom effect'] = 13310
_strings['float effect'] = 13311
_strings['black bar reduction'] = 13312
_strings['restart'] = 13313
_strings['crossfade between songs'] = 13314
_strings['regenerate thumbnails'] = 13315
_strings['recursive thumbnails'] = 13316
_strings['view slideshow'] = 13317
_strings['recursive slideshow'] = 13318
_strings['randomise'] = 13319
_strings['stereo'] = 13320
_strings['left only'] = 13321
_strings['right only'] = 13322
_strings['background transparency'] = 13324
_strings['foreground transparency'] = 13325
_strings['a/v delay'] = 13326
_strings['%s not found'] = 13328
_strings['error opening %s'] = 13329
_strings['unable to load %s'] = 13330
_strings['error: out of memory'] = 13331
_strings['move up'] = 13332
_strings['move down'] = 13333
_strings['edit label'] = 13334
_strings['make default'] = 13335
_strings['remove button'] = 13336
_strings['leave as is'] = 13340
_strings['green'] = 13341
_strings['orange'] = 13342
_strings['red'] = 13343
_strings['cycle'] = 13344
_strings['switch led off on playback'] = 13345
_strings['movie information'] = 13346
_strings['queue item'] = 13347
_strings['search imdb...'] = 13348
_strings['scan for new content'] = 13349
_strings['current playlist'] = 13350
_strings['album information'] = 13351
_strings['scan item to library'] = 13352
_strings['stop scanning'] = 13353
_strings['render method'] = 13354
_strings['low quality pixel shader'] = 13355
_strings['hardware overlays'] = 13356
_strings['high quality pixel shader'] = 13357
_strings['play item'] = 13358
_strings['set artist thumb'] = 13359
_strings['automatically generate thumbnails'] = 13360
_strings['enable voice'] = 13361
_strings['enable device'] = 13375
_strings['volume'] = 13376
_strings['default view mode'] = 13377
_strings['default brightness'] = 13378
_strings['default contrast'] = 13379
_strings['default gamma'] = 13380
_strings['resume video'] = 13381
_strings['voice mask - port 1'] = 13382
_strings['voice mask - port 2'] = 13383
_strings['voice mask - port 3'] = 13384
_strings['voice mask - port 4'] = 13385
_strings['use time based seeking'] = 13386
_strings['track naming template - right'] = 13387
_strings['preset'] = 13388
_strings['there are no presets available\nfor this visualisation'] = 13389
_strings['there are no settings available\nfor this visualisation'] = 13390
_strings['use visualisation if playing audio'] = 13392
_strings['calculate size'] = 13393
_strings['calculating folder size'] = 13394
_strings['video settings'] = 13395
_strings['audio and subtitle settings'] = 13396
_strings['enable subtitles'] = 13397
_strings['shortcuts'] = 13398
_strings['crossfade between songs on the same album'] = 13400
_strings['browse for %s'] = 13401
_strings['show track position'] = 13402
_strings['clear default'] = 13403
_strings['resume'] = 13404
_strings['get thumb'] = 13405
_strings['picture information'] = 13406
_strings['%s presets'] = 13407
_strings['(imdb user rating)'] = 13408
_strings['top 250'] = 13409
_strings['tune in on last.fm'] = 13410
_strings['minimum fan speed'] = 13411
_strings['play from here'] = 13412
_strings['downloading'] = 13413
_strings['render method'] = 13415
_strings['auto detect'] = 13416
_strings['basic shaders (arb)'] = 13417
_strings['advanced shaders (glsl)'] = 13418
_strings['software'] = 13419
_strings['remove safely'] = 13420
_strings['vdpau'] = 13421
_strings['start slideshow here']
import os
from os.path import join as pjoin
import scipy.io as sio
import numpy as np  # np is used directly below (np.random.default_rng, np.ceil, ...)
from itertools import permutations, product
from torch.utils.data import DataLoader, IterableDataset
from helpers.preprocessing import *
import itertools
class AudioDataset(IterableDataset):
def __init__(self, data_dir, rir_path, is_train=True, train_ratio=0.8, perm_skip=100, seg_len=100, seed=2021):
super(AudioDataset, self).__init__()
self.is_train = is_train
self.seg_len = seg_len
self.rng = np.random.default_rng(seed)
self.perm_skip = perm_skip
# Find wavs and load rirs
self.wav_list = [pjoin(root, file) for root, dirs, files in os.walk(data_dir) for file in files
if file.endswith('.wav')]
rirs_mat = sio.loadmat(rir_path)
self.H = rirs_mat['H']
self.zone_dict = rirs_mat['zone_dict']
# Split train and validation
train_len = int(np.ceil(train_ratio * len(self.wav_list)))
self.wav_list = self.wav_list[:train_len] if is_train else self.wav_list[train_len:]
train_len = int(np.ceil(train_ratio * self.H.shape[3]))
self.H = self.H[:, :, :, :train_len] if is_train else self.H[:, :, :, train_len:]
self.zone_dict = self.zone_dict[:train_len, :] if is_train else self.zone_dict[train_len:, :]
# Generate Permutations
self.perm = permutations(list(range(len(self.wav_list))), self.H.shape[2])
# Initialize generator
self.mini_batch_gen = TrainMiniBatchGenerator(self.wav_list, self.perm, self.H,
self.zone_dict, self.rng, self.perm_skip, self.seg_len)
def __iter__(self):
return iter(self.mini_batch_gen)
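# Usage sketch: the dataset is an IterableDataset, so it can be fed straight to
# a DataLoader. The paths and batch size below are placeholders, not values
# used elsewhere in this project.
#   train_set = AudioDataset('data/train', 'rirs.mat', is_train=True)
#   loader = DataLoader(train_set, batch_size=8)
#   for data, tags in loader:
#       ...  # default collation stacks the per-segment arrays into batches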
class TrainMiniBatchGenerator:
def __init__(self, wav_list, perm, H, zone_dict, rng, perm_skip=100, seg_len=100):
self.wav_list = wav_list
self.perm = iter(perm)
self.H = H
self.zone_dict = zone_dict
self.seg_len = seg_len
self.rng = rng
self.perm_skip = perm_skip
self.data = 0
self.tags = 0
def __iter__(self):
self.next_file()
self.seg_idx = 0
self.seg_num = self.data.shape[1] // self.seg_len
return self
def __next__(self):
"""
return data, tag
fixed data\tag size!
cycle the end of data from the start
"""
if self.seg_idx >= self.seg_num:
self.next_file()
self.seg_idx = 0
self.seg_num = self.data.shape[1] // self.seg_len
seg_start = self.seg_idx * self.seg_len
seg_end = (self.seg_idx + 1) * self.seg_len
self.seg_idx += 1
return (np.moveaxis(self.data[:, seg_start:seg_end, :], -1, 0),
self.tags[:, seg_start:seg_end])
def next_file(self):
# choose files and rirs
rand_next(self.perm, self.perm_skip, self.rng)
cur_sample_idx = next(self.perm)
rir_idx = self.rng.integers(low=0, high=self.H.shape[3])
# load files
speakers = load_example(cur_sample_idx, self.wav_list, self.H, rir_idx)
# create tags and data
self.tags = create_tags(speakers, self.zone_dict[rir_idx, :])
self.data = create_example(awgn(speakers))
# loop back to fill seg len
remainder = self.data.shape[1] % self.seg_len
if remainder > 0:
cycle_len = self.seg_len - remainder
self.data = np.concatenate((self.data, self.data[:, :cycle_len, :]), axis=1)
self.tags = np.concatenate((self.tags, self.tags[:, :cycle_len]), axis=1)
class TestAudioDataset(IterableDataset):
def __init__(self, data_dir, rir_path, perm_skip=100, seg_len=100, seed=2022):
super(TestAudioDataset, self).__init__()
self.seg_len = seg_len
self.rng = np.random.default_rng(seed)
self.perm_skip = perm_skip
self.wav_list = [pjoin(root, file) for root, dirs, files in os.walk(data_dir) for file in files
if file.endswith('.wav')]
self.file_names = [os.path.basename(file) for file in self.wav_list]
rirs_mat = sio.loadmat(rir_path)
self.H = rirs_mat['H']
self.zone_dict = rirs_mat['zone_dict']
self.perm = permutations(list(range(len(self.wav_list))), self.H.shape[2])
self.mini_batch_gen = TestMiniBatchGenerator(self.wav_list, self.file_names, self.perm, self.H,
self.zone_dict, self.rng, self.perm_skip, self.seg_len)
def __iter__(self):
return iter(self.mini_batch_gen)
class TestMiniBatchGenerator:
"""
return data, file_name, EOF,
and use with batch_size = 1 or None (whatever work)
this way the data can vary in length
"""
def __init__(self, wav_list, file_names, perm, H, zone_dict, rng, perm_skip=100, seg_len=100):
self.wav_list = wav_list
self.file_names = file_names
self.perm = iter(perm)
self.H = H
self.zone_dict = zone_dict
self.seg_len = seg_len
self.rng = rng
self.perm_skip = perm_skip
self.current_files = {'file_names': [], 'zone_dict': [], 'speakers': []}
self.data = 0
def __iter__(self):
self.next_file()
self.seg_idx = 0
self.seg_num = int(np.ceil(self.data.shape[1] / self.seg_len))
self.EOF = False
return self
def __next__(self):
if self.seg_idx >= self.seg_num:
self.next_file()
self.seg_idx = 0
self.seg_num = int(np.ceil(self.data.shape[1] / self.seg_len))
self.EOF = False
seg_start = self.seg_idx * self.seg_len
seg_end = (self.seg_idx + 1) * self.seg_len
self.seg_idx += 1
data_seg = np.moveaxis(self.data[:, seg_start:seg_end, :], -1, 0)
if seg_end >= self.data.shape[1]:
data_seg = np.moveaxis(self.data[:, seg_start:, :], -1, 0)
self.EOF = True
return data_seg, self.current_files, self.EOF
def next_file(self):
# choose files and rirs
rand_next(self.perm, self.perm_skip, self.rng)
cur_sample_idx = next(self.perm)
rir_idx = self.rng.integers(low=0, high=self.H.shape[3])
self.current_files['file_names'] = [self.file_names[i] for i in cur_sample_idx]
self.current_files['zone_dict'] = self.zone_dict[rir_idx, :]
# load files
speakers = load_example(cur_sample_idx, self.wav_list, self.H, rir_idx)
speakers = awgn(speakers)
self.current_files['speakers'] = [speakers[i][0] for i in range(len(speakers))]
# create data
self.data = create_example(speakers)
# pad to fill seg len
remainder = self.data.shape[1] % self.seg_len
if remainder > 0:
pad_len = self.seg_len - remainder
self.data = np.pad(self.data, ((0, 0), (0, pad_len), (0, 0)), 'constant')
class RealAudioDataset(IterableDataset):
def __init__(self, data_dir, zone_num, sp_num, is_train=True,
train_ratio=0.8, perm_skip=0, seg_len=100, seed=2021):
super(RealAudioDataset, self).__init__()
self.is_train = is_train
self.seg_len = seg_len
self.rng = np.random.default_rng(seed)
self.perm_skip = perm_skip
# Find wavs and load rirs
# Train/Zone1/Sentence1/mic1.wav
self.wav_list = [[[pjoin(data_dir, zone, sentence, mic) for mic in os.listdir(pjoin(data_dir, zone, sentence))]
for sentence in os.listdir(pjoin(data_dir, zone))]
for zone in os.listdir(data_dir)]
# Sort the Files so Mic9 is first and then Mic1,2,3..,8
for zone in self.wav_list:
for s_id, sentence in enumerate(zone):
zone[s_id].sort()
zone[s_id].append(zone[s_id].pop(0))
# Split train and validation
for zone_idx, zone in enumerate(self.wav_list):
train_len = int(np.ceil(train_ratio * len(zone)))
if is_train:
self.wav_list[zone_idx] = self.wav_list[zone_idx][:train_len]
else:
self.wav_list[zone_idx] = self.wav_list[zone_idx][train_len:]
# Generate Permutations
max_len = max([len(zone) for zone in self.wav_list])
self.perm = product(range(max_len), repeat=sp_num)
# Initialize generator
self.mini_batch_gen = RealTrainMiniBatchGenerator(self.wav_list, self.perm, sp_num, zone_num,
self.rng, self.perm_skip, self.seg_len)
def __iter__(self):
return iter(self.mini_batch_gen)
class RealTrainMiniBatchGenerator:
def __init__(self, wav_list, perm, sp_num, zone_num, rng, perm_skip=0, seg_len=100):
self.wav_list = wav_list
self.perm = iter(perm)
self.seg_len = seg_len
self.zone_num = zone_num
self.sp_num = sp_num
self.rng = rng
self.perm_skip = perm_skip
self.data = 0
self.tags = 0
def __iter__(self):
self.next_file()
self.seg_idx = 0
self.seg_num = self.data.shape[1] // self.seg_len
return self
def __next__(self):
"""
return data, tag
fixed data\tag size!
cycle the end of data from the start
"""
if self.seg_idx >= self.seg_num:
self.next_file()
self.seg_idx = 0
self.seg_num = self.data.shape[1] // self.seg_len
seg_start = self.seg_idx * self.seg_len
seg_end = (self.seg_idx + 1) * self.seg_len
self.seg_idx += 1
return (np.moveaxis(self.data[:, seg_start:seg_end, :], -1, 0),
self.tags[:, seg_start:seg_end])
def next_file(self):
# choose files and rirs
cur_sample_idx = next(self.perm)
rand_next(self.perm, self.perm_skip, self.rng)
zone_dict = self.rng.choice(self.zone_num, size=self.sp_num, replace=False)
# load files
speakers = load_example(cur_sample_idx, self.wav_list, zone_dict=zone_dict)
# create tags and data
self.tags = create_tags(speakers, zone_dict)
self.data = create_example(awgn(rand_sir(speakers, 3)))
# loop back to fill seg len
remainder = self.data.shape[1] % self.seg_len
if remainder > 0:
cycle_len = self.seg_len - remainder
self.data = np.concatenate((self.data, self.data[:, :cycle_len, :]), axis=1)
self.tags = np.concatenate((self.tags, self.tags[:, :cycle_len]), axis=1)
class RealTestAudioDataset(IterableDataset):
def __init__(self, data_dir, zone_num, sp_num, perm_skip=0, seg_len=100, seed=2022):
super(RealTestAudioDataset, self).__init__()
self.seg_len = seg_len
self.rng = np.random.default_rng(seed)
self.perm_skip = perm_skip
self.wav_list = [[[pjoin(data_dir, zone, sentence, mic) for mic in os.listdir(pjoin(data_dir, zone, sentence))]
for sentence in os.listdir(pjoin(data_dir, zone))]
for zone in os.listdir(data_dir)]
# Sort the Files so Mic9 is first and then Mic1,2,3..,8
for zone in self.wav_list:
for s_id, sentence in enumerate(zone):
zone[s_id].sort()
zone[s_id].append(zone[s_id].pop(0))
self.file_names = [[sentence[0].split(os.sep)[-2] for sentence in zone] for zone in self.wav_list]
max_len = max([len(zone) for zone in self.wav_list])
self.perm = product(range(max_len), repeat=sp_num)
self.mini_batch_gen = RealTestMiniBatchGenerator(self.wav_list, self.file_names, self.perm, sp_num,
zone_num, self.rng, self.perm_skip, self.seg_len)
def __iter__(self):
return iter(self.mini_batch_gen)
class RealTestMiniBatchGenerator:
"""
return data, file_name, EOF,
and use with batch_size = 1 or None (whatever work)
this way the data can vary in length
"""
def __init__(self, wav_list, file_names, perm, sp_num, zone_num, rng, perm_skip=0, seg_len=100):
self.wav_list = wav_list
self.file_names = file_names
self.perm = iter(perm)
self.zone_num = zone_num
self.sp_num = sp_num
self.seg_len = seg_len
self.rng = rng
self.perm_skip = perm_skip
self.current_files = {'file_names': [], 'zone_dict': [], 'speakers': []}
self.data = 0
def __iter__(self):
self.next_file()
self.seg_idx = 0
self.seg_num = int(np.ceil(self.data.shape[1] / self.seg_len))
self.EOF = False
return self
def __next__(self):
if self.seg_idx >= self.seg_num:
self.next_file()
self.seg_idx = 0
self.seg_num = int(np.ceil(self.data.shape[1] / self.seg_len))
self.EOF = False
seg_start = self.seg_idx * self.seg_len
seg_end = (self.seg_idx + 1) * self.seg_len
self.seg_idx += 1
data_seg = np.moveaxis(self.data[:, seg_start:seg_end, :], -1, 0)
if seg_end >= self.data.shape[1]:
data_seg = np.moveaxis(self.data[:, seg_start:, :], -1, 0)
self.EOF = True
return data_seg, self.current_files, self.EOF
def next_file(self):
# choose files and rirs
cur_sample_idx = next(self.perm)
rand_next(self.perm, self.perm_skip, self.rng)
zone_dict = self.rng.choice(self.zone_num, size=self.sp_num, replace=False)
pick_sentences = lambda z, s: self.file_names[zone_dict[z]][s % len(self.file_names[zone_dict[z]])]
self.current_files['file_names'] = [pick_sentences(zone_id, sentence_id)
for zone_id, sentence_id in enumerate(cur_sample_idx)]
self.current_files['zone_dict'] = zone_dict
# load files
speakers = load_example(cur_sample_idx, self.wav_list, zone_dict=zone_dict)
speakers = rand_sir(speakers, 4)
self.current_files['speakers'] = [speakers[i][0] for i in range(len(speakers))]
# create data
self.data = create_example(speakers)
# pad to fill seg len
remainder = self.data.shape[1] % self.seg_len
if remainder > 0:
pad_len = self.seg_len - remainder
self.data = np.pad(self.data, ((0, 0), (0, pad_len), (0, 0)), 'constant')
def rand_next(iterator, avg_next_num, rng):
if avg_next_num > 0:
n = rng.integers(low=0, high=avg_next_num) + avg_next_num // 2
next(itertools.islice(iterator, n, n), None)
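# Note: itertools.islice(iterator, n, n) yields nothing but consumes n items
# when advanced, so when avg_next_num > 0 rand_next() skips the iterator ahead
# by a random number of steps in [avg_next_num // 2, avg_next_num + avg_next_num // 2)
# without materialising the skipped permutations.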
if __name__ == '__main__':
DIR =
all plugin devices.")
for dev in indigo.devices.itervalues("self"):
try:
dev.updateStateImageOnServer(indigo.kStateImageSel.SensorOff)
indigo.device.enable(dev, value=False)
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"Exception when trying to kill all comms. Error: {s}. See plugin log for more "
u"information.".format(s=sub_error))
# =============================================================================
def commsUnkillAll(self):
"""
Establish communication for all disabled plugin devices
commsUnkillAll() sets the enabled status of all plugin devices to true.
-----
"""
self.logger.info(u"Starting communication with all plugin devices.")
for dev in indigo.devices.itervalues("self"):
try:
indigo.device.enable(dev, value=True)
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"Exception when trying to kill all comms. Error: {s}. See plugin log for more "
u"information.".format(s=sub_error))
# =============================================================================
def csv_check_unique(self):
"""
Check that no CSV file title is referenced by more than one CSV Engine
device; log a warning for any title that is.
:return:
"""
self.logger.debug(u"Checking CSV references.")
titles = {}
# Iterate through CSV Engine devices
for dev in indigo.devices.iter(filter='self'):
if dev.deviceTypeId == 'csvEngine':
# Get the list of CSV file titles
column_dict = ast.literal_eval(dev.pluginProps['columnDict'])
# Build a dictionary where the file title is the key and the value is a list of
# devices that point to that title for a source.
for key in column_dict.keys():
title = column_dict[key][0]
if title not in titles.keys():
titles[title] = [dev.name]
else:
titles[title].append(dev.name)
# Iterate through the dict of titles
for title_name in titles.keys():
if len(titles[title_name]) > 1:
self.logger.warning(u"Audit CSV data files: CSV filename [{tname}] referenced by more than one CSV "
u"Engine device: {tnum}".format(tname=title_name, tnum=titles[title_name]))
# =============================================================================
def csv_item_add(self, values_dict=None, type_id="", dev_id=0):
"""
Add new item to CSV engine
The csv_item_add() method is called when the user clicks on the 'Add Item'
button in the CSV Engine config dialog.
-----
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int dev_id:
"""
dev = indigo.devices[int(dev_id)]
self.logger.threaddebug(u"[{dn}] csv item add values_dict: {vd}".format(dn=dev.name, vd=dict(values_dict)))
error_msg_dict = indigo.Dict()
try:
# Convert column_dict from a string to a literal dict
column_dict = ast.literal_eval(values_dict['columnDict'])
lister = [0]
num_lister = []
# ================================ Validation =================================
# Add data item validation. Will not allow add until all three conditions are
# met.
if values_dict['addValue'] == "":
error_msg_dict['addValue'] = u"Please enter a title value for your CSV data element."
error_msg_dict['showAlertText'] = u"Title Error.\n\nA title is required for each CSV data element."
return values_dict, error_msg_dict
if values_dict['addSource'] == "":
error_msg_dict['addSource'] = u"Please select a device or variable as a source for your CSV data " \
u"element."
error_msg_dict['showAlertText'] = u"ID Error.\n\nA source is required for each CSV data element."
return values_dict, error_msg_dict
if values_dict['addState'] == "":
error_msg_dict['addState'] = u"Please select a value source for your CSV data element."
error_msg_dict['showAlertText'] = u"Data Error.\n\nA data value is required for each CSV data element."
return values_dict, error_msg_dict
# Create a list of existing keys with the 'k' lopped off
[lister.append(key.lstrip('k')) for key in sorted(column_dict.keys())]
# Change each value to an integer for evaluation
[num_lister.append(int(item)) for item in lister]
# Generate the next key
next_key = u'k{nk}'.format(nk=int(max(num_lister)) + 1)
# Save the tuple of properties
column_dict[next_key] = (values_dict['addValue'], values_dict['addSource'], values_dict['addState'])
# Remove any empty entries as they're not going to do any good anyway.
new_dict = {}
for k, v in column_dict.iteritems():
if v != (u"", u"", u"") and v != ('None', 'None', 'None'):
new_dict[k] = v
else:
self.logger.info(u"Pruning CSV Engine.")
# Convert column_dict back to a string and prepare it for storage.
values_dict['columnDict'] = str(new_dict)
except AttributeError as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"[{name}] Error adding CSV item: {s}. See plugin log for more "
u"information.".format(name=dev.name, s=sub_error))
# If the appropriate CSV file doesn't exist, create it and write the header line.
file_name = values_dict['addValue']
full_path = "{path}{fn}.csv".format(path=self.pluginPrefs['dataPath'], fn=file_name.encode("utf-8"))
if not os.path.isfile(full_path):
with open(full_path, 'w') as outfile:
outfile.write(u"{t},{fn}\n".format(t='Timestamp', fn=file_name).encode("utf-8"))
# Wipe the field values clean for the next element to be added.
for key in ('addSourceFilter', 'editSourceFilter'):
values_dict[key] = "A"
for key in ('addValue', 'addSource', 'addState'):
values_dict[key] = u""
return values_dict, error_msg_dict
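# For reference, 'columnDict' is stored as the string form of a dict such as
# {'k1': ('Outside Temp', 'some source ID', 'some state'), ...}, i.e.
# key -> (CSV title, data source, source state); the example values here are
# illustrative only.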
# =============================================================================
def csv_item_delete(self, values_dict=None, type_id="", dev_id=0):
"""
Deletes items from the CSV Engine configuration dialog
The csv_item_delete() method is called when the user clicks on the "Delete
Item" button in the CSV Engine config dialog.
-----
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int dev_id:
"""
dev = indigo.devices[int(dev_id)]
self.logger.threaddebug(u"[{name}] csv item delete "
u"values_dict: {vd}".format(name=dev.name, vd=dict(values_dict)))
# Convert column_dict from a string to a literal dict.
column_dict = ast.literal_eval(values_dict['columnDict'])
try:
values_dict["editKey"] = values_dict["csv_item_list"]
del column_dict[values_dict['editKey']]
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"[{name}] Error deleting CSV item: {s}. See plugin log for more "
u"information.".format(name=dev.name, s=sub_error))
values_dict['csv_item_list'] = ""
values_dict['editKey'] = ""
values_dict['editSource'] = ""
values_dict['editState'] = ""
values_dict['editValue'] = ""
values_dict['previousKey'] = ""
values_dict['columnDict'] = str(column_dict) # Convert column_dict back to a string for storage.
return values_dict
# =============================================================================
def csv_item_list(self, filter="", values_dict=None, type_id="", target_id=0):
"""
Construct the list of CSV items
The csv_item_list() method generates the list of Item Key : Item Value
pairs that will be presented in the CVS Engine device config dialog. It's
called at open and routinely as changes are made in the dialog.
-----
:param unicode filter:
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int target_id:
"""
dev = indigo.devices[int(target_id)]
try:
# Returning an empty dict seems to work and may solve the 'None' issue
values_dict['columnDict'] = values_dict.get('columnDict', '{}')
# Convert column_dict from a string to a literal dict.
column_dict = ast.literal_eval(values_dict['columnDict'])
prop_list = [(key, "{n}".format(n=value[0].encode("utf-8"))) for key, value in column_dict.items()]
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"[{name}] Error generating CSV item list: {s}. See plugin log for more "
u"information.".format(name=dev.name, s=sub_error))
prop_list = []
# Return a list sorted by the value and not the key. Case insensitive sort.
result = sorted(prop_list, key=lambda tup: tup[1].lower())
return result
# =============================================================================
def csv_item_update(self, values_dict=None, type_id="", dev_id=0):
"""
Updates items from the CSV Engine configuration dialog
When the user selects the 'Update Item' button, update the dict of CSV engine
items.
-----
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int dev_id:
"""
dev = indigo.devices[dev_id]
self.logger.threaddebug(u"[{name}] csv item update "
u"values_dict: {vd}".format(name=dev.name, vd=dict(values_dict)))
error_msg_dict = indigo.Dict()
# Convert column_dict from a string to a literal dict.
column_dict = ast.literal_eval(values_dict['columnDict'])
try:
key = values_dict['editKey']
previous_key = values_dict['previousKey']
if key != previous_key:
if key in column_dict:
error_msg_dict['editKey'] = u"New key ({k}) already exists in the global properties, please " \
u"use a different key value".format(k=key)
values_dict['editKey'] = previous_key
else:
del column_dict[previous_key]
else:
column_dict[key] = (values_dict['editValue'],
values_dict['editSource'],
values_dict['editState']
)
values_dict['csv_item_list'] = ""
values_dict['editKey'] = ""
values_dict['editSource'] = ""
values_dict['editState'] = ""
values_dict['editValue'] = ""
if not len(error_msg_dict):
values_dict['previousKey'] = key
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"[{name}] Error updating CSV item: {s}. See plugin log for more "
u"information.".format(name=dev.name, s=sub_error))
# Remove any empty entries as they're not going to do any good anyway.
new_dict = {}
for k, v in column_dict.iteritems():
if v != ('', '', ''):
new_dict[k] = v
column_dict = new_dict
# Convert column_dict back to a string for storage.
values_dict['columnDict'] = str(column_dict)
return values_dict, error_msg_dict
# =============================================================================
def csv_item_select(self, values_dict=None, type_id="", dev_id=0):
"""
Populates CSV engine controls for updates and deletions
The csv_item_select() method is called when the user actually selects something
within the CSV engine Item List dropdown menu. When the user selects an item
from the Item List, we populate the Title, ID, and Data controls with the
relevant Item properties.
-----
:param class 'indigo.Dict' values_dict:
:param unicode type_id:
:param int dev_id:
"""
dev = indigo.devices[int(dev_id)]
self.logger.threaddebug(u"[{name}] csv item select "
u"values_dict: {vd}".format(name=dev.name, vd=dict(values_dict)))
try:
column_dict = ast.literal_eval(values_dict['columnDict'])
values_dict['editKey'] = values_dict['csv_item_list']
values_dict['editSource'] = column_dict[values_dict['csv_item_list']][1]
values_dict['editState'] = column_dict[values_dict['csv_item_list']][2]
values_dict['editValue'] = column_dict[values_dict['csv_item_list']][0]
values_dict['isColumnSelected'] = True
values_dict['previousKey'] = values_dict['csv_item_list']
except Exception as sub_error:
self.plugin_error_handler(sub_error=traceback.format_exc())
self.logger.error(u"[{name}] There was an error establishing a connection with the item you chose: {s}. "
u"See plugin log for more information.".format(name=dev.name, s=sub_error))
return values_dict
# =============================================================================
def csv_refresh(self):
"""
Refreshes data for all CSV custom devices
The csv_refresh() method manages CSV files through CSV Engine custom devices.
-----
"""
if not self.pluginIsShuttingDown:
for dev in indigo.devices.itervalues("self"):
if dev.deviceTypeId == 'csvEngine' and dev.enabled:
refresh_interval = int(dev.pluginProps['refreshInterval'])
try:
last_updated = date_parse(dev.states['csvLastUpdated'])
except ValueError:
last_updated = date_parse('1970-01-01 00:00')
diff = dt.datetime.now() - last_updated
refresh_needed = diff
# Source repository: xkmato/py77, file: pylib/zeus/runner.py
#!/usr/bin/env python
"""Handles run."""
__author__ = '<EMAIL> (<NAME>)'
__copyright__ = 'Copyright 2012 Room77, Inc.'
import itertools
import json
import os
import re
import sys
import time
from pylib.base.flags import Flags
from pylib.base.exec_utils import ExecUtils
from pylib.base.term_color import TermColor
from pylib.file.file_utils import FileUtils
from pylib.util.mail.mailer import Mailer
from pylib.zeus.pipeline_cmd_base import PipelineCmdBase
from pylib.zeus.pipeline_config import PipelineConfig
from pylib.zeus.pipeline_utils import PipelineUtils
class Runner(PipelineCmdBase):
"""Class to handle run."""
# The different exit codes that can be returned after running a task.
EXITCODE = {
'_LOWEST':-1, # Internal use.
'SUCCESS': 0,
'ALLOW_FAIL': 1,
'FAILURE': 2,
'ABORT_FAIL': 3,
}
EXITCODE_DESCRIPTION = {
0: 'SUCCESS',
1: 'ALLOW_FAIL',
2: 'FAILURE',
3: 'ABORT_FAIL',
}
EXITCODE_FILE = {
0: 'SUCCESS',
1: 'SUCCESS',
2: 'FAILURE',
3: 'ABORT',
}
TASK_OPTIONS = {
# Default option. Task will run regardless of if earlier tasks in the directory
# were successful or not. Task will not run if any task across the pipeline was
# marked as `abort_fail`.
'NORMAL': 0,
# If this step fails, prevent any subsequent steps across the entire pipeline from
# running. The pipeline will be aborted. All currently running tasks will finish,
# but no further tasks will be run. To enable this option, add `.abort_fail` to the
# task name.
'ABORT_FAIL': 1,
# If this step fails, do not mark the out directory as failed. Mark it as successful
# if all other tasks at this level have succeeded. This will allow publishing of a
# task directory even if the only steps that failed were marked as `allow_fail`. To
# enable this option, add `.allow_fail` to the task name.
'ALLOW_FAIL': 2,
# If any earlier tasks located in the same directory as this task failed, prevent
# this task from running. This task will also be marked as failed. To enable this
# option, add `.require_dir_success` to the task name.
'REQUIRE_DIR_SUCCESS': 3,
}
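# Example task names (illustrative): a task file named 'cleanup.abort_fail'
# aborts the whole pipeline if it fails, while a task named
# 'report.allow_fail.require_dir_success' may fail without failing its out
# dir, and is skipped if an earlier task in the same directory failed.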
@classmethod
def Init(cls, parser):
super(Runner, cls).Init(parser)
parser.add_argument('-t', '--timeout', type=float, default=86400,
help='Timeout for each task in seconds.')
parser.add_argument('--pool_size', type=int, default=0,
help='The pool size for parallelization.')
parser.add_argument('--detailed_success_mail', action='store_true', default=False,
help='Sends a detailed mail even on success. Useful for debugging.')
parser.add_argument('--success_mail', type=str, default='',
help='The mail to use to send info in case of success.')
parser.add_argument('--failure_mail', type=str, default='',
help='The mail to use to send info in case of failure.')
parser.add_argument('--mail_domain', type=str, default='corp.room77.com',
help='The domain to use when sending automated '
'pipeline mail.')
@classmethod
def WorkHorse(cls, tasks):
"""Runs the workhorse for the command.
Args:
tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks to execute at the
priority. Note: the dict is ordered by priority.
Return:
(list, list): Returns a tuple of list in the form
(successful_tasks, failed_tasks) specifying tasks that succeeded and
ones that failed.
"""
# All our binaries assume they will be run from the source root.
start = time.time()
os.chdir(FileUtils.GetSrcRoot())
cls._CreateDirsForTasks(tasks)
successful_run = []; failed_run = []
aborted_task = None
# NOTE(stephen): Storing task dir status and task out dir status separately since
# pipelines do not always have an out dir defined.
dirs_status = {}
out_dirs_status = {}
for set_tasks in tasks.values():
if aborted_task:
failed_run += set_tasks
continue
tasks_to_run = []
for task in set_tasks:
task_options = cls.__GetTaskOptions(task)
# Check if this task requires all previous tasks in the same directory to be
# successful.
if task_options[Runner.TASK_OPTIONS['REQUIRE_DIR_SUCCESS']]:
task_dir = PipelineUtils.TaskDirName(task)
cur_dir_status = dirs_status.get(task_dir)
# If any previous tasks have been run in this directory, check to ensure all
# of them were successful.
if cur_dir_status and cur_dir_status != Runner.EXITCODE['SUCCESS']:
failed_run += [task]
task_display_name = PipelineUtils.TaskDisplayName(task)
TermColor.Info('Skipped %s' % task_display_name)
TermColor.Failure(
'Skipped Task: %s due to earlier failures in task dir' % task_display_name
)
continue
tasks_to_run.append(task)
# It is possible for all steps at this priority level to be skipped due to the
# task options selected.
if set_tasks and not tasks_to_run:
continue
# Run all the tasks at the same priority in parallel.
args = zip(itertools.repeat(cls), itertools.repeat('_RunSingeTask'),
tasks_to_run)
task_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
# task_res = []
# for task in tasks_to_run: task_res += [cls._RunSingeTask(task)]
if not task_res:
TermColor.Error('Could not process: %s' % tasks_to_run)
failed_run += tasks_to_run
continue
for (res, task) in task_res:
if res == Runner.EXITCODE['SUCCESS']:
successful_run += [task]
elif res == Runner.EXITCODE['FAILURE']:
failed_run += [task]
elif res == Runner.EXITCODE['ALLOW_FAIL']:
failed_run += [task]
elif res == Runner.EXITCODE['ABORT_FAIL']:
failed_run += [task]
aborted_task = task
else:
TermColor.Fatal('Invalid return %d code for %s' % (res, task))
# Update the current status of all tasks in the same directory.
task_dir = PipelineUtils.TaskDirName(task)
dirs_status[task_dir] = max(
dirs_status.get(task_dir, Runner.EXITCODE['_LOWEST']), res,
)
# Update the out dir status.
out_dir = PipelineUtils.GetOutDirForTask(task)
if out_dir:
out_dirs_status[out_dir] = max(
out_dirs_status.get(out_dir, Runner.EXITCODE['_LOWEST']), res,
)
# Write the status files to the dirs.
cls._WriteOutDirsStatus(out_dirs_status)
# Send the final status mail.
time_taken = time.time() - start
cls._SendFinalStatusMail(successful_run, failed_run, aborted_task, time_taken)
if aborted_task:
TermColor.Failure('Aborted by task: %s' % aborted_task)
return (successful_run, failed_run)
@classmethod
def _CreateDirsForTasks(cls, tasks):
"""Creates the relevant dirs for tasks.
Args:
tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks to execute at the
priority. Note: the dict is ordered by priority.
"""
for set_tasks in tasks.values():
for task in set_tasks:
rel_path = PipelineUtils.GetTaskOutputRelativeDir(task)
PipelineConfig.Instance().CreateAllSubDirsForPath(rel_path)
@classmethod
def _RunSingeTask(cls, task):
"""Runs a Single Task.
Args:
task: string: The task to run.
Return:
(EXITCODE, string): Returns a tuple of the result status and the task.
"""
TermColor.Info('Executing %s' % PipelineUtils.TaskDisplayName(task))
task_vars = cls.__GetEnvVarsForTask(task)
TermColor.VInfo(4, 'VARS: \n%s' % task_vars)
task_cmd = task
pipe_output = True
log_file = PipelineUtils.GetLogFileForTask(task)
if log_file:
task_cmd += ' > ' + PipelineUtils.GetLogFileForTask(task) + ' 2>&1'
pipe_output = False
timeout = cls.__GetTimeOutForTask(task)
start = time.time()
(status, out) = ExecUtils.RunCmd(task_cmd, timeout, pipe_output, task_vars)
time_taken = time.time() - start
TermColor.Info('Executed %s. Took %.2fs' % (PipelineUtils.TaskDisplayName(task), time_taken))
if status:
TermColor.Failure('Failed Task: %s' % PipelineUtils.TaskDisplayName(task))
if task_vars.get('PIPELINE_TASK_ABORT_FAIL', None):
status_code = Runner.EXITCODE['ABORT_FAIL']
elif task_vars.get('PIPELINE_TASK_ALLOW_FAIL', None):
status_code = Runner.EXITCODE['ALLOW_FAIL']
else:
status_code = Runner.EXITCODE['FAILURE']
else:
status_code = Runner.EXITCODE['SUCCESS']
cls._SendMailForTask(task, status_code, time_taken, log_file, out)
# Everything done. Mark the task as successful.
return (status_code, task)
@classmethod
def __GetEnvVarsForTask(cls, task):
"""Returns the env vars for the task.
Args:
task: string: The task for which the envvar should be prepared.
Returns:
dict {string, string}: The dictionary of IDS to values.
"""
rel_path = PipelineUtils.GetTaskOutputRelativeDir(task)
vars = {}
for k, v in PipelineConfig.Instance().GetAllSubDirsForPath(rel_path).items():
vars[k] = v
prev_dir = FileUtils.GetPreviousDatedDir(v)
if not prev_dir: prev_dir = v
vars[k + '_PREV'] = prev_dir
vars.update(PipelineConfig.Instance().GetAllENVVars())
# Check if the task is critical or not.
task_options = cls.__GetTaskOptions(task)
if task_options[Runner.TASK_OPTIONS['ABORT_FAIL']]:
vars['PIPELINE_TASK_ABORT_FAIL'] = '1'
if task_options[Runner.TASK_OPTIONS['ALLOW_FAIL']]:
vars['PIPELINE_TASK_ALLOW_FAIL'] = '1'
return vars
@classmethod
def __GetTimeOutForTask(cls, task):
"""Returns the timeout for the task.
Args:
task: string: The task for which the timeout should be prepared.
Returns:
int: The timeout in seconds.
"""
timeout = FileUtils.FileContents(task + '.timeout')
if not timeout:
timeout = FileUtils.FileContents(os.path.join(PipelineUtils.TaskDirName(task), 'timeout'))
if not timeout: return Flags.ARGS.timeout
timeout = re.sub(r'\s*', '', timeout)
timeout_parts = re.split(r'(\d+)', timeout)
if len(timeout_parts) < 3:
TermColor.Warning('Ignoring invalid timeout [%s] for task: %s' % (timeout, task))
return Flags.ARGS.timeout
timeout = float(timeout_parts[1])
annotation = timeout_parts[2]
if not annotation: return timeout
elif annotation == 'd': timeout *= 86400
elif annotation == 'h': timeout *= 3600
elif annotation == 'm': timeout *= 60
elif annotation == 'ms': timeout *= 0.001
elif annotation == 'us': timeout *= 0.000001
return timeout
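# Timeout file sketch (contents are illustrative): a '<task>.timeout' file (or
# a 'timeout' file in the task's directory) containing '90m' gives 5400
# seconds, '2h' gives 7200, a bare '45' is taken as 45 seconds, and an
# unparseable value falls back to --timeout.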
@classmethod
def __GetTaskOptions(cls, task):
rel_task = PipelineUtils.TaskRelativeName(task)
options = {
v: False
for v in Runner.TASK_OPTIONS.values()
}
if '.abort_fail' in rel_task:
options[Runner.TASK_OPTIONS['ABORT_FAIL']] = True
if '.allow_fail' in rel_task:
options[Runner.TASK_OPTIONS['ALLOW_FAIL']] = True
if '.require_dir_success' in rel_task:
options[Runner.TASK_OPTIONS['REQUIRE_DIR_SUCCESS']] = True
if not any(options.values()):
options[Runner.TASK_OPTIONS['NORMAL']] = True
return options
@classmethod
def _SendMailForTask(cls, task, status_code, time_taken, log_file, msg):
"""Sends the mail if required for the task.
Args:
task: string: The task for which the envvar should be prepared.
status_code: EXITCODE: The exit code for the task.
time_taken: float: Time taken in seconds.
log_file: string: The log file containing the output of the task.
msg: string: The output message piped directly. Note only one of msg or log_file will be
present at any time.
Returns:
dict {string, string}: The dictionary of
import heterocl as hcl
import numpy as np
import time
import user_definer_3D_Q as UD
import os
###################################### USER-DEFINED FUNCTIONS ######################################
# Given state and action, return successor states and their probabilities
# sVals: the coordinates of state
# bounds: the lower and upper limits of the state space in each dimension
# trans: holds each successor state and the probability of reaching that state
def transition(sVals, action, bounds, trans, goal):
dx = hcl.scalar(0, "dx")
dy = hcl.scalar(0, "dy")
mag = hcl.scalar(0, "mag")
# Check if moving from a goal state
dx[0] = sVals[0] - goal[0,0]
dy[0] = sVals[1] - goal[0,1]
mag[0] = hcl.sqrt((dx[0] * dx[0]) + (dy[0] * dy[0]))
with hcl.if_(hcl.and_(mag[0] <= 1.0, sVals[2] <= goal[1,1], sVals[2] >= goal[1,0])):
trans[0, 0] = 0
# Check if moving from an obstacle
with hcl.elif_(hcl.or_(sVals[0] < bounds[0,0] + 0.2, sVals[0] > bounds[0,1] - 0.2)):
trans[0, 0] = 0
with hcl.elif_(hcl.or_(sVals[1] < bounds[1,0] + 0.2, sVals[1] > bounds[1,1] - 0.2)):
trans[0, 0] = 0
# Standard move
with hcl.else_():
trans[0, 0] = 1.0
trans[0, 1] = sVals[0] + (0.6 * action[0] * hcl.cos(sVals[2]))
trans[0, 2] = sVals[1] + (0.6 * action[0] * hcl.sin(sVals[2]))
trans[0, 3] = sVals[2] + (0.6 * action[1])
# Adjust for periodic dimension
with hcl.while_(trans[0, 3] > 3.141592653589793):
trans[0, 3] -= 6.283185307179586
with hcl.while_(trans[0, 3] < -3.141592653589793):
trans[0, 3] += 6.283185307179586
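# A hedged plain-Python sketch of the angle wrapping performed by the two
# while-loops above (outside HeteroCL, with ordinary floats):
import math

def wrap_to_pi(theta):
    """Map an angle in radians into the interval (-pi, pi]."""
    return math.atan2(math.sin(theta), math.cos(theta))

# e.g. wrap_to_pi(4.0) is about -2.283, the same as 4.0 - 2*pi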
# Return the reward for taking action from state
def reward(sVals, action, bounds, goal, trans):
dx = hcl.scalar(0, "dx")
dy = hcl.scalar(0, "dy")
mag = hcl.scalar(0, "mag")
rwd = hcl.scalar(0, "rwd")
# Check if moving from a collision state, if so, assign a penalty
with hcl.if_(hcl.or_(sVals[0] < bounds[0,0] + 0.2, sVals[0] > bounds[0,1] - 0.2)):
rwd[0] = -400
with hcl.elif_(hcl.or_(sVals[1] < bounds[1,0] + 0.2, sVals[1] > bounds[1,1] - 0.2)):
rwd[0] = -400
with hcl.else_():
# Check if moving from a goal state
dx[0] = sVals[0] - goal[0,0]
dy[0] = sVals[1] - goal[0,1]
mag[0] = hcl.sqrt((dx[0] * dx[0]) + (dy[0] * dy[0]))
with hcl.if_(hcl.and_(mag[0] <= 1.0, sVals[2] <= goal[1,1], sVals[2] >= goal[1,0])):
rwd[0] = 1000
# Standard move
with hcl.else_():
rwd[0] = 0
return rwd[0]
######################################### HELPER FUNCTIONS #########################################
# Update the Q-value Qopt[i,j,k,a] at grid position (i,j,k) for action a
def updateQopt(i, j, k, a, iVals, sVals, Qopt, actions, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN, fillVal):
r = hcl.scalar(0, "r")
p = hcl.scalar(0, "p")
# set iVals equal to (i,j,k) and sVals equal to the corresponding state values at (i,j,k)
updateStateVals(i, j, k, iVals, sVals, bounds, ptsEachDim)
# call the transition function to obtain the outcome(s) of action a from state (si,sj,sk)
transition(sVals, actions[a], bounds, trans, goal)
# initialize Qopt[i,j,k,a] with the immediate reward
r[0] = reward(sVals, actions[a], bounds, goal, trans)
Qopt[i,j,k,a] = r[0]
# maximize over successor Q-values
with hcl.for_(0, trans.shape[0], name="si") as si:
p[0] = trans[si,0]
sVals[0] = trans[si,1]
sVals[1] = trans[si,2]
sVals[2] = trans[si,3]
# Nearest neighbour
with hcl.if_(useNN[0] == 1):
# obtain the nearest neighbour successor state
stateToIndex(sVals, iVals, bounds, ptsEachDim)
# maximize over successor state Q-values
with hcl.if_(hcl.and_(iVals[0] < Qopt.shape[0], iVals[1] < Qopt.shape[1], iVals[2] < Qopt.shape[2])):
with hcl.if_(hcl.and_(iVals[0] >= 0, iVals[1] >= 0, iVals[2] >= 0)):
with hcl.for_(0, actions.shape[0], name="a_") as a_:
with hcl.if_((r[0] + (gamma[0] * (p[0] * Qopt[iVals[0],iVals[1],iVals[2],a_]))) > Qopt[i,j,k,a]):
Qopt[i,j,k,a] = r[0] + (gamma[0] * (p[0] * Qopt[iVals[0],iVals[1],iVals[2],a_]))
# Linear interpolation
with hcl.if_(useNN[0] == 0):
with hcl.if_(hcl.and_(sVals[0] <= bounds[0,1], sVals[1] <= bounds[1,1], sVals[2] <= bounds[2,1])):
with hcl.if_(hcl.and_(sVals[0] >= bounds[0,0], sVals[1] >= bounds[1,0], sVals[2] >= bounds[2,0])):
stateToIndexInterpolants(Qopt, sVals, actions, bounds, ptsEachDim, interpV, fillVal)
Qopt[i,j,k,a] += (gamma[0] * (p[0] * interpV[0]))
r[0] += Qopt[i,j,k,a]
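# In effect, the backup above computes
#   Qopt[i,j,k,a] = r(s,a) + gamma * max_{a'} p * Q(s', a')
# where the successor value Q(s', a') is read either from the nearest grid node
# (useNN == 1) or from the trilinear interpolation over the 8 surrounding nodes
# (useNN == 0).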
# Sets reSweep to 1 if the difference between newQ and oldQ exceeds epsilon (i.e. not yet converged)
def evaluateConvergence(newQ, oldQ, epsilon, reSweep):
delta = hcl.scalar(0, "delta")
# Take the absolute difference between the new and old Q-values
delta[0] = newQ[0] - oldQ[0]
with hcl.if_(delta[0] < 0):
delta[0] = delta[0] * -1
with hcl.if_(delta[0] > epsilon[0]):
reSweep[0] = 1
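# A hedged NumPy sketch of the same convergence test applied to whole arrays;
# new_q and old_q are assumed to be numpy arrays of identical shape.
import numpy as np

def needs_resweep(new_q, old_q, epsilon):
    """Return True if any Q-value changed by more than epsilon since the last sweep."""
    return bool(np.max(np.abs(new_q - old_q)) > epsilon)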
# convert state values into indices using nearest neighbour
# NOTE: have to modify this to work with modular values
def stateToIndex(sVals, iVals, bounds, ptsEachDim):
iVals[0] = ((sVals[0] - bounds[0,0]) / (bounds[0,1] - bounds[0,0])) * (ptsEachDim[0] - 1)
iVals[1] = ((sVals[1] - bounds[1,0]) / (bounds[1,1] - bounds[1,0])) * (ptsEachDim[1] - 1)
iVals[2] = ((sVals[2] - bounds[2,0]) / (bounds[2,1] - bounds[2,0])) * (ptsEachDim[2] - 1)
# NOTE: add 0.5 to simulate rounding
iVals[0] = hcl.cast(hcl.Int(), iVals[0] + 0.5)
iVals[1] = hcl.cast(hcl.Int(), iVals[1] + 0.5)
iVals[2] = hcl.cast(hcl.Int(), iVals[2] + 0.5)
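# A hedged NumPy sketch of the same nearest-neighbour conversion, assuming
# `bounds` is a (3, 2) array of [min, max] rows and `pts_each_dim` has shape (3,).
# Like the HeteroCL version it adds 0.5 and truncates to emulate rounding.
import numpy as np

def state_to_index_nn(s_vals, bounds, pts_each_dim):
    """Map continuous state values onto their nearest grid indices."""
    s_vals = np.asarray(s_vals, dtype=float)
    frac = (s_vals - bounds[:, 0]) / (bounds[:, 1] - bounds[:, 0])
    return (frac * (np.asarray(pts_each_dim) - 1) + 0.5).astype(int)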
# convert indices into state values
def indexToState(iVals, sVals, bounds, ptsEachDim):
sVals[0] = bounds[0,0] + ( (bounds[0,1] - bounds[0,0]) * (iVals[0] / (ptsEachDim[0]-1)) )
sVals[1] = bounds[1,0] + ( (bounds[1,1] - bounds[1,0]) * (iVals[1] / (ptsEachDim[1]-1)) )
sVals[2] = bounds[2,0] + ( (bounds[2,1] - bounds[2,0]) * (iVals[2] / (ptsEachDim[2]-1)) )
# set iVals equal to (i,j,k) and sVals equal to the corresponding state values at (i,j,k)
def updateStateVals(i, j, k, iVals, sVals, bounds, ptsEachDim):
iVals[0] = i
iVals[1] = j
iVals[2] = k
indexToState(iVals, sVals, bounds, ptsEachDim)
# given state values sVals, obtain the 8 surrounding grid nodes and their corresponding interpolation weights
def stateToIndexInterpolants(Qopt, sVals, actions, bounds, ptsEachDim, interpV, fillVal):
iMin = hcl.scalar(0, "iMin")
jMin = hcl.scalar(0, "jMin")
kMin = hcl.scalar(0, "kMin")
iMax = hcl.scalar(0, "iMax")
jMax = hcl.scalar(0, "jMax")
kMax = hcl.scalar(0, "kMax")
c000 = hcl.scalar(fillVal[0], "c000")
c001 = hcl.scalar(fillVal[0], "c001")
c010 = hcl.scalar(fillVal[0], "c010")
c011 = hcl.scalar(fillVal[0], "c011")
c100 = hcl.scalar(fillVal[0], "c100")
c101 = hcl.scalar(fillVal[0], "c101")
c110 = hcl.scalar(fillVal[0], "c110")
c111 = hcl.scalar(fillVal[0], "c111")
c00 = hcl.scalar(0, "c00")
c01 = hcl.scalar(0, "c01")
c10 = hcl.scalar(0, "c10")
c11 = hcl.scalar(0, "c11")
c0 = hcl.scalar(0, "c0")
c1 = hcl.scalar(0, "c1")
ia = hcl.scalar(0, "ia")
ja = hcl.scalar(0, "ja")
ka = hcl.scalar(0, "ka")
di = hcl.scalar(0, "di")
dj = hcl.scalar(0, "dj")
dk = hcl.scalar(0, "dk")
# obtain unrounded index values
ia[0] = ((sVals[0] - bounds[0,0]) / (bounds[0,1] - bounds[0,0])) * (ptsEachDim[0] - 1)
ja[0] = ((sVals[1] - bounds[1,0]) / (bounds[1,1] - bounds[1,0])) * (ptsEachDim[1] - 1)
ka[0] = ((sVals[2] - bounds[2,0]) / (bounds[2,1] - bounds[2,0])) * (ptsEachDim[2] - 1)
# obtain neighbouring state indices in each direction
with hcl.if_(ia[0] < 0):
iMin[0] = hcl.cast(hcl.Int(), ia[0] - 1.0)
iMax[0] = hcl.cast(hcl.Int(), ia[0])
with hcl.else_():
iMin[0] = hcl.cast(hcl.Int(), ia[0])
iMax[0] = hcl.cast(hcl.Int(), ia[0] + 1.0)
with hcl.if_(ja[0] < 0):
jMin[0] = hcl.cast(hcl.Int(), ja[0] - 1.0)
jMax[0] = hcl.cast(hcl.Int(), ja[0])
with hcl.else_():
jMin[0] = hcl.cast(hcl.Int(), ja[0])
jMax[0] = hcl.cast(hcl.Int(), ja[0] + 1.0)
with hcl.if_(ka[0] < 0):
kMin[0] = hcl.cast(hcl.Int(), ka[0] - 1.0)
kMax[0] = hcl.cast(hcl.Int(), ka[0])
with hcl.else_():
kMin[0] = hcl.cast(hcl.Int(), ka[0])
kMax[0] = hcl.cast(hcl.Int(), ka[0] + 1.0)
# obtain weights in each direction
di[0] = ia[0] - iMin[0]
dj[0] = ja[0] - jMin[0]
dk[0] = ka[0] - kMin[0]
# Obtain value of each neighbour state
# Qopt[iMin, jMin, kMin]
with hcl.if_(hcl.and_(iMin[0] < Qopt.shape[0], jMin[0] < Qopt.shape[1], kMin[0] < Qopt.shape[2])):
with hcl.if_(hcl.and_(iMin[0] >= 0, jMin[0] >= 0, kMin[0] >= 0)):
with hcl.for_(0, actions.shape[0], name="a_") as a_:
with hcl.if_(c000[0] < Qopt[iMin[0], jMin[0], kMin[0], a_]):
c000[0] = Qopt[iMin[0], jMin[0], kMin[0], a_]
# Qopt[iMin, jMin, kMax]
with hcl.if_(hcl.and_(iMin[0] < Qopt.shape[0], jMin[0] < Qopt.shape[1], kMax[0] < Qopt.shape[2])):
with hcl.if_(hcl.and_(iMin[0] >= 0, jMin[0] >= 0, kMax[0] >= 0)):
with hcl.for_(0, actions.shape[0], name="a_") as a_:
with hcl.if_(c001[0] < Qopt[iMin[0], jMin[0], kMax[0], a_]):
c001[0] = Qopt[iMin[0], jMin[0], kMax[0], a_]
# Qopt[iMin, jMax, kMin]
with hcl.if_(hcl.and_(iMin[0] < Qopt.shape[0], jMax[0] < Qopt.shape[1], kMin[0] < Qopt.shape[2])):
with hcl.if_(hcl.and_(iMin[0] >= 0, jMax[0] >= 0, kMin[0] >= 0)):
with hcl.for_(0, actions.shape[0], name="a_") as a_:
with hcl.if_(c010[0] < Qopt[iMin[0], jMax[0], kMin[0], a_]):
c010[0] = Qopt[iMin[0], jMax[0], kMin[0], a_]
# Qopt[iMin, jMax, kMax]
with hcl.if_(hcl.and_(iMin[0] < Qopt.shape[0], jMax[0] < Qopt.shape[1], kMax[0] < Qopt.shape[2])):
with hcl.if_(hcl.and_(iMin[0] >= 0, jMax[0] >= 0, kMax[0] >= 0)):
with hcl.for_(0, actions.shape[0], name="a_") as a_:
with hcl.if_(c011[0] < Qopt[iMin[0], jMax[0], kMax[0], a_]):
c011[0] = Qopt[iMin[0], jMax[0], kMax[0], a_]
# Qopt[iMax, jMin, kMin]
with hcl.if_(hcl.and_(iMax[0] < Qopt.shape[0], jMin[0] < Qopt.shape[1], kMin[0] < Qopt.shape[2])):
with hcl.if_(hcl.and_(iMax[0] >= 0, jMin[0] >= 0, kMin[0] >= 0)):
with hcl.for_(0, actions.shape[0], name="a_") as a_:
with hcl.if_(c100[0] < Qopt[iMax[0], jMin[0], | |
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'description', 'id', 'info', 'instrumentDataFile',
'instrumentModel', 'library', 'libraryLayout', 'molecule',
'name', 'platformUnit', 'recordCreateTime',
'recordUpdateTime', 'runTime', 'selection',
'sequencingCenter', 'strategy'
]
def __init__(self, **kwargs):
self.description = kwargs.get(
'description', None)
self.id = kwargs.get(
'id', None)
self.info = kwargs.get(
'info', {})
self.instrumentDataFile = kwargs.get(
'instrumentDataFile', None)
self.instrumentModel = kwargs.get(
'instrumentModel', None)
self.library = kwargs.get(
'library', None)
self.libraryLayout = kwargs.get(
'libraryLayout', None)
self.molecule = kwargs.get(
'molecule', None)
self.name = kwargs.get(
'name', None)
self.platformUnit = kwargs.get(
'platformUnit', None)
self.recordCreateTime = kwargs.get(
'recordCreateTime', None)
self.recordUpdateTime = kwargs.get(
'recordUpdateTime', None)
self.runTime = kwargs.get(
'runTime', None)
self.selection = kwargs.get(
'selection', None)
self.sequencingCenter = kwargs.get(
'sequencingCenter', None)
self.strategy = kwargs.get(
'strategy', None)
class ExternalIdentifier(ProtocolElement):
"""
Identifier from a public database
"""
_schemaSource = """
{"type": "record", "name": "ExternalIdentifier", "namespace": "org.ga4gh.models", "doc": "",
"fields": [{"name": "database", "type": "string", "doc": ""}, {"name": "identifier", "type":
"string", "doc": ""}, {"name": "version", "type": "string", "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"database",
"identifier",
"version",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'database', 'identifier', 'version'
]
def __init__(self, **kwargs):
self.database = kwargs.get(
'database', None)
self.identifier = kwargs.get(
'identifier', None)
self.version = kwargs.get(
'version', None)
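# Hedged usage sketch for these generated classes: every field is a keyword
# argument with the default shown in __init__, e.g. (values illustrative only):
#
#   ident = ExternalIdentifier(database='dbSNP', identifier='rs6054257', version='1')
#   ident.database                         # 'dbSNP'
#   ExternalIdentifier().version is None   # unset fields fall back to None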
class Fragment(ProtocolElement):
"""
A fragment represents a contiguous stretch of a DNA or RNA
molecule. Reads can be associated with a fragment to specify they
derive from the same molecule.
"""
_schemaSource = """
{"type": "record", "name": "Fragment", "namespace": "org.ga4gh.models", "doc": "", "fields":
[{"name": "id", "type": "string", "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"id",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'id'
]
def __init__(self, **kwargs):
self.id = kwargs.get(
'id', None)
class GAException(ProtocolElement):
"""
A general exception type.
"""
_schemaSource = """
{"type": "error", "name": "GAException", "namespace": "org.ga4gh.methods", "doc": "", "fields":
[{"name": "message", "type": "string", "doc": ""}, {"name": "errorCode", "type": "int", "doc": "",
"default": -1}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"message",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'errorCode', 'message'
]
def __init__(self, **kwargs):
self.errorCode = kwargs.get(
'errorCode', -1)
self.message = kwargs.get(
'message', None)
class LinearAlignment(ProtocolElement):
"""
A linear alignment can be represented by one CIGAR string.
"""
_schemaSource = """
{"type": "record", "name": "LinearAlignment", "namespace": "org.ga4gh.models", "doc": "", "fields":
[{"name": "position", "type": {"type": "record", "name": "Position", "doc": "", "fields": [{"name":
"referenceName", "type": "string", "doc": ""}, {"name": "position", "type": "long", "doc": ""},
{"name": "strand", "type": {"type": "enum", "name": "Strand", "doc": "", "symbols": ["NEG_STRAND",
"POS_STRAND"]}, "doc": ""}]}, "doc": ""}, {"name": "mappingQuality", "type": ["null", "int"], "doc":
"", "default": null}, {"name": "cigar", "type": {"type": "array", "items": {"type": "record",
"name": "CigarUnit", "doc": "", "fields": [{"name": "operation", "type": {"type": "enum", "name":
"CigarOperation", "doc": "", "symbols": ["ALIGNMENT_MATCH", "INSERT", "DELETE", "SKIP", "CLIP_SOFT",
"CLIP_HARD", "PAD", "SEQUENCE_MATCH", "SEQUENCE_MISMATCH"]}, "doc": ""}, {"name": "operationLength",
"type": "long", "doc": ""}, {"name": "referenceSequence", "type": ["null", "string"], "doc": "",
"default": null}]}}, "doc": "", "default": []}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"position",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'cigar': CigarUnit,
'position': Position,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'cigar': CigarUnit,
'position': Position,
}
return embeddedTypes[fieldName]
__slots__ = [
'cigar', 'mappingQuality', 'position'
]
def __init__(self, **kwargs):
self.cigar = kwargs.get(
'cigar', [])
self.mappingQuality = kwargs.get(
'mappingQuality', None)
self.position = kwargs.get(
'position', Position())
class ListReferenceBasesRequest(ProtocolElement):
"""
The query parameters for a request to `GET
/references/{id}/bases`, for example: `GET
/references/{id}/bases?start=100&end=200`
"""
_schemaSource = """
{"type": "record", "name": "ListReferenceBasesRequest", "namespace": "org.ga4gh.methods", "doc": "",
"fields": [{"name": "start", "type": "long", "doc": "", "default": 0}, {"name": "end", "type":
["null", "long"], "doc": "", "default": null}, {"name": "pageToken", "type": ["null", "string"],
"doc": "", "default": null}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'end', 'pageToken', 'start'
]
def __init__(self, **kwargs):
self.end = kwargs.get(
'end', None)
self.pageToken = kwargs.get(
'pageToken', None)
self.start = kwargs.get(
'start', 0)
class ListReferenceBasesResponse(ProtocolElement):
"""
The response from `GET /references/{id}/bases` expressed as JSON.
"""
_schemaSource = """
{"type": "record", "name": "ListReferenceBasesResponse", "namespace": "org.ga4gh.methods", "doc":
"", "fields": [{"name": "offset", "type": "long", "doc": "", "default": 0}, {"name": "sequence",
"type": "string", "doc": ""}, {"name": "nextPageToken", "type": ["null", "string"], "doc": "",
"default": null}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"sequence",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'nextPageToken', 'offset', 'sequence'
]
def __init__(self, **kwargs):
self.nextPageToken = kwargs.get(
'nextPageToken', None)
self.offset = kwargs.get(
'offset', 0)
self.sequence = kwargs.get(
'sequence', None)
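# Hedged paging sketch for the request/response pair above; fetch_page is a
# hypothetical callable that performs GET /references/{id}/bases and returns a
# ListReferenceBasesResponse:
#
#   request = ListReferenceBasesRequest(start=0, end=10000)
#   sequence = ''
#   while True:
#       response = fetch_page(request)
#       sequence += response.sequence
#       if not response.nextPageToken:
#           break
#       request.pageToken = response.nextPageToken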
class Position(ProtocolElement):
"""
A `Position` is an unoriented base in some `Reference`. A
`Position` is represented by a `Reference` name, and a base number
on that `Reference` (0-based).
"""
_schemaSource = """
{"type": "record", "name": "Position", "namespace": "org.ga4gh.models", "doc": "", "fields":
[{"name": "referenceName", "type": "string", "doc": ""}, {"name": "position", "type": "long", "doc":
""}, {"name": "strand", "type": {"type": "enum", "name": "Strand", "doc": "", "symbols":
["NEG_STRAND", "POS_STRAND"]}, "doc": ""}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"position",
"referenceName",
"strand",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'position', 'referenceName', 'strand'
]
def __init__(self, **kwargs):
self.position = kwargs.get(
'position', None)
self.referenceName = kwargs.get(
'referenceName', None)
self.strand = kwargs.get(
'strand', None)
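# Hedged illustration of the embedded-type helpers generated for each class:
#
#   LinearAlignment.isEmbeddedType('cigar')      # True  (items are CigarUnit)
#   LinearAlignment.getEmbeddedType('position')  # Position
#   Position.isEmbeddedType('referenceName')     # False (plain string field)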
class Program(ProtocolElement):
"""
No documentation
"""
_schemaSource = """
{"type": "record", "name": "Program", "namespace": "org.ga4gh.models", "fields": [{"name":
"commandLine", "type": ["null", "string"], "doc": "", "default": null}, {"name": "id", "type":
["null", "string"], "doc": "", "default": null}, {"name": "name", "type": ["null", "string"], "doc":
"", "default": null}, {"name": "prevProgramId", "type": ["null", "string"], "doc": "", "default":
null}, {"name": "version", "type": ["null", "string"], "doc": "", "default": null}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {}
return embeddedTypes[fieldName]
__slots__ = [
'commandLine', 'id', 'name', 'prevProgramId', 'version'
]
def __init__(self, **kwargs):
self.commandLine = kwargs.get(
'commandLine', None)
self.id = kwargs.get(
'id', None)
self.name = kwargs.get(
'name', None)
self.prevProgramId = kwargs.get(
'prevProgramId', None)
self.version = kwargs.get(
'version', None)
class ReadAlignment(ProtocolElement):
"""
Each read alignment describes an alignment with additional
information about the fragment and the read. A read alignment
object is equivalent to a line in a SAM file.
"""
_schemaSource = """
{"type": "record", "name": "ReadAlignment", "namespace": "org.ga4gh.models", "doc": "", "fields":
[{"name": "id", "type": ["null", "string"], "doc": ""}, {"name": "readGroupId", "type": "string",
"doc": ""}, {"name": "fragmentId", "type": "string", "doc": ""}, {"name": "fragmentName", "type":
"string", "doc": ""}, {"name": "properPlacement", "type": ["null", "boolean"], "doc": "", "default":
null}, {"name": "duplicateFragment", "type": ["null", "boolean"], "doc": "", "default": null},
{"name": "numberReads", "type": ["null", "int"], "doc": "", "default": null}, {"name":
"fragmentLength", "type": ["null", "int"], "doc": "", "default": null}, {"name": "readNumber",
"type": ["null", "int"], "doc": "", "default": null}, {"name": "failedVendorQualityChecks", "type":
["null", "boolean"], "doc": "", "default": null}, {"name": "alignment", "type": ["null", {"type":
"record", "name": "LinearAlignment", "doc": "", "fields": [{"name": "position", "type": {"type":
"record", "name": "Position", "doc": "", "fields": [{"name": "referenceName", "type": "string",
"doc": ""}, {"name": "position", "type": "long", "doc": ""}, {"name": "strand", "type": {"type":
"enum", "name": "Strand", "doc": "", "symbols": ["NEG_STRAND", "POS_STRAND"]}, "doc": ""}]}, "doc":
""}, {"name": "mappingQuality", "type": ["null", "int"], "doc": "", "default": null}, {"name":
"cigar", "type": {"type": "array", "items": {"type": "record", "name": "CigarUnit", "doc": "",
"fields": [{"name": "operation", "type": {"type": "enum", "name": "CigarOperation", "doc": "",
"symbols": ["ALIGNMENT_MATCH", "INSERT", "DELETE", "SKIP", "CLIP_SOFT", "CLIP_HARD", "PAD",
"SEQUENCE_MATCH", "SEQUENCE_MISMATCH"]}, "doc": ""}, {"name": "operationLength", "type": "long",
"doc": ""}, {"name": "referenceSequence", "type": ["null", "string"], "doc": "", "default":
null}]}}, "doc": "", "default": []}]}], "doc": "", "default": null}, {"name": "secondaryAlignment",
"type": ["null", "boolean"], "doc": "", "default": null}, {"name": "supplementaryAlignment", "type":
["null", "boolean"], "doc": "", "default": null}, {"name": "alignedSequence", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "alignedQuality", "type": {"type": "array",
"items": "int"}, "doc": "", "default": []}, {"name": "nextMatePosition", "type": ["null",
"Position"], "doc": "", "default": null}, {"name": "info", "type": {"type": "map", "values":
{"type": "array", "items": "string"}}, "doc": "", "default": {}}]}
"""
schema = avro_parse(_schemaSource)
requiredFields = {
"fragmentId",
"fragmentName",
"id",
"readGroupId",
}
@classmethod
def isEmbeddedType(cls, fieldName):
embeddedTypes = {
'alignment': LinearAlignment,
'nextMatePosition': Position,
}
return fieldName in embeddedTypes
@classmethod
def getEmbeddedType(cls, fieldName):
embeddedTypes = {
'alignment': | |
no flattened fields.
if not isinstance(request, certificate_manager.GetCertificateRequest):
request = certificate_manager.GetCertificateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_certificate]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_certificate(
self,
request: Union[certificate_manager.CreateCertificateRequest, dict] = None,
*,
parent: str = None,
certificate: certificate_manager.Certificate = None,
certificate_id: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a new Certificate in a given project and
location.
.. code-block:: python
from google.cloud import certificate_manager_v1
def sample_create_certificate():
# Create a client
client = certificate_manager_v1.CertificateManagerClient()
# Initialize request argument(s)
request = certificate_manager_v1.CreateCertificateRequest(
parent="parent_value",
certificate_id="certificate_id_value",
)
# Make the request
operation = client.create_certificate(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.certificate_manager_v1.types.CreateCertificateRequest, dict]):
The request object. Request for the `CreateCertificate`
method.
parent (str):
Required. The parent resource of the certificate. Must
be in the format ``projects/*/locations/*``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
certificate (google.cloud.certificate_manager_v1.types.Certificate):
Required. A definition of the
certificate to create.
This corresponds to the ``certificate`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
certificate_id (str):
Required. A user-provided name of the
certificate.
This corresponds to the ``certificate_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.certificate_manager_v1.types.Certificate`
Defines TLS certificate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, certificate, certificate_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a certificate_manager.CreateCertificateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, certificate_manager.CreateCertificateRequest):
request = certificate_manager.CreateCertificateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if certificate is not None:
request.certificate = certificate
if certificate_id is not None:
request.certificate_id = certificate_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_certificate]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
certificate_manager.Certificate,
metadata_type=certificate_manager.OperationMetadata,
)
# Done; return the response.
return response
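# Hedged usage note: the docstring sample builds a CreateCertificateRequest
# explicitly; the flattened-argument form handled above is the alternative
# (resource names below are illustrative placeholders):
#
#   operation = client.create_certificate(
#       parent="projects/my-project/locations/global",
#       certificate=certificate_manager_v1.Certificate(),
#       certificate_id="my-cert",
#   )
#   response = operation.result()
#
# Passing both `request` and any of these keyword arguments raises ValueError,
# as enforced at the top of the method.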
def update_certificate(
self,
request: Union[certificate_manager.UpdateCertificateRequest, dict] = None,
*,
certificate: certificate_manager.Certificate = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates a Certificate.
.. code-block:: python
from google.cloud import certificate_manager_v1
def sample_update_certificate():
# Create a client
client = certificate_manager_v1.CertificateManagerClient()
# Initialize request argument(s)
request = certificate_manager_v1.UpdateCertificateRequest(
)
# Make the request
operation = client.update_certificate(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.certificate_manager_v1.types.UpdateCertificateRequest, dict]):
The request object. Request for the `UpdateCertificate`
method.
certificate (google.cloud.certificate_manager_v1.types.Certificate):
Required. A definition of the
certificate to update.
This corresponds to the ``certificate`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. For
the ``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.certificate_manager_v1.types.Certificate`
Defines TLS certificate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([certificate, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a certificate_manager.UpdateCertificateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, certificate_manager.UpdateCertificateRequest):
request = certificate_manager.UpdateCertificateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if certificate is not None:
request.certificate = certificate
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_certificate]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("certificate.name", request.certificate.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
certificate_manager.Certificate,
metadata_type=certificate_manager.OperationMetadata,
)
# Done; return the response.
return response
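# Hedged sketch of a partial update via update_mask (field paths illustrative):
#
#   from google.protobuf import field_mask_pb2
#   mask = field_mask_pb2.FieldMask(paths=["labels", "description"])
#   operation = client.update_certificate(certificate=cert, update_mask=mask)
#   updated = operation.result()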
def delete_certificate(
self,
request: Union[certificate_manager.DeleteCertificateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a single Certificate.
.. code-block:: python
from google.cloud import certificate_manager_v1
def sample_delete_certificate():
# Create a client
client = certificate_manager_v1.CertificateManagerClient()
# Initialize request argument(s)
request = certificate_manager_v1.DeleteCertificateRequest(
name="name_value",
)
# Make the request
operation = client.delete_certificate(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.certificate_manager_v1.types.DeleteCertificateRequest, dict]):
The request object. Request for the `DeleteCertificate`
method.
name (str):
Required. A name of the certificate to delete. Must be
in the format ``projects/*/locations/*/certificates/*``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a | |
# Positions at first time step
time0 = min(dpos_data[:, 0])
id_t0 = numpy.where(dpos_data[:, 0] == time0)
pos_t0 = pos_data[id_t0, 0:9]
except ValueError:
# this is for the case where the simulation has not been run and
# the time data does not exist
time0 = 0
id_t0 = None
pos_t0 = numpy.array([
numpy.hstack(([0.,
io.instances()[k].attrs['id']]
,io.instances()[k].attrs['translation']
,io.instances()[k].attrs['orientation']))
for k in io.instances()
if io.instances()[k].attrs['id'] >= 0])
if numpy.shape(spos_data)[0] > 0:
set_position(spos_data)
print(spos_data.shape)
# static objects are always visible
for instance, actors in static_actors.items():
for actor in actors:
actor.VisibilityOn()
set_position(*pos_t0)
set_dynamic_actors_visibility(time0)
renderer_window.AddRenderer(renderer)
interactor_renderer.SetRenderWindow(renderer_window)
interactor_renderer.GetInteractorStyle(
).SetCurrentStyleToTrackballCamera()
# http://www.itk.org/Wiki/VTK/Depth_Peeling ?
# Use a render window with alpha bits (as initial value is 0 (false) ):
renderer_window.SetAlphaBitPlanes(1)
# Force to not pick a framebuffer with a multisample buffer ( as initial
# value is 8):
renderer_window.SetMultiSamples(0)
# Choose to use depth peeling (if supported) (initial value is 0
# (false) )
renderer.SetUseDepthPeeling(1)
# Set depth peeling parameters.
renderer.SetMaximumNumberOfPeels(100)
# Set the occlusion ratio (initial value is 0.0, exact image)
renderer.SetOcclusionRatio(0.1)
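# A hedged, self-contained helper equivalent to the depth-peeling setup above,
# usable for any vtkRenderWindow / vtkRenderer pair (same VTK calls, same values):
def enable_depth_peeling(render_window, renderer, max_peels=100, occlusion_ratio=0.1):
    """Enable order-independent transparency via depth peeling."""
    render_window.SetAlphaBitPlanes(1)   # the render window needs an alpha channel
    render_window.SetMultiSamples(0)     # multisampling conflicts with peeling
    renderer.SetUseDepthPeeling(1)
    renderer.SetMaximumNumberOfPeels(max_peels)
    renderer.SetOcclusionRatio(occlusion_ratio)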
# Set the initial camera position and orientation if specified
if initial_camera[0] is not None:
renderer.GetActiveCamera().SetPosition(*initial_camera[0])
if initial_camera[1] is not None:
renderer.GetActiveCamera().SetFocalPoint(*initial_camera[1])
if initial_camera[2] is not None:
renderer.GetActiveCamera().SetViewUp(*initial_camera[2])
if initial_camera[3] is not None:
renderer.GetActiveCamera().ParallelProjectionOn()
renderer.GetActiveCamera().SetParallelScale(initial_camera[3])
# callback maker for scale manipulation
def make_scale_observer(glyphs):
def scale_observer(obj, event):
slider_repres = obj.GetRepresentation()
scale_at_pos = slider_repres.GetValue()
for glyph in glyphs:
for k in glyph:
glyph[k].SetScaleFactor(
scale_at_pos * glyph[k]._scale_fact)
return scale_observer
# callback maker for time scale manipulation
def make_time_scale_observer(time_slider_repres, time_observer):
delta_time = max_time - min_time
def time_scale_observer(obj, event):
slider_repres = obj.GetRepresentation()
time_scale_at_pos = 1. - slider_repres.GetValue()
current_time = time_observer._time
shift = (current_time - min_time) / delta_time
xmin_time = min_time + time_scale_at_pos / 2. * delta_time
xmax_time = max_time - time_scale_at_pos / 2. * delta_time
xdelta_time = xmax_time - xmin_time
new_mintime = max(min_time, current_time - xdelta_time)
new_maxtime = min(max_time, current_time + xdelta_time)
time_slider_repres.SetMinimumValue(new_mintime)
time_slider_repres.SetMaximumValue(new_maxtime)
return time_scale_observer
# make a slider widget and its representation
def make_slider(title, observer, interactor,
startvalue, minvalue, maxvalue, cx1, cy1, cx2, cy2):
slider_repres = vtk.vtkSliderRepresentation2D()
slider_repres.SetMinimumValue(
minvalue - (maxvalue - minvalue) / 100)
slider_repres.SetMaximumValue(
maxvalue + (maxvalue - minvalue) / 100)
slider_repres.SetValue(startvalue)
slider_repres.SetTitleText(title)
slider_repres.GetPoint1Coordinate().\
SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint1Coordinate().SetValue(cx1, cy1)
slider_repres.GetPoint2Coordinate().\
SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint2Coordinate().SetValue(cx2, cy2)
slider_repres.SetSliderLength(0.02)
slider_repres.SetSliderWidth(0.03)
slider_repres.SetEndCapLength(0.01)
slider_repres.SetEndCapWidth(0.03)
slider_repres.SetTubeWidth(0.005)
slider_repres.SetLabelFormat('%f')
slider_repres.SetTitleHeight(0.02)
slider_repres.SetLabelHeight(0.02)
slider_widget = vtk.vtkSliderWidget()
slider_widget.SetInteractor(interactor)
slider_widget.SetRepresentation(slider_repres)
slider_widget.KeyPressActivationOff()
slider_widget.SetAnimationModeToAnimate()
slider_widget.SetEnabled(True)
slider_widget.AddObserver('InteractionEvent', observer)
return slider_widget, slider_repres
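# Example call mirroring the 'Scale' slider created further below; the observer
# is invoked on every InteractionEvent with the slider widget as `obj`
# (numeric bounds here are illustrative only):
#
#   slwsc, slrepsc = make_slider('Scale', make_scale_observer([glyph_dict]),
#                                interactor_renderer,
#                                1.0, 0.5, 1.5, 0.01, 0.01, 0.01, 0.7)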
image_maker = vtk.vtkWindowToImageFilter()
image_maker.SetInput(renderer_window)
recorder = vtk.vtkOggTheoraWriter()
recorder.SetQuality(2)
recorder.SetRate(frames_per_second)
recorder.SetFileName(os.path.splitext(io_filename)[0]+'.avi')
recorder.SetInputConnection(image_maker.GetOutputPort())
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(image_maker.GetOutputPort())
class InputObserver():
def __init__(self, times=None, slider_repres=None):
self._opacity = 1.0
self._current_id = vtk.vtkIdTypeArray()
self._renderer = renderer
self._renderer_window = renderer_window
self._image_counter = 0
self._view_cycle = -1
self._recording = False
self._times = None
if times is None or len(times)==0:
return
self._times = times
self._stimes = set(times)
self._time_step = (max(self._stimes) - min(self._stimes)) \
/ len(self._stimes)
self._time = min(times)
if slider_repres is None:
return
self._slider_repres = slider_repres
def update(self):
global cf_prov
if self._times is None:
renderer_window.Render()
return
index = bisect.bisect_left(self._times, self._time)
index = max(0, index)
index = min(index, len(self._times) - 1)
if cf_prov is not None:
cf_prov._time = self._times[index]
cf_prov.xmethod()
# contact_posa.Update()
# contact_posb.Update()
# contact_pos_force.Update()
# arrow_glyph.Update()
# gmapper.Update()
# set_positionv(spos_data[:, 1:9])
set_dynamic_actors_visibility(self._times[index])
id_t = numpy.where(pos_data[:, 0] == self._times[index])
set_position(*pos_data[id_t, :])
self._slider_repres.SetValue(self._time)
self._current_id.SetNumberOfValues(1)
self._current_id.SetValue(0, index)
self._iter_plot.SetSelection(self._current_id)
self._prec_plot.SetSelection(self._current_id)
renderer_window.Render()
def object_pos(self, id_):
index = bisect.bisect_left(self._times, self._time)
index = max(0, index)
index = min(index, len(self._times) - 1)
id_t = numpy.where(pos_data[:, 0] == self._times[index])
return (pos_data[id_t[0][id_], 2], pos_data[id_t[0][id_], 3], pos_data[id_t[0][id_], 4])
def set_opacity(self):
for instance, actors in dynamic_actors.items():
for actor in actors:
actor.GetProperty().SetOpacity(self._opacity)
def key(self, obj, event):
global cf_prov
key = obj.GetKeySym()
print('key', key)
if key == 'r':
spos_data, dpos_data, dom_data, cf_data, solv_data = load()
if not cf_disable:
cf_prov = CFprov(cf_data, dom_data)
times = list(set(dpos_data[:, 0]))
times.sort()
if len(spos_data) > 0:
instances = set(dpos_data[:, 1]).union(
set(spos_data[:, 1]))
else:
instances = set(dpos_data[:, 1])
if cf_prov is not None:
cf_prov._time = min(times[:])
cf_prov.xmethod()
for mu in cf_prov._mu_coefs:
contact_posa[mu].SetInputData(cf_prov._output)
contact_posa[mu].Update()
contact_posb[mu].SetInputData(cf_prov._output)
contact_posb[mu].Update()
contact_pos_force[mu].Update()
contact_pos_norm[mu].Update()
id_t0 = numpy.where(
dpos_data[:, 0] == time0)
pos_data = dpos_data[:]
min_time = times[0]
set_dynamic_actors_visibility(time0)
max_time = times[len(times) - 1]
self._slider_repres.SetMinimumValue(min_time)
self._slider_repres.SetMaximumValue(max_time)
self.update()
if key == 'p':
self._image_counter += 1
image_maker.Update()
writer.SetFileName(
'vview-{0}.png'.format(self._image_counter))
writer.Write()
if key == 'Up':
self._time_step = self._time_step * 2.
self._time += self._time_step
if key == 'Down':
self._time_step = self._time_step / 2.
self._time -= self._time_step
if key == 'Left':
self._time -= self._time_step
if key == 'Right':
self._time += self._time_step
if key == 't':
self._opacity -= .1
self.set_opacity()
if key == 'T':
self._opacity += .1
self.set_opacity()
if key == 'c':
print('camera position:', self._renderer.GetActiveCamera().GetPosition())
print('camera focal point', self._renderer.GetActiveCamera().GetFocalPoint())
print('camera clipping plane', self._renderer.GetActiveCamera().GetClippingRange())
print('camera up vector', self._renderer.GetActiveCamera().GetViewUp())
if self._renderer.GetActiveCamera().GetParallelProjection() != 0:
print('camera parallel scale', self._renderer.GetActiveCamera().GetParallelScale())
if key == 'o':
self._renderer.GetActiveCamera().SetParallelProjection(
1 - self._renderer.GetActiveCamera().GetParallelProjection())
if key == 'v':
# Cycle through some useful views
dist = norm(self._renderer.GetActiveCamera().GetPosition())
# dist2 = norm([numpy.sqrt(dist**2)/3]*2)
d3 = norm([numpy.sqrt(dist**2) / 3] * 3)
self._view_cycle += 1
if self._view_cycle == 0:
print('Left')
self._renderer.GetActiveCamera().SetPosition(
dist, 0, 0)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(0, 0, 1)
elif self._view_cycle == 1:
print('Right')
self._renderer.GetActiveCamera().SetPosition(
0, dist, 0)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(0, 0, 1)
elif self._view_cycle == 2:
print('Top')
self._renderer.GetActiveCamera().SetPosition(
0, 0, dist)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(1, 0, 0)
else: # Corner
print('Corner')
self._renderer.GetActiveCamera().SetPosition(
d3, d3, d3)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(
-1, -1, 1)
self._view_cycle = -1
self._renderer.ResetCameraClippingRange()
if key == 's':
if not self._recording:
recorder.Start()
self._recording = True
if key == 'e':
if self._recording:
self._recording = False
recorder.End()
if key == 'C':
this_view.action(self)
self.update()
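# Summary of the key bindings handled above: r=reload data, p=save a PNG
# screenshot, Up/Down=double/halve the time step and step forward/backward,
# Left/Right=step backward/forward in time, t/T=decrease/increase opacity,
# c=print camera parameters, o=toggle parallel projection, v=cycle
# Left/Right/Top/Corner views, s/e=start/stop video recording,
# C=invoke this_view.action().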
def time(self, obj, event):
slider_repres = obj.GetRepresentation()
self._time = slider_repres.GetValue()
self.update()
# observer on 2D chart
def iter_plot_observer(self, obj, event):
if self._iter_plot.GetSelection() is not None:
# just one selection at the moment!
if self._iter_plot.GetSelection().GetMaxId() >= 0:
self._time = self._times[
self._iter_plot.GetSelection().GetValue(0)]
# -> recompute index ...
self.update()
def prec_plot_observer(self, obj, event):
if self._prec_plot.GetSelection() is not None:
# just one selection at the moment!
if self._prec_plot.GetSelection().GetMaxId() >= 0:
self._time = self._times[
self._prec_plot.GetSelection().GetValue(0)]
# -> recompute index ...
self.update()
def recorder_observer(self, obj, event):
if self._recording:
if advance_by_time is not None:
self._time += advance_by_time
slwsc.SetEnabled(False) # Scale slider
xslwsc.SetEnabled(False) # Time scale slider
# slider_widget.SetEnabled(False) # Time slider
# widget.SetEnabled(False) # Axis widget
self.update()
image_maker.Modified()
recorder.Write()
if advance_by_time is not None:
slwsc.SetEnabled(True)
xslwsc.SetEnabled(True)
# slider_widget.SetEnabled(True)
# widget.SetEnabled(True) # Axis widget
if self._time >= max(self._times):
self._recording = False
recorder.End()
if len(times) > 0:
slider_repres = vtk.vtkSliderRepresentation2D()
if min_time is None:
min_time = times[0]
if max_time is None:
max_time = times[len(times) - 1]
slider_repres.SetMinimumValue(min_time)
slider_repres.SetMaximumValue(max_time)
slider_repres.SetValue(min_time)
slider_repres.SetTitleText("time")
slider_repres.GetPoint1Coordinate(
).SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint1Coordinate().SetValue(0.4, 0.9)
slider_repres.GetPoint2Coordinate(
).SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint2Coordinate().SetValue(0.9, 0.9)
slider_repres.SetSliderLength(0.02)
slider_repres.SetSliderWidth(0.03)
slider_repres.SetEndCapLength(0.01)
slider_repres.SetEndCapWidth(0.03)
slider_repres.SetTubeWidth(0.005)
slider_repres.SetLabelFormat("%3.4lf")
slider_repres.SetTitleHeight(0.02)
slider_repres.SetLabelHeight(0.02)
slider_widget = vtk.vtkSliderWidget()
slider_widget.SetInteractor(interactor_renderer)
slider_widget.SetRepresentation(slider_repres)
slider_widget.KeyPressActivationOff()
slider_widget.SetAnimationModeToAnimate()
slider_widget.SetEnabled(True)
input_observer = InputObserver(times, slider_repres)
slider_widget.AddObserver("InteractionEvent", input_observer.time)
else:
input_observer = InputObserver()
interactor_renderer.AddObserver('KeyPressEvent', input_observer.key)
# Create a vtkLight, and set the light parameters.
light = vtk.vtkLight()
light.SetFocalPoint(0, 0, 0)
light.SetPosition(0, 0, 500)
# light.SetLightTypeToHeadlight()
renderer.AddLight(light)
hlight = vtk.vtkLight()
hlight.SetFocalPoint(0, 0, 0)
# hlight.SetPosition(0, 0, 500)
hlight.SetLightTypeToHeadlight()
renderer.AddLight(hlight)
# Warning! numpy_support offers a view on the numpy array:
# the numpy array must not be garbage collected!
nxtime = solv_data[:, 0]
nxiters = solv_data[:, 1]
nprecs = solv_data[:, 2]
xtime = numpy_support.numpy_to_vtk(nxtime)
xiters = numpy_support.numpy_to_vtk(nxiters)
xprecs = numpy_support.numpy_to_vtk(nprecs)
xtime.SetName('time')
xiters.SetName('iterations')
xprecs.SetName('precisions')
table = vtk.vtkTable()
table.AddColumn(xtime)
table.AddColumn(xiters)
table.AddColumn(xprecs)
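# (Keeping nxtime / nxiters / nprecs referenced above is what the warning is
# about: numpy_to_vtk shares the array's buffer rather than copying it, so the
# vtkTable would read freed memory if those arrays were garbage collected.)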
# table.Dump()
tview_iter = vtk.vtkContextView()
tview_prec = vtk.vtkContextView()
chart_iter = vtk.vtkChartXY()
chart_prec = vtk.vtkChartXY()
tview_iter.GetScene().AddItem(chart_iter)
tview_prec.GetScene().AddItem(chart_prec)
iter_plot = chart_iter.AddPlot(vtk.vtkChart.LINE)
iter_plot.SetLabel('Solver iterations')
iter_plot.GetXAxis().SetTitle('time')
iter_plot.GetYAxis().SetTitle('iterations')
prec_plot = chart_prec.AddPlot(vtk.vtkChart.LINE)
prec_plot.SetLabel('Solver precisions')
prec_plot.GetXAxis().SetTitle('time')
prec_plot.GetYAxis().SetTitle('precisions')
add_compatiblity_methods(iter_plot)
add_compatiblity_methods(prec_plot)
iter_plot.SetInputData(table, 'time', 'iterations')
prec_plot.SetInputData(table, 'time', 'precisions')
iter_plot.SetWidth(5.0)
prec_plot.SetWidth(5.0)
iter_plot.SetColor(0, 255, 0, 255)
prec_plot.SetColor(0, 255, 0, 255)
input_observer._iter_plot = iter_plot
input_observer._prec_plot = prec_plot
input_observer._iter_plot_view = tview_iter
input_observer._prec_plot_view = tview_prec
tview_iter.GetInteractor().AddObserver('RightButtonReleaseEvent',
input_observer.iter_plot_observer)
tview_prec.GetInteractor().AddObserver('RightButtonReleaseEvent',
input_observer.prec_plot_observer)
# screen_size = renderer_window.GetScreenSize()
renderer_window.SetSize(*config['window_size'])
renderer_window.SetWindowName('vview: ' + io_filename)
tview_iter.GetRenderer().GetRenderWindow().SetSize(600, 200)
tview_prec.GetRenderer().GetRenderWindow().SetSize(600, 200)
tview_iter.GetInteractor().Initialize()
# tview_iter.GetInteractor().Start()
tview_iter.GetRenderer().SetBackground(.9, .9, .9)
tview_iter.GetRenderer().Render()
tview_prec.GetInteractor().Initialize()
# tview_prec.GetInteractor().Start()
tview_prec.GetRenderer().SetBackground(.9, .9, .9)
tview_prec.GetRenderer().Render()
if io.contact_forces_data().shape[0] > 0:
slwsc, slrepsc = make_slider('Scale',
make_scale_observer([cone_glyph, cylinder_glyph, sphere_glypha, sphere_glyphb, arrow_glyph]
),
interactor_renderer,
cf_scale_factor, cf_scale_factor -
cf_scale_factor / 2,
cf_scale_factor + cf_scale_factor / 2,
0.01, 0.01, 0.01, 0.7)
if len(times) > 0:
xslwsc, xslrepsc = make_slider('Time scale',
make_time_scale_observer(
slider_repres, input_observer),
interactor_renderer,
time_scale_factor, time_scale_factor -
time_scale_factor / 2,
time_scale_factor + | |
code.append(indent(1) + var_prefix + 'bus_t' + str(idx) + '* ' + res_name + ',\n')
# idx += 1
# code.append(indent(1) + 'bool init,\n')
# code.append(indent(1) + 'unsigned int FILTER_S\n')
# code.append('){\n')
code.append('#pragma HLS DATAFLOW\n\n')
code.append(indent(1) + '// FIFOs\n')
idx = 0
for op_name in desp['OP_NAME']:
for sa_rows in range(desp['SA_ROWS'] + 1):
for sa_cols in range(desp['SA_COLS'] + 1):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> fifo' + str(idx) + \
'_feed' + str(sa_rows) + '_' + str(sa_cols) + ';\n')
# depth = max(2, desp['FC_GROUP_FACTOR'][idx] + 1)
if desp['APP_NAME'] == 'nw':
depth = max(2, desp['FC_GROUP_FACTOR'][idx] * 6 + 1)
else:
depth = max(2, desp['FC_GROUP_FACTOR'][idx] * 1 + 1)
code.append('#pragma HLS STREAM variable=fifo' + str(idx) + '_feed' + str(sa_rows) + '_' + str(sa_cols) + ' depth=' + str(depth) + '\n')
idx += 1
for res_name in desp['RES_NAME']:
for sa_rows in range(desp['SA_ROWS'] + 1):
for sa_cols in range(desp['SA_COLS'] + 1):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> fifo' + str(idx) + \
'_collect' + str(sa_rows) + '_' + str(sa_cols) + ';\n')
if desp['APP_NAME'] == 'nw':
depth = max(2, desp['FC_GROUP_FACTOR'][idx] * 6 + 1)
else:
depth = max(2, desp['FC_GROUP_FACTOR'][idx] * 1 + 1)
code.append('#pragma HLS STREAM variable=fifo' + str(idx) + '_collect' + str(sa_rows) + '_' + str(sa_cols) + \
' depth=' + str(depth) + '\n')
idx += 1
idx = 0
for op_name in desp['OP_NAME']:
feed_num = desp['OP_ENGINE_NUM'][idx] + 1
for feed_id in range(feed_num):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'TransferChannelType> fifo' + str(idx) + \
'_transfer' + str(feed_id) + ';\n')
code.append('#pragma HLS STREAM variable=fifo' + str(idx) + '_transfer' + str(feed_id) + ' depth=' + str(desp['CHANNEL_DEPTH']) + '\n')
idx += 1
for res_name in desp['RES_NAME']:
feed_num = desp['RES_ENGINE_NUM'][idx - len(desp['OP_NAME'])] + 1
for feed_id in range(feed_num):
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'TransferChannelType> fifo' + str(idx) + '_transfer' + str(feed_id) + ';\n')
code.append('#pragma HLS STREAM variable=fifo' + str(idx) + '_transfer' + str(feed_id) + ' depth=' + str(desp['CHANNEL_DEPTH']) + '\n')
idx += 1
# shim fifos
idx = 0
for op_name in desp['OP_NAME']:
code.append(indent(1) + 'stream<ap_uint<%sDATA%d_WIDTH * %sDATA%d_FC_SIMD_FACTOR> > fifo%d_shim;\n' % (var_prefix, idx, var_prefix, idx, idx))
code.append('#pragma HLS STREAM variable=fifo%d_shim depth=2\n' % (idx))
idx += 1
for res_name in desp['RES_NAME']:
code.append(indent(1) + 'stream<ap_uint<%sDATA%d_WIDTH * %sDATA%d_FC_SIMD_FACTOR> > fifo%d_shim;\n' % (var_prefix, idx, var_prefix, idx, idx))
code.append('#pragma HLS STREAM variable=fifo%d_shim depth=2\n\n' % (idx))
idx += 1
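# For reference, a hedged example of what the two loops above emit for idx == 0
# (the 'U1_' prefix below stands in for var_prefix and is illustrative only):
#
#   stream<ap_uint<U1_DATA0_WIDTH * U1_DATA0_FC_SIMD_FACTOR> > fifo0_shim;
#   #pragma HLS STREAM variable=fifo0_shim depth=2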
# config fifos
code.append(indent(1) + 'stream<uint> fifo_DataFeed0Head_config_out0;\n')
code.append('#pragma HLS STREAM variable=fifo_DataFeed0Head_config_out0 depth=16\n')
code.append(indent(1) + 'stream<uint> fifo_DataFeed0Head_config_out1;\n')
code.append('#pragma HLS STREAM variable=fifo_DataFeed0Head_config_out1 depth=16\n')
code.append(indent(1) + 'stream<uint> fifo_DataFeed1Head_config_out0;\n')
code.append('#pragma HLS STREAM variable=fifo_DataFeed1Head_config_out0 depth=16\n\n')
idx = 0
for op_name in desp['OP_NAME']:
feed_num = desp['OP_ENGINE_NUM'][idx]
if idx == 0:
for feed_id in range(feed_num):
if feed_id < feed_num - 1:
code.append(indent(1) + 'stream<uint> fifo_DataFeed%dEngine%d_config_out0;\n' % (idx, feed_id))
code.append(indent(1) + 'stream<uint> fifo_DataFeed%dEngine%d_config_out1;\n' % (idx, feed_id))
code.append('#pragma HLS STREAM variable=fifo_DataFeed%dEngine%d_config_out0 depth=16\n' % (idx, feed_id))
code.append('#pragma HLS STREAM variable=fifo_DataFeed%dEngine%d_config_out1 depth=16\n' % (idx, feed_id))
else:
code.append(indent(1) + 'stream<uint> fifo_DataFeed%dEngine%d_config_out1;\n' % (idx, feed_id))
code.append('#pragma HLS STREAM variable=fifo_DataFeed%dEngine%d_config_out1 depth=16\n\n' % (idx, feed_id))
elif idx == 1:
for feed_id in range(feed_num):
if feed_id < feed_num - 1:
code.append(indent(1) + 'stream<uint> fifo_DataFeed%dEngine%d_config_out0;\n' % (idx, feed_id))
code.append('#pragma HLS STREAM variable=fifo_DataFeed%dEngine%d_config_out0 depth=16\n' % (idx, feed_id))
idx += 1
code.append('\n')
for res_name in desp['RES_NAME']:
feed_num = desp['RES_ENGINE_NUM'][idx - len(desp['OP_NAME'])]
for feed_id in range(feed_num):
code.append(indent(1) + 'stream<uint> fifo_DataCollect%dEngine%d_config_out;\n' % (idx, feed_id))
code.append('#pragma HLS STREAM variable=fifo_DataCollect%dEngine%d_config_out depth=16\n' % (idx, feed_id))
idx += 1
code.append('\n')
for row in range(desp['SA_ROWS']):
for col in range(desp['SA_COLS']):
code.append(indent(1) + 'stream<uint> fifo_PE%d_%d_op0_config_out;\n' % (row, col))
code.append(indent(1) + 'stream<uint> fifo_PE%d_%d_op1_config_out;\n' % (row, col))
code.append(indent(1) + 'stream<uint> fifo_PE%d_%d_compute_config_out;\n' % (row, col))
code.append(indent(1) + 'stream<uint> fifo_PE%d_%d_res_config_out;\n' % (row, col))
code.append('#pragma HLS STREAM variable=fifo_PE%d_%d_op0_config_out depth=2\n' % (row, col))
code.append('#pragma HLS STREAM variable=fifo_PE%d_%d_op1_config_out depth=2\n' % (row, col))
code.append('#pragma HLS STREAM variable=fifo_PE%d_%d_compute_config_out depth=2\n' % (row, col))
code.append('#pragma HLS STREAM variable=fifo_PE%d_%d_res_config_out depth=2\n' % (row, col))
code.append('\n')
for row in range(desp['SA_ROWS']):
for col in range(desp['SA_COLS']):
idx = 0
for op_name in desp['OP_NAME']:
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> PE' + \
str(row) + '_' + str(col) + '_fifo' + str(idx) + '_local;\n')
code.append('#pragma HLS STREAM variable=PE' + str(row) + '_' + str(col) + '_fifo' + str(idx) + \
'_local depth=' + str(desp['CHANNEL_DEPTH']) + '\n')
idx += 1
for res_name in desp['RES_NAME']:
code.append(indent(1) + 'stream<' + var_prefix + 'Data' + str(idx) + 'PEChannelType> PE' + \
str(row) + '_' + str(col) + '_fifo' + str(idx) + '_local;\n')
code.append('#pragma HLS STREAM variable=PE' + str(row) + '_' + str(col) + '_fifo' + str(idx) + \
'_local depth=' + str(desp['CHANNEL_DEPTH']) + '\n')
idx += 1
if desp['APP_NAME'] == 'nw':
inter_idx = 0
for inter_name in desp['INTER_NAME']:
code.append(indent(1) + 'stream<' + desp['INTER_DATA_TYPE'][inter_idx] + '> PE' + str(row) + '_' + \
str(col) + '_fifo_' + inter_name + '_local;\n')
code.append('#pragma HLS STREAM variable=PE' + str(row) + '_' + str(col) + '_fifo_' + inter_name + \
'_local depth=' + str(desp['CHANNEL_DEPTH']) + '\n')
inter_idx += 1
code.append('\n')
# code.append(indent(1) + 'stream<%sConfigInst> fifo_kernel_config_in;\n' % (var_prefix))
# code.append('#pragma HLS STREAM variable=fifo_kernel_config_in depth=2\n')
# code.append(indent(1) + 'stream<%sConfigInst> fifo_kernel_config_out;\n' % (var_prefix))
# code.append('#pragma HLS STREAM variable=fifo_kernel_config_out depth=2\n\n')
code.append(indent(1) + '// modules\n')
idx = 0
for op_name in desp['OP_NAME']:
# feed_shim
# code.append(indent(1) + var_prefix + 'DataFeed' + str(idx) + 'Head_Shim(\n')
# code.append(indent(2) + op_name + ', fifo' + str(idx) + '_shim,\n')
# if idx == 0:
# code.append(indent(2) + '%sIN_NUM, %sOUT_NUM, %sIN_NUM_T, %sOUT_NUM_T, %sIN_IMG_H, %sIN_IMG_W, %sOUT_IMG_H, %sOUT_IMG_W, %sOUT_IMG_H_T, %sOUT_IMG_W_T, FILTER_S, %sSTRIDE, %sLAYER_BATCH,\n' % (var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix))
# code.append(indent(2) + 'fifo_kernel_config_in\n')
# else:
# code.append(indent(2) + '%sIN_NUM, %sOUT_NUM, %sIN_NUM_T, %sOUT_NUM_T, %sIN_IMG_H, %sIN_IMG_W, %sOUT_IMG_H, %sOUT_IMG_W, %sOUT_IMG_H_T, %sOUT_IMG_W_T, FILTER_S, %sSTRIDE, %sLAYER_BATCH\n' % (var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix))
# code.append(indent(1) + ');\n\n')
# feed_head
feed_num = desp['OP_ENGINE_NUM'][idx]
code.append(indent(1) + var_prefix + 'DataFeed' + str(idx) + 'Head(\n')
# code.append(indent(2) + 'fifo%d_shim,\n' % (idx))
if idx == 0:
code.append(indent(2) + 'fifo_cin,\n')
else:
code.append(indent(2) + 'fifo_weight,\n')
for feed_group in range(desp['FC_SPLIT_FACTOR'][idx]):
feed_id = feed_group * (desp['OP_ENGINE_NUM'][idx] / desp['FC_SPLIT_FACTOR'][idx])
feed_id = int(feed_id)
if feed_group < desp['FC_SPLIT_FACTOR'][idx] - 1:
code.append(indent(2) + 'fifo' + str(idx) + '_transfer' + str(feed_id) + ',\n')
else:
code.append(indent(2) + 'fifo' + str(idx) + '_transfer' + str(feed_id) + ',\n')
if idx == 0:
# code.append(indent(2) + '%sIN_NUM, %sOUT_NUM, %sIN_IMG_H, %sIN_IMG_W, %sOUT_IMG_H, %sOUT_IMG_W, FILTER_S,\n' % (var_prefix, var_prefix, var_prefix, var_prefix, var_prefix, var_prefix))
code.append(indent(2) + 'fifo_kernel_config_in,\n')
code.append(indent(2) + 'fifo_kernel_config_out,\n')
if idx == 0:
code.append(indent(2) + 'fifo_DataFeed%dHead_config_out0, fifo_DataFeed%dHead_config_out1\n' % (idx, idx))
else:
code.append(indent(2) + 'fifo_DataFeed0Head_config_out1, fifo_DataFeed1Head_config_out0\n')
code.append(indent(1) + ');\n\n')
# feed engine
for feed_group in range(desp['FC_SPLIT_FACTOR'][idx]):
for local_feed_id in range(int(feed_num / desp['FC_SPLIT_FACTOR'][idx])):
feed_id = local_feed_id + feed_group * (feed_num / desp['FC_SPLIT_FACTOR'][idx])
feed_id_nxt = feed_id + 1
feed_id = int(feed_id)
feed_id_nxt = int(feed_id_nxt)
if local_feed_id < feed_num / desp['FC_SPLIT_FACTOR'][idx] - 1:
code.append(indent(1) + var_prefix + 'DataFeed' + str(idx) + 'Engine0_wrapper(\n')
code.append(indent(2) + 'fifo' + str(idx) + '_transfer' + str(feed_id) + ',\n')
code.append(indent(2) + 'fifo' + str(idx) + '_transfer' + str(feed_id_nxt) + ',\n')
for gs in range(desp['FC_GROUP_FACTOR'][idx]):
feed_pe_id = feed_id * desp['FC_GROUP_FACTOR'][idx] + gs
feed_pe_id = int(feed_pe_id)
ch_dir = desp['OP_CHANNEL_DIR'][idx]
if ch_dir == 'D':
code.append(indent(2) + 'fifo' + str(idx) + '_feed0_' + str(feed_pe_id) + ',\n')
elif ch_dir == 'U':
row_idx = desp['SA_ROWS'] - 1
code.append(indent(2) + 'fifo' + str(idx) + '_feed' + str(row_idx) + '_' + str(feed_pe_id) + ',\n')
elif ch_dir == 'R':
code.append(indent(2) + 'fifo' + str(idx) + '_feed' + str(feed_pe_id) + '_0,\n')
elif ch_dir == 'L':
col_idx = desp['SA_COLS'] - 1
code.append(indent(2) + 'fifo' + str(idx) + '_feed' + str(feed_pe_id) + '_' + str(col_idx) + ',\n')
code.append(indent(2) + str(local_feed_id) + ',\n')
if idx == 0:
if feed_id == 0:
code.append(indent(2) + 'fifo_DataFeed%dHead_config_out0,\n' % (idx))
code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out0,\n' % (idx, feed_id))
code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out1\n' % (idx, feed_id))
else:
code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out0,\n' % (idx, feed_id - 1))
code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out0,\n' % (idx, feed_id))
code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out1\n' % (idx, feed_id))
elif idx == 1:
if feed_id == 0:
code.append(indent(2) + 'fifo_DataFeed1Head_config_out0,\n')
code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out0\n' % (idx, feed_id))
else:
                            code.append(indent(2) + 'fifo_DataFeed%dEngine%d_config_out0,\n' % (idx, feed_id - 1))
#!/usr/bin/env python
import sys, os, time, json, socket, select, random, subprocess, string, hashlib, bisect, atexit
VERSION = "0.6"
REPLICA_PROG = './3700kvstore'
NUM_CLIENTS = 8
#######################################################################################################
# Parses and validates config files for simulations
class Config:
def __init__(self, filename):
# load the json
conf = json.loads(open(filename).read())
# check for required fields
if 'lifetime' not in conf or 'replicas' not in conf or 'requests' not in conf:
raise AttributeError("Required field is missing from the config file")
# load the required fields and sanity check them
self.lifetime = int(conf['lifetime'])
if self.lifetime < 5:
raise ValueError("Simulation lifetime must be at least 5 seconds")
self.replicas = int(conf['replicas'])
if self.replicas < 3 or self.replicas > 21:
raise ValueError("Number of replicas must be at least 3 and at most 21")
self.requests = int(conf['requests'])
if self.requests < 0:
raise ValueError("Number of requests cannot be negative")
# initialize the random number generator
if 'seed' in conf: self.seed = conf['seed']
else: self.seed = None
random.seed(self.seed)
# load the default variables
self.mix = self.__get_default__(conf, 'mix', 0, 1, 0.8, "Read/Write mix must be between 0 and 1")
self.start_wait = self.__get_default__(conf, 'start_wait', 0, self.lifetime, 2.0,
"Start wait must be between 0 and %s" % (self.lifetime))
self.end_wait = self.__get_default__(conf, 'end_wait', 0, self.lifetime, 2.0,
"End wait must be between 0 and %s" % (self.lifetime))
self.drops = self.__get_default__(conf, 'drops', 0, 1, 0.0, "Drops must be between 0 and 1")
self.max_packets = self.__get_default__(conf, 'max_packets', self.requests, 900000,
20000, "max_packets must be between %i and %i" %
(self.requests, 900000))
if 'events' in conf: self.events = conf['events']
else: self.events = []
# sanity check the events
for event in self.events:
if event['type'] not in ['kill_leader', 'kill_non_leader', 'part_easy', 'part_hard', 'part_end']:
raise ValueError("Unknown event type: %s" % (event['type']))
if event['time'] < 0 or event['time'] > self.lifetime:
raise ValueError("Event time must be between 0 and %s" % (self.lifetime))
def __get_default__(self, conf, field, low, high, default, exception):
if field in conf:
temp = float(conf[field])
if temp < low or temp > high:
raise ValueError(exception)
else: temp = default
return temp
def dump(self):
print ('%8s %s\n' * 9) % ('Lifetime', self.lifetime, 'Replicas', self.replicas,
'Requests', self.requests, 'Seed', self.seed,
'Mix', self.mix, 'Start Wait', self.start_wait,
'End Wait', self.end_wait, 'Drops', self.drops,
'Max Packets', self.max_packets),
for event in self.events:
print '%8s %15s %s' % ('Event', event['type'], event['time'])
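# Illustrative config (not part of the original test harness): a minimal JSON
# document that Config() above accepts. Field names and validity ranges mirror
# the checks in Config.__init__; the concrete values are placeholders.
EXAMPLE_CONFIG_JSON = """
{
    "lifetime": 30,
    "replicas": 5,
    "requests": 500,
    "seed": 42,
    "mix": 0.8,
    "drops": 0.0,
    "events": [ {"type": "kill_leader", "time": 10} ]
}
"""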
#######################################################################################################
class Stats:
def __init__(self):
self.total_msgs = 0
self.total_drops = 0
self.total_get = 0
self.total_put = 0
self.failed_get = 0
self.failed_put = 0
self.incorrect = 0
self.duplicates = 0
self.unanswered_get = 0
self.unanswered_put = 0
self.redirects = 0
self.latencies = []
self.died = 0
self.killed = 0
self.mean_latency = 0.0
self.median_latency = 0.0
self.leaders = []
def add_leader(self, ldr):
if len(self.leaders) == 0 or self.leaders[-1] != ldr:
self.leaders.append(ldr)
def finalize(self):
if len(self.latencies) > 0:
self.latencies.sort()
self.mean_latency = float(sum(self.latencies))/len(self.latencies)
self.median_latency = self.latencies[len(self.latencies)/2]
def dump(self):
print 'Simulation finished.'
print 'Leaders:', ' '.join(self.leaders)
print 'Replicas that died/were killed: %i/%i' % (self.died, self.killed)
print 'Total messages sent:', self.total_msgs
print 'Total messages dropped:', self.total_drops
print 'Total client get()/put() requests: %i/%i' % (self.total_get, self.total_put)
print 'Total duplicate responses:', self.duplicates
print 'Total unanswered get()/put() requests: %i/%i' % (self.unanswered_get, self.unanswered_put)
print 'Total redirects:', self.redirects
print 'Total get()/put() failures: %i/%i' % (self.failed_get, self.failed_put)
print 'Total get() with incorrect response:', self.incorrect
if len(self.latencies) > 0:
print 'Mean/Median query latency: %fsec/%fsec' % (float(sum(self.latencies))/len(self.latencies),
self.latencies[len(self.latencies)/2])
#######################################################################################################
class Client:
class Request:
def __init__(self, get, key, val=None):
self.get = get
self.key = key
self.val = val
self.ts = time.time()
def __init__(self, simulator, cid):
self.reqs = {}
self.items = {}
self.completed = set()
self.sim = simulator
self.cid = cid
self.leader = 'FFFF'
def forget(self):
self.leader = 'FFFF'
def __get_rand_str__(self, size=16, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def __get_destination__(self):
if self.leader == 'FFFF' or self.leader not in self.sim.living_rids:
self.leader = 'FFFF'
return random.choice(list(self.sim.living_rids))
return self.leader
def __create_get__(self, key):
self.sim.stats.total_get += 1
mid = self.__get_rand_str__()
self.reqs[mid] = self.Request(True, key)
dst = self.__get_destination__()
return {'src': self.cid, 'dst': dst, 'leader': self.leader,
'type': 'get', 'MID': mid, 'key': key}
def __create_put__(self, key, value):
self.sim.stats.total_put += 1
mid = self.__get_rand_str__()
self.reqs[mid] = self.Request(False, key, value)
dst = self.__get_destination__()
return {'src': self.cid, 'dst': dst, 'leader': self.leader,
'type': 'put', 'MID': mid, 'key': key, 'value': value}
def finalize(self):
for req in self.reqs.itervalues():
if req.get: self.sim.stats.unanswered_get += 1
else: self.sim.stats.unanswered_put += 1
def create_req(self, get=True):
# create a get message, if possible
if get and len(self.items) > 0:
return self.__create_get__(random.choice(self.items.keys()))
# decide to add a new key, or update an existing key
if len(self.items) == 0 or random.random() > 0.5:
k = self.__get_rand_str__(size=32)
v = hashlib.md5(k).hexdigest()
else:
k = random.choice(self.items.keys())
v = hashlib.md5(self.items[k]).hexdigest()
return self.__create_put__(k, v)
def deliver(self, raw_msg, msg):
# validate the message
if 'MID' not in msg:
print "*** Simulator Error - Message missing mid field: %s" % (raw_msg)
self.sim.stats.incorrect += 1
return None
if msg['type'] not in ['ok', 'fail', 'redirect']:
print "*** Simulator Error - Unknown message type sent to client: %s" % (raw_msg)
self.sim.stats.incorrect += 1
return None
mid = msg['MID']
# is this a duplicate?
if mid in self.completed:
self.sim.stats.duplicates += 1
return None
# is this a message that I'm expecting?
try:
req = self.reqs[mid]
except:
print "*** Simulator Error - client received an unexpected MID: %s" % (raw_msg)
self.sim.stats.incorrect += 1
return None
del self.reqs[mid]
self.leader = msg['leader']
self.sim.stats.latencies.append(time.time() - req.ts)
# if this is a redirect or a fail, try again
if msg['type'] in ['redirect', 'fail']:
if req.get:
if msg['type'] == 'fail': self.sim.stats.failed_get += 1
self.sim.stats.redirects += 1
return self.__create_get__(req.key)
if msg['type'] == 'fail': self.sim.stats.failed_put += 1
self.sim.stats.redirects += 1
return self.__create_put__(req.key, req.val)
# msg type must be ok, mark it as completed
self.completed.add(mid)
if req.get:
if 'value' not in msg:
print "*** Simulator Error - get() response missing the value of the key: %s" % (raw_msg)
self.sim.stats.incorrect += 1
            elif self.items[req.key] != msg['value']:
print "*** Simulator Error - client received an incorrect value for a key: %s" % (raw_msg)
self.sim.stats.incorrect += 1
else:
self.items[req.key] = req.val
return None
#######################################################################################################
# Represents a replica, the associated process, and its sockets
class Replica:
def __init__(self, rid):
self.rid = rid
self.client_sock = None
self.alive = False
# try and delete the old domain socket, just in case
try: os.unlink(rid)
except: pass
# create the listen socket for this replica
self.listen_sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
self.listen_sock.bind(rid)
self.listen_sock.listen(1)
def run(self, rids):
args = [REPLICA_PROG, self.rid]
args.extend(rids - set([self.rid]))
self.proc = subprocess.Popen(args)
self.alive = True
def shutdown(self):
if self.alive:
self.alive = False
if self.client_sock: self.client_sock.close()
self.listen_sock.close()
self.listen_sock = None
self.client_sock = None
self.proc.kill()
self.proc.wait()
os.unlink(self.rid)
def deliver(self, raw_msg):
if self.alive:
try:
self.client_sock.send(raw_msg)
return True
except:
print '*** Simulator Error - Unable to send to replica'
self.shutdown()
return False
#######################################################################################################
# Represents and executes the entire simulation
class Simulation:
def __init__(self, filename):
self.leader = 'FFFF'
self.events = []
# stats tracking
self.stats = Stats()
# virtual network partitions
self.partition = None
# Load the config file
self.conf = Config(filename)
#self.conf.dump()
# Create the clients
self.cids = set()
self.clients = {}
for i in xrange(self.conf.replicas + 16, self.conf.replicas + 16 + NUM_CLIENTS):
cid = ('%04x' % (i)).upper()
self.cids.add(cid)
self.clients[cid] = Client(self, cid)
# Create the sockets and the replicas
self.rids = set()
self.replicas = {}
for i in xrange(self.conf.replicas):
rid = ('%04x' % (i)).upper()
self.rids.add(rid)
self.replicas[rid] = Replica(rid)
self.living_rids = self.rids.copy()
def run(self):
for r in self.replicas.itervalues():
r.run(self.rids)
# initialize the clock and create all the get(), put(), and kill() events
clock = start = time.time()
self.__populate_event_queue__(clock)
# the main event loop
while clock - start < self.conf.lifetime and self.stats.total_msgs < self.conf.max_packets:
# populate the list of living sockets
sockets = []
listen_socks = set()
for r in self.replicas.itervalues():
if r.listen_sock:
sockets.append(r.listen_sock)
listen_socks.add(r.listen_sock)
if r.client_sock: sockets.append(r.client_sock)
ready = select.select(sockets, [], [], 0.1)[0]
for sock in ready:
# if this is a listen sock, accept the connection and map it | |
# edacom.py
#!/usr/bin/env python
"""
Runs on one Raspberry Pi inside each of the beamformer control boxes, to send pointing commands to the eight
beamformers connected to that box.
On startup, it:
-Checks that the hostname (as reported by 'hostname -A') is either 'eda1com' or 'eda2com', and exits if not.
-Uses the integer (1 or 2) in the hostname to determine whether this box is connected to the first
eight beamformers (0-8), or the second eight beamformers (9-F).
-Starts a Pyro4 daemon on port 19987 to listen for (and execute) remote procedure calls over the network.
On exit (eg, with a control-C or a 'kill' command), it:
-Stops the Pyro4 daemon
-Exits.
"""
import atexit
import logging
from logging import handlers
import optparse
import signal
import subprocess
import sys
import threading
import time
# noinspection PyUnresolvedReferences
import RPi.GPIO as GPIO
import astropy
import astropy.time
import astropy.units
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
if sys.version_info.major == 2:
# noinspection PyUnresolvedReferences
STR_CLASS = basestring
else:
STR_CLASS = str
# set up the logging
LOGLEVEL_CONSOLE = logging.DEBUG # Logging level for console messages (INFO, DEBUG, ERROR, CRITICAL, etc)
LOGLEVEL_LOGFILE = logging.DEBUG # Logging level for logfile
LOGLEVEL_REMOTE = logging.INFO
LOGFILE = "/tmp/edacom.log"
class MWALogFormatter(logging.Formatter):
def format(self, record):
return "%s: time %10.6f - %s" % (record.levelname, time.time(), record.getMessage())
mwalf = MWALogFormatter()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = handlers.RotatingFileHandler(LOGFILE, maxBytes=1000000000,
backupCount=5) # 1 Gb per file, max of five old log files
fh.setLevel(LOGLEVEL_LOGFILE)
fh.setFormatter(mwalf)
ch = logging.StreamHandler()
ch.setLevel(LOGLEVEL_CONSOLE)
ch.setFormatter(mwalf)
# rh = handlers.SysLogHandler(address=('mw-gw', 514))
# rh.setLevel(LOGLEVEL_REMOTE)
# rh.setFormatter(mwalf)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# logger.addHandler(rh)
import Pyro4
# noinspection PyUnresolvedReferences
sys.excepthook = Pyro4.util.excepthook
Pyro4.config.DETAILED_TRACEBACK = True
Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')
import pyslave
import pointing
TILEID = 99 # MWA tile ID for the EDA station
SLAVEPORT = 19987
STRICT = False
ONLYBFs = None
# ONLYBFs = ['E']
CPOS = (0.0, 0.0, 0.0) # Offset from geometric centre, in metres, to use as delay centre for pointing calculations
# IO pin allocations as (txdata, txclock, rxdata) for each of the 8 RxDOC cards in this box, numbered 1-8
IOPINS = {1:(29, 16, 40), 2:(26, 15, 38), 3:(24, 13, 37), 4:(23, 12, 36), 5:(22, 11, 35), 6:(21, 10, 33), 7:(19, 8, 32),
8:(18, 7, 31)}
# Timeout for PyController comms to the PointingSlave instance
PS_TIMEOUT = 60
SIGNAL_HANDLERS = {}
CLEANUP_FUNCTION = None
MWAPOS = EarthLocation.from_geodetic(lon="116:40:14.93", lat="-26:42:11.95", height=377.8)
def init():
"""Initialise IO pins for pointing comms with all 8 beamformers
"""
GPIO.setmode(GPIO.BOARD) # Use board connector pin numbers to specify I/O pins
GPIO.setwarnings(False)
for i in range(1, 9):
txdata, txclock, rxdata = IOPINS[i]
GPIO.setup(rxdata, GPIO.IN)
GPIO.setup(txdata, GPIO.OUT)
GPIO.setup(txclock, GPIO.OUT)
def get_hostname():
"""Returns the hostname, with domain stripped off. Used to work out whether this Raspberry Pi controls MWA
beamformers 0-8 (eda1com.mwa128t.org) or beamformers 9-F (eda2com.mwa128t.org).
"""
if sys.version_info.major == 2:
fqdn = subprocess.check_output(['hostname', '-A'], shell=False)
else:
fqdn = subprocess.check_output(['hostname', '-A'], shell=False).decode('UTF-8')
return fqdn.split('.')[0]
def point(starttime=0, bfnum=0, outstring='', results=None, resultlock=None):
"""
Called with the start time of the next observation (a unix timestamp), the beamformer number to point, and
the string containing the delay bits to write.
Waits until the specified time, then sends the bit string to the beamformer. The results are written to the
given dictionary, using the lock supplied - this is because 'point' is called in parallel for all eight
beamformers, using the same results dictionary, so that all eight beamformers are pointed at the same instant.
:param starttime: start time in seconds past the unix epoch,
:param bfnum: beamformer output number (1-8),
:param outstring: bit-string to send,
:param results: dictionary to store (temp, flag) returned from the beamformer,
:param resultlock: lock object to avoid conflicts writing to the results dict
:return:
"""
now = time.time()
if now < starttime:
logger.debug("bf %d is sleeping for %5.2f seconds" % (bfnum, starttime - now))
time.sleep(starttime - now)
temp, flags = send_bitstring(bfnum=bfnum, outstring=outstring)
with resultlock:
results[bfnum] = (temp, flags)
logger.info("bf %d bitstring sent." % bfnum)
def calc_azel(ra=0.0, dec=0.0, calctime=None):
"""
Takes RA and DEC in degrees, and calculates Az/El of target at the specified time
:param ra: Right Ascension (J2000) in degrees
:param dec: Declination (J2000) in degrees
:param calctime: Time (as a unix time stamp) for the conversion, or None to calculate for the current time.
:return: A tuple of (azimuth, elevation) in degrees
"""
# noinspection PyUnresolvedReferences
coords = SkyCoord(ra=ra, dec=dec, equinox='J2000', unit=(astropy.units.deg, astropy.units.deg))
if calctime is None:
azeltime = Time.now()
else:
azeltime = Time(calctime, format='unix', scale='utc')
coords.location = MWAPOS
coords.obstime = azeltime
cpos = coords.transform_to('altaz')
return cpos.az.deg, cpos.alt.deg
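# Illustrative usage of calc_azel() (coordinates are placeholders):
#     az, el = calc_azel(ra=83.63, dec=22.01)                               # J2000 degrees, current time
#     az, el = calc_azel(ra=83.63, dec=22.01, calctime=time.time() + 300)   # five minutes ahead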
class PointingSlave(pyslave.Slave):
"""Subclass the pycontroller slave class so we can override the notify() method to point the EDA.
Any methods decorated with '@Pyro4.expose' are called remotely over the network, from the control
computer.
"""
def __init__(self, edanum=0, tileid=0, clientid=None, port=None):
"""
:param edanum: Either 1 or 2, used to determine which set of 8 beamformers we are pointing.
:param tileid: Which MWA tile number we are (used to ignore notify() calls not meant for us)
:param clientid: Pyro4 service name - eg eda1com
:param port: network port to listen on
"""
self.tileid = tileid
self.orig_tileid = tileid # Save the 'real' tile ID here, so we can change the 'current' one
self.edanum = edanum
self.offsets = pointing.getOffsets()
self.lastpointing = (None, None, None, None, None, None, None, None, None)
pyslave.Slave.__init__(self, clientid=clientid, rclass='pointing', port=port)
@Pyro4.expose
def stop_tracking(self):
"""Change the tileid that we recognise for notify() calls, so that we ignore any notify() calls
from pycontroller in response to MWA observations. EDA client code calls to notify() use a
tileid of 0, and are always recognised.
"""
self.tileid = -1
        logger.info('Tracking disabled, current tile ID set to -1')
return True
@Pyro4.expose
def start_tracking(self):
"""Change the tileid that we recognise for notify() calls, so that we react to any notify() calls
from pycontroller in response to MWA observations. EDA client code calls to notify() use a
tileid of 0, and are always recognised.
"""
self.tileid = self.orig_tileid
logger.info('Tracking enabled, current tile ID restored to %d' % self.tileid)
return True
@Pyro4.expose
def onlybfs(self, bfids=None):
"""If called with bfids=None, enables all dipoles on all MWA beamformers. If bfids is a list of
single hex digits, or a string of hex digits, enable all dipoles on those beamformers, and
disable them on all others.
The state is saved in a global variable, and lasts until the next call to onlybfs().
:param bfids: A list of hex digits (eg ['0', '4', 'A']), or a string of hex digits (eg '04A')
:return: False if there was an error parsing the bfids argument, True if successful.
"""
global ONLYBFs
if bfids is None:
logger.info('Enabling all channels')
ONLYBFs = None
elif (type(bfids) == list) or (isinstance(bfids, STR_CLASS)):
onlybfs = []
for bfid in bfids:
if (isinstance(bfid, STR_CLASS)) and (len(bfid) == 1):
if bfid.upper() in pointing.HEXD:
onlybfs.append(bfid.upper())
else:
logger.critical("Invalid BFID code: %s" % bfid)
return False
else:
logger.critical("Invalid BFID: %s" % bfid)
return False
logger.info("Enabling only beamformers %s" % onlybfs)
            ONLYBFs = onlybfs
        return True
@Pyro4.expose
def set_cpos(self, cpos=None):
"""Sets the position of the EDA centre used for delay calculations, relative to the geometric centre.
If cpos is not None, it must be a tuple of three floats, used as an offset from the geometrical
        EDA centre (0,0,0) (in the units used in the locations file), in metres.
The state is saved in a global variable, and lasts until the next call to set_cpos().
:param cpos: A tuple of three floats (offsets E/W, N/S and up/down), or None.
:return: False if there was an error parsing the cpos argument, True if successful.
"""
global CPOS
if cpos is None:
CPOS = (0.0, 0.0, 0.0)
else:
if (type(cpos) == tuple) and (len(cpos) == 3):
ok = True
for element in cpos:
if (type(element) != float) and (type(element) != int):
ok = False
if ok:
CPOS = cpos
return True
else:
logger.error('Invalid item in argument for set_cpos(%s) call' % cpos)
return False
else:
logger.error('Invalid argument to set_cpos(%s)' % cpos)
return False
@Pyro4.expose
def is_tracking(self):
"""Returns True if we are tracking MWA observations, False otherwise."""
return self.tileid == self.orig_tileid
@Pyro4.expose
def get_status(self):
"""Returns a status object. This is a tuple of:
istracking (True or False),
ONLYBFs (global variable, None for all | |
"""
Smart Virtual Thermostat FOR trv python plugin for Domoticz
Author: Erwanweb,
adapted from the SVT By Logread V0.4.4 and Anthor, see:
https://github.com/999LV/SmartVirtualThermostat
http://www.antor.fr/apps/smart-virtual-thermostat-eng-2/?lang=en
https://github.com/AntorFr/SmartVT
Version: 0.0.1: alpha
0.0.2: beta
         0.1.1: correction for reduced temp if no presence
0.1.2: correction for control of TRV setpoint
0.1.3: correction for control of TRV setpoint when off
"""
"""
<plugin key="SVT3" name="AC Smart Virtual Thermostat for TRV" author="Erwanweb" version="0.1.3" externallink="https://github.com/Erwanweb/SVT3.git">
<description>
<h2>Smart Virtual Thermostat for TRV</h2><br/>
V.0.1.3<br/>
Easily implement in Domoticz an advanced virtual thermostat using TRV<br/>
<h3>Set-up and Configuration</h3>
</description>
<params>
<param field="Address" label="Domoticz IP Address" width="200px" required="true" default="127.0.0.1"/>
<param field="Port" label="Port" width="40px" required="true" default="8080"/>
<param field="Username" label="Username" width="200px" required="false" default=""/>
<param field="Password" label="Password" width="200px" required="false" default=""/>
<param field="Mode1" label="Inside Temperature Sensors (csv list of idx)" width="100px" required="true" default="0"/>
<param field="Mode2" label="TRV Temperature Sensors (csv list of idx)" width="100px" required="false" default=""/>
<param field="Mode3" label="TRV (csv list of idx)" width="100px" required="true" default="0"/>
<param field="Mode4" label="Presence Sensors (csv list of idx)" width="100px" required="false" default=""/>
<param field="Mode5" label="Pause On delay, Pause Off delay, Forced mode duration, Presence on delay, Presence off delay(all in minutes) reduc jour, reduc nuit (both in tenth of degre)" width="200px" required="true" default="2,1,60,1,60,10,20"/>
<param field="Mode6" label="Logging Level" width="200px">
<options>
<option label="Normal" value="Normal" default="true"/>
<option label="Verbose" value="Verbose"/>
<option label="Debug - Python Only" value="2"/>
<option label="Debug - Basic" value="62"/>
<option label="Debug - Basic+Messages" value="126"/>
<option label="Debug - Connections Only" value="16"/>
<option label="Debug - Connections+Queue" value="144"/>
<option label="Debug - All" value="-1"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import json
import urllib.parse as parse
import urllib.request as request
from datetime import datetime, timedelta
import time
import base64
import itertools
class deviceparam:
def __init__(self, unit, nvalue, svalue):
self.unit = unit
self.nvalue = nvalue
self.svalue = svalue
class BasePlugin:
def __init__(self):
self.debug = False
self.pauseondelay = 2 # time between pause sensor actuation and actual pause
self.pauseoffdelay = 1 # time between end of pause sensor actuation and end of actual pause
self.forcedduration = 60 # time in minutes for the forced mode
self.ActiveSensors = {}
self.InTempSensors = []
self.TRVTempSensors = []
self.OutTempSensors = []
self.switchHeat = False
self.Heaters = []
self.heat = False
self.pause = False
self.pauserequested = False
self.pauserequestchangedtime = datetime.now()
self.forced = False
self.intemp = 20.0
self.intemperror = False
self.TRVtemp = 20.0
self.outtemp = 20.0
self.setpoint = 20.0
self.TRVsetpoint = 20.0
self.endheat = datetime.now()
self.nexttemps = datetime.now()
self.temptimeout = datetime.now()
self.DTpresence = []
self.Presencemode = False
self.Presence = False
self.PresenceTH = False
self.presencechangedtime = datetime.now()
self.PresenceDetected = False
self.DTtempo = datetime.now()
self.presenceondelay = 1 # time between first detection and last detection before turning presence ON
self.presenceoffdelay = 60 # time between last detection before turning presence OFF
        self.reducjour = 10  # temperature reduction relative to the setpoint (tenths of a degree)
        self.reducnuit = 20  # temperature reduction relative to the setpoint (tenths of a degree)
self.learn = True
return
def onStart(self):
# setup the appropriate logging level
try:
debuglevel = int(Parameters["Mode6"])
except ValueError:
debuglevel = 0
self.loglevel = Parameters["Mode6"]
if debuglevel != 0:
self.debug = True
Domoticz.Debugging(debuglevel)
DumpConfigToLog()
self.loglevel = "Verbose"
else:
self.debug = False
Domoticz.Debugging(0)
# create the child devices if these do not exist yet
devicecreated = []
if 1 not in Devices:
Options = {"LevelActions": "||",
"LevelNames": "Off|Auto|Forced",
"LevelOffHidden": "false",
"SelectorStyle": "0"}
Domoticz.Device(Name="Thermostat Control", Unit=1, TypeName="Selector Switch", Switchtype=18, Image=15,
Options=Options, Used=1).Create()
devicecreated.append(deviceparam(1, 0, "0")) # default is Off state
if 2 not in Devices:
Options = {"LevelActions": "||",
"LevelNames": "Off|Normal|Economy|Vacation",
"LevelOffHidden": "true",
"SelectorStyle": "0"}
Domoticz.Device(Name="Thermostat Mode", Unit=2, TypeName="Selector Switch", Switchtype=18, Image=15,
Options=Options, Used=1).Create()
devicecreated.append(deviceparam(2, 0, "10")) # default is normal confort mode
if 3 not in Devices:
Domoticz.Device(Name="Thermostat Pause", Unit=3, TypeName="Switch", Image=9, Used=1).Create()
devicecreated.append(deviceparam(3, 0, "")) # default is Off
if 4 not in Devices:
Domoticz.Device(Name="Setpoint Normal", Unit=4, Type=242, Subtype=1, Used=1).Create()
devicecreated.append(deviceparam(4, 0, "20")) # default is 20 degrees
if 5 not in Devices:
Domoticz.Device(Name="Setpoint Economy", Unit=5, Type=242, Subtype=1).Create()
devicecreated.append(deviceparam(5 ,0, "18")) # default is 18 degrees
if 6 not in Devices:
Domoticz.Device(Name="Thermostat temp", Unit=6, TypeName="Temperature", Used=1).Create()
devicecreated.append(deviceparam(6, 0, "20")) # default is 20 degrees
if 7 not in Devices:
Domoticz.Device(Name="Heating Request", Unit=7, TypeName="Switch", Image=9, Used=1).Create()
devicecreated.append(deviceparam(7, 0, "")) # default is Off
if 8 not in Devices:
Domoticz.Device(Name="Presence sensor", Unit=8, TypeName="Switch", Image=9).Create()
devicecreated.append(deviceparam(8, 0, "")) # default is Off
# if any device has been created in onStart(), now is time to update its defaults
for device in devicecreated:
Devices[device.unit].Update(nValue=device.nvalue, sValue=device.svalue)
# build lists of sensors and switches
self.InTempSensors = parseCSV(Parameters["Mode1"])
Domoticz.Debug("Inside Temperature sensors = {}".format(self.InTempSensors))
self.TRVTempSensors = parseCSV(Parameters["Mode2"])
Domoticz.Debug("TRV Temperature sensors = {}".format(self.TRVTempSensors))
self.Heaters = parseCSV(Parameters["Mode3"])
Domoticz.Debug("Heaters = {}".format(self.Heaters))
self.DTpresence = parseCSV(Parameters["Mode4"])
Domoticz.Debug("DTpresence = {}".format(self.DTpresence))
# build dict of status of all temp sensors to be used when handling timeouts
for sensor in itertools.chain(self.InTempSensors, self.TRVTempSensors):
self.ActiveSensors[sensor] = True
# splits additional parameters
params = parseCSV(Parameters["Mode5"])
if len(params) == 7:
self.pauseondelay = CheckParam("Pause On Delay", params[0], 2)
self.pauseoffdelay = CheckParam("Pause Off Delay", params[1], 0)
self.forcedduration = CheckParam("Forced Mode Duration", params[2], 60)
if self.forcedduration < 30:
Domoticz.Error("Invalid forced mode duration parameter. Using minimum of 30 minutes !")
self.calculate_period = 30
self.presenceondelay = CheckParam("Presence On Delay", params[3], 1)
self.presenceoffdelay = CheckParam("Presence Off Delay",params[4],30)
self.reducjour = CheckParam("reduit jour",params[5],10)
self.reducnuit = CheckParam("reduit nuit",params[6],20)
else:
Domoticz.Error("Error reading Mode5 parameters")
# if mode = off then make sure actual heating is off just in case if was manually set to on
if Devices[1].sValue == "0":
self.switchHeat = False
# update temp
self.readTemps()
def onStop(self):
Domoticz.Debugging(0)
def onCommand(self, Unit, Command, Level, Color):
Domoticz.Debug("onCommand called for Unit {}: Command '{}', Level: {}".format(Unit, Command, Level))
if Unit == 3: # pause switch
self.pauserequestchangedtime = datetime.now()
svalue = ""
if str(Command) == "On":
nvalue = 1
self.pauserequested = True
else:
nvalue = 0
self.pauserequested = False
else:
nvalue = 1 if Level > 0 else 0
svalue = str(Level)
Devices[Unit].Update(nValue=nvalue, sValue=svalue)
if Unit in (1, 2, 4, 5): # truc
self.onHeartbeat()
def onHeartbeat(self):
self.PresenceDetection()
now = datetime.now()
# fool proof checking.... based on users feedback
if not all(device in Devices for device in (1,2,3,4,5,6,7,8)):
Domoticz.Error("one or more devices required by the plugin is/are missing, please check domoticz device creation settings and restart !")
return
if Devices[1].sValue == "0": # Thermostat is off
Domoticz.Log("Thermostat is OFF")
Domoticz.Debug("TRV Calculded setpoint is : 7 because of thermostat off")
self.TRVsetpoint = 7
if not Devices[7].nValue == 0:
Devices[7].Update(nValue=0, sValue=Devices[7].sValue)
if self.forced or self.switchHeat: # thermostat setting was just changed so we kill the heating
self.forced = False
self.switchHeat = False
Domoticz.Debug("Switching heat Off !")
elif Devices[1].sValue == "20": # Thermostat is in forced mode
Domoticz.Log("Thermostat is in FORCED mode")
if self.forced:
if self.endheat <= now:
self.forced = False
self.endheat = now
Domoticz.Debug("Forced mode Off after timer !")
Devices[1].Update(nValue=1, sValue="10") # set thermostat to normal mode
self.switchHeat = False
                    self.TRVsetpoint = self.setpoint - (self.intemp - self.TRVtemp) # correction of TRV setpoint using difference between real indoor temp and measured TRV temp.
                    Domoticz.Debug("TRV Calculated setpoint is : " + str(self.TRVsetpoint))
if not Devices[7].nValue == 0:
Devices[7].Update(nValue = 0,sValue = Devices[7].sValue)
else:
self.forced = True
self.endheat = now + timedelta(minutes=self.forcedduration)
Domoticz.Debug("Forced mode On !")
self.switchHeat = True
Domoticz.Debug("TRV Calculded setpoint is : 28")
self.TRVsetpoint = 28
if Devices[7].nValue == 0:
Devices[7].Update(nValue = 1,sValue = Devices[7].sValue)
else: # Thermostat is in mode auto
Domoticz.Debug("Thermostat is in AUTO mode")
if self.forced: # thermostat setting was just changed from "forced" so we kill the forced mode
Domoticz.Debug("Forced mode Off !")
self.forced = False
self.switchHeat = True
                self.TRVsetpoint = self.setpoint - (self.intemp - self.TRVtemp) # correction of TRV setpoint using difference between real indoor temp and measured TRV temp.
                Domoticz.Debug("TRV Calculated setpoint is : " + str(self.TRVsetpoint))
if not Devices[7].nValue == 0:
Devices[7].Update(nValue = 0,sValue = Devices[7].sValue)
elif self.pause and not self.pauserequested: # we are in pause and the pause switch is now off
if self.pauserequestchangedtime + timedelta(minutes=self.pauseoffdelay) <= now:
Domoticz.Debug("Pause is now Off")
self.pause = False
self.switchHeat = True
                    self.TRVsetpoint = self.setpoint - (self.intemp - self.TRVtemp) # correction of TRV setpoint using difference between real indoor temp and measured TRV temp.
#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from optparse import make_option
from itertools import chain
from django.core.exceptions import ValidationError
from django.core.management import call_command
from django.core.management.base import CommandError, BaseCommand
from django.utils.dateparse import parse_datetime
from django.contrib.gis import geos
from eoxserver.core import env
from eoxserver.contrib import gdal, osr
from eoxserver.backends import models as backends
from eoxserver.backends.component import BackendComponent
from eoxserver.backends.cache import CacheContext
from eoxserver.backends.access import connect
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.metadata.component import MetadataComponent
from eoxserver.resources.coverages.management.commands import (
CommandOutputMixIn, _variable_args_cb, nested_commit_on_success
)
def _variable_args_cb_list(option, opt_str, value, parser):
""" Helper function for optparse module. Allows variable number of option
values when used as a callback.
"""
args = []
for arg in parser.rargs:
if not arg.startswith('-'):
args.append(arg)
else:
del parser.rargs[:len(args)]
break
if not getattr(parser.values, option.dest):
setattr(parser.values, option.dest, [])
getattr(parser.values, option.dest).append(args)
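# Illustrative effect of the callback above (file names are placeholders): the
# command line '-d a.tif -d tar:pkg.tar.gz b.tif -r RGB' leaves
# parser.values.data == [['a.tif'], ['tar:pkg.tar.gz', 'b.tif']].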
class Command(CommandOutputMixIn, BaseCommand):
option_list = BaseCommand.option_list + (
make_option("-i", "--identifier", "--coverage-id", dest="identifier",
action="store", default=None,
help=("Override identifier.")
),
make_option("-d", "--data", dest="data",
action="callback", callback=_variable_args_cb_list, default=[],
help=("Add a data item to the dataset. Format is: "
"[storage_type:url] [package_type:location]* format:location"
)
),
make_option("-s", "--semantic", dest="semantics",
action="callback", callback=_variable_args_cb, default=None,
help=("Optional band semantics. If given, one band "
"semantics 'band[*]' must be present for each '--data' "
"option.")
),
make_option("-m", "--meta-data", dest="metadata",
action="callback", callback=_variable_args_cb_list, default=[],
help=("Optional. [storage_type:url] [package_type:location]* "
"format:location")
),
make_option("-r", "--range-type", dest="range_type_name",
help=("Mandatory. Name of the stored range type. ")
),
make_option("-e", "--extent", dest="extent",
action="store", default=None,
help=("Override extent. Comma separated list of "
"<minx>,<miny>,<maxx>,<maxy>.")
),
make_option("--size", dest="size",
action="store", default=None,
help=("Override size. Comma separated list of <size-x>,<size-y>.")
),
make_option("--srid", dest="srid",
action="store", default=None,
help=("Override SRID. Integer number.")
),
make_option("-p", "--projection", dest="projection",
action="store", default=None,
help=("Override projection.")
),
make_option("-f", "--footprint", dest="footprint",
action="store", default=None,
help=("Override footprint. Must be supplied as WKT Polygons or "
"MultiPolygons.")
),
make_option("--begin-time", dest="begin_time",
action="store", default=None,
help=("Override begin time. Format is ISO8601 datetime strings.")
),
make_option("--end-time", dest="end_time",
action="store", default=None,
help=("Override end time. Format is ISO8601 datetime strings.")
),
make_option("--coverage-type", dest="coverage_type",
action="store", default=None,
help=("The actual coverage type.")
),
make_option("--visible", dest="visible",
action="store_true", default=False,
help=("Set the coverage to be 'visible', which means it is "
"advertised in GetCapabilities responses.")
),
make_option("--collection", dest="collection_ids",
action='callback', callback=_variable_args_cb, default=None,
help=("Optional. Link to one or more collection(s).")
),
make_option('--ignore-missing-collection',
dest='ignore_missing_collection',
action="store_true", default=False,
help=("Optional. Proceed even if the linked collection "
"does not exist. By defualt, a missing collection "
"will result in an error.")
)
)
args = (
"-d [<storage>:][<package>:]<location> [-d ... ] "
"-r <range-type-name> "
"[-m [<storage>:][<package>:]<location> [-m ... ]] "
"[-s <semantic> [-s <semantic>]] "
"[--identifier <identifier>] "
"[-e <minx>,<miny>,<maxx>,<maxy>] "
"[--size <size-x> <size-y>] "
"[--srid <srid> | --projection <projection-def>] "
"[--footprint <footprint-wkt>] "
"[--begin-time <begin-time>] [--end-time <end-time>] "
"[--coverage-type <coverage-type-name>] "
"[--visible] [--collection <collection-id> [--collection ... ]] "
"[--ignore-missing-collection]"
)
help = """
Registers a Dataset.
        A dataset is a collection of data and metadata items. When being
registered, as much metadata as possible is extracted from the supplied
(meta-)data items. If some metadata is still missing, it needs to be
supplied via the specific override options.
By default, datasets are not "visible" which means that they are not
advertised in the GetCapabilities sections of the various services.
This needs to be overruled via the `--visible` switch.
        The registered dataset can optionally be directly inserted into one or more
collections.
"""
@nested_commit_on_success
def handle(self, *args, **kwargs):
with CacheContext() as cache:
self.handle_with_cache(cache, *args, **kwargs)
def handle_with_cache(self, cache, *args, **kwargs):
metadata_component = MetadataComponent(env)
datas = kwargs["data"]
semantics = kwargs.get("semantics")
metadatas = kwargs["metadata"]
range_type_name = kwargs["range_type_name"]
if range_type_name is None:
raise CommandError("No range type name specified.")
range_type = models.RangeType.objects.get(name=range_type_name)
metadata_keys = set((
"identifier", "extent", "size", "projection",
"footprint", "begin_time", "end_time", "coverage_type",
))
all_data_items = []
retrieved_metadata = {}
retrieved_metadata.update(
self._get_overrides(**kwargs)
)
for metadata in metadatas:
storage, package, format, location = self._get_location_chain(
metadata
)
data_item = backends.DataItem(
location=location, format=format or "", semantic="metadata",
storage=storage, package=package,
)
data_item.full_clean()
data_item.save()
all_data_items.append(data_item)
with open(connect(data_item, cache)) as f:
content = f.read()
reader = metadata_component.get_reader_by_test(content)
if reader:
values = reader.read(content)
format = values.pop("format", None)
if format:
data_item.format = format
data_item.full_clean()
data_item.save()
for key, value in values.items():
if key in metadata_keys:
retrieved_metadata.setdefault(key, value)
if len(datas) < 1:
raise CommandError("No data files specified.")
if semantics is None:
# TODO: check corner cases.
# e.g: only one data item given but multiple bands in range type
# --> bands[1:<bandnum>]
if len(datas) == 1:
if len(range_type) == 1:
semantics = ["bands[1]"]
else:
semantics = ["bands[1:%d]" % len(range_type)]
else:
semantics = ["bands[%d]" % i for i in range(len(datas))]
for data, semantic in zip(datas, semantics):
storage, package, format, location = self._get_location_chain(data)
data_item = backends.DataItem(
location=location, format=format or "", semantic=semantic,
storage=storage, package=package,
)
data_item.full_clean()
data_item.save()
all_data_items.append(data_item)
# TODO: other opening methods than GDAL
ds = gdal.Open(connect(data_item, cache))
reader = metadata_component.get_reader_by_test(ds)
if reader:
values = reader.read(ds)
format = values.pop("format", None)
if format:
data_item.format = format
data_item.full_clean()
data_item.save()
for key, value in values.items():
if key in metadata_keys:
retrieved_metadata.setdefault(key, value)
ds = None
if len(metadata_keys - set(retrieved_metadata.keys())):
raise CommandError(
"Missing metadata keys %s."
% ", ".join(metadata_keys - set(retrieved_metadata.keys()))
)
try:
# TODO: allow types of different apps
CoverageType = getattr(models, retrieved_metadata["coverage_type"])
except AttributeError:
raise CommandError(
"Type '%s' is not supported." % kwargs["coverage_type"]
)
try:
coverage = CoverageType()
coverage.range_type = range_type
proj = retrieved_metadata.pop("projection")
if isinstance(proj, int):
retrieved_metadata["srid"] = proj
else:
definition, format = proj
# Try to identify the SRID from the given input
try:
sr = osr.SpatialReference(definition, format)
retrieved_metadata["srid"] = sr.srid
                except Exception as e:
prj = models.Projection.objects.get(
format=format, definition=definition
)
retrieved_metadata["projection"] = prj
# TODO: bug in models for some coverages
for key, value in retrieved_metadata.items():
setattr(coverage, key, value)
coverage.visible = kwargs["visible"]
coverage.full_clean()
coverage.save()
for data_item in all_data_items:
data_item.dataset = coverage
data_item.full_clean()
data_item.save()
# link with collection(s)
if kwargs["collection_ids"]:
ignore_missing_collection = kwargs["ignore_missing_collection"]
call_command("eoxs_collection_link",
collection_ids=kwargs["collection_ids"],
add_ids=[coverage.identifier],
ignore_missing_collection=ignore_missing_collection
)
except Exception as e:
self.print_traceback(e, kwargs)
raise CommandError("Dataset registration failed: %s" % e)
self.print_msg(
"Dataset with ID '%s' registered sucessfully."
% coverage.identifier
)
def _get_overrides(self, identifier=None, size=None, extent=None,
begin_time=None, end_time=None, footprint=None,
projection=None, coverage_type=None, **kwargs):
overrides = {}
if coverage_type:
overrides["coverage_type"] = coverage_type
if identifier:
overrides["identifier"] = identifier
if extent:
overrides["extent"] = map(float, extent.split(","))
if size:
overrides["size"] = map(int, size.split(","))
if begin_time:
overrides["begin_time"] = parse_datetime(begin_time)
if end_time:
overrides["end_time"] = parse_datetime(end_time)
if footprint:
footprint = geos.GEOSGeometry(footprint)
if footprint.hasz :
raise CommandError(
"Invalid footprint geometry! 3D geometry is not supported!"
)
if footprint.geom_type == "MultiPolygon" :
overrides["footprint"] = footprint
elif footprint.geom_type == "Polygon" :
overrides["footprint"] = geos.MultiPolygon(footprint)
else :
raise CommandError(
"Invalid footprint geometry type '%s'!"
% (footprint.geom_type)
)
if projection:
try:
overrides["projection"] = int(projection)
except ValueError:
overrides["projection"] = projection
return overrides
def _get_location_chain(self, items):
""" Returns the tuple
"""
component = BackendComponent(env)
storage = None
package = None
storage_type, url = self._split_location(items[0])
if storage_type:
storage_component = component.get_storage_component(storage_type)
else:
storage_component = None
if storage_component:
storage, _ = backends.Storage.objects.get_or_create(
url=url, storage_type=storage_type
)
# packages
for item in items[1 if storage | |
prev_vert2 and \
not circle_full:
# force quads, otherwise won't make it to end of loop2
tri, quad = 1, 0
else:
# calculate if tri or quad gives shortest edge
tri, quad = [(mesh.vertices[loop1[i+1]].co -
mesh.vertices[loop2[j]].co).length
for j in range(prev_vert2, prev_vert2+2)]
# triangle
if tri < quad:
lines.append([loop1[i+1], loop2[prev_vert2]])
if circle_full == 2:
circle_full = False
# quad
elif not circle_full:
lines.append([loop1[i+1], loop2[prev_vert2+1]])
prev_vert2 += 1
# quad to first vertex of loop2
else:
lines.append([loop1[i+1], loop2[0]])
prev_vert2 = 0
circle_full = True
# final face for circular loops
if loop1_circular and loop2_circular:
lines.append([loop1[0], loop2[0]])
return(lines)
# calculate number of segments needed
def bridge_calculate_segments(mesh, lines, loops, segments):
# return if amount of segments is set by user
if segments != 0:
return segments
# edge lengths
average_edge_length = [(mesh.vertices[vertex].co - \
mesh.vertices[loop[0][i+1]].co).length for loop in loops for \
i, vertex in enumerate(loop[0][:-1])]
# closing edges of circular loops
average_edge_length += [(mesh.vertices[loop[0][-1]].co - \
mesh.vertices[loop[0][0]].co).length for loop in loops if loop[1]]
# average lengths
average_edge_length = sum(average_edge_length) / len(average_edge_length)
average_bridge_length = sum([(mesh.vertices[v1].co - \
mesh.vertices[v2].co).length for v1, v2 in lines]) / len(lines)
segments = max(1, round(average_bridge_length / average_edge_length))
return(segments)
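# Worked example of the heuristic above (numbers are illustrative): if the loop
# edges average 0.2 units and the bridging lines average 0.65 units, then
# segments = max(1, round(0.65 / 0.2)) = 3, i.e. each bridge is split into 3 segments.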
# return dictionary with vertex index as key, and the normal vector as value
def bridge_calculate_virtual_vertex_normals(mesh, lines, loops, edge_faces,
edgekey_to_edge):
if not edge_faces: # interpolation isn't set to cubic
return False
# pity reduce() isn't one of the basic functions in python anymore
def average_vector_dictionary(dic):
for key, vectors in dic.items():
#if type(vectors) == type([]) and len(vectors) > 1:
if len(vectors) > 1:
average = mathutils.Vector()
for vector in vectors:
average += vector
average /= len(vectors)
dic[key] = [average]
return dic
# get all edges of the loop
edges = [[edgekey_to_edge[tuple(sorted([loops[j][0][i],
loops[j][0][i+1]]))] for i in range(len(loops[j][0])-1)] for \
j in [0,1]]
edges = edges[0] + edges[1]
for j in [0, 1]:
if loops[j][1]: # circular
edges.append(edgekey_to_edge[tuple(sorted([loops[j][0][0],
loops[j][0][-1]]))])
"""
calculation based on face topology (assign edge-normals to vertices)
edge_normal = face_normal x edge_vector
vertex_normal = average(edge_normals)
"""
vertex_normals = dict([(vertex, []) for vertex in loops[0][0]+loops[1][0]])
for edge in edges:
faces = edge_faces[edge.key] # valid faces connected to edge
if faces:
# get edge coordinates
v1, v2 = [mesh.vertices[edge.key[i]].co for i in [0,1]]
edge_vector = v1 - v2
if edge_vector.length < 1e-4:
# zero-length edge, vertices at same location
continue
edge_center = (v1 + v2) / 2
# average face coordinates, if connected to more than 1 valid face
if len(faces) > 1:
face_normal = mathutils.Vector()
face_center = mathutils.Vector()
for face in faces:
face_normal += face.normal
face_center += face.center
face_normal /= len(faces)
face_center /= len(faces)
else:
face_normal = faces[0].normal
face_center = faces[0].center
if face_normal.length < 1e-4:
# faces with a surface of 0 have no face normal
continue
# calculate virtual edge normal
edge_normal = edge_vector.cross(face_normal)
edge_normal.length = 0.01
if (face_center - (edge_center + edge_normal)).length > \
(face_center - (edge_center - edge_normal)).length:
# make normal face the correct way
edge_normal.negate()
edge_normal.normalize()
# add virtual edge normal as entry for both vertices it connects
for vertex in edge.key:
vertex_normals[vertex].append(edge_normal)
"""
calculation based on connection with other loop (vertex focused method)
- used for vertices that aren't connected to any valid faces
plane_normal = edge_vector x connection_vector
vertex_normal = plane_normal x edge_vector
"""
vertices = [vertex for vertex, normal in vertex_normals.items() if not \
normal]
if vertices:
# edge vectors connected to vertices
edge_vectors = dict([[vertex, []] for vertex in vertices])
for edge in edges:
for v in edge.key:
if v in edge_vectors:
edge_vector = mesh.vertices[edge.key[0]].co - \
mesh.vertices[edge.key[1]].co
if edge_vector.length < 1e-4:
# zero-length edge, vertices at same location
continue
edge_vectors[v].append(edge_vector)
# connection vectors between vertices of both loops
connection_vectors = dict([[vertex, []] for vertex in vertices])
connections = dict([[vertex, []] for vertex in vertices])
for v1, v2 in lines:
if v1 in connection_vectors or v2 in connection_vectors:
new_vector = mesh.vertices[v1].co - mesh.vertices[v2].co
if new_vector.length < 1e-4:
# zero-length connection vector,
# vertices in different loops at same location
continue
if v1 in connection_vectors:
connection_vectors[v1].append(new_vector)
connections[v1].append(v2)
if v2 in connection_vectors:
connection_vectors[v2].append(new_vector)
connections[v2].append(v1)
connection_vectors = average_vector_dictionary(connection_vectors)
connection_vectors = dict([[vertex, vector[0]] if vector else \
[vertex, []] for vertex, vector in connection_vectors.items()])
for vertex, values in edge_vectors.items():
# vertex normal doesn't matter, just assign a random vector to it
if not connection_vectors[vertex]:
vertex_normals[vertex] = [mathutils.Vector((1, 0, 0))]
continue
# calculate to what location the vertex is connected,
# used to determine what way to flip the normal
connected_center = mathutils.Vector()
for v in connections[vertex]:
connected_center += mesh.vertices[v].co
if len(connections[vertex]) > 1:
connected_center /= len(connections[vertex])
if len(connections[vertex]) == 0:
# shouldn't be possible, but better safe than sorry
vertex_normals[vertex] = [mathutils.Vector((1, 0, 0))]
continue
# can't do proper calculations, because of zero-length vector
if not values:
if (connected_center - (mesh.vertices[vertex].co + \
connection_vectors[vertex])).length < (connected_center - \
(mesh.vertices[vertex].co - connection_vectors[vertex])).\
length:
connection_vectors[vertex].negate()
vertex_normals[vertex] = [connection_vectors[vertex].\
normalized()]
continue
# calculate vertex normals using edge-vectors,
# connection-vectors and the derived plane normal
for edge_vector in values:
plane_normal = edge_vector.cross(connection_vectors[vertex])
vertex_normal = edge_vector.cross(plane_normal)
vertex_normal.length = 0.1
if (connected_center - (mesh.vertices[vertex].co + \
vertex_normal)).length < (connected_center - \
(mesh.vertices[vertex].co - vertex_normal)).length:
# make normal face the correct way
vertex_normal.negate()
vertex_normal.normalize()
vertex_normals[vertex].append(vertex_normal)
# average virtual vertex normals, based on all edges it's connected to
vertex_normals = average_vector_dictionary(vertex_normals)
vertex_normals = dict([[vertex, vector[0]] for vertex, vector in \
vertex_normals.items()])
return(vertex_normals)
# add vertices to mesh
def bridge_create_vertices(mesh, vertices):
start_index = len(mesh.vertices)
mesh.vertices.add(len(vertices))
for i in range(len(vertices)):
mesh.vertices[start_index + i].co = vertices[i]
# add faces to mesh
def bridge_create_faces(mesh, faces, twist):
# have the normal point the correct way
if twist < 0:
[face.reverse() for face in faces]
faces = [face[2:]+face[:2] if face[0]==face[1] else face for \
face in faces]
# eekadoodle prevention
for i in range(len(faces)):
if not faces[i][-1]:
if faces[i][0] == faces[i][-1]:
faces[i] = [faces[i][1], faces[i][2], faces[i][3], faces[i][1]]
else:
faces[i] = [faces[i][-1]] + faces[i][:-1]
start_faces = len(mesh.faces)
mesh.faces.add(len(faces))
for i in range(len(faces)):
mesh.faces[start_faces + i].vertices_raw = faces[i]
mesh.update(calc_edges = True) # calc_edges prevents memory-corruption
# calculate input loops
def bridge_get_input(mesh):
# create list of internal edges, which should be skipped
eks_of_selected_faces = [item for sublist in [face.edge_keys for face \
in mesh.faces if face.select and not face.hide] for item in sublist]
edge_count = {}
for ek in eks_of_selected_faces:
if ek in edge_count:
edge_count[ek] += 1
else:
edge_count[ek] = 1
internal_edges = [ek for ek in edge_count if edge_count[ek] > 1]
# sort correct edges into loops
selected_edges = [edge.key for edge in mesh.edges if edge.select \
and not edge.hide and edge.key not in internal_edges]
loops = get_connected_selections(selected_edges)
return(loops)
# return values needed by the bridge operator
def bridge_initialise(mesh, interpolation):
if interpolation == 'cubic':
# dict with edge-key as key and list of connected valid faces as value
face_blacklist = [face.index for face in mesh.faces if face.select or \
face.hide]
edge_faces = dict([[edge.key, []] for edge in mesh.edges if not \
edge.hide])
for face in mesh.faces:
if face.index in face_blacklist:
continue
for key in face.edge_keys:
edge_faces[key].append(face)
# dictionary with the edge-key as key and edge as value
edgekey_to_edge = dict([[edge.key, edge] for edge in mesh.edges if \
edge.select and not edge.hide])
else:
edge_faces = False
edgekey_to_edge = False
# selected faces input
old_selected_faces = [face.index for face in mesh.faces if face.select \
and not face.hide]
# find out if faces created by bridging should be smoothed
smooth = False
if mesh.faces:
if sum([face.use_smooth for face in mesh.faces])/len(mesh.faces) \
>= 0.5:
smooth = True
return(edge_faces, edgekey_to_edge, old_selected_faces, smooth)
# return a string with the input method
def bridge_input_method(loft, loft_loop):
method = ""
if loft:
if loft_loop:
method = "Loft loop"
else:
method = "Loft no-loop"
else:
method = "Bridge"
return(method)
# match up loops in pairs, used for multi-input bridging
def bridge_match_loops(mesh, loops):
# calculate average loop normals | |
# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
# This software is distributed under the terms and conditions of the 'Apache-2.0'
# license which can be found in the file 'LICENSE' in this package distribution
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
import copy
import re
import os
import types
import requests
import time
from functools import wraps
from flask import request
import logging
from moon_utilities import exceptions
from moon_utilities import configuration
LOG = logging.getLogger("moon.utilities." + __name__)
keystone_config = configuration.get_configuration("openstack/keystone")["openstack/keystone"]
# slave = configuration.get_configuration(configuration.SLAVE)["slave"]
__targets = {}
def filter_input(func_or_str):
def __filter(string):
if string and type(string) is str:
return "".join(re.findall("[\w\- +]*", string))
return string
def __filter_dict(arg):
result = dict()
for key in arg.keys():
if key == "email":
result["email"] = __filter_email(arg[key])
elif key == "password":
result["password"] = arg['password']
else:
result[key] = __filter(arg[key])
return result
def __filter_email(string):
if string and type(string) is str:
return "".join(re.findall("[\w@\._\- +]*", string))
return string
def wrapped(*args, **kwargs):
_args = []
for arg in args:
if isinstance(arg, str):
arg = __filter(arg)
elif isinstance(arg, list):
arg = [__filter(item) for item in arg]
elif isinstance(arg, tuple):
                arg = tuple(__filter(item) for item in arg)
elif isinstance(arg, dict):
arg = __filter_dict(arg)
_args.append(arg)
for arg in kwargs:
            if isinstance(kwargs[arg], str):
                kwargs[arg] = __filter(kwargs[arg])
elif isinstance(kwargs[arg], list):
kwargs[arg] = [__filter(item) for item in kwargs[arg]]
elif isinstance(kwargs[arg], tuple):
                kwargs[arg] = tuple(__filter(item) for item in kwargs[arg])
elif isinstance(kwargs[arg], dict):
kwargs[arg] = __filter_dict(kwargs[arg])
return func_or_str(*_args, **kwargs)
if isinstance(func_or_str, str):
return __filter(func_or_str)
if isinstance(func_or_str, list):
return [__filter(item) for item in func_or_str]
if isinstance(func_or_str, tuple):
        return tuple(__filter(item) for item in func_or_str)
if isinstance(func_or_str, dict):
return __filter_dict(func_or_str)
if isinstance(func_or_str, types.FunctionType):
return wrapped
return None
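# Illustrative use of filter_input (a sketch, not part of the module API): it
# can decorate a function so that every str/list/tuple/dict argument is
# sanitised, or it can be applied directly to a plain value.
#
#   @filter_input
#   def create_user(name, email=""):
#       ...
#
#   cleaned = filter_input("some<script>value")   # -> "somescriptvalue"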
def enforce(action_names, object_name, **extra):
"""Fake version of the enforce decorator"""
def wrapper_func(func):
def wrapper_args(*args, **kwargs):
# LOG.info("kwargs={}".format(kwargs))
# kwargs['user_id'] = kwargs.pop('user_id', "admin")
# LOG.info("Calling enforce on {} with args={} kwargs={}".format(func.__name__, args, kwargs))
return func(*args, **kwargs)
return wrapper_args
return wrapper_func
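# Illustrative application of the decorator (the action/object names below are
# assumptions, not values defined in this module):
#
#   @enforce("read", "pdp")
#   def get_pdp(user_id=None):
#       ...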
def login(user=None, password=None, domain=None, project=None, url=None):
if not user:
user = keystone_config['user']
if not password:
password = keystone_config['password']
if not domain:
domain = keystone_config['domain']
if not project:
project = keystone_config['project']
if not url:
url = keystone_config['url']
headers = {
"Content-Type": "application/json"
}
data_auth = {
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"domain": {
"id": domain
},
"name": user,
"password": password
}
}
},
"scope": {
"project": {
"domain": {
"id": domain
},
"name": project
}
}
}
}
req = requests.post("{}/auth/tokens".format(url),
json=data_auth, headers=headers,
verify=keystone_config['certificate'])
if req.status_code in (200, 201, 204):
headers['X-Auth-Token'] = req.headers['X-Subject-Token']
return headers
LOG.error(req.text)
raise exceptions.KeystoneError
def logout(headers, url=None):
if not url:
url = keystone_config['url']
headers['X-Subject-Token'] = headers['X-Auth-Token']
req = requests.delete("{}/auth/tokens".format(url), headers=headers, verify=keystone_config['certificate'])
if req.status_code in (200, 201, 204):
return
LOG.error(req.text)
raise exceptions.KeystoneError
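# Minimal usage sketch for login()/logout() (credentials and endpoint default
# to the "openstack/keystone" configuration loaded above):
#
#   headers = login()          # returns headers carrying an X-Auth-Token
#   # ... perform authenticated requests with these headers ...
#   logout(headers)            # revokes the token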
class Context:
def __init__(self, init_context, cache):
self.cache = cache
self.__keystone_project_id = init_context.get("project_id")
self.__pdp_id = None
self.__pdp_value = None
for _pdp_key, _pdp_value in self.cache.pdp.items():
if _pdp_value["keystone_project_id"] == self.__keystone_project_id:
self.__pdp_id = _pdp_key
self.__pdp_value = copy.deepcopy(_pdp_value)
break
if not self.__pdp_value:
raise exceptions.AuthzException(
"Cannot create context for authz "
"with Keystone project ID {}".format(
self.__keystone_project_id
))
self.__subject = init_context.get("subject_name")
self.__object = init_context.get("object_name")
self.__action = init_context.get("action_name")
self.__current_request = None
self.__request_id = init_context.get("req_id")
self.__cookie = init_context.get("cookie")
self.__manager_url = init_context.get("manager_url")
self.__interface_name = init_context.get("interface_name")
self.__index = -1
# self.__init_initial_request()
self.__headers = []
policies = self.cache.policies
models = self.cache.models
for policy_id in self.__pdp_value["security_pipeline"]:
model_id = policies[policy_id]["model_id"]
for meta_rule in models[model_id]["meta_rules"]:
self.__headers.append(meta_rule)
self.__meta_rules = self.cache.meta_rules
self.__pdp_set = {}
# self.__init_pdp_set()
def delete_cache(self):
self.cache = {}
def set_cache(self, cache):
self.cache = cache
def increment_index(self):
self.__index += 1
self.__init_current_request()
self.__init_pdp_set()
@property
def current_state(self):
return self.__pdp_set[self.__headers[self.__index]]['effect']
@current_state.setter
def current_state(self, state):
if state not in ("grant", "deny", "passed"):
state = "passed"
self.__pdp_set[self.__headers[self.__index]]['effect'] = state
@current_state.deleter
def current_state(self):
self.__pdp_set[self.__headers[self.__index]]['effect'] = "unset"
@property
def current_policy_id(self):
return self.__pdp_value["security_pipeline"][self.__index]
@current_policy_id.setter
def current_policy_id(self, value):
pass
@current_policy_id.deleter
def current_policy_id(self):
pass
def __init_current_request(self):
self.__subject = self.cache.get_subject(
self.__pdp_value["security_pipeline"][self.__index],
self.__subject)
self.__object = self.cache.get_object(
self.__pdp_value["security_pipeline"][self.__index],
self.__object)
self.__action = self.cache.get_action(
self.__pdp_value["security_pipeline"][self.__index],
self.__action)
self.__current_request = dict(self.initial_request)
def __init_pdp_set(self):
for header in self.__headers:
self.__pdp_set[header] = dict()
self.__pdp_set[header]["meta_rules"] = self.__meta_rules[header]
self.__pdp_set[header]["target"] = self.__add_target(header)
self.__pdp_set[header]["effect"] = "unset"
self.__pdp_set["effect"] = "deny"
# def update_target(self, context):
# # result = dict()
# current_request = context['current_request']
# _subject = current_request.get("subject")
# _object = current_request.get("object")
# _action = current_request.get("action")
# meta_rule_id = context['headers'][context['index']]
# policy_id = self.cache.get_policy_from_meta_rules(meta_rule_id)
# meta_rules = self.cache.meta_rules()
# # for meta_rule_id in meta_rules:
# for sub_cat in meta_rules[meta_rule_id]['subject_categories']:
# if sub_cat not in context["pdp_set"][meta_rule_id]["target"]:
# context["pdp_set"][meta_rule_id]["target"][sub_cat] = []
# for assign in self.cache.get_subject_assignments(policy_id, _subject, sub_cat).values():
# for assign in assign["assignments"]:
# if assign not in context["pdp_set"][meta_rule_id]["target"][sub_cat]:
# context["pdp_set"][meta_rule_id]["target"][sub_cat].append(assign)
# for obj_cat in meta_rules[meta_rule_id]['object_categories']:
# if obj_cat not in context["pdp_set"][meta_rule_id]["target"]:
# context["pdp_set"][meta_rule_id]["target"][obj_cat] = []
# for assign in self.cache.get_object_assignments(policy_id, _object, obj_cat).values():
# for assign in assign["assignments"]:
# if assign not in context["pdp_set"][meta_rule_id]["target"][obj_cat]:
# context["pdp_set"][meta_rule_id]["target"][obj_cat].append(assign)
# for act_cat in meta_rules[meta_rule_id]['action_categories']:
# if act_cat not in context["pdp_set"][meta_rule_id]["target"]:
# context["pdp_set"][meta_rule_id]["target"][act_cat] = []
# for assign in self.cache.get_action_assignments(policy_id, _action, act_cat).values():
# for assign in assign["assignments"]:
# if assign not in context["pdp_set"][meta_rule_id]["target"][act_cat]:
# context["pdp_set"][meta_rule_id]["target"][act_cat].append(assign)
# # context["pdp_set"][meta_rule_id]["target"].update(result)
def __add_target(self, meta_rule_id):
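        # Build, for every category of the given meta-rule, the list of
        # assignments of the current subject/object/action within the policy
        # that owns the meta-rule; returns {category_id: [assignment, ...]}.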
result = dict()
_subject = self.__current_request["subject"]
_object = self.__current_request["object"]
_action = self.__current_request["action"]
meta_rules = self.cache.meta_rules
policy_id = self.cache.get_policy_from_meta_rules(meta_rule_id)
for sub_cat in meta_rules[meta_rule_id]['subject_categories']:
if sub_cat not in result:
result[sub_cat] = []
result[sub_cat].extend(
self.cache.get_subject_assignments(policy_id, _subject, sub_cat))
for obj_cat in meta_rules[meta_rule_id]['object_categories']:
if obj_cat not in result:
result[obj_cat] = []
result[obj_cat].extend(
self.cache.get_object_assignments(policy_id, _object, obj_cat))
for act_cat in meta_rules[meta_rule_id]['action_categories']:
if act_cat not in result:
result[act_cat] = []
result[act_cat].extend(
self.cache.get_action_assignments(policy_id, _action, act_cat))
return result
def __repr__(self):
return """PDP ID: {id}
current_request: {current_request}
request_id: {request_id}
index: {index}
headers: {headers}
pdp_set: {pdp_set}
""".format(
id=self.__pdp_id,
current_request=self.__current_request,
request_id=self.__request_id,
headers=self.__headers,
pdp_set=self.__pdp_set,
index=self.__index
)
def to_dict(self):
return {
"initial_request": copy.deepcopy(self.initial_request),
"current_request": copy.deepcopy(self.__current_request),
"headers": copy.deepcopy(self.__headers),
"index": copy.deepcopy(self.__index),
"pdp_set": copy.deepcopy(self.__pdp_set),
"request_id": copy.deepcopy(self.__request_id),
"manager_url": copy.deepcopy(self.__manager_url),
"interface_name": copy.deepcopy(self.__interface_name),
}
@property
def request_id(self):
return self.__request_id
@request_id.setter
def request_id(self, value):
raise Exception("You cannot update the request_id")
@request_id.deleter
def request_id(self):
raise Exception("You cannot update the request_id")
@property
def manager_url(self):
return self.__manager_url
@manager_url.setter
def manager_url(self, value):
raise Exception("You cannot update the manager_url")
@manager_url.deleter
def manager_url(self):
raise Exception("You cannot update the manager_url")
@property
def interface_name(self):
return self.__interface_name
@interface_name.setter
def interface_name(self, value):
raise Exception("You cannot update the interface_name")
@interface_name.deleter
def interface_name(self):
raise Exception("You cannot update the interface_name")
@property
def cookie(self):
return self.__cookie
@cookie.setter
def cookie(self, value):
raise Exception("You cannot update the cookie")
@cookie.deleter
def cookie(self):
raise Exception("You cannot delete the cookie")
@property
def initial_request(self):
return {
"subject": self.__subject,
"object": self.__object,
"action": self.__action,
}
@initial_request.setter
def initial_request(self, value):
raise Exception("You are not allowed to update the initial_request")
@initial_request.deleter
def initial_request(self):
raise Exception("You are not allowed to delete the initial_request")
@property
def current_request(self):
if not self.__current_request:
self.__current_request = copy.deepcopy(self.initial_request)
return self.__current_request
@current_request.setter
def current_request(self, value):
self.__current_request = copy.deepcopy(value)
# Note (asteroide): if the current request is modified, we must update the PDP Set.
self.__init_pdp_set()
@current_request.deleter
def current_request(self):
self.__current_request = {}
self.__pdp_set = {}
@property
def headers(self):
return self.__headers
@headers.setter
def headers(self, headers):
self.__headers = headers
@headers.deleter
def headers(self):
self.__headers = list()
@property
def index(self):
return self.__index
@index.setter
def index(self, index):
self.__index += 1
@index.deleter
def index(self):
self.__index = -1
@property
def pdp_set(self):
return self.__pdp_set
@pdp_set.setter
def pdp_set(self, value):
raise Exception("You are not allowed to modify the pdp_set")
@pdp_set.deleter
def pdp_set(self):
self.__pdp_set = {}
TOKENS = {}
def check_token(token, url=None):
_verify = False
if keystone_config['certificate']:
_verify = keystone_config['certificate']
try:
os.environ.pop("http_proxy")
os.environ.pop("https_proxy")
except KeyError:
pass
if not url:
url = keystone_config['url']
headers = {
"Content-Type": "application/json",
'X-Subject-Token': token,
'X-Auth-Token': token,
}
if not keystone_config['check_token']:
# TODO (asteroide): must send the admin id
return "admin" if not token else token
elif keystone_config['check_token'].lower() in ("false", "no", "n"):
# TODO (asteroide): must send the admin id
return "admin" if not token else token
if keystone_config['check_token'].lower() in ("yes", "y", "true"):
if token in TOKENS:
delta = time.mktime(TOKENS[token]["expires_at"]) - time.mktime(time.gmtime())
if delta > 0:
return TOKENS[token]["user"]
raise exceptions.KeystoneError
else:
req = requests.get("{}/auth/tokens".format(url), headers=headers, verify=_verify)
if req.status_code in (200, 201):
                # Note (asteroide): the timestamp is not in ISO 8601, so it is necessary to delete
# characters after the dot
token_time = req.json().get("token").get("expires_at").split(".")
TOKENS[token] = dict()
TOKENS[token]["expires_at"] = time.strptime(token_time[0], "%Y-%m-%dT%H:%M:%S")
TOKENS[token]["user"] = req.json().get("token").get("user").get("id")
return TOKENS[token]["user"]
LOG.error("{} - {}".format(req.status_code, req.text))
raise exceptions.KeystoneError
    elif keystone_config['check_token'].lower() ==
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
from decoding_algorithms.ctc_decoder_base import CTCDecoderBase
class CTCScopeSearchLengthControlDecoder(CTCDecoderBase):
"""
An equivalent implementation of the CTC Beam Search with Length Control (BSLC).
    Compared with naive BSLC, this implementation can generalize toward brute-force search by increasing the scope.
"""
def __init__(self, dictionary, decoder_parameters):
super().__init__(dictionary, decoder_parameters)
# Sample temporary variable
self.sample_desired_length = None
self.id_to_index_dict_list = None
self.index_to_id_dict_list = None
self.ctc_sequence_length = None
self.prob_sequence = None
self.prev_max_row_index = None
self.scope_lprob_table = None
self.transition_tracker_table = None
self.single_prefix_index_table = None
# Decoder configuration parameters
self.force_length = decoder_parameters["force_length"]
self.use_length_ratio = decoder_parameters["use_length_ratio"]
self.k = decoder_parameters["k"] # dummy variable
self.beam_size = decoder_parameters["beam_size"]
self.scope = decoder_parameters["scope"]
self.margin_criteria = decoder_parameters["marg_criteria"]
self.blank_index = dictionary.blank()
self.replacing_value = float("-inf")
self.device = None
self.dtype = None
# Assertions on decoder parameters
        assert self.scope > 1, "The scope must be an integer greater than 1"
assert self.beam_size > 0, "Beam size are required to be positive"
assert self.desired_length > 0, "The desired length should be greater than 0"
# assert self.beam_size % 2 == 0, "The beam size must be even number"
# Initialize reusable variables
self.special_element_tuple_list = [list(range(self.k)), [0] * self.k]
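        # The pairs above are (previous-column index, current-column index)
        # entries that are always treated as blank/repeated transitions: any
        # previous top-k slot moving into slot 0, which is reserved for blank.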
def top_k_filtering(self, column, lprob):
"""
Get the top-k most probable token and their corresponding probabilities
logits (tensor): the logits returned by the model at the current time step
Return:
values (tensor of size k): the probability of the most probable tokens, with the first one set to be the blank token
index_to_id_dict (dict of size k): a dictionary mapping from the element index of the values vector to their real id
repeated_element_index_list (list): a list of the index (row, column) indicating the repeating index
"""
top_k_id_tensor = torch.zeros(self.k, dtype=torch.long, device=self.device) # word id
top_k_lprob_tensor = torch.zeros(self.k, dtype=self.dtype, device=self.device)
# Record the blank token id, no matter whether it's in top k
top_k_id_tensor[0] = self.blank_index
top_k_lprob_tensor[0] = lprob[self.blank_index]
# Find the k most probable words and their indexes
naive_top_k_lprob, naive_top_k_id = lprob.topk(self.k)
# Fill in the remaining slot of the top_k_id_tensor and top_k_lprob_tensor
top_k_id_tensor[1:] = naive_top_k_id[naive_top_k_id != self.blank_index][:self.k - 1]
top_k_lprob_tensor[1:] = naive_top_k_lprob[naive_top_k_id != self.blank_index][:self.k - 1]
# create dictionaries mapping between index and ids
index_to_id_dict = {k: v.item() for k, v in enumerate(top_k_id_tensor)}
id_to_index_dict = {v.item(): k for k, v in enumerate(top_k_id_tensor)}
# Record the dictionary
self.index_to_id_dict_list[column] = index_to_id_dict
self.id_to_index_dict_list[column] = id_to_index_dict
if column == 0:
# For the first column, there is no repeated element
repeated_or_special_element_index_list = self.special_element_tuple_list
else:
prev_id_to_index_dict = self.id_to_index_dict_list[column - 1]
prev_index_to_id_dict = self.index_to_id_dict_list[column - 1]
# Find the overlapping words except blank token in the current top_k words and previous top_k words
repeated_element_list = set(prev_id_to_index_dict.keys()).intersection(set(top_k_id_tensor[1:].tolist()))
repeated_element_tuple_index_list = [[prev_id_to_index_dict[element] for element in repeated_element_list],
[id_to_index_dict[element] for element in repeated_element_list]]
repeated_or_special_element_index_list = repeated_element_tuple_index_list
repeated_or_special_element_index_list[0] += self.special_element_tuple_list[0]
repeated_or_special_element_index_list[1] += self.special_element_tuple_list[1]
repeated_or_special_element_index_list[0] = tuple(repeated_or_special_element_index_list[0])
repeated_or_special_element_index_list[1] = tuple(repeated_or_special_element_index_list[1])
return top_k_lprob_tensor, repeated_or_special_element_index_list
def get_blank_token_prob(self, current_filtered_log_prob):
only_special_cloned_prob = current_filtered_log_prob.clone()
only_special_cloned_prob[1:] = self.replacing_value
return only_special_cloned_prob
def get_non_blank_token_prob(self, current_filtered_log_prob):
only_non_special_cloned_prob = current_filtered_log_prob.clone()
only_non_special_cloned_prob[0] = self.replacing_value
return only_non_special_cloned_prob
def scope_search_row_inference(self, column, reshaping_index, only_blank_prob, only_non_blank_prob,
blank_or_repeated_transition_matrix, non_blank_non_repeated_transition_matrix,
repeated_or_blank_transition_mask):
"""
Perform actual table filling for each rows.
"""
repeated_or_blank_transition_mask = repeated_or_blank_transition_mask[reshaping_index]
repeated_or_blank_transition_mask = \
repeated_or_blank_transition_mask.repeat(
2 * [1] + (repeated_or_blank_transition_mask.dim() - 2) * [self.beam_size])
# Initialization
if column == 0:
# Add the blank token probability to all dimensions of the first row of scope table
self.scope_lprob_table[0] += only_blank_prob[reshaping_index]
# Add the non-blank token probability to all dimensions of the second row of the scope table
self.scope_lprob_table[1] += only_non_blank_prob[reshaping_index]
# No other operation is needed for the first column
return
# Recursion
# We first update the expansion row, then middle rows and the first row to avoid undesired in-place update
# Expansion row
if column + 1 < self.sample_desired_length:
# Calculate the probability of each word in the pure transition.
self.scope_lprob_table[column + 1] = self.scope_lprob_table[self.prev_max_row_index] \
+ non_blank_non_repeated_transition_matrix[reshaping_index]
# Record the previous row of these transitions.
self.transition_tracker_table[column + 1, ..., ~repeated_or_blank_transition_mask, :] = \
self.transition_tracker_table[self.prev_max_row_index, ..., ~repeated_or_blank_transition_mask, :]
self.transition_tracker_table[column + 1, ..., ~repeated_or_blank_transition_mask, [column - 1]] \
= self.prev_max_row_index
# Middle rows
# Transition probability from diagonal neighbours
diagonal_transition_lprob \
= self.scope_lprob_table[0:self.prev_max_row_index] + non_blank_non_repeated_transition_matrix[
reshaping_index]
# Transition probability from row neighbours
row_transition_lprob \
= self.scope_lprob_table[1:self.prev_max_row_index + 1] + blank_or_repeated_transition_matrix[
reshaping_index]
# Combine the two types of transitions into one Tensor
sum_transition_lprob = diagonal_transition_lprob
sum_transition_lprob[..., repeated_or_blank_transition_mask] \
= row_transition_lprob[..., repeated_or_blank_transition_mask]
self.scope_lprob_table[1:self.prev_max_row_index + 1] = sum_transition_lprob
# Record the previous rows for each of the current row
current_tracker_dim = self.transition_tracker_table[1:self.prev_max_row_index + 1,
..., ~repeated_or_blank_transition_mask, [column - 1]].dim() - 1
# Copy the transition history
diagonal_transition_history = self.transition_tracker_table[0:self.prev_max_row_index].clone()
diagonal_transition_history[..., ~repeated_or_blank_transition_mask, [column - 1]] += \
torch.arange(0, self.prev_max_row_index, device=self.device)[(...,) + (None,) * current_tracker_dim] + 1
row_transition_history = self.transition_tracker_table[1:self.prev_max_row_index + 1].clone()
row_transition_history[..., repeated_or_blank_transition_mask, [column - 1]] += \
torch.arange(1, self.prev_max_row_index + 1, device=self.device)[(...,) + (None,) * current_tracker_dim] + 1
# Record the current chosen index
self.transition_tracker_table[1:self.prev_max_row_index + 1, ..., ~repeated_or_blank_transition_mask, :] \
= diagonal_transition_history[..., ~repeated_or_blank_transition_mask, :]
self.transition_tracker_table[1:self.prev_max_row_index + 1, ..., repeated_or_blank_transition_mask, :] \
= row_transition_history[..., repeated_or_blank_transition_mask, :]
# First row
# Add the blank token probability to all dimensions of the first row of scope table
self.scope_lprob_table[0] = self.scope_lprob_table[0] + only_blank_prob[reshaping_index]
# Set the previous row of the current first row to 0
transition_index = torch.zeros(self.beam_size, device=self.device, dtype=torch.int8) - 1
transition_index[0] = 1 # add 1 to encounter -1 in the initialization of the transition table
self.transition_tracker_table[0, ..., column - 1] += transition_index[reshaping_index]
def scope_search_column_inference(self, column):
"""
Perform table (prob table and prefix table) filling for a single column
"""
# Calculate some temporal variables
remaining_scope = self.scope - column # Remaining dimensions before filling in the current column
self.prev_max_row_index = min(column, self.sample_desired_length - 1) # Notice it's an index
# Find the top_k probability
current_step_lprob = self.prob_sequence[column].log()
filtered_lprob, repeated_or_special_element_index_list = self.top_k_filtering(column, current_step_lprob)
only_blank_cloned_prob = self.get_blank_token_prob(filtered_lprob)
only_non_blank_cloned_prob = self.get_non_blank_token_prob(filtered_lprob)
# Create a mask for non_blank_non_repeat_transitions
repeated_or_blank_transition_mask = torch.zeros([self.k, self.k], dtype=torch.bool, device=self.device)
repeated_or_blank_transition_mask[repeated_or_special_element_index_list] = True
# Mask out the blank and repeated transitions
non_blank_non_repeated_transition_matrix = filtered_lprob.expand(self.k, self.k).clone()
non_blank_non_repeated_transition_matrix[repeated_or_blank_transition_mask] = self.replacing_value
# Mask out the non-blank and non-repeated transitions
blank_or_repeated_transition_matrix = filtered_lprob.expand(self.k, self.k).clone()
blank_or_repeated_transition_matrix[~repeated_or_blank_transition_mask] = self.replacing_value
# Find the appropriate reshaping index and marginalize the lprob matrix if needed.
if remaining_scope > 0:
reshaping_index = tuple((...,) + (None,) * (remaining_scope - 1))
else:
most_probable_word_index = self.margin_over_prob_table()
reshaping_index = tuple((...,) + (None,) * (1 - 1))
self.single_prefix_index_table[0:self.prev_max_row_index + 1, column - 1] = most_probable_word_index
self.scope_search_row_inference(column, reshaping_index, only_blank_cloned_prob,
only_non_blank_cloned_prob, blank_or_repeated_transition_matrix,
non_blank_non_repeated_transition_matrix, repeated_or_blank_transition_mask)
def margin_over_prob_table(self):
"""
Marginalize over the first dimension of the table
"""
remaining_axis = tuple(range(2, self.scope + 1)) # A tuple of the remaining axis after marginalization
if self.margin_criteria == "mean":
sum_old_prob_along_remaining_axis = torch.logsumexp(self.scope_lprob_table[:self.prev_max_row_index + 1],
dim=remaining_axis)
most_probable_word_index = torch.argmax(sum_old_prob_along_remaining_axis, dim=1)
elif self.margin_criteria == "filtered_mean":
# Select token based on its average non-inf probability
sum_lprob_along_remaining_axis \
= torch.logsumexp(self.scope_lprob_table[:self.prev_max_row_index + 1], dim=remaining_axis)
non_inf_sum = (self.scope_lprob_table[:self.prev_max_row_index + 1] != float("-inf")).long().sum(
remaining_axis)
sum_lprob_along_remaining_axis -= non_inf_sum.log() # take the average
sum_lprob_along_remaining_axis = torch.nan_to_num(sum_lprob_along_remaining_axis, float("-inf"))
most_probable_word_index = torch.argmax(sum_lprob_along_remaining_axis, dim=1)
elif self.margin_criteria == "max":
# If we are using max as the select criteria, we select the token in the first axis that can lead to the
# sub-sequence with the maximum probability
max_old_prob_along_remaining_axis = torch.amax(self.scope_lprob_table[:self.prev_max_row_index + 1],
dim=remaining_axis)
most_probable_word_index = torch.argmax(max_old_prob_along_remaining_axis, dim=1)
else:
raise NotImplementedError("Haven't designed other evaluation criteria")
# Marginalize the lprob scope table based on the chosen words.
repeat_index = tuple([1] + (self.scope - 1) * [1] + [self.beam_size])
row_axis = torch.arange(0, self.prev_max_row_index + 1)
self.scope_lprob_table[0:self.prev_max_row_index + 1] \
= self.scope_lprob_table[row_axis, most_probable_word_index].unsqueeze(-1).repeat(repeat_index)
# Marginalize the transition scope table based on the chosen words.
repeat_index = tuple([1] + (self.scope - 1) * [1] + [self.beam_size] + [1])
self.transition_tracker_table[0:self.prev_max_row_index + 1] \
= self.transition_tracker_table[row_axis, most_probable_word_index].unsqueeze(-2).repeat(repeat_index)
return most_probable_word_index
def ctc_scope_search_length_control_initialization(self, logits):
"""
Initialize some temporary variables
"""
        self.prob_sequence = logits.softmax(dim=-1)  # Get the probability distribution from the logits
self.ctc_sequence_length = len(self.prob_sequence) # The length of the ctc output sequence.
        assert self.scope
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import sys
import optparse
import os
import subprocess
import test_results
import stats
import graph_utilities
def calc_sums(epochs):
sum_list = []
sum_list.append(epochs[0][1]) #num_epochs
sum_list.append(epochs[0][1]) #epoch_number
#each of the values
for i in range(2, len(epochs[0])):
sum = 0
#each of the items
for index in range(len(epochs)):
sum += epochs[index][i]
sum_list.append(sum)
return sum_list
def get_stats_type(data, index):
means = []
stddevs = []
curr_epochs_values = []
curr_num_epochs = -1
for epoch in data:
if epoch[1] != curr_num_epochs:
curr_num_epochs = epoch[1]
if curr_num_epochs != 1:
means.append(np.mean(np.array(curr_epochs_values)))
stddevs.append(np.std(np.array(curr_epochs_values)))
del curr_epochs_values[:]
curr_epochs_values.append(float(epoch[index]))
means.append(np.mean(np.array(curr_epochs_values)))
stddevs.append(np.std(np.array(curr_epochs_values)))
return means, stddevs
def get_results_list(input_dir):
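    # Walk the per-epoch stats files in input_dir (sorted by epoch count) and,
    # for every epoch count, collect the raw per-epoch value rows plus their
    # total, compute and aggregation times; the first epoch's row of each
    # group is also kept separately (max_list).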
files = os.listdir(input_dir)
sorted_files = sorted(files, key=lambda x: (int(x.split('.')[0]), int(x.split('stats-')[-1]), x.split('.')[-1]))
time_list = []
curr_list = []
curr_total = []
curr_compute = []
curr_aggregation = []
total_list = []
compute_list = []
aggregation_list = []
max_list = []
epochs_list = []
curr_num_epochs = -1
headings = []
for f in sorted_files:
if "stream-stats" in f:
continue
num_epochs = int(f.split('.')[0])
if num_epochs != curr_num_epochs:
if len(curr_list) > 0:
time_list.append(curr_list)
max_list.append(curr_list[0])
epochs_list.append(curr_num_epochs)
                total_list.append(curr_total) #flush the totals accumulated for the previous epoch count
compute_list.append(curr_compute)
aggregation_list.append(curr_aggregation)
curr_list = []
curr_total = []
curr_compute = []
curr_aggregation = []
curr_num_epochs = num_epochs
test = test_results.Test_Results()
string = open(input_dir + "/" + f).readlines()
test.parse_lines(string)
num = f.split(".")[-1][len("taint-stats-"):]
stream_name = f.split(".")[:-1]
stream_name.append("stream-stats-" + str(num))
stream_filename = ".".join(stream_name)
string = open(input_dir + "/" + stream_filename).readlines()
test.parse_lines(string)
test.fix_dift_stats()
test.combine_stats()
curr_list.append(test.get_values())
curr_total.append(test.get_total_time())
curr_compute.append(test.get_compute_time())
curr_aggregation.append(test.get_aggregation_time())
# test.print_compute_times()
if test.get_compute_time() < 0:
print "whoops 1, negative compute time!?!",f
test.print_compute_times()
headings = test.get_titles()
time_list.append(curr_list)
max_list.append(curr_list[0])
epochs_list.append(curr_num_epochs)
total_list.append(curr_total)
compute_list.append(curr_compute)
aggregation_list.append(curr_aggregation)
if test.get_compute_time() < 0:
print "whoops, negative compute time!?!",f
        test.print_compute_times()
return headings, epochs_list,time_list, max_list, total_list, compute_list, aggregation_list
def parse_stats(input_dir,output_dir):
currd = os.getcwd()
for t in os.listdir(input_dir):
d = output_dir + "/" + t
subprocess.call(["/bin/cp", "-r",input_dir + "/" + t, d])
for d in os.listdir(output_dir):
os.chdir(output_dir + "/" + d)
#untar all of the tars, move them all to their final resting place
for tar in os.listdir("."):
print tar
num_epochs = tar.split(".")[0] #the num_epochs is the first value
subprocess.call(["/bin/tar","-xf", tar])
subprocess.call(["/bin/rm",tar])
for stats_file in os.listdir("tmp"):
subprocess.call(["/bin/mv", "tmp/" + stats_file, str(num_epochs) + "." + stats_file])
subprocess.call(["/bin/rm","-r","tmp"])
os.chdir(currd)
os.chdir(currd)
def create_stats_files(input_dir, nepochs, output_dir):
in_base = input_dir + "/" + str(nepochs) + "."
with open(output_dir + str(nepochs) + ".stats.txt", "w+") as outf:
stats.get_stats(nepochs, in_base, outf)
def get_aves(at):
    averages = [] #array indexed by num_epochs -> array of aves
for n in at:
count = len(n) #number of epochs
averages.append([])
for i in range(len(n[0])):
averages[-1].append(0.0)
# print "num_epochs",count,"num_stats",len(averages[-1])
for example in n:
for i in range(len(example)):
averages[-1][i] += example[i]
for i in range(len(averages[-1])):
            averages[-1][i] /= count
return averages
def get_tots(at):
    tots = [] #array indexed by num_epochs -> array of totals
for n in at:
count = len(n) #number of epochs
tots.append([])
for i in range(len(n[0])):
tots[-1].append(0.0)
print "num_epochs",count,"num_stats",len(tots[-1])
for example in n:
for i in range(len(example)):
tots[-1][i] += example[i]
return tots
def get_maxes(t):
    tots = [] #array indexed by num_epochs -> max (first epoch) value per epoch count
for n in t:
tots.append(n[0]) #the first epoch is always the max
return tots
def get_sum(t):
sums = []
for n in t: #array indexed by num_epochs -> array of sums of compute time
sum = 0
for example in n:
sum += example
sums.append([sum])
return sums
def print_aves(aves):
for r in aves:
print "next round!"
for n in r:
for i in n:
print i,
print ""
def build_samples(aves, es):
samples = [] #array from num_epochs -> sample
print "in build_samples"
epochs = {}
print len(aves), len(es)
for a,e in zip(aves,es): #for each epoch size
print "epoch",e,a,"\t"
for epoch,epoch_size in zip(a,e):
if epoch_size in epochs:
epochs[epoch_size].append(epoch)
else:
epochs[epoch_size] = [epoch]
print
for eps in sorted(epochs.keys()):
print "found",eps,"epochs",epochs[eps]
npl = np.array(epochs[eps])
s = test_results.Sample(len(epochs[eps]), np.mean(npl), np.std(npl))
samples.append(s)
for s in samples:
print s
return samples
def normalize_speedup_and_flip(samples, epochs):
if epochs[0] != 1 or len(epochs) == 1:
normal = samples[0] #we always normalize against the first size
else:
normal = samples[1]
ydata = []
yerr = []
for i in range(len(normal)):
cdata = []
cerr = []
for s in samples:
d = normal[i] / s[i] #normalize this value
#our measurements can't handle 0 time averages.. not sure what to do:
if d.mean == 0.0:
print "oh no.. what do I do when it takes 0 seconds?"
cdata.append(1.0)
cerr.append(0.0)
else:
cdata.append(d.mean)
cerr.append(d.ci)
ydata.append(cdata)
yerr.append(cerr)
return ydata, yerr
def normalize(samples, nsample):
rsamples = []
for s in samples:
d = nsample / s #normalize this value
rsamples.append(d)
return rsamples
def normalize_time_and_flip(samples, epochs):
if epochs[0] != 1 or len(epochs) == 1:
normal = samples[0] #we always normalize against the first size
else:
normal = samples[1]
ydata = []
yerr = []
for i in range(len(normal)):
cdata = []
cerr = []
for s in samples:
d = s[i] / normal[i] #normalize this value
#our measurements can't handle 0 time averages.. not sure what to do:
if d.mean == 0.0:
print "oh no.. what do I do when it takes 0 seconds?"
cdata.append(1.0)
cerr.append(0.0)
else:
cdata.append(d.mean)
cerr.append(d.ci)
ydata.append(cdata)
yerr.append(cerr)
return ydata, yerr
def parse_replay_time(f, odir):
subprocess.call(["/bin/cp",f,odir+"/replay_time"])
def build_replay_time(d):
rp_times = []
with open(d + "/replay_time") as ifile:
for w in ifile:
print w
rp_times.append(float(w.strip()))
nprt = np.array(rp_times)
return test_results.Sample(len(rp_times), np.mean(nprt),np.std(nprt))
def main():
parser = optparse.OptionParser()
parser.add_option("-i", "--input-dir", dest="input_dir",
help="where the stats files are saved currently", metavar="INPUT-DIR")
parser.add_option("-o", "--output-dir", dest="output_dir",
help="the dir where we want to save the output files", metavar="OUTPUT-Dir")
parser.add_option("-r", "--replay_time", dest="replay_time_file")
parser.add_option("-n", "--bm_name", dest="bm_name")
(options, args) = parser.parse_args()
if options.output_dir == None or options.output_dir == "":
print "must provide me with an output dir"
return -1
if not os.path.isdir(options.output_dir):
os.makedirs(options.output_dir)
if not os.path.isdir(options.output_dir + "/graphs_and_stats"):
os.makedirs(options.output_dir + "/graphs_and_stats")
#untar all of the stuff from the input_dir
if options.input_dir != None and options.input_dir != "":
parse_stats(options.input_dir,options.output_dir)
print "parsing input stats...?"
if options.replay_time_file:
parse_replay_time(options.replay_time_file, options.output_dir)
ave_times = []
max_times = []
tot_times = []
comp_times = []
agg_times = []
max_e = []
dift_times = []
epoch_times = []
files = os.listdir(options.output_dir)
for j in files:
if not os.path.isdir(options.output_dir + "/" + str(j)) or "graph" in j:
continue
print j
headings,epochs,times,max_list, total_list,compute_list,agg_list = get_results_list(options.output_dir + "/" + str(j))
if len(epochs) > len(max_e):
max_e = epochs
for i in range(len(times)):
curr_times = times[i]
num_epochs = epochs[i]
curr_arange = np.arange(len(curr_times))
graph_utilities.make_stacked_chart(curr_times,headings,curr_arange,options.output_dir +"/graphs_and_stats/"+ str(j) + "." + str(num_epochs), i * (int(j)+1))
create_stats_files(options.output_dir + "/" + str(j), num_epochs, options.output_dir + "/graphs_and_stats/" + str(j) + ".")
ttimes = get_tots(times)
dt = []
for t in ttimes:
dt.append(t[1] + t[2])
dift_times.append(dt)
epoch_times.append(epochs)
ave_times.append(get_aves(times))
tot_times.append(get_tots(times))
max_times.append(get_maxes(total_list))
comp_times.append(get_sum(compute_list))
        agg_times.append(get_maxes(agg_list)) #do I want the max? I'm actually not sure..
print "dift_stats"
samples = build_samples(dift_times, epoch_times)
with open(options.output_dir + "/dift.stats","w+") as stat_file:
for num,s in zip(max_e,samples):
print >> stat_file, num,",",s.mean,",",s.ci
# print num,",",s.mean,",",s.ci
print "latency_stats"
samples = build_samples(max_times, epoch_times)
#we'll normalize the samples if we can
if max_e[0] == 1:
samples = normalize(samples,samples[0])
data = []
errs = []
es = []
cur_val = 1
with open(options.output_dir + "/latency.stats","w+") as stat_file:
for num,s in zip(max_e,samples):
print >> stat_file, num,",",s.mean,",",s.ci
print num,",",s.mean,",",s.ci
while cur_val != num:
data.append(0)
errs.append(0)
es.append(cur_val)
cur_val *=2
# replay_time = build_replay_time(options.output_dir)
# print data
# print errs
# print es
# graph_utilities.make_scale_chart(data, errs,replay_time.mean,replay_time.ci,es,options.output_dir + "/scalability_graph", 19, title=options.bm_name + " Querry Latency", xaxis="Number of Cores",yaxis="Total Time (ms)") #hmm
print "latency_stats...raw!"
#output raw latency stats for further processing
samples = build_samples(max_times, epoch_times)
with open(options.output_dir + "/latency.raw.stats","w+") as stat_file:
for num,s in zip(max_e,samples):
print >> stat_file, num,",",s.mean,",",s.ci
print num,",",s.mean,",",s.ci
samples = build_samples(agg_times, epoch_times)
#we'll normalize the samples if we can
if max_e[0] == 1:
samples = normalize(samples,samples[0])
data = []
errs = []
es = []
cur_val = 1
with open(options.output_dir + "/aggregation.stats","w+") as stat_file:
for num,s in zip(max_e,samples):
print >> stat_file, num,",",s.mean,",",s.ci
print num,",",s.mean,",",s.ci
samples = build_samples(agg_times, epoch_times)
data = []
errs = []
    es = []
URLBase.schemas(garbage)
assert isinstance(schemas, set) is True
assert len(schemas) == 0
def test_apprise_notify_formats(tmpdir):
"""
API: Apprise() Input Formats tests
"""
    # Calling __load_matrix() a second time (it is an internal function) causes
    # it to skip over content already loaded into our matrix and therefore
    # accesses other if/else parts of the code that aren't otherwise called
__load_matrix()
a = Apprise()
# no items
assert len(a) == 0
class TextNotification(NotifyBase):
# set our default notification format
notify_format = NotifyFormat.TEXT
def __init__(self, **kwargs):
super(TextNotification, self).__init__()
def notify(self, **kwargs):
# Pretend everything is okay
return True
def url(self):
# Support URL
return ''
class HtmlNotification(NotifyBase):
# set our default notification format
notify_format = NotifyFormat.HTML
def __init__(self, **kwargs):
super(HtmlNotification, self).__init__()
def notify(self, **kwargs):
# Pretend everything is okay
return True
def url(self):
# Support URL
return ''
class MarkDownNotification(NotifyBase):
# set our default notification format
notify_format = NotifyFormat.MARKDOWN
def __init__(self, **kwargs):
super(MarkDownNotification, self).__init__()
def notify(self, **kwargs):
# Pretend everything is okay
return True
def url(self):
# Support URL
return ''
# Store our notifications into our schema map
SCHEMA_MAP['text'] = TextNotification
SCHEMA_MAP['html'] = HtmlNotification
SCHEMA_MAP['markdown'] = MarkDownNotification
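    # The add() calls below resolve their URL schemas (text://, html://,
    # markdown://) to the classes registered in SCHEMA_MAP above.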
    # Register one server per format; a plugin whose notify_format is HTML
    # triggers a markdown conversion when the body_format specified on the
    # notify() call is markdown
assert a.add('html://localhost') is True
assert a.add('html://another.server') is True
assert a.add('html://and.another') is True
assert a.add('text://localhost') is True
assert a.add('text://another.server') is True
assert a.add('text://and.another') is True
assert a.add('markdown://localhost') is True
assert a.add('markdown://another.server') is True
assert a.add('markdown://and.another') is True
assert len(a) == 9
assert a.notify(
title="markdown", body="## Testing Markdown",
body_format=NotifyFormat.MARKDOWN) is True
assert a.notify(
title="text", body="Testing Text",
body_format=NotifyFormat.TEXT) is True
assert a.notify(
title="html", body="<b>HTML</b>",
body_format=NotifyFormat.HTML) is True
def test_apprise_asset(tmpdir):
"""
API: AppriseAsset() object
"""
a = AppriseAsset(theme='light')
# Default theme
assert a.theme == 'light'
# Invalid kw handling
with pytest.raises(AttributeError):
AppriseAsset(invalid_kw='value')
a = AppriseAsset(
theme='dark',
image_path_mask='/{THEME}/{TYPE}-{XY}{EXTENSION}',
image_url_mask='http://localhost/{THEME}/{TYPE}-{XY}{EXTENSION}',
)
a.default_html_color = '#abcabc'
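    # '#abcabc' decodes to RGB (0xab, 0xca, 0xbc) == (171, 202, 188) and to the
    # integer 0xabcabc == 11258556, which is what the tuple/int lookups below
    # fall back to for an invalid notification type.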
assert a.color('invalid', tuple) == (171, 202, 188)
assert a.color(NotifyType.INFO, tuple) == (58, 163, 227)
assert a.color('invalid', int) == 11258556
assert a.color(NotifyType.INFO, int) == 3843043
assert a.color('invalid', None) == '#abcabc'
assert a.color(NotifyType.INFO, None) == '#3AA3E3'
# None is the default
assert a.color(NotifyType.INFO) == '#3AA3E3'
# Invalid Type
with pytest.raises(ValueError):
# The exception we expect since dict is not supported
a.color(NotifyType.INFO, dict)
assert a.image_url(NotifyType.INFO, NotifyImageSize.XY_256) == \
'http://localhost/dark/info-256x256.png'
assert a.image_path(
NotifyType.INFO,
NotifyImageSize.XY_256,
must_exist=False) == '/dark/info-256x256.png'
# This path doesn't exist so image_raw will fail (since we just
    # randomly picked it for testing)
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None
assert a.image_path(
NotifyType.INFO,
NotifyImageSize.XY_256,
must_exist=True) is None
# Create a new object (with our default settings)
a = AppriseAsset()
# Our default configuration can access our file
assert a.image_path(
NotifyType.INFO,
NotifyImageSize.XY_256,
must_exist=True) is not None
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None
# Create a temporary directory
sub = tmpdir.mkdir("great.theme")
# Write a file
sub.join("{0}-{1}.png".format(
NotifyType.INFO,
NotifyImageSize.XY_256,
)).write("the content doesn't matter for testing.")
# Create an asset that will reference our file we just created
a = AppriseAsset(
theme='great.theme',
image_path_mask='%s/{THEME}/{TYPE}-{XY}.png' % dirname(sub.strpath),
)
    # We'll be able to read the file we just created
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None
# We can retrieve the filename at this point even with must_exist set
# to True
assert a.image_path(
NotifyType.INFO,
NotifyImageSize.XY_256,
must_exist=True) is not None
# Test case where we can't access the image file
if sys.version_info.major <= 2:
# Python v2.x
with mock.patch('__builtin__.open', side_effect=OSError()):
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None
        # Our content is retrievable again
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None
else:
# Python >= v3.x
with mock.patch('builtins.open', side_effect=OSError()):
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None
        # Our content is retrievable again
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is not None
# Disable all image references
a = AppriseAsset(image_path_mask=False, image_url_mask=False)
# We always return none in these calls now
assert a.image_raw(NotifyType.INFO, NotifyImageSize.XY_256) is None
assert a.image_url(NotifyType.INFO, NotifyImageSize.XY_256) is None
assert a.image_path(NotifyType.INFO, NotifyImageSize.XY_256,
must_exist=False) is None
assert a.image_path(NotifyType.INFO, NotifyImageSize.XY_256,
must_exist=True) is None
# Test our default extension out
a = AppriseAsset(
image_path_mask='/{THEME}/{TYPE}-{XY}{EXTENSION}',
image_url_mask='http://localhost/{THEME}/{TYPE}-{XY}{EXTENSION}',
default_extension='.jpeg',
)
assert a.image_path(
NotifyType.INFO,
NotifyImageSize.XY_256,
must_exist=False) == '/default/info-256x256.jpeg'
assert a.image_url(
NotifyType.INFO,
NotifyImageSize.XY_256) == \
'http://localhost/default/info-256x256.jpeg'
# extension support
assert a.image_path(
NotifyType.INFO,
NotifyImageSize.XY_128,
must_exist=False,
extension='.ico') == '/default/info-128x128.ico'
assert a.image_url(
NotifyType.INFO,
NotifyImageSize.XY_256,
extension='.test') == \
'http://localhost/default/info-256x256.test'
def test_apprise_details():
"""
API: Apprise() Details
"""
# Reset our matrix
__reset_matrix()
# This is a made up class that is just used to verify
class TestDetailNotification(NotifyBase):
"""
This class is used to test various configurations supported
"""
# Minimum requirements for a plugin to produce details
service_name = 'Detail Testing'
# The default simple (insecure) protocol (used by NotifyMail)
protocol = 'details'
# Set test_bool flag
always_true = True
always_false = False
# Define object templates
templates = (
'{schema}://{host}',
'{schema}://{host}:{port}',
'{schema}://{user}@{host}:{port}',
'{schema}://{user}:{pass}@{host}:{port}',
)
        # Define our tokens; these are the minimum tokens required to
# be passed into this function (as arguments). The syntax appends any
# previously defined in the base package and builds onto them
template_tokens = dict(NotifyBase.template_tokens, **{
'notype': {
# Nothing defined is still valid
},
'regex_test01': {
'name': _('RegexTest'),
'type': 'string',
'regex': r'[A-Z0-9]',
},
'regex_test02': {
'name': _('RegexTest'),
# Support regex options too
'regex': (r'[A-Z0-9]', 'i'),
},
'regex_test03': {
'name': _('RegexTest'),
# Support regex option without a second option
'regex': (r'[A-Z0-9]'),
},
'regex_test04': {
# this entry would just end up getting removed
'regex': None,
},
# List without delimiters (causes defaults to kick in)
'mylistA': {
'name': 'fruit',
'type': 'list:string',
},
# A list with a delimiter list
'mylistB': {
'name': 'softdrinks',
'type': 'list:string',
'delim': ['|', '-'],
},
})
template_args = dict(NotifyBase.template_args, **{
# Test _exist_if logic
'test_exists_if_01': {
'name': 'Always False',
'type': 'bool',
# Provide a default
'default': False,
                # Base the existence of this key/value entry on the lookup
# of this class value at runtime. Hence:
# if not NotifyObject.always_false
# del this_entry
#
'_exists_if': 'always_false',
},
# Test _exist_if logic
'test_exists_if_02': {
'name': 'Always True',
'type': 'bool',
# Provide a default
'default': False,
                # Base the existence of this key/value entry on the lookup
# of this class value at runtime. Hence:
# if not NotifyObject.always_true
# del this_entry
#
'_exists_if': 'always_true',
},
# alias_of testing
'test_alias_of': {
'alias_of': 'mylistB',
'delim': ('-', ' ')
}
})
def url(self):
# Support URL
return ''
def notify(self, **kwargs):
# Pretend everything is okay (so we don't break other tests)
return True
# Store our good detail notification in our schema map
SCHEMA_MAP['details'] = TestDetailNotification
# Create our Apprise instance
a = Apprise()
# Dictionary response
assert isinstance(a.details(), dict)
# Reset our matrix
__reset_matrix()
__load_matrix()
def test_apprise_details_plugin_verification():
"""
API: Apprise() Details Plugin Verification
"""
# Reset our matrix
__reset_matrix()
__load_matrix()
a = Apprise()
# Details object
details = a.details()
# Dictionary response
assert isinstance(details, dict)
# Details object with language defined:
details = a.details(lang='en')
# Dictionary response
assert isinstance(details, dict)
# Details object with unsupported language:
details = a.details(lang='xx')
# Dictionary response
assert isinstance(details, dict)
# Apprise version
assert 'version' in details
assert details.get('version') == __version__
# Defined schemas identify each plugin
assert 'schemas' in details
assert isinstance(details.get('schemas'), list)
# We have an entry per defined plugin
assert 'asset' in details
assert isinstance(details.get('asset'), dict)
assert 'app_id' in details['asset']
assert 'app_desc' in details['asset']
assert 'default_extension' in details['asset']
assert 'theme' in details['asset']
assert 'image_path_mask' in details['asset']
assert 'image_url_mask' in details['asset']
assert 'image_url_logo' in details['asset']
# Valid Type Regular Expression Checker
# Case Sensitive and MUST match the following:
is_valid_type_re = re.compile(
r'((choice|list):)?(string|bool|int|float)')
# match tokens found in templates so we can cross reference them back
# to see if they have a matching argument
template_token_re = re.compile(r'{([^}]+)}[^{]*?(?=$|{)')
# Define acceptable map_to arguments that can be tied in with the
# kwargs function definitions.
valid_kwargs = set([
# General Parameters
'user', 'password', 'port', 'host', 'schema', 'fullpath',
# NotifyBase parameters:
'format', 'overflow',
# URLBase parameters:
'verify', 'cto', 'rto',
])
    # Valid Schema
'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_get
)
def __storage_project_disk_list(
self,
project_id,
location_id,
**kwargs
):
"""List storage/disk # noqa: E501
List disk # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.storage_project_disk_list(project_id, location_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
Keyword Args:
name (str): Filter by name. [optional]
vm (str): Filter by vm. [optional]
tag_value (str): Filter by tag.value. [optional]
tag_key (str): Filter by tag.key. [optional]
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Disk]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
return self.call_with_http_info(**kwargs)
self.storage_project_disk_list = _Endpoint(
settings={
'response_type': ([Disk],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk',
'operation_id': 'storage_project_disk_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'name',
'vm',
'tag_value',
'tag_key',
],
'required': [
'project_id',
'location_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'name':
(str,),
'vm':
(str,),
'tag_value':
(str,),
'tag_key':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'name': 'name',
'vm': 'vm',
'tag_value': 'tag.value',
'tag_key': 'tag.key',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'name': 'query',
'vm': 'query',
'tag_value': 'query',
'tag_key': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_list
)
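        # Example call pattern for this endpoint (values are placeholders,
        # mirroring the docstring above):
        #   disks = api.storage_project_disk_list("my-project", "my-location")
        #   thread = api.storage_project_disk_list("my-project", "my-location",
        #                                          async_req=True)
        #   disks = thread.get()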
def __storage_project_disk_metric_get(
self,
project_id,
location_id,
disk_id,
metric_id,
**kwargs
):
"""Get storage/disk.metric # noqa: E501
Get storage/disk.metric # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.storage_project_disk_metric_get(project_id, location_id, disk_id, metric_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
disk_id (str): Disk Id
metric_id (str): metricId
Keyword Args:
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Metric
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['disk_id'] = \
disk_id
kwargs['metric_id'] = \
metric_id
return self.call_with_http_info(**kwargs)
self.storage_project_disk_metric_get = _Endpoint(
settings={
'response_type': (Metric,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/metric/{metricId}',
'operation_id': 'storage_project_disk_metric_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
'metric_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
'metric_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
'metric_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
'metric_id': 'metricId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
'metric_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_metric_get
)
def __storage_project_disk_metric_list(
self,
project_id,
location_id,
disk_id,
**kwargs
):
"""List storage/disk.metric # noqa: E501
List storage/disk.metric # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.storage_project_disk_metric_list(project_id, location_id, disk_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
disk_id (str): Disk Id
Keyword Args:
                _return_http_data_only (bool): return the response data only,
                    without the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
                    number is provided, it will be the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Metric]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['disk_id'] = \
disk_id
return self.call_with_http_info(**kwargs)
self.storage_project_disk_metric_list = _Endpoint(
settings={
'response_type': ([Metric],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/storage/{locationId}/project/{projectId}/disk/{diskId}/metric',
'operation_id': 'storage_project_disk_metric_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'disk_id',
],
'required': [
'project_id',
'location_id',
'disk_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'disk_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'disk_id': 'diskId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'disk_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__storage_project_disk_metric_list
)
def __storage_project_disk_metric_point_list(
self,
project_id,
location_id,
disk_id,
metric_id,
**kwargs
):
"""List storage/disk.point # noqa: E501
List storage/disk.point # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.storage_project_disk_metric_point_list(project_id, location_id, disk_id, metric_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
disk_id (str): Disk Id
metric_id (str): metricId
Keyword Args:
interval (str): interval. [optional]
timespan (str): timespan. [optional]
                _return_http_data_only (bool): return the response data only,
                    without the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
                    number is provided, it will be the total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
# pyshgp/push/instruction.py
"""Concrete implementations of the Instruction Atom type."""
from abc import ABC, abstractmethod
from typing import Callable, Set, Sequence
from pyshgp.push.atoms import InstructionMeta
from pyshgp.push.config import PushConfig
from pyshgp.push.type_library import RESERVED_PSEUDO_STACKS
from pyshgp.push.state import PushState
from pyshgp.utils import Token
class Instruction(ABC):
"""A function in the Push language used to modify the PushState.
The Instruction class is the abstract base class for specific implementations
    that are configured differently. For example, see SimpleInstruction versus
TakesStateInstruction.
Parameters
----------
name : str,
A unique name for the instruction.
code_blocks : int
The number of CodeBlocks to open following the instruction in a Genome.
docstring : str, optional
        A string describing the behavior of the Instruction.
Attributes
----------
name : str,
A unique name for the instruction.
code_blocks : int
The number of CodeBlocks to open following the instruction in a Genome.
docstring : str, optional
        A string describing the behavior of the Instruction.
"""
__slots__ = ["name", "code_block", "docstring"]
def __init__(self, name: str, code_blocks: int, docstring="Write me!"):
self.name = name
self.code_blocks = code_blocks
self.docstring = docstring
@abstractmethod
def evaluate(self, push_state: PushState, push_config: PushConfig = None) -> PushState:
"""Evaluate the instruction on the given PushState.
Parameters
----------
push_state: pyshgp.push.state.PushState
The PushState to run the instruction on.
        push_config: pyshgp.push.config.PushConfig
The configuration of the Push language.
Returns
-------
pyshgp.push.state.PushState
Return the given state, possibly modified by the Instruction.
"""
pass
@abstractmethod
def required_stacks(self) -> Set[str]:
"""Return a list of PushType names relevant to the instruction."""
pass
def meta(self) -> InstructionMeta:
"""Create an ``InstructionMeta`` from the instruction object."""
return InstructionMeta(name=self.name, code_blocks=self.code_blocks)
def __eq__(self, other):
if type(self) == type(other):
return self.name == other.name
return False
def __hash__(self):
return self.name.__hash__()
def __repr__(self):
return "Instruction<{n}>".format(n=self.name)
def _check_is_seq(x, source):
if not isinstance(x, Sequence):
raise ValueError("Instruction result must be a sequence. {i} gave {t}.".format(
i=source,
t=type(x)
))
class SimpleInstruction(Instruction):
"""A simple instruction implementation.
A SimpleInstruction uses a standardized way of manipulating PushStates. In
other words, it handles popping its own function arguments and pushing the
function return values.
The first step of evaluating a SimpleInstruction is to pop the arguments
    from the stacks corresponding to the instruction's ``input_stacks`` list.
If multiple occurrences of the same type are in ``input_stacks``, items are
taken from progressively deeper in that stack. If the stacks of the
PushState do not contain a sufficient number of items, the instruction does
not modify the PushState.
The popped arguments are then passed to the instruction's function to produce
a tuple of outputs. It is crucial that the instruction's function produce a
tuple of outputs, even if it only contains a single element. The elements of
the tuple are then routed to the corresponding stacks specified in the
instruction's ``output_stacks``.
Parameters
----------
name : str,
A unique name for the instruction.
f : Callable
A function whose signature matches input_stacks and output_stacks.
input_stacks : Sequence[str]
A list of PushType names to use when popping arguments from the PushState.
output_stacks : Sequence[str]
A list of PushType names to use when pushing function results to the PushState.
code_blocks : int
The number of CodeBlocks to open following the instruction in a Genome.
docstring : str, optional
        A string describing the behavior of the Instruction.
"""
__slots__ = ["name", "f", "input_stacks", "output_stacks", "code_blocks", "docstring"]
def __init__(self,
name: str,
f: Callable,
input_stacks: Sequence[str],
output_stacks: Sequence[str],
code_blocks: int,
docstring="Write me!"):
super().__init__(name, code_blocks, docstring)
self.f = f
self.input_stacks = input_stacks
self.output_stacks = output_stacks
def evaluate(self, push_state: PushState, push_config: PushConfig = None) -> PushState:
"""Evaluate the instruction on the given PushState. Return mutated State.
A SimpleInstruction infers which values to pop and push from the stack
based on its `input_stacks` and `output_stacks`.
Parameters
----------
push_state : PushState
Push state to modify with the Instruction.
        push_config : pyshgp.push.config.PushConfig
Configuration of the interpreter. Used to get various limits.
Returns
-------
PushState
Return the given state, possibly modified by the Instruction.
"""
# Pull args, if present.
args = push_state.observe_stacks(self.input_stacks)
if Token.no_stack_item in args:
return push_state
# Compute result, return if revert or response too big.
result = self.f(*args)
if result is Token.revert:
return push_state
_check_is_seq(result, self)
# Remove arguments, push results.
push_state.pop_from_stacks(self.input_stacks)
push_state.push_to_stacks(result, self.output_stacks)
return push_state
def required_stacks(self) -> Set[str]:
"""Return a list of PushType names relevant to the instruction.
Based on the the instructions input and output types.
"""
return set(self.input_stacks + self.output_stacks) - RESERVED_PSEUDO_STACKS
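# Illustrative sketch (not part of the original module): a minimal SimpleInstruction
# for integer addition, assuming the type library provides an "int" stack. The
# instruction name and helper function are hypothetical and exist only to show the
# expected shape: the function's arity matches ``input_stacks`` and it returns a
# tuple even though there is a single output value.
def _example_int_add(a, b):
    # Receives the two popped ints and returns their sum as a one-element tuple.
    return a + b,
_example_int_add_instr = SimpleInstruction(
    name="int_add_example",
    f=_example_int_add,
    input_stacks=["int", "int"],
    output_stacks=["int"],
    code_blocks=0,
    docstring="Push the sum of the top two ints (illustrative example only).",
)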
class StateToStateInstruction(Instruction):
"""Instruction that takes entire PushState and returns entire PushState."""
__slots__ = ["name", "f", "stacks_used", "code_blocks", "docstring"]
def __init__(self,
name: str,
f: Callable,
stacks_used: Sequence[str],
code_blocks: int,
docstring="Write me!"):
super().__init__(name, code_blocks, docstring)
self.f = f
self.stacks_used = set(stacks_used)
def evaluate(self, push_state: PushState, push_config: PushConfig = None) -> PushState:
"""Evaluate the instruction on the given PushState. Return mutated State.
        A StateToStateInstruction passes the entire PushState to its function
        and replaces the state with whatever the function returns.
Parameters
----------
push_state : PushState
Push state to modify with the Instruction.
push_config : PushConfig
Configuration of the interpreter. Used to get various limits.
Returns
-------
PushState
Return the given state, possibly modified by the Instruction.
"""
result = self.f(push_state)
if result == Token.revert:
return push_state
else:
return result
def required_stacks(self) -> Set[str]:
"""Return a list of PushType names relevant to the instruction."""
return self.stacks_used - RESERVED_PSEUDO_STACKS
class TakesStateInstruction(Instruction):
"""Instruction that takes entire PushState and returns particular values.
The function of a TakesStateInstruction accepts an entire PushState as input
    and produces either a ``Token.revert`` or a tuple of output values. It is
crucial that the instruction's function produce a tuple of outputs, even if
it only contains a single element.
The elements of the tuple are then routed to the corresponding stacks
specified in the instruction's ``output_stacks``.
Additional PushTypes utilized by the instruction are denoted in ``other_stacks``.
Parameters
----------
name : str,
A unique name for the instruction.
f : Callable
A function that takes a PushState as input and produces values corresponding to ``output_stacks.``
output_stacks : Sequence[str]
A list of PushType names to use when pushing function results to the PushState.
other_stacks : Sequence[str]
A list of additional PushType names used by the Instruction's function.
code_blocks : int
The number of CodeBlocks to open following the instruction in a Genome.
docstring : str, optional
        A string describing the behavior of the Instruction.
"""
__slots__ = ["name", "f", "output_stacks", "other_stacks", "code_blocks", "docstring"]
def __init__(self,
name: str,
f: Callable,
output_stacks: Sequence[str],
other_stacks: Sequence[str],
code_blocks: int,
docstring="Write me!"):
super().__init__(name, code_blocks, docstring)
self.f = f
self.output_stacks = output_stacks
self.other_stacks = other_stacks
def evaluate(self, push_state: PushState, push_config: PushConfig = None) -> PushState:
"""Evaluate the instruction on the given PushState. Return mutated State.
        A TakesStateInstruction passes the entire PushState to its function and
        routes the returned values to the stacks named in its `output_stacks`.
Parameters
----------
push_state : PushState
Push state to modify with the Instruction.
push_config : PushConfig
Configuration of the interpreter. Used to get various limits.
Returns
-------
PushState
Return the given state, possibly modified by the Instruction.
"""
# Compute result. State should be modified in place during function.
result = self.f(push_state)
# Return if revert.
if result is Token.revert:
return push_state
_check_is_seq(result, self)
# Push results.
push_state.push_to_stacks(result, self.output_stacks)
return push_state
def required_stacks(self) -> Set[str]:
"""Return a list of PushType names relevant to the instruction."""
return set(self.other_stacks + self.output_stacks) - RESERVED_PSEUDO_STACKS
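# Illustrative sketch (not part of the original module): a minimal
# TakesStateInstruction. Its function receives the whole PushState and must return
# a tuple that is routed to ``output_stacks``; the "bool" stack name and the
# instruction name below are assumptions used only for illustration.
def _example_push_true(state):
    # A real instruction would inspect ``state`` here; this one just emits a flag.
    return True,
_example_push_true_instr = TakesStateInstruction(
    name="push_true_example",
    f=_example_push_true,
    output_stacks=["bool"],
    other_stacks=[],
    code_blocks=0,
    docstring="Push True onto the bool stack (illustrative example only).",
)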
class ProducesManyOfTypeInstruction(Instruction):
"""Instruction that produces arbitrarily many values of a given PushType.
    ProducesManyOfTypeInstructions pop their arguments in the same way as
    SimpleInstructions. Items are popped from the stacks corresponding to the
types denoted in the ``input_stacks`` list. If multiple occurrences of the
same type are in ``input_stacks``, items are taken from progressively deeper
in that stack. If the stacks of the PushState do not contain a sufficient
number of items, the instruction does not modify the PushState.
The popped arguments are then passed to the instruction's function to produce
a tuple of outputs. It is crucial that the instruction's function produce a
tuple of outputs, even if it only contains a single element. | |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019-2021 Terbau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import asyncio
import aioxmpp
import re
import functools
import datetime
from typing import (TYPE_CHECKING, Optional, Any, List, Dict, Union, Tuple,
Awaitable, Type)
from collections import OrderedDict
from .enums import Enum
from .errors import PartyError, Forbidden, HTTPException, NotFound
from .user import User
from .friend import Friend
from .enums import (PartyPrivacy, PartyDiscoverability, PartyJoinability,
DefaultCharactersChapter2, Region, ReadyState, Platform)
from .utils import MaybeLock
if TYPE_CHECKING:
from .client import Client
class EditEntry:
def __init__(self, func: Awaitable, *args: Any, name: Optional[str] = None, **kwargs: Any) -> None:
if not asyncio.iscoroutinefunction(func):
raise TypeError('EditEntry function must be coroutine')
self.func = func
self.name = name or func.__qualname__
self.args = args
self.keywords = kwargs
def __repr__(self) -> str:
return ('<EditEntry func={0.func!r} name={0.name!r} '
'args={0.args!r} '
'kwargs={0.keywords!r}>'.format(self))
def __call__(self) -> Awaitable:
return self.func(*self.args, **self.keywords)
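# Illustrative sketch (not part of the library): EditEntry wraps a coroutine plus
# its arguments so the edit can be re-applied later. The coroutine and the
# ``party`` object below are hypothetical and only show the call shape;
# ``await entry()`` re-invokes the wrapped coroutine with the stored arguments.
#
#     async def _apply_custom_key(party, key):
#         await party.set_custom_key(key)
#
#     entry = EditEntry(_apply_custom_key, party, 'myawesomekey', name='custom_key')
#     await entry()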
class SquadAssignment:
"""Represents a party members squad assignment. A squad assignment
is basically a piece of information about which position a member
has in the party, which is directly related to party teams.
Parameters
----------
position: Optional[:class:`int`]
The position a member should have in the party. If no position
is passed, a position will be automatically given according to
the position priorities set.
hidden: :class:`bool`
Whether or not the member should be hidden in the party.
.. warning::
Being hidden is not a native fortnite feature so be careful
when using this. It might lead to undesirable results.
"""
__slots__ = ('position', 'hidden')
def __init__(self, *,
position: Optional[int] = None,
hidden: bool = False) -> None:
self.position = position
self.hidden = hidden
def __repr__(self):
return ('<SquadAssignment position={0.position!r} '
'hidden={0.hidden!r}>'.format(self))
@classmethod
def copy(cls, assignment):
self = cls.__new__(cls)
self.position = assignment.position
self.hidden = assignment.hidden
return self
class DefaultPartyConfig:
"""Data class for the default party configuration used when a new party
is created.
Parameters
----------
privacy: Optional[:class:`PartyPrivacy`]
| The party privacy that should be used.
| Defaults to: :attr:`PartyPrivacy.PUBLIC`
max_size: Optional[:class:`int`]
        | The maximum party size. Valid party sizes must be a value
between 1 and 16.
| Defaults to ``16``
chat_enabled: Optional[:class:`bool`]
        | Whether or not the party chat should be enabled for the party.
| Defaults to ``True``.
team_change_allowed: :class:`bool`
| Whether or not players should be able to manually swap party team
with another player. This setting only works if the client is the
leader of the party.
| Defaults to ``True``
default_squad_assignment: :class:`SquadAssignment`
| The default squad assignment to use for new members. Squad assignments
          hold information about a party member's current position and visibility.
Please note that setting a position in the default squad assignment
          doesn't actually do anything and it will just be overridden.
| Defaults to ``SquadAssignment(hidden=False)``.
position_priorities: List[int]
| A list of exactly 16 ints all ranging from 0-15. When a new member
joins the party or a member is not defined in a squad assignment
request, it will automatically give the first available position
in this list.
| Defaults to a list of 0-15 in order.
reassign_positions_on_size_change: :class:`bool`
| Whether or not positions should be automatically reassigned if the party
size changes. Set this to ``False`` if you want members to keep their
positions unless manually changed. The reassignment is done according
to the position priorities.
| Defaults to ``True``.
joinability: Optional[:class:`PartyJoinability`]
| The joinability configuration that should be used.
| Defaults to :attr:`PartyJoinability.OPEN`
discoverability: Optional[:class:`PartyDiscoverability`]
| The discoverability configuration that should be used.
| Defaults to :attr:`PartyDiscoverability.ALL`
invite_ttl: Optional[:class:`int`]
| How many seconds the invite should be valid for before
automatically becoming invalid.
| Defaults to ``14400``
sub_type: Optional[:class:`str`]
| The sub type the party should use.
| Defaults to ``'default'``
party_type: Optional[:class:`str`]
| The type of the party.
| Defaults to ``'DEFAULT'``
cls: Type[:class:`ClientParty`]
| The default party object to use for the client's party. Here you can
specify all class objects that inherits from :class:`ClientParty`.
meta: List[:class:`functools.partial`]
A list of coroutines in the form of partials. This config will be
automatically equipped by the party when a new party is created by the
client.
.. code-block:: python3
from fortnitepy import ClientParty
from functools import partial
[
partial(ClientParty.set_custom_key, 'myawesomekey'),
partial(ClientParty.set_playlist, 'Playlist_PlaygroundV2', region=fortnitepy.Region.EUROPE)
]
Attributes
----------
team_change_allowed: :class:`bool`
Whether or not players are able to manually swap party team
with another player. This setting only works if the client is the
leader of the party.
default_squad_assignment: :class:`SquadAssignment`
The default squad assignment to use for new members and members
not specified in manual squad assignments requests.
position_priorities: List[:class:`int`]
        A list containing exactly 16 integers ranging from 0-15 with no
duplicates. This is used for position assignments.
reassign_positions_on_size_change: :class:`bool`
Whether or not positions will be automatically reassigned when the
party size changes.
cls: Type[:class:`ClientParty`]
The default party object used to represent the client's party.
""" # noqa
def __init__(self, **kwargs: Any) -> None:
self.cls = kwargs.pop('cls', ClientParty)
self._client = None
self.team_change_allowed = kwargs.pop('team_change_allowed', True)
self.default_squad_assignment = kwargs.pop(
'default_squad_assignment',
SquadAssignment(hidden=False),
)
value = kwargs.pop('position_priorities', None)
if value is None:
self._position_priorities = list(range(16))
else:
self.position_priorities = value
self.reassign_positions_on_size_change = kwargs.pop(
'reassign_positions_on_size_change',
True
)
self.meta = kwargs.pop('meta', [])
self._config = {}
self.update(kwargs)
@property
def position_priorities(self):
return self._position_priorities
@position_priorities.setter
def position_priorities(self, value):
def error():
raise ValueError(
'position priorities must include exactly 16 integers '
                'ranging from 0-15.'
)
if len(value) != 16:
error()
for i in range(16):
if i not in value:
error()
self._position_priorities = value
def _inject_client(self, client: 'Client') -> None:
self._client = client
@property
def config(self) -> Dict[str, Any]:
self._client._check_party_confirmation()
return self._config
def update(self, config: Dict[str, Any]) -> None:
default = {
'privacy': PartyPrivacy.PUBLIC.value,
'joinability': PartyJoinability.OPEN.value,
'discoverability': PartyDiscoverability.ALL.value,
'max_size': 16,
'invite_ttl_seconds': 14400,
'chat_enabled': True,
'join_confirmation': False,
'sub_type': 'default',
'type': 'DEFAULT',
}
to_update = {}
for key, value in config.items():
if isinstance(value, Enum):
to_update[key] = value.value
default_config = {**default, **self._config}
self._config = {**default_config, **config, **to_update}
def _update_privacy(self, args: list) -> None:
for arg in args:
if isinstance(arg, PartyPrivacy):
if arg.value['partyType'] == 'Private':
include = {
'discoverability': PartyDiscoverability.INVITED_ONLY.value, # noqa
'joinability': PartyJoinability.INVITE_AND_FORMER.value, # noqa
}
else:
include = {
'discoverability': PartyDiscoverability.ALL.value,
'joinability': PartyJoinability.OPEN.value,
}
self.update({'privacy': arg, **include})
break
def update_meta(self, meta: List[functools.partial]) -> None:
names = []
results = []
unfiltered = [*meta[::-1], *self.meta[::-1]]
for elem in unfiltered:
coro = elem.func
if coro.__qualname__ not in names:
                # Very hacky solution but it's needed to update the privacy
                # in .config since updating privacy doesn't work as expected
# when updating with an "all patch" strategy like other props.
if coro.__qualname__ == 'ClientParty.set_privacy':
self._update_privacy(elem.args)
names.append(coro.__qualname__)
results.append(elem)
if not (asyncio.iscoroutine(coro)
or asyncio.iscoroutinefunction(coro)):
raise TypeError('meta must be list containing partials '
'of coroutines')
self.meta = results
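# Illustrative sketch (not part of the library): a DefaultPartyConfig for private,
# four-player parties with chat enabled. The keyword names mirror the parameters
# documented above; ``PartyPrivacy.PRIVATE`` is assumed to be available from .enums.
#
#     config = DefaultPartyConfig(
#         privacy=PartyPrivacy.PRIVATE,
#         max_size=4,
#         chat_enabled=True,
#         team_change_allowed=False,
#     )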
class DefaultPartyMemberConfig:
"""Data class for the default party member configuration used when the
client joins a party.
Parameters
----------
cls: Type[:class:`ClientPartyMember`]
The default party member object to use to represent the client as a
        party member. Here you can specify any class that inherits from
:class:`ClientPartyMember`.
The library has two out of the box objects that you can use:
- :class:`ClientPartyMember` *(Default)*
- :class:`JustChattingClientPartyMember`
yield_leadership: :class:`bool`:
        Whether or not the client should promote another member automatically
whenever there is a chance to.
Defaults to | |
or the main thread.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4208")
#
# ------------ wait-for-cycle ------------
#
def wait_for_cycle_command(obj, cycle):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print ("The wait-for-cycle command is only allowed "
"in script branches.")
else:
try:
wait_for_obj_hap("Core_Cycle_Count", obj, cycle)
except SimExc_Break:
print "Command '%s.wait-for-cycle' interrupted." % obj.name
SIM_command_has_problem()
new_command("wait-for-cycle", wait_for_cycle_command,
[arg(int_t, 'cycle')],
namespace = "processor",
short = "wait until reaching cycle",
see_also = ["script-branch",
"wait-for-variable",
"wait-for-hap",
"<text-console>.wait-for-string",
"<processor>.wait-for-step"],
doc = """
Postpones execution of a script branch until the processor reaches the
specified cycle in the simulation.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4237")
#
# ------------ wait-for-step ------------
#
def wait_for_step_command(obj, step):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print ("The wait-for-step command is only allowed "
"in script branches.")
else:
try:
wait_for_obj_hap("Core_Step_Count", obj, step)
except SimExc_Break:
print "Command '%s.wait-for-step' interrupted." % obj.name
SIM_command_has_problem()
new_command("wait-for-step", wait_for_step_command,
[arg(int_t, 'step')],
namespace = "processor",
short = "wait until reaching step",
see_also = ["script-branch",
"wait-for-variable",
"wait-for-hap",
"<text-console>.wait-for-string",
"<processor>.wait-for-cycle"],
doc = """
Postpones execution of a script branch until the processor reaches the
specified step in the simulation.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4267")
#
# ------------ wait-for-hap ------------
#
def wait_for_hap_command(hap, obj, idx0, idx1, ret):
if conf.python.iface.python.in_main_branch():
SIM_command_has_problem()
print "The wait-for-hap command is only allowed in script branches."
else:
try:
r = wait_for_obj_hap(hap, obj, idx0, idx1)
except SimExc_Break:
print "Command 'wait-for-hap' interrupted."
SIM_command_has_problem()
if ret:
for i in range(len(r)):
if type(r[i]) == type(conf.sim):
r[i] = r[i].name
simenv.set_variable_value_idx(ret, r[i], 1, i)
new_command("wait-for-hap", wait_for_hap_command,
[arg(str_t, 'hap', '', expander = hap_expander),
arg(obj_t('object'), "object", '?', None),
arg(int_t, 'idx0', '?', -1),
arg(int_t, 'idx1', '?', -1),
arg(str_t, 'ret', '?')],
short = "wait until hap occurs",
see_also = ["script-branch",
"wait-for-variable",
"<text-console>.wait-for-string",
"<processor>.wait-for-cycle",
"<processor>.wait-for-step"],
doc = """
Postpones execution of a script branch until <arg>hap</arg> occurs. The
optional argument <arg>obj</arg> limits the haps to a specific object, and
<arg>idx0</arg>, <arg>idx1</arg> can be used for indexed and range haps. The
data associated with the hap can be saved into the named variable specified by
<arg>ret</arg>. This variable will be indexed, with local scope.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4301")
def list_script_branch_command():
print "ID Object Hap Hap-ID Function"
for sb in conf.python.script_branches:
if sb[3]:
name = sb[3].name
else:
name = "None"
print "%-3d %-15s %-20s %3d %s" % (sb[0], name, sb[1], sb[2], sb[4])
new_command("list-script-branches", list_script_branch_command,
see_also = ["script-branch", "interrupt-script-branch"],
doc = """
List all currently active script branches.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4330")
def interrupt_script_branch_command(id):
try:
conf.python.iface.python.interrupt_branch(id)
print "Script branch %d interrupted." % id
except Exception, msg:
print "Failed interrupting script branch: %s" % msg
SIM_command_has_problem()
new_command("interrupt-script-branch", interrupt_script_branch_command,
[arg(int_t, 'id')],
see_also = ["script-branch", "list-script-branches"],
doc = """
Send an interrupt exception to a script branch. The argument is the script
branch ID, which is returned by the <cmd>script-branch</cmd> command and is
also listed by the <cmd>list-script-branches</cmd> command. The branch will
wake up and exit when it receives the exception.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4344")
all_tee_objs = []
def tee_handler(file, buf, count):
file.write(buf)
file.flush()
def tee(filename):
global all_tee_objs
try:
file_obj=open(filename, "w")
except:
print "Failed to open '%s' for writing" % filename
SIM_command_has_problem()
return
all_tee_objs.append((filename, file_obj))
SIM_add_output_handler(tee_handler, file_obj)
def remove_tee(filename):
    # Iterate over a copy: entries are removed from all_tee_objs inside the loop,
    # so indexing into the live list would skip entries or raise IndexError.
    for (name, obj) in all_tee_objs[:]:
        if filename == None or name == filename:
            SIM_remove_output_handler(tee_handler, obj)
            obj.close()
            all_tee_objs.remove((name, obj))
            if filename:
                return
    if filename:
        print "Output not enabled to file '%s'" % filename
        SIM_command_has_problem()
new_command("output-file-start", tee,
[ arg(filename_t(), "filename") ],
type = ["Files and Directories"],
short = "send output to file",
see_also = ['output-file-stop'],
doc = """
Send output to <i>filename</i>. Any output displayed in the Simics console that goes through the output handler API will be written to the file.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4384")
new_command("output-file-stop", remove_tee,
[ arg(filename_t(), "filename", "?", None) ],
type = ["Files and Directories"],
short = "stop sending output to file",
see_also = ['output-file-start'],
doc = """
Stop sending output to file. If no filename is given, then the command
will disable all file output.""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4392")
def add_module_path_cmd(path):
VT_add_module_dir(path)
SIM_module_list_refresh()
new_command("add-module-directory", add_module_path_cmd,
[arg(filename_t(dirs = 1), "path")],
type = ["Simics Search Path", "Configuration",
"Files and Directories", "Modules"],
short = "add a directory to the module search path",
doc = """
Adds a directory to the Simics module search path. This path is used to look
for additional modules, that can be used to extend the functionality of Simics.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4405")
def signed_cmd(size, value):
return (value & ((1 << size) - 1)) - ((value << 1) & (1 << size))
new_command("signed", lambda x : signed_cmd(64, x),
[arg(int_t, "int")],
type = ["Command-Line Interface", "Output"],
short = "interpret unsigned integer as signed",
doc = """
Interpret an integer, <param>int</param>, as a signed value of a specific bit
width. For example <cmd>signed16 0xffff</cmd> will return -1. The
<cmd>signed</cmd> command assumes a 64 bit width.
""", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4418")
def _signed_cmd(s):
return lambda x : signed_cmd(s, x)
for i in (8, 16, 32, 64):
new_command("signed%d" % i, _signed_cmd(i),
[arg(int_t, "int")],
type = ["Command-Line Interface", "Output"],
short = "interpret unsigned integer as signed",
doc_with = "signed", filename="/mp/simics-3.0/src/core/common/generic_commands.py", linenumber="4432")
import sim_commands
# asserts that Simics is stopped
def assert_stopped():
assert_cpu()
if SIM_simics_is_running():
raise CliError, "Simics is already running."
# internal for commands
def list_processors():
limit = SIM_number_processors()
print "Current status:"
for i in range(limit):
cpu = SIM_get_processor(i)
print "Processor", cpu.name, iff(SIM_processor_enabled(cpu), "enabled.", "disabled.")
def conf_object_expander(string):
return get_completions(string, conf.all_object_names);
#
# -------------- simics-path commands ----------------
#
def add_directory_cmd(path, prepend):
SIM_add_directory(path, prepend)
new_command("add-directory", add_directory_cmd,
[arg(filename_t(dirs = 1, keep_simics_ref = 1), "path"),
arg(flag_t, "-prepend")],
type = ["Simics Search Path", "Configuration",
"Files and Directories"],
short = "add a directory to the Simics search path",
doc = """
Adds a directory to the Simics search path. The Simics search path is a list of
directories where Simics searches for additional files when loading a
configuration or executing a command like <cmd>load-file</cmd>.
The value of <arg>path</arg> is normally appended at the end of the list. If
the <i>-prepend</i> flag is given, the path will be added as first in the list.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="27")
def print_directories_cmd():
dirs = SIM_get_directories()
print "The current Simics search path is:"
for dir in dirs:
print dir
new_command("list-directories", print_directories_cmd,
[],
type = ["Simics Search Path", "Files and Directories"],
short = "list directories in Simics search path",
doc = """
Print a list of all directories in the Simics search path.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="48")
new_command("print-directories", print_directories_cmd,
[],
short = "print the directory Simics search path",
type = "deprecated commands",
deprecated = "list-directories", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="56")
def clear_directories_cmd():
SIM_clear_directories()
print "Simics search path is now empty."
new_command("clear-directories", clear_directories_cmd,
[],
type = ["Simics Search Path", "Files and Directories"],
short = "clear the Simics search path",
doc = """
Empty the Simics search path.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="66")
def resolve_file_cmd(filename):
file = SIM_lookup_file(filename)
if not file:
print "Not in search path:", filename
else:
return file
new_command("resolve-file", resolve_file_cmd,
[arg(str_t, "filename")],
type = ["Simics Search Path", "Files and Directories"],
short = "resolve a filename",
alias = "lookup-file",
doc = """\
Looks for the file <arg>filename</arg> in the Simics search path. If it
is found, its complete path is returned.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="82")
def native_path_cmd(filename):
return SIM_native_path(filename)
new_command("native-path", native_path_cmd,
[arg(str_t, "filename")],
type = ["Files and Directories"],
short = "convert a filename to host native form",
doc = """\
Converts a path to its host native form. On Unix, this command returns
<arg>filename</arg> unchanged. On Windows, it translates Cygwin-style paths
to native Windows paths. Refer to the documentation of
<b>SIM_native_path()</b> for a detailed description of the conversions
made.
This command can be used for portability when opening files residing on the
host filesystem.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="95")
# Print a table (list of rows, each a list of strings).
# The alignments list specifies how each column should be aligned,
# each entry being "r" or "l". The string 'spacing' is put between columns.
def print_table(table, alignments, spacing):
widths = [max(len(table[row][col])
for row in xrange(len(table)))
for col in xrange(len(table[0]))]
print "\n".join(spacing.join({'l': "%-*s", 'r': "%*s"}[a] % (w, s)
for (s, w, a) in zip(trow, widths,
alignments))
for trow in table)
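# Example (illustrative only): printing a small two-column table with the helper
# above, left-aligning the first column and right-aligning the second:
#
#   print_table([["processor", "cycles"],
#                ["cpu0", "12345"]],
#               ["l", "r"], " | ")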
def print_time_cmd(cpu, steps = False, cycles = False, all = False):
if all:
if steps or cycles:
print "The -s and -c flags cannot be used with -all."
SIM_command_has_problem()
return
cpus = []
next = SIM_next_queue(None)
while next:
cpus.append(next)
next = SIM_next_queue(next)
elif cpu:
cpus = [cpu]
else:
cpus = [current_processor()]
if steps and cycles:
print "The -s and -c flags cannot be used at the same time."
SIM_command_has_problem()
return
elif steps:
return SIM_step_count(cpus[0])
elif cycles:
return SIM_cycle_count(cpus[0])
print_table([["processor", "steps", "cycles", "time [s]"]]
+ [[cpu.name,
number_str(SIM_step_count(cpu), 10),
number_str(SIM_cycle_count(cpu), 10),
"%.3f" % SIM_time(cpu)]
for cpu in cpus],
["l", "r", "r", "r"], " | |
('', 'Other (type below)'),
)
travel_reimbursement = models.CharField(
max_length=STR_MED,
verbose_name='How will instructors\' travel and accommodations be '
'managed?',
choices=TRAVEL_REIMBURSEMENT_CHOICES,
blank=True, default='',
)
travel_reimbursement_other = models.CharField(
max_length=STR_LONG,
verbose_name='Other propositions for managing instructors\' travel and'
' accommodations',
blank=True,
)
comment = models.TextField(
help_text='What else do you want us to know about your workshop? About'
' your attendees? About you?',
verbose_name='Anything else?',
blank=True,
)
def get_absolute_url(self):
return reverse('eventrequest_details', args=[self.pk])
def __str__(self):
return "{name} (from {affiliation}, {type} workshop)".format(
name=self.name, affiliation=self.affiliation,
type=self.workshop_type,
)
class EventSubmission(AssignmentMixin, ActiveMixin, CreatedUpdatedMixin,
models.Model):
url = models.URLField(
null=False, blank=False,
verbose_name='Link to the workshop\'s website')
contact_name = models.CharField(
null=False, blank=False, max_length=STR_LONG,
verbose_name='Your name')
contact_email = models.EmailField(
null=False, blank=False,
verbose_name='Your email',
help_text='We may need to contact you regarding workshop details.')
self_organized = models.BooleanField(
null=False, default=False,
verbose_name='Was the workshop self-organized?')
notes = models.TextField(
null=False, blank=True, default='')
def __str__(self):
return 'Event submission <{}>'.format(self.url)
def get_absolute_url(self):
return reverse('eventsubmission_details', args=[self.pk])
class DCSelfOrganizedEventRequest(AssignmentMixin, ActiveMixin,
CreatedUpdatedMixin, models.Model):
"""Should someone want to run a self-organized Data Carpentry event, they
have to fill this specific form first. See
https://github.com/swcarpentry/amy/issues/761"""
name = models.CharField(
max_length=STR_LONGEST,
)
email = models.EmailField()
organization = models.CharField(
max_length=STR_LONGEST,
verbose_name='University or organization affiliation',
)
INSTRUCTOR_CHOICES = [
('', 'None'),
('incomplete', 'Have gone through instructor training, but haven\'t '
'yet completed checkout'),
('dc', 'Certified Data Carpentry instructor'),
('swc', 'Certified Software Carpentry instructor'),
('both', 'Certified Software and Data Carpentry instructor'),
]
instructor_status = models.CharField(
max_length=STR_MED, choices=INSTRUCTOR_CHOICES,
verbose_name='Your Software and Data Carpentry instructor status',
blank=True,
)
PARTNER_CHOICES = [
('y', 'Yes'),
('n', 'No'),
('u', 'Unsure'),
('', 'Other (enter below)'),
]
is_partner = models.CharField(
max_length=1,
choices=PARTNER_CHOICES,
blank=True,
verbose_name='Is your organization a Data Carpentry or Software '
'Carpentry Partner'
)
is_partner_other = models.CharField(
max_length=STR_LONG,
default='', blank=True,
verbose_name='Other (is your organization a Partner?)',
)
location = models.CharField(
max_length=STR_LONGEST,
verbose_name='Location',
help_text='City, Province or State',
)
country = CountryField()
associated_conference = models.CharField(
max_length=STR_LONG,
default='', blank=True,
verbose_name='Associated conference',
help_text='If the workshop is to be associated with a conference or '
'meeting, which one?',
)
dates = models.CharField(
max_length=STR_LONGEST,
verbose_name='Planned workshop dates',
help_text='Preferably in YYYY-MM-DD to YYYY-MM-DD format',
)
# workshop domain(s)
domains = models.ManyToManyField(
'DCWorkshopDomain',
blank=False,
verbose_name='Domain for the workshop',
help_text='Set of lessons you\'re going to teach',
)
domains_other = models.CharField(
max_length=STR_LONGEST,
blank=True, default='',
verbose_name='Other domains for the workshop',
help_text='If none of the fields above works for you.',
)
# Lesson topics to be taught during the workshop
topics = models.ManyToManyField(
'DCWorkshopTopic',
blank=False,
verbose_name='Topics to be taught',
help_text='A Data Carpentry workshop must include a Data Carpentry '
'lesson on data organization and three other modules in the '
'same domain from the Data Carpentry curriculum (see <a '
'href="http://www.datacarpentry.org/workshops/">http://www.'
'datacarpentry.org/workshops/</a>). If you do want to '
'include materials not in our curriculum, please note that '
'below and we\'ll get in touch.'
)
topics_other = models.CharField(
max_length=STR_LONGEST,
blank=True, default='',
verbose_name='Other topics to be taught',
help_text='If none of the fields above works for you.',
)
# questions about attendees' experience levels
attendee_academic_levels = models.ManyToManyField(
'AcademicLevel',
help_text='If you know the academic level(s) of your attendees, '
'indicate them here.',
verbose_name='Attendees\' academic level',
)
attendee_data_analysis_level = models.ManyToManyField(
'DataAnalysisLevel',
help_text='If you know, indicate learner\'s general level of data '
'analysis experience',
verbose_name='Attendees\' level of data analysis experience',
)
# payments
PAYMENT_CHOICES = [
('per_participant', 'I will contribute $25/participant through '
'registration fees'),
('invoice', 'I will contribute $500 via an invoice'),
('credit_card', 'I will contribute $500 via a credit card payment'),
('fee_waiver', 'I would like to request a fee waiver'),
]
payment = models.CharField(
max_length=STR_MED,
blank=False, choices=PAYMENT_CHOICES,
default='per_participant',
verbose_name='Payment choice',
help_text='Self-organized workshops for non-Partner organizations are '
'$500 or $25/participant for a workshop licensing fee (<a '
'href="http://www.datacarpentry.org/self-organized-workshops'
'/">http://www.datacarpentry.org/self-organized-workshops/'
'</a>). Fee waivers are available and generally granted upon'
' request.',
)
fee_waiver_reason = models.CharField(
max_length=STR_LONGEST,
default='', blank=True,
verbose_name='Reason for requesting a fee waiver',
)
# confirmations
handle_registration = models.BooleanField(
default=False, blank=False,
verbose_name='I confirm that I will handle registration for this'
' workshop',
)
distribute_surveys = models.BooleanField(
default=False, blank=False,
verbose_name='I confirm that I will distribute the Data Carpentry '
'surveys to workshop participants',
)
follow_code_of_conduct = models.BooleanField(
default=False, blank=False,
verbose_name='I confirm that I will follow the Data Carpentry Code of'
' Conduct',
)
def get_absolute_url(self):
return reverse('dcselforganizedeventrequest_details', args=[self.pk])
class AcademicLevel(models.Model):
name = models.CharField(max_length=STR_MED, null=False, blank=False)
def __str__(self):
return self.name
class ComputingExperienceLevel(models.Model):
# it's a long field because we need to store reasoning too, for example:
# "Novice (uses a spreadsheet for data analysis rather than writing code)"
name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)
def __str__(self):
return self.name
class DataAnalysisLevel(models.Model):
# ComputingExperienceLevel's sibling
name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)
def __str__(self):
return self.name
class DCWorkshopTopic(models.Model):
"""Single lesson topic used in a workshop."""
name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)
def __str__(self):
return self.name
class DCWorkshopDomain(models.Model):
"""Single domain used in a workshop (it corresponds to a set of lessons
Data Carpentry prepared)."""
name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)
def __str__(self):
return self.name
#------------------------------------------------------------
class Role(models.Model):
'''Enumerate roles in workshops.'''
name = models.CharField(max_length=STR_MED)
verbose_name = models.CharField(max_length=STR_LONG,
null=False, blank=True, default='')
def __str__(self):
return self.verbose_name
#------------------------------------------------------------
class TaskManager(models.Manager):
def instructors(self):
"""Fetch tasks with role 'instructor'."""
return self.get_queryset().filter(role__name="instructor")
def learners(self):
"""Fetch tasks with role 'learner'."""
return self.get_queryset().filter(role__name="learner")
def helpers(self):
"""Fetch tasks with role 'helper'."""
return self.get_queryset().filter(role__name="helper")
@reversion.register
class Task(models.Model):
'''Represent who did what at events.'''
event = models.ForeignKey(Event)
person = models.ForeignKey(Person)
role = models.ForeignKey(Role)
title = models.CharField(max_length=STR_LONG, blank=True)
url = models.URLField(blank=True)
objects = TaskManager()
class Meta:
unique_together = ('event', 'person', 'role', 'url')
ordering = ("role__name", "event")
def __str__(self):
if self.title:
return self.title
return '{0}/{1}={2}'.format(self.event, self.person, self.role)
def get_absolute_url(self):
return reverse('task_details', kwargs={'task_id': self.id})
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
# Trigger an update of the attendance field
self.event.save()
#------------------------------------------------------------
class Lesson(models.Model):
'''Represent a lesson someone might teach.'''
name = models.CharField(max_length=STR_MED)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
#------------------------------------------------------------
class Qualification(models.Model):
'''What is someone qualified to teach?'''
person = models.ForeignKey(Person)
lesson = models.ForeignKey(Lesson)
def __str__(self):
return '{0}/{1}'.format(self.person, self.lesson)
#------------------------------------------------------------
class BadgeQuerySet(models.query.QuerySet):
"""Custom QuerySet that provides easy way to get instructor badges
(we use that a lot)."""
INSTRUCTOR_BADGES = ('dc-instructor', 'swc-instructor')
def instructor_badges(self):
"""Filter for instructor badges only."""
return self.filter(name__in=self.INSTRUCTOR_BADGES)
class Badge(models.Model):
'''Represent a badge we award.'''
name = models.CharField(max_length=STR_MED, unique=True)
title = models.CharField(max_length=STR_MED)
criteria = models.CharField(max_length=STR_LONG)
objects = BadgeQuerySet.as_manager()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('badge_details', args=[self.name])
#------------------------------------------------------------
class Award(models.Model):
'''Represent a particular badge earned by a person.'''
person = models.ForeignKey(Person)
badge = models.ForeignKey(Badge)
awarded = models.DateField(default=datetime.date.today)
event = models.ForeignKey(Event, null=True, blank=True,
on_delete=models.PROTECT)
awarded_by = models.ForeignKey(
Person, null=True, blank=True, on_delete=models.PROTECT,
related_name='awarded_set')
class Meta:
unique_together = ("person", "badge", )
def __str__(self):
return '{0}/{1}/{2}/{3}'.format(self.person, self.badge, self.awarded, self.event)
#------------------------------------------------------------
class KnowledgeDomain(models.Model):
"""Represent a knowledge domain a person is engaged in."""
name = models.CharField(max_length=STR_LONG)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
# ------------------------------------------------------------
class TodoItemQuerySet(models.query.QuerySet):
@staticmethod
def current_week_dates(today=None):
if not today:
today = datetime.date.today()
start = today - datetime.timedelta(days=today.weekday())
end = start + datetime.timedelta(days=7)
return start, end
@staticmethod
def next_week_dates(today=None):
if not today:
today = datetime.date.today()
start = today + datetime.timedelta(days=(7 - today.weekday()))
end = start + datetime.timedelta(days=7)
return start, end
def user(self, person):
"""Return TODOs only for specific person."""
return self.filter(event__assigned_to=person)
def current_week(self, today=None):
"""Select TODOs for the current week."""
start, end = TodoItemQuerySet.current_week_dates(today)
return self.filter(due__gte=start, due__lt=end)
def next_week(self, today=None):
"""Select TODOs for the next week."""
start, end = TodoItemQuerySet.next_week_dates(today)
return self.filter(due__gte=start, due__lt=end)
def incomplete(self):
"""Select TODOs that aren't marked as completed."""
return self.filter(completed=False)
def current(self, today=None):
"""A shortcut for getting TODOs from this and upcoming week."""
return ((self.current_week(today) | self.next_week(today)) &
self.incomplete())
class TodoItem(models.Model):
"""Model representing to-do items for events."""
event = models.ForeignKey(Event, null=False, blank=False)
completed = models.BooleanField(default=False)
title = models.CharField(max_length=STR_LONG, default='', blank=False)
due = models.DateField(blank=True, null=True)
additional = models.CharField(max_length=STR_LONGEST, default='',
blank=True)
objects = TodoItemQuerySet.as_manager()
class Meta:
ordering = ["due", "title"]
def __str__(self):
from .util import universal_date_format
if self.due:
return "{title} due {due}".format(
title=self.title, due=universal_date_format(self.due),
)
else:
return self.title
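# Illustrative usage (not part of the models): ``current_week()`` selects TODOs due
# from Monday through Sunday of the week containing ``today``; for example, with
# today = 2016-03-02 (a Wednesday) it matches ``due`` dates in [2016-02-29, 2016-03-07):
#
#     todos = TodoItem.objects.current_week(datetime.date(2016, 3, 2))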
# ------------------------------------------------------------
@reversion.register
class InvoiceRequest(models.Model):
STATUS_CHOICES = (
('not-invoiced', 'Invoice not requested'),
('sent', 'Sent out'),
('paid', 'Paid'),
)
status = models.CharField(
max_length=STR_MED, null=False, blank=False, default='not-invoiced',
choices=STATUS_CHOICES,
verbose_name='Invoice status')
sent_date = models.DateField(
null=True, blank=True, verbose_name='Date invoice was sent out',
help_text='YYYY-MM-DD')
paid_date = models.DateField(
null=True, blank=True, verbose_name='Date invoice was paid',
help_text='YYYY-MM-DD')
organization = models.ForeignKey(
Organization, on_delete=models.PROTECT, verbose_name='Organization | |
% (
alpha, cutoff, len(self.scored_filters))
NUM_FILTERS_TO_SHOW = 10
def evaluate(self, *args, **kwargs):
verbosity = kwargs.get('verbosity', 0)
if not self.scored_filters:
if verbosity >= 0:
print (' %s ' % self.name).center(75, '=')
print ' (No filters!)'
return 0
else:
cm_score = ConfidenceMetric.evaluate(self, *args, **kwargs)
if verbosity >= 0 and self.NUM_FILTERS_TO_SHOW>0:
print ' Filters:'
for filter, score in self.scored_filters[:self.NUM_FILTERS_TO_SHOW]:
print textwrap.fill('%8.3f %s' % (score, filter.name),
initial_indent=' '*2,
subsequent_indent=' '*10)
if len(self.scored_filters) > self.NUM_FILTERS_TO_SHOW:
print ' ... %d more filters...' % (
len(self.scored_filters) - self.NUM_FILTERS_TO_SHOW)
return cm_score
def score(self, serif_rel):
return sum(filter_score
for (filter, filter_score) in self.scored_filters
if filter.check(serif_rel))
class ScoredFilterConfidenceMetric2(ScoredFilterConfidenceMetric):
"""
This version picks filters one at a time, and then applies them to
the dev set. That way, the scores assigned to subsequent filters
are appropriately adjusted.
"""
SCORE='fscore'
def __init__(self, apf_relations, serif_relations, filters,
alpha=0.9, num_filters=5, cutoff=0.1):
apf_relmention_dict = make_apf_relmention_dict(apf_relations)
base_p, base_r, base_f = evaluate(serif_relations, apf_relmention_dict)
self.scored_filters = []
for i in range(num_filters):
# Score the filters:
def score_filter(f):
return evaluate(serif_relations, apf_relmention_dict, alpha, f)
scored_filters = [(f, 100.*(score_filter(f)[2]-base_f))
for f in filters]
# Pick the best one:
scored_filters.sort(key=lambda fs:-fs[1])
filter, score = scored_filters[0]
if score <= 0: continue
self.scored_filters.append(scored_filters[0])
# Apply the filter:
serif_relations = [r for r in serif_relations if filter.check(r)]
self.scored_filters.sort(key=lambda fs:-fs[1])
self.name = 'FilterMetric2(alpha=%s; cutoff=%s%%; %d filters)' % (
alpha, cutoff, len(self.scored_filters))
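# Illustrative usage (assuming dev-set relations and a pool of candidate filters
# have already been loaded, as done by load_data() and choose_filter_candidates()
# below):
#
#   metric = ScoredFilterConfidenceMetric2(apf_relations, serif_relations,
#                                          filters, alpha=0.9, num_filters=5)
#   confidence = metric.score(some_serif_relation)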
class MaxentConfidenceMetric(ConfidenceMetric):
name = 'Maxent'
def score(self, serif_rel):
return maxent_score(serif_rel)
class SerifConfidenceMetric(ConfidenceMetric):
name = "SERIF"
def score(self, serif_rel):
return float(serif_rel.confidence)
######################################################################
# Helper Functions
######################################################################
def columns(*blocks, **kwargs):
align = kwargs.pop('align', 'top')
if kwargs: raise TypeError('Unexpected kwarg %s' % kwargs.keys()[0])
s = ''
blocks = [block.split('\n') for block in blocks]
colwidths = [max(len(line)+1 for line in block) for block in blocks]
if align == 'bottom':
height = max(len(block) for block in blocks)
blocks = [['']*(height-len(block))+block for block in blocks]
for lines in map(None, *blocks):
for colwidth, line in zip(colwidths, lines):
s += (line or '').ljust(colwidth) + ' '
s += '\n'
return s
def running_avg(xy_pairs, window=50, debug=False):
result = []
for i in range(window/2):
result.append( (avg([x for (x,y) in xy_pairs[:i+window/2]]),
avg([y for (x,y) in xy_pairs[:i+window/2]])) )
if debug:
print '%3d [%3d:%3d] (%5.2f,%5.2f) -> (%5.2f, %5.2f)' % (
i, 0, i+window/2, xy_pairs[i][0], xy_pairs[i][1],
result[-1][0], result[-1][1])
for i in range(window/2, len(xy_pairs)-window/2):
start = max(0, i-window/2)
stop = min(len(xy_pairs)-1, i+window/2)
result.append((avg([x for (x,y) in xy_pairs[start:stop]]),
avg([y for (x,y) in xy_pairs[start:stop]])))
if debug:
print '%3d [%3d:%3d] (%5.2f,%5.2f) -> (%5.2f, %5.2f)' % (
i, start, stop, xy_pairs[i][0], xy_pairs[i][1],
result[-1][0], result[-1][1])
for i in range(len(xy_pairs)-window/2, len(xy_pairs)):
result.append( (avg([x for (x,y) in xy_pairs[i:]]),
avg([y for (x,y) in xy_pairs[i:]])) )
if debug:
print '%3d [%3d:%3d] (%5.2f,%5.2f) -> (%5.2f, %5.2f)' % (
i, i, len(xy_pairs), xy_pairs[i][0], xy_pairs[i][1],
result[-1][0], result[-1][1])
return result
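# Example (illustrative): smoothing a precision/recall-style curve with a window
# of 4 points; each output pair averages the x and y values inside its window.
#
#   curve = [(0.1, 0.95), (0.2, 0.90), (0.3, 0.92), (0.4, 0.80), (0.5, 0.78)]
#   smoothed = running_avg(curve, window=4)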
######################################################################
# Data Loading
######################################################################
def _filter_by_docid(relations, docids, name):
result = [r for r in relations if r.docid in docids]
print '%s: %d->%d' % (name, len(relations), len(result))
print ' (%f%%)' % (100.*len(result)/len(relations))
return result
def load_data(serif_root, apf_roots, docid_filter_file=None):
print 'Loading serif output...'
serif_relation_decisions = load_serif_relations(serif_root)
print 'Loading APF (eval) data...'
    apf_data = apf.read_apf(apf_roots)
apf_relations = sum([r.relmentions for r in apf_data.values() if
hasattr(r, 'relmentions')], [])
# If requested, then filter out any relations that aren't in the
# given list of docids.
if docid_filter_file is not None:
train_docids = set(open(docid_filter_file).read().split())
serif_relation_decisions = _filter_by_docid(serif_relation_decisions,
train_docids, 'Serif')
apf_relations = _filter_by_docid(apf_relations, train_docids, 'Apf')
# If there are any files that we have serif output for, but do not
# have any apf output for, then discard that serif output --
# otherwise, it will show up as a false positive. Similarly, if
# we have any files that we have apf output for but no serif
# output, then discard them.
apf_docids = set(v.docid for v in apf_data.values())
serif_docids = set(r.docid for r in serif_relation_decisions)
serif_extras = serif_docids-apf_docids
if serif_extras:
print ('Discarding unmatched serif output for %d/%d docs' %
(len(serif_extras), len(serif_docids)))
serif_relation_decisions = [r for r in serif_relation_decisions
if r.docid not in serif_extras]
apf_extras = apf_docids-serif_docids
if apf_extras:
print ('Discarding unmatched apf output for %d/%d docs' %
(len(apf_extras), len(apf_docids)))
apf_relations = [r for r in apf_relations
if r.docid not in apf_extras]
return serif_relation_decisions, apf_relations
# Pick some heldout documents for later evaluation.
def split_data(serif_relations, apf_relations):
all_docids = list(set(r.docid for r in serif_relations))
random.shuffle(all_docids)
N = len(all_docids)/5
dev_docids = set(all_docids[N:])
heldout_docids = set(all_docids[:N])
print 'Selecting dev set and heldout set'
print '%6d dev docs' % len(dev_docids)
print '%6d heldout docs' % len(heldout_docids)
heldout_serif_relations = [r for r in serif_relations
if r.docid in heldout_docids]
serif_relations = [r for r in serif_relations
if r.docid not in heldout_docids]
heldout_apf_relations = [r for r in apf_relations
if r.docid in heldout_docids]
apf_relations = [r for r in apf_relations
if r.docid not in heldout_docids]
return ((serif_relations, apf_relations),
(heldout_serif_relations, heldout_apf_relations))
######################################################################
# Decide Which Filters to Use
######################################################################
def choose_filter_candidates(apf_relations, serif_relations):
filter_makers = [
# Filters based on entity types & mention types
DiscreteFilterMaker('lhs_entity_type',
(lambda r: r.lhs_entity_type)),
DiscreteFilterMaker('lhs_entity_subtype',
(lambda r: r.lhs_entity_subtype)),
DiscreteFilterMaker('rhs_entity_type',
(lambda r: r.rhs_entity_type)),
DiscreteFilterMaker('rhs_entity_subtype',
(lambda r: r.rhs_entity_subtype)),
DiscreteFilterMaker('entity_types', entity_types),
DiscreteFilterMaker('lhs_mention_type', (lambda r: r.lhs_mention_type)),
DiscreteFilterMaker('rhs_mention_type', (lambda r: r.rhs_mention_type)),
DiscreteFilterMaker('mention_types', mention_types),
# Filters based on the maxent model
CutoffFilterMaker('maxent_score', maxent_score, slices=100),
CutoffFilterMaker('max_pro_rel_feature', max_pro_rel_feature),
CutoffFilterMaker('max_anti_rel_feature', max_anti_rel_feature),
# Other features
DiscreteFilterMaker('overlap', overlap_type),
CutoffFilterMaker('arglen', arglen, slices=30),
DiscreteFilterMaker('nested_args', nested_args),
CutoffFilterMaker('argdist', argdist, slices=30),
# Check for the presence of features used by maxent/p1 models
FeatureFilterMaker(),
# Check some characterstics of how we assembled the answer:
BinaryFilterMaker('use_p1_answer', lambda r:r.use_p1_answer),
BinaryFilterMaker('use_cvec_answer', lambda r:r.use_cvec_answer),
BinaryFilterMaker('secondary_selected', lambda r:r.secondary_selected),
BinaryFilterMaker('use_p1_answer_vec_tree',
lambda r:r.use_p1_answer_vec_tree),
DiscreteFilterMaker('ethnic_heuristic', lambda r:r.ethnic_heuristic),
CutoffFilterMaker('reversed', (lambda r:r.reversed-r.not_reversed)),
]
filters = []
# Explore lots of different alpha's.
filters += choose_filters_2(apf_relations, serif_relations,
filter_makers, nbest=10,
alphas=[0.01*n for n in range(30, 100)],
display_alphas=[0.99, 0.95, 0.9, 0.5])
# Find some very-high-precision filters
filters += choose_filters(apf_relations, serif_relations, filter_makers,
depth=1, width=20, narrowing_factor=1./20,
alpha=0.999)
filters += choose_filters(apf_relations, serif_relations, filter_makers,
depth=1, width=20, narrowing_factor=1./20,
alpha=0.99)
# Find some good filters, emphasizing precision
filters += choose_filters(apf_relations, serif_relations, filter_makers,
depth=50, width=1,
alpha=0.9)
# Find some good filters, emphasizing precision a little less
filters += choose_filters(apf_relations, serif_relations, filter_makers,
depth=50, width=1,
alpha=0.8)
# See what we find if we maximize F(0.5):
filters += choose_filters(apf_relations, serif_relations, filter_makers,
depth=50, width=1,
alpha=0.5)
filters += choose_filters(apf_relations, serif_relations, filter_makers,
depth=5, width=5,
alpha=0.5)
# Remove any duplicate filters (assume names are unique):
filters = dict((f.name, f) for f in filters).values()
return filters
######################################################################
# Decide Which Metrics to Use
######################################################################
def choose_metrics(dev_apf_relations, dev_serif_relations,
heldout_apf_relations, heldout_serif_relations,
filters, num_metrics=5, verbose=True):
#NUM_FILTERS = [5, 10, 20, 30, 40, 50, 1000]
#NUM_FILTERS = [5, 10, 20, 30, 40, 50]
NUM_FILTERS = [10, 11, 12, 13, 14, 15]
ALPHA = [0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.999, 0.9999]
#NUM_FILTERS = [5, 30]
#ALPHA = [0.8, 0.9]
metrics = []
# Try out various meta-parameters:
for num_filters in NUM_FILTERS:
for alpha in ALPHA:
for metrictype in [ScoredFilterConfidenceMetric,
ScoredFilterConfidenceMetric2
]:
cm = metrictype(
dev_apf_relations, dev_serif_relations,
filters, alpha=alpha, num_filters=num_filters)
cm_score = cm.evaluate(heldout_apf_relations,
heldout_serif_relations,
verbosity=-1)
print '[%.3f] %s' % (cm_score, cm.name)
metrics.append( (cm_score, cm) )
metrics.sort()
metrics = [cm for (cm_score,cm) in metrics[-num_metrics:]]
if verbose:
display_metrics(metrics, heldout_apf_relations,
heldout_serif_relations)
return metrics
def display_metrics(metrics, heldout_apf_relations, heldout_serif_relations):
for cm in metrics:
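        # Re-bind each metric instance to the class object currently defined in
        # this module: when this buffer is re-sent to an interactive shell the
        # classes are re-created, and existing instances would otherwise keep
        # pointing at the stale definitions.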
cm.__class__ = globals()[type(cm).__name__]
cm.evaluate(heldout_apf_relations, heldout_serif_relations)
######################################################################
# Main Script:
######################################################################
# We do all this in the global namespace to make it easier to
# experiment with interactively (using emacs python-mode). All
# the "if 1:" or "if 0:" statements are so I can repeatedly send
# this buffer to a python interactive shell and not repeat steps
# I've already done (when experimenting with other steps). If
# you're running this from scratch, then all the "if 0"'s should
# probably be changed to "if 1".
SERIF_ROOT = r'//traid07/u16/eloper/runjobs/expts/relation-confidence/'
APF_ROOTS = [r'//traid01/speed/Ace/data-2005-v4.0/English',
(r'//traid01/speed/Training-Data/English/'+
r'bbn-ace2005-relation-training/fixed-apf')]
DOCID_FILTER_FILE = (r'//titan3/u70/users/eloper/code/relation-confidence/'
r'lib/train_docids')
# Change this 0 to 1 to load the data (left disabled so interactive re-runs skip the slow loading step).
if 0:
print 'Loading data...'
serif_relation_decisions, apf_relations = load_data(SERIF_ROOT, APF_ROOTS)
serif_relations = [r for r in serif_relation_decisions
if r.answer != 'NONE']
if 0:
print 'Max recall (over all data): %.1f%%' % (100*max_recall(
serif_relation_decisions, apf_relations))
# Split the data into a dev set and a heldout set.
if 0:
((dev_serif_relations, dev_apf_relations),
(heldout_serif_relations, heldout_apf_relations)) = split_data(
serif_relations, apf_relations)
if 0:
print '\nChoosing filter candidates...'
filters = choose_filter_candidates(dev_apf_relations, dev_serif_relations)
# Display the performance of individual filters:
if | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AnalyticsApplicationCloudwatchLoggingOptionsArgs',
'AnalyticsApplicationInputsArgs',
'AnalyticsApplicationInputsKinesisFirehoseArgs',
'AnalyticsApplicationInputsKinesisStreamArgs',
'AnalyticsApplicationInputsParallelismArgs',
'AnalyticsApplicationInputsProcessingConfigurationArgs',
'AnalyticsApplicationInputsProcessingConfigurationLambdaArgs',
'AnalyticsApplicationInputsSchemaArgs',
'AnalyticsApplicationInputsSchemaRecordColumnArgs',
'AnalyticsApplicationInputsSchemaRecordFormatArgs',
'AnalyticsApplicationInputsSchemaRecordFormatMappingParametersArgs',
'AnalyticsApplicationInputsSchemaRecordFormatMappingParametersCsvArgs',
'AnalyticsApplicationInputsSchemaRecordFormatMappingParametersJsonArgs',
'AnalyticsApplicationInputsStartingPositionConfigurationArgs',
'AnalyticsApplicationOutputArgs',
'AnalyticsApplicationOutputKinesisFirehoseArgs',
'AnalyticsApplicationOutputKinesisStreamArgs',
'AnalyticsApplicationOutputLambdaArgs',
'AnalyticsApplicationOutputSchemaArgs',
'AnalyticsApplicationReferenceDataSourcesArgs',
'AnalyticsApplicationReferenceDataSourcesS3Args',
'AnalyticsApplicationReferenceDataSourcesSchemaArgs',
'AnalyticsApplicationReferenceDataSourcesSchemaRecordColumnArgs',
'AnalyticsApplicationReferenceDataSourcesSchemaRecordFormatArgs',
'AnalyticsApplicationReferenceDataSourcesSchemaRecordFormatMappingParametersArgs',
'AnalyticsApplicationReferenceDataSourcesSchemaRecordFormatMappingParametersCsvArgs',
'AnalyticsApplicationReferenceDataSourcesSchemaRecordFormatMappingParametersJsonArgs',
'FirehoseDeliveryStreamElasticsearchConfigurationArgs',
'FirehoseDeliveryStreamElasticsearchConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationArgs',
'FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorArgs',
'FirehoseDeliveryStreamElasticsearchConfigurationProcessingConfigurationProcessorParameterArgs',
'FirehoseDeliveryStreamElasticsearchConfigurationVpcConfigArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerHiveJsonSerDeArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationInputFormatConfigurationDeserializerOpenXJsonSerDeArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerOrcSerDeArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationOutputFormatConfigurationSerializerParquetSerDeArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDataFormatConversionConfigurationSchemaConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationDynamicPartitioningConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationProcessingConfigurationProcessorParameterArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationArgs',
'FirehoseDeliveryStreamExtendedS3ConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationProcessingConfigurationProcessorParameterArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationArgs',
'FirehoseDeliveryStreamHttpEndpointConfigurationRequestConfigurationCommonAttributeArgs',
'FirehoseDeliveryStreamKinesisSourceConfigurationArgs',
'FirehoseDeliveryStreamRedshiftConfigurationArgs',
'FirehoseDeliveryStreamRedshiftConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationArgs',
'FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorArgs',
'FirehoseDeliveryStreamRedshiftConfigurationProcessingConfigurationProcessorParameterArgs',
'FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationArgs',
'FirehoseDeliveryStreamRedshiftConfigurationS3BackupConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamS3ConfigurationArgs',
'FirehoseDeliveryStreamS3ConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamServerSideEncryptionArgs',
'FirehoseDeliveryStreamSplunkConfigurationArgs',
'FirehoseDeliveryStreamSplunkConfigurationCloudwatchLoggingOptionsArgs',
'FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationArgs',
'FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorArgs',
'FirehoseDeliveryStreamSplunkConfigurationProcessingConfigurationProcessorParameterArgs',
'StreamStreamModeDetailsArgs',
]
@pulumi.input_type
class AnalyticsApplicationCloudwatchLoggingOptionsArgs:
def __init__(__self__, *,
log_stream_arn: pulumi.Input[str],
role_arn: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] log_stream_arn: The ARN of the CloudWatch Log Stream.
:param pulumi.Input[str] role_arn: The ARN of the IAM Role used to send application messages.
:param pulumi.Input[str] id: The ARN of the Kinesis Analytics Application.
"""
pulumi.set(__self__, "log_stream_arn", log_stream_arn)
pulumi.set(__self__, "role_arn", role_arn)
if id is not None:
pulumi.set(__self__, "id", id)
@property
@pulumi.getter(name="logStreamArn")
def log_stream_arn(self) -> pulumi.Input[str]:
"""
The ARN of the CloudWatch Log Stream.
"""
return pulumi.get(self, "log_stream_arn")
@log_stream_arn.setter
def log_stream_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "log_stream_arn", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The ARN of the IAM Role used to send application messages.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the Kinesis Analytics Application.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
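# Hedged usage sketch for AnalyticsApplicationCloudwatchLoggingOptionsArgs above
# (not part of the generated API; the ARNs are made-up placeholders):
#
#   logging_options = AnalyticsApplicationCloudwatchLoggingOptionsArgs(
#       log_stream_arn='arn:aws:logs:us-east-1:111122223333:log-group:example:log-stream:example',
#       role_arn='arn:aws:iam::111122223333:role/example-analytics-role',
#   )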
@pulumi.input_type
class AnalyticsApplicationInputsArgs:
def __init__(__self__, *,
name_prefix: pulumi.Input[str],
schema: pulumi.Input['AnalyticsApplicationInputsSchemaArgs'],
id: Optional[pulumi.Input[str]] = None,
kinesis_firehose: Optional[pulumi.Input['AnalyticsApplicationInputsKinesisFirehoseArgs']] = None,
kinesis_stream: Optional[pulumi.Input['AnalyticsApplicationInputsKinesisStreamArgs']] = None,
parallelism: Optional[pulumi.Input['AnalyticsApplicationInputsParallelismArgs']] = None,
processing_configuration: Optional[pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationArgs']] = None,
starting_position_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['AnalyticsApplicationInputsStartingPositionConfigurationArgs']]]] = None,
stream_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] name_prefix: The Name Prefix to use when creating an in-application stream.
:param pulumi.Input['AnalyticsApplicationInputsSchemaArgs'] schema: The Schema format of the data in the streaming source. See Source Schema below for more details.
:param pulumi.Input[str] id: The ARN of the Kinesis Analytics Application.
:param pulumi.Input['AnalyticsApplicationInputsKinesisFirehoseArgs'] kinesis_firehose: The Kinesis Firehose configuration for the streaming source. Conflicts with `kinesis_stream`.
See Kinesis Firehose below for more details.
:param pulumi.Input['AnalyticsApplicationInputsKinesisStreamArgs'] kinesis_stream: The Kinesis Stream configuration for the streaming source. Conflicts with `kinesis_firehose`.
See Kinesis Stream below for more details.
:param pulumi.Input['AnalyticsApplicationInputsParallelismArgs'] parallelism: The number of Parallel in-application streams to create.
See Parallelism below for more details.
:param pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationArgs'] processing_configuration: The Processing Configuration to transform records as they are received from the stream.
See Processing Configuration below for more details.
:param pulumi.Input[Sequence[pulumi.Input['AnalyticsApplicationInputsStartingPositionConfigurationArgs']]] starting_position_configurations: The point at which the application starts processing records from the streaming source.
See Starting Position Configuration below for more details.
"""
pulumi.set(__self__, "name_prefix", name_prefix)
pulumi.set(__self__, "schema", schema)
if id is not None:
pulumi.set(__self__, "id", id)
if kinesis_firehose is not None:
pulumi.set(__self__, "kinesis_firehose", kinesis_firehose)
if kinesis_stream is not None:
pulumi.set(__self__, "kinesis_stream", kinesis_stream)
if parallelism is not None:
pulumi.set(__self__, "parallelism", parallelism)
if processing_configuration is not None:
pulumi.set(__self__, "processing_configuration", processing_configuration)
if starting_position_configurations is not None:
pulumi.set(__self__, "starting_position_configurations", starting_position_configurations)
if stream_names is not None:
pulumi.set(__self__, "stream_names", stream_names)
@property
@pulumi.getter(name="namePrefix")
def name_prefix(self) -> pulumi.Input[str]:
"""
The Name Prefix to use when creating an in-application stream.
"""
return pulumi.get(self, "name_prefix")
@name_prefix.setter
def name_prefix(self, value: pulumi.Input[str]):
pulumi.set(self, "name_prefix", value)
@property
@pulumi.getter
def schema(self) -> pulumi.Input['AnalyticsApplicationInputsSchemaArgs']:
"""
The Schema format of the data in the streaming source. See Source Schema below for more details.
"""
return pulumi.get(self, "schema")
@schema.setter
def schema(self, value: pulumi.Input['AnalyticsApplicationInputsSchemaArgs']):
pulumi.set(self, "schema", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The ARN of the Kinesis Analytics Application.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="kinesisFirehose")
def kinesis_firehose(self) -> Optional[pulumi.Input['AnalyticsApplicationInputsKinesisFirehoseArgs']]:
"""
The Kinesis Firehose configuration for the streaming source. Conflicts with `kinesis_stream`.
See Kinesis Firehose below for more details.
"""
return pulumi.get(self, "kinesis_firehose")
@kinesis_firehose.setter
def kinesis_firehose(self, value: Optional[pulumi.Input['AnalyticsApplicationInputsKinesisFirehoseArgs']]):
pulumi.set(self, "kinesis_firehose", value)
@property
@pulumi.getter(name="kinesisStream")
def kinesis_stream(self) -> Optional[pulumi.Input['AnalyticsApplicationInputsKinesisStreamArgs']]:
"""
The Kinesis Stream configuration for the streaming source. Conflicts with `kinesis_firehose`.
See Kinesis Stream below for more details.
"""
return pulumi.get(self, "kinesis_stream")
@kinesis_stream.setter
def kinesis_stream(self, value: Optional[pulumi.Input['AnalyticsApplicationInputsKinesisStreamArgs']]):
pulumi.set(self, "kinesis_stream", value)
@property
@pulumi.getter
def parallelism(self) -> Optional[pulumi.Input['AnalyticsApplicationInputsParallelismArgs']]:
"""
The number of Parallel in-application streams to create.
See Parallelism below for more details.
"""
return pulumi.get(self, "parallelism")
@parallelism.setter
def parallelism(self, value: Optional[pulumi.Input['AnalyticsApplicationInputsParallelismArgs']]):
pulumi.set(self, "parallelism", value)
@property
@pulumi.getter(name="processingConfiguration")
def processing_configuration(self) -> Optional[pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationArgs']]:
"""
The Processing Configuration to transform records as they are received from the stream.
See Processing Configuration below for more details.
"""
return pulumi.get(self, "processing_configuration")
@processing_configuration.setter
def processing_configuration(self, value: Optional[pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationArgs']]):
pulumi.set(self, "processing_configuration", value)
@property
@pulumi.getter(name="startingPositionConfigurations")
def starting_position_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AnalyticsApplicationInputsStartingPositionConfigurationArgs']]]]:
"""
The point at which the application starts processing records from the streaming source.
See Starting Position Configuration below for more details.
"""
return pulumi.get(self, "starting_position_configurations")
@starting_position_configurations.setter
def starting_position_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AnalyticsApplicationInputsStartingPositionConfigurationArgs']]]]):
pulumi.set(self, "starting_position_configurations", value)
@property
@pulumi.getter(name="streamNames")
def stream_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "stream_names")
@stream_names.setter
def stream_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "stream_names", value)
@pulumi.input_type
class AnalyticsApplicationInputsKinesisFirehoseArgs:
def __init__(__self__, *,
resource_arn: pulumi.Input[str],
role_arn: pulumi.Input[str]):
"""
:param pulumi.Input[str] resource_arn: The ARN of the Kinesis Firehose delivery stream.
:param pulumi.Input[str] role_arn: The ARN of the IAM Role used to access the stream.
"""
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
The ARN of the Kinesis Firehose delivery stream.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The ARN of the IAM Role used to access the stream.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@pulumi.input_type
class AnalyticsApplicationInputsKinesisStreamArgs:
def __init__(__self__, *,
resource_arn: pulumi.Input[str],
role_arn: pulumi.Input[str]):
"""
:param pulumi.Input[str] resource_arn: The ARN of the Kinesis Stream.
:param pulumi.Input[str] role_arn: The ARN of the IAM Role used to access the stream.
"""
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
The ARN of the Kinesis Stream.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The ARN of the IAM Role used to access the stream.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
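# Hedged sketch for AnalyticsApplicationInputsKinesisStreamArgs above: a Kinesis
# Stream source is normally nested inside AnalyticsApplicationInputsArgs via the
# `kinesis_stream` field (all values below are placeholders):
#
#   inputs = AnalyticsApplicationInputsArgs(
#       name_prefix='example_prefix_',
#       schema=example_schema,  # an AnalyticsApplicationInputsSchemaArgs
#       kinesis_stream=AnalyticsApplicationInputsKinesisStreamArgs(
#           resource_arn='arn:aws:kinesis:us-east-1:111122223333:stream/example',
#           role_arn='arn:aws:iam::111122223333:role/example-analytics-role',
#       ),
#   )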
@pulumi.input_type
class AnalyticsApplicationInputsParallelismArgs:
def __init__(__self__, *,
count: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] count: The Count of streams.
"""
if count is not None:
pulumi.set(__self__, "count", count)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
"""
The Count of streams.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@pulumi.input_type
class AnalyticsApplicationInputsProcessingConfigurationArgs:
def __init__(__self__, *,
lambda_: pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationLambdaArgs']):
"""
:param pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationLambdaArgs'] lambda_: The Lambda function configuration. See Lambda below for more details.
"""
pulumi.set(__self__, "lambda_", lambda_)
@property
@pulumi.getter(name="lambda")
def lambda_(self) -> pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationLambdaArgs']:
"""
The Lambda function configuration. See Lambda below for more details.
"""
return pulumi.get(self, "lambda_")
@lambda_.setter
def lambda_(self, value: pulumi.Input['AnalyticsApplicationInputsProcessingConfigurationLambdaArgs']):
pulumi.set(self, "lambda_", value)
@pulumi.input_type
class AnalyticsApplicationInputsProcessingConfigurationLambdaArgs:
def __init__(__self__, *,
resource_arn: pulumi.Input[str],
role_arn: pulumi.Input[str]):
"""
:param pulumi.Input[str] resource_arn: The ARN of the Lambda function.
:param pulumi.Input[str] role_arn: The ARN of the IAM Role used to access the Lambda function.
"""
pulumi.set(__self__, "resource_arn", resource_arn)
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="resourceArn")
def resource_arn(self) -> pulumi.Input[str]:
"""
The ARN of the Lambda function.
"""
return pulumi.get(self, "resource_arn")
@resource_arn.setter
def resource_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_arn", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The ARN of the IAM Role used to access the Lambda function.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@pulumi.input_type
class AnalyticsApplicationInputsSchemaArgs:
def __init__(__self__, *,
record_columns: pulumi.Input[Sequence[pulumi.Input['AnalyticsApplicationInputsSchemaRecordColumnArgs']]],
record_format: pulumi.Input['AnalyticsApplicationInputsSchemaRecordFormatArgs'],
record_encoding: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['AnalyticsApplicationInputsSchemaRecordColumnArgs']]] record_columns: The Record Column mapping for the streaming source data element.
See Record Columns below for more details.
:param pulumi.Input['AnalyticsApplicationInputsSchemaRecordFormatArgs'] record_format: The Record Format and mapping information to schematize a record.
See Record Format below for more details.
:param pulumi.Input[str] record_encoding: The Encoding of the record in the streaming source.
"""
pulumi.set(__self__, "record_columns", record_columns)
pulumi.set(__self__, "record_format", record_format)
if record_encoding is not None:
pulumi.set(__self__, "record_encoding", record_encoding)
@property
| |
# Copyright 2019 <NAME>. All rights reserved.
# Use of this source code is governed by the GNU-GPL
# license that can be found in the LICENSE file.
# Date created: July 21, 2019
import json
import os
import platform
import random
import shutil
import subprocess
import time
import _thread
import threading
from os.path import abspath, dirname, isfile, join
import flask
import markdown
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
__all__ = [
'Cache',
'CsvReader',
'CsvWriter',
'Logger',
'Singleton',
'clear',
'cur_dir',
'get_json',
'parse_csv',
'ran_num',
'read_file',
'render_markdown',
'render_markdown_from_file',
'render_markdown_for_flask',
'render_markdown_from_file_for_flask',
'timestamp',
'save_json',
'write_csv',
'write_file'
]
EMOJI_RE = r'(:)(.*?):'
def read_file(file):
"""
Reads a file.
Parameters:
file: Path to the file.
Returns:
The content of the file.
"""
with open(file, 'r') as f:
return f.read()
def write_file(file, data):
"""
Writes data to a file at once.
Parameters:
file: Path to the file.
data: Data to write.
"""
with open(file, 'w') as f:
f.write(data)
def clear():
"""
Clears the console platform-specific.
"""
p = platform.system()
if p == 'Windows':
        subprocess.call('cls', shell=True)  # 'cls' is a cmd built-in, so it needs a shell
elif p in ['Linux', 'Darwin']:
subprocess.call('clear', shell=False)
def cur_dir(file):
"""
Gets the current directory of a file.
Arguments:
file: File to get the directory from.
Returns:
The directory.
"""
return dirname(os.path.realpath(file))
def ran_num(length=1):
"""
Random string number generator.
This function generates a string with a custom length
that contains random digits and characters from a-f.
Parameters:
length: Number of places the number should have.
Returns:
A string with random digits and characters.
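    Example (illustrative; output is random):
        ran_num(4)  # -> e.g. 'a3f0'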
"""
number = ''
for z in range(length):
r = random.randint(0, 15)
if 0 <= r <= 9:
number += str(r)
elif r == 10:
number += 'a'
elif r == 11:
number += 'b'
elif r == 12:
number += 'c'
elif r == 13:
number += 'd'
        elif r == 14:
number += 'e'
elif r == 15:
number += 'f'
return number
def timestamp():
"""
Creates the local time as a string of the format <YYYY-MM-DD:HH-mm-SS>.
Returns:
The local time as a string.
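    Example (illustrative):
        timestamp()  # -> e.g. '2019-07-21:09-05-03'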
"""
t = time.localtime()
    return '{:04d}-{:02d}-{:02d}:{:02d}-{:02d}-{:02d}'.format(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
def get_json(file, path=''):
"""
Reads and parses a JSON file.
Parameters:
file: The name of the JSON file to read.
path: The path to the JSON file, if it is not in the working directory.
Returns:
A dict containing the JSON file's content.
"""
with open(os.path.join(path, file), 'r', encoding='UTF-8') as j:
return json.load(j)
def save_json(file, data, path=''):
"""
Writes data to a JSON file.
Parameters:
file: The name of the JSON file to write to.
data: The object to write.
path: The path to the JSON file, if it is not in the working directory.
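    Example (illustrative; writes to ./config/settings.json):
        save_json('settings.json', {'theme': 'dark'}, path='config')
        get_json('settings.json', path='config')  # -> {'theme': 'dark'}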
"""
if not os.path.exists(path):
os.mkdir(path)
with open(os.path.join(path, file), 'w', encoding='UTF-8') as j:
json.dump(data, j, indent=4)
class EmojiExtension(Extension):
"""
Original version can be found here: https://github.com/bytefish/MarkdownEmojiExtension.
"""
def __init__(self, **kwargs):
self.config = {
'emojis': [[], 'List of Emojis.']
}
self.as_dictionary = lambda emojis: dict((emoji['key'], emoji['value']) for emoji in emojis)
super(EmojiExtension, self).__init__(**kwargs)
def extendMarkdown(self, md):
emojis = self.as_dictionary(self.getConfig('emojis'))
pattern = EmojiInlinePattern(EMOJI_RE, emojis)
md.inlinePatterns.add('emoji', pattern, '<not_strong')
@staticmethod
def create_from_json():
return EmojiExtension(emojis=get_json('emojis.json', path=abspath(__file__).replace('__init__.py', '')))
class EmojiInlinePattern(Pattern):
def __init__(self, pattern, emojis):
super(EmojiInlinePattern, self).__init__(pattern)
self.emojis = emojis
def handleMatch(self, m):
emoji_key = m.group(3)
return self.emojis.get(emoji_key, '')
def render_markdown(markdown_string=''):
"""
Renders a string containing markdown to an HTML string.
Parameters:
markdown_string: A string containing markdown.
Returns:
The rendered HTML.
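    Example (illustrative):
        render_markdown('# Title')  # -> '<h1>Title</h1>'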
"""
return markdown.markdown(markdown_string, extensions=[EmojiExtension.create_from_json()])
def render_markdown_for_flask(markdown_string=''):
"""
Renders a string containing markdown to an HTML string and formats it for Flask.
Parameters:
markdown_string: A string containing markdown.
Returns:
The rendered HTML for Flask.
"""
return flask.Markup(render_markdown(markdown_string))
def render_markdown_from_file(file):
"""
Renders a file containing markdown to an HTML string and formats it for Flask.
Parameters:
file: A file containing markdown.
Returns:
The rendered HTML for Flask.
"""
with open(file, 'r', encoding='UTF-8') as f:
return render_markdown_for_flask(f.read())
def render_markdown_from_file_for_flask(file):
"""
Renders a file containing markdown to an HTML string.
Parameters:
file: A file containing markdown.
Returns:
The rendered HTML.
"""
with open(file, 'r', encoding='UTF-8') as f:
return render_markdown(f.read())
def parse_csv(input_file, separator=','):
"""
Parses a CSV-File and stores it at once in a list.
Parameters:
input_file: CSV-File to read from.
separator: Separator of each element; Default: ','.
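    Returns:
        A list of rows, each row being a list of the elements of that line.
    Example (illustrative; assumes 'data.csv' contains the line 1,2,3):
        parse_csv('data.csv')  # -> [['1', '2', '3']]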
"""
r = CsvReader(input_file, separator)
data = []
while r.has_next():
data.append(r.pull())
return data
def write_csv(output_file, data, separator=','):
"""
Writes a CSV-File from a list.
Parameters:
output_file: CSV-File to write to.
data: The list of lines to write.
separator: Separator of each element; Default: ','
"""
w = CsvWriter(output_file, separator)
for line in data:
w.push(line)
class CsvReader:
"""
Dynamic CSV-Reader. File is read line by line.
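    Typical use (illustrative):
        r = CsvReader('data.csv')
        while r.has_next():
            row = r.pull()  # list of the elements of one line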
"""
def __init__(self, input_file, separator=','):
"""
Initialize a new CsvReader.
Parameters:
input_file: CSV-File to read from.
separator: Separator of each element; Default: ','.
"""
self.file = open(input_file, 'r')
self.separator = separator
self._line = ''
self._input_file = input_file
def __del__(self):
"""
Make sure the file is closed.
"""
self.file.close()
def __str__(self):
return '<CSV-File: {}>'.format(self._input_file)
def __repr__(self):
return '<CSV-File: {}>'.format(self._input_file)
def has_next(self):
"""
Checks if there is a next line in the file.
Returns:
True, if there is a next line, otherwise False.
"""
self._line = self.file.readline().strip()
return self._line != ''
def pull(self):
"""
Reads the next line.
Returns:
A list with all elements from that line.
"""
line_list = []
line_string = ''
in_quote = False
index = 1
for char in self._line:
if char == self.separator and not in_quote:
line_list.append(line_string.strip())
line_string = ''
elif char == '"':
in_quote = not in_quote
else:
line_string += char
index += 1
line_list.append(line_string.strip())
return line_list
class CsvWriter:
"""
Dynamic CSV-Writer. File is written line by line.
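    Typical use (illustrative):
        w = CsvWriter('out.csv')
        w.push(['a', 'b', 'c'])  # writes "a","b","c"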
"""
def __init__(self, output_file, separator=','):
"""
Initialize a new CsvWriter.
Parameters:
output_file: CSV-File to write to.
separator: Separator of each element; Default: ','.
"""
self.file = open(output_file, 'w')
self.separator = separator
self._line = ''
self._output_file = output_file
def __del__(self):
"""
Make sure the file is closed.
"""
self.file.close()
def __str__(self):
return '<CSV-File: {}>'.format(self._output_file)
def __repr__(self):
return '<CSV-File: {}>'.format(self._output_file)
def push(self, line):
string = ''
for s in line:
string += '"' + s + '"' + self.separator
self.file.write(string[:len(string) - 1] + '\n')
class TangoIcon:
"""
Represents an icon from the Icon-Set Tango.
"""
def __init__(self, image, static=''):
"""
Initializes an icon.
Arguments:
image: The name of the image to display.
            static: If a non-empty path is given, the image gets copied
                to that directory (see make_static).
"""
self._image = image
if static != '':
self.make_static(static)
def make_static(self, path):
"""
Copies the image to a certain destination. Some applications need
static files in a special directory (e.g. Flask).
Arguments:
path: The absolute path, the image should be copied to.
"""
img = join(path, self.image)
if not isfile(img):
shutil.copyfile(self.path, img)
@property
def image(self):
"""
Returns:
The name of the image.
"""
return self._image
@property
def path(self):
"""
Returns:
The absolute path to the requested icon.
"""
return os.path.join(abspath(__file__).replace('__init__.py', ''), 'resources/{}'.format(self.image))
class Singleton(type):
"""
When this class is the metaclass of another class, this would be a singleton.
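    Usage (illustrative):
        class Config(metaclass=Singleton):
            pass
        Config() is Config()  # -> True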
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Cache(metaclass=Singleton):
"""
Implements a simple, thread-safe cache by using a dict.
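    Usage (illustrative; Cache is a Singleton, so every Cache() call returns
    the same instance):
        c = Cache(ttl=60)
        c['answer'] = 42
        c.get('answer')  # -> 42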
"""
def __init__(self, ttl=0):
self.__data = {}
self.__lock = threading.Lock()
_log.info('Initialized cache with ttl {}.'.format(ttl))
_thread.start_new_thread(_ttl_master, (ttl,))
def __del__(self):
with self.__lock:
self.__data = {}
def __setitem__(self, key, value):
with self.__lock:
self.__data[key] = value
def __getitem__(self, item):
with self.__lock:
return self.__data[item]
def __contains__(self, item):
with self.__lock:
return item in self.__data
def pop(self, key, default=None):
try:
with self.__lock:
item = self.__data[key]
del self.__data[key]
return item
except KeyError:
return default
def get(self, key, default=None):
try:
with self.__lock:
return self.__data[key]
except KeyError:
return default
def keys(self):
with self.__lock:
return [key for key in self.__data]
def values(self):
with self.__lock:
return [self.__data[key] for key in self.__data]
def clear(self):
self.__del__()
def __str__(self):
return str(self.__data)
def __repr__(self):
return str(self.__data)
def __hash__(self):
        return id(self)  # the underlying dict is unhashable; identity is sufficient for a singleton
def __eq__(self, other):
return self.__data == other
def _ttl_master(ttl):
if ttl == 0:
return
time.sleep(ttl)
c = Cache()
c.clear()
_log.info('Deleted cache.')
class Logger:
"""
Implements a simple logger that can print to the terminal or write into a | |
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service for handling the user deletion process."""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import itertools
import logging
import re
from core.domain import auth_services
from core.domain import collection_services
from core.domain import email_manager
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import taskqueue_services
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_domain
from core.platform import models
import feconf
import python_utils
import utils
(
app_feedback_report_models, base_models, blog_models,
collection_models, config_models, exp_models, feedback_models,
question_models, skill_models, story_models, subtopic_models,
suggestion_models, topic_models, user_models
) = models.Registry.import_models([
models.NAMES.app_feedback_report, models.NAMES.base_model,
models.NAMES.blog, models.NAMES.collection, models.NAMES.config,
models.NAMES.exploration, models.NAMES.feedback, models.NAMES.question,
models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic,
models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user,
])
datastore_services = models.Registry.import_datastore_services()
transaction_services = models.Registry.import_transaction_services()
bulk_email_services = models.Registry.import_bulk_email_services()
WIPEOUT_LOGS_PREFIX = '[WIPEOUT]'
PERIOD_AFTER_WHICH_USERNAME_CANNOT_BE_REUSED = datetime.timedelta(weeks=1)
def get_pending_deletion_request(user_id):
"""Return the pending deletion request.
Args:
user_id: str. The unique ID of the user.
Returns:
PendingDeletionRequest. The pending deletion request domain object.
"""
pending_deletion_request_model = (
user_models.PendingDeletionRequestModel.get_by_id(user_id))
return wipeout_domain.PendingDeletionRequest(
pending_deletion_request_model.id,
pending_deletion_request_model.email,
pending_deletion_request_model.normalized_long_term_username,
pending_deletion_request_model.deletion_complete,
pending_deletion_request_model.pseudonymizable_entity_mappings
)
def get_number_of_pending_deletion_requests():
"""Get number of pending deletion request.
Returns:
int. The number of pending deletion requests.
"""
return user_models.PendingDeletionRequestModel.query().count()
def save_pending_deletion_requests(pending_deletion_requests):
"""Save a list of pending deletion request domain objects as
PendingDeletionRequestModel entities in the datastore.
Args:
pending_deletion_requests: list(PendingDeletionRequest). List of pending
deletion request objects to be saved in the datastore.
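    Example (illustrative; placeholder user id and email):
        request = wipeout_domain.PendingDeletionRequest.create_default(
            'user_id_123', 'user@example.com')
        save_pending_deletion_requests([request])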
"""
user_ids = [request.user_id for request in pending_deletion_requests]
pending_deletion_request_models = (
user_models.PendingDeletionRequestModel.get_multi(
user_ids, include_deleted=True)
)
final_pending_deletion_request_models = []
for deletion_request_model, deletion_request in python_utils.ZIP(
pending_deletion_request_models, pending_deletion_requests):
deletion_request.validate()
deletion_request_dict = {
'email': deletion_request.email,
'normalized_long_term_username': (
deletion_request.normalized_long_term_username),
'deletion_complete': deletion_request.deletion_complete,
'pseudonymizable_entity_mappings': (
deletion_request.pseudonymizable_entity_mappings)
}
if deletion_request_model is not None:
deletion_request_model.populate(**deletion_request_dict)
else:
deletion_request_dict['id'] = deletion_request.user_id
deletion_request_model = user_models.PendingDeletionRequestModel(
**deletion_request_dict
)
final_pending_deletion_request_models.append(deletion_request_model)
user_models.PendingDeletionRequestModel.update_timestamps_multi(
final_pending_deletion_request_models)
user_models.PendingDeletionRequestModel.put_multi(
final_pending_deletion_request_models)
def pre_delete_user(user_id):
"""Prepare user for the full deletion.
1. Mark all the activities that are private and solely owned by the user
being deleted as deleted.
2. Disable all the email preferences.
3. Mark the user as to be deleted.
4. Create PendingDeletionRequestModel for the user.
Args:
user_id: str. The id of the user to be deleted. If the user_id
corresponds to a profile user then only that profile is deleted.
For a full user, all of its associated profile users are deleted
too.
"""
pending_deletion_requests = []
user_settings = user_services.get_user_settings(user_id, strict=True)
linked_profile_user_ids = [
user.user_id for user in
user_services.get_all_profiles_auth_details_by_parent_user_id(user_id)
]
profile_users_settings_list = user_services.get_users_settings(
linked_profile_user_ids)
for profile_user_settings in profile_users_settings_list:
profile_id = profile_user_settings.user_id
user_services.mark_user_for_deletion(profile_id)
pending_deletion_requests.append(
wipeout_domain.PendingDeletionRequest.create_default(
profile_id,
profile_user_settings.email
)
)
if feconf.ROLE_ID_MOBILE_LEARNER not in user_settings.roles:
taskqueue_services.defer(
taskqueue_services.FUNCTION_ID_REMOVE_USER_FROM_RIGHTS_MODELS,
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS,
user_id,
)
# Set all the user's email preferences to False in order to disable all
# ordinary emails that could be sent to the users.
user_services.update_email_preferences(
user_id, False, False, False, False)
bulk_email_services.permanently_delete_user_from_list(
user_settings.email)
date_now = datetime.datetime.utcnow()
date_before_which_username_should_be_saved = (
date_now - PERIOD_AFTER_WHICH_USERNAME_CANNOT_BE_REUSED)
user_services.mark_user_for_deletion(user_id)
normalized_long_term_username = (
user_settings.normalized_username
if user_settings.created_on < date_before_which_username_should_be_saved
else None
)
pending_deletion_requests.append(
wipeout_domain.PendingDeletionRequest.create_default(
user_id,
user_settings.email,
normalized_long_term_username=normalized_long_term_username
)
)
save_pending_deletion_requests(pending_deletion_requests)
def delete_users_pending_to_be_deleted():
"""Taskqueue service method for deleting users that are pending
to be deleted. Once these users are deleted, the job results
will be mailed to the admin.
"""
pending_deletion_request_models = (
user_models.PendingDeletionRequestModel.query().fetch())
email_message = 'Results of the User Deletion Cron Job'
for request_model in pending_deletion_request_models:
pending_deletion_request = get_pending_deletion_request(
request_model.id)
# The final status of the deletion. Either 'SUCCESS' or 'ALREADY DONE'.
deletion_status = run_user_deletion(pending_deletion_request)
email_message += '\n-----------------------------------\n'
email_message += (
'PendingDeletionRequestModel ID: %s\n'
'User ID: %s\n'
'Deletion status: %s\n'
) % (
request_model.id, pending_deletion_request.user_id,
deletion_status
)
email_subject = 'User Deletion job result'
if feconf.CAN_SEND_EMAILS:
email_manager.send_mail_to_admin(email_subject, email_message)
def check_completion_of_user_deletion():
"""Taskqueue service method for checking the completion of user deletion.
It checks if all models do not contain the user ID of the deleted user in
their fields. If any field contains the user ID of the deleted user, the
deletion_complete is set to False, so that later the
delete_users_pending_to_be_deleted will be run on that user again.
If all the fields do not contain the user ID of the deleted
user, the final email announcing that the deletion was completed is sent,
and the deletion request is deleted.
"""
pending_deletion_request_models = (
user_models.PendingDeletionRequestModel.query().fetch())
email_message = 'Results of the Completion of User Deletion Cron Job'
for request_model in pending_deletion_request_models:
pending_deletion_request = get_pending_deletion_request(
request_model.id)
# The final status of the completion. Either 'NOT DELETED', 'SUCCESS',
# or 'FAILURE'.
completion_status = run_user_deletion_completion(
pending_deletion_request)
if feconf.CAN_SEND_EMAILS:
email_message += '\n-----------------------------------\n'
email_message += (
'PendingDeletionRequestModel ID: %s\n'
'User ID: %s\n'
'Completion status: %s\n'
) % (
request_model.id, pending_deletion_request.user_id,
completion_status
)
email_subject = 'Completion of User Deletion job result'
email_manager.send_mail_to_admin(email_subject, email_message)
def run_user_deletion(pending_deletion_request):
"""Run the user deletion.
Args:
pending_deletion_request: PendingDeletionRequest. The domain object for
the user being deleted.
Returns:
str. The outcome of the deletion.
"""
if pending_deletion_request.deletion_complete:
return wipeout_domain.USER_DELETION_ALREADY_DONE
else:
delete_user(pending_deletion_request)
pending_deletion_request.deletion_complete = True
save_pending_deletion_requests([pending_deletion_request])
return wipeout_domain.USER_DELETION_SUCCESS
def run_user_deletion_completion(pending_deletion_request):
"""Run the user deletion verification.
Args:
pending_deletion_request: PendingDeletionRequest. The domain object for
the user being verified.
Returns:
str. The outcome of the verification.
"""
# If deletion_complete is False the delete_users_pending_to_be_deleted
# wasn't yet run for the user. The verification will be done in the next
# run of check_completion_of_user_deletion.
if not pending_deletion_request.deletion_complete:
return wipeout_domain.USER_VERIFICATION_NOT_DELETED
elif verify_user_deleted(pending_deletion_request.user_id):
_delete_models_with_delete_at_end_policy(
pending_deletion_request.user_id)
user_models.DeletedUserModel(
id=pending_deletion_request.user_id
).put()
if pending_deletion_request.normalized_long_term_username is not None:
user_services.save_deleted_username(
pending_deletion_request.normalized_long_term_username)
if feconf.CAN_SEND_EMAILS:
email_manager.send_account_deleted_email(
pending_deletion_request.user_id,
pending_deletion_request.email
)
return wipeout_domain.USER_VERIFICATION_SUCCESS
else:
if feconf.CAN_SEND_EMAILS:
email_manager.send_account_deletion_failed_email(
pending_deletion_request.user_id,
pending_deletion_request.email
)
pending_deletion_request.deletion_complete = False
save_pending_deletion_requests([pending_deletion_request])
return wipeout_domain.USER_VERIFICATION_FAILURE
def _delete_models_with_delete_at_end_policy(user_id):
"""Delete auth and user models with deletion policy 'DELETE_AT_END'.
Args:
user_id: str. The unique ID of the user that is being deleted.
"""
for model_class in models.Registry.get_storage_model_classes(
[models.NAMES.auth, models.NAMES.user]):
policy = model_class.get_deletion_policy()
if policy == base_models.DELETION_POLICY.DELETE_AT_END:
model_class.apply_deletion_policy(user_id)
def delete_user(pending_deletion_request):
"""Delete all the models for user specified in pending_deletion_request
on the basis of the user role.
Args:
pending_deletion_request: PendingDeletionRequest. The pending deletion
request object for which to delete or pseudonymize all the models.
"""
user_id = pending_deletion_request.user_id
user_roles = user_models.UserSettingsModel.get_by_id(user_id).roles
auth_services.delete_external_auth_associations(user_id)
_delete_models(user_id, models.NAMES.auth)
_delete_models(user_id, models.NAMES.user)
_pseudonymize_config_models(pending_deletion_request)
_delete_models(user_id, models.NAMES.feedback)
_delete_models(user_id, models.NAMES.improvements)
if feconf.ROLE_ID_MOBILE_LEARNER not in user_roles:
remove_user_from_activities_with_associated_rights_models(
pending_deletion_request.user_id)
_pseudonymize_app_feedback_report_models(pending_deletion_request)
_pseudonymize_feedback_models(pending_deletion_request)
_pseudonymize_suggestion_models(pending_deletion_request)
_pseudonymize_activity_models_without_associated_rights_models(
pending_deletion_request,
models.NAMES.question,
question_models.QuestionSnapshotMetadataModel,
question_models.QuestionCommitLogEntryModel,
'question_id')
_pseudonymize_activity_models_without_associated_rights_models(
pending_deletion_request,
models.NAMES.skill,
skill_models.SkillSnapshotMetadataModel,
skill_models.SkillCommitLogEntryModel,
'skill_id')
_pseudonymize_activity_models_without_associated_rights_models(
pending_deletion_request,
models.NAMES.story,
story_models.StorySnapshotMetadataModel,
story_models.StoryCommitLogEntryModel,
'story_id')
_pseudonymize_activity_models_without_associated_rights_models(
pending_deletion_request,
models.NAMES.subtopic,
subtopic_models.SubtopicPageSnapshotMetadataModel,
subtopic_models.SubtopicPageCommitLogEntryModel,
'subtopic_page_id')
_pseudonymize_activity_models_with_associated_rights_models(
pending_deletion_request,
models.NAMES.exploration,
exp_models.ExplorationSnapshotMetadataModel,
exp_models.ExplorationRightsSnapshotMetadataModel,
exp_models.ExplorationRightsSnapshotContentModel,
exp_models.ExplorationCommitLogEntryModel,
'exploration_id',
feconf.EXPLORATION_RIGHTS_CHANGE_ALLOWED_COMMANDS,
('owner_ids', 'editor_ids', 'voice_artist_ids', 'viewer_ids'))
_remove_user_id_from_contributors_in_summary_models(
user_id, exp_models.ExpSummaryModel)
_pseudonymize_activity_models_with_associated_rights_models(
pending_deletion_request,
models.NAMES.collection,
collection_models.CollectionSnapshotMetadataModel,
collection_models.CollectionRightsSnapshotMetadataModel,
collection_models.CollectionRightsSnapshotContentModel,
collection_models.CollectionCommitLogEntryModel,
'collection_id',
feconf.COLLECTION_RIGHTS_CHANGE_ALLOWED_COMMANDS,
('owner_ids', 'editor_ids', 'voice_artist_ids', 'viewer_ids'))
_remove_user_id_from_contributors_in_summary_models(
user_id, collection_models.CollectionSummaryModel)
_pseudonymize_activity_models_with_associated_rights_models(
pending_deletion_request,
models.NAMES.topic,
topic_models.TopicSnapshotMetadataModel,
topic_models.TopicRightsSnapshotMetadataModel,
topic_models.TopicRightsSnapshotContentModel,
topic_models.TopicCommitLogEntryModel,
'topic_id',
feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS,
('manager_ids',))
_pseudonymize_blog_post_models(pending_deletion_request)
_delete_models(user_id, models.NAMES.email)
def verify_user_deleted(user_id, include_delete_at_end_models=False):
"""Verify that all the models for user specified in pending_deletion_request
are deleted.
Args:
user_id: str. The ID of the user whose deletion should be verified.
include_delete_at_end_models: bool. Whether to skip models
that have deletion policy equal to 'DELETE_AT_END'.
Returns:
bool. True if all the models were correctly deleted, False otherwise.
"""
if not auth_services.verify_external_auth_associations_are_deleted(user_id):
return False
policies_not_to_verify = [
base_models.DELETION_POLICY.KEEP,
base_models.DELETION_POLICY.NOT_APPLICABLE
]
if not include_delete_at_end_models:
policies_not_to_verify.append(
base_models.DELETION_POLICY.DELETE_AT_END)
user_is_verified = True
for model_class in models.Registry.get_all_storage_model_classes():
if (
model_class.get_deletion_policy() not in policies_not_to_verify
and model_class.has_reference_to_user_id(user_id)
):
logging.error(
'%s %s is not deleted for user with ID %s' % (
WIPEOUT_LOGS_PREFIX, model_class.__name__, user_id))
user_is_verified = False
return user_is_verified
def remove_user_from_activities_with_associated_rights_models(user_id):
"""Remove the user from exploration, collection, and topic models.
Args:
user_id: str. The ID of the user for which to remove the user from
explorations, collections, and topics.
"""
subscribed_exploration_summaries = (
exp_fetchers.get_exploration_summaries_where_user_has_role(user_id))
explorations_to_be_deleted_ids = [
exp_summary.id for exp_summary in subscribed_exploration_summaries if
exp_summary.is_private() and
exp_summary.is_solely_owned_by_user(user_id)
]
exp_services.delete_explorations(
user_id, explorations_to_be_deleted_ids, force_deletion=True)
# Release ownership of explorations that are public and are solely owned
# by the to-be-deleted user.
explorations_to_release_ownership_ids = [
exp_summary.id for exp_summary in subscribed_exploration_summaries if
not exp_summary.is_private() and
exp_summary.is_solely_owned_by_user(user_id) and
not exp_summary.community_owned
]
for exp_id in explorations_to_release_ownership_ids:
rights_manager.release_ownership_of_exploration(
user_services.get_system_user(), exp_id)
explorations_to_remove_user_from_ids = [
exp_summary.id for exp_summary in subscribed_exploration_summaries if
not exp_summary.is_solely_owned_by_user(user_id) | |
0, (1, 1))/3 + sqrt(2)*JzKetCoupled(
1, 0, (1, 1))/2 + sqrt(6)*JzKetCoupled(2, 0, (1, 1))/6
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, 1))) == \
-sqrt(2)*JzKetCoupled(
1, 1, (1, 1))/2 + sqrt(2)*JzKetCoupled(2, 1, (1, 1))/2
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, 0))) == \
-sqrt(3)*JzKetCoupled(
0, 0, (1, 1))/3 + sqrt(6)*JzKetCoupled(2, 0, (1, 1))/3
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, -1))) == \
sqrt(2)*JzKetCoupled(
1, -1, (1, 1))/2 + sqrt(2)*JzKetCoupled(2, -1, (1, 1))/2
assert couple(TensorProduct(JzKet(1, -1), JzKet(1, 1))) == \
sqrt(3)*JzKetCoupled(0, 0, (1, 1))/3 - sqrt(2)*JzKetCoupled(
1, 0, (1, 1))/2 + sqrt(6)*JzKetCoupled(2, 0, (1, 1))/6
assert couple(TensorProduct(JzKet(1, -1), JzKet(1, 0))) == \
-sqrt(2)*JzKetCoupled(
1, -1, (1, 1))/2 + sqrt(2)*JzKetCoupled(2, -1, (1, 1))/2
assert couple(TensorProduct(JzKet(1, -1), JzKet(1, -1))) == \
JzKetCoupled(2, -2, (1, 1))
# j1=3/2, j2=1/2
assert couple(TensorProduct(JzKet(S(3)/2, S(3)/2), JzKet(S(1)/2, S(1)/2))) == \
JzKetCoupled(2, 2, (S(3)/2, S(1)/2))
assert couple(TensorProduct(JzKet(S(3)/2, S(3)/2), JzKet(S(1)/2, -S(1)/2))) == \
sqrt(3)*JzKetCoupled(
1, 1, (S(3)/2, S(1)/2))/2 + JzKetCoupled(2, 1, (S(3)/2, S(1)/2))/2
assert couple(TensorProduct(JzKet(S(3)/2, S(1)/2), JzKet(S(1)/2, S(1)/2))) == \
-JzKetCoupled(1, 1, (S(
3)/2, S(1)/2))/2 + sqrt(3)*JzKetCoupled(2, 1, (S(3)/2, S(1)/2))/2
assert couple(TensorProduct(JzKet(S(3)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2))) == \
sqrt(2)*JzKetCoupled(1, 0, (S(
3)/2, S(1)/2))/2 + sqrt(2)*JzKetCoupled(2, 0, (S(3)/2, S(1)/2))/2
assert couple(TensorProduct(JzKet(S(3)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2))) == \
-sqrt(2)*JzKetCoupled(1, 0, (S(
3)/2, S(1)/2))/2 + sqrt(2)*JzKetCoupled(2, 0, (S(3)/2, S(1)/2))/2
assert couple(TensorProduct(JzKet(S(3)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2))) == \
JzKetCoupled(1, -1, (S(
3)/2, S(1)/2))/2 + sqrt(3)*JzKetCoupled(2, -1, (S(3)/2, S(1)/2))/2
assert couple(TensorProduct(JzKet(S(3)/2, -S(3)/2), JzKet(S(1)/2, S(1)/2))) == \
-sqrt(3)*JzKetCoupled(1, -1, (S(3)/2, S(1)/2))/2 + \
JzKetCoupled(2, -1, (S(3)/2, S(1)/2))/2
assert couple(TensorProduct(JzKet(S(3)/2, -S(3)/2), JzKet(S(1)/2, -S(1)/2))) == \
JzKetCoupled(2, -2, (S(3)/2, S(1)/2))
def test_couple_3_states_numerical():
# Default coupling
# j1=1/2,j2=1/2,j3=1/2
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2))) == \
JzKetCoupled(S(3)/2, S(
3)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(3)/2)) )
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2))) == \
sqrt(6)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(1)/2)) )/3 + \
sqrt(3)*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/
2), ((1, 2, 1), (1, 3, S(3)/2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2))) == \
sqrt(2)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 0), (1, 3, S(1)/2)) )/2 - \
sqrt(6)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(1)/2)) )/6 + \
sqrt(3)*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/
2), ((1, 2, 1), (1, 3, S(3)/2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2))) == \
sqrt(2)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 0), (1, 3, S(1)/2)) )/2 + \
sqrt(6)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(1)/2)) )/6 + \
sqrt(3)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)
/2), ((1, 2, 1), (1, 3, S(3)/2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2))) == \
-sqrt(2)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 0), (1, 3, S(1)/2)) )/2 - \
sqrt(6)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(1)/2)) )/6 + \
sqrt(3)*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, S(1)/2, S(1)/
2), ((1, 2, 1), (1, 3, S(3)/2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2))) == \
-sqrt(2)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 0), (1, 3, S(1)/2)) )/2 + \
sqrt(6)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(1)/2)) )/6 + \
sqrt(3)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)
/2), ((1, 2, 1), (1, 3, S(3)/2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(1)/2))) == \
-sqrt(6)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(1)/2)) )/3 + \
sqrt(3)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, S(1)/2, S(1)
/2), ((1, 2, 1), (1, 3, S(3)/2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, S(-1)/2))) == \
JzKetCoupled(S(3)/2, -S(
3)/2, (S(1)/2, S(1)/2, S(1)/2), ((1, 2, 1), (1, 3, S(3)/2)) )
# j1=S(1)/2, j2=S(1)/2, j3=1
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1))) == \
JzKetCoupled(2, 2, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0))) == \
sqrt(2)*JzKetCoupled(1, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
sqrt(2)*JzKetCoupled(
2, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/2
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1))) == \
sqrt(3)*JzKetCoupled(0, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 0)) )/3 + \
sqrt(2)*JzKetCoupled(1, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
sqrt(6)*JzKetCoupled(
2, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/6
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 1))) == \
sqrt(2)*JzKetCoupled(1, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 0), (1, 3, 1)) )/2 - \
JzKetCoupled(1, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
JzKetCoupled(2, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/2
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, 0))) == \
-sqrt(6)*JzKetCoupled(0, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 0)) )/6 + \
sqrt(2)*JzKetCoupled(1, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 0), (1, 3, 1)) )/2 + \
sqrt(3)*JzKetCoupled(
2, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, S(-1)/2), JzKet(1, -1))) == \
sqrt(2)*JzKetCoupled(1, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 0), (1, 3, 1)) )/2 + \
JzKetCoupled(1, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
JzKetCoupled(2, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/2
assert couple(TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 1))) == \
-sqrt(2)*JzKetCoupled(1, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 0), (1, 3, 1)) )/2 - \
JzKetCoupled(1, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
JzKetCoupled(2, 1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/2
assert couple(TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, 0))) == \
-sqrt(6)*JzKetCoupled(0, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 0)) )/6 - \
sqrt(2)*JzKetCoupled(1, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 0), (1, 3, 1)) )/2 + \
sqrt(3)*JzKetCoupled(
2, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/3
assert couple(TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, S(1)/2), JzKet(1, -1))) == \
-sqrt(2)*JzKetCoupled(1, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 0), (1, 3, 1)) )/2 + \
JzKetCoupled(1, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
JzKetCoupled(2, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/2
assert couple(TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 1))) == \
sqrt(3)*JzKetCoupled(0, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 0)) )/3 - \
sqrt(2)*JzKetCoupled(1, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
sqrt(6)*JzKetCoupled(
2, 0, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/6
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, 0))) == \
-sqrt(2)*JzKetCoupled(1, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 1)) )/2 + \
sqrt(2)*JzKetCoupled(
2, -1, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )/2
assert couple(TensorProduct(JzKet(S(1)/2, -S(1)/2), JzKet(S(1)/2, -S(1)/2), JzKet(1, -1))) == \
JzKetCoupled(2, -2, (S(1)/2, S(1)/2, 1), ((1, 2, 1), (1, 3, 2)) )
# j1=S(1)/2, j2=1, j3=1
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 1))) == \
JzKetCoupled(
S(5)/2, S(5)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(5)/2)) )
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, 0))) == \
sqrt(15)*JzKetCoupled(S(3)/2, S(3)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(3)/2)) )/5 + \
sqrt(10)*JzKetCoupled(S(
5)/2, S(3)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(5)/2)) )/5
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 1), JzKet(1, -1))) == \
sqrt(2)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(1)/2)) )/2 + \
sqrt(10)*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(3)/2)) )/5 + \
sqrt(10)*JzKetCoupled(S(5)/2, S(1)/2, (S(1)/2, 1, 1), ((1,
2, S(3)/2), (1, 3, S(5)/2)) )/10
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 1))) == \
sqrt(3)*JzKetCoupled(S(3)/2, S(3)/2, (S(1)/2, 1, 1), ((1, 2, S(1)/2), (1, 3, S(3)/2)) )/3 - \
2*sqrt(15)*JzKetCoupled(S(3)/2, S(3)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(3)/2)) )/15 + \
sqrt(10)*JzKetCoupled(S(
5)/2, S(3)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(5)/2)) )/5
assert couple(TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(1, 0), JzKet(1, 0))) == \
JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 2, S(1)/2), (1, 3, S(1)/2)) )/3 - \
sqrt(2)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 2, S(3)/2), (1, 3, S(1)/2)) )/3 + \
sqrt(2)*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, 1, | |
# Copyright (c) 2018, NECOTIS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Authors: <NAME>, <NAME> (advisor)
# Date: April 18th, 2019
# Organization: Groupe de recherche en Neurosciences Computationnelles et Traitement Intelligent des Signaux (NECOTIS),
# Université de Sherbrooke, Canada
import logging
import random
import unittest
import numpy as np
import matplotlib.pyplot as plt
from brian2.units.stdunits import ms, Hz
from brian2.units.allunits import second
from brian2.input.poissongroup import PoissonGroup
from brian2.synapses.synapses import Synapses
from brian2.monitors.statemonitor import StateMonitor
from brian2.core.clocks import defaultclock
from brian2.core.network import Network, scheduling_summary
from brian2.monitors.spikemonitor import SpikeMonitor
from critical.microcircuit import Microcircuit, createNeuronGroup, createCriticalSynapses
from brian2.core.preferences import prefs
prefs.codegen.target = 'numpy' # use the Python fallback
logger = logging.getLogger(__name__)
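# Note: each test below opens interactive matplotlib figures and blocks on
# plt.pause(...), so the tests are meant to be run one at a time, e.g.
# (module name assumed -- adjust it to the actual file name):
#
#     python -m unittest test_microcircuit.TestMicrocircuit.test_init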
class TestMicrocircuit(unittest.TestCase):
def test_init(self):
for connectivity in ['small-world', 'random']:
microcircuit = Microcircuit(
connectivity, minicolumnShape=[2, 2, 2])
microcircuit.printConnectivityStats()
fig = microcircuit.draw3D(showAxisLabels=True)
plt.ion()
plt.show()
plt.draw()
plt.pause(1.0)
plt.close(fig)
def test_neural_dynamic(self):
G = createNeuronGroup(N=1, refractory=15 * ms, tau=50 * ms, vti=0.1)
# Input to the network
P = PoissonGroup(1, 40 * Hz)
S = Synapses(P, G, on_pre='v += 0.2')
S.connect()
M = StateMonitor(G, variables=True, record=True)
defaultclock.dt = 0.5 * ms
net = Network(G, S, P, M)
net.run(1400 * ms)
fig = plt.figure()
plt.subplot(311)
plt.plot(M.t / ms, M.v.T)
plt.ylabel('V')
plt.xlabel('Time [ms]')
plt.subplot(312)
plt.plot(M.t / ms, M.vt.T)
plt.ylabel('Threshold')
plt.xlabel('Time [ms]')
plt.subplot(313)
plt.plot(M.t / ms, 1.0 - M.not_refractory.T)
plt.ylabel('Refractoriness')
plt.yticks([0.0, 1.0])
plt.xlabel('Time [ms]')
plt.tight_layout()
plt.ion()
plt.show()
plt.draw()
plt.pause(100.0)
plt.close(fig)
def test_synapse_dynamic_single(self):
G = createNeuronGroup(N=2)
G.c_out_ref = 1.0
S = createCriticalSynapses(G)
S.connect(i=0, j=1)
S.w = 0.5
S.alpha = 0.1
# Input to the network
P = PoissonGroup(1, 40 * Hz)
Si = Synapses(P, G, model='w : 1', on_pre='''v_post += w * int(not_refractory_post)
c_in_tot_post += w * int(not_refractory_post)''')
Si.connect(i=0, j=0)
Si.w = 1.0
M = SpikeMonitor(G)
Mg = StateMonitor(G, variables=True, record=True,
when='synapses', order=4)
Ms = StateMonitor(S, variables=True, record=True,
when='synapses', order=4)
defaultclock.dt = 1 * ms
net = Network(G, S, P, Si, M, Ms, Mg)
logger.info(scheduling_summary(net))
duration = 10 * second
net.run(duration)
plt.figure()
plt.subplot(221)
plt.plot(M.t / ms, M.i, '.')
plt.ylabel('Neurons')
plt.yticks([0, 1], ['pre', 'post'])
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.ylim([-0.1, 1.1])
plt.subplot(222)
plt.plot(Mg.t / ms, Mg[0].v.T, label='pre')
plt.plot(Mg.t / ms, Mg[1].v.T, label='post')
plt.ylabel('v')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.subplot(223)
plt.plot(Ms.t / ms, Ms.w.T)
plt.ylabel('w')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.subplot(224)
plt.plot(Mg.t / ms, Mg[0].cbf.T, label='pre')
plt.ylabel('cbf')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.tight_layout()
fig = plt.figure()
plt.subplot(211)
plt.plot(Mg.t / ms, Mg[1].c_in_tot.T, label='post')
plt.ylabel('c_in_tot')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.subplot(212)
plt.plot(Mg.t / ms, Mg[0].c_out_tot.T, label='pre')
plt.ylabel('c_out_tot')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.tight_layout()
plt.ion()
plt.show()
plt.draw()
plt.pause(100.0)
plt.close(fig)
def test_synapse_dynamic_dual(self):
G = createNeuronGroup(N=3)
G.c_out_ref = 1.0
S = createCriticalSynapses(G)
S.connect(i=[0, 1], j=[2, 2])
S.w = 0.5
S.alpha = 0.1
# Input to the network
P = PoissonGroup(2, [20 * Hz, 40 * Hz])
Si = Synapses(P, G, model='w : 1', on_pre='''v_post += w * int(not_refractory_post)
c_in_tot_post += w * int(not_refractory_post)''')
Si.connect(i=[0, 1], j=[0, 1])
Si.w = 0.5
M = SpikeMonitor(G)
Mg = StateMonitor(G, variables=True, record=True,
when='synapses', order=4)
Ms = StateMonitor(S, variables=True, record=True,
when='synapses', order=4)
defaultclock.dt = 1 * ms
net = Network(G, S, P, Si, M, Ms, Mg)
logger.info(scheduling_summary(net))
duration = 30 * second
net.run(duration)
plt.figure()
plt.subplot(221)
plt.plot(M.t / ms, M.i, '.')
plt.ylabel('Neurons')
plt.yticks([0, 1, 2], ['pre1', 'pre2', 'post'])
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.ylim([-0.1, 2.1])
plt.subplot(222)
plt.plot(Mg.t / ms, Mg[0].v.T, label='pre1')
plt.plot(Mg.t / ms, Mg[1].v.T, label='pre2')
plt.plot(Mg.t / ms, Mg[2].v.T, label='post')
plt.ylabel('v')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.subplot(223)
plt.plot(Ms.t / ms, Ms.w.T)
plt.ylabel('w')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.subplot(224)
plt.plot(Mg.t / ms, Mg[0].cbf.T, label='pre1')
plt.plot(Mg.t / ms, Mg[1].cbf.T, label='pre2')
plt.ylabel('cbf')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.tight_layout()
fig = plt.figure()
plt.subplot(211)
plt.plot(Mg.t / ms, Mg[2].c_in_tot.T, label='post')
plt.ylabel('c_in_tot')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.subplot(212)
plt.plot(Mg.t / ms, Mg[0].c_out_tot.T, label='pre1')
plt.plot(Mg.t / ms, Mg[1].c_out_tot.T, label='pre2')
plt.ylabel('c_out_tot')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.tight_layout()
plt.ion()
plt.show()
plt.draw()
plt.pause(100.0)
plt.close(fig)
def test_synapse_dynamic_multi(self):
G = createNeuronGroup(N=4)
G.c_out_ref = 1.0
S = createCriticalSynapses(G)
S.connect(i=[0, 1, 0, 1], j=[2, 2, 3, 3])
S.w = 0.5
S.alpha = 0.1
# Input to the network
P = PoissonGroup(2, [40 * Hz, 40 * Hz])
Si = Synapses(P, G, model='w : 1', on_pre='''v_post += w * int(not_refractory_post)
c_in_tot_post += w * int(not_refractory_post)''')
Si.connect(i=[0, 1], j=[0, 1])
Si.w = 0.5
M = SpikeMonitor(G)
Mg = StateMonitor(G, variables=True, record=True,
when='synapses', order=4)
Ms = StateMonitor(S, variables=True, record=True,
when='synapses', order=4)
defaultclock.dt = 1 * ms
net = Network(G, S, P, Si, M, Ms, Mg)
logger.info(scheduling_summary(net))
duration = 30 * second
net.run(duration)
plt.figure()
plt.subplot(221)
plt.plot(M.t / ms, M.i, '.')
plt.ylabel('Neurons')
plt.yticks([0, 1, 2, 3], ['pre1', 'pre2', 'post1', 'post2'])
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.ylim([-0.1, 3.1])
plt.subplot(222)
plt.plot(Mg.t / ms, Mg[0].v.T, label='pre1')
plt.plot(Mg.t / ms, Mg[1].v.T, label='pre2')
plt.plot(Mg.t / ms, Mg[2].v.T, label='post1')
plt.plot(Mg.t / ms, Mg[3].v.T, label='post2')
plt.ylabel('v')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.subplot(223)
plt.plot(Ms.t / ms, Ms.w.T)
plt.ylabel('w')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.subplot(224)
plt.plot(Mg.t / ms, Mg[0].cbf.T, label='pre1')
plt.plot(Mg.t / ms, Mg[1].cbf.T, label='pre2')
plt.ylabel('cbf')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.tight_layout()
fig = plt.figure()
plt.subplot(211)
plt.plot(Mg.t / ms, Mg[2].c_in_tot.T, label='post1')
plt.plot(Mg.t / ms, Mg[3].c_in_tot.T, label='post2')
plt.ylabel('c_in_tot')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.subplot(212)
plt.plot(Mg.t / ms, Mg[0].c_out_tot.T, label='pre1')
plt.plot(Mg.t / ms, Mg[1].c_out_tot.T, label='pre2')
plt.ylabel('c_out_tot')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.legend()
plt.tight_layout()
plt.ion()
plt.show()
plt.draw()
plt.pause(100.0)
plt.close(fig)
def test_synapse_dynamic_complex(self):
# NOTE: spontaneous activity is necessary if the weights are small at the beginning of the simulation.
# Otherwise, the learning rule has no opportunity to update the
# weights.
G = createNeuronGroup(N=64, srate=1 * Hz)
G.c_out_ref = 0.5
S = createCriticalSynapses(G)
S.connect(condition='i != j', p=0.1)
S.w = 0.1 + 0.5 * np.random.uniform(size=len(S))
S.alpha = 0.1
# Input to the network
nbInputs = 8
P = PoissonGroup(nbInputs, np.random.uniform(
low=20.0, high=50.0, size=(nbInputs,)) * Hz)
Si = Synapses(P, G, model='w : 1', on_pre='''v_post += w * int(not_refractory_post)
c_in_tot_post += w * int(not_refractory_post)''')
Si.connect(i=np.arange(nbInputs), j=np.random.permutation(
np.arange(len(G)))[:nbInputs])
Si.w = 0.5
M = SpikeMonitor(G)
Mg = StateMonitor(G, variables=True, record=True,
when='synapses', order=4)
Ms = StateMonitor(S, variables=True, record=True,
when='synapses', order=4)
defaultclock.dt = 1 * ms
net = Network(G, S, P, Si, M, Ms, Mg)
logger.info(scheduling_summary(net))
duration = 120 * second
net.run(duration)
plt.figure()
plt.subplot(211)
plt.plot(M.t / ms, M.i, '.')
plt.ylabel('Neurons')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.subplot(212)
plt.plot(Ms.t / ms, Ms.w.T)
plt.ylabel('Weight')
plt.xlabel('Time [ms]')
plt.xlim([0.0, duration / ms])
plt.tight_layout()
fig = plt.figure()
meanCbf = np.mean(Mg.cbf.T, axis=-1)
stdCbf = np.std(Mg.cbf.T, axis=-1)
plt.plot(Mg.t / ms, meanCbf, color='#1B2ACC')
plt.fill_between(Mg.t / ms, meanCbf - stdCbf, meanCbf + stdCbf,
alpha=0.5, edgecolor='#1B2ACC', facecolor='#089FFF', antialiased=True)
plt.ylabel('cbf')
plt.xlabel('Time [ms]')
plt.ion()
plt.show()
plt.draw()
plt.pause(100.0)
plt.close(fig)
def test_microcircuit(self):
# Create the microcircuit
# NOTE: p_max is chosen so to have an out-degree of N=16
m = Microcircuit(connectivity='small-world', macrocolumnShape=[
2, 2, 2], | |
allowed.
Scalar (=non-array) properties may have a value of NULL (= `None`), any
primitive CIM type, reference type, and string type with embedded instance
or embedded object.
Array properties may be Null or may have elements with a value of NULL, any
primitive CIM type, and string type with embedded instance or embedded
object. Reference types are not allowed in property arrays in CIM, as per
DMTF DSP0004.
:Ivariables:
...
All parameters of `__init__` are set as instance variables.
"""
def __init__(self, name, value, _type=None,
class_origin=None, array_size=None, propagated=None,
is_array=None, reference_class=None, qualifiers=None,
embedded_object=None):
"""
Initialize the `CIMProperty` object.
This function infers optional arguments that are not specified (for
example, it infers `type` from the Python type of `value` and other
information). If the specified arguments are inconsistent, an
exception is raised. If an optional argument is needed for some reason,
an exception is raised.
:Parameters:
name : `unicode` or UTF-8 encoded `str`
Name of the property. Must not be `None`.
value
Value of the property (interpreted as actual value when the
property object is used in an instance, and as default value when
it is used in a class).
For valid types for CIM values, see `cim_types`.
        _type : string
Name of the CIM type of the property (e.g. `'uint8'`).
`None` means that the argument is unspecified, causing the
corresponding instance variable to be inferred. An exception is
raised if it cannot be inferred.
class_origin : `unicode` or UTF-8 encoded `str`
The CIM class origin of the property (the name
of the most derived class that defines or overrides the property in
the class hierarchy of the class owning the property).
`None` means that class origin information is not available.
array_size : `int`
The size of the array property, for fixed-size arrays.
`None` means that the array property has variable size.
propagated : `unicode` or UTF-8 encoded `str`
The CIM *propagated* attribute of the property (the effective value
of the `Propagated` qualifier of the property, which is a string
that specifies the name of the source property from which the
property value should be propagated).
`None` means that propagation information is not available.
is_array : `bool`
A boolean indicating whether the property is an array (`True`) or a
scalar (`False`).
`None` means that the argument is unspecified, causing the
corresponding instance variable to be inferred from the `value`
parameter, and if that is `None` it defaults to `False` (scalar).
reference_class : `unicode` or UTF-8 encoded `str`
The name of the referenced class, for reference properties.
`None` means that the argument is unspecified, causing the
corresponding instance variable to be inferred. An exception is
raised if it cannot be inferred.
qualifiers : `dict` or `NocaseDict`
A dictionary specifying CIM qualifier values.
The dictionary keys must be the qualifier names. The dictionary
values must be `CIMQualifier` objects specifying the qualifier
values.
`None` means that there are no qualifier values. In all cases,
the `qualifiers` instance variable will be a `NocaseDict` object.
embedded_object : string
A string value indicating the kind of
embedded object represented by the property value. The following
values are defined for this argument:
`'instance'`: The property is declared with the
`EmbeddedInstance` qualifier, indicating that the property
value is an embedded instance of a known class name (or Null).
`'object'`: The property is declared with the
`EmbeddedObject` qualifier, indicating that the property
value is an embedded object (instance or class) of which the
class name is not known (or Null).
`None` means that the argument is unspecified, causing the
corresponding instance variable to be inferred. An exception is
raised if it cannot be inferred.
Examples:
* `CIMProperty("MyString", "abc")`
-> a string property
* `CIMProperty("MyNum", 42, "uint8")`
-> a uint8 property
* `CIMProperty("MyNum", Uint8(42))`
-> a uint8 property
* `CIMProperty("MyNumArray", [1,2,3], "uint8")`
-> a uint8 array property
* `CIMProperty("MyRef", CIMInstanceName(...))`
-> a reference property
* `CIMProperty("MyEmbObj", CIMClass(...))`
-> an embedded object property containing a class
* `CIMProperty("MyEmbObj", CIMInstance(...),
embedded_object='object')`
-> an embedded object property containing an instance
* `CIMProperty("MyEmbInst", CIMInstance(...))`
-> an embedded instance property
* `CIMProperty("MyString", None, "string")`
-> a string property that is Null
* `CIMProperty("MyNum", None, "uint8")`
-> a uint8 property that is Null
* `CIMProperty("MyRef", None, reference_class="MyClass")`
-> a reference property that is Null
* `CIMProperty("MyEmbObj", None, embedded_object='object')`
-> an embedded object property that is Null
* `CIMProperty("MyEmbInst", None, embedded_object='instance')`
-> an embedded instance property that is Null
:Raises:
:raise TypeError:
:raise ValueError:
"""
# Check `name`
if name is None:
raise ValueError('Property must have a name')
# General checks:
if embedded_object not in (None, 'instance', 'object'):
raise ValueError('Property %r specifies an invalid ' \
'embedded_object=%r' % (name, embedded_object))
if is_array not in (None, True, False):
raise ValueError('Property %r specifies an invalid ' \
'is_array=%r' % (name, is_array))
# Set up is_array
if isinstance(value, (list, tuple)):
is_array = _intended_value(True,
None, is_array, 'is_array',
'Property {} has a value that is an array ({})'
.format(name, type(value)))
elif value is not None: # Scalar value
is_array = _intended_value(False,
None, is_array, 'is_array',
'Property {} has a value that is a scalar ({})'
.format(name, type(value)))
else: # Null value
if is_array is None:
is_array = False # For compatibility with old default
if not is_array and array_size is not None:
raise ValueError('Scalar property %r specifies array_size=%r ' \
'(must be None)' % (name, array_size))
# Determine type, embedded_object, and reference_class attributes.
# Make sure value is CIM-typed.
if is_array: # Array property
if reference_class is not None:
raise ValueError(
'Array property %r cannot specify reference_class' % name)
elif value is None or len(value) == 0 or value[0] is None:
# Cannot infer from value, look at embedded_object and type
if embedded_object == 'instance':
msg = 'Array property %r contains embedded instances' % name
_type = _intended_value('string',
None, _type, 'type', msg)
elif embedded_object == 'object':
msg = 'Array property %r contains embedded objects' % name
_type = _intended_value('string',
None, _type, 'type', msg)
elif _type is not None:
# Leave type as specified, but check it for validity
# no need to check type because length is 0
# dummy_type_obj = cim_types.type_from_name(type)
pass
else:
raise ValueError(
'Cannot infer type of array property %r that is ' \
'Null, empty, or has Null as its first element' % \
name)
elif isinstance(value[0], CIMInstance):
msg = 'Array property %r contains CIMInstance values' % name
# do not check type here
# type value could be different class names.
embedded_object = _intended_value(('instance', 'object'),
None, embedded_object,
'embedded_object', msg)
elif isinstance(value[0], CIMClass):
msg = 'Array property %r contains CIMClass values' % name
_type = _intended_value('string',
None, _type, 'type', msg)
embedded_object = _intended_value('object',
None, embedded_object,
'embedded_object', msg)
elif isinstance(value[0], (datetime, timedelta)):
value = [cim_types.CIMDateTime(val) if val is not None
else val for val in value]
msg = 'Array property %r contains datetime or timedelta ' \
'values' % name
_type = _intended_value('datetime',
None, _type, 'type', msg)
embedded_object = _intended_value(None,
None, embedded_object,
'embedded_object', msg)
elif _type == 'datetime':
value = [cim_types.CIMDateTime(val) if val is not None
and not isinstance(val,
cim_types.CIMDateTime)
else val for val in value]
msg = 'Array property %r specifies CIM type %r' % (name, _type)
embedded_object = _intended_value(None,
None, embedded_object,
'embedded_object', msg)
elif _type is None:
# Determine simple type from (non-Null) value
_type = cim_types.cimtype(value[0])
msg = 'Array property %r contains simple typed values ' \
'with no CIM type specified' % name
embedded_object = _intended_value(None,
None, embedded_object,
'embedded_object', msg)
else: # type is specified and value (= entire array) is not Null
# Make sure the array elements are of the corresponding Python
# type.
value = [
cim_types.type_from_name(_type)(val) if val is not None
else val for val in value]
msg = 'Array property %r contains simple typed values ' \
'and specifies CIM type %r' % (name, _type)
embedded_object = _intended_value(None,
None, embedded_object,
'embedded_object', msg)
else: | |
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
int : 1 if in partial closure, 0 otherwise
"""
force_limit = None
if params is None:
return 0
force_limit = params.force_limits
target_wrench = params.target_wrench
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# reorganize the grasp matrix for easier constraint enforcement in optimization
num_fingers = normals.shape[1]
        num_wrenches_per_finger = forces.shape[1] // num_fingers  # integer division: used to build slice indices below
G = np.zeros([6, 0])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
G_i = PointGraspMetrics3D.grasp_matrix(forces[:, start_i:end_i], torques[:, start_i:end_i],
normals[:, i:i + 1],
soft_fingers, params=params)
G = np.c_[G, G_i]
wrench_resisted, _ = PointGraspMetrics3D.wrench_in_positive_span(G, target_wrench, force_limit, num_fingers,
wrench_norm_thresh=wrench_norm_thresh,
wrench_regularizer=wrench_regularizer)
return 1 * wrench_resisted
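    # Hypothetical usage sketch (not part of the original API): partial_closure
    # and wrench_resistance only need `params` to expose `target_wrench`,
    # `force_limits` and a `keys()` method, so a minimal stand-in object is
    # enough for quick experiments:
    #
    #     class _Params(dict):
    #         def __getattr__(self, key):
    #             return self[key]
    #
    #     params = _Params(target_wrench=np.array([0., 0., 1., 0., 0., 0.]),
    #                      force_limits=10.0)
    #     PointGraspMetrics3D.partial_closure(forces, torques, normals,
    #                                         params=params)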
@staticmethod
def wrench_resistance(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
finger_force_eps=1e-9, params=None):
""" Evalutes wrench resistance: the inverse norm of the contact forces required to resist a target wrench
Estimates resistance by sollving a quadratic program (min normal contact forces to produce a wrench).
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
finger_force_eps : float
small float to prevent numeric issues in wrench resistance metric
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of wrench resistance metric
"""
force_limit = None
if params is None:
return 0
force_limit = params.force_limits
target_wrench = params.target_wrench
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
if 'finger_force_eps' in list(params.keys()):
finger_force_eps = params.finger_force_eps
# reorganize the grasp matrix for easier constraint enforcement in optimization
num_fingers = normals.shape[1]
        num_wrenches_per_finger = forces.shape[1] // num_fingers  # integer division: used to build slice indices below
G = np.zeros([6, 0])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
G_i = PointGraspMetrics3D.grasp_matrix(forces[:, start_i:end_i], torques[:, start_i:end_i],
normals[:, i:i + 1],
soft_fingers, params=params)
G = np.c_[G, G_i]
# compute metric from finger force norm
Q = 0
wrench_resisted, finger_force_norm = PointGraspMetrics3D.wrench_in_positive_span(G, target_wrench, force_limit,
num_fingers,
wrench_norm_thresh=wrench_norm_thresh,
wrench_regularizer=wrench_regularizer)
if wrench_resisted:
Q = 1.0 / (finger_force_norm + finger_force_eps) - 1.0 / (2 * force_limit)
return Q
@staticmethod
def min_singular(forces, torques, normals, soft_fingers=False, params=None):
""" Min singular value of grasp matrix - measure of wrench that grasp is "weakest" at resisting.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of smallest singular value
"""
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
min_sig = S[5]
return min_sig
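    # For reference: grasp_matrix() returns a 6 x N matrix (three force rows,
    # three torque rows), so S holds six singular values and S[5] is the
    # smallest one; a grasp with S[5] near zero has a wrench direction it can
    # barely resist.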
@staticmethod
def wrench_volume(forces, torques, normals, soft_fingers=False, params=None):
""" Volume of grasp matrix singular values - score of all wrenches that the grasp can resist.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of wrench volume
"""
k = 1
if params is not None and 'k' in list(params.keys()):
k = params.k
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
sig = S
return k * np.sqrt(np.prod(sig))
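    # Worked relation: det(G @ G.T) = prod(S)**2, so the value returned above
    # equals k * det(G @ G.T) ** 0.25 when G has full row rank.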
@staticmethod
def grasp_isotropy(forces, torques, normals, soft_fingers=False, params=None):
""" Condition number of grasp matrix - ratio of "weakest" wrench that the grasp can exert to the "strongest" one.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of grasp isotropy metric
"""
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
max_sig = S[0]
min_sig = S[5]
isotropy = min_sig / max_sig
if np.isnan(isotropy) or np.isinf(isotropy):
return 0
return isotropy
@staticmethod
def ferrari_canny_L1(forces, torques, normals, soft_fingers=False, params=None,
wrench_norm_thresh=1e-3,
wrench_regularizer=1e-10):
""" Ferrari & Canny's L1 metric. Also known as the epsilon metric.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float : value of metric
"""
if params is not None and 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if params is not None and 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# create grasp matrix
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals,
soft_fingers, params=params)
s = time.time()
        # compute the convex hull of the primitive wrenches (columns of G)
hull = cvh.ConvexHull(G.T)
# TODO: suppress ridiculous amount of output for perfectly valid input to qhull
e = time.time()
logging.debug('CVH took %.3f sec' % (e - s))
debug = False
if debug:
fig = plt.figure()
torques = G[3:, :].T
ax = Axes3D(fig)
ax.scatter(torques[:, 0], torques[:, 1], torques[:, 2], c='b', s=50)
ax.scatter(0, 0, 0, c='k', s=80)
ax.set_xlim3d(-1.5, 1.5)
ax.set_ylim3d(-1.5, 1.5)
ax.set_zlim3d(-1.5, 1.5)
ax.set_xlabel('tx')
ax.set_ylabel('ty')
ax.set_zlabel('tz')
plt.show()
if len(hull.vertices) == 0:
logging.warning('Convex hull could not be computed')
return 0.0
# determine whether or not zero is in the convex hull
s = time.time()
min_norm_in_hull, v = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
e = time.time()
logging.debug('Min norm took %.3f sec' % (e - s))
# print("shunang",min_norm_in_hull)
# if norm is greater than 0 then forces are outside of hull
if min_norm_in_hull > wrench_norm_thresh:
logging.debug('Zero not in convex hull')
return 0.0
# if there are fewer nonzeros than D-1 (dim of space minus one)
# then zero is on the boundary and therefore we do not have
# force closure
if np.sum(v > 1e-4) <= G.shape[0] - 1:
logging.warning('Zero not in interior of convex hull')
return 0.0
# find minimum norm vector across all facets of convex hull
s = time.time()
min_dist = sys.float_info.max
closest_facet = None
# print("shunang",G)
for v in hull.vertices:
if np.max(np.array(v)) < G.shape[1]: # because of some occasional odd behavior from pyhull
facet = G[:, v]
# print("shunang1",facet)
dist, _ = PointGraspMetrics3D.min_norm_vector_in_facet(facet, wrench_regularizer=wrench_regularizer)
if dist < min_dist:
min_dist = dist
closest_facet = v
e = time.time()
logging.debug('Min dist took %.3f sec for %d vertices' % (e - s, len(hull.vertices)))
return min_dist
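    # Interpretation: the returned value is the classic epsilon quality -- a
    # measure of the distance from the wrench-space origin to the boundary of
    # the convex hull of primitive contact wrenches.  Larger is better, and a
    # value of zero means the grasp is not in force closure.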
@staticmethod
def ferrari_canny_L1_force_only(forces, torques, normals, soft_fingers=False, params=None,
wrench_norm_thresh=1e-3,
wrench_regularizer=1e-10):
""" Ferrari & Canny's L1 metric with force only. Also known as the epsilon metric.
Parameters
----------
forces | |
from __future__ import division, print_function, absolute_import
import os, sys
import numpy as np
import tensorflow as tf
import cPickle
from tfutils import base, data, optimizer
import json
import copy
import argparse
from utils import *
from data_provider import *
sys.path.append('../normal_pred/')
import normal_encoder_asymmetric_with_bypass
import combinet_builder
host = os.uname()[1]
BATCH_SIZE = 32
IMAGE_SIZE_CROP = 224
NUM_CHANNELS = 3
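# Hypothetical entry-point sketch (the real main block appears further down in
# this file and may differ): the parser below is typically consumed roughly as
#
#     args = get_parser().parse_args()
#     # ... build the tfutils params dict from args ...
#     base.train_from_params(**params)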
def get_parser():
parser = argparse.ArgumentParser(description='The script to train the combine net')
# General settings
parser.add_argument('--nport', default = 27017, type = int, action = 'store', help = 'Port number of mongodb')
parser.add_argument('--expId', default = "combinet_test", type = str, action = 'store', help = 'Name of experiment id')
parser.add_argument('--loadexpId', default = None, type = str, action = 'store', help = 'Name of experiment id')
parser.add_argument('--cacheDirPrefix', default = "/mnt/fs1/chengxuz", type = str, action = 'store', help = 'Prefix of cache directory')
parser.add_argument('--innerargs', default = [], type = str, action = 'append', help = 'Arguments for every network')
parser.add_argument('--with_rep', default = 0, type = int, action = 'store', help = 'Whether reporting other losses every batch')
parser.add_argument('--with_grad', default = 0, type = int, action = 'store', help = 'Whether with gradients reporting')
parser.add_argument('--with_train', default = 0, type = int, action = 'store', help = 'Whether with training dataset')
parser.add_argument('--with_recdata', default = 0, type = int, action = 'store', help = 'Whether with second database setting')
parser.add_argument('--nport_rec', default = 27007, type = int, action = 'store', help = 'Port for second database')
parser.add_argument('--valid_first', default = 0, type = int, action = 'store', help = 'Whether validating first')
parser.add_argument('--loadport', default = None, type = int, action = 'store', help = 'Port number of mongodb for loading')
parser.add_argument('--with_feat', default = 1, type = int, action = 'store', help = 'Whether adding the feat validation')
parser.add_argument('--loadstep', default = None, type = int, action = 'store', help = 'Number of steps for loading')
# Network related
parser.add_argument('--pathconfig', default = "normals_config_fcnvgg16_withdepth.cfg", type = str, action = 'store', help = 'Path to config file')
parser.add_argument('--dataconfig', default = "dataset_config.cfg", type = str, action = 'store', help = 'Path to config file for dataset')
parser.add_argument('--valdconfig', default = None, type = str, action = 'store', help = 'Validation dataset config, default to be None, and will copy to other configs below')
parser.add_argument('--topndconfig', default = None, type = str, action = 'store', help = 'Path to config file for dataset, for topn validation')
parser.add_argument('--featdconfig', default = None, type = str, action = 'store', help = 'Path to config file for dataset, for feats validation')
parser.add_argument('--modeldconfig', default = None, type = str, action = 'store', help = 'Path to config file for dataset, for model in validation')
parser.add_argument('--seed', default = 0, type = int, action = 'store', help = 'Random seed for model')
parser.add_argument('--namefunc', default = "combine_normal_tfutils_new", type = str, action = 'store', help = 'Name of function to build the network')
parser.add_argument('--valinum', default = -1, type = int, action = 'store', help = 'Number of validation steps, default is -1, which means all the validation')
parser.add_argument('--cache_filter', default = 0, type = int, action = 'store', help = 'Whether cache the pretrained weights as tf tensors')
parser.add_argument('--fix_pretrain', default = 0, type = int, action = 'store', help = 'Whether fix the pretrained weights')
parser.add_argument('--extra_feat', default = 0, type = int, action = 'store', help = 'Whether to add normal and depth outputs for ImageNet and PlaceNet, default is 0, which means no')
# Loss related
parser.add_argument('--whichloss', default = 0, type = int, action = 'store', help = 'Whether to use new loss') # Deprecated for now
parser.add_argument('--depth_norm', default = 8000, type = int, action = 'store', help = 'Coefficient for depth loss')
parser.add_argument('--label_norm', default = 20, type = float, action = 'store', help = 'Coefficient for label loss')
parser.add_argument('--depthloss', default = 0, type = int, action = 'store', help = 'Whether to use new depth loss')
parser.add_argument('--normalloss', default = 0, type = int, action = 'store', help = 'Whether to use new normal loss')
parser.add_argument('--multtime', default = 1, type = int, action = 'store', help = '1 means original, larger than 1 means multiple time points')
parser.add_argument('--trainable', default = 0, type = int, action = 'store', help = 'Whether use trainable weights')
parser.add_argument('--nfromd', default = 0, type = int, action = 'store', help = 'Whether calculating the normals from depth')
parser.add_argument('--ret_dict', default = 0, type = int, action = 'store', help = '1 means returning dict for loss_withcfg, default is 0')
parser.add_argument('--combine_dict', default = 0, type = int, action = 'store', help = '1 means combining 5 datasets to 2, default is 0')
parser.add_argument('--self_order', default = None, type = str, action = 'store', help = 'None means default, otherwise, it should be separated by ","')
parser.add_argument('--print_loss', default = 0, type = int, action = 'store', help = '1 means printing loss us tf.Print, default is 0')
# Training related
parser.add_argument('--batchsize', default = None, type = int, action = 'store', help = 'Batch size')
parser.add_argument('--valbatchsize', default = None, type = int, action = 'store', help = 'Validation Batch size')
parser.add_argument('--queuecap', default = None, type = int, action = 'store', help = 'Queue capacity')
parser.add_argument('--init_stddev', default = .01, type = float, action = 'store', help = 'Init stddev for convs')
parser.add_argument('--init_type', default = 'xavier', type = str, action = 'store', help = 'Init type')
parser.add_argument('--n_threads', default = 4, type = int, action = 'store', help = 'Number of threads')
parser.add_argument('--val_n_threads', default = 1, type = int, action = 'store', help = 'Number of threads for validation')
## Learning rate, optimizers
parser.add_argument('--init_lr', default = .01, type = float, action = 'store', help = 'Init learning rate')
parser.add_argument('--whichopt', default = 0, type = int, action = 'store', help = 'Choice of the optimizer, 0 means momentum, 1 means Adam')
parser.add_argument('--adameps', default = 0.1, type = float, action = 'store', help = 'Epsilon for adam, only used when whichopt is 1')
parser.add_argument('--adambeta1', default = 0.9, type = float, action = 'store', help = 'Beta1 for adam, only used when whichopt is 1')
parser.add_argument('--adambeta2', default = 0.999, type = float, action = 'store', help = 'Beta2 for adam, only used when whichopt is 1')
parser.add_argument('--withclip', default = 1, type = int, action = 'store', help = 'Whether do clip')
## Saving metric
parser.add_argument('--fre_valid', default = 10000, type = int, action = 'store', help = 'Frequency of the validation')
parser.add_argument('--fre_metric', default = 1000, type = int, action = 'store', help = 'Frequency of the saving metrics')
parser.add_argument('--fre_filter', default = 10000, type = int, action = 'store', help = 'Frequency of the saving filters')
## Dataset related
parser.add_argument('--whichscenenet', default = 0, type = int, action = 'store', help = 'Choice of the scenenet, 0 means all, 1 means the compressed version, 2 means the new png version')
parser.add_argument('--whichscannet', default = 0, type = int, action = 'store', help = 'Choice of the scannet, 0 means original, 1 means the smaller new version')
parser.add_argument('--whichimagenet', default = 0, type = int, action = 'store', help = 'Choice of the imagenet, 0 means original, 1 means the smaller new version')
parser.add_argument('--whichcoco', default = 0, type = int, action = 'store', help = 'Which coco dataset to use, 0 means original, 1 means the one without 0 instance')
parser.add_argument('--as_list', default = 0, type = int, action = 'store', help = 'Whether handling init_ops as dicts or not, if as dicts, enqueue and dequeue will be done separately to each dict')
parser.add_argument('--which_place', default = 0, type = int, action = 'store', help = 'Which place dataset to use, 1 means only part')
## Preprocessing related
parser.add_argument('--twonormals', default = 0, type = int, action = 'store', help = 'Whether having two normal readouts, 0 means no')
parser.add_argument('--depthnormal', default = 0, type = int, action = 'store', help = 'Whether to normalize the depth input')
parser.add_argument('--ignorebname', default = 0, type = int, action = 'store', help = 'Whether ignore the batch name')
parser.add_argument('--categorymulti', default = 1, type = int, action = 'store', | |
# -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Base class for analog Modules."""
from typing import Any, Dict, List, Optional, Tuple, Union, TYPE_CHECKING
from torch import device as torch_device
from torch import Tensor
from torch.nn import Module, Parameter
from aihwkit.exceptions import ModuleError
from aihwkit.simulator.configs import (
FloatingPointRPUConfig, InferenceRPUConfig, SingleRPUConfig,
UnitCellRPUConfig
)
from aihwkit.simulator.tiles import InferenceTile
if TYPE_CHECKING:
from aihwkit.simulator.tiles import BaseTile
RPUConfigAlias = Union[FloatingPointRPUConfig, SingleRPUConfig,
UnitCellRPUConfig, InferenceRPUConfig]
class AnalogModuleBase(Module):
"""Base class for analog Modules.
Base ``Module`` for analog layers that use analog tiles. When subclassing,
please note:
* the ``_setup_tile()`` method is expected to be called by the subclass
constructor, and it does not only create a tile, but also sets some
instance attributes that are needed by the analog features (optimizer
and others).
* the ``weight`` and ``bias`` Parameters are not guaranteed to be in
sync with the tile weights and biases during the lifetime of the instance,
for performance reasons. The canonical way of reading and writing
weights is via the ``set_weights()`` and ``get_weights()`` as opposed
to using the attributes directly.
* the ``BaseTile`` subclass that is created is retrieved from the
``rpu_config.tile_class`` attribute.
"""
# pylint: disable=abstract-method
_analog_tile_counter: int = 0
def register_analog_tile(self, tile: 'BaseTile') -> None:
"""Register the analog context of the tile.
Note:
            Needs to be called at the end of ``__init__`` to register the tile
for the analog optimizers.
Args:
tile: tile to register
"""
self.register_parameter('analog_ctx_' + str(self._analog_tile_counter),
tile.get_analog_ctx())
if tile.shared_weights is not None:
if not isinstance(tile.shared_weights, Parameter):
tile.shared_weights = Parameter(tile.shared_weights)
self.register_parameter('analog_shared_weights_' + str(self._analog_tile_counter),
tile.shared_weights)
self._analog_tile_counter += 1
def unregister_parameter(self, param_name: str) -> None:
"""Unregister module parameter from parameters.
Raises:
ModuleError: In case parameter is not found
"""
param = getattr(self, param_name, None)
if not isinstance(param, Parameter):
raise ModuleError(f"Cannot find parameter {param_name} to unregister")
param_data = param.detach().clone()
delattr(self, param_name)
setattr(self, param_name, param_data)
def _setup_tile(
self,
in_features: int,
out_features: int,
bias: bool,
rpu_config: Optional[RPUConfigAlias] = None,
realistic_read_write: bool = False,
weight_scaling_omega: float = 0.0
) -> 'BaseTile':
"""Create an analog tile and setup this layer for using it.
Create an analog tile to be used for the basis of this layer operations,
and setup additional attributes of this instance that are needed for
using the analog tile.
If ``weight_scaling_omega`` is larger than 0, the weights are set in a
scaled manner (assuming a digital output scale). See
:meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`
for details.
Note:
This method also sets the following attributes, which are assumed
to be set by the rest of the methods:
* ``self.use_bias``
* ``self.realistic_read_write``
* ``self.weight_scaling_omega``
* ``self.in_features``
* ``self.out_features``
Args:
in_features: input vector size (number of columns).
out_features: output vector size (number of rows).
rpu_config: resistive processing unit configuration.
bias: whether to use a bias row on the analog tile or not.
realistic_read_write: whether to enable realistic read/write
for setting initial weights and read out of weights.
weight_scaling_omega: the weight value where the max
weight will be scaled to. If zero, no weight scaling will
be performed
Returns:
An analog tile with the requested parameters.
"""
# pylint: disable=attribute-defined-outside-init, protected-access
# Default to constant step device if not provided.
if not rpu_config:
rpu_config = SingleRPUConfig()
# Setup the analog-related attributes of this instance.
self.use_bias = bias
self.realistic_read_write = realistic_read_write
self.weight_scaling_omega = weight_scaling_omega
self.in_features = in_features
self.out_features = out_features
# Create the tile.
return rpu_config.tile_class(out_features, in_features, rpu_config, bias=bias)
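    # Note on the tile created above: the concrete class comes from
    # ``rpu_config.tile_class``; for example an ``InferenceRPUConfig`` yields
    # an ``InferenceTile`` (imported at the top of this module), while a
    # ``SingleRPUConfig`` yields a regular analog training tile.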
def set_weights(
self,
weight: Tensor,
bias: Optional[Tensor] = None,
force_exact: bool = False
) -> None:
"""Set the weight (and bias) with given Tensors.
        This uses a realistic write if the property ``realistic_read_write``
        of the layer is set, unless it is overridden by ``force_exact``. It
uses a scaled write if ``weight_scaling_omega`` is positive (see
:meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`).
Note:
This is the recommended way for setting the weight/bias matrix of
the analog tile, as it will correctly store the weights into the
internal memory. Directly writing to ``self.weight`` and
``self.bias`` might yield wrong results as they are not always in
sync with the analog tile Parameters, for performance reasons.
Args:
weight: weight matrix
bias: bias vector
force_exact: forces an exact write to the analog tiles
"""
shape = [self.out_features, self.in_features]
weight = weight.clone().reshape(shape)
realistic = self.realistic_read_write and not force_exact
if self.weight_scaling_omega > 0.0:
self.analog_tile.set_weights_scaled(weight, bias, realistic=realistic,
omega=self.weight_scaling_omega)
else:
self.analog_tile.set_weights(weight, bias, realistic=realistic)
self._sync_weights_from_tile()
def get_weights(
self,
force_exact: bool = False
) -> Tuple[Tensor, Optional[Tensor]]:
"""Get the weight (and bias) tensors.
        This uses a realistic read if the property ``realistic_read_write`` of
        the layer is set, unless it is overridden by ``force_exact``. It
scales the analog weights by the digital alpha scale if
``weight_scaling_omega`` is positive (see
:meth:`~aihwkit.simulator.tiles.base.BaseTile.get_weights_scaled`).
Note:
This is the recommended way for setting the weight/bias matrix from
the analog tile, as it will correctly fetch the weights from the
internal memory. Accessing ``self.weight`` and ``self.bias`` might
yield wrong results as they are not always in sync with the
analog tile library, for performance reasons.
Args:
force_exact: forces an exact read to the analog tiles
Returns:
tuple: weight matrix, bias vector
"""
realistic = self.realistic_read_write and not force_exact
if self.weight_scaling_omega > 0.0:
return self.analog_tile.get_weights_scaled(realistic=realistic)
return self.analog_tile.get_weights(realistic=realistic)
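    # Hypothetical usage sketch (AnalogLinear is one concrete layer built on
    # this base class; the import path is assumed to be aihwkit.nn):
    #
    #     from aihwkit.nn import AnalogLinear
    #     layer = AnalogLinear(4, 2, bias=True)
    #     weight, bias = layer.get_weights()       # reads back from the tile
    #     layer.set_weights(weight * 0.5, bias)    # writes through to the tile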
def _sync_weights_from_tile(self) -> None:
"""Update the layer weight and bias from the values on the analog tile.
Update the ``self.weight`` and ``self.bias`` Parameters with an
exact copy of the internal analog tile weights.
"""
tile_weight, tile_bias = self.get_weights(force_exact=True)
self.weight.data[:] = tile_weight.reshape(self.weight.shape)
if self.use_bias:
self.bias.data[:] = tile_bias.reshape(self.bias.shape) # type: ignore
def _sync_weights_to_tile(self) -> None:
"""Update the tile values from the layer weights and bias.
Update the internal tile weights with an exact copy of the values of
the ``self.weight`` and ``self.bias`` Parameters.
"""
self.set_weights(self.weight, self.bias, force_exact=True)
def _load_from_state_dict(
self,
state_dict: Dict,
prefix: str,
local_metadata: Dict,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str]) -> None:
"""Copy parameters and buffers from `state_dict` into only this
module, but not its descendants.
This method is a specialization of ``Module._load_from_state_dict``
that takes into account the extra ``analog_tile_state`` key used by
analog layers.
"""
key = '{}analog_tile_state'.format(prefix)
if key in state_dict:
analog_state = state_dict.pop(key)
self.analog_tile.__setstate__(analog_state)
elif strict:
missing_keys.append(key)
# update the weight / bias (not saved explicitly)
self._sync_weights_from_tile()
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys,
unexpected_keys, error_msgs)
def state_dict(
self,
destination: Any = None,
prefix: str = '',
keep_vars: bool = False
) -> Dict:
"""Return a dictionary containing a whole state of the module."""
self._sync_weights_from_tile()
# TODO: this will also pickle the resistive device. Problematic? we
# could also just save hidden_pars and weights. However, in any case the
# state_dict will not reflect the model.parameters() any more, which
# might get tricky. In any case, internal hidden weights need to be
# saved to reconstruct a meaningful analog tile
analog_state = self.analog_tile.__getstate__()
current_state = super().state_dict(destination, prefix, keep_vars)
current_state['{}analog_tile_state'.format(prefix)] = analog_state
return current_state
def cpu(self) -> 'AnalogModuleBase':
"""Move all model parameters, buffers and tiles to the CPU.
Note:
Please be aware that moving analog layers from GPU to CPU is
currently not supported.
Returns:
This layer with its parameters, buffers and tiles in CPU.
"""
# pylint: disable=attribute-defined-outside-init
super().cpu()
self.analog_tile = self.analog_tile.cpu() # type: BaseTile
self.set_weights(self.weight, self.bias)
return self
def cuda(
self,
device: Optional[Union[torch_device, str, int]] = None
) -> 'AnalogModuleBase':
"""Move all model parameters, buffers and tiles to the GPU.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on GPU while being optimized.
Arguments:
device (int, optional): if specified, all parameters will be
copied to that GPU device
Returns:
This layer with its parameters, buffers and tiles in GPU.
"""
# | |
from typing import List
from urllib import response
import discord
from discord import app_commands, TextStyle
from discord.ext import commands, tasks
from discord.ext.commands import Cog
import sys
sys.path.append('../')
from main import config_load, increment_command_counter, ClanBankTransaction, Guild
from datetime import datetime, timedelta, timezone
import re
import copy
from bs4 import BeautifulSoup
import math
from utils import is_int, max_cash, get_coins_image_name, digits
import traceback
import io
import imageio
from skimage import img_as_uint
config = config_load()
yellow = [255, 255, 0, 255]
white = [255, 255, 255, 255]
green = [0, 255, 131, 255]
red = [255, 50, 50, 255]
char_index = {'K': 10, 'M': 11, '-': 12}
def enlarge_digit(digit, factor):
'''
    Doubles the size of a digit bitmap (factor - 1) times.
'''
for f in range(factor-1):
ldigit = []
for row in digit:
lrow = [row[int(i/2)] for i in range(len(row)*2)]
ldigit.append(lrow)
ldigit.append(lrow)
digit = ldigit
return digit
def draw_char(img, char, x, y, c, size):
'''
Draws a character on an image at (x, y)
'''
colour = c
if img.shape[2] == 3 and len(c) > 3:
colour = colour[:3]
elif img.shape[2] == 4 and len(c) < 4:
colour.append(255)
digit = digits[int(char) if is_int(char) else char_index[char]]
pixels = enlarge_digit(digit, size)
x_0 = x
for row in reversed(pixels):
x = x_0
for value in reversed(row):
if value == 1:
img[y, x] = colour
x -= 1
y -= 1
return (x-1, y)
def draw_gp(img, amount):
'''
Draw an amount over an image of RuneScape coins.
'''
colour = green if amount >= 10000000 else white if amount >= 100000 else yellow if amount >= 0 else red
amount = round(amount, -6) if abs(amount) >= 10000000 else round(amount, -3) if abs(amount) >= 100000 else amount
amount_str = str(amount)
if amount >= 10000000 or amount <= -10000000:
amount_str = amount_str[::-1].replace('000000', 'M', 1)[::-1]
elif amount >= 100000 or amount <= -100000:
amount_str = amount_str[::-1].replace('000', 'K', 1)[::-1]
size = 5
for i, char in enumerate(amount_str):
draw_char(img, char, (int(5*(2**size)/2)-1)*(i+1)+i*(2**size), int(8*(2**size)/2)-1, colour, size)
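# Hedged walk-through (illustration only, not in the original source) of the
# colour and abbreviation rules applied by draw_gp:
#
#     12_345_678  -> rounded to 12_000_000, drawn as "12M"   in green
#     250_000     -> drawn as "250K"                         in white
#     999         -> drawn as "999"                          in yellow
#     -5_000_000  -> drawn as "-5000K"                       in red
#
# The reversed-string replace collapses only the trailing group of zeros.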
def get_coins_image(amount):
'''
Get an image for the given amount of coins.
'''
# Get base coins image
coins = imageio.imread(f'images/{get_coins_image_name(amount)}.png')
# Draw amount
draw_gp(coins, amount)
imageio.imwrite('images/coins.png', coins)
with open('images/coins.png', 'rb') as f:
coins_image = io.BytesIO(f.read())
coins_image = discord.File(coins_image, filename='coins.png')
return coins_image
async def get_transactions(guild_id: int):
'''
Gets all ClanBankTransaction for a given guild.
'''
return await ClanBankTransaction.query.where(ClanBankTransaction.guild_id == guild_id).gino.all()
async def create_transaction(amount: int, description: str, guild_id: int, member_id: int):
'''
Creates a ClanBankTransaction.
'''
await ClanBankTransaction.create(amount=amount, description=description, guild_id=guild_id, member_id=member_id, time=datetime.utcnow())
class Dropdown(discord.ui.Select):
def __init__(self, options):
# The placeholder is what will be shown when no option is chosen
# The min and max values indicate we can only pick one of the options
# The options parameter defines the dropdown options. We defined this above
super().__init__(placeholder='Choose a role...', min_values=1, max_values=1, options=options, custom_id='bank_role_select')
async def callback(self, interaction: discord.Interaction):
# Use the interaction object to update the guild bank_role_id.
# The self object refers to the Select object,
# and the values attribute gets a list of the user's
# selected options. We only want the first one.
role = interaction.guild.get_role(int(self.values[0]))
guild = await Guild.get(interaction.guild.id)
await guild.update(bank_role_id=role.id).apply()
await interaction.response.send_message(f'The bank management role has been set to `{role.name}`', ephemeral=True)
class SelectRoleView(discord.ui.View):
def __init__(self, bot, guild):
super().__init__()
self.bot = bot
# Get options for role dropdown
options = [discord.SelectOption(label=role.name, value=str(role.id)) for role in sorted(guild.roles, reverse=True)]
if len(options) > 25:
options = options[:25]
# Adds the dropdown to our view object.
self.add_item(Dropdown(options))
class ConfirmView(discord.ui.View):
def __init__(self, bot):
super().__init__(timeout=None)
self.bot = bot
self.value = None
@discord.ui.button(label='Cancel', style=discord.ButtonStyle.danger, custom_id='bank_tx_cancel_button')
async def cancel(self, interaction: discord.Interaction, button: discord.ui.Button):
# Validate permissions
if not interaction.user.id == int(interaction.message.embeds[0].footer.text.replace('User ID: ', '')):
await interaction.response.send_message('Only the creator of a transaction can cancel it', ephemeral=True)
return
# Update message
embed = interaction.message.embeds[0]
embed.set_footer(text=f'Cancelled by {interaction.user.display_name}', icon_url='https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/160/twitter/322/cross-mark_274c.png')
await interaction.message.edit(embed=embed, view=None)
await interaction.response.send_message('Transaction cancelled successfully.', ephemeral=True)
self.value = False
self.stop()
@discord.ui.button(label='Confirm', style=discord.ButtonStyle.success, custom_id='bank_tx_confirm_button')
async def confirm(self, interaction: discord.Interaction, button: discord.ui.Button):
# Validate permissions
if not interaction.user.id == int(interaction.message.embeds[0].footer.text.replace('User ID: ', '')):
await interaction.response.send_message('Only the creator of a transaction can confirm it', ephemeral=True)
return
# Handle confirm
status = await self.confirm_handler(interaction)
if status != 'success':
await interaction.response.send_message(status, ephemeral=True)
return
# Update message
embed = interaction.message.embeds[0]
embed.set_footer(text=f'Confirmed by {interaction.user.display_name}', icon_url='https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/thumbs/160/twitter/322/check-mark-button_2705.png')
await interaction.message.edit(attachments=interaction.message.attachments, embed=embed, view=None)
await interaction.response.send_message('Transaction confirmed successfully.', ephemeral=True)
self.value = True
self.stop()
async def confirm_handler(self, interaction: discord.Interaction) -> str:
'''
Parses data from an addition / subtraction embed and processes the transaction.
'''
user_id = int(interaction.message.embeds[0].footer.text.replace('User ID: ', ''))
member = interaction.guild.get_member(user_id)
if not member:
member = await interaction.guild.fetch_member(user_id)
if not member:
return 'Error: member not found'
# Parse message
amount = int(interaction.message.embeds[0].fields[0].value)
if 'subtraction' in interaction.message.embeds[0].title:
amount = -amount
description = interaction.message.embeds[0].fields[1].value
await create_transaction(amount, description, interaction.guild.id, user_id)
return 'success'
async def on_error(self, interaction: discord.Interaction, error: Exception):
await interaction.response.send_message('Error', ephemeral=True)
print(error)
traceback.print_tb(error.__traceback__)
class AddTransactionModal(discord.ui.Modal, title='Clan bank addition'):
def __init__(self, bot):
super().__init__()
self.bot = bot
amount = discord.ui.TextInput(label='How much do you want to add?', placeholder="0", min_length=1, max_length=10, required=True, style=TextStyle.short)
description = discord.ui.TextInput(label='Description (optional)', placeholder='Describe the reason for your addition here...', max_length=1000, required=False, style=TextStyle.paragraph)
async def on_submit(self, interaction: discord.Interaction):
# Validation
amount = self.amount.value
amount = amount.upper().replace('K', '000').replace('M', '000000').replace('B', '000000000')
if not is_int(amount):
await interaction.response.send_message(f'Error: invalid amount: `{amount}`', ephemeral=True)
return
amount = int(amount)
if amount == 0:
await interaction.response.send_message(f'Error: amount cannot be 0', ephemeral=True)
return
if amount > max_cash or amount < -max_cash:
await interaction.response.send_message(f'Error: amount too great: `{amount}`', ephemeral=True)
return
# Get an image for the given amount of coins
coins_image = get_coins_image(amount)
# Create embed to show data
embed = discord.Embed(title=f'**Clan bank addition**', colour=0x00e400)
embed.add_field(name='Amount', value=str(amount), inline=False)
embed.add_field(name='Description', value=self.description.value if self.description.value else 'N/A', inline=False)
embed.set_author(name=interaction.user.display_name, icon_url=interaction.user.display_avatar.url)
embed.set_footer(text=f'User ID: {interaction.user.id}')
embed.set_thumbnail(url='attachment://coins.png')
# Create a view to confirm / cancel
view = ConfirmView(self.bot)
msg = await interaction.response.send_message(files=[coins_image], embed=embed, view=view)
await view.wait()
async def on_error(self, interaction: discord.Interaction, error: Exception):
await interaction.response.send_message('Error', ephemeral=True)
print(error)
traceback.print_tb(error.__traceback__)
class SubtractTransactionModal(discord.ui.Modal, title='Clan bank subtraction'):
def __init__(self, bot):
super().__init__()
self.bot = bot
amount = discord.ui.TextInput(label='How much do you want to subtract?', placeholder="0", min_length=1, max_length=10, required=True, style=TextStyle.short)
description = discord.ui.TextInput(label='Description (optional)', placeholder='Describe the reason for your subtraction here...', max_length=1000, required=False, style=TextStyle.paragraph)
async def on_submit(self, interaction: discord.Interaction):
# Validation
amount = self.amount.value
amount = amount.upper().replace('K', '000').replace('M', '000000').replace('B', '000000000')
if not is_int(amount):
await interaction.response.send_message(f'Error: invalid amount: `{amount}`', ephemeral=True)
return
amount = int(amount)
if amount == 0:
await interaction.response.send_message(f'Error: amount cannot be 0', ephemeral=True)
return
if amount > max_cash or amount < -max_cash:
await interaction.response.send_message(f'Error: amount too great: `{amount}`', ephemeral=True)
return
# Get an image for the given amount of coins
coins_image = get_coins_image(-amount)
# Create embed to show data
embed = discord.Embed(title=f'**Clan bank subtraction**', colour=0xff0000)
embed.add_field(name='Amount', value=str(amount), inline=False)
embed.add_field(name='Description', value=self.description.value if self.description.value else 'N/A', inline=False)
embed.set_author(name=interaction.user.display_name, icon_url=interaction.user.display_avatar.url)
embed.set_footer(text=f'User ID: {interaction.user.id}')
embed.set_thumbnail(url='attachment://coins.png')
# Create a view to confirm / cancel
view = ConfirmView(self.bot)
msg = await interaction.response.send_message(files=[coins_image], embed=embed, view=view)
await view.wait()
async def on_error(self, interaction: discord.Interaction, error: Exception):
await interaction.response.send_message('Error', ephemeral=True)
print(error)
traceback.print_tb(error.__traceback__)
class ClanBank(commands.Cog):
def __init__(self, bot: commands.AutoShardedBot):
self.bot = bot
def cog_unload(self):
pass
@app_commands.command(name='bank')
async def bank(self, interaction: discord.Interaction, action: str):
'''
Manage the clan bank
'''
if not interaction.user.guild_permissions.administrator and interaction.user.id != config['owner']:
guild = await Guild.get(interaction.guild.id)
bank_role = None
if guild.bank_role_id:
bank_role = interaction.guild.get_role(guild.bank_role_id)
if bank_role is None or not interaction.user.top_role >= bank_role:
await interaction.response.send_message(f'You do not have permission to use this command.', ephemeral=True)
return
# Validation
        if action not in ['view', 'add', 'subtract', 'history', 'role']:
await interaction.response.send_message(f'Invalid action: {action}', ephemeral=True)
return
if action == 'add':
await self.add(interaction)
elif action == 'subtract':
await self.subtract(interaction)
elif action == 'history':
await self.history(interaction)
        elif action == 'role':
await self.set_bank_role(interaction)
else:
await self.view(interaction)
@bank.autocomplete('action')
async def action_autocomplete(
self,
interaction: discord.Interaction,
current: str,
) -> List[app_commands.Choice[str]]:
actions = ['view', 'add', 'subtract', 'history']
admin_actions = ['role']
return [
app_commands.Choice(name=action, value=action)
for action in actions if current.lower() in action.lower()
] + [
app_commands.Choice(name=action, value=action)
for action in admin_actions if current.lower() in action.lower() and
(interaction.user.guild_permissions.administrator or interaction.user.id == config['owner'])
]
async def view(self, interaction: discord.Interaction):
# Get the clan bank transactions
tx = await get_transactions(interaction.guild.id)
amount = sum([t.amount for t in tx])
embed = discord.Embed(title=f'**Clan bank**', description=f'Total amount: `{amount:,}`', colour=0x00b2ff)
        embed.set_author(name=interaction.user.display_name, icon_url=interaction.user.display_avatar.url)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 16:29:19 2020
@author: muell018
"""
from flask import Blueprint, session, render_template, url_for, flash, redirect, request, jsonify, json, current_app, send_from_directory
from flask_login import login_required, current_user
from ..io.cts import Cts
import difflib
from jinja2 import Template
from pathlib import Path
from datetime import datetime
from collections import OrderedDict
import csv
from pprint import pprint
compare_bp = Blueprint('compare', __name__)
@compare_bp.route("/compare", methods = ['GET', 'POST'])
@login_required
def compare():
""" Eats two file names, returns a comparison of the two files.
Both files must be csv files containing
<a word>;<doc ID>;<pageNr>;<line ID>;<index of the word>
They may also contain lines with additional HTML code (if the
output format is html):
<h3>Document 1</h3>
"""
if request.method == 'GET':
return "html"
elif request.method == 'POST':
# Get the JSON payload of the request containing the two file names
payload = request.get_json()
if payload['format'] == "html":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"]) # \Dokumente\Synchronisation\Programmieren\Python\tutorial_flask_wsgi\instance\cache
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
html = export_to_html(filtered,
original_document=o[0]['document'],
censored_document=e[0]['document'])
dumping_path = Path(current_app.config["CACHE_PATH"])
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"differences,{o[0]['document']}_vs_{e[0]['document']},{timestamp}.html"
savename = Path(dumping_path, filename)
try:
with open(savename, "w", encoding="utf-8") as f:
f.write(html)
except:
pass
return html
elif payload['format'] == "raw_diff":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"])
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
output = serialize_diff_pairs(filtered)
output["original"]["docTitle"] = o[0]['document']
output["censored"]["docTitle"] = e[0]['document']
output["message"] = "Success! Use the censorship inspector to process the output."
print("ANALYZER: Done! Sending JSON to client.")
return jsonify(output)
elif payload['format'] == "TRACER":
""" The TRACER data is already formatted correctly in the TSV files.
The only thing we have to do here is to replace the "XX" place holders
at the beginning of every line with a two digit number representing
the no. of the document. """
dumping_path = Path(current_app.config["CACHE_PATH"])
output = []
docs = []
docnr = 10
for file in payload['files']:
infile = Path(dumping_path, file)
with open(infile, "r", encoding="utf-8") as f:
lines = f.readlines()
for idx, line in enumerate(lines):
output.append(f"{docnr}{line[2:]}")
if idx == 0: # get the document identifier of the first line
docs.append(line.split("\t")[-1].strip())
docnr += 1
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"tracer_{','.join([str(x) for x in docs])}_{timestamp}.txt"
savename = Path(dumping_path, filename)
print(f"ANALYZER: Trying to write {savename}")
try:
print("ANALYZER: Sucess!")
with open(savename, "w", encoding="utf-8") as f:
f.writelines(output)
return jsonify(message = f'Success! You can download the exported file under /download/{savename}',
links = [{'href': f'/download/{savename}',
'rel': 'download',
'type': 'GET'}]), 200
except:
print(f"ERROR: Analyzer: Could not write file {savename}")
return jsonify(message = f"ERROR: Analyzer: Could not write file {savename}",
links = [{'href': "error",
'rel': 'download',
'type': 'GET'}]), 500
def openfile(filename,separator=";"):
""" Opens a csv file and returns a dict. """
output = []
fieldnames = ["word","document","coldoc","page","line","wordnr", "type"]
with open(filename, newline='', encoding="utf-8") as csvfile:
        data = csv.DictReader(csvfile, fieldnames=fieldnames, delimiter=separator, quotechar='"')
for item in data:
output.append(item)
return output
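# Illustrative sketch (added for clarity, not part of the original source):
# a CSV row with the seven expected columns, e.g.
#
#     exemplum;Doc1;1.2;12;4;3;word
#
# is returned by openfile() as
#
#     {'word': 'exemplum', 'document': 'Doc1', 'coldoc': '1.2',
#      'page': '12', 'line': '4', 'wordnr': '3', 'type': 'word'}
#
# All values are read as strings; missing trailing columns come back as None.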
def balance_tokens(t1, t2):
""" When the texts to be compared have different lengths, the rendering of
the differences found in these texts will be difficult. Therefore, we
add dummy lines to the shorter file. Returns two equally long lists of dicts. """
difference = len(t1) - len(t2)
print("ANALYZER: Balancing different lengths:", len(t1), len(t2), len(t1) - len(t2))
if difference < 0:
line = t1[-1]['line']
for element in range(1, (difference * -1) + 1):
t1.append({'word': '',
'document': 'Dummy1977',
'coldoc': '0.0',
'page': '0',
'line': line,
'wordnr': '', # str(offset + element)
'type': 'dummy'})
elif difference > 0:
line = t2[-1]['line']
for element in range(1, difference + 1):
t2.append({'word': '',
'document': 'Dummy1977',
'coldoc': '0.0',
'page': '0',
'line': line,
'wordnr': '', # str(offset + element)
'type': 'dummy'})
print("ANALYZER: Balanced: ", len(t1), len(t2), len(t1) - len(t2))
def prepare_for_diff(data):
""" Eats a list of dicts, extracts the words in lower case and
returns them as a list. (We don't want capital letters to
disturb our comparison.) """
prepared = []
for item in data:
prepared.append(item['word'].lower())
return prepared
def prepare_output(delta, o, e):
""" Eats the delta produced by difflib and converted to a list,
the original (o) and expurgated (e) data, and returns
a list of lists containing the code (" "/"+"/"-"), the word
in the original version, the word in the expurgated version, the type
(word or punctuation) and the precise cts addresses in both versions. """
print("ANALYZER: preparing data for export ...")
newline = []
pairs = []
position_d1 = 0
position_d2 = 0
for d in delta:
if position_d1 + 1 > len(o) or position_d2 + 1 > len(e):
break
code = d[:1]
data = d[2:]
org = o[position_d1]
exp = e[position_d2]
if code == "?" or code == "":
pass
else:
orgcts = Cts().from_string(f"tr:{org['coldoc']}:{org['page']}.{org['line']}@{org['wordnr']}")
expcts = Cts().from_string(f"tr:{exp['coldoc']}:{exp['page']}.{exp['line']}@{exp['wordnr']}")
if code == " ":
pairs.append([" ",
org['word'],
orgcts,
exp['word'],
expcts,
org['type']])
position_d1 += 1
position_d2 += 1
elif code == "+":
pairs.append(["+",
"",
orgcts,
exp['word'],
expcts,
exp['type']])
position_d2 += 1
elif code == "-":
pairs.append(["-",
org['word'],
orgcts,
"",
expcts,
org['type']])
position_d1 += 1
# with open("debug_prepare_output.txt", "w", encoding="utf-8") as f:
# for pair in pairs:
# f.write(str(pair)+"\n")
# print("DEBUG: ANALYZER: debug file written.")
return pairs
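# Illustrative note (added for clarity, not in the original source): each entry
# appended by prepare_output() is a six-item list of the form
#
#     [code, original_word, original_cts, censored_word, censored_cts, type]
#
# where code is ' ' (unchanged), '+' (only in the censored text) or '-' (only
# in the original), and the Cts objects are built from the
# "tr:<coldoc>:<page>.<line>@<wordnr>" strings above.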
def serialize_diff_pairs(pairs):
""" Makes the list of pairs JSON serializable by converting the Cts objects
and adding some metadata. """
output = {"original": {"colId": pairs[0][2].col,
"docId": pairs[0][2].doc},
"censored": {"colId": pairs[0][4].col,
"docId": pairs[0][4].doc}}
pages = {}
words = []
last_page = pairs[0][2].page
last_line = pairs[0][2].rl
for pair in pairs:
this_page = pair[2].page
this_line = pair[2].rl
if this_page != last_page:
pages[last_page] = {'words': words}
words = []
last_page = this_page
words.append([pair[0],
pair[1],
f"{pair[2].rl}@{pair[2].subreference}",
pair[3],
f"{pair[4].page}.{pair[4].rl}@{pair[4].subreference}",
pair[5]])
pages[last_page] = {'words': words}
output['pages'] = pages
return output
def filter_false_positives(pairs):
''' Use a sliding window (three items long) to search for patterns like:
- ne
- dum
+ nedum <<
+ nobisipsis <<
- nobis
- ipsis
- it
+ ita <<
- a
- etiamsi <<
+ etiam
+ si
If there is a match, keep the long word and drop the two shorter ones.'''
print("ANALYZER: filtering false positives ...")
output = []
countdown = 0
for i in range(len(pairs)-2):
if countdown > 0:
countdown -= 1
if countdown == 0:
if pairs[i][0] == "+" or pairs[i][0] == "-":
first = pairs[i][1] + pairs[i][3]
second = pairs[i+1][1] + pairs[i+1][3]
third = pairs[i+2][1] + pairs[i+2][3]
if first.lower() + second.lower() == third.lower():
output.append([' ',
third, pairs[i+2][2],
third, pairs[i+2][4],
pairs[i+2][5]])
countdown = 3
elif first.lower() + third.lower() == second.lower():
output.append([' ',
second, pairs[i+1][2],
second, pairs[i+1][4],
pairs[i+1][5]])
countdown = 3
elif second.lower() + third.lower() == first.lower():
output.append([' ',
first, pairs[i][2],
first, pairs[i][4],
pairs[i][5]])
countdown = 3
else:
output.append(pairs[i])
countdown = 0
else:
output.append(pairs[i])
return output
def add_text(original, censored):
""" This function preserves prossible lower/upper case
differences between the two versions: """
html = ""
template = Template('<span class="{{ classname }}">{{ text }}</span> ')
if original != censored:
if original == "":
html += censored + " "
elif censored == "":
html += original + " "
else:
            html += template.render(classname="original", text=original)
# Source repository: Rancerle/Clarkson_Magenta
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class for sampling, encoding, and decoding from trained MusicVAE models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import tarfile
from backports import tempfile
import numpy as np
import tensorflow as tf
class NoExtractedExamplesException(Exception):
pass
class MultipleExtractedExamplesException(Exception):
pass
class TrainedModel(object):
"""An interface to a trained model for encoding, decoding, and sampling.
Args:
config: The Config to build the model graph with.
batch_size: The batch size to build the model graph with.
checkpoint_dir_or_path: The directory containing checkpoints for the model,
the most recent of which will be loaded, or a direct path to a specific
checkpoint.
var_name_substitutions: Optional list of string pairs containing regex
patterns and substitution values for renaming model variables to match
those in the checkpoint. Useful for backwards compatibility.
session_target: Optional execution engine to connect to. Defaults to
in-process.
sample_kwargs: Additional, non-tensor keyword arguments to pass to sample
call.
"""
def __init__(self, config, batch_size, checkpoint_dir_or_path=None,
var_name_substitutions=None, session_target='', **sample_kwargs):
checkpoint_path = (tf.train.latest_checkpoint(checkpoint_dir_or_path)
if tf.gfile.IsDirectory(checkpoint_dir_or_path) else
checkpoint_dir_or_path)
self._config = copy.deepcopy(config)
self._config.hparams.batch_size = batch_size
with tf.Graph().as_default():
model = self._config.model
model.build(
self._config.hparams,
self._config.data_converter.output_depth,
is_training=False)
# Input placeholders
self._temperature = tf.placeholder(tf.float32, shape=())
self._z_input = (
tf.placeholder(tf.float32,
shape=[batch_size, self._config.hparams.z_size])
if self._config.hparams.z_size else None)
self._c_input = (
tf.placeholder(
tf.float32,
shape=[None, self._config.data_converter.control_depth])
if self._config.data_converter.control_depth > 0 else None)
self._inputs = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.input_depth])
self._controls = tf.placeholder(
tf.float32,
shape=[batch_size, None, self._config.data_converter.control_depth])
self._inputs_length = tf.placeholder(
tf.int32,
shape=[batch_size] + list(self._config.data_converter.length_shape))
self._max_length = tf.placeholder(tf.int32, shape=())
# Outputs
self._outputs, self._decoder_results = model.sample(
batch_size,
max_length=self._max_length,
z=self._z_input,
c_input=self._c_input,
temperature=self._temperature,
**sample_kwargs)
if self._config.hparams.z_size:
q_z = model.encode(self._inputs, self._inputs_length, self._controls)
self._mu = q_z.loc
self._sigma = q_z.scale.diag
self._z = q_z.sample()
var_map = None
if var_name_substitutions is not None:
var_map = {}
for v in tf.global_variables():
var_name = v.name[:-2] # Strip ':0' suffix.
for pattern, substitution in var_name_substitutions:
var_name = re.sub(pattern, substitution, var_name)
if var_name != v.name[:-2]:
tf.logging.info('Renaming `%s` to `%s`.', v.name[:-2], var_name)
var_map[var_name] = v
# Restore graph
self._sess = tf.Session(target=session_target)
saver = tf.train.Saver(var_map)
if (os.path.exists(checkpoint_path) and
tarfile.is_tarfile(checkpoint_path)):
tf.logging.info('Unbundling checkpoint.')
with tempfile.TemporaryDirectory() as temp_dir:
tar = tarfile.open(checkpoint_path)
tar.extractall(temp_dir)
# Assume only a single checkpoint is in the directory.
for name in tar.getnames():
if name.endswith('.ckpt.index'):
checkpoint_path = os.path.join(temp_dir, name[0:-6])
break
saver.restore(self._sess, checkpoint_path)
else:
saver.restore(self._sess, checkpoint_path)
def sample(self, n=None, length=None, temperature=1.0, same_z=False,
c_input=None):
"""Generates random samples from the model.
Args:
n: The number of samples to return. A full batch will be returned if not
specified.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
same_z: Whether to use the same latent vector for all samples in the
batch (if applicable).
c_input: A sequence of control inputs to use for all samples (if
applicable).
Returns:
A list of samples as NoteSequence objects.
Raises:
ValueError: If `length` is not specified and an end token is not being
used.
"""
batch_size = self._config.hparams.batch_size
n = n or batch_size
z_size = self._config.hparams.z_size
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
length = length or tf.int32.max
feed_dict = {
self._temperature: temperature,
self._max_length: length
}
if self._z_input is not None and same_z:
z = np.random.randn(z_size).astype(np.float32)
z = np.tile(z, (batch_size, 1))
feed_dict[self._z_input] = z
if self._c_input is not None:
feed_dict[self._c_input] = c_input
outputs = []
for _ in range(int(np.ceil(n / batch_size))):
if self._z_input is not None and not same_z:
feed_dict[self._z_input] = (
np.random.randn(batch_size, z_size).astype(np.float32))
outputs.append(self._sess.run(self._outputs, feed_dict))
samples = np.vstack(outputs)[:n]
if self._c_input is not None:
return self._config.data_converter.to_items(
samples, np.tile(np.expand_dims(c_input, 0), [batch_size, 1, 1]))
else:
return self._config.data_converter.to_items(samples)
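  # Hedged usage sketch (added for illustration; the config name and checkpoint
  # path are assumptions, not taken from this file):
  #
  #   config = configs.CONFIG_MAP['cat-mel_2bar_small']
  #   model = TrainedModel(config, batch_size=4,
  #                        checkpoint_dir_or_path='/tmp/music_vae/model.tar')
  #   sequences = model.sample(n=4, length=80, temperature=1.0)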
def encode(self, note_sequences, assert_same_length=False):
"""Encodes a collection of NoteSequences into latent vectors.
Args:
note_sequences: A collection of NoteSequence objects to encode.
assert_same_length: Whether to raise an AssertionError if all of the
extracted sequences are not the same length.
Returns:
The encoded `z`, `mu`, and `sigma` values.
Raises:
RuntimeError: If called for a non-conditional model.
NoExtractedExamplesException: If no examples were extracted.
MultipleExtractedExamplesException: If multiple examples were extracted.
AssertionError: If `assert_same_length` is True and any extracted
sequences differ in length.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
inputs = []
controls = []
lengths = []
for note_sequence in note_sequences:
extracted_tensors = self._config.data_converter.to_tensors(note_sequence)
if not extracted_tensors.inputs:
raise NoExtractedExamplesException(
'No examples extracted from NoteSequence: %s' % note_sequence)
if len(extracted_tensors.inputs) > 1:
raise MultipleExtractedExamplesException(
'Multiple (%d) examples extracted from NoteSequence: %s' %
(len(extracted_tensors.inputs), note_sequence))
inputs.append(extracted_tensors.inputs[0])
controls.append(extracted_tensors.controls[0])
lengths.append(extracted_tensors.lengths[0])
if assert_same_length and len(inputs[0]) != len(inputs[-1]):
raise AssertionError(
'Sequences 0 and %d have different lengths: %d vs %d' %
(len(inputs) - 1, len(inputs[0]), len(inputs[-1])))
return self.encode_tensors(inputs, lengths, controls)
def encode_tensors(self, input_tensors, lengths, control_tensors=None):
"""Encodes a collection of input tensors into latent vectors.
Args:
input_tensors: Collection of input tensors to encode.
lengths: Collection of lengths of input tensors.
control_tensors: Collection of control tensors to encode.
Returns:
The encoded `z`, `mu`, and `sigma` values.
Raises:
RuntimeError: If called for a non-conditional model.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot encode with a non-conditional model.')
n = len(input_tensors)
input_depth = self._config.data_converter.input_depth
batch_size = self._config.hparams.batch_size
batch_pad_amt = -n % batch_size
if batch_pad_amt > 0:
input_tensors += [np.zeros([0, input_depth])] * batch_pad_amt
length_array = np.array(lengths, np.int32)
length_array = np.pad(
length_array,
[(0, batch_pad_amt)] + [(0, 0)] * (length_array.ndim - 1),
'constant')
max_length = max([len(t) for t in input_tensors])
inputs_array = np.zeros(
[len(input_tensors), max_length, input_depth])
for i, t in enumerate(input_tensors):
inputs_array[i, :len(t)] = t
control_depth = self._config.data_converter.control_depth
controls_array = np.zeros(
[len(input_tensors), max_length, control_depth])
if control_tensors is not None:
control_tensors += [np.zeros([0, control_depth])] * batch_pad_amt
for i, t in enumerate(control_tensors):
controls_array[i, :len(t)] = t
outputs = []
for i in range(len(inputs_array) // batch_size):
batch_begin = i * batch_size
batch_end = (i+1) * batch_size
feed_dict = {self._inputs: inputs_array[batch_begin:batch_end],
self._controls: controls_array[batch_begin:batch_end],
self._inputs_length: length_array[batch_begin:batch_end]}
outputs.append(
self._sess.run([self._z, self._mu, self._sigma], feed_dict))
assert outputs
return tuple(np.vstack(v)[:n] for v in zip(*outputs))
def decode(self, z, length=None, temperature=1.0, c_input=None):
"""Decodes a collection of latent vectors into NoteSequences.
Args:
z: A collection of latent vectors to decode.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
c_input: Control sequence (if applicable).
Returns:
A list of decodings as NoteSequence objects.
Raises:
RuntimeError: If called for a non-conditional model.
ValueError: If `length` is not specified and an end token is not being
used.
"""
tensors = self.decode_to_tensors(z, length, temperature, c_input)
if self._c_input is not None:
return self._config.data_converter.to_items(
tensors, np.tile(np.expand_dims(c_input, 0),
[self._config.hparams.batch_size, 1, 1]))
else:
return self._config.data_converter.to_items(tensors)
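  # Hedged sketch (illustration only): encoding two NoteSequences and decoding
  # the mean of their latent vectors, e.g. as a crude interpolation:
  #
  #   z, mu, sigma = model.encode([seq_a, seq_b])
  #   midpoint = np.mean(z, axis=0, keepdims=True)
  #   decoded = model.decode(midpoint, length=80)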
def decode_to_tensors(self, z, length=None, temperature=1.0, c_input=None,
return_full_results=False):
"""Decodes a collection of latent vectors into output tensors.
Args:
z: A collection of latent vectors to decode.
length: The maximum length of a sample in decoder iterations. Required
if end tokens are not being used.
temperature: The softmax temperature to use (if applicable).
c_input: Control sequence (if applicable).
return_full_results: If true will return the full decoder_results,
otherwise it will return only the samples.
Returns:
If return_full_results is True, will return the full decoder_results list,
otherwise it will return the samples from the decoder as a 2D numpy array.
Raises:
RuntimeError: If called for a non-conditional model.
ValueError: If `length` is not specified and an end token is not being
used.
"""
if not self._config.hparams.z_size:
raise RuntimeError('Cannot decode with a non-conditional model.')
if not length and self._config.data_converter.end_token is None:
raise ValueError(
'A length must be specified when the end token is not used.')
batch_size = self._config.hparams.batch_size
n = len(z)
    length = length or tf.int32.max
# Source: ppa/archive/forms.py (Princeton-CDH/ppa-django)
import json
from django import forms
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.db.models import Max, Min
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from ppa.archive.models import NO_COLLECTION_LABEL, Collection, DigitizedWork
class SelectDisabledMixin(object):
"""
Mixin for :class:`django.forms.RadioSelect` or :class:`django.forms.CheckboxSelect`
classes to set an option as disabled. To disable, the widget's choice
label option should be passed in as a dictionary with `disabled` set
to True::
{'label': 'option', 'disabled': True}.
"""
# Using a solution at https://djangosnippets.org/snippets/2453/
def create_option(
self, name, value, label, selected, index, subindex=None, attrs=None
):
disabled = None
if isinstance(label, dict):
label, disabled = label["label"], label.get("disabled", False)
option_dict = super().create_option(
name, value, label, selected, index, subindex=subindex, attrs=attrs
)
if disabled:
option_dict["attrs"].update({"disabled": "disabled"})
return option_dict
class RadioSelectWithDisabled(SelectDisabledMixin, forms.RadioSelect):
"""
Subclass of :class:`django.forms.RadioSelect` with option to mark
a choice as disabled.
"""
class SelectWithDisabled(SelectDisabledMixin, forms.Select):
"""
Subclass of :class:`django.forms.Select` with option to mark
a choice as disabled.
"""
class CheckboxSelectMultipleWithDisabled(
SelectDisabledMixin, forms.CheckboxSelectMultiple
):
"""
Subclass of :class:`django.forms.CheckboxSelectMultiple` with option to mark
a choice as disabled.
"""
class FacetChoiceField(forms.MultipleChoiceField):
"""Add CheckboxSelectMultiple field with facets taken from solr query"""
# Borrowed from https://github.com/Princeton-CDH/derrida-django/blob/develop/derrida/books/forms.py
# customize multiple choice field for use with facets.
# no other adaptations needed
    # - turn off choice validation (shouldn't fail if facets don't get loaded)
# - default to not required
# - use checkbox select multiple as default widget
widget = forms.CheckboxSelectMultiple
def __init__(self, *args, **kwargs):
if "required" not in kwargs:
kwargs["required"] = False
super().__init__(*args, **kwargs)
def valid_value(self, value):
return True
# RangeWidget and RangeField also borrowed from Derrida codebase
class RangeWidget(forms.MultiWidget):
"""date range widget, for two numeric inputs"""
#: separator string when splitting out values in decompress
sep = "-"
#: template to use to render range multiwidget
# (based on multiwidget, but adds "to" between dates)
template_name = "archive/widgets/rangewidget.html"
def __init__(self, *args, **kwargs):
widgets = [
forms.NumberInput(attrs={"aria-label": "Start"}),
forms.NumberInput(attrs={"aria-label": "End"}),
]
super().__init__(widgets, *args, **kwargs)
def decompress(self, value):
if value:
return [int(val) for val in value.split(self.sep)]
return [None, None]
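# Hedged example (illustration only): the widget stores a range as a single
# "start-end" string and splits it back into two integers for display.
#
#     RangeWidget().decompress("1550-1600")   # -> [1550, 1600]
#     RangeWidget().decompress("")            # -> [None, None]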
class RangeField(forms.MultiValueField):
widget = RangeWidget
def __init__(self, *args, **kwargs):
fields = (
forms.IntegerField(
error_messages={"invalid": "Enter a number"},
validators=[
RegexValidator(r"^[0-9]*$", "Enter a valid number."),
],
required=False,
),
forms.IntegerField(
error_messages={"invalid": "Enter a number"},
validators=[
RegexValidator(r"^[0-9]*$", "Enter a valid number."),
],
required=False,
),
)
kwargs["fields"] = fields
super().__init__(require_all_fields=False, *args, **kwargs)
def compress(self, data_list):
# if both values are set and the first is greater than the second,
# raise a validation error
if all(data_list) and len(data_list) == 2 and data_list[0] > data_list[1]:
raise ValidationError(
"Invalid range (%s - %s)" % (data_list[0], data_list[1])
)
return self.widget.sep.join(["%d" % val if val else "" for val in data_list])
class ModelMultipleChoiceFieldWithEmpty(forms.ModelMultipleChoiceField):
"""Extend :class:`django.forms.ModelMultipleChoiceField` to add an
option for an unset or empty choice (i.e. no relationship in a
many-to-many relationship such as collection membership).
"""
EMPTY_VALUE = NO_COLLECTION_LABEL
EMPTY_ID = 0
def clean(self, value):
"""Extend clean to use default validation on all values but
the empty id."""
try:
pk_values = [val for val in value if val and int(val) != self.EMPTY_ID]
except ValueError:
# non-integer will raise value error
raise ValidationError("Invalid collection")
qs = super()._check_values(pk_values)
if self.EMPTY_ID in value or str(self.EMPTY_ID) in value:
return [self.EMPTY_VALUE] + list(qs)
return qs
class SearchForm(forms.Form):
"""Simple search form for digitized works."""
SORT_CHOICES = [
("relevance", "Relevance"),
("pub_date_asc", "Year Oldest-Newest"),
("pub_date_desc", "Year Newest-Oldest"),
("title_asc", "Title A-Z"),
("title_desc", "Title Z-A"),
("author_asc", "Author A-Z"),
("author_desc", "Author Z-A"),
]
#: help text to be shown with the form
#: (appears when you hover over the question mark icon)
QUESTION_POPUP_TEXT = """
Boolean search within a field is supported. Operators must be capitalized (AND, OR).
"""
# text inputs
query = forms.CharField(
label="Keyword or Phrase",
required=False,
widget=forms.TextInput(
attrs={
"placeholder": "Search full text and metadata",
"_icon": "search",
"_align": "left",
}
),
)
title = forms.CharField(
label="Title",
required=False,
widget=forms.TextInput(
attrs={
"placeholder": "Search the archive by book title",
"_icon": "search",
"_align": "left",
}
),
)
author = forms.CharField(
label="Author",
required=False,
widget=forms.TextInput(
attrs={
"placeholder": "Search the archive by author",
"_icon": "search",
"_align": "left",
}
),
)
# facets
# collections = FacetChoiceField(label='Collection')
# NOTE: using model choice field to list all collections in the database,
    # even if they have no associated works in Solr
collections = ModelMultipleChoiceFieldWithEmpty(
queryset=Collection.objects.order_by("name"),
label="Collection",
widget=CheckboxSelectMultipleWithDisabled,
required=False,
)
pub_date = RangeField(
label="Publication Date",
required=False,
widget=RangeWidget(
attrs={"size": 4, "title": "publication date", "_inline": True}
),
)
sort = forms.ChoiceField(
widget=SelectWithDisabled, choices=SORT_CHOICES, required=False
)
# booleans
earliest_only = forms.BooleanField(
label="Earliest Edition in Hathi",
required=False,
widget=forms.CheckboxInput(attrs={"disabled": True}),
)
ace_only = forms.BooleanField(
label="Authorized Critical Edition",
required=False,
widget=forms.CheckboxInput(attrs={"disabled": True}),
)
dict_exclude = forms.BooleanField(
label="Dictionaries",
required=False,
widget=forms.CheckboxInput(attrs={"disabled": True}),
)
pg_exclude = forms.BooleanField(
label="Pronunciation Guides",
required=False,
widget=forms.CheckboxInput(attrs={"disabled": True}),
)
# fields to request a facet from solr
facet_fields = ["collections_exact"]
range_facets = ["pub_date"]
# mapping of solr fields to form input
solr_facet_fields = {"collections_exact": "collections"}
@staticmethod
def defaults():
"""Default values when initializing the form. Sort by title,
        pre-select collections based on the exclude property."""
return {
"sort": "title_asc",
# always include uncategorized collections; no harm if not present
"collections": [ModelMultipleChoiceFieldWithEmpty.EMPTY_ID]
+ list(
Collection.objects.filter(exclude=False).values_list("id", flat=True)
),
}
def __init__(self, data=None, *args, **kwargs):
"""
Set choices dynamically based on form kwargs and presence of keywords.
"""
super().__init__(data=data, *args, **kwargs)
pubdate_range = self.pub_date_minmax()
self.pubdate_validation_msg = (
"Enter sequential years between {} and {}.".format(
pubdate_range[0], pubdate_range[1]
)
)
# because pubdate is a multifield/multiwidget, access the widgets
# under the multiwidgets
pubdate_widgets = self.fields["pub_date"].widget.widgets
for idx, val in enumerate(pubdate_range):
# don't set None as placeholder (only possible if db is empty)
if val:
# set max/min and initial values
pubdate_widgets[idx].attrs.update(
{
"placeholder": pubdate_range[idx],
"min": pubdate_range[0],
"max": pubdate_range[1],
}
)
# relevance is disabled unless we have a keyword query present
if not data or not self.has_keyword_query(data):
self.fields["sort"].widget.choices[0] = (
"relevance",
{"label": "Relevance", "disabled": True},
)
def has_keyword_query(self, data):
"""check if any of the keyword search fields have search terms"""
return any(
data.get(query_field, None) for query_field in ["query", "title", "author"]
)
def get_solr_sort_field(self, sort=None):
"""
Set solr sort fields for the query based on sort and query strings.
        If sort field is not specified, will use the sort value in the cleaned
data in the current form. If sort is not specified and valid
form data is not available, will raise an :class:`AttributeError`.
:return: solr sort field
"""
solr_mapping = {
"relevance": "-score",
"pub_date_asc": "pub_date",
"pub_date_desc": "-pub_date",
"title_asc": "sort_title",
"title_desc": "-sort_title",
"author_asc": "author_exact",
"author_desc": "-author_exact",
}
# if not specified, use sort value from current form data
if sort is None:
sort = self.cleaned_data.get("sort")
# return solr field for requested sort option
return solr_mapping.get(sort, None)
def set_choices_from_facets(self, facets):
"""Set choices on field from a dictionary of facets"""
# Also borrowed from Derrida module referenced for FacetChoiceField
# Uses mapping of solr_facet_fields and facet_fields in class
# definition but does not yet import full functionality of
# derrida-django's ReferenceSearchForm
# The primary adaptation involves use of a dictionary of dictionaries
# for facets in SolrClient vs. the functionality of
# django-haystack/pysolr.
for key, facet_dict in facets.items():
formfield = self.solr_facet_fields.get(key, key)
# special case: collections is no longer a facet choice field,
# but options should be disabled if not present at all
# (i.e. no works are associated with that collection in Solr)
if formfield == "collections":
new_choice = []
for choice in self.fields[formfield].widget.choices:
# widget choice is tuple of id, name; check for name in facets
if choice[1] not in facet_dict.keys():
new_choice.append(
(choice[0], {"label": choice[1], "disabled": True})
)
else:
new_choice.append(choice)
# if there are items not in a collection, add an option
# so they will be findable
if NO_COLLECTION_LABEL in facet_dict:
new_choice.append(
(
ModelMultipleChoiceFieldWithEmpty.EMPTY_ID,
{"label": NO_COLLECTION_LABEL},
)
)
# replace choices with new version
self.fields[formfield].widget.choices = new_choice
# normal facet field behavior: populate choices from facet
# disabling for now, not currently in use
# elif formfield in self.fields:
# self.fields[formfield].choices = [
# (val, mark_safe('%s <span>%d</span>' % (val, count)))
# for val, count in facet_dict.items()]
PUBDATE_CACHE_KEY = "digitizedwork_pubdate_maxmin"
def pub_date_minmax(self):
"""Get minimum and maximum values for
        :class:`~ppa.archive.models.DigitizedWork` publication dates."""
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2010 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Samples visible locations of a target object and a sensor.
.. image:: ../../images/databases/visibilitymodel.jpg
:width: 640
`[source] <../_modules/openravepy/databases/visibilitymodel.html>`_
**Running the Generator**
.. code-block:: bash
openrave.py --database visibilitymodel --robot=robots/pa10schunk.robot.xml
**Showing Visible Locations**
.. code-block:: bash
openrave.py --database visibilitymodel --robot=robots/pa10schunk.robot.xml --show
Usage
-----
Dynamically generate/load the visibility sampler for a manipulator/sensor/target combination:
.. code-block:: python
robot.SetActiveManipulator(...)
ikmodel = openravepy.databases.visibilitymodel.VisibilityModel(robot,target,sensorname)
if not vmodel.load():
vmodel.autogenerate()
Description
-----------
As long as a sensor is attached to a robot arm, this can be applied to any robot to get immediate visibility configuration sampling:
.. image:: ../../images/databases/visibilitymodel_extents.jpg
:height: 250
The visibility database generator uses the :ref:`module-visualfeedback` for the underlying visibility
computation. The higher level functions it provides are sampling configurations, computing all valid
configurations with the manipulator, and display.
Command-line
------------
.. shell-block:: openrave.py --database visibilitymodel --help
Class Definitions
-----------------
"""
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2009-2010 <NAME> (<EMAIL>)'
__license__ = 'Apache License, Version 2.0'
import time
import os.path
if not __openravepy_build_doc__:
from ..openravepy_int import *
from ..openravepy_ext import *
from numpy import *
else:
from numpy import array
from . import DatabaseGenerator
import inversekinematics, kinematicreachability
from .. import interfaces
import logging
log = logging.getLogger('openravepy.'+__name__.split('.',2)[-1])
class VisibilityModel(DatabaseGenerator):
class GripperVisibility:
"""Used to hide links not beloning to gripper.
When 'entered' will hide all the non-gripper links in order to facilitate visiblity of the gripper
"""
def __init__(self,manip):
self.manip = manip
self.robot = self.manip.GetRobot()
self.hiddengeoms = []
def __enter__(self):
self.hiddengeoms = []
with self.robot.GetEnv():
# stop rendering the non-gripper links
childlinkids = [link.GetIndex() for link in self.manip.GetChildLinks()]
for link in self.robot.GetLinks():
if link.GetIndex() not in childlinkids:
for geom in link.GetGeometries():
self.hiddengeoms.append((geom,geom.IsDraw()))
geom.SetDraw(False)
def __exit__(self,type,value,traceback):
with self.robot.GetEnv():
for geom,isdraw in self.hiddengeoms:
geom.SetDraw(isdraw)
def __init__(self,robot,target,sensorrobot=None,sensorname=None,maxvelmult=None, ignoresensorcollision=None):
"""Starts a visibility model using a robot, a sensor, and a target
The minimum needed to be specified is the robot and a sensorname. Supports sensors that do
not belong to the current robot in the case that a robot is holding the target with its
manipulator. Providing the target allows visibility information to be computed.
"""
DatabaseGenerator.__init__(self,robot=robot)
self.sensorrobot = sensorrobot if sensorrobot is not None else robot
self.target = target
self.visualprob = interfaces.VisualFeedback(self.robot,maxvelmult=maxvelmult,ignoresensorcollision=ignoresensorcollision)
self.basemanip = interfaces.BaseManipulation(self.robot,maxvelmult=maxvelmult)
self.convexhull = None
self.sensorname = sensorname
if self.sensorname is None:
possiblesensors = [s.GetName() for s in self.sensorrobot.GetAttachedSensors() if s.GetSensor() is not None and s.GetSensor().Supports(Sensor.Type.Camera)]
if len(possiblesensors) > 0:
self.sensorname = possiblesensors[0]
self.manip = robot.GetActiveManipulator()
self.manipname = None if self.manip is None else self.manip.GetName()
self.visibilitytransforms = None
self.rmodel = self.ikmodel = None
self.preshapes = None
self.preprocess()
def clone(self,envother):
clone = DatabaseGenerator.clone(self,envother)
clone.rmodel = self.rmodel.clone(envother) if not self.rmodel is None else None
clone.preshapes = array(self.preshapes) if not self.preshapes is None else None
clone.ikmodel = self.ikmodel.clone(envother) if not self.ikmodel is None else None
clone.visualprob = self.visualprob.clone(envother)
clone.basemanip = self.basemanip.clone(envother)
clone.preprocess()
return clone
def has(self):
return self.visibilitytransforms is not None and len(self.visibilitytransforms) > 0
def getversion(self):
return 2
def getfilename(self,read=False):
return RaveFindDatabaseFile(os.path.join('robot.'+self.robot.GetKinematicsGeometryHash(), 'visibility.' + self.manip.GetStructureHash() + '.' + self.attachedsensor.GetStructureHash() + '.' + self.target.GetKinematicsGeometryHash()+'.pp'),read)
def load(self):
try:
params = DatabaseGenerator.load(self)
if params is None:
return False
self.visibilitytransforms,self.convexhull,self.KK,self.dims,self.preshapes = params
self.preprocess()
return self.has()
        except Exception:
return False
def save(self):
DatabaseGenerator.save(self,(self.visibilitytransforms,self.convexhull,self.KK,self.dims,self.preshapes))
def preprocess(self):
with self.env:
manipname = self.visualprob.SetCameraAndTarget(sensorname=self.sensorname,sensorrobot=self.sensorrobot,manipname=self.manipname,target=self.target)
assert(self.manipname is None or self.manipname==manipname)
self.manip = self.robot.SetActiveManipulator(manipname)
self.attachedsensor = [s for s in self.sensorrobot.GetAttachedSensors() if s.GetName() == self.sensorname][0]
self.ikmodel = inversekinematics.InverseKinematicsModel(robot=self.robot,iktype=IkParameterization.Type.Transform6D)
if not self.ikmodel.load():
self.ikmodel.autogenerate()
if self.visibilitytransforms is not None:
self.visualprob.SetCameraTransforms(transforms=self.visibilitytransforms)
def autogenerate(self,options=None,gmodel=None):
preshapes = None
        sphere = None
conedirangles = None
if options is not None:
if options.preshapes is not None:
preshapes = zeros((0,len(self.manip.GetGripperIndices())))
for preshape in options.preshapes:
preshapes = r_[preshapes,[array([float(s) for s in preshape.split()])]]
if options.sphere is not None:
sphere = [float(s) for s in options.sphere.split()]
if options.conedirangles is not None:
conedirangles = []
for conediranglestring in options.conedirangles:
conedirangles.append([float(s) for s in conediranglestring.split()])
if not gmodel is None:
preshapes = array([gmodel.grasps[0][gmodel.graspindices['igrasppreshape']]])
if len(self.manip.GetGripperIndices()) > 0:
if preshapes is None:
with self.target:
self.target.Enable(False)
taskmanip = interfaces.TaskManipulation(self.robot)
final,traj = taskmanip.ReleaseFingers(execute=False,outputfinal=True)
preshapes = array([final])
else:
preshapes = array(())
self.generate(preshapes=preshapes,sphere=sphere,conedirangles=conedirangles)
self.save()
def generate(self,preshapes,sphere=None,conedirangles=None,localtransforms=None):
self.preshapes=preshapes
self.preprocess()
self.sensorname = self.attachedsensor.GetName()
self.manipname = self.manip.GetName()
bodies = [(b,b.IsEnabled()) for b in self.env.GetBodies() if b != self.robot and b != self.target]
for b in bodies:
b[0].Enable(False)
try:
with self.env:
sensor = self.attachedsensor.GetSensor()
if sensor is not None: # set power to 0?
sensorgeom = sensor.GetSensorGeometry(Sensor.Type.Camera)
sensordata = sensor.GetSensorData(Sensor.Type.Camera)
self.KK = sensorgeom.KK.K
self.dims = sensordata.imagedata.shape
with RobotStateSaver(self.robot):
# find better way of handling multiple grasps
if len(self.preshapes) > 0:
self.robot.SetDOFValues(self.preshapes[0],self.manip.GetGripperIndices())
extentsfile = os.path.join(RaveGetHomeDirectory(),'kinbody.'+self.target.GetKinematicsGeometryHash(),'visibility.txt')
if sphere is None and os.path.isfile(extentsfile):
self.visibilitytransforms = self.visualprob.ProcessVisibilityExtents(extents=loadtxt(extentsfile,float),conedirangles=conedirangles)
elif localtransforms is not None:
self.visibilitytransforms = self.visualprob.ProcessVisibilityExtents(transforms=localtransforms)
else:
if sphere is None:
sphere = [3,0.1,0.15,0.2,0.25,0.3]
self.visibilitytransforms = self.visualprob.ProcessVisibilityExtents(sphere=sphere,conedirangles=conedirangles)
print 'total transforms: ',len(self.visibilitytransforms)
self.visualprob.SetCameraTransforms(transforms=self.visibilitytransforms)
finally:
for b,enable in bodies:
b.Enable(enable)
def SetCameraTransforms(self,transforms):
"""Sets the camera transforms to the visual feedback problem"""
self.visualprob.SetCameraTransforms(transforms=transforms)
def showtransforms(self,options=None):
if self.robot != self.sensorrobot:
pts = poseMultArrayT(self.sensorrobot.GetTransformPose(), InvertPoses(self.visibilitytransforms))[:,4:7]
else:
pts = poseMultArrayT(self.target.GetTransformPose(), self.visibilitytransforms)[:,4:7]
h=self.env.plot3(pts,5,colors=array([0.5,0.5,1,0.2]))
try:
with RobotStateSaver(self.robot):
# disable all non-child links
for link in self.robot.GetLinks():
link.Enable(link in self.manip.GetChildLinks())
with self.GripperVisibility(self.manip):
for i,pose in enumerate(self.visibilitytransforms):
with self.env:
if len(self.preshapes) > 0:
self.robot.SetDOFValues(self.preshapes[0],self.manip.GetGripperIndices())
if self.robot != self.sensorrobot:
# sensor is not attached to robot
                                # robot should be grabbing the target
assert(self.robot.IsGrabbing(self.target) is not None)
relativepose = poseMult(poseMult(self.attachedsensor.GetTransformPose(),InvertPose(pose)), InvertPose(self.target.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(relativepose, link.GetTransformPose()))
else:
                                # robot should not be grabbing the target
assert(self.robot.IsGrabbing(self.target) is None)
relativepose = poseMult(InvertPose(self.attachedsensor.GetTransformPose()),self.manip.GetTransformPose())
globalCameraPose = poseMult(self.target.GetTransformPose(), pose)
grasppose = poseMult(globalCameraPose,relativepose)
deltapose = poseMult(grasppose,InvertPose(self.manip.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(deltapose,link.GetTransformPose()))
visibility = self.visualprob.ComputeVisibility()
self.env.UpdatePublishedBodies()
msg='%d/%d visibility=%d, press any key to continue: '%(i,len(self.visibilitytransforms),visibility)
if options is not None and options.showimage:
pilutil=__import__('scipy.misc',fromlist=['pilutil'])
I=self.getCameraImage()
print(msg)
pilutil.imshow(I)
else:
raw_input(msg)
finally:
# have to destroy the plot handle
h = None
def ShowTransform(self, relativepose, options=None):
"""moves the robot links temporarily to show a transform
"""
if self.robot != self.sensorrobot:
pts = poseMult(self.sensorrobot.GetTransformPose(), InvertPose(relativepose))[4:7]
else:
pts = poseMult(self.target.GetTransformPose(), relativepose)[4:7]
h=self.env.plot3(pts,5,colors=array([0.5,0.5,1,0.2]))
try:
with RobotStateSaver(self.robot):
# disable all non-child links
for link in self.robot.GetLinks():
link.Enable(link in self.manip.GetChildLinks())
with self.GripperVisibility(self.manip):
with self.env:
if len(self.preshapes) > 0:
self.robot.SetDOFValues(self.preshapes[0],self.manip.GetGripperIndices())
if self.robot != self.sensorrobot:
# sensor is not attached to robot
                            # robot should be grabbing the target
assert(self.robot.IsGrabbing(self.target) is not None)
linkrelativepose = poseMult(poseMult(self.attachedsensor.GetTransformPose(),InvertPose(relativepose)), InvertPose(self.target.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(linkrelativepose, link.GetTransformPose()))
else:
# robot should not be grabbing the target
assert(self.robot.IsGrabbing(self.target) is None)
linkrelativepose = poseMult(InvertPose(self.attachedsensor.GetTransformPose()),self.manip.GetTransformPose())
globalCameraPose = poseMult(self.target.GetTransformPose(), relativepose)
grasppose = poseMult(globalCameraPose, linkrelativepose)
deltapose = poseMult(grasppose,InvertPose(self.manip.GetTransformPose()))
for link in self.manip.GetChildLinks():
link.SetTransform(poseMult(deltapose,link.GetTransformPose()))
visibility = self.visualprob.ComputeVisibility()
self.env.UpdatePublishedBodies()
msg='visibility=%d, press any key to continue: '%(visibility)
if options is not None and options.showimage:
pilutil=__import__('scipy.misc',fromlist=['pilutil'])
I=self.getCameraImage()
print(msg)
pilutil.imshow(I)
else:
raw_input(msg)
finally:
# have to destroy the plot handle
h = None
def show(self,options=None):
if self.env.GetViewer() is None:
self.env.SetViewer('qtcoin')
time.sleep(0.4) # give time for viewer to initialize
self.attachedsensor.GetSensor().Configure(Sensor.ConfigureCommand.PowerOn)
self.attachedsensor.GetSensor().Configure(Sensor.ConfigureCommand.RenderDataOn)
return self.showtransforms(options)
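# Hypothetical usage sketch (names are assumptions, not from the original source):
# any object exposing a boolean 'showimage' attribute can serve as 'options', e.g.
#
#   # class _ShowOptions(object):
#   #     showimage = True   # render the camera image via scipy instead of waiting on raw_input
#   # vmodel.show(options=_ShowOptions())   # 'vmodel' being an instance of this model class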
def moveToPreshape(self):
"""uses a planner to safely move the hand to the preshape and returns the trajectory"""
if len(self.preshapes) > 0:
preshape=self.preshapes[0]
with self.robot:
self.robot.SetActiveDOFs(self.manip.GetArmIndices())
self.basemanip.MoveUnsyncJoints(jointvalues=preshape,jointinds=self.manip.GetGripperIndices())
while not self.robot.GetController().IsDone(): # busy wait
time.sleep(0.01)
with self.robot:
self.robot.SetActiveDOFs(self.manip.GetGripperIndices())
self.basemanip.MoveActiveJoints(goal=preshape)
while not self.robot.GetController().IsDone(): # busy wait
time.sleep(0.01)
def computeValidTransform(self,returnall=False,checkcollision=True,computevisibility=True,randomize=False):
with self.robot:
if self.manip.CheckIndependentCollision():
raise planning_error('robot independent links are initially in collision')
validjoints = []
if randomize:
order = random.permutation(len(self.visibilitytransforms))
else:
order = xrange(len(self.visibilitytransforms))
for i in order:
pose = self.visibilitytransforms[i]
Trelative = dot(linalg.inv(self.attachedsensor.GetTransform()),self.manip.GetEndEffectorTransform())
Tcamera = dot(self.target.GetTransform(),matrixFromPose(pose))
Tgrasp = dot(Tcamera,Trelative)
s = self.manip.FindIKSolution(Tgrasp,checkcollision)
if s is not None:
self.robot.SetDOFValues(s,self.manip.GetArmIndices())
if computevisibility and not self.visualprob.ComputeVisibility():
continue
validjoints.append((s,i))
if not returnall:
return validjoints
print 'found',len(validjoints)
return validjoints
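# Hypothetical follow-up sketch (assumption, not from the original source): each
# entry of validjoints is an (arm_solution, transform_index) pair, so the first
# reachable and visible configuration could be applied with
#
#   # validjoints = vmodel.computeValidTransform(returnall=False)
#   # if len(validjoints) > 0:
#   #     s, i = validjoints[0]
#   #     robot.SetDOFValues(s, vmodel.manip.GetArmIndices())   # same call as used above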
def pruneTransformations(self,thresh=0.04,numminneighs=10,maxdist=None,translationonly=True):
if self.rmodel is None:
self.rmodel = kinematicreachability.ReachabilityModel(robot=self.robot)
if not self.rmodel.load():
# do not autogenerate since that would force this model to depend on | |
import pytest
from labgrid import Environment
from labgrid.consoleloggingreporter import ConsoleLoggingReporter
from labgrid.protocol import CommandProtocol
from labgrid.driver.exception import ExecutionError
from autonx import NSHStrategy, SimConsoleDriver
import os
import re
simple_pass = r".*(pass|PASS)"
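# Illustrative note (assumption about intent): the pass criterion is simply that
# the captured console output contains "pass" or "PASS"; with the re module
# imported above, re.match(simple_pass, "... 0 failed, PASS") succeeds because of
# the leading ".*".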
ltp_testdata = [
("ltp_behavior_timers_1_1", simple_pass),
("ltp_behavior_timers_2_1", simple_pass),
("ltp_clock_settime_speculative_4_3", simple_pass),
("ltp_clock_settime_speculative_4_4", simple_pass),
("ltp_definitions_errno_h_3_2", simple_pass),
("ltp_definitions_errno_h_4_1", simple_pass),
("ltp_definitions_mqueue_h_1_1", simple_pass),
("ltp_definitions_sched_h_10_1", simple_pass),
("ltp_definitions_signal_h_26_1", simple_pass),
("ltp_functional_mqueues_send_rev_2", simple_pass),
("ltp_functional_semaphores_sem_conpro", simple_pass),
("ltp_functional_semaphores_sem_philosopher", simple_pass),
("ltp_functional_semaphores_sem_readerwriter", simple_pass),
("ltp_functional_semaphores_sem_sleepingbarber", simple_pass),
("ltp_interfaces_aio_suspend_5_1", simple_pass),
("ltp_interfaces_asctime_1_1", simple_pass),
("ltp_interfaces_clock_1_1", simple_pass),
("ltp_interfaces_clock_2_1", simple_pass),
("ltp_interfaces_clock_getres_1_1", simple_pass),
("ltp_interfaces_clock_getres_3_1", simple_pass),
("ltp_interfaces_clock_getres_5_1", simple_pass),
("ltp_interfaces_clock_getres_6_1", simple_pass),
("ltp_interfaces_clock_getres_6_2", simple_pass),
("ltp_interfaces_clock_gettime_1_1", simple_pass),
("ltp_interfaces_clock_gettime_1_2", simple_pass),
("ltp_interfaces_clock_gettime_2_1", simple_pass),
("ltp_interfaces_clock_gettime_3_1", simple_pass),
("ltp_interfaces_clock_gettime_7_1", simple_pass),
("ltp_interfaces_clock_gettime_8_1", simple_pass),
("ltp_interfaces_clock_gettime_8_2", simple_pass),
("ltp_interfaces_clock_nanosleep_11_1", simple_pass),
("ltp_interfaces_clock_nanosleep_1_1", simple_pass),
("ltp_interfaces_clock_nanosleep_13_1", simple_pass),
("ltp_interfaces_clock_nanosleep_2_1", simple_pass),
("ltp_interfaces_clock_nanosleep_3_1", simple_pass),
("ltp_interfaces_clock_settime_1_1", simple_pass),
("ltp_interfaces_clock_settime_17_1", simple_pass),
("ltp_interfaces_clock_settime_17_2", simple_pass),
("ltp_interfaces_clock_settime_19_1", simple_pass),
("ltp_interfaces_clock_settime_20_1", simple_pass),
("ltp_interfaces_clock_settime_4_1", simple_pass),
("ltp_interfaces_clock_settime_4_2", simple_pass),
("ltp_interfaces_clock_settime_5_1", simple_pass),
("ltp_interfaces_clock_settime_5_2", simple_pass),
("ltp_interfaces_clock_settime_6_1", simple_pass),
("ltp_interfaces_ctime_1_1", simple_pass),
("ltp_interfaces_difftime_1_1", simple_pass),
("ltp_interfaces_fsync_4_1", simple_pass),
("ltp_interfaces_fsync_5_1", simple_pass),
("ltp_interfaces_fsync_7_1", simple_pass),
("ltp_interfaces_gmtime_1_1", simple_pass),
("ltp_interfaces_gmtime_2_1", simple_pass),
("ltp_interfaces_kill_2_1", simple_pass),
("ltp_interfaces_kill_2_2", simple_pass),
("ltp_interfaces_kill_3_1", simple_pass),
("ltp_interfaces_localtime_1_1", simple_pass),
("ltp_interfaces_mktime_1_1", simple_pass),
("ltp_interfaces_mlock_10_1", simple_pass),
("ltp_interfaces_mlock_5_1", simple_pass),
("ltp_interfaces_mlock_8_1", simple_pass),
("ltp_interfaces_mlockall_13_1", simple_pass),
("ltp_interfaces_mlockall_13_2", simple_pass),
("ltp_interfaces_mlockall_3_6", simple_pass),
("ltp_interfaces_mlockall_3_7", simple_pass),
("ltp_interfaces_mlockall_8_1", simple_pass),
("ltp_interfaces_mmap_10_1", simple_pass),
("ltp_interfaces_mmap_11_1", simple_pass),
("ltp_interfaces_mmap_1_1", simple_pass),
("ltp_interfaces_mmap_12_1", simple_pass),
("ltp_interfaces_mmap_1_2", simple_pass),
("ltp_interfaces_mmap_14_1", simple_pass),
("ltp_interfaces_mmap_19_1", simple_pass),
("ltp_interfaces_mmap_21_1", simple_pass),
("ltp_interfaces_mmap_23_1", simple_pass),
("ltp_interfaces_mmap_24_1", simple_pass),
("ltp_interfaces_mmap_24_2", simple_pass),
("ltp_interfaces_mmap_27_1", simple_pass),
("ltp_interfaces_mmap_31_1", simple_pass),
("ltp_interfaces_mmap_3_1", simple_pass),
("ltp_interfaces_mmap_32_1", simple_pass),
("ltp_interfaces_mmap_5_1", simple_pass),
("ltp_interfaces_mmap_6_4", simple_pass),
("ltp_interfaces_mmap_6_5", simple_pass),
("ltp_interfaces_mmap_6_6", simple_pass),
("ltp_interfaces_mmap_7_1", simple_pass),
("ltp_interfaces_mmap_7_2", simple_pass),
("ltp_interfaces_mmap_9_1", simple_pass),
("ltp_interfaces_mq_close_1_1", simple_pass),
("ltp_interfaces_mq_close_3_1", simple_pass),
("ltp_interfaces_mq_close_3_2", simple_pass),
("ltp_interfaces_mq_close_3_3", simple_pass),
("ltp_interfaces_mq_close_4_1", simple_pass),
("ltp_interfaces_mq_getattr_2_1", simple_pass),
("ltp_interfaces_mq_getattr_2_2", simple_pass),
("ltp_interfaces_mq_getattr_3_1", simple_pass),
("ltp_interfaces_mq_getattr_4_1", simple_pass),
("ltp_interfaces_mq_notify_1_1", simple_pass),
("ltp_interfaces_mq_notify_3_1", simple_pass),
("ltp_interfaces_mq_notify_4_1", simple_pass),
("ltp_interfaces_mq_notify_8_1", simple_pass),
("ltp_interfaces_mq_open_11_1", simple_pass),
("ltp_interfaces_mq_open_1_1", simple_pass),
("ltp_interfaces_mq_open_12_1", simple_pass),
("ltp_interfaces_mq_open_13_1", simple_pass),
("ltp_interfaces_mq_open_15_1", simple_pass),
("ltp_interfaces_mq_open_18_1", simple_pass),
("ltp_interfaces_mq_open_19_1", simple_pass),
("ltp_interfaces_mq_open_20_1", simple_pass),
("ltp_interfaces_mq_open_21_1", simple_pass),
("ltp_interfaces_mq_open_23_1", simple_pass),
("ltp_interfaces_mq_open_25_2", simple_pass),
("ltp_interfaces_mq_open_27_1", simple_pass),
("ltp_interfaces_mq_open_27_2", simple_pass),
("ltp_interfaces_mq_open_29_1", simple_pass),
("ltp_interfaces_mq_open_3_1", simple_pass),
("ltp_interfaces_mq_open_7_1", simple_pass),
("ltp_interfaces_mq_open_7_3", simple_pass),
("ltp_interfaces_mq_open_8_1", simple_pass),
("ltp_interfaces_mq_open_9_1", simple_pass),
("ltp_interfaces_mq_receive_10_1", simple_pass),
("ltp_interfaces_mq_receive_11_1", simple_pass),
("ltp_interfaces_mq_receive_11_2", simple_pass),
("ltp_interfaces_mq_receive_1_1", simple_pass),
("ltp_interfaces_mq_receive_12_1", simple_pass),
("ltp_interfaces_mq_receive_2_1", simple_pass),
("ltp_interfaces_mq_receive_7_1", simple_pass),
("ltp_interfaces_mq_receive_8_1", simple_pass),
("ltp_interfaces_mq_send_10_1", simple_pass),
("ltp_interfaces_mq_send_11_1", simple_pass),
("ltp_interfaces_mq_send_11_2", simple_pass),
("ltp_interfaces_mq_send_1_1", simple_pass),
("ltp_interfaces_mq_send_13_1", simple_pass),
("ltp_interfaces_mq_send_14_1", simple_pass),
("ltp_interfaces_mq_send_2_1", simple_pass),
("ltp_interfaces_mq_send_3_1", simple_pass),
("ltp_interfaces_mq_send_3_2", simple_pass),
("ltp_interfaces_mq_send_4_1", simple_pass),
("ltp_interfaces_mq_send_4_2", simple_pass),
("ltp_interfaces_mq_send_4_3", simple_pass),
("ltp_interfaces_mq_send_7_1", simple_pass),
("ltp_interfaces_mq_send_8_1", simple_pass),
("ltp_interfaces_mq_send_9_1", simple_pass),
("ltp_interfaces_mq_setattr_1_1", simple_pass),
("ltp_interfaces_mq_setattr_1_2", simple_pass),
("ltp_interfaces_mq_setattr_2_1", simple_pass),
("ltp_interfaces_mq_setattr_5_1", simple_pass),
("ltp_interfaces_mq_timedreceive_10_1", simple_pass),
("ltp_interfaces_mq_timedreceive_10_2", simple_pass),
("ltp_interfaces_mq_timedreceive_11_1", simple_pass),
("ltp_interfaces_mq_timedreceive_1_1", simple_pass),
("ltp_interfaces_mq_timedreceive_13_1", simple_pass),
("ltp_interfaces_mq_timedreceive_14_1", simple_pass),
("ltp_interfaces_mq_timedreceive_15_1", simple_pass),
("ltp_interfaces_mq_timedreceive_17_1", simple_pass),
("ltp_interfaces_mq_timedreceive_17_2", simple_pass),
("ltp_interfaces_mq_timedreceive_17_3", simple_pass),
("ltp_interfaces_mq_timedreceive_2_1", simple_pass),
("ltp_interfaces_mq_timedreceive_7_1", simple_pass),
("ltp_interfaces_mq_timedsend_10_1", simple_pass),
("ltp_interfaces_mq_timedsend_11_1", simple_pass),
("ltp_interfaces_mq_timedsend_11_2", simple_pass),
("ltp_interfaces_mq_timedsend_1_1", simple_pass),
("ltp_interfaces_mq_timedsend_12_1", simple_pass),
("ltp_interfaces_mq_timedsend_13_1", simple_pass),
("ltp_interfaces_mq_timedsend_14_1", simple_pass),
("ltp_interfaces_mq_timedsend_15_1", simple_pass),
("ltp_interfaces_mq_timedsend_18_1", simple_pass),
("ltp_interfaces_mq_timedsend_19_1", simple_pass),
("ltp_interfaces_mq_timedsend_20_1", simple_pass),
("ltp_interfaces_mq_timedsend_2_1", simple_pass),
("ltp_interfaces_mq_timedsend_3_1", simple_pass),
("ltp_interfaces_mq_timedsend_3_2", simple_pass),
("ltp_interfaces_mq_timedsend_4_1", simple_pass),
("ltp_interfaces_mq_timedsend_4_2", simple_pass),
("ltp_interfaces_mq_timedsend_4_3", simple_pass),
("ltp_interfaces_mq_timedsend_7_1", simple_pass),
("ltp_interfaces_mq_timedsend_8_1", simple_pass),
("ltp_interfaces_mq_timedsend_9_1", simple_pass),
("ltp_interfaces_mq_unlink_1_1", simple_pass),
("ltp_interfaces_mq_unlink_7_1", simple_pass),
("ltp_interfaces_munlock_10_1", simple_pass),
("ltp_interfaces_munlock_11_1", simple_pass),
("ltp_interfaces_munlock_7_1", simple_pass),
("ltp_interfaces_munlockall_5_1", simple_pass),
("ltp_interfaces_munmap_2_1", simple_pass),
("ltp_interfaces_munmap_3_1", simple_pass),
("ltp_interfaces_munmap_4_1", simple_pass),
("ltp_interfaces_munmap_8_1", simple_pass),
("ltp_interfaces_munmap_9_1", simple_pass),
("ltp_interfaces_nanosleep_10000_1", simple_pass),
("ltp_interfaces_nanosleep_1_1", simple_pass),
("ltp_interfaces_nanosleep_2_1", simple_pass),
("ltp_interfaces_nanosleep_5_1", simple_pass),
("ltp_interfaces_nanosleep_6_1", simple_pass),
("ltp_interfaces_pthread_attr_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_attr_destroy_2_1", simple_pass),
("ltp_interfaces_pthread_attr_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_attr_getdetachstate_1_1", simple_pass),
("ltp_interfaces_pthread_attr_getdetachstate_1_2", simple_pass),
("ltp_interfaces_pthread_attr_getinheritsched_1_1", simple_pass),
("ltp_interfaces_pthread_attr_getschedparam_1_1", simple_pass),
("ltp_interfaces_pthread_attr_getschedpolicy_2_1", simple_pass),
("ltp_interfaces_pthread_attr_getstack_1_1", simple_pass),
("ltp_interfaces_pthread_attr_getstacksize_1_1", simple_pass),
("ltp_interfaces_pthread_attr_init_1_1", simple_pass),
("ltp_interfaces_pthread_attr_init_2_1", simple_pass),
("ltp_interfaces_pthread_attr_init_3_1", simple_pass),
("ltp_interfaces_pthread_attr_init_4_1", simple_pass),
("ltp_interfaces_pthread_attr_setdetachstate_1_1", simple_pass),
("ltp_interfaces_pthread_attr_setdetachstate_1_2", simple_pass),
("ltp_interfaces_pthread_attr_setdetachstate_2_1", simple_pass),
("ltp_interfaces_pthread_attr_setdetachstate_4_1", simple_pass),
("ltp_interfaces_pthread_attr_setinheritsched_1_1", simple_pass),
("ltp_interfaces_pthread_attr_setinheritsched_2_1", simple_pass),
("ltp_interfaces_pthread_attr_setinheritsched_2_2", simple_pass),
("ltp_interfaces_pthread_attr_setinheritsched_2_3", simple_pass),
("ltp_interfaces_pthread_attr_setinheritsched_2_4", simple_pass),
("ltp_interfaces_pthread_attr_setinheritsched_4_1", simple_pass),
("ltp_interfaces_pthread_attr_setschedparam_1_1", simple_pass),
("ltp_interfaces_pthread_attr_setschedparam_1_2", simple_pass),
("ltp_interfaces_pthread_attr_setschedparam_1_3", simple_pass),
("ltp_interfaces_pthread_attr_setschedparam_1_4", simple_pass),
("ltp_interfaces_pthread_attr_setschedpolicy_1_1", simple_pass),
("ltp_interfaces_pthread_attr_setschedpolicy_1_2", simple_pass),
("ltp_interfaces_pthread_attr_setschedpolicy_1_3", simple_pass),
("ltp_interfaces_pthread_attr_setschedpolicy_4_1", simple_pass),
("ltp_interfaces_pthread_attr_setschedpolicy_5_1", simple_pass),
("ltp_interfaces_pthread_attr_setstack_1_1", simple_pass),
("ltp_interfaces_pthread_attr_setstack_4_1", simple_pass),
("ltp_interfaces_pthread_attr_setstack_6_1", simple_pass),
("ltp_interfaces_pthread_attr_setstack_7_1", simple_pass),
("ltp_interfaces_pthread_attr_setstacksize_1_1", simple_pass),
("ltp_interfaces_pthread_attr_setstacksize_4_1", simple_pass),
("ltp_interfaces_pthread_barrierattr_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_barrierattr_getpshared_1_1", simple_pass),
("ltp_interfaces_pthread_barrierattr_init_1_1", simple_pass),
("ltp_interfaces_pthread_barrierattr_init_2_1", simple_pass),
("ltp_interfaces_pthread_barrierattr_setpshared_1_1", simple_pass),
("ltp_interfaces_pthread_barrierattr_setpshared_2_1", simple_pass),
("ltp_interfaces_pthread_barrier_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_barrier_destroy_2_1", simple_pass),
("ltp_interfaces_pthread_barrier_init_1_1", simple_pass),
("ltp_interfaces_pthread_barrier_init_3_1", simple_pass),
("ltp_interfaces_pthread_barrier_init_4_1", simple_pass),
("ltp_interfaces_pthread_barrier_wait_1_1", simple_pass),
("ltp_interfaces_pthread_barrier_wait_2_1", simple_pass),
("ltp_interfaces_pthread_barrier_wait_3_1", simple_pass),
("ltp_interfaces_pthread_barrier_wait_3_2", simple_pass),
("ltp_interfaces_pthread_cancel_1_1", simple_pass),
("ltp_interfaces_pthread_cancel_1_2", simple_pass),
("ltp_interfaces_pthread_cancel_1_3", simple_pass),
("ltp_interfaces_pthread_cancel_2_1", simple_pass),
("ltp_interfaces_pthread_cancel_2_2", simple_pass),
("ltp_interfaces_pthread_cancel_2_3", simple_pass),
("ltp_interfaces_pthread_cancel_3_1", simple_pass),
("ltp_interfaces_pthread_cancel_4_1", simple_pass),
("ltp_interfaces_pthread_cancel_5_1", simple_pass),
("ltp_interfaces_pthread_cleanup_pop_1_1", simple_pass),
("ltp_interfaces_pthread_cleanup_pop_1_2", simple_pass),
("ltp_interfaces_pthread_cleanup_pop_1_3", simple_pass),
("ltp_interfaces_pthread_cleanup_push_1_1", simple_pass),
("ltp_interfaces_pthread_cleanup_push_1_2", simple_pass),
("ltp_interfaces_pthread_cleanup_push_1_3", simple_pass),
("ltp_interfaces_pthread_condattr_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_condattr_destroy_2_1", simple_pass),
("ltp_interfaces_pthread_condattr_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_condattr_destroy_4_1", simple_pass),
("ltp_interfaces_pthread_condattr_getclock_1_1", simple_pass),
("ltp_interfaces_pthread_condattr_getclock_1_2", simple_pass),
("ltp_interfaces_pthread_condattr_init_3_1", simple_pass),
("ltp_interfaces_pthread_condattr_setclock_1_1", simple_pass),
("ltp_interfaces_pthread_condattr_setclock_1_2", simple_pass),
("ltp_interfaces_pthread_condattr_setclock_2_1", simple_pass),
("ltp_interfaces_pthread_cond_broadcast_1_1", simple_pass),
("ltp_interfaces_pthread_cond_broadcast_2_1", simple_pass),
("ltp_interfaces_pthread_cond_broadcast_2_2", simple_pass),
("ltp_interfaces_pthread_cond_broadcast_4_1", simple_pass),
("ltp_interfaces_pthread_cond_broadcast_4_2", simple_pass),
("ltp_interfaces_pthread_cond_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_cond_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_cond_init_1_1", simple_pass),
("ltp_interfaces_pthread_cond_init_2_1", simple_pass),
("ltp_interfaces_pthread_cond_init_3_1", simple_pass),
("ltp_interfaces_pthread_cond_init_4_3", simple_pass),
("ltp_interfaces_pthread_cond_signal_1_1", simple_pass),
("ltp_interfaces_pthread_cond_signal_2_1", simple_pass),
("ltp_interfaces_pthread_cond_signal_2_2", simple_pass),
("ltp_interfaces_pthread_cond_signal_4_1", simple_pass),
("ltp_interfaces_pthread_cond_signal_4_2", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_1_1", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_2_1", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_2_2", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_2_3", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_3_1", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_4_1", simple_pass),
("ltp_interfaces_pthread_cond_timedwait_4_3", simple_pass),
("ltp_interfaces_pthread_cond_wait_1_1", simple_pass),
("ltp_interfaces_pthread_cond_wait_2_1", simple_pass),
("ltp_interfaces_pthread_cond_wait_3_1", simple_pass),
("ltp_interfaces_pthread_cond_wait_4_1", simple_pass),
("ltp_interfaces_pthread_create_1_1", simple_pass),
("ltp_interfaces_pthread_create_12_1", simple_pass),
("ltp_interfaces_pthread_create_1_2", simple_pass),
("ltp_interfaces_pthread_create_1_3", simple_pass),
("ltp_interfaces_pthread_create_2_1", simple_pass),
("ltp_interfaces_pthread_create_3_1", simple_pass),
("ltp_interfaces_pthread_create_4_1", simple_pass),
("ltp_interfaces_pthread_create_5_1", simple_pass),
("ltp_interfaces_pthread_create_8_1", simple_pass),
("ltp_interfaces_pthread_detach_1_1", simple_pass),
("ltp_interfaces_pthread_detach_2_1", simple_pass),
("ltp_interfaces_pthread_detach_3_1", simple_pass),
("ltp_interfaces_pthread_detach_4_1", simple_pass),
("ltp_interfaces_pthread_detach_4_2", simple_pass),
("ltp_interfaces_pthread_equal_1_1", simple_pass),
("ltp_interfaces_pthread_equal_1_2", simple_pass),
("ltp_interfaces_pthread_equal_2_1", simple_pass),
("ltp_interfaces_pthread_exit_1_1", simple_pass),
("ltp_interfaces_pthread_exit_2_1", simple_pass),
("ltp_interfaces_pthread_exit_3_1", simple_pass),
("ltp_interfaces_pthread_getschedparam_1_1", simple_pass),
("ltp_interfaces_pthread_getschedparam_1_2", simple_pass),
("ltp_interfaces_pthread_getschedparam_1_3", simple_pass),
("ltp_interfaces_pthread_getspecific_1_1", simple_pass),
("ltp_interfaces_pthread_getspecific_3_1", simple_pass),
("ltp_interfaces_pthread_join_1_1", simple_pass),
("ltp_interfaces_pthread_join_2_1", simple_pass),
("ltp_interfaces_pthread_join_3_1", simple_pass),
("ltp_interfaces_pthread_join_5_1", simple_pass),
("ltp_interfaces_pthread_join_6_2", simple_pass),
("ltp_interfaces_pthread_key_create_1_1", simple_pass),
("ltp_interfaces_pthread_key_create_1_2", simple_pass),
("ltp_interfaces_pthread_key_create_2_1", simple_pass),
("ltp_interfaces_pthread_key_create_3_1", simple_pass),
("ltp_interfaces_pthread_key_delete_1_1", simple_pass),
("ltp_interfaces_pthread_key_delete_1_2", simple_pass),
("ltp_interfaces_pthread_key_delete_2_1", simple_pass),
("ltp_interfaces_pthread_kill_1_2", simple_pass),
("ltp_interfaces_pthread_kill_2_1", simple_pass),
("ltp_interfaces_pthread_kill_3_1", simple_pass),
("ltp_interfaces_pthread_kill_6_1", simple_pass),
("ltp_interfaces_pthread_kill_7_1", simple_pass),
("ltp_interfaces_pthread_kill_8_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_destroy_2_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_destroy_4_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_getprotocol_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_getprotocol_1_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_getpshared_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_getpshared_1_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_getpshared_1_3", simple_pass),
("ltp_interfaces_pthread_mutexattr_getpshared_3_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_gettype_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_gettype_1_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_gettype_1_3", simple_pass),
("ltp_interfaces_pthread_mutexattr_gettype_1_4", simple_pass),
("ltp_interfaces_pthread_mutexattr_gettype_1_5", simple_pass),
("ltp_interfaces_pthread_mutexattr_init_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_init_3_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_setprotocol_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_setprotocol_3_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_setprotocol_3_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_setpshared_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_setpshared_1_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_setpshared_2_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_setpshared_2_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_setpshared_3_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_setpshared_3_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_1_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_2_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_3_1", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_3_2", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_3_3", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_3_4", simple_pass),
("ltp_interfaces_pthread_mutexattr_settype_7_1", simple_pass),
("ltp_interfaces_pthread_mutex_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_mutex_destroy_2_1", simple_pass),
("ltp_interfaces_pthread_mutex_destroy_2_2", simple_pass),
("ltp_interfaces_pthread_mutex_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_mutex_destroy_5_1", simple_pass),
("ltp_interfaces_pthread_mutex_destroy_5_2", simple_pass),
("ltp_interfaces_pthread_mutex_init_1_1", simple_pass),
("ltp_interfaces_pthread_mutex_init_1_2", simple_pass),
("ltp_interfaces_pthread_mutex_init_2_1", simple_pass),
("ltp_interfaces_pthread_mutex_init_3_1", simple_pass),
("ltp_interfaces_pthread_mutex_init_3_2", simple_pass),
("ltp_interfaces_pthread_mutex_init_4_1", simple_pass),
("ltp_interfaces_pthread_mutex_lock_1_1", simple_pass),
("ltp_interfaces_pthread_mutex_lock_2_1", simple_pass),
("ltp_interfaces_pthread_mutex_lock_3_1", simple_pass),
("ltp_interfaces_pthread_mutex_lock_4_1", simple_pass),
("ltp_interfaces_pthread_mutex_lock_5_1", simple_pass),
("ltp_interfaces_pthread_mutex_timedlock_1_1", simple_pass),
("ltp_interfaces_pthread_mutex_timedlock_2_1", simple_pass),
("ltp_interfaces_pthread_mutex_timedlock_4_1", simple_pass),
("ltp_interfaces_pthread_mutex_timedlock_5_1", simple_pass),
("ltp_interfaces_pthread_mutex_timedlock_5_2", simple_pass),
("ltp_interfaces_pthread_mutex_timedlock_5_3", simple_pass),
("ltp_interfaces_pthread_mutex_trylock_1_1", simple_pass),
("ltp_interfaces_pthread_mutex_trylock_3_1", simple_pass),
("ltp_interfaces_pthread_mutex_trylock_4_1", simple_pass),
("ltp_interfaces_pthread_mutex_trylock_4_3", simple_pass),
("ltp_interfaces_pthread_mutex_unlock_1_1", simple_pass),
("ltp_interfaces_pthread_mutex_unlock_2_1", simple_pass),
("ltp_interfaces_pthread_mutex_unlock_3_1", simple_pass),
("ltp_interfaces_pthread_mutex_unlock_5_1", simple_pass),
("ltp_interfaces_pthread_mutex_unlock_5_2", simple_pass),
("ltp_interfaces_pthread_once_1_1", simple_pass),
("ltp_interfaces_pthread_once_1_2", simple_pass),
("ltp_interfaces_pthread_once_1_3", simple_pass),
("ltp_interfaces_pthread_once_2_1", simple_pass),
("ltp_interfaces_pthread_once_3_1", simple_pass),
("ltp_interfaces_pthread_once_6_1", simple_pass),
("ltp_interfaces_pthread_rwlock_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_rwlock_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_rwlock_init_2_1", simple_pass),
("ltp_interfaces_pthread_rwlock_init_6_1", simple_pass),
("ltp_interfaces_pthread_rwlock_rdlock_4_1", simple_pass),
("ltp_interfaces_pthread_rwlock_timedrdlock_2_1", simple_pass),
("ltp_interfaces_pthread_rwlock_timedrdlock_6_1", simple_pass),
("ltp_interfaces_pthread_rwlock_timedrdlock_6_2", simple_pass),
("ltp_interfaces_pthread_rwlock_timedwrlock_6_1", simple_pass),
("ltp_interfaces_pthread_rwlock_timedwrlock_6_2", simple_pass),
("ltp_interfaces_pthread_rwlock_unlock_1_1", simple_pass),
("ltp_interfaces_pthread_rwlock_unlock_2_1", simple_pass),
("ltp_interfaces_pthread_rwlock_unlock_4_1", simple_pass),
("ltp_interfaces_pthread_rwlock_unlock_4_2", simple_pass),
("ltp_interfaces_pthread_rwlock_wrlock_2_1", simple_pass),
("ltp_interfaces_pthread_rwlock_wrlock_3_1", simple_pass),
("ltp_interfaces_pthread_self_1_1", simple_pass),
("ltp_interfaces_pthread_setcancelstate_1_1", simple_pass),
("ltp_interfaces_pthread_setcancelstate_1_2", simple_pass),
("ltp_interfaces_pthread_setcancelstate_2_1", simple_pass),
("ltp_interfaces_pthread_setcancelstate_3_1", simple_pass),
("ltp_interfaces_pthread_setcanceltype_1_1", simple_pass),
("ltp_interfaces_pthread_setcanceltype_1_2", simple_pass),
("ltp_interfaces_pthread_setcanceltype_2_1", simple_pass),
("ltp_interfaces_pthread_setschedparam_1_1", simple_pass),
("ltp_interfaces_pthread_setschedparam_1_2", simple_pass),
("ltp_interfaces_pthread_setschedparam_4_1", simple_pass),
("ltp_interfaces_pthread_setschedparam_5_1", simple_pass),
("ltp_interfaces_pthread_setschedprio_1_1", simple_pass),
("ltp_interfaces_pthread_setspecific_1_1", simple_pass),
("ltp_interfaces_pthread_setspecific_1_2", simple_pass),
("ltp_interfaces_pthread_sigmask_10_1", simple_pass),
("ltp_interfaces_pthread_spin_destroy_1_1", simple_pass),
("ltp_interfaces_pthread_spin_destroy_3_1", simple_pass),
("ltp_interfaces_pthread_spin_init_1_1", simple_pass),
("ltp_interfaces_pthread_spin_init_4_1", simple_pass),
("ltp_interfaces_pthread_spin_lock_1_1", simple_pass),
("ltp_interfaces_pthread_spin_lock_1_2", simple_pass),
("ltp_interfaces_pthread_spin_lock_3_1", simple_pass),
("ltp_interfaces_pthread_spin_lock_3_2", simple_pass),
("ltp_interfaces_pthread_spin_trylock_1_1", simple_pass),
("ltp_interfaces_pthread_spin_trylock_4_1", simple_pass),
("ltp_interfaces_pthread_spin_unlock_1_1", simple_pass),
("ltp_interfaces_pthread_spin_unlock_1_2", simple_pass),
("ltp_interfaces_pthread_spin_unlock_3_1", simple_pass),
("ltp_interfaces_pthread_testcancel_1_1", simple_pass),
("ltp_interfaces_pthread_testcancel_2_1", simple_pass),
("ltp_interfaces_raise_6_1", simple_pass),
("ltp_interfaces_raise_7_1", simple_pass),
("ltp_interfaces_sched_getparam_1_1", simple_pass),
("ltp_interfaces_sched_getparam_2_1", simple_pass),
("ltp_interfaces_sched_getparam_3_1", simple_pass),
("ltp_interfaces_sched_get_priority_max_1_1", simple_pass),
("ltp_interfaces_sched_get_priority_max_1_2", simple_pass),
("ltp_interfaces_sched_get_priority_max_1_3", simple_pass),
("ltp_interfaces_sched_get_priority_max_1_4", simple_pass),
("ltp_interfaces_sched_get_priority_max_2_1", simple_pass),
("ltp_interfaces_sched_get_priority_min_1_1", simple_pass),
("ltp_interfaces_sched_get_priority_min_1_2", simple_pass),
("ltp_interfaces_sched_get_priority_min_1_3", simple_pass),
("ltp_interfaces_sched_get_priority_min_1_4", simple_pass),
("ltp_interfaces_sched_get_priority_min_2_1", simple_pass),
("ltp_interfaces_sched_getscheduler_1_1", simple_pass),
("ltp_interfaces_sched_getscheduler_3_1", simple_pass),
("ltp_interfaces_sched_getscheduler_4_1", simple_pass),
("ltp_interfaces_sched_rr_get_interval_1_1", simple_pass),
("ltp_interfaces_sched_rr_get_interval_2_1", simple_pass),
("ltp_interfaces_sched_setparam_22_1", simple_pass),
("ltp_interfaces_sched_setparam_23_1", simple_pass),
("ltp_interfaces_sched_setparam_23_2", simple_pass),
("ltp_interfaces_sched_setparam_23_3", simple_pass),
("ltp_interfaces_sched_setparam_23_4", simple_pass),
("ltp_interfaces_sched_setparam_23_5", simple_pass),
("ltp_interfaces_sched_setparam_25_1", simple_pass),
("ltp_interfaces_sched_setparam_25_2", simple_pass),
("ltp_interfaces_sched_setparam_25_3", simple_pass),
("ltp_interfaces_sched_setparam_25_4", simple_pass),
("ltp_interfaces_sched_setparam_5_1", simple_pass),
("ltp_interfaces_sched_setscheduler_1_1", simple_pass),
("ltp_interfaces_sched_setscheduler_16_1", simple_pass),
("ltp_interfaces_sched_setscheduler_17_1", simple_pass),
("ltp_interfaces_sched_setscheduler_17_2", simple_pass),
("ltp_interfaces_sched_setscheduler_17_3", simple_pass),
("ltp_interfaces_sched_setscheduler_17_4", simple_pass),
("ltp_interfaces_sched_setscheduler_17_5", simple_pass),
("ltp_interfaces_sched_setscheduler_19_1", simple_pass),
("ltp_interfaces_sched_setscheduler_19_2", simple_pass),
("ltp_interfaces_sched_setscheduler_19_3", simple_pass),
("ltp_interfaces_sched_setscheduler_19_4", simple_pass),
("ltp_interfaces_sched_setscheduler_19_5", simple_pass),
("ltp_interfaces_sched_setscheduler_4_1", simple_pass),
("ltp_interfaces_sched_yield_2_1", simple_pass),
("ltp_interfaces_sem_close_1_1", simple_pass),
("ltp_interfaces_sem_close_2_1", simple_pass),
("ltp_interfaces_sem_close_3_1", simple_pass),
("ltp_interfaces_sem_close_3_2", simple_pass),
("ltp_interfaces_sem_destroy_3_1", simple_pass),
("ltp_interfaces_sem_destroy_4_1", simple_pass),
("ltp_interfaces_sem_getvalue_1_1", simple_pass),
("ltp_interfaces_sem_getvalue_2_1", simple_pass),
("ltp_interfaces_sem_getvalue_2_2", simple_pass),
("ltp_interfaces_sem_getvalue_4_1", simple_pass),
("ltp_interfaces_sem_getvalue_5_1", simple_pass),
("ltp_interfaces_sem_init_1_1", simple_pass),
("ltp_interfaces_sem_init_2_1", simple_pass),
("ltp_interfaces_sem_init_2_2", simple_pass),
("ltp_interfaces_sem_init_3_1", simple_pass),
("ltp_interfaces_sem_init_5_1", simple_pass),
("ltp_interfaces_sem_init_5_2", simple_pass),
("ltp_interfaces_sem_init_6_1", simple_pass),
("ltp_interfaces_sem_init_7_1", simple_pass),
("ltp_interfaces_sem_open_10_1", simple_pass),
("ltp_interfaces_sem_open_1_1", simple_pass),
("ltp_interfaces_sem_open_1_2", simple_pass),
("ltp_interfaces_sem_open_1_3", simple_pass),
("ltp_interfaces_sem_open_1_4", simple_pass),
("ltp_interfaces_sem_open_15_1", simple_pass),
("ltp_interfaces_sem_open_2_1", simple_pass),
("ltp_interfaces_sem_open_2_2", simple_pass),
("ltp_interfaces_sem_open_4_1", simple_pass),
("ltp_interfaces_sem_open_5_1", simple_pass),
("ltp_interfaces_sem_open_6_1", simple_pass),
("ltp_interfaces_sem_post_1_1", simple_pass),
("ltp_interfaces_sem_post_1_2", simple_pass),
("ltp_interfaces_sem_post_2_1", simple_pass),
("ltp_interfaces_sem_post_4_1", simple_pass),
("ltp_interfaces_sem_post_5_1", simple_pass),
("ltp_interfaces_sem_post_6_1", simple_pass),
("ltp_interfaces_sem_timedwait_10_1", simple_pass),
("ltp_interfaces_sem_timedwait_11_1", simple_pass),
("ltp_interfaces_sem_timedwait_1_1", simple_pass),
("ltp_interfaces_sem_timedwait_2_2", simple_pass),
("ltp_interfaces_sem_timedwait_3_1", simple_pass),
("ltp_interfaces_sem_timedwait_4_1", simple_pass),
("ltp_interfaces_sem_timedwait_6_1", simple_pass),
("ltp_interfaces_sem_timedwait_6_2", simple_pass),
("ltp_interfaces_sem_timedwait_7_1", simple_pass),
("ltp_interfaces_sem_unlink_1_1", simple_pass),
("ltp_interfaces_sem_unlink_2_1", simple_pass),
("ltp_interfaces_sem_unlink_4_1", simple_pass),
("ltp_interfaces_sem_unlink_4_2", simple_pass),
("ltp_interfaces_sem_unlink_5_1", simple_pass),
("ltp_interfaces_sem_unlink_6_1", simple_pass),
("ltp_interfaces_sem_unlink_7_1", simple_pass),
("ltp_interfaces_sem_unlink_9_1", simple_pass),
("ltp_interfaces_sem_wait_11_1", simple_pass),
("ltp_interfaces_sem_wait_1_1", simple_pass),
("ltp_interfaces_sem_wait_12_1", simple_pass),
("ltp_interfaces_sem_wait_1_2", simple_pass),
("ltp_interfaces_sem_wait_13_1", simple_pass),
("ltp_interfaces_sem_wait_3_1", simple_pass),
("ltp_interfaces_sem_wait_5_1", simple_pass),
("ltp_interfaces_shm_open_11_1", simple_pass),
("ltp_interfaces_shm_open_1_1", simple_pass),
("ltp_interfaces_shm_open_13_1", simple_pass),
("ltp_interfaces_shm_open_14_2", simple_pass),
("ltp_interfaces_shm_open_15_1", simple_pass),
("ltp_interfaces_shm_open_16_1", simple_pass),
("ltp_interfaces_shm_open_17_1", simple_pass),
("ltp_interfaces_shm_open_18_1", simple_pass),
("ltp_interfaces_shm_open_20_1", simple_pass),
("ltp_interfaces_shm_open_20_2", simple_pass),
("ltp_interfaces_shm_open_20_3", simple_pass),
("ltp_interfaces_shm_open_21_1", simple_pass),
("ltp_interfaces_shm_open_22_1", simple_pass),
("ltp_interfaces_shm_open_25_1", simple_pass),
("ltp_interfaces_shm_open_26_1", simple_pass),
("ltp_interfaces_shm_open_28_1", simple_pass),
("ltp_interfaces_shm_open_28_2", simple_pass),
("ltp_interfaces_shm_open_28_3", simple_pass),
("ltp_interfaces_shm_open_37_1", simple_pass),
("ltp_interfaces_shm_open_38_1", simple_pass),
("ltp_interfaces_shm_open_39_1", simple_pass),
("ltp_interfaces_shm_open_39_2", simple_pass),
("ltp_interfaces_shm_open_41_1", simple_pass),
("ltp_interfaces_shm_open_8_1", simple_pass),
("ltp_interfaces_shm_unlink_10_1", simple_pass),
("ltp_interfaces_shm_unlink_10_2", simple_pass),
("ltp_interfaces_shm_unlink_11_1", simple_pass),
("ltp_interfaces_shm_unlink_1_1", simple_pass),
("ltp_interfaces_shm_unlink_2_1", simple_pass),
("ltp_interfaces_shm_unlink_3_1", simple_pass),
("ltp_interfaces_shm_unlink_5_1", simple_pass),
("ltp_interfaces_shm_unlink_6_1", simple_pass),
("ltp_interfaces_sigaction_1_10", simple_pass),
("ltp_interfaces_sigaction_1_11", simple_pass),
("ltp_interfaces_sigaction_1_13", simple_pass),
("ltp_interfaces_sigaction_1_17", simple_pass),
("ltp_interfaces_sigaction_1_18", simple_pass),
("ltp_interfaces_sigaction_1_19", simple_pass),
("ltp_interfaces_sigaction_1_2", simple_pass),
("ltp_interfaces_sigaction_1_4", simple_pass),
("ltp_interfaces_sigaction_1_5", simple_pass),
("ltp_interfaces_sigaction_18_10", simple_pass),
("ltp_interfaces_sigaction_18_11", simple_pass),
("ltp_interfaces_sigaction_18_13", simple_pass),
("ltp_interfaces_sigaction_18_17", simple_pass),
("ltp_interfaces_sigaction_18_18", simple_pass),
("ltp_interfaces_sigaction_18_2", simple_pass),
("ltp_interfaces_sigaction_18_4", simple_pass),
("ltp_interfaces_sigaction_18_5", simple_pass),
("ltp_interfaces_sigaction_18_9", simple_pass),
("ltp_interfaces_sigaction_19_10", simple_pass),
("ltp_interfaces_sigaction_19_11", simple_pass),
("ltp_interfaces_sigaction_19_13", simple_pass),
("ltp_interfaces_sigaction_19_17", simple_pass),
("ltp_interfaces_sigaction_19_18", simple_pass),
("ltp_interfaces_sigaction_19_2", simple_pass),
("ltp_interfaces_sigaction_19_4", simple_pass),
("ltp_interfaces_sigaction_19_5", simple_pass),
("ltp_interfaces_sigaction_19_9", simple_pass),
("ltp_interfaces_sigaction_1_9", simple_pass),
("ltp_interfaces_sigaction_2_10", simple_pass),
("ltp_interfaces_sigaction_2_11", simple_pass),
| |
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Driver for common sequences for image management on switchable usb port."""
import glob
import os
import shutil
import subprocess
import tempfile
import time
import urllib
import hw_driver
import servo.utils.usb_hierarchy as usb_hierarchy
import usb
# If a hub is attached to the usual image usb storage slot, use this port on the
# hub to search for the usb storage.
STORAGE_ON_HUB_PORT = 1
class UsbImageManagerError(hw_driver.HwDriverError):
"""Error class for UsbImageManager errors."""
pass
# pylint: disable=invalid-name
# Servod driver discovery logic requires this naming convention
class usbImageManager(hw_driver.HwDriver):
"""Driver to handle common tasks on the switchable usb port."""
# Polling interval used to wait for the image usb dev to appear when setting the mux to
# servo_sees_usbkey
_POLLING_DELAY_S = 0.1
# Timeout to wait before giving up on the image usb dev enumerating
_WAIT_TIMEOUT_S = 10
# Timeout to settle all pending tasks on the device before writing to it.
_SETTLE_TIMEOUT_S = 60
# Control aliases to the image mux and power intended for image management
_IMAGE_USB_MUX = 'image_usbkey_mux'
_IMAGE_USB_PWR = 'image_usbkey_pwr'
_IMAGE_DEV = 'image_usbkey_dev'
_IMAGE_MUX_TO_SERVO = 'servo_sees_usbkey'
_HTTP_PREFIX = 'http://'
_DEFAULT_ERROR_MSG = 'No USB storage device found for image transfer.'
def __init__(self, interface, params):
"""Initialize driver by initializing HwDriver."""
super(usbImageManager, self).__init__(interface, params)
# This delay is required to safely switch the usb image mux direction
self._poweroff_delay = params.get('usb_power_off_delay', 0)
if self._poweroff_delay:
self._poweroff_delay = float(self._poweroff_delay)
# This is required to determine if the usbkey is connected to the host.
# |hub_ports| is a comma-separated string of usb hub port numbers
# that the image usbkey enumerates under for a given servo device.
# NOTE: initialization here is shared among multiple controls, some of which
# do not use the hub_ports logic. Do not raise error here if the param
# is not available, but rather inside the controls that leverage it.
self._image_usbkey_hub_ports = None
if 'hub_ports' in params:
self._image_usbkey_hub_ports = params.get('hub_ports').split(',')
# Flag to indicate whether the usb port supports having a hub attached to
# it. In that case, the image will be searched on the |STORAGE_ON_HUB_PORT|
# of the hub.
self._supports_hub_on_port = params.get('hub_on_port', False)
self._error_msg = self._DEFAULT_ERROR_MSG
if 'error_amendment' in params:
self._error_msg += ' ' + params['error_amendment']
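# Minimal sketch of the params dict this driver reads in __init__ (keys taken from
# the code above; the concrete values are illustrative assumptions only):
#
#   # params = {
#   #     'usb_power_off_delay': '2',      # seconds to wait around mux switching
#   #     'hub_ports': '1,2',              # hub ports the image usbkey may enumerate under
#   #     'hub_on_port': True,             # a hub may sit on the port (see STORAGE_ON_HUB_PORT)
#   #     'error_amendment': 'Check the servo cabling.',
#   # }
#   # driver = usbImageManager(interface, params)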
def _Get_image_usbkey_direction(self):
"""Return direction of image usbkey mux."""
return self._interface.get(self._IMAGE_USB_MUX)
def _Set_image_usbkey_direction(self, mux_direction):
"""Connect USB flash stick to either servo or DUT.
This function switches 'usb_mux_sel1' to provide electrical
connection between the USB port J3 and either servo or DUT side.
Args:
mux_direction: map values of "servo_sees_usbkey" or "dut_sees_usbkey".
"""
self._SafelySwitchMux(mux_direction)
if self._interface.get(self._IMAGE_USB_MUX) == self._IMAGE_MUX_TO_SERVO:
# This will ensure that we make a best-effort attempt to only
# return when the block device of the attached usb stick fully
# enumerates.
self._interface.get(self._IMAGE_DEV)
def _SafelySwitchMux(self, mux_direction):
"""Helper to switch the usb mux.
Switching the usb mux is accompanied by powercycling
of the USB stick, because it sometimes gets wedged if the mux
is switched while the stick power is on.
Args:
mux_direction: map values of "servo_sees_usbkey" or "dut_sees_usbkey".
"""
if self._interface.get(self._IMAGE_USB_MUX) != mux_direction:
self._interface.set(self._IMAGE_USB_PWR, 'off')
time.sleep(self._poweroff_delay)
self._interface.set(self._IMAGE_USB_MUX, mux_direction)
time.sleep(self._poweroff_delay)
if self._interface.get(self._IMAGE_USB_PWR) != 'on':
# Enforce that power is supplied.
self._interface.set(self._IMAGE_USB_PWR, 'on')
def _PathIsHub(self, usb_sysfs_path):
"""Return whether |usb_sysfs_path| is a usb hub."""
if not os.path.exists(usb_sysfs_path):
return False
with open(os.path.join(usb_sysfs_path, 'bDeviceClass'), 'r') as classf:
return int(classf.read().strip(), 16) == usb.CLASS_HUB
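# Illustrative note: for a hub the sysfs 'bDeviceClass' file contains '09', and
# int('09', 16) == 9 == usb.CLASS_HUB, which is exactly what the check above
# relies on.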
def _Get_image_usbkey_dev(self):
"""Probe the USB disk device plugged in the servo from the host side.
Returns:
USB disk path if one and only one USB disk path is found, otherwise an
empty string.
Raises:
UsbImageManagerError: if 'hub_ports' was not defined in params
"""
if self._image_usbkey_hub_ports is None:
raise UsbImageManagerError('hub_ports need to be defined in params.')
servod = self._interface
# When the user is requesting the usb_dev they most likely intend for the
# usb to be facing the servo, and to be powered. Enforce that.
self._SafelySwitchMux(self._IMAGE_MUX_TO_SERVO)
# Look for own servod usb device
# pylint: disable=protected-access
# Need servod information to find own servod instance.
usb_id = (servod._vendor, servod._product, servod._serialnames['main'])
self_usb = usb_hierarchy.Hierarchy.GetUsbDeviceSysfsPath(*usb_id)
hub_on_servo = usb_hierarchy.Hierarchy.GetSysfsParentHubStub(self_usb)
# Image usb is one of the hub ports |self._image_usbkey_hub_ports|
image_location_candidates = ['%s.%s' % (hub_on_servo, p) for p in
self._image_usbkey_hub_ports]
hub_location_candidates = []
if self._supports_hub_on_port:
# Here the config says that |image_usbkey_sysfs| might actually have a hub
# and not storage attached to it. In that case, the |STORAGE_ON_HUB_PORT|
# on that hub will house the storage.
hub_location_candidates = ['%s.%d' % (path, STORAGE_ON_HUB_PORT)
for path in image_location_candidates]
image_location_candidates.extend(hub_location_candidates)
self._logger.debug('usb image dev file candidates: %s',
', '.join(image_location_candidates))
# Let the device settle first before pushing out any data onto it.
subprocess.call(['/bin/udevadm', 'settle', '-t',
str(self._SETTLE_TIMEOUT_S)])
self._logger.debug('All udev events have settled.')
end = time.time() + self._WAIT_TIMEOUT_S
while image_location_candidates:
active_storage_candidate = image_location_candidates.pop(0)
if os.path.exists(active_storage_candidate):
if self._PathIsHub(active_storage_candidate):
# Do not check the hub, only devices.
continue
# Use /sys/block/ entries to see which block device is the |self_usb|.
# Use sd* to avoid querying any non-external block devices.
for candidate in glob.glob('/sys/block/sd*'):
# |candidate| is a link to a sys hw device file
devicepath = os.path.realpath(candidate)
# |active_storage_candidate| is also a link to a sys hw device file
if devicepath.startswith(os.path.realpath(active_storage_candidate)):
devpath = '/dev/%s' % os.path.basename(candidate)
if os.path.exists(devpath):
return devpath
# Enqueue the candidate again in hopes that it will eventually enumerate.
image_location_candidates.append(active_storage_candidate)
if time.time() >= end:
break
time.sleep(self._POLLING_DELAY_S)
# Split and join to help with error message formatting from XML that might
# introduce multiple white-spaces.
self._logger.warn(' '.join(self._error_msg.split()))
self._logger.warn('Stick should be at one of the usb image dev file '
'candidates: %s', ', '.join(image_location_candidates))
if self._supports_hub_on_port:
self._logger.warn('If using a hub on the image key port, please make '
'sure to use port %d on the hub. This should be at '
'one of: %s.', STORAGE_ON_HUB_PORT,
', '.join(hub_location_candidates))
return ''
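# Worked example of the path matching above (purely illustrative; actual bus and
# port numbers depend on the host): if the servo enumerates at
# /sys/bus/usb/devices/1-2.3, its parent hub stub is .../1-2 and, with hub_ports
# of '1', the candidate becomes .../1-2.1; a stick whose /sys/block/sdb entry
# resolves under that sysfs path is then returned as '/dev/sdb'.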
def _Get_download_to_usb_dev(self):
"""Improved error reporting for misuse."""
raise UsbImageManagerError('Download requires image path. Please use set '
'version of the control to provide path.')
def _Set_download_to_usb_dev(self, image_path):
"""Download image and save to the USB device found by host_usb_dev.
If the image_path is a URL, it will download this url to the USB path;
otherwise it will simply copy the image_path's contents to the USB path.
Args:
image_path: path or url to the recovery image.
Raises:
UsbImageManagerError: if download fails for any reason.
"""
# pylint: disable=broad-except
# Ensure that any issue gets caught & reported as UsbImageError
self._logger.debug('image_path(%s)', image_path)
self._logger.debug('Detecting USB stick device...')
usb_dev = self._interface.get(self._IMAGE_DEV)
# |errormsg| is used later to indicate the error
errormsg = ''
if not usb_dev:
# No usb dev attached, skip straight to the end.
errormsg = 'No usb device connected to servo'
else:
# There is a usb dev attached. Try to get the image.
try:
if image_path.startswith(self._HTTP_PREFIX):
self._logger.debug('Image path is a URL, downloading image')
urllib.urlretrieve(image_path, usb_dev)
else:
shutil.copyfile(image_path, usb_dev)
# Ensure that after the download the usb-device is still attached, as
# copyfile does not raise an error if the stick is removed mid-writing,
# for instance.
if not self._interface.get('image_usbkey_dev'):
raise UsbImageManagerError('Device file %s not found again after '
'copy completed.' % usb_dev)
except urllib.ContentTooShortError:
errormsg = 'Failed to download URL: %s to USB device: %s' % (image_path,
usb_dev)
except (IOError, OSError) as e:
errormsg = ('Failed to transfer image to USB device: %s ( %s ) ' %
(e.strerror, e.errno))
except UsbImageManagerError as e:
errormsg = 'Failed to transfer image to USB device: %s' % e.message
except BaseException as e:
errormsg = ('Unexpected exception downloading %s to %s: %s' %
(image_path, usb_dev, str(e)))
finally:
# We just plastered the partition table for a block device.
# Pass or fail, we mustn't go without telling the kernel about
# the change, or it will punish us with sporadic, hard-to-debug
# failures.
subprocess.call(['sync'])
subprocess.call(['blockdev', '--rereadpt', usb_dev])
if errormsg:
self._logger.error(errormsg)
raise UsbImageManagerError(errormsg)
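# Hypothetical usage sketch (the internal method is called directly here; the
# public control name is inferred from the _Set_ prefix convention and is not
# confirmed by the original source):
#
#   # driver._Set_download_to_usb_dev('http://example.test/recovery_image.bin')  # URL source
#   # driver._Set_download_to_usb_dev('/tmp/recovery_image.bin')                 # local file source
#
# Both paths finish with 'sync' and 'blockdev --rereadpt' so the kernel notices
# the rewritten partition table.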
def _Set_make_image_noninteractive(self, usb_dev_partition):
"""Makes the recovery image noninteractive.
A noninteractive image will reboot automatically after installation
instead of waiting for the USB device | |
# hvm/vm/base.py
from __future__ import absolute_import
from abc import (
ABCMeta,
abstractmethod
)
import contextlib
import functools
import logging
from typing import ( # noqa: F401
List,
Type,
Tuple,
Optional,
Union,
)
from hvm.constants import CREATE_CONTRACT_ADDRESS
import time
import rlp_cython as rlp
from eth_bloom import (
BloomFilter,
)
from eth_utils import (
to_tuple,
encode_hex,
to_int
)
from eth_hash.auto import keccak
from hvm.constants import (
GENESIS_PARENT_HASH,
MAX_PREV_HEADER_DEPTH,
MAX_UNCLES,
ZERO_HASH32,
BLANK_REWARD_HASH)
from hvm.db.trie import make_trie_root_and_nodes
from hvm.db.chain import BaseChainDB # noqa: F401
from hvm.exceptions import (
HeaderNotFound,
ValidationError,
IncorrectBlockType,
IncorrectBlockHeaderType,
BlockOnWrongChain,
ParentNotFound,
ReceivableTransactionNotFound,
TransactionNotFound)
from hvm.rlp.blocks import ( # noqa: F401
BaseBlock,
BaseQueueBlock,
BaseMicroBlock)
from hvm.rlp.transactions import ( # noqa: F401
BaseTransaction,
BaseReceiveTransaction
)
from hvm.rlp.headers import (
BlockHeader,
BaseBlockHeader)
from hvm.rlp.receipts import Receipt # noqa: F401
from hvm.utils.datatypes import (
Configurable,
)
from hvm.utils.db import (
get_parent_header,
get_block_header_by_hash,
)
from hvm.validation import (
validate_length_lte,
validate_gas_limit,
validate_private_key,
)
from hvm.vm.message import (
Message,
)
from hvm.vm.state import BaseState # noqa: F401
from eth_typing import (
Hash32,
Address,
)
from eth_keys.datatypes import(
BaseKey,
PublicKey,
PrivateKey
)
from hvm.utils.rlp import convert_rlp_to_correct_class
from hvm.rlp.consensus import StakeRewardBundle
from hvm.db.consensus import ConsensusDB
from hvm.types import Timestamp
from hvm.vm.computation import BaseComputation
class BaseVM(Configurable, metaclass=ABCMeta):
micro_block_class: Type[BaseMicroBlock] = None
block_class: Type[BaseBlock] = None
queue_block_class: Type[BaseQueueBlock] = None
consensus_db_class: Type[ConsensusDB] = None
fork: str = None
chaindb: BaseChainDB = None
consensus_db: ConsensusDB = None
_state_class: Type[BaseState] = None
state: BaseState = None
block: BaseBlock = None
queue_block: BaseQueueBlock = None
network_id: int = 0
min_time_between_blocks: int = 0
@abstractmethod
def __init__(self, header, chaindb):
pass
#
# Logging
#
@property
@abstractmethod
def logger(self):
raise NotImplementedError("VM classes must implement this method")
#
# Execution
#
@abstractmethod
def apply_send_transaction(self,
header: BlockHeader,
transaction: BaseTransaction,
caller_chain_address: Address,
validate: bool = True) -> Tuple[BlockHeader, Receipt, BaseComputation]:
raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def apply_receive_transaction(self,
header: BlockHeader,
receive_transaction: BaseReceiveTransaction,
caller_chain_address: Address,
validate: bool = True) -> Tuple[Optional[BlockHeader],
Optional[Receipt],
BaseComputation,
Optional[BaseReceiveTransaction]]:
raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def execute_bytecode(self,
origin,
gas_price,
gas,
to,
sender,
value,
data,
code,
code_address=None):
raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def make_receipt(self, base_header: BaseBlockHeader,
computation: BaseComputation,
send_transaction: BaseTransaction,
receive_transaction: BaseReceiveTransaction = None,
refund_transaction: BaseReceiveTransaction = None,
) -> Receipt:
"""
Generate the receipt resulting from applying the transaction.
:param base_header: the header of the block before the transaction was applied.
:param send_transaction: the send transaction used to generate the receipt
:param computation: the result of running the transaction computation
:param state: the resulting state, after executing the computation
:return: receipt
"""
raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def reverse_pending_transactions(self, block_header: BaseBlockHeader) -> None:
raise NotImplementedError("VM classes must implement this method")
#
# Mining
#
@abstractmethod
def import_block(self, block: Union[BaseBlock, BaseQueueBlock], validate: bool = True, private_key: PrivateKey = None) -> BaseBlock:
raise NotImplementedError("VM classes must implement this method")
# @abstractmethod
# def set_block_transactions(self, base_block, new_header, transactions, receipts):
# raise NotImplementedError("VM classes must implement this method")
#
# Finalization
#
# @abstractmethod
# def finalize_block(self, block):
# raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def pack_block(self, block, *args, **kwargs):
raise NotImplementedError("VM classes must implement this method")
#
# Headers
#
@abstractmethod
def configure_header(self, **header_params):
"""
Setup the current header with the provided parameters. This can be
used to set fields like the gas limit or timestamp to value different
than their computed defaults.
"""
raise NotImplementedError("VM classes must implement this method")
@classmethod
@abstractmethod
def create_header_from_parent(cls, parent_header, **header_params):
"""
Creates and initializes a new block header from the provided
`parent_header`.
"""
raise NotImplementedError("VM classes must implement this method")
#
# Blocks
#
@classmethod
@abstractmethod
def get_block_class(cls) -> Type['BaseBlock']:
raise NotImplementedError("VM classes must implement this method")
# @classmethod
# @abstractmethod
# def get_prev_hashes(cls, last_block_hash, chaindb):
# raise NotImplementedError("VM classes must implement this method")
@classmethod
@abstractmethod
def convert_block_to_correct_class(self, block: BaseBlock) -> BaseBlock:
raise NotImplementedError("VM classes must implement this method")
#
# Transactions
#
@abstractmethod
def create_transaction(self, *args, **kwargs):
raise NotImplementedError("VM classes must implement this method")
@classmethod
@abstractmethod
def get_transaction_class(cls):
raise NotImplementedError("VM classes must implement this method")
@classmethod
@abstractmethod
def get_receive_transaction_class(cls):
raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def save_recievable_transactions(self, block_header_hash: Hash32, computations: List[BaseComputation]) -> None:
raise NotImplementedError("VM classes must implement this method")
#
# Validate
#
@abstractmethod
def validate_block(self, block):
raise NotImplementedError("VM classes must implement this method")
@abstractmethod
def validate_transaction_against_header(self, base_header, send_transaction, receive_transaction):
"""
Validate that the given transaction is valid to apply to the given header.
:param base_header: header before applying the transaction
:param transaction: the transaction to validate
:raises: ValidationError if the transaction is not valid to apply
"""
raise NotImplementedError("VM classes must implement this method")
#
# State
#
@classmethod
@abstractmethod
def get_state_class(cls):
raise NotImplementedError("VM classes must implement this method")
class VM(BaseVM):
"""
The :class:`~hvm.vm.base.BaseVM` class represents the Chain rules for a
specific protocol definition such as the Frontier or Homestead network.
.. note::
Each :class:`~hvm.vm.base.BaseVM` class must be configured with:
- ``block_class``: The :class:`~hvm.rlp_templates.blocks.Block` class for blocks in this VM ruleset.
- ``_state_class``: The :class:`~hvm.vm.state.State` class used by this VM for execution.
"""
header: BlockHeader = None
_block: BaseBlock = None
_queue_block: BaseQueueBlock = None
_state: BaseState = None
def __init__(self, header: BlockHeader, chaindb: BaseChainDB, network_id: int):
self.chaindb = chaindb
self.consensus_db = self.consensus_db_class(chaindb)
self.network_id = network_id
self.header = header
def __repr__(self) -> str:
return '<{class_name}>'.format(
class_name=self.__class__.__name__
)
#
# Logging
#
@property
def logger(self):
return logging.getLogger('hvm.vm.base.VM.{0}'.format(self.__class__.__name__))
@property
def block(self) -> BaseBlock:
if self._block is None:
self._block = self.get_block_class().from_header(header=self.header, chaindb=self.chaindb)
return self._block
@block.setter
def block(self, val):
self._block = val
@property
def queue_block(self) -> BaseQueueBlock:
if self._queue_block is None:
self._queue_block = self.get_queue_block_class().from_header(header=self.header)
return self._queue_block
@queue_block.setter
def queue_block(self, val):
self._queue_block = val
@property
def state(self) -> BaseState:
if self._state is None:
self._state = self.get_state_class()(db=self.chaindb.db, execution_context=self.header.create_execution_context())
return self._state
@state.setter
def state(self, val):
self._state = val
def refresh_state(self) -> None:
self.state = self.get_state_class()(
db=self.chaindb.db,
execution_context=self.header.create_execution_context()
)
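# Note on the lazy-init pattern above (a restating sketch, not new behavior):
# block, queue_block and state are all derived from self.header on first access
# and then cached, so after the header changes a caller is expected to refresh
# them rather than rely on the cached objects, e.g.
#
#   # vm.header = new_header   # hypothetical header update
#   # vm.refresh_state()       # rebuild the execution state from the new header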
#
# Execution
#
def apply_send_transaction(self,
header: BlockHeader,
transaction: BaseTransaction,
caller_chain_address: Address,
validate: bool = True) -> Tuple[BlockHeader, Receipt, BaseComputation]:
"""
Apply the transaction to the current block. This is a wrapper around
:func:`~hvm.vm.state.State.apply_transaction` with some extra orchestration logic.
:param header: header of the block before application
:param transaction: to apply
"""
#caller_chain_address = header.sender
#this is a send transaction
send_transaction = transaction
receive_transaction = None
if validate:
self.validate_transaction_against_header(header, send_transaction=send_transaction)
computation, _ = self.state.apply_transaction(send_transaction = send_transaction,
caller_chain_address = caller_chain_address,
receive_transaction = receive_transaction,
validate = validate)
if validate:
receipt = self.make_receipt(header, computation, send_transaction)
new_header = header.copy(
bloom=int(BloomFilter(header.bloom) | receipt.bloom),
gas_used=receipt.gas_used,
)
return new_header, receipt, computation
else:
return None, None, computation
def apply_receive_transaction(self,
header: BlockHeader,
receive_transaction: BaseReceiveTransaction,
caller_chain_address: Address,
validate: bool = True) -> Tuple[Optional[BlockHeader],
Optional[Receipt],
BaseComputation,
Optional[BaseReceiveTransaction]]:
"""
Apply the transaction to the current block. This is a wrapper around
:func:`~hvm.vm.state.State.apply_transaction` with some extra orchestration logic.
:param header: header of the block before application
:param receive_transaction: the receive transaction to apply
"""
# Lets make sure we have this receivable transaction in the account
receivable_tx_key = self.state.account_db.get_receivable_transaction(caller_chain_address,
receive_transaction.send_transaction_hash)
# Very first thing, check to see if this transaction has been received before:
try:
block_hash, index, is_receive = self.chaindb.get_transaction_index(receive_transaction.hash)
if self.chaindb.is_in_canonical_chain(block_hash):
raise ValidationError(
'Tried to import a receive transaction that has already been received in the canonical chain')
except TransactionNotFound:
pass
if receivable_tx_key is None:
# There is no receivable transaction that matches this one.
# now check to see if the block is in the canonical chain, but didn't have the transaction in it
try:
block_hash, index, is_receive = self.chaindb.get_transaction_index(receive_transaction.send_transaction_hash)
if block_hash == receive_transaction.sender_block_hash:
raise ValidationError(
'Receive transaction is invalid. We do have the send transaction and send block, but it has already been received.')
else:
raise ValidationError(
'Receive transaction is invalid. We have already imported this transaction, but it was from another block.')
except TransactionNotFound:
if self.chaindb.is_in_canonical_chain(receive_transaction.sender_block_hash):
raise ValidationError(
'Receive transaction is invalid. We have the sender block, but it didn\'t contain the send transaction')
if self.chaindb.exists(receive_transaction.send_transaction_hash):
self.logger.debug("The missing receivable transaction exists in the db but not canonical chain.")
if self.chaindb.is_in_canonical_chain(receive_transaction.sender_block_hash):
self.logger.debug("The sender block of the missing receivable transaction is in the canonical chain. This must means the tx is in there, but wasnt saved to canonical transactions...")
raise ReceivableTransactionNotFound("caller_chain_address = {}, send_transaction_hash = {}, sender_block_hash = {}".format(
encode_hex(caller_chain_address),
encode_hex(receive_transaction.send_transaction_hash),
encode_hex(receive_transaction.sender_block_hash),
))
else:
#now lets | |
np.array([1,0,0,0,1,0,0,0,1,0,0,0])
import subprocess
if 'win' in sys.platform[:3]:
elastix_bin = os.path.join(elastix_dir,'elastix.exe')
elif 'lin' in sys.platform[:3]:
elastix_bin = os.path.join(elastix_dir,'bin/elastix')
os.environ['LD_LIBRARY_PATH'] = os.path.join(elastix_dir,'lib')
elif 'dar' in sys.platform[:3]:
elastix_bin = os.path.join(elastix_dir,'bin/elastix')
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(elastix_dir,'lib')
# temp_dir = tempfile.mkdtemp(prefix = '/data/malbert/tmp/tmp_')
temp_dir_obj = tempfile.TemporaryDirectory()
temp_dir = temp_dir_obj.name
outdir = temp_dir
# param_path_similarity = os.path.join(temp_dir,'elx_params_similarity.txt')
param_strings = [params_translation,params_rotation,params_affine][:degree+1]
# param_strings = [params_translation,params_rotation]
# param_strings = [params_translation]#,params_rotation,params_affine]
param_paths = [os.path.join(temp_dir,'elx_params_%s.txt' %i) for i in range(len(param_strings))]
# param_path_affine = os.path.join(temp_dir,'elx_params_affine.txt')
# choose scaling factors so that starting image has 10 pixel width
# highest_factor = int(np.min(fixed.shape)/10)
# factors = [highest_factor]*3 + [np.max([1,highest_factor/2.])]*3 + [np.max([1,highest_factor/4.])]*3 + [np.max([1,highest_factor/8.])]*3
# choose scaling factors so that the highest factor corresponds to an image whose smallest dimension is > 10 pixels
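# (illustrative example: for stacks whose smallest dimension is 160 pixels,
#  the loop below yields factors [8, 4, 2, 1])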
factors = []
image_pyramid_line = "\n(ImagePyramidSchedule"
number_of_iterations_line = "\n(MaximumNumberOfIterations"
for i in range(1+int(np.trunc(np.log2(np.min(list(moving.shape)+list(fixed.shape))/20.))))[::-1]:
f = int(2**i)
factors.append(f)
image_pyramid_line +=" %s %s %s" %(f,f,f)
number_of_iterations_line +=" %s" %1000
image_pyramid_line += ")"
number_of_iterations_line += ")"
image_pyramid_line +="\n(NumberOfResolutions %s)" %len(factors)
for i in range(len(param_strings))[:1]:
param_strings[i] = param_strings[i] + image_pyramid_line + number_of_iterations_line
# mod_params_similarity = params_similarity + image_pyramid_line
# mod_params_affine = params_rotation + image_pyramid_line + number_of_iterations_line
####################################
# choose scaling factors so that the highest factor corresponds to an image whose smallest dimension is > 10 pixels
factors = []
image_pyramid_line = "\n(ImagePyramidSchedule"
number_of_iterations_line = "\n(MaximumNumberOfIterations"
for i in range(1+int(np.trunc(np.log2(np.min(list(moving.shape)+list(fixed.shape))/20.))))[::-1]:
f = int(2**i)
factors.append(f)
image_pyramid_line +=" %s %s %s" %(f,f,f)
number_of_iterations_line +=" %s" %1000
image_pyramid_line += ")"
number_of_iterations_line += ")"
image_pyramid_line +="\n(NumberOfResolutions %s)" %len(factors)
for i in range(len(param_strings))[1:2]:
param_strings[i] = param_strings[i] + image_pyramid_line + number_of_iterations_line
# mod_params_similarity = params_similarity + image_pyramid_line
# mod_params_affine = params_rotation + image_pyramid_line + number_of_iterations_line
####################################
####################################
# choose scaling factors so that the highest factor corresponds to an image whose smallest dimension is > 10 pixels
factors = []
image_pyramid_line = "\n(ImagePyramidSchedule"
number_of_iterations_line = "\n(MaximumNumberOfIterations"
for i in range(1+int(np.trunc(np.log2(np.min(list(moving.shape)+list(fixed.shape))/20.))))[::-1][-1:]:
f = int(2**i)
factors.append(f)
image_pyramid_line +=" %s %s %s" %(f,f,f)
number_of_iterations_line +=" %s" %1000
image_pyramid_line += ")"
number_of_iterations_line += ")"
image_pyramid_line +="\n(NumberOfResolutions %s)" %len(factors)
for i in range(len(param_strings))[2:]:
param_strings[i] = param_strings[i] + image_pyramid_line + number_of_iterations_line
# mod_params_similarity = params_similarity + image_pyramid_line
# mod_params_affine = params_rotation + image_pyramid_line + number_of_iterations_line
####################################
if t0 is not None:
t0 = np.array(t0)
t0_inv = np.array(params_invert_coordinates(t0))
elx_initial_transform_path = os.path.join(temp_dir,'elx_initial_transform.txt')
createInitialTransformFile(np.array(fixed_spacing), t0_inv, elx_initial_transform_template_string, elx_initial_transform_path)
createParameterFile(np.array(fixed_spacing),elx_initial_transform_path, param_strings[0], param_paths[0])
for i in range(1,len(param_strings)):
open(param_paths[i],'w').write(param_strings[i])
# open(param_path_similarity,'w').write(mod_params_similarity)
# open(param_path_affine,'w').write(mod_params_affine)
fixed_path = os.path.join(temp_dir,'fixed.mhd')
moving_path = os.path.join(temp_dir,'moving.mhd')
fixedsitk = sitk.GetImageFromArray(fixed)
fixedsitk.SetSpacing(fixed_spacing)
fixedsitk.SetOrigin(fixed_origin)
movingsitk = sitk.GetImageFromArray(moving)
movingsitk.SetSpacing(moving_spacing)
movingsitk.SetOrigin(moving_origin)
# set fixed mask
fixed_mask_path = os.path.join(temp_dir,'fixed_mask.mhd')
# fixed_clahe = clahe(fixed,40,clip_limit=0.02)
if fixed_mask is None:
fixed_clahe = clahe(fixed, 40, clip_limit=0.02)
fixed_mask = get_mask_using_otsu(fixed_clahe)
# print('warning: simple mask in elastix call')
# fixed_mask = (np.array(fixed_clahe) > np.mean(fixed_clahe)).astype(np.uint16)
fixed_mask_sitk = sitk.GetImageFromArray(fixed_mask)
fixed_mask_sitk.SetSpacing(fixed_spacing)
fixed_mask_sitk.SetOrigin(fixed_origin)
sitk.WriteImage(fixed_mask_sitk,fixed_mask_path)
sitk.WriteImage(fixedsitk,fixed_path)
sitk.WriteImage(movingsitk,moving_path)
# FNULL = open(os.devnull, 'w')
# cmd = '%s -f %s -m %s -p %s -p %s -out %s -threads 1' %(elastix_bin,fixed_path,moving_path,param_path_similarity,param_path_affine,outdir)
# cmd = '%s -f %s -m %s -t0 %s' %(elastix_bin,fixed_path,moving_path,elx_initial_transform_path)
cmd = '%s -f %s -m %s -t0 %s' %(elastix_bin,fixed_path,moving_path,elx_initial_transform_path)
for i in range(len(param_strings)):
cmd += ' -p %s' %param_paths[i]
cmd += ' -fMask %s' %fixed_mask_path
cmd += ' -out %s' %outdir
# cmd += ' -threads 1' %outdir
cmd = cmd.split(' ')
# subprocess.Popen(cmd,stdout=FNULL).wait()
subprocess.Popen(cmd).wait()
final_params = t0
for i in range(len(param_strings)):
final_params = matrix_to_params(get_affine_parameters_from_elastix_output(os.path.join(temp_dir, 'TransformParameters.%s.txt' % i), t0=final_params))
print(outdir)
return final_params
@io_decorator
def register_linear_elastix(fixed,moving,degree=2,elastix_dir=None,
identifier_sample=None, identifier_fixed=None, identifier_moving=None, debug_dir=None):
"""
estimate t0 and crop images to intersection in y
:param fixed:
:param moving:
:return:
"""
lower_y0 = fixed.origin[1]
upper_y0 = fixed.origin[1] + fixed.shape[1]*fixed.spacing[1]
lower_y1 = moving.origin[1]
upper_y1 = moving.origin[1] + moving.shape[1]*moving.spacing[1]
lower_overlap = np.max([lower_y0,lower_y1])
upper_overlap = np.min([upper_y0,upper_y1])
yl0 = int((lower_overlap - lower_y0) / (upper_y0-lower_y0) * fixed.shape[1])
yu0 = int((upper_overlap - lower_y0) / (upper_y0-lower_y0) * fixed.shape[1])
yl1 = int((lower_overlap - lower_y1) / (upper_y1-lower_y1) * moving.shape[1])
yu1 = int((upper_overlap - lower_y1) / (upper_y1-lower_y1) * moving.shape[1])
# images can have different overlaps because of rounding to integer
origin_overlap0 = np.zeros(3)
origin_overlap1 = np.zeros(3)
origin_overlap0[:] = fixed.origin
origin_overlap1[:] = moving.origin
origin_overlap0[1] = lower_y0 + yl0 * fixed.spacing[1]
origin_overlap1[1] = lower_y1 + yl1 * moving.spacing[1]
# static = ImageArray(fixed[:,yl0:yu0,:],spacing=fixed.spacing,origin=origin_overlap0)
# mov = ImageArray(moving[:,yl1:yu1,:],spacing=moving.spacing,origin=origin_overlap1)
c0 = clahe(fixed,10,clip_limit=0.02)
c1 = clahe(moving,10,clip_limit=0.02)
# print('warning: not performing clahe')
# c0 = fixed
# c1 = moving
static = ImageArray(c0[:,yl0:yu0,:],spacing=fixed.spacing,origin=origin_overlap0)
mov = ImageArray(c1[:,yl1:yu1,:],spacing=moving.spacing,origin=origin_overlap1)
static_mask = get_mask_using_otsu(static)
static_mask = ImageArray(static_mask, spacing=fixed.spacing, origin=origin_overlap0)
t00 = mv_utils.euler_matrix(0, + fixed.rotation - moving.rotation, 0)
center_static = np.array(static.shape)/2.*static.spacing + static.origin
center_mov = np.array(mov.shape)/2.*mov.spacing + mov.origin
t00offset = center_mov - np.dot(t00[:3,:3],center_static)
t00[:3,3] = t00offset
t00 = matrix_to_params(t00)
# reg_spacing = np.array([fixed.spacing[0]*4]*3)
# print('WARNING: 20180614: changed fft registration spacing')
reg_iso_spacing = np.min([np.array(im.spacing)*np.array(im.shape)/160. for im in [static,mov]])
reg_iso_spacing = np.max([[reg_iso_spacing]+list(static.spacing)+list(mov.spacing)])
reg_spacing = np.array([reg_iso_spacing]*3)
stack_properties = calc_stack_properties_from_views_and_params([static.get_info(), mov.get_info()],
[matrix_to_params(np.eye(4)), t00],
spacing=reg_spacing, mode='union')
static_t = transform_stack_sitk(static,
matrix_to_params(np.eye(4)),
out_shape=stack_properties['size'],
out_origin=stack_properties['origin'],
out_spacing=stack_properties['spacing']
)
mov_t = transform_stack_sitk(mov,
t00,
out_shape=stack_properties['size'],
out_origin=stack_properties['origin'],
out_spacing=stack_properties['spacing']
)
im0 = static_t
im1 = mov_t
offset = translation3d(im1,im0)
# offset = np.array([-offset[2],0,offset[0]]) * reg_spacing
# offset = np.array([offset[0],0,offset[2]]) * reg_spacing
# print('WARNING: add complete FFT offset (also y component), 20181109')
offset = np.array([offset[0],offset[1],offset[2]]) * reg_spacing
t0 = np.copy(t00)
t0[9:] += np.dot(t0[:9].reshape((3,3)),offset)
# return t0
# use raw intensities for elastix
static = ImageArray(fixed[:,yl0:yu0,:],spacing=fixed.spacing,origin=origin_overlap0)
mov = ImageArray(moving[:,yl1:yu1,:],spacing=moving.spacing,origin=origin_overlap1)
import tifffile
if debug_dir is not None:
movt0 = transform_stack_sitk(mov, t0, stack_properties=static.get_info())
tifffile.imsave(os.path.join(debug_dir, 'mv_reginfo_000_%03d_pair_%s_%s_view_%s.tif'
% (identifier_sample, identifier_fixed, identifier_moving, identifier_fixed)), static)
tifffile.imsave(os.path.join(debug_dir, 'mv_reginfo_000_%03d_pair_%s_%s_view_%s.tif'
% (identifier_sample, identifier_fixed, identifier_moving, identifier_moving)), mov)
tifffile.imsave(os.path.join(debug_dir, 'mv_reginfo_000_%03d_pair_%s_%s_view_%s_pretransformed.tif'
% (identifier_sample, identifier_fixed, identifier_moving, identifier_moving)), movt0)
if degree is None or degree < 0: return t0
try:
parameters = register_linear_elastix_seq(static, mov, t0,
degree=degree,
elastix_dir=elastix_dir,
fixed_mask=static_mask)
if debug_dir is not None:
movt = transform_stack_sitk(mov, parameters, stack_properties=static.get_info())
tifffile.imsave(os.path.join(debug_dir, 'mv_reginfo_000_%03d_pair_%s_%s_view_%s_transformed.tif'
%(identifier_sample, identifier_fixed, identifier_moving, identifier_moving)), movt)
except:
raise(Exception('Could not register view pair (%s, %s)' %(identifier_fixed, identifier_moving)))
return parameters
from numpy.fft import fftn, ifftn
def translation3d(im0, im1):
"""Return translation vector to register images."""
# fill zeros with noise
im0_m = np.copy(im0)
im1_m = np.copy(im1)
# print('WARNING: ADDING NOISE IN FFT REGISTRATION (added 20181109)')
print('stack size in FFT translation registration: %s' %list(im0.shape))
# n0 = [im0_m[im0_m>0].min(),np.percentile(im0_m[im0_m>0],5)]
n0 = [0.05,0.15] # typical border values resulting from applying clahe to Z1 background
im0_m[im0==0] = np.random.random(np.sum(im0==0))*(n0[1]-n0[0])+n0[0]
# n1 = [im1_m[im1_m>0].min(),np.percentile(im1_m[im1_m>0],5)]
n1 = [0.05,0.15] # typical border values resulting from applying clahe to Z1 background
im1_m[im1==0] = np.random.random(np.sum(im1==0))*(n1[1]-n1[0])+n1[0]
shape = im0.shape
# f0 = fftn(im0)
# f1 = fftn(im1)
f0 = fftn(im0_m)
f1 = fftn(im1_m)
ir = abs(ifftn((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))
# print('WARNING: FILTERING IN FFT REGISTRATION (added 20181109)')
ir_gauss = ndimage.gaussian_filter(ir,1)
# t0, t1, t2 = np.unravel_index(np.argmax(ir), shape)
t0, t1, t2 = np.unravel_index(np.argmax(ir_gauss), shape)
# if t0 > shape[0] // 2:
# t0 -= shape[0]
# if t1 > shape[1] // 2:
# t1 -= shape[1]
# if t2 > shape[2] // 2:
# t2 -= shape[2]
if t0 > shape[0] // 2: t0 -= shape[0]
if t1 > shape[1] // 2: t1 -= shape[1]
if t2 > shape[2] // 2: t2 -= shape[2]
return [t0, t1, t2]
# import transformations
def get_affine_parameters_from_elastix_output(filepath_or_params,t0=None):
if type(filepath_or_params) == str:
raw_out_params = open(filepath_or_params).read()
elx_out_params = raw_out_params.split('\n')[2][:-1].split(' ')[1:]
elx_out_params = np.array([float(i) for i in elx_out_params])
# if len(elx_out_params) in [6, 7, 12]:
if len(elx_out_params) in [6, 12]:
outCenterOfRotation = raw_out_params.split('\n')[19][:-1].split(' ')[1:]
outCenterOfRotation = np.array([float(i) for i in outCenterOfRotation])
else:
elx_out_params = filepath_or_params
# when the input is given as parameters, set the center of rotation to zero.
# affinelogstacktransform doesn't use it,
# and neither does EulerStackTransform
outCenterOfRotation = np.zeros(3)
if len(elx_out_params)==6:
# tmp = transformations.euler_matrix(elx_out_params[0],elx_out_params[1],elx_out_params[2])
tmp = mv_utils.euler_matrix(elx_out_params[0], elx_out_params[1], elx_out_params[2])
elx_affine_params = np.zeros(12)
elx_affine_params[:9] = tmp[:3,:3].flatten()
elx_affine_params[-3:] = np.array([elx_out_params[3],elx_out_params[4],elx_out_params[5]])
# translation = elx_affine_params[-3:] - np.dot(elx_affine_params[:9].reshape((3,3)),outCenterOfRotation) + outCenterOfRotation
# elx_affine_params = np.concatenate([elx_affine_params[:9],translation],0)
if len(elx_out_params)==12: # affine case
elx_affine_params = elx_out_params
# elif len(elx_out_params)==7: # similarity transform
# angles = transformations.euler_from_quaternion([np.sqrt(1-np.sum([np.power(elx_out_params[i],2) for i in range(3)])),
# elx_out_params[0],elx_out_params[1],elx_out_params[2]])
# tmp = transformations.compose_matrix(angles=angles)
# elx_affine_params = np.zeros(12)
# elx_affine_params[:9] = tmp[:3,:3].flatten()*elx_out_params[6]
# elx_affine_params[-3:] = np.array([elx_out_params[3],elx_out_params[4],elx_out_params[5]])
# translation = elx_affine_params[-3:] - np.dot(elx_affine_params[:9].reshape((3,3)),outCenterOfRotation) + outCenterOfRotation
# elx_affine_params = np.concatenate([elx_affine_params[:9],translation],0)
elif len(elx_out_params)==3: # translation transform
elx_affine_params = np.array([1.,0,0,0,1,0,0,0,1,0,0,0])
elx_affine_params[9:] = elx_out_params
if len(elx_out_params) in [6,12]:
# if len(elx_out_params) in [6,7,12]:
# outCenterOfRotation = np.dot(params_to_matrix(params_invert_coordinates(t0)),np.array(list(outCenterOfRotation)+[1]))[:3]
translation = | |
np.sum(np.power(Actual[:,0] - Truth[:,0], 2.0))
print(t, '%10.4e' % sse)
return sse
if (False) :
# optimize.brute
ranges = (slice(0.18, 0.50, 0.05), slice(0.001, 0.01, 0.001), slice(2,4+1,1), slice(2,5+1,1))
# [0.43 0.007 2. 3. ] 1.225e+06
ranges = (slice(0.38, 0.48, 0.005), slice(0.006, 0.008, 0.0001), slice(2,3+1,1), slice(3,4+1,1))
result = brute( targetSSE, ranges, full_output=True, finish=None )
# print(result)
minT = result[0]
sse = targetSSE(minT)
print('SSE %10.6f, %10.6f, %10.3e' % (minT[0], minT[1], sse))
# def tSSE(t):
# return targetSSE([t[0], t[1], minT[2], minT[3]])
#
# t = fmin( tSSE, minT[0:1], disp=True)
# print(t)
# minSSE = 1e100
# minT = []
# for sc in (2, ) : # range(1, 5) :
# for rc in (5, ) : # range(sc, 6) :
# passSSE = 1e100
# passT = []
# for st in arange(0.45, 0.55, 0.001) : #arange(0.25, 5.0, 0.25) : # arange(2.8, 3.0, 0.01) : #
# for rt in arange(0.15, 0.25, 0.001) :
# t = array([st, rt, sc, rc])
# sse = targetSSE(t)
# if (sse < passSSE) :
# passSSE = sse
# passT = t
# print('SSE %d, %d, %10.6f, %10.6f, %10.3e' % (sc, rc, t[0], t[1], sse))
# if (passSSE < minSSE) :
# minSSE = passSSE
# minT = passT
# print('MINSSE %d, %d, %10.6f, %10.6f, %10.3e' % (minT[2], minT[3], minT[0], minT[1], minSSE))
sse = targetSSE(minT)
print('SSE %10.6f, %10.6f, %10.3e' % (minT[0], minT[1], sse))
else :
# minT = [0.425, 0.0079, 2, 4] # 0.606000, 0.160000, 2, 5
# minSSE = targetSSE(minT)
# print('SSE %10.6f, %10.6f, %10.3e' % (minT[0], minT[1], minSSE))
# sse = minSSE
# theta = minimize_scalar( targetTheta, [0.5, 0.99])
sse = targetTheta(theta)
minT = [1.0, 1.0, 1, 1]
if (False) :
t = minimize( targetTheta, [theta], method='Nelder-Mead', options={'disp': True})
print(t)
# sse = targetSSE(t.x)
# print('minimize SSE %10.6f, %10.6f, %10.3e' % (t.x[0], t.x[1], sse))
# print('ACTUAL: ', A2S(f.getState()))
# print('EXPECT: ', A2S(Truth[-1,:]))
st = stats.f.sf(minT[0],1,1)
rt = stats.f.sf(minT[1],1,1)
title = 'AOPF S, %10.4g, %d, R, %10.4g, %d, SSE, %10.3e' % (st, int(minT[2]), rt, int(minT[3]), sse)
ax = plt.subplot(3,1,1)
plt.title( title )
ax.plot(Times[k:], Actual[k:,0], 'r.')
ax.plot(Times[k:], Truth[k:,0], 'b-')
ax = plt.subplot(3,1,2)
err = (Truth[k:,0]-Actual[k:,0])
print('RESIDUALS', mean(err), var(err), min(err), max(err))
ax.plot(Times[k:], err, 'b-') # /Truth[:,0]
ax = plt.subplot(3,1,3)
ax.plot(Times[k:], Best[k:], 'k-')
# Schedule = zeros([nS])
# at = array([0, 1, 2, 25, 30, 40, 50, 55, 60, 70, 80,100])
# ao = array([0, 1, 5, 4, 3, 2, 1, 2, 3, 4, 5, 5])
# k = 0
# for i in range(0,nS) :
# if (at[k] <= Times[i,0]) :
# k += 1
# Schedule[i] = ao[k-1]
#
# ax.plot(Times, Schedule, 'm-')
plt.show()
def xstepVaryingOrder(self):
iter = 1
random.seed(iter)
np.random.seed(iter)
print()
testData = TestData()
print()
nS = 1000
Times = zeros([nS,1])
Truth = zeros([nS,5+1])
i = 0
with open(testData.testDataPath('varyingorder2.csv'), newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader :
Times[i,0] = float(row[0])
for j in range(0,5+1) :
Truth[i,j] = float(row[j+1])
i += 1
if (i >= Times.shape[0]) :
break
tau = 0.1
R = 10.0
theta = 0.6925
Observations = Truth[:,0:1] + R * randn(nS,1)
Actual = zeros([nS,5+1])
Best = zeros([nS,1])
if (True) : # True for 5th order only
f = PairedPolynomialFilter(5, tau, theta)
k = 0
for j in range(0,nS) :
Zstar = f.predict(Times[j,0])
e = Observations[j,0] - Zstar[0]
f.update(Times[j,0], Zstar, e)
if (f.getStatus() == FilterStatus.RUNNING) :
Actual[j,:] = f.getState()
else :
Actual[j,:] = Truth[j,:]
sse = np.sum(np.power(Actual[k:,0] - Truth[k:,0], 2.0))
print(k, '5th Order Only %10.3e' % sse)
# ax = plt.subplot(3,1,1)
# plt.title('5th order only; %10.3e' % sse)
# ax.plot(Times, Actual[:,0], 'r.')
# ax.plot(Times, Truth[:,0], 'b-')
# ax = plt.subplot(3,1,2)
err = (Truth[:,0]-Actual[:,0])
print('RESIDUALS', mean(err), var(err), min(err), max(err))
# ax.plot(Times, err, 'b-')
# ax = plt.subplot(3,1,3)
# Best[:,0] = 5
# ax.plot(Times, Best, 'k-')
# plt.show()
def targetTheta(theta : float):
trace = open('AOPF.csv', 'w')
f = AdaptiveOrderPolynomialFilter(5, tau, theta, trace=trace)
for j in range(0,nS) :
# trace.write('%10.3f T %15.9g %15.6g %15.6g %15.6g %15.6g %15.6g\n' % (Times[j,0], Truth[j,0], Truth[j,1], Truth[j,2], Truth[j,3], Truth[j,4], Truth[j,5]))
Zstar = f.predict(Times[j,0])
e = Observations[j,0] - Zstar[0]
f.update(Times[j,0], Zstar, e)
Actual[j,:] = f.getState()
Best[j,0] = f.getBest()
f.close()
sse = np.sum(np.power(Actual[:,0] - Truth[:,0], 2.0))
print(theta, '%10.4e' % sse)
return sse
def targetSSE(t : float):
trace = open('AOPF.csv', 'w')
f = AdaptiveOrderPolynomialFilter(5, tau, theta, trace=trace)
for j in range(0,nS) :
# trace.write('%10.3f T %15.9g %15.6g %15.6g %15.6g %15.6g %15.6g\n' % (Times[j,0], Truth[j,0], Truth[j,1], Truth[j,2], Truth[j,3], Truth[j,4], Truth[j,5]))
Zstar = f.predict(Times[j,0])
e = Observations[j,0] - Zstar[0]
f.update(Times[j,0], Zstar, e)
Actual[j,:] = f.getState()
Best[j,0] = f.getBest()
f.close()
sse = np.sum(np.power(Actual[:,0] - Truth[:,0], 2.0))
print(t, '%10.4e' % sse)
return sse
if (False) :
# optimize.brute
ranges = (slice(0.18, 0.50, 0.05), slice(0.001, 0.01, 0.001), slice(2,4+1,1), slice(2,5+1,1))
# [0.43 0.007 2. 3. ] 1.225e+06
ranges = (slice(0.38, 0.48, 0.005), slice(0.006, 0.008, 0.0001), slice(2,3+1,1), slice(3,4+1,1))
result = brute( targetSSE, ranges, full_output=True, finish=None )
# print(result)
minT = result[0]
sse = targetSSE(minT)
print('SSE %10.6f, %10.6f, %10.3e' % (minT[0], minT[1], sse))
# def tSSE(t):
# return targetSSE([t[0], t[1], minT[2], minT[3]])
#
# t = fmin( tSSE, minT[0:1], disp=True)
# print(t)
# minSSE = 1e100
# minT = []
# for sc in (2, ) : # range(1, 5) :
# for rc in (5, ) : # range(sc, 6) :
# passSSE = 1e100
# passT = []
# for st in arange(0.45, 0.55, 0.001) : #arange(0.25, 5.0, 0.25) : # arange(2.8, 3.0, 0.01) : #
# for rt in arange(0.15, 0.25, 0.001) :
# t = array([st, rt, sc, rc])
# sse = targetSSE(t)
# if (sse < passSSE) :
# passSSE = sse
# passT = t
# print('SSE %d, %d, %10.6f, %10.6f, %10.3e' % (sc, rc, t[0], t[1], sse))
# if (passSSE < minSSE) :
# minSSE = passSSE
# minT = passT
# print('MINSSE %d, %d, %10.6f, %10.6f, %10.3e' % (minT[2], minT[3], minT[0], minT[1], minSSE))
sse = targetSSE(minT)
print('SSE %10.6f, %10.6f, %10.3e' % (minT[0], minT[1], sse))
else :
# minT = [0.425, 0.0079, 2, 4] # 0.606000, 0.160000, 2, 5
# minSSE = targetSSE(minT)
# print('SSE %10.6f, %10.6f, %10.3e' % (minT[0], minT[1], minSSE))
# sse = minSSE
# theta = minimize_scalar( targetTheta, [0.5, 0.99])
sse = targetTheta(theta)
minT = [1.0, 1.0, 1, 1]
if (False) :
t = minimize( targetTheta, [theta], method='Nelder-Mead', options={'disp': True})
print(t)
# sse = targetSSE(t.x)
# print('minimize SSE %10.6f, %10.6f, %10.3e' % (t.x[0], t.x[1], sse))
# print('ACTUAL: ', A2S(f.getState()))
# print('EXPECT: ', A2S(Truth[-1,:]))
st = stats.f.sf(minT[0],1,1)
rt = stats.f.sf(minT[1],1,1)
title = 'AOPF S, %10.4g, %d, R, %10.4g, %d, SSE, %10.3e' % (st, int(minT[2]), rt, int(minT[3]), sse)
ax = plt.subplot(3,1,1)
plt.title( title )
ax.plot(Times, Actual[:,0], 'r.')
ax.plot(Times, Truth[:,0], 'b-')
ax = plt.subplot(3,1,2)
err = (Truth[:,0]-Actual[:,0])
print('RESIDUALS', mean(err), var(err), min(err), max(err))
ax.plot(Times, err, 'b-') # /Truth[:,0]
ax = plt.subplot(3,1,3)
ax.plot(Times, Best, 'k-')
Schedule = zeros([nS])
at = array([0, 1, 2, 25, 30, 40, 50, 55, 60, 70, 80,100])
ao = array([0, 1, 5, 4, 3, 2, 1, 2, 3, 4, 5, 5])
k = 0
for i in range(0,nS) :
if (at[k] <= Times[i,0]) :
k += 1
Schedule[i] = ao[k-1]
ax.plot(Times, Schedule, 'm-')
plt.show()
# def generateVaryingOrder(self, iter : int):
# random.seed(iter)
# np.random.seed(iter)
# print()
# t0 = 0
# tau = 0.1
# R = 10.0
# nS = 1000
# theta = 0.95
# f = AdaptiveOrderPolynomialFilter(5, tau, theta)
# Y0 = generateTestPolynomial( 5, nS, t0, tau )
# print(Y0)
# Truth = zeros([nS, 5+1])
# Times = zeros([nS,1])
# Observations = zeros([nS,1])
# i = 0
# Truth[i,0] = Y0[0]
# k = 100
# order = 0
# (times, truth, observations, noise) = generateTestData(order, k, 0.0, Truth[i,:], tau, sigma=R)
# Times[i:i+k,0] = times[:,0]
# Truth[i:i+k,0:order+1] = truth[:,0:order+1]
# Observations[i:i+k,0] = observations[:,0]
# i += k
#
# def addSegment(i : int, k : int, order1 : int, order2 : int, Times : array, Truth : array, Observations : array):
# Times[i,:] = Times[i-1,:]
# Truth[i,0:order2+1] = StateTransition.conformState(order2, Y0)
# Truth[i,0:order1+1] = Truth[i-1,0:order1+1]
# (times, truth, observations, noise) = generateTestData(order2, k, Times[i,0], Truth[i,0:order2+1], tau, sigma=R)
# Times[i:i+k,0] = times[:,0]
# Truth[i:i+k,0:order2+1] = truth[:,0:order2+1]
# Observations[i:i+k,0] = observations[:,0]
# # print(A2S(Truth[i-1:i+1,:]))
# # print(A2S(Truth[i+k-1:i+k+1,:]))
# return i + k
#
# i = addSegment(i, 500, 0, 5, Times, Truth, Observations)
# i = addSegment(i, 100, 5, 2, Times, Truth, Observations)
# i = addSegment(i, 100, 1, 2, Times, Truth, Observations)
# nS = addSegment(i, 200, 2, 4, Times, Truth, Observations)
#
# trace = open('AOPF.csv', 'w')
# f = AdaptiveOrderPolynomialFilter(5, tau, theta, trace=trace)
# for j in range(0,nS) :
# Zstar = f.predict(Times[j][0])
# e = Observations[j] - Zstar[0]
# f.update(Times[j][0], | |
(size_a < size_b ||
(size_a == size_b &&
a['ob_digit'][size_a-1] < b['ob_digit'][size_b-1])) {
// |a| < |b|
pdiv['ob_size'] = 0;
prem['ob_digit'] = a['ob_digit']['slice'](0);
prem['ob_size'] = a['ob_size'];
return 0;
}
if (size_b == 1) {
rem = [0];
prem['ob_digit'] = [0];
prem['ob_size'] = 1;
z = divrem1(a, b['ob_digit'][0], prem['ob_digit']);
prem = long_normalize(prem);
}
else {
z = @{{!x_divrem}}(a, b, prem);
}
if (z === null) {
pdiv['ob_size'] = 0;
} else {
pdiv['ob_digit'] = z['ob_digit']['slice'](0);
pdiv['ob_size'] = z['ob_size'];
}
if ((a['ob_size'] < 0) != (b['ob_size'] < 0))
pdiv['ob_size'] = -(pdiv['ob_size']);
if (a['ob_size'] < 0 && prem['ob_size'] != 0)
prem['ob_size'] = -prem['ob_size'];
return 0;
}
function x_divrem(v1, w1, prem) {
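// Unsigned schoolbook long division of |v1| by |w1| (used when the divisor has
// more than one digit), in the style of Knuth's Algorithm D: the operands are
// first scaled by d so the divisor's top digit is large, each quotient digit is
// estimated from the leading digits and corrected, and the scaled remainder is
// divided back down by d (via divrem1) into prem at the end.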
var size_w = w1['ob_size'] < 0 ? -w1['ob_size'] : w1['ob_size'];
var d = Math['floor'](PyLong_BASE / (w1['ob_digit'][size_w-1] + 1));
var v = muladd1($x_divrem_v, v1, d, 0);
var w = muladd1($x_divrem_w, w1, d, 0);
var a, j, k;
var size_v = v['ob_size'] < 0 ? -v['ob_size'] : v['ob_size'];
k = size_v - size_w;
a = new $long(0);
a['ob_size'] = k + 1;
for (j = size_v; k >= 0; --j, --k) {
var vj = (j >= size_v) ? 0 : v['ob_digit'][j];
var carry = 0;
var q, i;
if (vj == w['ob_digit'][size_w-1])
q = PyLong_MASK;
else
q = Math['floor'](((vj << PyLong_SHIFT) + v['ob_digit'][j-1]) /
w['ob_digit'][size_w-1]);
while (w['ob_digit'][size_w-2]*q >
((
(vj << PyLong_SHIFT)
+ v['ob_digit'][j-1]
- q*w['ob_digit'][size_w-1]
) << PyLong_SHIFT)
+ v['ob_digit'][j-2])
--q;
for (i = 0; i < size_w && i+k < size_v; ++i) {
var z = w['ob_digit'][i] * q;
var zz = z >>> PyLong_SHIFT;
carry += v['ob_digit'][i+k] - z
+ (zz << PyLong_SHIFT);
v['ob_digit'][i+k] = carry & PyLong_MASK;
// carry = Py_ARITHMETIC_RIGHT_SHIFT(BASE_TWODIGITS_TYPE, carry, PyLong_SHIFT);
carry >>= PyLong_SHIFT;
carry -= zz;
}
if (i+k < size_v) {
carry += v['ob_digit'][i+k];
v['ob_digit'][i+k] = 0;
}
if (carry == 0)
a['ob_digit'][k] = q;
else {
a['ob_digit'][k] = q-1;
carry = 0;
for (i = 0; i < size_w && i+k < size_v; ++i) {
carry += v['ob_digit'][i+k] + w['ob_digit'][i];
v['ob_digit'][i+k] = carry & PyLong_MASK;
// carry = Py_ARITHMETIC_RIGHT_SHIFT( BASE_TWODIGITS_TYPE, carry, PyLong_SHIFT);
carry >>= PyLong_SHIFT;
}
}
} /* for j, k */
i = divrem1(v, d, prem);
prem['ob_digit'] = i['ob_digit']['slice'](0);
prem['ob_size'] = i['ob_size'];
return long_normalize(a);
}
function x_add(a, b) {
var size_a = a['ob_size'] < 0 ? -a['ob_size'] : a['ob_size'];
var size_b = b['ob_size'] < 0 ? -b['ob_size'] : b['ob_size'];
var z = new $long(0);
var i;
var carry = 0;
if (size_a < size_b) {
var temp = a;
a = b;
b = temp;
temp = size_a;
size_a = size_b;
size_b = temp;
}
for (i = 0; i < size_b; ++i) {
carry += a['ob_digit'][i] + b['ob_digit'][i];
z['ob_digit'][i] = carry & PyLong_MASK;
carry >>>= PyLong_SHIFT;
}
for (; i < size_a; ++i) {
carry += a['ob_digit'][i];
z['ob_digit'][i] = carry & PyLong_MASK;
carry >>>= PyLong_SHIFT;
}
z['ob_digit'][i] = carry;
z['ob_size'] = i+1;
return long_normalize(z);
}
function x_sub(a, b) {
var size_a = a['ob_size'] < 0 ? -a['ob_size'] : a['ob_size'];
var size_b = b['ob_size'] < 0 ? -b['ob_size'] : b['ob_size'];
var z = new $long(0);
var i;
var borrow = 0;
var sign = 1;
if (size_a < size_b) {
var temp = a;
a = b;
b = temp;
temp = size_a;
size_a = size_b;
size_b = temp;
sign = -1;
} else if (size_a == size_b) {
i = size_a;
while (--i >= 0 && a['ob_digit'][i] == b['ob_digit'][i])
;
if (i < 0)
return z;
if (a['ob_digit'][i] < b['ob_digit'][i]) {
var temp = a;
a = b;
b = temp;
temp = size_a;
size_a = size_b;
size_b = temp;
sign = -1;
}
size_a = size_b = i+1;
}
for (i = 0; i < size_b; ++i) {
borrow = a['ob_digit'][i] - b['ob_digit'][i] - borrow;
z['ob_digit'][i] = borrow & PyLong_MASK;
borrow >>>= PyLong_SHIFT;
borrow &= 1;
}
for (; i < size_a; ++i) {
borrow = a['ob_digit'][i] - borrow;
z['ob_digit'][i] = borrow & PyLong_MASK;
borrow >>>= PyLong_SHIFT;
borrow &= 1;
}
z['ob_size'] = i;
if (sign < 0)
z['ob_size'] = -(z['ob_size']);
return long_normalize(z);
}
function x_mul(a, b) {
var size_a = a['ob_size'] < 0 ? -a['ob_size'] : a['ob_size'];
var size_b = b['ob_size'] < 0 ? -b['ob_size'] : b['ob_size'];
var z = new $long(0);
var i, s;
z['ob_size'] = size_a + size_b;
for (i = 0; i < z['ob_size']; i++) {
z['ob_digit'][i] = 0;
}
if (size_a == size_b && array_eq(a['ob_digit'], b['ob_digit'], size_a)) {
// Efficient squaring per HAC, Algorithm 14.16:
for (i = 0; i < size_a; ++i) {
var carry;
var f = a['ob_digit'][i];
var pz = (i << 1);
var pa = i + 1;
var paend = size_a;
carry = z['ob_digit'][pz] + f * f;
z['ob_digit'][pz++] = carry & PyLong_MASK;
carry >>>= PyLong_SHIFT;
f <<= 1;
while (pa < paend) {
carry += z['ob_digit'][pz] + a['ob_digit'][pa++] * f;
z['ob_digit'][pz++] = carry & PyLong_MASK;
carry >>>= PyLong_SHIFT;
}
if (carry) {
carry += z['ob_digit'][pz];
z['ob_digit'][pz++] = carry & PyLong_MASK;
carry >>>= PyLong_SHIFT;
}
if (carry) {
z['ob_digit'][pz] += carry & PyLong_MASK;
}
}
}
else { // a is not the same as b -- gradeschool long mult
for (i = 0; i < size_a; ++i) {
var carry = 0;
var f = a['ob_digit'][i];
var pz = i;
var pb = 0;
var pbend = size_b;
while (pb < pbend) {
carry += z['ob_digit'][pz] + b['ob_digit'][pb++] * f;
z['ob_digit'][pz++] = carry & PyLong_MASK;
carry >>>= PyLong_SHIFT;
}
if (carry) {
z['ob_digit'][pz] += carry & PyLong_MASK;
}
}
}
z['ob_size'] = z['ob_digit']['length'];
return long_normalize(z);
}
function l_divmod(v, w, pdiv, pmod) {
var div = $l_divmod_div,
mod = $l_divmod_mod;
if (long_divrem(v, w, div, mod) < 0)
return -1;
if (pdiv == null && pmod == null) return 0;
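// Adjust to Python's floor-division convention, where the remainder takes the
// sign of the divisor: e.g. 7 // -2 == -4 and 7 % -2 == -1, so when mod and w
// have opposite signs, add w to mod and subtract 1 from div.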
if ((mod['ob_size'] < 0 && w['ob_size'] > 0) ||
(mod['ob_size'] > 0 && w['ob_size'] < 0)) {
mod = mod['__add__'](w);
div = div['__sub__']($const_long_1);
}
if (pdiv !== null) {
pdiv['ob_digit'] = div['ob_digit']['slice'](0);
pdiv['ob_size'] = div['ob_size'];
}
if (pmod !== null) {
pmod['ob_digit'] = mod['ob_digit']['slice'](0);
pmod['ob_size'] = mod['ob_size'];
}
return 0;
}
/* XXX do not convert to @{{long}} - this is correct */
var $long = pyjslib['long'] = function(value, radix) {
var v, i;
if (!radix || radix['valueOf']() == 0) {
if (typeof value == 'undefined') {
throw @{{TypeError}}("long() takes at least 1 argument");
}
switch (value['__number__']) {
case 0x01:
value = value > 0 ? Math['floor'](value) : Math['ceil'](value);
break;
case 0x02:
break;
case 0x04:
return value;
}
radix = null;
}
if (typeof this != 'object' || this['__number__'] != 0x04) return new $long(value, radix);
v = value;
this['ob_size'] = 0;
this['ob_digit'] = new Array();
if (v['__number__']) {
if (radix) {
throw @{{TypeError}}("long() can't convert non-string with explicit base");
}
if (v['__number__'] == 0x04) {
var size = v['ob_size'] < 0 ? -v['ob_size']:v['ob_size'];
for (var i = 0; i < size; i++) {
this['ob_digit'][i] = v['ob_digit'][i];
}
this['ob_size'] = v['ob_size'];
return this;
}
if (v['__number__'] == 0x02) {
var neg = false;
var ndig = 0;
v = v['valueOf']();
if (v < 0) {
v = -v;
neg = true;
}
// Count the number of Python digits.
var t = v;
while (t) {
this['ob_digit'][ndig] = t & PyLong_MASK;
t >>>= PyLong_SHIFT;
++ndig;
}
this['ob_size'] = neg ? -ndig : ndig;
return this;
}
if (v['__number__'] == 0x01) {
var ndig, frac, expo, bits;
var neg = false;
if (isNaN(v)) {
throw @{{ValueError}}('cannot convert float NaN to integer');
}
if (!isFinite(v)) {
throw @{{OverflowError}}('cannot convert float infinity to integer');
}
if (v == 0) {
this['ob_digit'][0] = 0;
this['ob_size'] = 0;
return this;
}
if (v < 0) {
v = -v;
neg = true;
}
// frac = frexp(dval, &expo); // dval = frac*2**expo; 0.0 <= frac < 1.0
if (v == 0) {
frac = 0;
expo = 0;
| |
None: imds = eom.make_imds()
t1, t2 = imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nkpts, nocc_a, nvir_a = t1a.shape
nkpts, nocc_b, nvir_b = t1b.shape
kconserv = imds.kconserv
Hr1a = -np.diag(imds.Foo[kshift])
Hr1b = -np.diag(imds.FOO[kshift])
Hr2aaa = np.zeros((nkpts,nkpts,nocc_a,nocc_a,nvir_a), dtype=t1[0].dtype)
Hr2bbb = np.zeros((nkpts,nkpts,nocc_b,nocc_b,nvir_b), dtype=t1[0].dtype)
Hr2abb = np.zeros((nkpts,nkpts,nocc_a,nocc_b,nvir_b), dtype=t1[0].dtype)
Hr2baa = np.zeros((nkpts,nkpts,nocc_b,nocc_a,nvir_a), dtype=t1[0].dtype)
if eom.partition == 'mp':
raise Exception("MP diag is not tested") # remove this to use untested code
#foo = eris.fock[0][:,:nocc_a,:nocc_a]
#fOO = eris.fock[1][:,:nocc_b,:nocc_b]
#fvv = eris.fock[0][:,:nvir_a,:nvir_a]
#fVV = eris.fock[1][:,:nvir_b,:nvir_b]
for ki in range(nkpts):
for kj in range(nkpts):
ka = kconserv[ki,kshift,kj]
Hr2aaa[ki,kj] = imds.Fvv[ka].diagonal()
Hr2aaa[ki,kj] -= imds.Foo[ki].diagonal()[:,None,None]
Hr2aaa[ki,kj] -= imds.Foo[kj].diagonal()[None,:,None]
Hr2bbb[ki,kj] = imds.FVV[ka].diagonal()
Hr2bbb[ki,kj] -= imds.FOO[ki].diagonal()[:,None,None]
Hr2bbb[ki,kj] -= imds.FOO[kj].diagonal()[None,:,None]
Hr2aba[ki,kj] = imds.Fvv[ka].diagonal()
Hr2aba[ki,kj] -= imds.Foo[ki].diagonal()[:,None,None]
Hr2aba[ki,kj] -= imds.FOO[kj].diagonal()[None,:,None]
Hr2bab[ki,kj] = imds.FVV[ka].diagonal()
Hr2bab[ki,kj] -= imds.FOO[ki].diagonal()[:,None,None]
Hr2bab[ki,kj] -= imds.Foo[kj].diagonal()[None,:,None]
else:
for ka in range(nkpts):
for ki in range(nkpts):
kj = kconserv[kshift,ki,ka]
Hr2aaa[ki,kj] += imds.Fvv[ka].diagonal()
Hr2abb[ki,kj] += imds.FVV[ka].diagonal()
Hr2bbb[ki,kj] += imds.FVV[ka].diagonal()
Hr2baa[ki,kj] += imds.Fvv[ka].diagonal()
Hr2aaa[ki,kj] -= imds.Foo[ki].diagonal()[:,None,None]
Hr2aaa[ki,kj] -= imds.Foo[kj].diagonal()[None,:,None]
Hr2abb[ki,kj] -= imds.Foo[ki].diagonal()[:,None,None]
Hr2abb[ki,kj] -= imds.FOO[kj].diagonal()[None,:,None]
Hr2baa[ki,kj] -= imds.FOO[ki].diagonal()[:,None,None]
Hr2baa[ki,kj] -= imds.Foo[kj].diagonal()[None,:,None]
Hr2bbb[ki,kj] -= imds.FOO[ki].diagonal()[:,None,None]
Hr2bbb[ki,kj] -= imds.FOO[kj].diagonal()[None,:,None]
for ki, kj in itertools.product(range(nkpts), repeat=2):
Hr2aaa[ki, kj] += lib.einsum('iijj->ij', imds.Woooo[ki, ki, kj])[:,:,None]
Hr2abb[ki, kj] += lib.einsum('iiJJ->iJ', imds.WooOO[ki, ki, kj])[:,:,None]
Hr2bbb[ki, kj] += lib.einsum('IIJJ->IJ', imds.WOOOO[ki, ki, kj])[:,:,None]
Hr2baa[ki, kj] += lib.einsum('jjII->Ij', imds.WooOO[kj, kj, ki])[:,:,None]
kb = kconserv[ki, kshift, kj]
Hr2aaa[ki,kj] -= lib.einsum('iejb,jibe->ijb', imds.Wovov[ki,kshift,kj], t2aa[kj,ki,kb])
Hr2abb[ki,kj] -= lib.einsum('ieJB,iJeB->iJB', imds.WovOV[ki,kshift,kj], t2ab[ki,kj,kshift])
Hr2baa[ki,kj] -= lib.einsum('jbIE,jIbE->Ijb', imds.WovOV[kj,kb,ki], t2ab[kj,ki,kb])
Hr2bbb[ki,kj] -= lib.einsum('IEJB,JIBE->IJB', imds.WOVOV[ki,kshift,kj], t2bb[kj,ki,kb])
Hr2aaa[ki, kj] += lib.einsum('ibbi->ib', imds.Wovvo[ki, kb, kb])[:,None,:]
Hr2aaa[ki, kj] += lib.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])[None,:,:]
Hr2baa[ki, kj] += lib.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])[None,:,:]
Hr2baa[ki, kj] -= lib.einsum('IIbb->Ib', imds.WOOvv[ki, ki, kb])[:,None,:]
Hr2abb[ki, kj] += lib.einsum('JBBJ->JB', imds.WOVVO[kj, kb, kb])[None,:,:]
Hr2abb[ki, kj] -= lib.einsum('iiBB->iB', imds.WooVV[ki, ki, kb])[:,None,:]
Hr2bbb[ki, kj] += lib.einsum('IBBI->IB', imds.WOVVO[ki, kb, kb])[:,None,:]
Hr2bbb[ki, kj] += lib.einsum('JBBJ->JB', imds.WOVVO[kj, kb, kb])[None,:,:]
vector = amplitudes_to_vector_ip((Hr1a,Hr1b), (Hr2aaa,Hr2baa,Hr2abb,Hr2bbb), kshift, kconserv)
return vector
def mask_frozen_ip(eom, vector, kshift, const=LARGE_DENOM):
'''Replaces all frozen orbital indices of `vector` with the value `const`.'''
nkpts = eom.nkpts
nocca, noccb = eom.nocc
nmoa, nmob = eom.nmo
kconserv = eom.kconserv
r1, r2 = eom.vector_to_amplitudes(vector, kshift, nkpts, (nmoa, nmob), (nocca, noccb), kconserv)
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
# Get location of padded elements in occupied and virtual space
nonzero_opadding, nonzero_vpadding = eom.nonzero_opadding, eom.nonzero_vpadding
nonzero_opadding_a, nonzero_opadding_b = nonzero_opadding
nonzero_vpadding_a, nonzero_vpadding_b = nonzero_vpadding
new_r1a = const * np.ones_like(r1a)
new_r1b = const * np.ones_like(r1b)
new_r2aaa = const * np.ones_like(r2aaa)
new_r2baa = const * np.ones_like(r2baa)
new_r2abb = const * np.ones_like(r2abb)
new_r2bbb = const * np.ones_like(r2bbb)
# r1a/b case
new_r1a[nonzero_opadding_a[kshift]] = r1a[nonzero_opadding_a[kshift]]
new_r1b[nonzero_opadding_b[kshift]] = r1b[nonzero_opadding_b[kshift]]
# r2aaa case
for ki in range(nkpts):
for kj in range(nkpts):
kb = kconserv[ki, kshift, kj]
idx = np.ix_([ki], [kj], nonzero_opadding_a[ki], nonzero_opadding_a[kj], nonzero_vpadding_a[kb])
new_r2aaa[idx] = r2aaa[idx]
# r2baa case
for ki in range(nkpts):
for kj in range(nkpts):
kb = kconserv[ki, kshift, kj]
idx = np.ix_([ki], [kj], nonzero_opadding_b[ki], nonzero_opadding_a[kj], nonzero_vpadding_a[kb])
new_r2baa[idx] = r2baa[idx]
# r2abb case
for ki in range(nkpts):
for kj in range(nkpts):
kb = kconserv[ki, kshift, kj]
idx = np.ix_([ki], [kj], nonzero_opadding_a[ki], nonzero_opadding_b[kj], nonzero_vpadding_b[kb])
new_r2abb[idx] = r2abb[idx]
# r2bbb case
for ki in range(nkpts):
for kj in range(nkpts):
kb = kconserv[ki, kshift, kj]
idx = np.ix_([ki], [kj], nonzero_opadding_b[ki], nonzero_opadding_b[kj], nonzero_vpadding_b[kb])
new_r2bbb[idx] = r2bbb[idx]
return eom.amplitudes_to_vector((new_r1a,new_r1b), (new_r2aaa,new_r2baa,new_r2abb,new_r2bbb), kshift, kconserv)
def get_padding_k_idx(eom, cc):
# Get location of padded elements in occupied and virtual space
nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(cc, kind="split")
nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta
return ((nonzero_opadding_alpha, nonzero_opadding_beta),
(nonzero_vpadding_alpha, nonzero_vpadding_beta))
class EOMIP(eom_kgccsd.EOMIP):
def __init__(self, cc):
#if not isinstance(cc, kccsd.GCCSD):
# raise TypeError
self.kpts = cc.kpts
eom_kgccsd.EOMIP.__init__(self, cc)
get_diag = ipccsd_diag
matvec = ipccsd_matvec
get_padding_k_idx = get_padding_k_idx
mask_frozen = mask_frozen_ip
def get_init_guess(self, kshift, nroots=1, koopmans=False, diag=None):
size = self.vector_size()
dtype = getattr(diag, 'dtype', np.complex128)
nroots = min(nroots, size)
nocca, noccb = self.nocc
guess = []
if koopmans:
idx = np.zeros(nroots, dtype=int)
tmp_oalpha, tmp_obeta = self.nonzero_opadding[kshift]
tmp_oalpha = list(tmp_oalpha)
tmp_obeta = list(tmp_obeta)
if len(tmp_obeta) + len(tmp_oalpha) < nroots:
raise ValueError("Max number of roots for k-point (idx=%3d) for koopmans "
"is %3d.\nRequested %3d." %
(kshift, len(tmp_obeta)+len(tmp_oalpha), nroots))
total_count = 0
while(total_count < nroots):
if total_count % 2 == 0 and len(tmp_oalpha) > 0:
idx[total_count] = tmp_oalpha.pop()
else:
# Careful! index depends on how we create vector
# (here the first elements are r1a, then r1b)
idx[total_count] = nocca + tmp_obeta.pop()
total_count += 1
else:
idx = diag.argsort()
for i in idx[:nroots]:
g = np.zeros(size, dtype)
g[i] = 1.0
g = self.mask_frozen(g, kshift, const=0.0)
guess.append(g)
return guess
def gen_matvec(self, kshift, imds=None, left=False, **kwargs):
if imds is None: imds = self.make_imds()
diag = self.get_diag(kshift, imds)
if left:
raise NotImplementedError
matvec = lambda xs: [self.l_matvec(x, kshift, imds, diag) for x in xs]
else:
matvec = lambda xs: [self.matvec(x, kshift, imds, diag) for x in xs]
return matvec, diag
def vector_to_amplitudes(self, vector, kshift, nkpts=None, nmo=None, nocc=None, kconserv=None):
if nmo is None: nmo = self.nmo
if nocc is None: nocc = self.nocc
if nkpts is None: nkpts = self.nkpts
if kconserv is None: kconserv = self.kconserv
return vector_to_amplitudes_ip(vector, kshift, nkpts, nmo, nocc, kconserv)
def amplitudes_to_vector(self, r1, r2, kshift, kconserv=None):
if kconserv is None: kconserv = self.kconserv
return amplitudes_to_vector_ip(r1, r2, kshift, kconserv)
def vector_size(self):
nocca, noccb = self.nocc
nmoa, nmob = self.nmo
nvira, nvirb = nmoa - nocca, nmob - noccb
nkpts = self.nkpts
return (nocca + noccb +
nkpts*nocca*(nkpts*nocca-1)*nvira//2 +
nkpts**2*noccb*nocca*nvira +
nkpts**2*nocca*noccb*nvirb +
nkpts*noccb*(nkpts*noccb-1)*nvirb//2)
def make_imds(self, eris=None, t1=None, t2=None):
imds = _IMDS(self._cc, eris, t1, t2)
imds.make_ip()
return imds
########################################
# EOM-EA-CCSD
########################################
def amplitudes_to_vector_ea(r1, r2, kshift, kconserv):
r1a, r1b = r1
r2a, r2aba, r2bab, r2b = r2
nkpts = r2a.shape[0]
nocca, noccb = r2a.shape[2], r2b.shape[2]
nvira, nvirb = r2a.shape[3], r2b.shape[3]
# From symmetry for aaa and bbb terms, only store lower
# triangular part (ka,a) < (kb,b)
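# (the aaa/bbb blocks are antisymmetric in their two virtual indices, so the
#  upper triangle carries no extra information; vector_to_amplitudes_ea below
#  restores it with a sign flip: r2[..., b, a] = -r2[..., a, b])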
r2aaa = np.zeros((nocca*nkpts*nvira*(nkpts*nvira-1))//2, dtype=r2a.dtype)
r2bbb = np.zeros((noccb*nkpts*nvirb*(nkpts*nvirb-1))//2, dtype=r2b.dtype)
index = 0
for kj, ka in itertools.product(range(nkpts), repeat=2):
kb = kconserv[kshift,ka,kj]
if ka < kb: # Take diagonal part
idxa, idya = np.tril_indices(nvira, 0)
else: # Don't take diagonal (equal to zero)
idxa, idya = np.tril_indices(nvira, -1)
r2aaa[index:index + nocca*len(idya)] = r2a[kj,ka,:,idxa,idya].reshape(-1)
index = index + nocca*len(idya)
index = 0
for kj, ka in itertools.product(range(nkpts), repeat=2):
kb = kconserv[kshift,ka,kj]
if ka < kb: # Take diagonal part
idxb, idyb = np.tril_indices(nvirb, 0)
else:
idxb, idyb = np.tril_indices(nvirb, -1)
r2bbb[index:index + noccb*len(idyb)] = r2b[kj,ka,:,idxb,idyb].reshape(-1)
index = index + noccb*len(idyb)
return np.hstack((r1a, r1b, r2aaa.ravel(),
r2aba.ravel(), r2bab.ravel(),
r2bbb.ravel()))
def vector_to_amplitudes_ea(vector, kshift, nkpts, nmo, nocc, kconserv):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
sizes = (nvira, nvirb, nkpts*nocca*(nkpts*nvira-1)*nvira//2,
nkpts**2*nocca*nvirb*nvira, nkpts**2*noccb*nvira*nvirb,
nkpts*noccb*(nkpts*nvirb-1)*nvirb//2)
sections = np.cumsum(sizes[:-1])
r1a, r1b, r2a, r2aba, r2bab, r2b = np.split(vector, sections)
r2aaa = np.zeros((nkpts,nkpts,nocca,nvira,nvira), dtype=r2a.dtype)
r2aba = r2aba.reshape(nkpts,nkpts,nocca,nvirb,nvira).copy()
r2bab = r2bab.reshape(nkpts,nkpts,noccb,nvira,nvirb).copy()
r2bbb = np.zeros((nkpts,nkpts,noccb,nvirb,nvirb), dtype=r2b.dtype)
index = 0
for kj, ka in itertools.product(range(nkpts), repeat=2):
kb = kconserv[kshift,ka,kj]
if ka < kb: # Take diagonal part
idxa, idya = np.tril_indices(nvira, 0)
else:
idxa, idya = np.tril_indices(nvira, -1)
tmp = r2a[index:index + nocca*len(idya)].reshape(-1,nocca)
r2aaa[kj,ka,:,idxa,idya] = tmp
r2aaa[kj,kb,:,idya,idxa] = -tmp
index = index + nocca*len(idya)
index = 0
for kj, ka in itertools.product(range(nkpts), repeat=2):
kb = kconserv[kshift,ka,kj]
if ka < kb: # Take diagonal part
idxb, idyb = np.tril_indices(nvirb, 0)
else:
idxb, idyb = np.tril_indices(nvirb, -1)
tmp = r2b[index:index + noccb*len(idyb)].reshape(-1,noccb)
r2bbb[kj,ka,:,idxb,idyb] = tmp
r2bbb[kj,kb,:,idyb,idxb] = -tmp
index = index + noccb*len(idyb)
r1 = (r1a.copy(), r1b.copy())
r2 = (r2aaa, r2aba, r2bab, r2bbb)
return r1, r2
def eaccsd_matvec(eom, vector, kshift, imds=None, diag=None):
'''2ph operators are of the form s_{ j}^{ab}, i.e. 'jb' indices are coupled'''
if imds is None: imds = eom.make_imds()
t1, t2= imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape[3:]
nmoa, nmob = nocca + nvira, noccb + nvirb
kconserv = imds.kconserv
nkpts = eom.nkpts
r1, r2 = eom.vector_to_amplitudes(vector, kshift, nkpts, (nmoa, nmob), (nocca, noccb), kconserv)
r1a, r1b = r1
r2aaa, r2aba, r2bab, r2bbb = r2
# BEGINNING OF MATVEC CONTRACTIONS: ref - Nooijen 1995 EOM-CC for EA
# Fvv terms
# (\bar{H}S)^a = | |
"""
hydrofunctions.usgs_rdb
~~~~~~~~~~~~~~~~~~~~~~~
This module is for working with the various USGS dataservices that use the rdb
text format. These include the statistics service, the field measurements
service, the rating curve service, and the peak discharge service.
"""
import pandas as pd
import requests
from io import StringIO
from IPython.core import display
from . import exceptions
class hydroRDB:
"""A class for holding the information from USGS rdb files.
Args:
header (str):
A multi-line string from the header of the rdb file. The header
often contains important metadata and user warnings.
table (pandas dataframe):
This is a dataframe made from the rdb file.
columns (str):
A string from the rdb file that lists the column names.
dtypes (str):
A string from the rdb file that gives the data type and length of
each column.
**Properties:**
**header** (str):
A multi-line string from the header of the rdb file. The header
often contains important metadata and user warnings.
**table** (pandas dataframe):
This is a dataframe made from the rdb file.
**columns** (str):
A string from the rdb file that lists the column names.
**dtypes** (str):
A string from the rdb file that gives the data type and length of
each column.
You can also access the header and the dataframe as a named tuple::
hydroRDB(header=<a multi-line string>, table=<pandas dataframe>)
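For example, an instance can be unpacked directly (a minimal sketch, where
``my_rdb`` stands for any hydroRDB instance)::
    header, table = my_rdb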
Note:
- The args to create this object are supplied by hf.read_rdb().
- The hydroRDB object is returned from several functions that request\
RDB files from a USGS data service, including: peaks(), field_meas(),\
rating_curve(), stats(), site_file(), and data_catalog().
- You can read more about the RDB format here: https://pubs.usgs.gov/of/2003/ofr03123/6.4rdb_format.pdf
"""
def __init__(self, header, table, columns, dtypes):
self.header = header
self.table = table
self.columns = columns
self.dtypes = dtypes
def __iter__(self):
return iter((self.header, self.table))
def __repr__(self):
return f"hydroRDB(header={self.header},\ntable={self.table}"
def _repr_html_(self):
html_header = "<p>" + self.header.replace("\n", "<br />") + "</p>"
# return html_header + self.df._repr_html_()
return f"<p>hydroRDB(header=<br />{html_header}</p><p>table=<br />{self.table._repr_html_()})</p>"
def get_usgs_RDB_service(url, headers=None, params=None):
"""Request data from a USGS dataservice and handle errors.
Args:
url (str):
a string used by Requests as the base URL.
headers (dict):
a dict of HTTP headers to send with the request.
params (dict):
a dict of parameters used to modify the url of a REST service.
Returns:
A Requests response object.
Raises:
This function will raise an exception for any non-200 status code, and\
in cases where the USGS service returns anything that is not obviously\
an RDB file. If an exception is raised, then an attempt will be made to\
display the error page which the USGS sometimes sends back to the user.
"""
response = requests.get(url, headers=headers, params=params)
if response.status_code == 200:
if response.text[0] == "#":
# Everything seems good; they apparently returned an RDB file.
return response
else:
print(
"The USGS has apparently not returned any data. Check the "
"following message for further information for why this "
"request failed. One possibility is that your site number "
"is incorrect."
)
display.display(display.HTML(response.text))
raise exceptions.HydroNoDataError(
"The USGS did not return a valid RDB file " "for this request."
)
else:
# response.status_code != 200:
print(f"The USGS has returned an error code of {response.status_code}")
# If this code is being run inside of a notebook, the USGS error page
# will be displayed.
display.display(display.HTML(response.text))
# raise an exception
response.raise_for_status()
# or raise some sort of Hydro http error based on requests http error.
return response
def read_rdb(text):
"""Read strings that are in rdb format.
Args:
text (str):
A long string containing the contents of a rdb file. A common way
to obtain these would be from the .text property of a requests
response, as in the example usage below.
Returns:
header (multi-line string):
Every commented line at the top of the rdb file is marked with a
'#' symbol. Each of these lines is stored in this output.
outputDF (pandas.DataFrame):
A dataframe containing the information in the rdb file. `site_no`
and `parameter_cd` are interpreted as a string, but every other number
is interpreted as a float or int; missing values as an np.nan;
strings for everything else.
columns (list of strings):
The column names, taken from the rdb header row.
dtypes (list of strings):
The second header row from the rdb file. These mostly tell the
column width, and typically record everything as string data ('s')
type. The exceptions to this are dates, which are listed with a 'd'.
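Example:
A minimal usage sketch; the URL and site number simply mirror the ones
used by ``site_file()`` below::
    >>> import requests
    >>> url = ('https://waterservices.usgs.gov/nwis/site/?format=rdb'
    ...        '&sites=01542500&siteOutput=expanded&siteStatus=all')
    >>> response = requests.get(url)
    >>> header, df, columns, dtypes = read_rdb(response.text)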
"""
try:
headerlines = []
datalines = []
count = 0
for line in text.splitlines():
if line[0] == "#":
headerlines.append(line)
elif count == 0:
columns = line.split()
count = count + 1
elif count == 1:
dtypes = line.split()
count = count + 1
else:
datalines.append(line)
data = "\n".join(datalines)
header = "\n".join(headerlines)
outputDF = pd.read_csv(
StringIO(data),
sep="\t",
comment="#",
header=None,
names=columns,
dtype={"site_no": str, "parameter_cd": str},
)
except:
print(
"There appears to be an error processing the file that the USGS "
"returned. This sometimes occurs if you entered the wrong site "
"number. We were expecting an RDB file, but we received the "
f"following instead:\n{text}"
)
raise
# outputDF.outputDF.filter(like='_cd').columns
# TODO: code columns ('*._cd') should be interpreted as strings.
return header, outputDF, columns, dtypes
def site_file(site):
"""Load USGS site file into a Pandas dataframe.
Args:
site (str):
The gauge ID number for the site.
Returns:
a hydroRDB object or tuple consisting of the header and a pandas
dataframe.
**Example:**
>>> test = site_file('01542500')
>>> test
hydroRDB(header=<a multi-line string of the header>,
table=<a Pandas dataframe>)
You can also access the header, dataframe, column names, and data types
through the associated properties `header`, `table`, `columns`, `dtypes`::
>>> test.table
<a Pandas dataframe>
"""
url = (
"https://waterservices.usgs.gov/nwis/site/?format=rdb&sites="
+ site
+ "&siteOutput=expanded&siteStatus=all"
)
headers = {"Accept-encoding": "gzip"}
print("Retrieving the site file for site #", site, " from ", url)
response = get_usgs_RDB_service(url, headers)
header, outputDF, columns, dtype = read_rdb(response.text)
output_obj = hydroRDB(header, outputDF, columns, dtype)
return output_obj
def data_catalog(site):
"""Load a history of the data collected at a site into a Pandas dataframe.
Args:
site (str):
The gauge ID number for the site.
Returns:
a hydroRDB object or tuple consisting of the header and a pandas
dataframe.
**Example:**
>>> test = data_catalog('01542500')
>>> test
hydroRDB(header=<a multi-line string of the header>,
table=<a Pandas dataframe>)
You can also access the header, dataframe, column names, and data types
through the associated properties `header`, `table`, `columns`, `dtypes`::
>>> test.table
<a Pandas dataframe>
"""
url = (
"https://waterservices.usgs.gov/nwis/site/?format=rdb&sites="
+ site
+ "&seriesCatalogOutput=true&siteStatus=all"
)
headers = {"Accept-encoding": "gzip"}
print("Retrieving the site file for site #", site, " from ", url)
response = get_usgs_RDB_service(url, headers)
header, outputDF, columns, dtype = read_rdb(response.text)
output_obj = hydroRDB(header, outputDF, columns, dtype)
return output_obj
def field_meas(site):
"""Load USGS field measurements of stream discharge into a Pandas dataframe.
Args:
site (str):
The gauge ID number for the site.
Returns:
a hydroRDB object or tuple consisting of the header and a pandas
dataframe. Each row of the table represents an observation of river conditions
at the gauge made by USGS personnel on a given date. Values are stored in
columns, and include the measured stream discharge, channel width,
channel area, depth, and velocity.
**Example:**
>>> test = field_meas('01542500')
>>> test
hydroRDB(header=<a multi-line string of the header>,
table=<a Pandas dataframe>)
You can also access the header, dataframe, column names, and data types
through the associated properties `header`, `table`, `columns`, `dtypes`::
>>> test.table
<a Pandas dataframe>
**Discussion:**
The USGS operates over 8,000 stream gages around the United States and
territories. Each of these sensors records the depth, or 'stage' of the
water. In order to translate this stage data into stream discharge, the
USGS staff creates an empirical relationship called a 'rating curve'
between the river stage and stream discharge. To construct this curve,
USGS personnel visit each of the gages every one to eight weeks, and
measure the stage and the discharge of the river manually.
The ``field_meas()`` function returns all of the field-collected data for
this | |
0 # here I still have to handle the case of new records, since their index also starts at 0
#print('selected_ids', selected_ids)
for rec_id in selected_ids:
if str(rec_id[1]) == str(self.main_key):
record_num = rec_id[0]
#print('my record_num is', record_num)
#print('4')
# I think we could replace this mess with the list index; check later
#for record_id in record_ids:
# index[order_number] = str(record_id)
# if str(record_id) == str(self.main_key):
# record_num = order_number
# order_number += 1
#print(index + self.main_key + bottle.SimpleTemplate.defaults['main_key'])
try:
first = selected_ids[0][1]
except:
first = 'None'
try:
last = selected_ids[-1][1]
except:
last = 'None'
if record_num > 1:
back = selected_ids[record_num-1][1]
else:
try:
back = selected_ids[record_num][1]
except:
back = 'None'
try:
forward = selected_ids[record_num +1][1]
except:
forward = 'None'#selected_ids[record_num][1]
#print('first, back, forward, last', first, back, forward, last)
#print('5')
# If called other than via onchange, check the values and set the attributes according to them
#if self.dynamic_atrs == False: # dynamic attributes are at the field level, not the form level, so I don't understand what this was doing here
if record:
values = record
else:
values = {}
for field in get_model_fields(model):
if hasattr(field[1], 'default') and field[1].default:
#print('o default e:' + field[1].default)
default = field[1].default
if type(default) == str:
if 'session' in default:
default = eval('bottle.request.' + default)
elif 'get_' in default:
default = eval('model.' + default)
else:
default = str(default)
else:
default = str(default)
#print('my default is ' + default)
if field[1].__class__.__name__ == 'parent_field':
#print('Im a parent field')
options = eval('objs.{model_name}().get_options()'.format(model_name = field[1].model_name))
values[field[0]] = parent2HTML(value = default, options = options)
#print ('values', values[field[0]])
else:
values[field[0]] = default
#print('values:' + values)
#new_atrs = run_dyn_atrs_function(values = values, model = model, field_name = None)
#print('6')
        # Check whether the model defines tabs and, if so, collect the fields that belong to tabs
tab_field_names = []
if hasattr(model, '__tabs__') and model.__tabs__ != []:
for tab in model.__tabs__:
for f in tab[1]:
tab_field_names.append(f)
#print('tab_field_names--------', tab_field_names)
        values_record = {}  # dictionary used to register the values of the fields of the selected record
#print('7')
#print (model)
dynamic_atrs_result = {}
dynamic_atrs_args = {}
for field in get_model_fields(model):
            # If the field declares onchange or dynamic_atrs in its arguments, the function name must be passed into kargs so it reaches the JS via HTML5 attributes. The JS can then invoke that function on the object, receive a result dict of field values or HTML5 attributes, and update the values or attributes accordingly. This only works when the user changes the field value, not when the field is changed by a workflow button, and we must also make sure that any conditions already satisfied when the form is opened are applied.
onchange_function = ''
dynamic_atrs_function = ''
if field[1].dynamic_atrs in ['False', False]:
del field[1].dynamic_atrs
if field[1].onchange in ['False', False]:
del field[1].onchange
if hasattr(field[1], 'dynamic_atrs'):
#print('Tenho dynamic_atrs', field[1].dynamic_atrs)
dynamic_atrs_function = field[1].dynamic_atrs
if hasattr(field[1], 'onchange'):
onchange_function = field[1].onchange
if hasattr(field[1], 'dynamic_atrs') or hasattr(field[1], 'onchange'):
                print('appending the onchange/dynamic_atrs arguments for this field')
dynamic_atrs_args[field[0]] = """ onChange="runEditOnchange('{key}', '{name}', '{onchange_function}', '{dynamic_atrs_function}')" """.format(key=self.main_key, name=self.name, onchange_function=onchange_function, dynamic_atrs_function=dynamic_atrs_function)
# #print(field[1].args)
                # # run the function defined on the field to adapt its attributes
if hasattr(field[1], 'dynamic_atrs'):
from ujson import loads
# #print(dynamic_atrs_function)
# #print(record)
if record:
rec_to_pass = record
else:
rec_to_pass = {}
for sf in select_fields:
rec_to_pass[sf] = ''
dynamic_atrs_result = loads(eval("model.{function}(record={record}, internal=True)".format(function=dynamic_atrs_function, record=rec_to_pass)))
#print('o resultado dos meus atributos dinamicos é vindos da função', dynamic_atrs_result)
#print(type(dynamic_atrs_result))
# print('as funções onchange que vou acrescentar ao codigo ', dynamic_atrs_args)
for field in get_model_fields(model):
#print('8' + field[0])
            kargs = {}  # dictionary holding the data that is passed to the template of each field
#print(str(edit_ok))
if field[1].__class__.__name__ in ['list_field', 'many2many']:
if field[1].__class__.__name__ == 'list_field':
field_model = eval("""objs.{model_name}()""".format(model_name=field[1].model_name))
print('before filter_expression')
self.ctx_dict[field[0]] = {}
#print('condition do list_field', get_condition(condition=field[1].condition, record=record))
self.ctx_dict[field[0]]['filter_expression'] = get_condition(condition=field[1].condition, record=record)
self.ctx_dict[field[0]]['page'] = 0
self.ctx_dict[field[0]]['limit'] = 1000
self.ctx_dict[field[0]]['show_search'] = False
self.ctx_dict[field[0]]['title'] = field[1].name
set_context(self.window_id, self.ctx_dict)
#print('window_status is {var1}'.format(var1 = self.ctx_dict['window_status']))
#print('filter_expression is {var1}'.format(var1 = self.ctx_dict[field[0]]['filter_expression']))
if hasattr(field[1], 'list_edit_mode'):
self.ctx_dict[field[0]]['list_edit_mode'] = field[1].list_edit_mode
else:
self.ctx_dict[field[0]]['list_edit_mode'] = field_model.model.__list_edit_mode__
self.ctx_dict[field[0]]['list_field'] = True
set_context(self.window_id, self.ctx_dict)
print('antes de list_data')
list_data = prepare_list_data(model=field_model, ctx_dict=self.ctx_dict, list_name=field[0])
if hasattr(field[1], 'size') and field[1].size > 10:
size = field[1].size
else:
size = 300
cols = round(size / 25)
if cols > 12:
cols = 12
list_data['size'] = size
list_data['cols'] = cols
                    list_data['recalled'] = False  # 'recalled' tells whether the list is being loaded for the first time or being reloaded
#print('depois de list_data')
#print('after prepare_list_data')
#print(str(list_data))
elif field[1].__class__.__name__ == 'many2many':
#print('im a m2m')
parent_ids = model.get_m2m_ids(field_name=field[0], record=record)
#print('parent_ids is {var1}'.format(var1=str(parent_ids)))
condition = ""
if parent_ids:
parent_ids_str = '('
for parent_id in parent_ids:
parent_ids_str += "'" + str(parent_id) + "',"
parent_ids_str = parent_ids_str[:-1] + ')'
#print('a minha parent_ids_str do many2many é ', parent_ids_str)
condition = 'where="id in {parent_ids}"'.format(parent_ids=str(parent_ids_str))
#print('before eval_model')
field_model = eval("""objs.{model_name}({condition})""".format(model_name=field[1].model_name, condition=condition))
#print('after eval_model field_model is {var1}'.format(var1 = str(field_model)))
field_fields = []
if hasattr(field[1], 'fields'):
field_fields = field[1].fields
list_data = prepare_m2m_data(model=field_model, ctx_dict=self.ctx_dict, fields=field_fields, parent_ids=parent_ids)
self.ctx_dict[field[0]] = {}
#print('8.1.1.2')
#print('list_data {var1}'.format(var1=str(list_data)))
self.ctx_dict[field[0]].update(list_data)
set_context(self.window_id, self.ctx_dict)
kargs.update(list_data)
#print('kargs is {var1}'.format(var1=str(kargs)))
#kargs['show_search'] = False
#kargs['list_edit_mode'] = field_model.__list_edit_mode__
#print('8.1.2')
#print('self.main_key, field[1].__class__.__name__' + self.main_key + field[1].__class__.__name__)
if self.main_key not in ['None', None] and field[1].__class__.__name__ not in ['separator', 'newline', 'link','label','hrline']:
field_values = get_field_value(record, field, model)
                # if this is a popup we have to look in the request for popup_fieldname
#print(field[0], '--------------------------------------------------------------------------------------------')
#print(field_values['field_value'], type(field_values['field_value']))
if field_values['field_value']:
#print('we do have field_value', kargs)
#if isinstance(field_values['field_value'], Decimal):
# kargs['value'] = field_values['field_value']#str(format_number(field_values['field_value']))
#else:
# print('sou outro que não decimal')
kargs['value'] = field_values['field_value']
#print(kargs['value'])
#elif field[1].__class__.__name__ in ['separator', 'newline']:
# pass
else:
if hasattr(field[1], 'default') and field[1].default:
default = field[1].default
if type(default) == str:
if 'session' in default:
default = default[7:]
try:
default = eval('self.ctx_dict{default}'.format(default = default, window_id = self.window_id))
except:
default = 'ERROR'
elif 'get_' in default:
default = eval('model.' + default)
else:
default = str(default)
if field[1].__class__.__name__ == 'parent_field':
#print('im a parent_field')
options = eval('objs.{model_name}().get_options()'.format(model_name = field[1].model_name))
#print('1')
#kargs['parent_name'] = eval('objs.{model_name}().__name__'.format(model_name=field[1].model_name))
#options = model.options[field[0]]
#print('2', kargs['parent_name'])
kargs['value'] = parent2HTML(value = default, options = options)
#print('3', kargs['value'])
else:
kargs['value'] = default
#print('valor por defeito is {var1}'.format(var1=kargs['value']))
if field[1].__class__.__name__ == 'parent_field':
kargs['parent_name'] = eval('objs.{model_name}().__name__'.format(model_name=field[1].model_name))
#print('8.1.3')
if field[1].__class__.__name__ in ['combo_field', 'choice_field']:
#print('sou combo ou choice ', field[0])
if isinstance(field[1].options, list):
kargs['options'] = field[1].options
else:
kargs['options'] = eval(field[1].options)
#print('validando as options do choice', kargs['options'])
if field[1].__class__.__name__ in ['choice_field']:
option_ids = []
for o in kargs['options']:
option_ids.append(o[0])
option_ids = to_tuple(option_ids)
                    # we cannot store the filter expression and other variables keyed by model, because two choice fields can easily feed from the same model, so we key them by field name instead; we still need to revisit how list_data is handled, since it reads from ctx_dict keyed by model
self.ctx_dict[field[0]] = {}
self.ctx_dict[field[0]]['filter_expression'] = 'id ' + str(option_ids)
                    self.ctx_dict[field[0]]['filter_expr_choice_restrict'] = 'id ' + str(option_ids)  # this entry guarantees that, for choice_field, the filter button never returns values outside this list.
#print('modelo', field[1].model)
#print ('my filter expression', self.ctx_dict[field[0]]['filter_expression'])
self.ctx_dict[field[0]]['page'] = 0
self.ctx_dict[field[0]]['limit'] = 1000
self.ctx_dict[field[0]]['show_search'] = False
set_context(self.window_id, self.ctx_dict)
#print('8.1.4')
if dynamic_atrs_result:
                # The args will be modified by this dictionary, applying the attributes returned by the field's dynamic_atrs function; we risk overwriting attributes, which still needs to be addressed
if field[0] in dynamic_atrs_result:
                    print('found in dynamic_atrs_result:', field[0])
print(dynamic_atrs_result[field[0]])
for arg in dynamic_atrs_result[field[0]]:
if arg == 'atrs':
field[1].args = field[1].args + ' ' + dynamic_atrs_result[field[0]]['atrs']
else:
exec("kargs['{arg}'] = dynamic_atrs_result['{field_name}']['{arg}']".format(arg=arg, field_name=field[0]))
                    print('arguments to pass to the HTML:', field[1].args)
if dynamic_atrs_args:
                # The args will be modified by this dictionary, appending the onchange function to fields that have the dynamic_atrs attribute; this only takes effect when the user changes the value, so a workflow button has to call the onchange itself
if field[0] in dynamic_atrs_args:
field[1].args = field[1].args + dynamic_atrs_args[field[0]]
if hasattr(field[1], 'size') and field[1].size > 10:
<reponame>espenhgn/SpikeSort
#!/usr/bin/env python
#coding=utf-8
import numpy as np
from scipy import interpolate, signal
import tables
import tempfile
import os
from warnings import warn
class ZeroPhaseFilter:
"""IIR Filter with zero phase delay"""
def __init__(self, ftype, fband, tw=200., stop=20):
self.gstop=stop
self.gpass=1
self.fband = fband
self.tw = tw
self.ftype = ftype
self._coefs_cache = {}
def _design_filter(self, FS):
if not FS in self._coefs_cache:
wp = np.array(self.fband)
ws = wp + np.array([-self.tw, self.tw])
wp, ws = wp*2./FS, ws*2./FS
b,a = signal.iirdesign(wp=wp, ws=ws, gstop=self.gstop, gpass=self.gpass, ftype=self.ftype)
self._coefs_cache[FS]=(b,a)
else:
b,a = self._coefs_cache[FS]
return b, a
def __call__(self, x, FS):
b, a = self._design_filter(FS)
return signal.filtfilt(b,a, x)
class FilterFir:
"""FIR filter with zero phase delay
Attributes
----------
    f_pass : float
        pass-band edge frequency (in the same units as the sampling
        frequency given when the filter is called)
    f_stop : float
        stop-band edge frequency; if it is below f_pass the filter is
        high-pass, otherwise low-pass
order : int
filter order
"""
def __init__(self, f_pass, f_stop, order):
self._coefs_cache = {}
self.fp = f_pass
self.fs = f_stop
self.order = order
def _design_filter(self, FS):
if not FS in self._coefs_cache:
bands = [0, min(self.fs, self.fp), max(self.fs, self.fp), FS/2]
gains = [int(self.fp < self.fs), int(self.fp > self.fs)]
b, a = signal.remez(self.order, bands, gains, Hz=FS), [1]
self._coefs_cache[FS]=(b, a)
else:
b,a = self._coefs_cache[FS]
return b, a
def __call__(self, x, FS):
b, a = self._design_filter(FS)
return signal.filtfilt(b, a, x)
class Filter:
def __init__(self, fpass, fstop, gpass=1, gstop=10, ftype='butter'):
self.ftype = ftype
self.fp = np.asarray(fpass)
self.fs = np.asarray(fstop)
self._coefs_cache = {}
self.gstop = gstop
self.gpass = gpass
def _design_filter(self, FS):
if not FS in self._coefs_cache:
wp, ws = self.fp*2/FS, self.fs*2/FS
b,a = signal.iirdesign(wp=wp, ws=ws, gstop=self.gstop, gpass=self.gpass, ftype=self.ftype)
self._coefs_cache[FS]=(b,a)
else:
b,a = self._coefs_cache[FS]
return b, a
def __call__(self, x, FS):
b, a = self._design_filter(FS)
return signal.filtfilt(b,a, x)
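# Illustrative sketch, not part of the original SpikeSort sources: it shows how
# the filter classes above are meant to be called. The 300-3000 Hz band, the
# sampling rate and the synthetic trace are arbitrary example values.
def _demo_bandpass_filter():
    FS = 25000.  # sampling frequency in Hz
    t = np.arange(0, 1., 1. / FS)
    x = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(len(t))
    bandpass = Filter(fpass=[300., 3000.], fstop=[100., 6000.])
    y = bandpass(x, FS)  # zero-phase band-pass filtered copy of x
    return y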
def filter_proxy(spikes, filter_obj, chunksize=1E6):
"""Proxy object to read filtered data
Parameters
----------
spikes : dict
unfiltered raw recording
filter_object : object
Filter to filter the data
chunksize : int
size of segments in which data is filtered
Returns
-------
sp_dict : dict
filtered recordings
"""
data = spikes['data']
sp_dict = spikes.copy()
if filter_obj is None:
return spikes
tmp_file = tempfile.NamedTemporaryFile(mode='w')
filename = tmp_file.name
atom = tables.Atom.from_dtype(np.dtype('float64'))
shape = data.shape
h5f = tables.openFile(filename,'w')
carray = h5f.createCArray('/', "test", atom, shape)
chunksize = int(chunksize)
n_chunks = int(np.ceil(shape[1]*1./chunksize))
for i in range(shape[0]):
for j in range(n_chunks):
stop = int(np.min(((j+1)*chunksize, shape[1])))
carray[i,j*chunksize:stop] = filter_obj(data[i,j*chunksize:stop], sp_dict['FS'])
sp_dict['data'] = carray
return sp_dict
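# Illustrative sketch, not part of the original module: filter a raw recording
# lazily through the HDF5-backed proxy using one of the filter classes above.
# The 300-3000 Hz elliptic band-pass is an arbitrary example choice.
def _demo_filter_proxy(raw_recording):
    filt = ZeroPhaseFilter('ellip', fband=[300., 3000.])
    return filter_proxy(raw_recording, filt)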
def split_cells(spikes, idx, which='all'):
"""Return the spike features splitted into separate cells
"""
if which == 'all':
classes = np.unique(idx)
else:
classes = which
data = spikes['data']
time = spikes['time']
spikes_dict = dict([(cl, {'data': data[:,idx==cl, :], 'time': time})
for cl in classes])
return spikes_dict
def remove_spikes(spt_dict, remove_dict, tolerance):
"""Remove spikes with given spike times from the spike time
structure """
spt_data = spt_dict['data']
spt_remove = remove_dict['data']
min, max = tolerance
for t in spt_remove:
spt_data = spt_data[(spt_data>(t+max)) | (spt_data<(t+min))]
spt_ret = spt_dict.copy()
spt_ret['data'] = spt_data
return spt_ret
def detect_spikes(spike_data, thresh='auto', edge="rising",
contact=0, filter=None):
r"""Detects spikes in extracellular data using amplitude thresholding.
Parameters
----------
spike_data : dict
extracellular waveforms
thresh : float or 'auto'
threshold for detection. if thresh is 'auto' it will be
estimated from the data.
edge : {'rising', 'falling'}
which edge to trigger on
contact : int, optional
index of tetrode contact to use for detection, defaults to
first contact
filter : object, optional
filter used for spike detection; defaults to no filtering
Returns
-------
spt_dict : dict
dictionary with 'data' key which contains detected threshold
crossing in miliseconds
"""
sp_data = spike_data['data'][contact, :]
n_contacts = spike_data['n_contacts']
if filter is not None:
sp_data = filter(sp_data, spike_data['FS'])
#if n_contacts>1:
# sp_data = sp_data[:,contact]
FS = spike_data['FS']
if type(thresh) is str or type(thresh) is unicode:
if thresh=='auto':
thresh_frac = 8
else:
thresh_frac = float(thresh)
thresh = thresh_frac*np.sqrt(float(np.var(sp_data[:int(10*FS)])))
if edge == 'falling' or edge =="min":
thresh = -thresh
if edge == "rising" or edge == "max":
i, = np.where((sp_data[:-1]<thresh) & (sp_data[1:]>thresh))
elif edge == "falling" or edge == "min":
i, = np.where((sp_data[:-1]>thresh) & (sp_data[1:]<thresh))
else:
raise TypeError("Edge must be 'rising' or 'falling'")
spt = i*1000./FS
spt_dict = {'data': spt, 'thresh': thresh, 'contact': contact}
return spt_dict
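# Illustrative sketch, not part of the original module: a minimal end-to-end
# run of detect_spikes() and extract_spikes() on synthetic data. The dict
# layout follows the docstrings above ('data', 'FS', 'n_contacts'); the
# impulse train and the threshold are arbitrary example values.
def _demo_detect_and_extract():
    FS = 25000.
    data = np.zeros((1, int(FS)))      # one contact, one second of baseline
    data[0, ::2500] = 1.               # artificial one-sample "spikes"
    raw = {'data': data, 'FS': FS, 'n_contacts': 1}
    spt = detect_spikes(raw, thresh=0.5, edge="rising")
    sp_waves = extract_spikes(raw, spt, sp_win=[-0.6, 0.8])
    return spt, sp_waves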
def filter_spt(spike_data, spt_dict, sp_win):
spt = spt_dict['data']
sp_data = spike_data['data']
FS = spike_data['FS']
try:
n_pts = sp_data.shape[1]
except IndexError:
n_pts = len(sp_data)
max_time = (n_pts)*1000./FS
t_min = np.max((-sp_win[0],0))
t_max = np.min((max_time, max_time-sp_win[1]))
idx, = np.nonzero((spt>=t_min) & (spt<=t_max))
return idx
def extract_spikes(spike_data, spt_dict, sp_win, resample=1,
contacts='all'):
"""Extract spikes from recording.
Parameters
----------
spike_data : dict
extracellular data (see :ref:`raw_recording`)
spt : dict
spike times structure (see :ref:`spike_times`)
sp_win : list of int
temporal extent of the wave shape
Returns
-------
wavedict : dict
spike waveforms structure (see :ref:`spike_wave`)
"""
sp_data = spike_data['data']
n_contacts = spike_data['n_contacts']
if contacts == "all":
contacts = np.arange(n_contacts)
elif type(contacts) is int:
contacts = np.array([contacts])
else:
contacts = np.asarray(contacts)
FS = spike_data['FS']
spt = spt_dict['data']
idx = np.arange(len(spt))
inner_idx = filter_spt(spike_data, spt_dict, sp_win)
outer_idx = idx[np.in1d(idx, inner_idx) == False]
indices = (spt/1000.*FS).astype(np.int32)
win = (np.asarray(sp_win)/1000.*FS).astype(np.int32)
time = np.arange(win[1]-win[0])*1000./FS+sp_win[0]
n_contacts, n_pts = sp_data.shape
    # auxiliary function to find a valid spike window within the data range
minmax = lambda x: np.max([np.min([n_pts, x]), 0])
spWave = np.zeros((len(time), len(spt), len(contacts)),
dtype=np.float32)
for i in inner_idx:
sp = indices[i]
spWave[:,i,:] = sp_data[contacts, sp+win[0]:sp+win[1]].T
for i in outer_idx:
sp = indices[i]
l, r = map(minmax, sp+win)
        if l != r:
spWave[(l-sp)-win[0]:(r-sp)-win[0],i,:] = sp_data[contacts, l:r].T
wavedict = {"data":spWave, "time": time, "FS": FS}
if len(idx) != len(inner_idx):
is_valid = np.zeros(len(spt), dtype=np.bool)
is_valid[inner_idx] = True
wavedict['is_valid'] = is_valid
    if resample != 1:
warn("resample argument is deprecated."
"Please update your code to use function"
"resample_spikes", DeprecationWarning)
wavedict = resample_spikes(wavedict, FS*resample)
return wavedict
def resample_spikes(spikes_dict, FS_new):
"""Upsample spike waveforms using spline interpolation"""
sp_waves = spikes_dict['data']
time = spikes_dict['time']
FS = spikes_dict['FS']
resamp_time = np.arange(time[0], time[-1], 1000./FS_new)
n_pts, n_spikes, n_contacts = sp_waves.shape
spike_resamp = np.empty((len(resamp_time), n_spikes, n_contacts))
for i in range(n_spikes):
for contact in range(n_contacts):
tck = interpolate.splrep(time, sp_waves[:, i, contact],s=0)
spike_resamp[:,i, contact] = interpolate.splev(resamp_time, tck, der=0)
return {"data":spike_resamp, "time":resamp_time, "FS":FS}
def align_spikes(spike_data, spt_dict, sp_win, type="max", resample=1,
contact=0, remove=True):
"""Aligns spike waves and returns corrected spike times
Parameters
----------
    spike_data : dict
        extracellular recording (see :ref:`raw_recording`)
    spt_dict : dict
        spike times structure (see :ref:`spike_times`)
    sp_win : list of int
        temporal extent of the wave shape (in ms)
    type : {'max', 'min'}, optional
        align spikes to the maximum or to the minimum of the waveform
    resample : int, optional
        upsampling factor used while searching for the extremum
    contact : int, optional
        index of the tetrode contact used for alignment
    remove : bool, optional
        if True, remove double detections of the same spike afterwards
Returns
-------
ret_dict : dict
spike times of aligned spikes
"""
spt = spt_dict['data'].copy()
idx_align = np.arange(len(spt))
#spt_align = {'data': spt}
#go in a loop until all spikes are correctly aligned
iter_id = 0
while len(idx_align) > 0:
spt_align = {'data': spt[idx_align]}
spt_inbound = filter_spt(spike_data, spt_align, sp_win)
idx_align = idx_align[spt_inbound]
#spt_align = {'data': spt[idx_align]}
sp_waves_dict = extract_spikes(spike_data, spt_align, sp_win,
resample=resample, contacts=contact)
sp_waves = sp_waves_dict['data'][:,spt_inbound,0]
#if sp_waves_dict.has_key('is_valid'):
# sp_waves = sp_waves[:, sp_waves_dict['is_valid']]
time = sp_waves_dict['time']
if type=="max":
i = sp_waves.argmax(0)
elif type=="min":
i = sp_waves.argmin(0)
#move spike markers
shift = time[i]
spt[idx_align]+=shift
#if spike maximum/minimum was at the edge we have to extract it at the
# new marker and repeat the alignment
tol = 0.1
idx_align = idx_align[(shift<(sp_win[0]+tol)) | (shift>(sp_win[1]-tol))]
iter_id +=1
#print "Align. iteration %d, remaining idx %d" % (iter_id, len(idx_align))
#print shift
ret_dict = {'data':spt}
if remove:
#remove double spikes
FS = spike_data['FS']
ret_dict = remove_doubles(ret_dict, 1000./FS)
return ret_dict
def remove_doubles(spt_dict,tol):
new_dict = spt_dict.copy()
spt = spt_dict['data']
if len(spt)>0:
spt=spt[np.concatenate(([True],np.diff(spt)>tol))]
new_dict['data']=spt
return new_dict
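# Illustrative sketch, not part of the original module: remove_doubles() keeps
# a spike only if it follows its predecessor by more than `tol` milliseconds.
def _demo_remove_doubles():
    spt_dict = {'data': np.array([10.0, 10.2, 25.0, 25.1, 40.0])}
    cleaned = remove_doubles(spt_dict, tol=0.5)
    # cleaned['data'] is now array([10., 25., 40.])
    return cleaned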
def merge_spikes(spike_waves1, spike_waves2):
"""Merges two sets of spike waves
Parameters
----------
spike_waves1 : dict
spike_waves2 : dict
spike wavefroms to merge; both spike wave sets must be defined
within the same time window and with the same sampling
frequency
Returns
-------
spike_waves : dict
merged spike waveshapes
clust_idx : array
labels denoting to which set the given spike originally belonged to
"""
sp_data1 = spike_waves1['data']
sp_data2 = spike_waves2['data']
sp_data = np.hstack((sp_data1, sp_data2))
spike_waves = spike_waves1.copy()
    spike_waves['data'] = sp_data
import copy
import cmath
import numpy
import scipy.linalg
from pauxy.estimators.thermal import greens_function, one_rdm_from_G, particle_number
from pauxy.estimators.mixed import local_energy
from pauxy.walkers.stack import PropagatorStack
from pauxy.walkers.walker import Walker
from pauxy.utils.linalg import regularise_matrix_inverse
from pauxy.utils.misc import update_stack, get_numeric_names
class ThermalWalker(Walker):
def __init__(self, system, trial, walker_opts={}, verbose=False):
Walker.__init__(self, system, trial, walker_opts=walker_opts)
self.num_slices = trial.num_slices
dtype = numpy.complex128
self.G = numpy.zeros(trial.dmat.shape, dtype=dtype)
self.nbasis = trial.dmat[0].shape[0]
self.stack_size = walker_opts.get('stack_size', None)
max_diff_diag = numpy.linalg.norm((numpy.diag(trial.dmat[0].diagonal())-trial.dmat[0]))
if max_diff_diag < 1e-10:
self.diagonal_trial = True
if verbose:
print("# Trial density matrix is diagonal.")
else:
self.diagonal_trial = False
if verbose:
print("# Trial density matrix is not diagonal.")
if self.stack_size == None:
self.stack_size = trial.stack_size
if (self.num_slices//self.stack_size)*self.stack_size != self.num_slices:
if verbose:
print("# Input stack size does not divide number of slices.")
self.stack_size = update_stack(self.stack_size, self.num_slices, verbose)
if self.stack_size > trial.stack_size:
if verbose:
print("# Walker stack size differs from that estimated from "
"trial density matrix.")
print("# Be careful. cond(BT)**stack_size: %10.3e."
%(trial.cond**self.stack_size))
self.stack_length = self.num_slices // self.stack_size
if verbose:
print("# Walker stack size: {}".format(self.stack_size))
self.lowrank = walker_opts.get('low_rank', False)
self.lowrank_thresh = walker_opts.get('low_rank_thresh', 1e-6)
if verbose:
print("# Using low rank trick: {}".format(self.lowrank))
self.stack = PropagatorStack(self.stack_size, trial.num_slices,
trial.dmat.shape[-1], dtype,
trial.dmat, trial.dmat_inv,
diagonal=self.diagonal_trial,
lowrank=self.lowrank,
thresh=self.lowrank_thresh)
# Initialise all propagators to the trial density matrix.
self.stack.set_all(trial.dmat)
self.greens_function_qr_strat(trial)
self.stack.G = self.G
self.M0 = numpy.array([scipy.linalg.det(self.G[0], check_finite=False),
scipy.linalg.det(self.G[1], check_finite=False)])
self.stack.ovlp = numpy.array([1.0/self.M0[0], 1.0/self.M0[1]])
# # temporary storage for stacks...
I = numpy.identity(system.nbasis, dtype=dtype)
One = numpy.ones(system.nbasis, dtype=dtype)
self.Tl = numpy.array([I, I])
self.Ql = numpy.array([I, I])
self.Dl = numpy.array([One, One])
self.Tr = numpy.array([I, I])
self.Qr = numpy.array([I, I])
self.Dr = numpy.array([One, One])
self.hybrid_energy = 0.0
if verbose:
eloc = self.local_energy(system)
P = one_rdm_from_G(self.G)
nav = particle_number(P)
print("# Initial walker energy: {} {} {}".format(*eloc))
print("# Initial walker electron number: {}".format(nav))
# self.buff_names = ['weight', 'G', 'unscaled_weight', 'phase', 'Tl',
# 'Ql', 'Dl', 'Tr', 'Qr', 'Dr', 'M0']
self.buff_names, self.buff_size = get_numeric_names(self.__dict__)
# self.buff_size = (self.G.size+3+self.Tl.size+2+
# self.Ql.size+self.Dl.size+self.Tr.size+self.Qr.size
# +self.Dr.size)
def greens_function(self, trial, slice_ix=None, inplace=True):
if self.lowrank:
return self.stack.G
else:
return self.greens_function_qr_strat(trial, slice_ix=slice_ix,
inplace=inplace)
def greens_function_svd(self, trial, slice_ix=None, inplace=True):
if slice_ix == None:
slice_ix = self.stack.time_slice
bin_ix = slice_ix // self.stack.stack_size
# For final time slice want first block to be the rightmost (for energy
# evaluation).
if bin_ix == self.stack.nbins:
bin_ix = -1
if inplace:
G = None
else:
G = numpy.zeros(self.G.shape, self.G.dtype)
for spin in [0, 1]:
# Need to construct the product A(l) = B_l B_{l-1}..B_L...B_{l+1}
# in stable way. Iteratively construct SVD decompositions starting
# from the rightmost (product of) propagator(s).
B = self.stack.get((bin_ix+1)%self.stack.nbins)
(U1, S1, V1) = scipy.linalg.svd(B[spin])
for i in range(2, self.stack.nbins+1):
ix = (bin_ix + i) % self.stack.nbins
B = self.stack.get(ix)
T1 = numpy.dot(B[spin], U1)
# todo optimise
T2 = numpy.dot(T1, numpy.diag(S1))
(U1, S1, V) = scipy.linalg.svd(T2)
V1 = numpy.dot(V, V1)
A = numpy.dot(U1.dot(numpy.diag(S1)), V1)
# Final SVD decomposition to construct G(l) = [I + A(l)]^{-1}.
# Care needs to be taken when adding the identity matrix.
T3 = numpy.dot(U1.conj().T, V1.conj().T) + numpy.diag(S1)
(U2, S2, V2) = scipy.linalg.svd(T3)
U3 = numpy.dot(U1, U2)
D3 = numpy.diag(1.0/S2)
V3 = numpy.dot(V2, V1)
# G(l) = (U3 S2 V3)^{-1}
# = V3^{\dagger} D3 U3^{\dagger}
if inplace:
# self.G[spin] = (V3inv).dot(U3.conj().T)
self.G[spin] = (V3.conj().T).dot(D3).dot(U3.conj().T)
else:
# G[spin] = (V3inv).dot(U3.conj().T)
G[spin] = (V3.conj().T).dot(D3).dot(U3.conj().T)
return G
def greens_function_qr(self, trial, slice_ix=None, inplace=True):
if (slice_ix == None):
slice_ix = self.stack.time_slice
bin_ix = slice_ix // self.stack.stack_size
# For final time slice want first block to be the rightmost (for energy
# evaluation).
if bin_ix == self.stack.nbins:
bin_ix = -1
if not inplace:
G = numpy.zeros(self.G.shape, self.G.dtype)
else:
G = None
for spin in [0, 1]:
# Need to construct the product A(l) = B_l B_{l-1}..B_L...B_{l+1}
# in stable way. Iteratively construct SVD decompositions starting
# from the rightmost (product of) propagator(s).
B = self.stack.get((bin_ix+1)%self.stack.nbins)
(U1, V1) = scipy.linalg.qr(B[spin], pivoting=False, check_finite=False)
for i in range(2, self.stack.nbins+1):
ix = (bin_ix + i) % self.stack.nbins
B = self.stack.get(ix)
T1 = numpy.dot(B[spin], U1)
(U1, V) = scipy.linalg.qr(T1, pivoting=False, check_finite=False)
V1 = numpy.dot(V, V1)
# Final SVD decomposition to construct G(l) = [I + A(l)]^{-1}.
# Care needs to be taken when adding the identity matrix.
V1inv = scipy.linalg.solve_triangular(V1, numpy.identity(V1.shape[0]))
T3 = numpy.dot(U1.conj().T, V1inv) + numpy.identity(V1.shape[0])
(U2, V2) = scipy.linalg.qr(T3, pivoting=False, check_finite=False)
U3 = numpy.dot(U1, U2)
V3 = numpy.dot(V2, V1)
V3inv = scipy.linalg.solve_triangular(V3, numpy.identity(V3.shape[0]))
# G(l) = (U3 S2 V3)^{-1}
# = V3^{\dagger} D3 U3^{\dagger}
if inplace:
self.G[spin] = (V3inv).dot(U3.conj().T)
else:
G[spin] = (V3inv).dot(U3.conj().T)
return G
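    # Illustrative, self-contained sketch (not part of the original walker):
    # it only demonstrates why the stabilised QR/SVD accumulation above is
    # needed. A naive product of many imaginary-time propagators becomes
    # exponentially ill-conditioned, so [I + B_L...B_1]^{-1} cannot be formed
    # directly. The toy diagonal propagator and parameters are arbitrary.
    @staticmethod
    def _demo_naive_product_conditioning(nslices=50, nbasis=4, tau=0.5):
        energies = numpy.linspace(-1.0, 1.0, nbasis)
        B = numpy.diag(numpy.exp(-tau * energies))  # toy single-slice propagator
        A = numpy.identity(nbasis)
        for _ in range(nslices):
            A = B.dot(A)  # naive accumulation of the full product
        # cond(A) ~ exp(tau * nslices * (E_max - E_min)), about 1e21 here.
        return numpy.linalg.cond(A)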
def compute_left_right(self, center_ix):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
# B(L) .... B(1)
for spin in [0, 1]:
# right bit
# B(right) ... B(1)
if (center_ix > 0):
# print ("center_ix > 0")
B = self.stack.get(0)
(self.Qr[spin], R1, P1) = scipy.linalg.qr(B[spin], pivoting=True, check_finite=False)
# Form D matrices
self.Dr[spin] = (R1.diagonal())
D1inv = (1.0/R1.diagonal())
self.Tr[spin] = numpy.einsum('i,ij->ij',D1inv, R1)
# now permute them
self.Tr[spin][:,P1] = self.Tr[spin] [:,range(self.nbasis)]
for ix in range(1, center_ix):
B = self.stack.get(ix)
C2 = numpy.einsum('ij,j->ij',
numpy.dot(B[spin], self.Qr[spin]),
self.Dr[spin])
(self.Qr[spin], R1, P1) = scipy.linalg.qr(C2, pivoting=True, check_finite=False)
# Compute D matrices
D1inv = (1.0/R1.diagonal())
self.Dr[spin] = (R1.diagonal())
# smarter permutation
# D^{-1} * R
tmp = numpy.einsum('i,ij->ij',D1inv, R1)
# D^{-1} * R * P^T
tmp[:,P1] = tmp[:,range(self.nbasis)]
# D^{-1} * R * P^T * T
self.Tr[spin] = numpy.dot(tmp, self.Tr[spin])
# left bit
# B(l) ... B(left)
if (center_ix < self.stack.nbins-1):
# print("center_ix < self.stack.nbins-1 first")
# We will assume that B matrices are all diagonal for left....
B = self.stack.get(center_ix+1)
self.Dl[spin] = (B[spin].diagonal())
D1inv = (1.0/B[spin].diagonal())
self.Ql[spin] = numpy.identity(B[spin].shape[0])
self.Tl[spin] = numpy.identity(B[spin].shape[0])
for ix in range(center_ix+2, self.stack.nbins):
# print("center_ix < self.stack.nbins-1 first inner loop")
B = self.stack.get(ix)
C2 = (numpy.einsum('ii,i->i',B[spin],self.Dl[spin]))
self.Dl[spin] = C2
def compute_right(self, center_ix):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
# B(L) .... B(1)
for spin in [0, 1]:
# right bit
# B(right) ... B(1)
if (center_ix > 0):
# print ("center_ix > 0")
B = self.stack.get(0)
(self.Qr[spin], R1, P1) = scipy.linalg.qr(B[spin], pivoting=True, check_finite=False)
# Form D matrices
self.Dr[spin] = (R1.diagonal())
D1inv = (1.0/R1.diagonal())
self.Tr[spin] = numpy.einsum('i,ij->ij',D1inv, R1)
# now permute them
self.Tr[spin][:,P1] = self.Tr[spin] [:,range(self.nbasis)]
for ix in range(1, center_ix):
B = self.stack.get(ix)
C2 = numpy.einsum('ij,j->ij',
numpy.dot(B[spin], self.Qr[spin]),
self.Dr[spin])
(self.Qr[spin], R1, P1) = scipy.linalg.qr(C2, pivoting=True, check_finite=False)
# Compute D matrices
D1inv = (1.0/R1.diagonal())
self.Dr[spin] = (R1.diagonal())
# smarter permutation
# D^{-1} * R
tmp = numpy.einsum('i,ij->ij',D1inv, R1)
# D^{-1} * R * P^T
tmp[:,P1] = tmp[:,range(self.nbasis)]
# D^{-1} * R * P^T * T
self.Tr[spin] = numpy.dot(tmp, self.Tr[spin])
def compute_left(self, center_ix):
# Use Stratification method (DOI 10.1109/IPDPS.2012.37)
# B(L) .... B(1)
for spin in [0, 1]:
# left bit
# B(l) ... B(left)
if (center_ix < self.stack.nbins-1):
# print("center_ix < self.stack.nbins-1 first")
# We will assume that B matrices are all diagonal for left....
B = self.stack.get(center_ix+1)
self.Dl[spin] = (B[spin].diagonal())
self.Ql[spin] = numpy.identity(B[spin].shape[0])
self.Tl[spin] = numpy.identity(B[spin].shape[0])
for ix in range(center_ix+2, self.stack.nbins):
# print("center_ix < self.stack.nbins-1 first inner loop")
B = self.stack.get(ix)
C2 = (numpy.einsum('ii,i->i',B[spin],self.Dl[spin]))
                    self.Dl[spin] = C2  # C2 is already 1-D here (cf. compute_left_right above)
def greens_function_left_right(self, center_ix, inplace=False, thresh = 1e-6):
assert(self.diagonal_trial)
if not inplace:
G = numpy.zeros(self.G.shape, self.G.dtype)
else:
G = None
mL = self.G.shape[1]
mR = self.G.shape[1]
mT = self.G.shape[1]
Bc = self.stack.get(center_ix)
nbsf = Bc.shape[1]
        # It goes from right to left and we sample (I + L*B*R) at the end
for spin in [0,1]:
if (center_ix > 0): # there exists right bit
mR = len(self.Dr[spin][numpy.abs(self.Dr[spin])>thresh])
Ccr = numpy.einsum('ij,j->ij',
numpy.dot(Bc[spin],self.Qr[spin][:,:mR]),
self.Dr[spin][:mR]) # N x mR
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Ccr, pivoting=True, check_finite=False)
Dlcr = Rlcr[:mR,:mR].diagonal() # mR
Dinv = 1.0/Dlcr # mR
tmp = numpy.einsum('i,ij->ij',Dinv[:mR], Rlcr[:mR,:mR]) # mR, mR x mR -> mR x mR
tmp[:,Plcr] = tmp[:,range(mR)]
Tlcr = numpy.dot(tmp, self.Tr[spin][:mR,:]) # mR x N
else:
(Qlcr, Rlcr, Plcr) = scipy.linalg.qr(Bc[spin], pivoting=True, check_finite=False)
# Form D matrices
Dlcr = Rlcr.diagonal()
mR = len(Dlcr[numpy.abs(Dlcr) > thresh])
Dinv = 1.0/Rlcr.diagonal()
Tlcr = numpy.einsum('i,ij->ij',Dinv[:mR], Rlcr[:mR,:]) # mR x N
Tlcr[:,Plcr] = Tlcr[:,range(self.nbasis)] # mR x N
if (center_ix < self.stack.nbins-1): # there exists left bit
                # assume left stack is all diagonal (i.e., QDT = diagonal -> Q and T are identity)
by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_escalation_chain_list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: EscalationChainPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['fields', 'size', 'offset', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_escalation_chain_list" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/setting/alert/chains', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EscalationChainPaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
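    # Illustrative note (not produced by swagger-codegen): the usual calling
    # pattern for the endpoint methods in this class, assuming ``api`` is an
    # instance of it.
    #
    #     chains = api.get_escalation_chain_list(size=50)           # synchronous
    #     thread = api.get_escalation_chain_list(async_req=True)    # asynchronous
    #     chains = thread.get()                                     # block for the result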
def get_immediate_device_list_by_device_group_id(self, id, **kwargs): # noqa: E501
"""get immediate devices under group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_immediate_device_list_by_device_group_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: DevicePaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_immediate_device_list_by_device_group_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_immediate_device_list_by_device_group_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_immediate_device_list_by_device_group_id_with_http_info(self, id, **kwargs): # noqa: E501
"""get immediate devices under group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_immediate_device_list_by_device_group_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: DevicePaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fields', 'size', 'offset', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_immediate_device_list_by_device_group_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_immediate_device_list_by_device_group_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_immediate_device_list_by_device_group_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/device/groups/{id}/devices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DevicePaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_immediate_website_list_by_website_group_id(self, id, **kwargs): # noqa: E501
"""get a list of websites for a group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_immediate_website_list_by_website_group_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: WebsitePaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_immediate_website_list_by_website_group_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_immediate_website_list_by_website_group_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_immediate_website_list_by_website_group_id_with_http_info(self, id, **kwargs): # noqa: E501
"""get a list of websites for a group # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_immediate_website_list_by_website_group_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: WebsitePaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'fields', 'size', 'offset', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_immediate_website_list_by_website_group_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_immediate_website_list_by_website_group_id`") # noqa: E501
if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_immediate_website_list_by_website_group_id`, must conform to the pattern `/\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['LMv1'] # noqa: E501
return self.api_client.call_api(
'/website/groups/{id}/websites', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebsitePaginationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_netflow_endpoint_list(self, id, **kwargs): # noqa: E501
"""get netflow endpoint list # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_netflow_endpoint_list(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param int start:
:param int end:
:param str netflow_filter:
:param str port:
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: EndpointPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_netflow_endpoint_list_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_netflow_endpoint_list_with_http_info(id, **kwargs) # noqa: E501
return data
def get_netflow_endpoint_list_with_http_info(self, id, **kwargs): # noqa: E501
"""get netflow endpoint list # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_netflow_endpoint_list_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:param int start:
:param int end:
:param str netflow_filter:
:param str port:
:param str fields:
:param int size:
:param int offset:
:param str filter:
:return: EndpointPaginationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'start', 'end', 'netflow_filter', 'port', 'fields', 'size', 'offset', 'filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_netflow_endpoint_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_netflow_endpoint_list`") # noqa: E501
        if 'id' in params and not re.search('\d+', params['id'] if type(params['id']) is str else str(params['id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `id` when calling `get_netflow_endpoint_list`, must conform to the pattern `/\d+/`")  # noqa: E501
from core.himesis_utils import graph_to_dot
from util.decompose_graph import decompose_graph
from core.himesis_utils import build_traceability
from core.new_match_algo import NewHimesisMatcher
from copy import deepcopy
from collections import defaultdict
class Slicer:
def __init__(self, rules, transformation, superclasses_dict, overlapping_rules, subsumption, atomic_contracts):
self.debug = False
self.data = {}
self.direct_links = {}
self.backward_links = {}
self.match_elements = {}
self.isolated_match_elements = {}
self.apply_elements = {}
self.found_links = {}
self.found_isolated_match_elements = {}
self.rules = rules
self.atomic_contracts = atomic_contracts
self.required_rules = {}
for rule in self.rules:
self.rules[rule] = build_traceability(deepcopy(self.rules[rule]))
self.transformation = transformation
self.superclasses_dict = superclasses_dict
self.overlapping_rules = overlapping_rules
self.subsumption = subsumption
for layer in transformation:
for i, rule in enumerate(layer):
rule_name = rule.name
#make sure to update layer with traceability rules
layer[i] = self.rules[rule_name]
data = decompose_graph(self.rules[rule_name], get_isolated_match_elements = True)
self.direct_links[rule_name] = data["direct_links"]
self.backward_links[rule_name] = data["backward_links"]
self.match_elements[rule_name] = data["match_elements"]
self.isolated_match_elements[rule_name] = data["isolated_match_elements"]
self.apply_elements[rule_name] = data["apply_elements"]
self.data[rule_name] = data
#if self.debug:
#print("Drawing: " + rule.name)
#graph_to_dot(rule.name, rule)
for layer in transformation:
for rule in layer:
r_rules = self.find_required_rules(rule.name, [rule])
self.required_rules[rule.name] = r_rules
r_rules_names = sorted([r for r,v in r_rules.items()])
print("Rule " + rule.name + " depends on: " + str(r_rules_names))
for layer in transformation:
for rule in layer:
self.check_for_missing_elements(rule, is_contract=False)
for contract in atomic_contracts:
contract_name, contract_list = self.decompose_contract(contract)
required_rules = self.find_required_rules(contract_name, contract_list, True, verbosity = 0)
self.required_rules[contract_name] = required_rules
def get_contract(self, contract_num, atomic, if_then):
contract = None
num = contract_num
if 0 <= contract_num < len(atomic):
atomic = [atomic[contract_num]]
if_then = []
contract = atomic[0]
num -= len(atomic)
if 0 <= num < len(if_then):
atomic = []
if_then = [if_then[num]]
contract = if_then[0]
print("Slicing for contract number " + str(contract_num) + " : " + contract[0])
return contract, atomic, if_then
def decompose_contract(self, contract):
contract_list = contract[1].get_graph()
contract_name = contract[1].to_string()
#print("Slicing for: " + contract_name)
self.direct_links[contract_name] = []
self.backward_links[contract_name] = []
self.match_elements[contract_name] = []
self.isolated_match_elements[contract_name] = []
self.apply_elements[contract_name] = []
for c in contract_list:
data = decompose_graph(c, verbosity = 0, isolated_if_attached_backward=True, get_isolated_match_elements = True)
self.direct_links[c.name] = data["direct_links"]
self.backward_links[c.name] = data["backward_links"]
self.match_elements[c.name] = data["match_elements"]
self.isolated_match_elements[c.name] = data["isolated_match_elements"]
self.apply_elements[c.name] = data["apply_elements"]
self.data[c.name] = data
return contract_name, contract_list
def slice_transformation(self, contract):
import time
start_time = time.time()
print("Number rules before: " + str(len(self.rules)))
try:
contract_name, contract_list = self.decompose_contract(contract)
except KeyError:
#this is actually a rule
contract_name = contract.name
contract_list = [contract]
if self.debug:
graph_to_dot(contract_name, contract_list[0])
for layer in self.transformation:
for rule in layer:
graph_to_dot(rule.name, rule)
required_rules = self.find_required_rules(contract_name, contract_list, True, verbosity=0)
self.required_rules[contract_name] = required_rules
rr_names = [rule for rule, v in required_rules.items()]
print("Required rules for contract " + contract_name + ": " + str(sorted(rr_names)))
#this contract is a rule, so make sure it requires itself
if contract_name in self.rules.keys():
required_rules[contract_name] = []
#raise Exception("Contract Required Rules")
required_rules_stack = deepcopy(list(required_rules.keys()))
required_rules = []
while required_rules_stack:
rr = required_rules_stack[0]
required_rules_stack = required_rules_stack[1:]
if rr not in required_rules:
required_rules.append(rr)
rule = self.rules[rr]
#print("Getting rrs for: " + rr)
new_rrs = self.find_required_rules(rule.name, [rule], False, self.transformation)
new_rrs = list(new_rrs.keys())
# add in the rules which might be needed for subsumption
for key, values in self.overlapping_rules.items():
if rr == key:
for val in values:
new_rrs.append(val)
for key, values in self.subsumption.items():
if rr == key:
for val in values:
new_rrs.append(val)
if rr in values:
new_rrs.append(key)
for rr2 in new_rrs:
if rr2 not in required_rules and rr2 not in required_rules_stack:
required_rules_stack.append(rr2)
print("Required rules for contract " + contract_name + " (recursive):\n" + str(sorted(required_rules)))
#raise Exception()
new_rules = {}
for k in self.rules.keys():
if k in required_rules:
new_rules[k] = self.rules[k]
new_transformation = []
for layer in self.transformation:
new_layer = []
for rule in layer:
if rule.name in required_rules:
new_layer.append(rule)
new_transformation.append(new_layer)
end_time = time.time() - start_time
for contract in contract_list:
self.check_for_missing_elements(contract, is_contract = True)
print("Time taken for: -slicing- " + str(end_time) + " seconds")
print("Number rules after: " + str(len(new_rules)))
#raise Exception()
return new_rules, new_transformation
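    # Illustrative note (not part of the original class): the intended calling
    # sequence, assuming the rule graphs, layered transformation, superclass
    # dictionary and contracts have already been prepared by the surrounding
    # toolchain.
    #
    #     slicer = Slicer(rules, transformation, superclasses_dict,
    #                     overlapping_rules, subsumption, atomic_contracts)
    #     sliced_rules, sliced_layers = slicer.slice_transformation(contract)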
def find_required_rules(self, pattern_name, pattern_list, is_contract = False, verbosity = 0):
if self.debug:
print("\nLooking for required rules for pattern: " + pattern_name)
required_rules = {}
for layer in self.transformation:
# don't look at the same layer that a rule is on
if not is_contract:
rule_names = [r.name for r in layer]
if pattern_name in rule_names:
break
for rule in layer:
if rule in required_rules:
continue
if is_contract:
rule_me = self.match_elements[rule.name]
rule_me = set([rule.vs[n]["mm__"] for n in rule_me])
else:
rule_me = set()
source_data = self.data[rule.name]
source_mms = rule.vs["mm__"]
for pattern in pattern_list:
verbosity = 0
pattern_data = self.data[pattern.name]
pattern_mms = pattern.vs["mm__"]
#we care about backward links for both rules and contracts,
#but only direct link for contracts
if is_contract:
real_backward_links = pattern_data["backward_links"]
else:
real_backward_links = [bl for bl in pattern_data["backward_links"] if pattern_mms[bl[2]] == "backward_link"]
real_trace_links = [tl for tl in source_data["backward_links"] if source_mms[tl[2]] == "trace_link"]
links = [
[real_backward_links, real_trace_links],
]
if is_contract:
links.append([pattern_data["direct_links"], source_data["direct_links"]])
graph_me = self.isolated_match_elements[pattern.name]
graph_me = set([pattern.vs[n]["mm__"].replace("MT_pre__", "") for n in graph_me])
if len(graph_me.intersection(rule_me)) > 0:
try:
required_rules[rule.name].append(graph_me)
except KeyError:
required_rules[rule.name] = [graph_me]
#continue
self.match_links(required_rules, links, pattern, self.data[pattern.name], rule, self.data[rule.name], self.superclasses_dict,
verbosity = verbosity)
return required_rules
def match_links(self, required_rules, links, pattern, pattern_data, graph, source_data, superclasses_dict, verbosity=0):
matcher = NewHimesisMatcher(graph, pattern, pred1=source_data, pred2=pattern_data, superclasses_dict=superclasses_dict, skip_equations = True)
pattern_mms = matcher.pattern_mms
# source_mms = graph.vs["mm__"]
does_match = False
for iso_match_element in pattern_data["isolated_match_elements"]:
iso_mm = pattern_mms[iso_match_element]
patt_constraints = matcher.get_patt_node_constraints(iso_match_element)
for node in range(len(graph.vs)):
# print("Matching on: " + str(node))
nodes_match = matcher.match_nodes(node, iso_match_element, iso_mm, patt_constraints)
if nodes_match:
if pattern.name not in self.found_isolated_match_elements.keys():
self.found_isolated_match_elements[pattern.name] = []
self.found_isolated_match_elements[pattern.name].append(iso_match_element)
try:
required_rules[graph.name].append(iso_match_element)
except KeyError:
required_rules[graph.name] = [iso_match_element]
does_match = True
# copy these links, as we might need to remove some
#print("Pattern: " + pattern.name)
for patt_links, source_links in links:
if verbosity > 1:
print("\n===================\nPattern " + pattern.name + " nodes:")
for patt0_n, patt1_n, patt_link_n in patt_links:
matcher.print_link(pattern, patt0_n, patt1_n, patt_link_n)
print("Pattern " + pattern.name + " nodes:\n===================\n")
print("\n===================\nGraph " + graph.name + " nodes:")
for graph_n0_n, graph_n1_n, graph_link_n in source_links:
matcher.print_link(graph, graph_n0_n, graph_n1_n, graph_link_n)
print("Graph " + graph.name + " nodes:\n===================\n")
for patt0_n, patt1_n, patt_link_n in patt_links:
patt_0_mm = pattern_mms[patt0_n]
patt_1_mm = pattern_mms[patt1_n]
patt_link_mm = pattern_mms[patt_link_n]
patt_constraints_0 = matcher.get_patt_node_constraints(patt0_n)
patt_constraints_1 = matcher.get_patt_node_constraints(patt1_n)
patt_constraints_link = matcher.get_patt_node_constraints(patt_link_n)
for graph_n0_n, graph_n1_n, graph_link_n in source_links:
if pattern.vs[patt_link_n]["mm__"] in ["trace_link", "backward_link"]:
if graph.vs[graph_link_n]["mm__"] == "trace_link":
links_match = True
else:
links_match = False
else:
links_match = matcher.match_nodes(graph_link_n, patt_link_n, patt_link_mm, patt_constraints_link)
if not links_match:
#if verbosity > 1:
# print("Links don't match")
continue
if verbosity > 1:
print("\nChecking Pattern " + pattern.name + " nodes:")
matcher.print_link(None, pattern, patt0_n, patt1_n, patt_link_n)
nodes_match_1 = matcher.match_nodes(graph_n0_n, patt0_n, patt_0_mm, patt_constraints_0)
nodes_match_2 = matcher.match_nodes(graph_n1_n, patt1_n, patt_1_mm, patt_constraints_1)
#if nodes_match:
# print("\nNodes found!")
# if not nodes_match:
# print("Failure matching on " + pc.vs[pc_n0_n]["mm__"] + " vs " + contract.vs[n0_n]["mm__"])
# if not nodes_match:
# print("Failure matching on " + pc.vs[pc_n1_n]["mm__"] + " vs " + contract.vs[n1_n]["mm__"])
if nodes_match_1 and nodes_match_2:
if verbosity > 1:
print("\nFound the pattern link: ")
matcher.print_link(None, pattern, patt0_n, patt1_n, patt_link_n)
print("On:")
matcher.print_link(None, graph, graph_n0_n, graph_n1_n, graph_link_n)
if pattern.name not in self.found_links.keys():
self.found_links[pattern.name] = []
patt_mms = (pattern_mms[patt0_n], pattern_mms[patt1_n], pattern_mms[patt_link_n])
#print("Pattern link " + str(patt_mms) + " found in " + graph.name)
self.found_links[pattern.name].append(patt_mms)
try:
required_rules[graph.name].append((patt0_n, patt1_n, patt_link_n))
except KeyError:
required_rules[graph.name] = [(patt0_n, patt1_n, patt_link_n)]
does_match = True
#return True
#pc_direct_links.remove([pc_n0_n, pc_n1_n, pc_link_n])
#break
# else:
# if verbosity > 1:
# print("\nNot match on:")
# print(graph.vs[graph_n0_n]["mm__"])
# print(graph.vs[graph_n1_n]["mm__"])
#
# if graph_link_n:
# print(graph.vs[graph_link_n]["mm__"])
# print()
# if not found_match:
# if verbosity > 1:
# print("No direct link matches found")
# print("Couldn't find:")
# print(pattern.vs[patt0_n]["mm__"])
# print(pattern.vs[patt1_n]["mm__"])
# if patt_link_n:
# print(pattern.vs[patt_link_n]["mm__"])
# print()
# return False
return does_match
def check_for_missing_elements(self, graph, is_contract = False):
original_mms = graph.vs["mm__"]
mms = [mm.replace("MT_pre__", "") for mm in original_mms]
direct_links = self.data[graph.name]["direct_links"]
backward_links = self.data[graph.name]["backward_links"]
#print("Check missing elements: " + graph.name)
if not is_contract:
backward_links = [bl for bl in backward_links if mms[bl[2]] == "backward_link"]
try:
found_links = self.found_links[graph.name]
except KeyError:
found_links = []
if is_contract:
for dl in direct_links:
dl_as_mm = (mms[dl[0]], mms[dl[1]], mms[dl[2]])
if dl_as_mm not in found_links:
rwe = self.print_rules_with_element(original_mms[dl[0]], verbose = False)
rwe2 = self.print_rules_with_element(original_mms[dl[1]], verbose = False)
#check to
############################################################################
#
# SAGE UI - A Graphical User Interface for SAGE
# Copyright (C) 2005 Electronic Visualization Laboratory,
# University of Illinois at Chicago
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the distribution.
# * Neither the name of the University of Illinois at Chicago nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Direct questions, comments etc about SAGE UI to www.evl.uic.edu/cavern/forum
#
# Author: <NAME>
#
############################################################################
import os.path
import os
import time
from math import ceil
import string
import wx
from Mywx import MyBitmapButton
from threading import Timer
from globals import *
# some global vars
hashRecordingWords = {} #RJ 2005-04-07
hashRecordingWords[1001] = "exec"
hashRecordingWords[1002] = "kill"
hashRecordingWords[1003] = "move"
hashRecordingWords[1004] = "resize"
hashRecordingWords[1007] = "bg"
hashRecordingWords[1008] = "depth"
hashRecordingWords[1100] = "shutdown"
############################################################################
#
# CLASS: SessionRecorder
#
# DESCRIPTION: A class for recording user actions to a file in a directory.
# To use just create an instance of this class and call
# RecordAction(code, data) to write a line to a file. "Code" is
# the message code sent to SAGE and it gets mapped to a text string
# according to a hash defined here. "Data" is whatever you want to
# record in a file and it's code dependent. The last (optional)
# parameter to RecordAction is a boolean signifying if you want
# to insert a pause after an action. By default pauses are inserted
# in the file with the value of the elapsed time since the last
# recorded message. When done recording, just call Close() and
# destroy the object.
#
# DATE: April, 2005
#
############################################################################
class SessionRecorder:
def __init__(self, filename):
#recDir = ConvertPath("./sessions/") # a dir to write session files to
print "Session recording started. Session will be saved in ", filename
self.file = open(filename, "w")
self.prevTime = 0
def RecordAction(self, code, data, insertPause = True):
# only record if the action is supported
if hashRecordingWords.has_key( int(code) ):
#record a pause, a session always starts with a 2 second pause!
if self.prevTime == 0:
pauseTime = 2
else:
pauseTime = int( ceil( time.time() - self.prevTime ) )
self.prevTime = time.time()
# finally, do the writing
if insertPause:
self.file.write("pause " + str(pauseTime) + "\n")
self.file.write( hashRecordingWords[int(code)] +" "+ data+"\n" )
def Close(self):
print "Session recording stopped"
self.file.close()
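# Usage sketch for SessionRecorder (the filename is hypothetical; the numeric codes map to
# hashRecordingWords above, so 1001 -> "exec" and 1003 -> "move"):
#
#   recorder = SessionRecorder("sessions/demo.ses")
#   recorder.RecordAction(1001, "imageviewer 100 100")  # writes "pause 2" then "exec imageviewer 100 100"
#   recorder.RecordAction(1003, "12 400 300")           # writes the elapsed pause, then "move 12 400 300"
#   recorder.Close()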
############################################################################
#
# CLASS: SessionReader
#
# DESCRIPTION: A class for reading user actions from a specified file.
# To use, create an instance of this class and pass in a
# filename to read from. Once created, keep calling ReadAction()
# in order to read new lines from a file. It returns data in this
# format: (code, data). It is your job to use this data then.
# This class also has options to pause, stop, speed up and slow
# down the playback of the session file. It does all that by
# playing with the length of the pause (speed), repeatedly
# returning pause (pausing) or returning a fake EOF (stopping
# prematurely).
#
#
# DATE: April, 2005
#
############################################################################
class SessionReader:
def __init__(self, sessionFile):
if os.path.isfile(sessionFile):
self.file = open(sessionFile, "r")
else:
print "ERROR: Sessions file \"", sessionFile, "\" doesn't exist."
# control variables
self.speed = 1.0
self.stop = False
self.paused = False
def SetPause(self, doPause):
self.paused = doPause
# reads the file one line at a time
# it returns a tuple in this format: (code, data)
# code is: -1 (paused), 0 (EOF) or **** (message code)
# data depends on the message
def ReadAction(self):
# this is how we stop session playback
if self.stop:
return (0, "EOF")
# this is how we pause playback (pretend that we encountered a
# pause in the file; instead we are pausing the playback and
# not reading any new lines from the file)
if self.paused:
return (-1, "0.5")
if not hasattr(self, "file"):
return
line = self.file.readline()
line = string.strip(line) # remove the newline char
# EOF case
if line == "":
self.file.close()
return (0, "EOF")
# split the line into action and data
action, data = string.split(line, " ", 1)
# send a pause separately
if action == "pause":
data = float(data) * self.speed
return (-1, str(data))
# this is the actual message to be sent to SAGE
code = -2 # any non-existent code
for k, v in hashRecordingWords.iteritems():
if v == action:
code = int(k) #extract the code
return (code, data)
def GoSlower(self):
self.speed = self.speed * 2
if self.speed > 8:
self.speed = 8
if self.speed > 1:
return "1/" + str(int(self.speed))
elif self.speed == 1:
return "1"
elif self.speed < 1:
return str(int((0.125 / self.speed) * 8))
def GoFaster(self):
self.speed = float(self.speed) / 2
if self.speed < 0.125:
self.speed = 0.125
if self.speed > 1:
return "1/" + str(int(self.speed))
elif self.speed == 1:
return "1"
elif self.speed < 1:
return str(int((0.125 / self.speed) * 8))
def Stop(self):
self.stop = True
def Close(self):
if hasattr(self, "file"):
self.file.close()
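# Minimal playback loop sketch for SessionReader. send_to_sage() is a hypothetical
# stand-in for whatever forwards the message (in the UI this is handled by sageGate):
#
#   reader = SessionReader("sessions/demo.ses")
#   while True:
#       (code, data) = reader.ReadAction()
#       if code == 0:            # EOF or stopped
#           break
#       elif code == -1:         # pause for the given number of seconds
#           time.sleep(float(data))
#       else:
#           send_to_sage(code, data)
#   reader.Close()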
############################################################################
#
# CLASS: PlaybackDialog
#
# DESCRIPTION: This class describes the dialog box that controls the session
# playback. It's a modal dialog so the UI can't be interacted
# with during the playback. However, you can speed up or slow
# down the playback, pause it or quit it prematurely.
#
# NOTE: This dialog is not necessary for the operation of playback
# but it gives user more control over it.
#
# DATE: April, 2005
#
############################################################################
class PlaybackDialog(wx.Dialog):
def __init__(self, parent, sessionReader):
wx.Dialog.__init__(self, parent, -1, "Session Playback")#, style = wx.STATIC_BORDER)
self.sessionReader = sessionReader
self.SetSize((250, 120))
self.SetBackgroundColour(wx.Colour(0, 51, 52)) #003334
self.SetForegroundColour(wx.Colour(255, 255, 255))
self.CenterOnParent(wx.BOTH)
self.SetFont(StandardFont())
wx.StaticText(self, -1, "Playback in progress...", (50, 20))
#self.CaptureMouse()
self.pause = False
# create the controls for the dialog
self.pauseBtn = MyBitmapButton( self, (30, 50), (30, 30), "images/pause_up.jpg", "images/pause_down.jpg", "images/pause_over.jpg")
self.stopBtn = MyBitmapButton( self, (70, 50), (30, 30), "images/stop_up.jpg", "images/stop_down.jpg", "images/stop_over.jpg")
self.slowerBtn = MyBitmapButton( self, (135, 50), (30, 30), "images/slower_up.jpg", "images/slower_down.jpg", "images/slower_over.jpg")
self.speedText = wx.StaticText( self, -1, "x 1", (172, 60), style = wx.ALIGN_CENTRE)
self.fasterBtn = MyBitmapButton( self, (208, 50), (30, 30), "images/faster_up.jpg", "images/faster_down.jpg", "images/faster_over.jpg")
# bind the events and event handlers for the buttons
self.pauseBtn.Bind( wx.EVT_LEFT_UP, self.OnPause)
self.stopBtn.Bind( wx.EVT_LEFT_UP, self.OnClose)
self.fasterBtn.Bind( wx.EVT_LEFT_UP, self.OnFaster)
self.slowerBtn.Bind( wx.EVT_LEFT_UP, self.OnSlower)
self.Bind( wx.EVT_CLOSE, self.OnClose)
self.Show()
def OnPause(self, evt):
if self.pause:
self.pauseBtn.SetUpBitmap("images/pause_up.jpg")
self.pauseBtn.SetDownBitmap("images/pause_down.jpg")
self.pauseBtn.SetOverBitmap("images/pause_over.jpg")
self.pause = False
self.sessionReader.SetPause(False)
else:
self.pauseBtn.SetUpBitmap("images/play_up.jpg")
self.pauseBtn.SetDownBitmap("images/play_down.jpg")
self.pauseBtn.SetOverBitmap("images/play_over.jpg")
self.pause = True
self.sessionReader.SetPause(True)
MyBitmapButton.OnLeftUp(evt.GetEventObject(), evt)
# this just hides the window since sageGate will close it when it's done
# it also tells the sessionReader to stop the playback
def OnClose(self, evt):
self.sessionReader.Stop()
#self.Destroy()
self.Show(False) # SAGEGate is the one that destroys the window so just hide it for now
# sageGate calls this function when EOF has been reached
def Close(self):
#self.ReleaseMouse()
self.Destroy()
# this calls SessionReader and sets the speed of playback (basically it
# changes the multiplier of the pauses)
def OnFaster(self, evt):
MyBitmapButton.OnLeftUp(evt.GetEventObject(), evt)
newSpeed = self.sessionReader.GoFaster()
self.speedText.SetLabel("x " + newSpeed)
# this calls SessionReader and sets the speed
'''
Stuff for driving MS office applications from Python using COM
Currently just Excel but Word will come soon.
'''
from win32com.client import Dispatch
from types import *
from string import uppercase
class Excel:
'''
Wrapper for MS Excel derived from that in Python Programming on Win32
'''
def __init__(self,filename=None):
'''
Open a new Excel spreadsheet optionally associated with a file
'''
self.xlApp = Dispatch("Excel.Application")
if filename:
self.filename = filename
self.xlBook = self.xlApp.Workbooks.Open(filename)
else:
self.xlBook = self.xlApp.Workbooks.Add()
self.filename = ''
self.open = 1
def save(self, newfilename=None):
'''
Save the workbook either to the default file, another file,
or let Excel query the user where to save it.
'''
if newfilename:
self.filename = newfilename
self.xlBook.SaveAs(newfilename)
else:
self.xlBook.Save()
def close(self):
self.xlBook.Close(SaveChanges=0)
del self.xlApp
self.open = 0
def getCell(self, row, col, sheet=1):
'''
Returns the value in cell (row,col) or None if it is blank.
'''
xlSheet = self.xlBook.Worksheets(sheet)
return xlSheet.Cells(row,col).Value
def getCellFormula(self, row, col, sheet=1):
'''
Returns the formula in cell (row,col) or the value if
there is no formula. If there is no value nor formula,
None is returned.
'''
xlSheet = self.xlBook.Worksheets(sheet)
result = xlSheet.Cells(row,col).Formula
if result == '': # A blank field seems to return a blank string
result = None
return result
def setCell(self, value, row, col, sheet=1):
'''
Sets the value in cell (row,col).
'''
xlSheet = self.xlBook.Worksheets(sheet)
xlSheet.Cells(row,col).Value = value
def getRange(self, row1, col1, row2=None, col2=None, sheet=1):
'''
Returns the data in the given range as a 2d array (i.e., as
a tuple of tuples). If the bottom corner is not specified
or is incompletely specified, assume a dimension of 1.
'''
if not row2:
row2 = row1
if not col2:
col2 = col1
xlSheet = self.xlBook.Worksheets(sheet)
cell1 = xlSheet.Cells(row1,col1)
cell2 = xlSheet.Cells(row2,col2)
return xlSheet.Range(cell1,cell2).Value
def matrixDimensions(self, data):
'''
Determine the dimension of the matrix data which can be a
scalar, vector, or 2-D matrix. Allows for string data, or for
matrices in which the first row or column are string labels
for series ... so look at the last row to determine the length
of a row (= number of columns). If the data is a vector then
it is taken as a row-vector in order to be consistent with how
the default extension happens when assigning a simple list or
vector into a rectangular range in Excel.
'''
last = None
n = m = 1
try:
n = len(data)
last = data[-1]
except TypeError:
n = m = 1 # We have a scalar
if last:
if type(last) == StringType:
m = n # Row-vector of strings
n = 1
else:
try:
m = len(last)
except TypeError:
m = n # Row-vector of scalars
n = 1
return (n,m)
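# Illustrative expectations for matrixDimensions, where xl is an Excel instance
# (this is plain Python logic, no Excel call is made):
#   xl.matrixDimensions(5.0)               -> (1, 1)  scalar
#   xl.matrixDimensions([1, 2, 3])         -> (1, 3)  vector treated as a row-vector
#   xl.matrixDimensions([[1, 2], [3, 4]])  -> (2, 2)  2-D matrix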
def setRange(self, data, row1=1, col1=1, row2=None, col2=None, sheet=1):
'''
Set the range of cells to the given data.
If both corners of the range are specified, the corresponding
piece of data is copied into the range. If data is too small,
then it is mystically extended to fill the range. E.g., you
can fill a range from a scalar or a vector. Vectors are treated
as row-vectors when filling a rectangular region.
Optionally, you specify only the top-left corner of range in
row1, cell1 and specify row2<=0 - the other coordinate is figured
out from the dimension of the data. This can always be overridden by
specifying the full range coordinates.
If no coordinates are given, the data is put into the top left
of the spreadsheet.
Returns the range that was set.
'''
(n,m) = self.matrixDimensions(data)
if not row2:
row2 = row1 + n - 1
if not col2:
col2 = col1 + m - 1
xlSheet = self.xlBook.Worksheets(sheet)
cell1 = xlSheet.Cells(row1,col1)
cell2 = xlSheet.Cells(row2,col2)
xlSheet.Range(cell1,cell2).Value = data
return (row1, col1, row2, col2)
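# Usage sketch for setRange/getRange (row/column indices are 1-based, as in Excel;
# assumes Excel is installed and reachable via COM, and the data values are illustrative):
#   xl = Excel()
#   xl.setRange([["x", "y"], [1, 1], [2, 4], [3, 9]])   # fills A1:B4, returns (1, 1, 4, 2)
#   block = xl.getRange(1, 1, 4, 2)                     # returns the block as a tuple of tuples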
def getContiguousRange(self, row1, col1, sheet=1):
'''
Returns data in the range which forms a contiguous
block with top-left corner in cell (row1,col1).
Starting from the specified cell, scan down/across
the first column/row and identify the range bordered
by blank cells. Blanks within the region will be
set to None.
'''
xlSheet = self.xlBook.Worksheets(sheet)
row2 = row1
while xlSheet.Cells(row2+1,col1).Value not in [None,'']:
row2 = row2 + 1
col2 = col1
while xlSheet.Cells(row1,col2+1).Value not in [None,'']:
col2 = col2 + 1
return self.getRange(row1, col1, row2, col2, sheet=sheet)
def selectRange(self, row1, col1, row2=None, col2=None, sheet=1):
'''
Select the range of cells on the specified sheet. It also
has to select that sheet as the active worksheet.
'''
if not row2:
row2 = row1
if not col2:
col2 = col1
xlSheet = self.xlBook.Worksheets(sheet)
xlSheet.Select()
cell1 = xlSheet.Cells(row1,col1)
cell2 = xlSheet.Cells(row2,col2)
xlSheet.Range(cell1,cell2).Select()
def chartRange(self, row1, col1, row2, col2, sheet=1,
**keys):
'''
Chart the data in the specified range. Additional options
are processed by chartSelectedRange.
'''
self.selectRange(row1, col1, row2, col2, sheet=sheet)
keys['sheet'] = sheet
apply(self.chartSelectedRange, (), keys)
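# Charting sketch, continuing the setRange example above (the keyword arguments are
# forwarded to chartSelectedRange below; titles and labels are illustrative):
#   xl.chartRange(1, 1, 4, 2, title="y = x**2", xlabel="x", ylabel="y", charttype='xy')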
def chartSelectedRange(self,
title=None, xlabel=None, ylabel=None,
plotby='columns',
charttype='xy',
sheet=1,
xmin=None, xmax=None,
ymin=None, ymax=None,
xlog=0, ylog=0):
'''
The interface to Excel charts. Just a few of the capabilities
are exposed here.
[The first of a set of options is the default]
plotby = 'columns' ... data series run down columns
. = 'rows' ... across rows
charttype = 'xy' ... XY scatter plot with lines and points.
. First series is X. Others are y1, y2, etc.
. = 'surface' ... Surface plot of a scalar function of
. two variables. Data should be a grid of the function.
. = 'contour' or 'colorcontour' ... Contour plot of a scalar
. function of two variables. Data should be a grid of
. values.
xmin and xmax = min/max values of the x or category axis
. It defaults to autoscale by Excel. This only applies to
. XY plots (since the surface/contour plots do not use
. values for the category axes ... they use string labels)
ymin and ymax = min/max values of the y or value axis
. It defaults to auto by Excel. Applies to all charts.
xlog = 0 ... use a linear for x or category axis.
. = 1 ... use a log (values must be positive)
. This only applies to XY plots.
ylog = 0 ... use a linear for the value or Y axis
. = 1 ... use a log .
. Applies to all charts
If the first element of each data series is a string, it is
used to label the series. If this string is representable as
a numerical value you must precede it with a single quote to
force Excel to treat it as a string. Note that you must use
strings. If you use numbers it will be interpreted as data
and incorporated into the plot. For the 2-D plots (xy,
surface, contour) you can border the actual data on left and
on the top with strings to label axes.
'''
charttypes = {'xy':74, 'surface':83, 'colorcontour':85, 'contour':56}
try:
charttype = charttypes[charttype]
except KeyError:
print('Excel.chartSelectedRange: Unknown charttype', charttype, ' defaulting to XY')
charttype = charttypes['xy']
# Make the chart and set how the data will be interpreted
# Taking a reference to the active chart does not seem to work???
self.xlApp.Charts.Add()
self.xlApp.ActiveChart.ChartType = charttype
xlRows=1
xlColumns=2
if plotby == 'rows':
self.xlApp.ActiveChart.PlotBy = xlRows
elif plotby == 'columns':
self.xlApp.ActiveChart.PlotBy = xlColumns
else:
print('Excel.chartSelectedRange: Unknown plotby', charttype, ' defaulting to columns')
self.xlApp.ActiveChart.PlotBy = xlColumns
# Set the title and axis labels
if title:
self.xlApp.ActiveChart.HasTitle = 1
self.xlApp.ActiveChart.ChartTitle.Characters.Text = title
xlCategory=1
xlValue=2
#xlSeries=3
xlPrimary=1
#xlSecondary=2
if xlabel:
self.xlApp.ActiveChart.Axes(xlCategory,xlPrimary).HasTitle = 1
self.xlApp.ActiveChart.Axes(xlCategory,xlPrimary).AxisTitle.Characters.Text = xlabel
if ylabel:
self.xlApp.ActiveChart.Axes(xlValue,xlPrimary).HasTitle = 1
self.xlApp.ActiveChart.Axes(xlValue,xlPrimary).AxisTitle.Characters.Text = ylabel
# Set the axis scale and log options
xlLinear = 0xffffefdc
xlLogarithmic=0xffffefdb
if ymin != None:
self.xlApp.ActiveChart.Axes(xlValue).MinimumScale = ymin
if ymax != None:
self.xlApp.ActiveChart.Axes(xlValue).MaximumScale = ymax
if ylog:
self.xlApp.ActiveChart.Axes(xlValue).ScaleType = xlLogarithmic
if charttype == charttypes['xy']:
if xmin != None:
| |
PROCEDURE {}(@sent_data nvarchar(max),
@message_type smallint,
@guild_id int,
@message_mode smallint,
@dm_reason nvarchar(max),
@channels t_tmp_channel_log READONLY) AS
/* Procedure that saves the log
* This is done within sql instead of python for speed optimization
*/
BEGIN
BEGIN TRY
DECLARE @existing_data_id int = NULL;
DECLARE @last_log_id int = NULL;
SELECT @existing_data_id = id FROM DataHISTORY dh WHERE dh.content = @sent_data;
IF @existing_data_id IS NULL
BEGIN
INSERT INTO DataHISTORY(content) VALUES(@sent_data);
SELECT @existing_data_id = id FROM DataHISTORY dh WHERE dh.content = @sent_data;
END
INSERT INTO MessageLOG(sent_data, message_type, guild_id, message_mode, dm_reason, [timestamp]) VALUES(
@existing_data_id, @message_type, @guild_id, @message_mode, @dm_reason, GETDATE()
);
SET @last_log_id = SCOPE_IDENTITY();
DECLARE @existance tinyint;
SELECT @existance = (CASE WHEN EXISTS(SELECT TOP(1) 1 FROM @channels) THEN 1 ELSE 0 END)
IF @existance = 1
BEGIN
INSERT INTO MessageChannelLOG (log_id, channel_id, reason)
SELECT @last_log_id, ch.id, ch.reason FROM @channels ch --OPENJSON(@channels) WITH(id int, reason nvarchar(max)) ch;
END
COMMIT;
BEGIN TRAN;
END TRY
BEGIN CATCH
ROLLBACK;
BEGIN TRAN;
THROW;
END CATCH
END"""
}
]
with suppress(SQLAlchemyError, TimeoutError, PyTDSError):
trace("[SQL]: Creating Views, Procedures & Functions...", TraceLEVELS.NORMAL)
with self._sessionmaker.begin() as session:
for statement in stms:
session.execute(text("CREATE OR ALTER " + statement["stm"].format(statement["name"]) ))
return True
return False
def generate_lookup_values(self) -> bool:
"""~ Method ~
@Info: Generates the lookup values for all the different classes the @register_type decorator was used on.
"""
session : Session
with suppress(SQLAlchemyError, TimeoutError, PyTDSError):
trace("[SQL]: Generating lookuptable values...", TraceLEVELS.NORMAL)
with self._sessionmaker.begin() as session:
for to_add in copy.deepcopy(GLOBALS.lt_types): # Deepcopied to prevent SQLAlchemy from deleting the data
existing = session.query(type(to_add)).where(type(to_add).name == to_add.name).first()
if existing is None:
session.add(to_add)
session.flush()
existing = to_add
self.add_to_cache(type(to_add), to_add.name, existing.id)
return True
return False
def create_tables(self) -> bool:
"""~ Method ~
@Info: Creates tables from the SQLAlchemy's descriptor classes"""
with suppress(SQLAlchemyError, TimeoutError, PyTDSError):
trace("[SQL]: Creating tables...", TraceLEVELS.NORMAL)
self.Base.metadata.create_all(bind=self.engine)
return True
return False
def connect_cursor(self) -> bool:
""" ~ Method ~
@Info: Creates a cursor for the database (for faster communication)"""
with suppress(Exception):
trace("[SQL]: Connecting the cursor...", TraceLEVELS.NORMAL)
self.cursor = self.engine.raw_connection().cursor()
return True
return False
def begin_engine(self) -> bool:
"""~ Method ~
@Info: Creates engine"""
with suppress(SQLAlchemyError, TimeoutError, PyTDSError):
self.engine = create_engine(f"mssql+pytds://{self.username}:{self.__password}@{self.server}/{self.database}",
echo=False,future=True, pool_pre_ping=True,
connect_args={"login_timeout" : SQL_CONNECTOR_TIMEOUT, "timeout" : SQL_CONNECTOR_TIMEOUT})
self._sessionmaker = sessionmaker(bind=self.engine)
return True
return False
# def create_database(self) -> bool:
# """ ~ Method ~
# @Info: Creates database if it doesn't exist"""
# with suppress(SQLAlchemyError, TimeoutError, PyTDSError):
# trace("[SQL]: Creating database...", TraceLEVELS.NORMAL)
# if not database_exists(self.engine.url):
# create_database(self.engine.url)
# return True
# return False
def initialize(self) -> bool:
"""~ Method ~
@Info: This method initializes the connection to the database, creates the missing tables
and fills the lookuptables with types defined by the register_type(lookup_table) function.
@Param: void"""
# Create engine for communicating with the SQL base
if not self.begin_engine():
trace("[SQL]: Unable to start engine.", TraceLEVELS.ERROR)
return False
# if not self.create_database():
# trace("[SQL]: Unable to create database")
# return False
# Create tables and the session class bound to the engine
if not self.create_tables():
trace("[SQL]: Unable to create all the tables.", TraceLEVELS.ERROR)
return False
# Insert the lookuptable values
if not self.generate_lookup_values():
trace("[SQL]: Unable to create lookuptables' rows.", TraceLEVELS.ERROR)
return False
# Create datatypes
if not self.create_data_types():
trace("[SQL]: Unable to data types", TraceLEVELS.ERROR)
return False
# Initialize views, procedures and functions
if not self.create_analytic_objects():
trace("[SQL]: Unable to create views, procedures and functions.", TraceLEVELS.ERROR)
return False
# Connect the cursor for faster procedure calls
if not self.connect_cursor():
trace("[SQL]: Unable to connect the cursor", TraceLEVELS.ERROR)
return False
return True
def get_insert_guild(self,
snowflake: int,
name: str,
_type: str) -> int:
"""~ Method ~
@Info:
Inserts the guild into the db if it doesn't exist,
adds it to the cache and returns its internal db id from the cache."""
result = None
if snowflake not in self.GuildUSER:
with self._sessionmaker.begin() as session:
session: Session
result = session.query(GuildUSER.id).filter(GuildUSER.snowflake_id == snowflake).first()
if result is not None:
result = result[0]
self.add_to_cache(GuildUSER, snowflake, result)
else:
guild_type = self.GuildTYPE[_type]
result = GuildUSER(guild_type, snowflake, name)
session.add(result)
session.flush()
result = result.id
self.add_to_cache(GuildUSER, snowflake, result)
else:
result = self.GuildUSER[snowflake]
return result
def get_insert_channels(self,
channels: List[dict],
guild_id: int) -> List[dict]:
"""~ Method ~
@Info:
- Adds missing channels to the database, caching those added
to avoid unnecessary queries if all channels already exist, and then returns
a list of tuples pairing each channel's internal DB id with the reason why sending failed.
@Param:
- channels: List[dict[id, name]] ~ List of dictionaries containing snowflake_id and name of the channel"""
not_cached = [{"id": x["id"], "name": x["name"]} for x in channels if x["id"] not in self.CHANNEL] # Get snowflakes that are not cached
not_cached_snow = [x["id"] for x in not_cached]
if len(not_cached):
with self._sessionmaker.begin() as session:
session: Session
result = session.query(CHANNEL.id, CHANNEL.snowflake_id).where(CHANNEL.snowflake_id.in_(not_cached_snow)).all()
for internal_id, snowflake_id in result:
self.add_to_cache(CHANNEL, snowflake_id, internal_id)
to_add = [CHANNEL(x["id"], x["name"], guild_id) for x in not_cached if x["id"] not in self.CHANNEL]
if len(to_add):
session.add_all(to_add)
session.flush()
for channel in to_add:
self.add_to_cache(CHANNEL, channel.snowflake_id, channel.id)
ret = [(self.CHANNEL.get(d["id"],None), d.get("reason", None)) for d in channels]
#For some reason pytds doesn't like when a row with a NULL column value is followed by a row with a non NULL column value
for channel in ret.copy():
if channel[1] is None:
ret.append(ret.pop(0))
else:
break
return ret
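# Input/output sketch for get_insert_channels (snowflake ids and names are illustrative):
#   channels = [{"id": 123, "name": "general"},
#               {"id": 456, "name": "news", "reason": "Missing permissions"}]
#   self.get_insert_channels(channels, guild_id)
#   -> roughly [(<internal id of 456>, "Missing permissions"), (<internal id of 123>, None)]
# Leading rows whose reason is None are rotated to the end so the first row has a
# non-NULL reason (see the pytds note above).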
def stop_engine(self):
"""~ Method ~
@Info: Closes the engine and the cursor"""
self.cursor.close()
self.engine.dispose()
GLOBALS.enabled = False
def handle_error(self,
exception: int, message: str,
loop: asyncio.AbstractEventLoop) -> bool:
"""~ function ~
@Info: Used to handle errors that happen in the save_log method.
@Return: Returns BOOL indicating if logging to the base should be attempted again."""
res = False
time.sleep(SQL_RECOVERY_TIME)
# Handle the error
if exception == 208: # Invalid object name (table deleted)
if self.create_tables() and self.create_data_types() and self.create_analytic_objects():
res = True
elif exception in {547, 515}: # Constraint conflict, NULL value
r_table = re.search(r'(?<=table "dbo.).+(?=")', message)
if r_table is not None:
self.clear_cache(r_table.group(0)) # Clears only the affected table cache
else:
self.clear_cache() # Clears all caching tables
res = self.generate_lookup_values()
elif exception in {-1, 2, 53}: # Disconnect error, reconnect after period
self.reconnect_after(SQL_RECONNECT_TIME, loop)
elif exception == 2812:
if self.create_data_types() and self.create_analytic_objects():
res = True
elif exception == 2801: # Object was altered (via external source) after procedure was compiled
res = True # Just retry
elif exception == 1205: # Transaction deadlocked
with suppress(SQLAlchemyError, TimeoutError, PyTDSError):
with self._sessionmaker() as session:
session.commit() # Just commit
res = True
# Could not handle the error, switch to file logging permanently
if not res and exception not in {-1, 2, 53}:
self.stop_engine()
return res # Returns if the error was handled or not
async def save_log(self,
guild_context: dict,
message_context: dict) -> bool:
"""~ Method ~
@Info: This method saves the log generated by
the xGUILD object into the database
@Param:
guild_context: dict :: Context generated by the xGUILD object,
see guild.xGUILD.generate_log() for more info.
message_context: dict :: Context generated by the xMESSAGE object,
see guild.xMESSAGE.generate_log_context() for more info.
@Return: Returns bool value indicating success (True) or failure (False)."""
# Parse the data
sent_data = message_context.get("sent_data")
guild_snowflake = guild_context.get("id")
guild_name = guild_context.get("name")
guild_type: str = guild_context.get("type")
message_type: str = message_context.get("type")
message_mode = message_context.get("mode", None)
channels = message_context.get("channels", None)
dm_success_info = message_context.get("success_info", None)
dm_success_info_reason = None
if dm_success_info is not None:
if "reason" in dm_success_info:
dm_success_info_reason = dm_success_info["reason"]
_channels = pytds.default
if channels is not None:
channels = channels['successful'] + channels['failed']
# Prevent multiple tasks from attempting to do operations on the database at the same time
# This is to avoid eg. procedures being called while they are being created,
# handle error being called from different tasks, etc.
async with self.lock:
if not GLOBALS.enabled:
# While current task was waiting for lock to be released,
# some other task disabled the logging due to an unhandable error
return False
for tries in
# # Mask R-CNN - Train on Shapes Dataset
#
#
# This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
#
# The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
# In[1]:
import os
import sys
import random
import math
import re
import time
import numpy as np
#import cv2
import matplotlib
import matplotlib.pyplot as plt
# current datasets
trainingdictionary = {'hcc':{'dbfile':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse/datalocation/trainingdata.csv','rootlocation':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse'},
'hccnorm':{'dbfile':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse/datalocation/trainingnorm.csv','rootlocation':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse'},
'hccvol':{'dbfile':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse/datalocation/tumordata.csv','rootlocation':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse'},
'hccvolnorm':{'dbfile':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse/datalocation/tumornorm.csv','rootlocation':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse'},
'hccroinorm':{'dbfile':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse/datalocation/tumorroi.csv','rootlocation':'/rsrch1/ip/dtfuentes/github/RandomForestHCCResponse'},
'dbg':{'dbfile':'./debugdata.csv','rootlocation':'/rsrch1/ip/dtfuentes/objectdetection'},
'comp':{'dbfile':'./comptrainingdata.csv','rootlocation':'/rsrch1/ip/dtfuentes/objectdetection' }}
# ## Configurations
# In[2]:
# setup command line parser to control execution
from optparse import OptionParser
parser = OptionParser()
parser.add_option( "--initialize",
action="store_true", dest="initialize", default=False,
help="build initial sql file ", metavar = "BOOL")
parser.add_option( "--builddb",
action="store_true", dest="builddb", default=False,
help="load all training data into npy", metavar="FILE")
parser.add_option( "--traintumor",
action="store_true", dest="traintumor", default=False,
help="train model for tumor segmentation", metavar="FILE")
parser.add_option( "--setuptestset",
action="store_true", dest="setuptestset", default=False,
help="cross validate test set", metavar="FILE")
parser.add_option( "--setupobjtestset",
action="store_true", dest="setupobjtestset", default=False,
help="cross validate test set", metavar="FILE")
parser.add_option( "--debug",
action="store_true", dest="debug", default=False,
help="compare tutorial dtype", metavar="Bool")
parser.add_option( "--ModelID",
action="store", dest="modelid", default=None,
help="model id", metavar="FILE")
parser.add_option( "--outputModelBase",
action="store", dest="outputModelBase", default=None,
help="output location ", metavar="Path")
parser.add_option( "--predictmodel",
action="store", dest="predictmodel", default=None,
help="apply model to image", metavar="Path")
parser.add_option( "--predictimage",
action="store", dest="predictimage", default=None,
help="apply model to image", metavar="Path")
parser.add_option( "--segmentation",
action="store", dest="segmentation", default=None,
help="model output ", metavar="Path")
parser.add_option( "--modelpath",
action="store", dest="modelpath", default=None,
help="model location", metavar="Path")
parser.add_option( "--anonymize",
action="store", dest="anonymize", default=None,
help="setup info", metavar="Path")
parser.add_option( "--trainingmodel",
action="store", dest="trainingmodel", default='full',
help="setup info", metavar="string")
parser.add_option( "--trainingloss",
action="store", dest="trainingloss", default='dscimg',
help="setup info", metavar="string")
parser.add_option( "--trainingsolver",
action="store", dest="trainingsolver", default='SGD',
help="setup info", metavar="string")
parser.add_option( "--backbone",
action="store", dest="backbone", default='resnet50',
help="setup info", metavar="string")
parser.add_option( "--databaseid",
action="store", dest="databaseid", default='comp',
help="available data: hcc, crc, dbg", metavar="string")
parser.add_option( "--root_dir",
action="store", dest="root_dir", default=os.path.abspath("../../"),
help="code directory", metavar="string")
parser.add_option( "--kfolds",
type="int", dest="kfolds", default=5,
help="setup info", metavar="int")
parser.add_option( "--idfold",
type="int", dest="idfold", default=0,
help="setup info", metavar="int")
(options, args) = parser.parse_args()
# Root directory of the project
ROOT_DIR = options.root_dir
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
#get_ipython().run_line_magic('matplotlib', 'inline')
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
class TumorConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "tumor"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 1 # background + lesion
# Use small images for faster training. Set the limits of the small side and
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 512
# assume square image
assert IMAGE_MAX_DIM == IMAGE_MIN_DIM
IMAGE_CHANNEL_COUNT = 1
MEAN_PIXEL = 0
IMAGE_RESIZE_MODE = 'none'
BACKBONE = options.backbone
MYOPTIMIZER = options.trainingsolver
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 1
STEPS_PER_EPOCH = 100
# Loss weights for more precise optimization.
# Can be used for R-CNN training setup.
LOSS_WEIGHTS = {
"rpn_class_loss": 1.0,
"rpn_bbox_loss": 1.,
"mrcnn_class_loss": 1.,
"mrcnn_bbox_loss": 1.,
"mrcnn_mask_loss": 1.
}
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
# raw dicom data is usually short int (2bytes) datatype
# labels are usually uchar (1byte)
IMG_DTYPE = np.int16
SEG_DTYPE = np.uint8
globaldirectorytemplate = '%s/%s/%s/%s/%d/%.2e%.2e%.2e%.2e%.2e/%03d%03d/%03d/%03d'
config = TumorConfig()
config.display()
# options dependency
options.dbfile = trainingdictionary[options.databaseid]['dbfile']
options.rootlocation = trainingdictionary[options.databaseid]['rootlocation']
options.sqlitefile = options.dbfile.replace('.csv','.sqlite' )
options.globalnpfile = options.dbfile.replace('.csv','%d.npy' % config.IMAGE_MAX_DIM)
print('database file: %s sqlfile: %s dbfile: %s rootlocation: %s' % (options.globalnpfile,options.sqlitefile,options.dbfile, options.rootlocation ) )
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs", config.globaldirectorytemplate % (options.databaseid,options.trainingloss,config.BACKBONE,options.trainingsolver,config.IMAGE_MAX_DIM,config.LOSS_WEIGHTS['rpn_class_loss'],config.LOSS_WEIGHTS['rpn_bbox_loss'],config.LOSS_WEIGHTS['mrcnn_class_loss'],config.LOSS_WEIGHTS['mrcnn_bbox_loss'],config.LOSS_WEIGHTS['mrcnn_mask_loss'],config.IMAGES_PER_GPU,config.VALIDATION_STEPS,options.kfolds,options.idfold) )
print (MODEL_DIR)
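# With the defaults above (databaseid='comp', trainingloss='dscimg', backbone='resnet50',
# solver='SGD', IMAGE_MAX_DIM=512, all loss weights 1.0, IMAGES_PER_GPU=8, VALIDATION_STEPS=5,
# kfolds=5, idfold=0) this resolves to something like:
#   <ROOT_DIR>/logs/comp/dscimg/resnet50/SGD/512/1.00e+001.00e+001.00e+001.00e+001.00e+00/008005/005/000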
# build data base from CSV file
def GetDataDictionary():
import sqlite3
CSVDictionary = {}
tagsconn = sqlite3.connect(options.sqlitefile)
cursor = tagsconn.execute(' SELECT aq.* from trainingdata aq ;' )
names = [description[0] for description in cursor.description]
sqlStudyList = [ dict(zip(names,xtmp)) for xtmp in cursor ]
for row in sqlStudyList :
CSVDictionary[int( row['dataid'])] = {'image':row['image'], 'label':row['label'], 'uid':"%s" %row['uid']}
return CSVDictionary
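# Sketch of the returned structure (the keys are the CSV dataids; paths are illustrative):
#   {1: {'image': '/path/to/case0001/image.nii.gz',
#        'label': '/path/to/case0001/label.nii.gz',
#        'uid': '0001'},
#    2: {...}, ... }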
# setup kfolds
def GetSetupKfolds(numfolds,idfold,dataidsfull ):
from sklearn.model_selection import KFold
if (numfolds < idfold or numfolds < 1):
raise("data input error")
# split in folds
if (numfolds > 1):
kf = KFold(n_splits=numfolds)
allkfolds = [ (list(map(lambda iii: dataidsfull[iii], train_index)), list(map(lambda iii: dataidsfull[iii], test_index))) for train_index, test_index in kf.split(dataidsfull )]
train_index = allkfolds[idfold][0]
test_index = allkfolds[idfold][1]
else:
train_index = np.array(dataidsfull )
test_index = None
return (train_index,test_index)
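# Usage sketch: split the dataset ids into train/test for one fold of the cross validation.
# dataidsfull would normally come from the sqlite database (see options.sqlitefile above):
#   dataidsfull = list(GetDataDictionary().keys())
#   (train_index, test_index) = GetSetupKfolds(options.kfolds, options.idfold, dataidsfull)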
## Borrowed from
## $(SLICER_DIR)/CTK/Libs/DICOM/Core/Resources/dicom-schema.sql
##
## --
## -- A simple SQLITE3 database schema for modelling locally stored DICOM files
## --
## -- Note: the semicolon at the end is necessary for the simple parser to separate
## -- the statements since the SQlite driver does not handle multiple
## -- commands per QSqlQuery::exec call!
## -- ;
## TODO note that SQLite does not enforce the length of a VARCHAR.
## TODO (9) What is the maximum size of a VARCHAR in SQLite?
##
## TODO http://www.sqlite.org/faq.html#q9
##
## TODO SQLite does not enforce the length of a VARCHAR. You can declare a VARCHAR(10) and SQLite will be happy to store a 500-million character string there. And it will keep all 500-million characters intact. Your content is never truncated. SQLite understands the column type of "VARCHAR(N)" to be the same as "TEXT", regardless of the value of N.
initializedb = """
DROP TABLE IF EXISTS 'Images' ;
DROP TABLE IF EXISTS 'Patients' ;
DROP TABLE IF EXISTS 'Series' ;
DROP TABLE IF EXISTS 'Studies' ;
DROP TABLE IF EXISTS 'Directories' ;
DROP TABLE IF EXISTS 'lstat' ;
DROP TABLE IF EXISTS 'overlap' ;
CREATE TABLE 'Images' (
'SOPInstanceUID' VARCHAR(64) NOT NULL,
'Filename' VARCHAR(1024) NOT NULL ,
'SeriesInstanceUID' VARCHAR(64) NOT NULL ,
'InsertTimestamp' VARCHAR(20) NOT NULL ,
PRIMARY KEY ('SOPInstanceUID') );
CREATE TABLE 'Patients' (
'PatientsUID' INT PRIMARY KEY NOT NULL ,
'StdOut' varchar(1024) NULL ,
'StdErr' varchar(1024) NULL ,
'ReturnCode' INT NULL ,
'FindStudiesCMD' VARCHAR(1024) NULL );
CREATE TABLE 'Series' (
'SeriesInstanceUID' VARCHAR(64) NOT NULL ,
'StudyInstanceUID' VARCHAR(64) NOT NULL ,
'Modality' VARCHAR(64) NOT NULL ,
'SeriesDescription' VARCHAR(255) NULL ,
'StdOut' varchar(1024) NULL ,
'StdErr' varchar(1024) NULL ,
'ReturnCode' INT NULL ,
'MoveSeriesCMD' VARCHAR(1024) NULL ,
PRIMARY KEY ('SeriesInstanceUID','StudyInstanceUID') );
CREATE TABLE 'Studies' (
'StudyInstanceUID' VARCHAR(64) NOT NULL ,
'PatientsUID' INT NOT NULL ,
'StudyDate' DATE NULL ,
'StudyTime' VARCHAR(20) NULL ,
'AccessionNumber' INT NULL ,
'StdOut' varchar(1024) NULL ,
'StdErr' varchar(1024) NULL ,
'ReturnCode' INT NULL ,
'FindSeriesCMD' VARCHAR(1024) NULL ,
'StudyDescription' VARCHAR(255) NULL ,
PRIMARY KEY ('StudyInstanceUID') );
CREATE TABLE 'Directories' (
'Dirname' VARCHAR(1024) ,
PRIMARY KEY ('Dirname') );
CREATE TABLE lstat (
InstanceUID VARCHAR(255) NOT NULL, -- 'studyuid *OR* seriesUID'
SegmentationID VARCHAR(80) NOT NULL, -- UID for segmentation file
FeatureID VARCHAR(80) NOT NULL, -- UID for image feature
LabelID INT NOT NULL, -- label id for LabelSOPUID statistics of FeatureSOPUID
Mean REAL NULL,
StdD REAL NULL,
Max REAL NULL,
Min REAL NULL,
Count INT NULL,
Volume REAL NULL,
ExtentX INT NULL,
ExtentY INT NULL,
ExtentZ INT NULL,
PRIMARY KEY (InstanceUID,SegmentationID,FeatureID,LabelID) );
-- expected csv format
-- FirstImage,SecondImage,LabelID,InstanceUID,MatchingFirst,MatchingSecond,SizeOverlap,DiceSimilarity,IntersectionRatio
CREATE TABLE overlap(
FirstImage VARCHAR(80) NOT NULL, -- UID for FirstImage
SecondImage VARCHAR(80) NOT NULL, -- UID for SecondImage
LabelID INT NOT NULL, -- label id for LabelSOPUID statistics of FeatureSOPUID
InstanceUID
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs strace or dtrace on a test and processes the logs to extract the
dependencies from the source tree.
Automatically extracts directories where all the files are used to make the
dependencies list more compact.
"""
import codecs
import csv
import logging
import optparse
import os
import posixpath
import re
import subprocess
import sys
## OS-specific imports
if sys.platform == 'win32':
from ctypes.wintypes import create_unicode_buffer
from ctypes.wintypes import windll, FormatError # pylint: disable=E0611
from ctypes.wintypes import GetLastError # pylint: disable=E0611
elif sys.platform == 'darwin':
import Carbon.File # pylint: disable=F0401
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
KEY_TRACKED = 'isolate_dependency_tracked'
KEY_UNTRACKED = 'isolate_dependency_untracked'
## OS-specific functions
if sys.platform == 'win32':
def QueryDosDevice(drive_letter):
"""Returns the Windows 'native' path for a DOS drive letter."""
assert re.match(r'^[a-zA-Z]:$', drive_letter), drive_letter
# Guesswork. QueryDosDeviceW never returns the required number of bytes.
chars = 1024
drive_letter = unicode(drive_letter)
p = create_unicode_buffer(chars)
if 0 == windll.kernel32.QueryDosDeviceW(drive_letter, p, chars):
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'QueryDosDevice(%s): %s (%d)' % (
str(drive_letter), FormatError(err), err))
return p.value
def GetShortPathName(long_path):
"""Returns the Windows short path equivalent for a 'long' path."""
long_path = unicode(long_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(long_path) and not long_path.startswith('\\\\?\\'):
long_path = '\\\\?\\' + long_path
chars = windll.kernel32.GetShortPathNameW(long_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetShortPathNameW(long_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'GetShortPathName(%s): %s (%d)' % (
str(long_path), FormatError(err), err))
def GetLongPathName(short_path):
"""Returns the Windows long path equivalent for a 'short' path."""
short_path = unicode(short_path)
# Adds '\\\\?\\' when given an absolute path so the MAX_PATH (260) limit is
# not enforced.
if os.path.isabs(short_path) and not short_path.startswith('\\\\?\\'):
short_path = '\\\\?\\' + short_path
chars = windll.kernel32.GetLongPathNameW(short_path, None, 0)
if chars:
p = create_unicode_buffer(chars)
if windll.kernel32.GetLongPathNameW(short_path, p, chars):
return p.value
err = GetLastError()
if err:
# pylint: disable=E0602
raise WindowsError(
err,
'GetLongPathName(%s): %s (%d)' % (
str(short_path), FormatError(err), err))
def get_current_encoding():
"""Returns the 'ANSI' code page associated to the process."""
return 'cp%d' % int(windll.kernel32.GetACP())
class DosDriveMap(object):
"""Maps \Device\HarddiskVolumeN to N: on Windows."""
# Keep one global cache.
_MAPPING = {}
def __init__(self):
if not self._MAPPING:
# This is related to UNC resolver on windows. Ignore that.
self._MAPPING['\\Device\\Mup'] = None
for letter in (chr(l) for l in xrange(ord('C'), ord('Z')+1)):
try:
letter = '%s:' % letter
mapped = QueryDosDevice(letter)
# It can happen. Assert until we see it happen in the wild. In
# practice, prefer the lower drive letter.
assert mapped not in self._MAPPING
if mapped not in self._MAPPING:
self._MAPPING[mapped] = letter
except WindowsError: # pylint: disable=E0602
pass
def to_dos(self, path):
"""Converts a native NT path to DOS path."""
m = re.match(r'(^\\Device\\[a-zA-Z0-9]+)(\\.*)?$', path)
assert m, path
if not m.group(1) in self._MAPPING:
# Unmapped partitions may be accessed by windows for the
# fun of it while the test is running. Discard these.
return None
drive = self._MAPPING[m.group(1)]
if not drive or not m.group(2):
return drive
return drive + m.group(2)
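  # Illustrative mapping (actual drive letters depend on the machine's volumes):
  #   to_dos(r'\Device\HarddiskVolume1\Users\foo') -> r'C:\Users\foo'  (if HarddiskVolume1 maps to C:)
  #   to_dos(r'\Device\Mup\share\file')            -> None             (UNC resolver entries are ignored)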
def get_native_path_case(root, relative_path):
"""Returns the native path case."""
if sys.platform == 'win32':
# Windows used to have an option to turn on case sensitivity on non Win32
# subsystem but that's out of scope here and isn't supported anymore.
# First process root.
if root:
root = GetLongPathName(GetShortPathName(root)) + os.path.sep
path = os.path.join(root, relative_path) if root else relative_path
# Go figure why GetShortPathName() is needed.
return GetLongPathName(GetShortPathName(path))[len(root):]
elif sys.platform == 'darwin':
# Technically, it's only HFS+ on OSX that is case insensitive. It's
# the default setting on HFS+ but can be changed.
root_ref, _ = Carbon.File.FSPathMakeRef(root)
rel_ref, _ = Carbon.File.FSPathMakeRef(os.path.join(root, relative_path))
return rel_ref.FSRefMakePath()[len(root_ref.FSRefMakePath())+1:]
else:
# Give up on cygwin, as GetLongPathName() can't be called.
return relative_path
def get_flavor():
"""Returns the system default flavor. Copied from gyp/pylib/gyp/common.py."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
'sunos5': 'solaris',
'freebsd7': 'freebsd',
'freebsd8': 'freebsd',
}
return flavors.get(sys.platform, 'linux')
def isEnabledFor(level):
return logging.getLogger().isEnabledFor(level)
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
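# Illustrative behaviour:
#   fix_python_path(['python', 'foo.py'])  -> [sys.executable, 'foo.py']
#   fix_python_path(['foo.py', '--bar'])   -> [sys.executable, 'foo.py', '--bar']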
def posix_relpath(path, root):
"""posix.relpath() that keeps trailing slash."""
out = posixpath.relpath(path, root)
if path.endswith('/'):
out += '/'
return out
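# Illustrative behaviour (pure path manipulation, no filesystem access):
#   posix_relpath('/a/b/c', '/a')   -> 'b/c'
#   posix_relpath('/a/b/c/', '/a')  -> 'b/c/'   (trailing slash is preserved)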
class Strace(object):
"""strace implies linux."""
IGNORED = (
'/bin',
'/dev',
'/etc',
'/lib',
'/proc',
'/sys',
'/tmp',
'/usr',
'/var',
)
class _Context(object):
"""Processes a strace log line and keeps the list of existent and non
existent files accessed.
Ignores directories.
"""
# This is the most common format. pid function(args) = result
RE_HEADER = re.compile(r'^(\d+)\s+([^\(]+)\((.+?)\)\s+= (.+)$')
# An interrupted function call, only grab the minimal header.
RE_UNFINISHED = re.compile(r'^(\d+)\s+([^\(]+).*$')
UNFINISHED = ' <unfinished ...>'
# A resumed function call.
RE_RESUMED = re.compile(r'^(\d+)\s+<\.\.\. ([^ ]+) resumed> (.+)$')
# A process received a signal.
RE_SIGNAL = re.compile(r'^\d+\s+--- SIG[A-Z]+ .+ ---')
# A process didn't handle a signal.
RE_KILLED = re.compile(r'^(\d+)\s+\+\+\+ killed by ([A-Z]+) \+\+\+$')
# A call was canceled.
RE_UNAVAILABLE = re.compile(r'\)\s+= \? <unavailable>$')
# Arguments parsing.
RE_CHDIR = re.compile(r'^\"(.+?)\"$')
RE_EXECVE = re.compile(r'^\"(.+?)\", \[.+?\], \[.+?\]$')
RE_OPEN2 = re.compile(r'^\"(.*?)\", ([A-Z\_\|]+)$')
RE_OPEN3 = re.compile(r'^\"(.*?)\", ([A-Z\_\|]+), (\d+)$')
RE_RENAME = re.compile(r'^\"(.+?)\", \"(.+?)\"$')
def __init__(self, blacklist):
self._cwd = {}
self.blacklist = blacklist
self.files = set()
self.non_existent = set()
# Key is a tuple(pid, function name)
self._pending_calls = {}
self._line_number = 0
@classmethod
def traces(cls):
prefix = 'handle_'
return [i[len(prefix):] for i in dir(cls) if i.startswith(prefix)]
def on_line(self, line):
self._line_number += 1
line = line.strip()
if self.RE_SIGNAL.match(line):
# Ignore signals.
return
m = self.RE_KILLED.match(line)
if m:
self.handle_exit_group(int(m.group(1)), m.group(2), None, None)
return
if line.endswith(self.UNFINISHED):
line = line[:-len(self.UNFINISHED)]
m = self.RE_UNFINISHED.match(line)
assert m, '%d: %s' % (self._line_number, line)
self._pending_calls[(m.group(1), m.group(2))] = line
return
m = self.RE_UNAVAILABLE.match(line)
if m:
# This usually means a process was killed and a pending call was
# canceled.
# TODO(maruel): Look up the last exit_group() trace just above and make
# sure any self._pending_calls[(pid, anything)] is properly flushed.
return
m = self.RE_RESUMED.match(line)
if m:
pending = self._pending_calls.pop((m.group(1), m.group(2)))
# Reconstruct the line.
line = pending + m.group(3)
m = self.RE_HEADER.match(line)
assert m, '%d: %s' % (self._line_number, line)
return getattr(self, 'handle_%s' % m.group(2))(
int(m.group(1)),
m.group(2),
m.group(3),
m.group(4))
def handle_chdir(self, pid, _function, args, result):
"""Updates cwd."""
if result.startswith('0'):
cwd = self.RE_CHDIR.match(args).group(1)
if not cwd.startswith('/'):
cwd2 = os.path.join(self._cwd[pid], cwd)
logging.debug('handle_chdir(%d, %s) -> %s' % (pid, cwd, cwd2))
self._cwd[pid] = cwd2
else:
logging.debug('handle_chdir(%d, %s)' % (pid, cwd))
self._cwd[pid] = cwd
else:
assert False, 'Unexpected fail: %s' % result
def handle_clone(self, pid, _function, _args, result):
"""Transfers cwd."""
if result == '? ERESTARTNOINTR (To be restarted)':
return
self._cwd[int(result)] = self._cwd[pid]
def handle_execve(self, pid, _function, args, result):
self._handle_file(pid, self.RE_EXECVE.match(args).group(1), result)
def handle_exit_group(self, pid, _function, _args, _result):
"""Removes cwd."""
del self._cwd[pid]
@staticmethod
def handle_fork(_pid, _function, args, result):
assert False, (args, result)
def handle_open(self, pid, _function, args, result):
args = (self.RE_OPEN3.match(args) or self.RE_OPEN2.match(args)).groups()
if 'O_DIRECTORY' in args[1]:
return
self._handle_file(pid, args[0], result)
def handle_rename(self, pid, _function, args, result):
args = self.RE_RENAME.match(args).groups()
self._handle_file(pid, args[0], result)
self._handle_file(pid, args[1], result)
@staticmethod
def handle_stat64(_pid, _function, args, result):
assert False, (args, result)
@staticmethod
def handle_vfork(_pid, _function, args, result):
assert False, (args, result)
def _handle_file(self, pid, filepath, result):
if result.startswith('-1'):
return
old_filepath = filepath
if not filepath.startswith('/'):
filepath = os.path.join(self._cwd[pid], filepath)
if self.blacklist(filepath):
return
if old_filepath != filepath:
logging.debug(
'_handle_file(%d, %s) -> %s' % (pid, old_filepath, filepath))
else:
logging.debug('_handle_file(%d, %s)' % (pid, filepath))
if filepath not in self.files and filepath not in self.non_existent:
if os.path.isfile(filepath):
self.files.add(filepath)
else:
self.non_existent.add(filepath)
@classmethod
def gen_trace(cls, cmd, cwd, logname):
"""Runs strace on an executable."""
logging.info('gen_trace(%s, %s, %s)' % (cmd, cwd, logname))
silent = not isEnabledFor(logging.INFO)
stdout = stderr = None
if silent:
stdout = stderr = subprocess.PIPE
traces = ','.join(cls._Context.traces())
trace_cmd = ['strace', '-f', '-e', 'trace=%s' % traces, '-o', logname]
child = subprocess.Popen(
trace_cmd + cmd, cwd=cwd, stdout=stdout, stderr=stderr)
out, err = child.communicate()
# Once it's done, inject a chdir() call to cwd to be
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
@file py102-example2-zernike.py
@brief Fitting a surface in Python example for Python 102 lecture
@author <NAME> (<EMAIL>)
@url http://python101.vanwerkhoven.org
@date 20111012
Created by <NAME> (<EMAIL>) on 2011-10-12
Copyright (c) 2011 <NAME>. All rights reserved.
This file is licensed under the Creative Commons Attribution-Share Alike
license versions 3.0 or higher, see
http://creativecommons.org/licenses/by-sa/3.0/
Objectified for easier calling and set-up by <EMAIL> 2015
AS: see
http://www.staff.science.uu.nl/~werkh108/docs/teach/2011b_python/course/python102/python_102-print.pdf
and
http://www.vanwerkhoven.org/teaching.html esp. pyhon 102
Python 3-ized, print(), //, and xrange->range only <EMAIL> 2018
"""
"""
<EMAIL>:12 ./ZernikeFitter.py
grid:
[[[-1.000e+00 -1.000e+00 -1.000e+00 -1.000e+00]
[-5.000e-01 -5.000e-01 -5.000e-01 -5.000e-01]
[ 0.000e+00 0.000e+00 0.000e+00 0.000e+00]
[ 5.000e-01 5.000e-01 5.000e-01 5.000e-01]]
[[-1.000e+00 -5.000e-01 0.000e+00 5.000e-01]
[-1.000e+00 -5.000e-01 0.000e+00 5.000e-01]
[-1.000e+00 -5.000e-01 0.000e+00 5.000e-01]
[-1.000e+00 -5.000e-01 0.000e+00 5.000e-01]]]
grid_rho
[[ 1.414e+00 1.118e+00 1.000e+00 1.118e+00]
[ 1.118e+00 7.071e-01 5.000e-01 7.071e-01]
[ 1.000e+00 5.000e-01 0.000e+00 5.000e-01]
[ 1.118e+00 7.071e-01 5.000e-01 7.071e-01]]
<EMAIL>:13 ./ZernikeFitter.py
grid:
[[[-6.667e-01 -6.667e-01 -6.667e-01]
[ 0.000e+00 0.000e+00 0.000e+00]
[ 6.667e-01 6.667e-01 6.667e-01]]
[[-6.667e-01 0.000e+00 6.667e-01]
[-6.667e-01 0.000e+00 6.667e-01]
[-6.667e-01 0.000e+00 6.667e-01]]]
grid_rho
[[ 9.428e-01 6.667e-01 9.428e-01]
[ 6.667e-01 0.000e+00 6.667e-01]
[ 9.428e-01 6.667e-01 9.428e-01]]
"""
### Libraries
import sys
import astropy.io.fits as pyfits
import numpy as N
from scipy.misc import factorial as fac
### Init functions
def zernike_rad(m, n, rho):
"""
Calculate the radial component of Zernike polynomial (m, n)
given a grid of radial coordinates rho.
>>> zernike_rad(3, 3, 0.333)
0.036926037000000009
>>> zernike_rad(1, 3, 0.333)
-0.55522188900000002
>>> zernike_rad(3, 5, 0.12345)
-0.007382104685237683
"""
if (n < 0 or m < 0 or abs(m) > n):
raise ValueError
if ((n-m) % 2):
return rho*0.0
pre_fac = lambda k: (-1.0)**k * fac(n-k) / ( fac(k) * fac( (n+m)/2.0 - k ) * fac( (n-m)/2.0 - k ) )
return sum(pre_fac(k) * rho**(n-2.0*k) for k in range((n-m)//2+1))
def zernike(m, n, rho, phi):
"""
Calculate Zernike polynomial (m, n) given a grid of radial
coordinates rho and azimuthal coordinates phi.
>>> zernike(3,5, 0.12345, 1.0)
0.0073082282475042991
>>> zernike(1, 3, 0.333, 5.0)
-0.15749545445076085
"""
if (m > 0): return zernike_rad(m, n, rho) * N.cos(m * phi)
if (m < 0): return zernike_rad(-m, n, rho) * N.sin(-m * phi)
return zernike_rad(0, n, rho)
def zernikel(j, rho, phi):
"""
Calculate Zernike polynomial with Noll coordinate j given a grid of radial
coordinates rho and azimuthal coordinates phi.
>>> zernikel(0, 0.12345, 0.231)
1.0
>>> zernikel(1, 0.12345, 0.231)
0.028264010304937772
>>> zernikel(6, 0.12345, 0.231)
0.0012019069816780774
"""
n = 0
while (j > n):
n += 1
j -= n
m = -n+2*j
return zernike(m, n, rho, phi)
def generate_testdata(nzern_, grid_rho_, grid_phi_):
# hardcoded first 15 zernikes
test_vec_ = N.random.random(nzern_) - 0.5 # fewer Z's in the test surface...
test_vec_[15:] = 0.0 # 15 modes excited and rest zero...
print("input Z coeffts:\n", test_vec_)
test_surf_ = sum(val * zernikel(i, grid_rho_, grid_phi_) for (i, val) in enumerate(test_vec_))
return test_vec_, test_surf_
class ZernikeFitter:
"""
Does Zernikes on a circular disk fitting just inside your array of size narr,
so if your pupil is undersized within the pupil array, snip off padding before
sending its wavefront into this object to be fit.
Usage:
import ZernikeFitter as ZF
zf = ZF.ZernikeFitter(nzern=10, narr=200)
zcoeffs, fittedsurface, residualsurface = zf.fit_zernikes_to_surface(yoursurface)
Zernikes break naturally at nzern = 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 ... n*(n+1)/2
N.B. These are Noll-numbered Zernikes <EMAIL> 2015
"""
def __init__(self, nzern=15, narr=200, SEEGRID=False, SEEVAR=False, extrasupportindex = None):
"""
Input: nzern: number of Noll Zernikes to use in the fit
Input: narr: the live pupil array size you want to use
Sets up list of poly's and grids & support grids
Makes coordinate grid for rho and phi and circular support mask
Calculates 'overlap integrals' (covariance matrix) of the Zernike polynomials on your grid and array size
Calculates the inverse of this matrix, so it's 'ready to fit' your incoming array
extrasupportindex is a 2D index array to define eg spider supports where the pupil is not live
"""
self.narr = narr
self.nzern = nzern # tbd - allowed numbers from Pascal's Triangle sum(n) starting from n=1, viz. n(n+1)/2
self.grid = (N.indices((self.narr, self.narr), dtype=float) - self.narr//2) / (float(self.narr)*0.5)  # N.float was removed in newer numpy; builtin float is equivalent here
self.grid_rho = (self.grid**2.0).sum(0)**0.5
if SEEGRID:
print("grid:")
print(self.grid)
print("grid_rho")
print(self.grid_rho)
sys.exit()
self.grid_phi = N.arctan2(self.grid[0], self.grid[1])
self.grid_mask = self.grid_rho <= 1
self.grid_outside = self.grid_rho > 1
# Add a spider support sort of extra masking here such as:
if extrasupportindex is not None:  # explicit None check; truth-testing an index array would be ambiguous
# Add a 'spider support':
self.grid_mask[extrasupportindex] = 0
self.grid_outside[extrasupportindex] = 1
# Compute list of explicit Zernike polynomials and keep them around for fitting
self.zern_list = [zernikel(i, self.grid_rho, self.grid_phi)*self.grid_mask for i in range(self.nzern)]
# Force zernikes to be unit standard deviation over circular mask
for z, zfunc in enumerate(self.zern_list):
if z>0: self.zern_list[z] = (zfunc/zfunc[self.grid_mask].std()) * self.grid_mask
else: self.zern_list[0] = zfunc * self.grid_mask
#stack = N.zeros((nzern,narr,narr))
#print "variance:"
#nolli = 0
# To normalize all but piston to RMS 1 divide by this number
self.sigma = []
for zfunc in self.zern_list:
self.sigma.append(zfunc[self.grid_mask].std())
# Calculate covariance between all Zernike polynomials
self.cov_mat = N.array([[N.sum(zerni * zernj) for zerni in self.zern_list] for zernj in self.zern_list])
# Invert covariance matrix using SVD
self.cov_mat_in = N.linalg.pinv(self.cov_mat)
def fit_zernikes_to_surface(self, surface):
"""
Input: surface: input surface to be fit (2D array)
Output: zcoeffs: 1d vector of coefficients of the fit (self.nzern in length)
Output: rec_wf: the 'recovered wavefront' - i.e. the fitted zernikes, in same array size as surface
Output: res_wf: surface - rec_wf, i.e. the residual error in the fit
"""
# Calculate the inner product of each Zernike mode with the test surface
wf_zern_inprod = N.array([N.sum(surface * zerni) for zerni in self.zern_list])
# Given the inner product vector of the test wavefront with Zernike basis,
# calculate the Zernike polynomial coefficients
zcoeffs = N.dot(self.cov_mat_in, wf_zern_inprod)
print("First few recovered Zernike coeffts:", zcoeffs[:min(10, self.nzern)])
# Reconstruct (e.g. wavefront) surface from Zernike components
rec_wf = sum(val * zernikel(i, self.grid_rho, self.grid_phi) for (i, val) in enumerate(zcoeffs))
rec_wf = rec_wf * self.grid_mask
print( "Standard deviation of fit is {0:.3e}".format((surface*self.grid_mask - rec_wf)[self.grid_mask].std()) )
return zcoeffs, rec_wf, (surface*self.grid_mask - rec_wf)
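# In effect, fit_zernikes_to_surface solves a linear least-squares problem via the normal
# equations: cov_mat is the Gram matrix C[i, j] = sum(Z_i * Z_j) over the masked pupil,
# wf_zern_inprod is the right-hand side b[i] = sum(surface * Z_i), and zcoeffs = pinv(C) . b.
# A minimal standalone sketch of the same idea (variable names here are illustrative only):
#
#   basis = zf.zern_list                                        # masked Zernike arrays
#   C = N.array([[N.sum(zi * zj) for zi in basis] for zj in basis])
#   b = N.array([N.sum(my_surface * zi) for zi in basis])       # my_surface: 2D array to fit
#   coeffs = N.dot(N.linalg.pinv(C), b)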
if __name__ == "__main__":
# This will set up default printing of numpy to 3 dec places, scientific notation
N.set_printoptions(precision=3, threshold=None, edgeitems=None, linewidth=None,
suppress=None, nanstr=None, infstr=None, formatter={'float': '{: 0.3e}'.format} )
#f = ZernikeFitter(nzern=6, narr=4, SEEGRID=True)
#zf = ZernikeFitter(nzern=6, narr=3, SEEGRID=True)
#sys.exit()
#### early tests w/exits
#zf = ZernikeFitter(nzern = 21, narr=201, SEEVAR=True) # Set up to use first 21 Zernikes, radial order 4
# above exits with sys.exit()
GEN = True # generate test data
GEN = False # read in a kolmog phase screen
# initialize ZernikeFitter
# two test cases are possible -
# test data file or
# test data created in-memory with only the first 15 Zernikes
#
if GEN is False: # read in 200 x 200 test data file from disk - adjust path by hand
test_surf = pyfits.getdata("kol_D_5.00_ro_0.50_DoverR0_10.0l1no0001phase.fits")[:100,:100]
print(test_surf.shape, "test input file found: {:s}".format("kol_D_5.00_ro_0.50_DoverR0_10.0l1no0001phase.fits"))
zf = ZernikeFitter(nzern=300, narr=test_surf.shape[0]) # up to radial order 22
else:
zf = ZernikeFitter(nzern = 21, narr=201) # Set up to use first 21 Zernikes, radial order 4
test_vec, test_surf = generate_testdata(zf.nzern, zf.grid_rho, zf.grid_phi)
print(test_surf.shape, "test input data generated")
if True:
stack = N.zeros((zf.nzern,zf.narr,zf.narr))
for z, zfunc in enumerate(zf.zern_list):
if z>0: stack[z,:,:] = (zfunc/zfunc[zf.grid_mask].std())*zf.grid_mask
else: stack[z,:,:] = zfunc*zf.grid_mask
pyfits.PrimaryHDU(data=stack).writeto("ZfunctionsUnitVar.fits", overwrite=True)
# Fit the array with Zernikes:
zcoeffs, rec_wf, res_wf = zf.fit_zernikes_to_surface(test_surf*zf.grid_mask)
#test_surf[zf.grid_outside] = 0.0
#rec_wf[zf.grid_outside] = 0.0
#res_wf[zf.grid_outside] = 0.0
# Test reconstruction coeffts for in-memory case (i.e. the fit)
if GEN:
print("numpy.allclose(Input, reconstruction coeffs): ", N.allclose(test_vec, zcoeffs), "\n")
### Store data to disk
from time import time, asctime, gmtime, localtime
clist = []
clist.append(pyfits.Card('Program', 'ZernikeFitter.py') )
clist.append(pyfits.Card('Epoch', time()) )
clist.append(pyfits.Card('utctime', asctime(gmtime(time()))) )
clist.append(pyfits.Card('loctime', asctime(localtime(time()))) )
hdr = pyfits.Header(cards=clist+[pyfits.Card('Desc', 'Surface input')])
pyfits.writeto('FitZernikes_inputsurface.fits', test_surf, header=hdr, overwrite=True, checksum=True)
hdr = pyfits.Header(cards=clist+[pyfits.Card('Desc', 'Reconstructed surface')])
#pyfits.writeto('FitZernikes_fittedsurface.fits', rec_wf*zf.grid_mask, header=hdr, overwrite=True, checksum=True)
pyfits.writeto('FitZernikes_fittedsurface.fits', rec_wf, header=hdr, overwrite=True, checksum=True)
if GEN is False:
hdr = pyfits.Header(cards=clist+[pyfits.Card('Desc', 'Data - Zernfit')])
pyfits.writeto('FitZernikes_residual.fits', res_wf*zf.grid_mask, header=hdr, overwrite=True, checksum=True)
### Plot some results
import pylab as plt
#fig = plt.figure(1)
#ax | |
first_pitch,
"last_pitch_thrown": last_pitch,
"since_game_start": since_start,
"at_bat_duration": dur,
"is_complete_at_bat": final_pbp_event["is_complete_at_bat"],
"score": first_pbp_event["score"],
"outs_before_play": first_pbp_event["outs_before_play"],
"runners_on_base": first_pbp_event["runners_on_base"],
"runs_outs_result": runs_outs_result,
"play_description": final_pbp_event["play_description"],
"pitch_sequence_description": pitch_sequence_description,
"pbp_events": self.at_bat_event_groups[ab_id],
"pitchfx": pfx_data,
"removed_pitchfx": removed_pfx,
}
self.game_events_combined_data.append(combined_at_bat_data)
self.save_removed_pfx(all_removed_pfx)
self.save_invalid_pitchfx(at_bat_ids_invalid_pfx)
return Result.Ok()
def reconcile_at_bat_ids(self):
at_bat_ids_from_box = list(set(self.at_bat_event_groups.keys()))
at_bat_ids_from_box = self.order_at_bat_ids_by_time(at_bat_ids_from_box)
at_bat_ids_from_pfx = [pfx.at_bat_id for pfx in self.all_pfx_data_for_game]
at_bat_ids_invalid_pfx = list(set(at_bat_ids_from_pfx) - set(at_bat_ids_from_box))
return (at_bat_ids_from_box, at_bat_ids_invalid_pfx)
def order_at_bat_ids_by_time(self, at_bat_ids):
game_event_id_map = [
{
"at_bat_id": ab_id,
"pbp_table_row_number": self._get_first_table_row_num_for_at_bat(ab_id),
}
for ab_id in at_bat_ids
]
game_event_id_map.sort(key=lambda x: x["pbp_table_row_number"])
return [id_map["at_bat_id"] for id_map in game_event_id_map]
def _get_first_table_row_num_for_at_bat(self, at_bat_id):
return min(game_event["pbp_table_row_number"] for game_event in self.at_bat_event_groups[at_bat_id])
def get_all_pbp_events_for_at_bat(self, at_bat_id):
at_bat_events = [event for event in self.at_bat_event_groups[at_bat_id] if event["event_type"] == "AT_BAT"]
at_bat_events.sort(key=lambda x: x["pbp_table_row_number"])
return at_bat_events
def get_all_pfx_data_for_at_bat(self, at_bat_id):
pfx_for_at_bat = [pfx for pfx in self.all_pfx_data_for_game if pfx.at_bat_id == at_bat_id]
return self.convert_pfx_list_to_dict_list(pfx_for_at_bat)
def convert_pfx_list_to_dict_list(self, pfx_list):
pfx_dict_list = []
for pfx in pfx_list:
pfx_dict = pfx.as_dict()
pfx_dict.pop("__brooks_pitchfx_data__", None)
pfx_dict["at_bat_id"] = pfx.at_bat_id
pfx_dict["inning_id"] = pfx.inning_id
pfx_dict["pitcher_id_bbref"] = pfx.pitcher_id_bbref
pfx_dict["batter_id_bbref"] = pfx.batter_id_bbref
pfx_dict["pitcher_id_db"] = pfx.db_pitcher_id
pfx_dict["batter_id_db"] = pfx.db_batter_id
pfx_dict["game_start_time"] = pfx.game_start_time
pfx_dict["time_pitch_thrown"] = pfx.time_pitch_thrown
pfx_dict["seconds_since_game_start"] = pfx.seconds_since_game_start
pfx_dict_list.append(pfx_dict)
pfx_dict_list.sort(key=lambda x: x["ab_count"])
return pfx_dict_list
def get_total_pitches_in_sequence(self, pitch_sequence):
total_pitches = 0
for abbrev in pitch_sequence:
result = self.get_pitch_type(abbrev)
if result.failure:
return result
pitch_type = result.value
total_pitches += pitch_type["pitch_counts"]
return Result.Ok(total_pitches)
def get_pitch_type(self, abbrev):
try:
pitch_type = PPB_PITCH_LOG_DICT[abbrev]
return Result.Ok(pitch_type)
except KeyError as e:
return Result.Fail(f"Invalid pitch abbreviation: {abbrev}\n{repr(e)}")
def update_invalid_ibb_pfx(self, invalid_ibb_pfx):
for pfx in invalid_ibb_pfx:
pfx["is_invalid_ibb"] = True
return deepcopy(sorted(invalid_ibb_pfx, key=lambda x: (x["ab_id"], x["ab_count"])))
def determine_pfx_sequence(self, at_bat_id, pfx_data, pitch_count):
if pitch_count == 0:
return Result.Ok(([], [], []))
prev_ab_id = self.get_prev_at_bat_id(at_bat_id)
result = self.get_prev_pitch_thrown_time(prev_ab_id)
if result.failure:
return result
prev_pitch_thrown = result.value
valid_pfx = []
missing_pitch_numbers = []
possible_pfx = deepcopy(pfx_data)
pitch_number_dict = self.get_pitch_number_dict(pfx_data)
for pitch_num in range(1, pitch_count + 1):
matches = pitch_number_dict.get(pitch_num, [])
if not matches:
missing_pitch_numbers.append(pitch_num)
continue
if len(matches) == 1:
best_pfx = matches[0]
else:
result = self.find_best_pfx_for_pitch_number(
ab_id=at_bat_id,
prev_ab_id=prev_ab_id,
pfx_data=matches,
pitch_num=pitch_num,
prev_pitch_thrown=prev_pitch_thrown,
)
if result.failure:
return result
best_pfx = result.value
valid_pfx.append(best_pfx)
possible_pfx.remove(best_pfx)
prev_pitch_thrown = best_pfx["time_pitch_thrown"]
out_of_sequence_pfx = self.update_out_of_sequence_pfx(possible_pfx)
return Result.Ok((valid_pfx, out_of_sequence_pfx, missing_pitch_numbers))
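# Note on determine_pfx_sequence (above): for each expected pitch number 1..pitch_count it looks
# up the PitchFX rows reporting that number, keeps a single best match (duplicates are resolved
# by timing in find_best_pfx_for_pitch_number), records numbers with no candidate as missing,
# and returns any rows that were never selected as out-of-sequence data.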
def get_prev_at_bat_id(self, at_bat_id):
try:
index = self.at_bat_ids.index(at_bat_id)
if index == 0:
return None
return self.at_bat_ids[index - 1]
except ValueError:
return None
def get_prev_pitch_thrown_time(self, at_bat_id):
if not at_bat_id:
return Result.Ok(self.game_start_time)
result = self.get_game_event(at_bat_id)
if result.failure:
return result
at_bat = result.value
if not at_bat:
return Result.Fail(f"No game event found with at bat id {at_bat_id}")
if not self.pitchfx_data_is_complete(at_bat):
return Result.Ok(self.game_start_time)
last_pitch_thrown_str = at_bat["pitchfx"][-1]["time_pitch_thrown_str"]
if not last_pitch_thrown_str:
return Result.Ok(self.game_start_time)
last_pitch_in_at_bat_thrown = datetime.strptime(last_pitch_thrown_str, DT_AWARE)
return Result.Ok(last_pitch_in_at_bat_thrown)
def get_pitch_number_dict(self, pfx_data):
pitch_number_dict = defaultdict(list)
for pfx in pfx_data:
pitch_number_dict[pfx["ab_count"]].append(pfx)
return pitch_number_dict
def find_best_pfx_for_pitch_number(self, ab_id, prev_ab_id, pfx_data, pitch_num, prev_pitch_thrown):
pitch_times = self.get_pitch_metrics_prev_at_bat(ab_id, prev_ab_id, pitch_num)
possible_pfx = []
for pfx in pfx_data:
pitch_delta = (pfx["time_pitch_thrown"] - prev_pitch_thrown).total_seconds()
if pitch_delta < 0 or pitch_delta < int(pitch_times["min"]) or pitch_delta > int(pitch_times["max"]):
continue
possible_pfx.append(pfx)
if not possible_pfx:
pfx_data.sort(key=lambda x: (-x["has_zone_location"], x["seconds_since_game_start"]))
return Result.Ok(pfx_data[0])
if len(possible_pfx) == 1:
return Result.Ok(possible_pfx[0])
deltas = [
{
"pfx": pfx,
"has_zone_location": pfx["has_zone_location"],
"delta": self.delta_avg_time_between_pitches(
pitch_times["avg"], prev_pitch_thrown, pfx["time_pitch_thrown"]
),
}
for pfx in possible_pfx
]
deltas.sort(key=lambda x: (-x["has_zone_location"], x["delta"]))
return Result.Ok(deltas[0]["pfx"])
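# Selection heuristic used above: candidates thrown outside the expected min/max window since the
# previous pitch are discarded; among the survivors, rows with a zone location are preferred, with
# ties broken by the smallest deviation from the average time between pitches. If nothing falls
# inside the window, the earliest candidate (again preferring one with a zone location) is used.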
def get_pitch_metrics_prev_at_bat(self, at_bat_id, prev_ab_id, pitch_num):
if not prev_ab_id:
return {
"avg": self.avg_pitch_times["time_between_pitches"]["avg"],
"min": self.avg_pitch_times["time_between_pitches"]["min"],
"max": self.avg_pitch_times["time_between_pitches"]["max"],
}
same_inning = self.at_bat_ids_are_in_same_inning([at_bat_id, prev_ab_id])
if pitch_num == 1 and same_inning:
return {
"avg": self.avg_pitch_times["time_between_at_bats"]["avg"],
"min": self.avg_pitch_times["time_between_at_bats"]["min"],
"max": self.avg_pitch_times["time_between_at_bats"]["max"],
}
if pitch_num == 1:
return {
"avg": self.avg_pitch_times["time_between_innings"]["avg"],
"min": self.avg_pitch_times["time_between_innings"]["min"],
"max": self.avg_pitch_times["time_between_innings"]["max"],
}
return {
"avg": self.avg_pitch_times["time_between_pitches"]["avg"],
"min": self.avg_pitch_times["time_between_pitches"]["min"],
"max": self.avg_pitch_times["time_between_pitches"]["max"],
}
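# Timing window selection above: the default "time_between_pitches" stats are used when there is
# no previous at bat or for any pitch after the first; "time_between_at_bats" covers the first
# pitch of an at bat in the same inning as the previous one; "time_between_innings" covers the
# first pitch of a new inning.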
def delta_avg_time_between_pitches(self, avg, pitch1_thrown, pitch2_thrown):
return abs(avg - (pitch2_thrown - pitch1_thrown).total_seconds())
def get_game_event(self, at_bat_id):
matches = [event for event in self.game_events_combined_data if event["at_bat_id"] == at_bat_id]
if not matches:
return Result.Ok(None)
if len(matches) > 1:
return Result.Fail(f"Found {len(matches)} at bats with the same id: {at_bat_id}")
return Result.Ok(matches[0])
def pitchfx_data_is_complete(self, game_event):
pitchfx_audit = game_event["at_bat_pitchfx_audit"]
return (
pitchfx_audit["pitch_count_pitchfx"] > 0
and pitchfx_audit["pitch_count_bbref"] == pitchfx_audit["pitch_count_pitchfx"]
and not pitchfx_audit["pitchfx_error"]
)
def at_bat_ids_are_in_same_inning(self, at_bat_ids):
inning_list = {validate_at_bat_id(ab_id).value["inning_id"] for ab_id in at_bat_ids}
return len(inning_list) == 1
def update_out_of_sequence_pfx(self, out_of_sequence_pfx):
for pfx in out_of_sequence_pfx:
pfx["is_out_of_sequence"] = True
return deepcopy(sorted(out_of_sequence_pfx, key=lambda x: (x["ab_id"], x["ab_count"])))
def _update_at_bat_result_stats(self, final_pitch_of_ab, game_event):
final_pitch_of_ab["is_final_pitch_of_ab"] = True
at_bat_result = final_pitch_of_ab["des"].lower()
if at_bat_result in AT_BAT_RESULTS_UNCLEAR:
final_pitch_of_ab["ab_result_unclear"] = True
final_pitch_of_ab["pbp_play_result"] = game_event["play_description"]
final_pitch_of_ab["pbp_runs_outs_result"] = game_event["runs_outs_result"]
if at_bat_result in AT_BAT_RESULTS_OUT:
final_pitch_of_ab["ab_result_out"] = True
if at_bat_result in AT_BAT_RESULTS_HIT:
final_pitch_of_ab["ab_result_hit"] = True
if at_bat_result == "single":
final_pitch_of_ab["ab_result_single"] = True
if at_bat_result == "double":
final_pitch_of_ab["ab_result_double"] = True
if at_bat_result == "triple":
final_pitch_of_ab["ab_result_triple"] = True
if at_bat_result == "home run":
final_pitch_of_ab["ab_result_homerun"] = True
if at_bat_result in AT_BAT_RESULTS_WALK:
final_pitch_of_ab["ab_result_bb"] = True
if at_bat_result == "intent walk":
final_pitch_of_ab["ab_result_ibb"] = True
if at_bat_result in AT_BAT_RESULTS_STRIKEOUT:
final_pitch_of_ab["ab_result_k"] = True
if at_bat_result in AT_BAT_RESULTS_HBP:
final_pitch_of_ab["ab_result_hbp"] = True
if at_bat_result in AT_BAT_RESULTS_ERROR:
final_pitch_of_ab["ab_result_error"] = True
if at_bat_result in AT_BAT_RESULTS_SAC_HIT:
final_pitch_of_ab["ab_result_sac_hit"] = True
if at_bat_result in AT_BAT_RESULTS_SAC_FLY:
final_pitch_of_ab["ab_result_sac_fly"] = True
def get_at_bat_duration(self, pfx_data):
if not pfx_data:
return (0, 0, None, None)
since_game_start = pfx_data[0]["seconds_since_game_start"]
first_pitch_thrown = pfx_data[0]["time_pitch_thrown"]
last_pitch_thrown = pfx_data[-1]["time_pitch_thrown"]
if not first_pitch_thrown or not last_pitch_thrown:
return (since_game_start, 0, None, None)
at_bat_duration = int((last_pitch_thrown - first_pitch_thrown).total_seconds())
first_pitch_thrown_str = first_pitch_thrown.strftime(DT_AWARE)
last_pitch_thrown_str = last_pitch_thrown.strftime(DT_AWARE)
return (since_game_start, at_bat_duration, first_pitch_thrown_str, last_pitch_thrown_str)
def get_pitch_app_id_from_at_bat_id(self, at_bat_id):
return validate_at_bat_id(at_bat_id).value["pitch_app_id"]
def prepare_pfx_data_for_json_serialization(self, pfx_data):
for pfx_dict in pfx_data:
pfx_dict.pop("game_start_time", None)
pfx_dict.pop("time_pitch_thrown", None)
def describe_at_bat(self, at_bat_id, final_event_in_at_bat, pfx_data, missing_pitch_numbers):
if missing_pitch_numbers:
pfx_data = None
pitch_sequence = final_event_in_at_bat["pitch_sequence"]
result = self.get_total_pitches_in_sequence(pitch_sequence)
if result.failure:
return result
total_pitches = result.value
non_batter_events = self.get_all_other_events_for_at_bat(at_bat_id, final_event_in_at_bat)
current_pitch = 0
next_pitch_blocked_by_c = False
sequence_description = []
for abbrev in pitch_sequence:
pitch_number = ""
pfx_des = ""
blocked_by_c = ""
result = self.get_pitch_type(abbrev)
if result.failure:
return result
pitch_type = result.value
outcome = pitch_type["description"]
if abbrev == "*":
next_pitch_blocked_by_c = True
continue
if pitch_type["pitch_counts"]:
current_pitch += 1
space_count = 1 if total_pitches < 10 or current_pitch >= 10 else 2
pitch_number = f"Pitch{' '*space_count}{current_pitch}/{total_pitches}"
if pfx_data:
pfx = pfx_data[current_pitch - 1]
if abbrev == "X":
outcome = pfx["pdes"] if "missing_pdes" not in pfx["pdes"] else pfx["des"]
pitch_type = PitchType.from_abbrev(pfx["mlbam_pitch_name"])
pfx_des = f'{pfx["start_speed"]:02.0f}mph {pitch_type.print_name}'
if next_pitch_blocked_by_c:
blocked_by_c = "\n(pitch was blocked by catcher)"
next_pitch_blocked_by_c = False
sequence_description.append((pitch_number, f"{outcome}{blocked_by_c}", pfx_des))
continue
if abbrev == ".":
outcome = self.get_next_event_description(non_batter_events, outcome)
sequence_description.append(("", outcome, ""))
while any(not event_dict["processed"] for event_dict in non_batter_events.values()):
outcome = self.get_next_event_description(non_batter_events)
if outcome:
sequence_description.append(("", outcome, ""))
outcome = replace_char_with_newlines(final_event_in_at_bat["play_description"], ";")
sequence_description.append(("Result", outcome, ""))
return Result.Ok(sequence_description)
def get_all_other_events_for_at_bat(self, at_bat_id, final_event_this_at_bat):
all_other_events = list(self.at_bat_event_groups[at_bat_id])
all_other_events.sort(key=lambda x: x["pbp_table_row_number"])
all_other_events.remove(final_event_this_at_bat)
if not all_other_events:
return {}
non_batter_events = OrderedDict()
for num, event in enumerate(all_other_events, start=1):
non_batter_events[num] = {
"processed": False,
"event": event,
}
return non_batter_events
def get_next_event_description(self, non_batter_events, default_outcome=""):
outcome = default_outcome
for event_dict in non_batter_events.values():
if not event_dict["processed"]:
event = event_dict["event"]
outcome = (
f'({event["play_description"]})'
if event["event_type"] == "AT_BAT"
else f'({event["sub_description"]})'
if event["event_type"] == "SUBSTITUTION"
else f'({event["description"]})'
)
event_dict["processed"] = True
break
return outcome.strip(".")
def save_invalid_pitchfx(self, at_bat_ids_invalid_pfx):
self.invalid_pitchfx = defaultdict(dict)
for ab_id in self.order_at_bat_ids_by_park_sv_id(at_bat_ids_invalid_pfx):
inning_id = get_inning_id_from_at_bat_id(ab_id)
id_dict = db.PlayerId.get_player_ids_from_at_bat_id(self.db_session, ab_id)
pfx_data = self.get_all_pfx_data_for_at_bat(ab_id)
pfx_ab_id = 0
pfx_ab_id_list = list({pfx["ab_id"] for pfx in pfx_data})
if pfx_ab_id_list and len(pfx_ab_id_list) == 1:
pfx_ab_id = pfx_ab_id_list[0]
pitch_count = max(pfx["ab_total"] for pfx in pfx_data)
out_of_sequence_pfx = []
missing_pitch_numbers = []
result = self.determine_pfx_sequence(ab_id, pfx_data, pitch_count)
if result.success:
(pfx_data, out_of_sequence_pfx, missing_pitch_numbers) = result.value
self.prepare_pfx_data_for_json_serialization(pfx_data)
if len(out_of_sequence_pfx) > 0:
self.prepare_pfx_data_for_json_serialization(out_of_sequence_pfx)
at_bat_data = {
"at_bat_id": ab_id,
"pfx_ab_id": pfx_ab_id,
"inning_id": inning_id,
"pitch_app_id": self.get_pitch_app_id_from_at_bat_id(ab_id),
"pitcher_id_bbref": id_dict["pitcher_id_bbref"],
"pitcher_id_mlb": id_dict["pitcher_id_mlb"],
"pitcher_name": id_dict["pitcher_name"],
"batter_id_bbref": id_dict["batter_id_bbref"],
"batter_id_mlb": id_dict["batter_id_mlb"],
"batter_name": id_dict["batter_name"],
"at_bat_pitchfx_audit": {
"pitch_count_bbref": 0,
"pitch_count_pitchfx": len(pfx_data),
"patched_pitchfx_count": len([pfx for pfx in pfx_data if pfx["is_patched"]]),
"missing_pitchfx_count": len(missing_pitch_numbers),
"removed_pitchfx_count": len(out_of_sequence_pfx),
"missing_pitch_numbers": missing_pitch_numbers,
"pitchfx_error": result.error if result.failure else None,
"pitchfx_error_message": result.error,
},
"pitchfx": pfx_data,
"removed_pitchfx": out_of_sequence_pfx,
}
self.invalid_pitchfx[inning_id][ab_id] = at_bat_data
def save_removed_pfx(self, removed_pfx_dict):
self.all_removed_pfx = defaultdict(dict)
for ab_id, removed_pfx in removed_pfx_dict.items():
inning_id = get_inning_id_from_at_bat_id(ab_id)
id_dict = db.PlayerId.get_player_ids_from_at_bat_id(self.db_session, ab_id)
self.prepare_pfx_data_for_json_serialization(removed_pfx)
at_bat_data = {
"at_bat_id": ab_id,
"inning_id": inning_id,
"pitch_app_id": self.get_pitch_app_id_from_at_bat_id(ab_id),
"pitcher_id_bbref": id_dict["pitcher_id_bbref"],
"pitcher_id_mlb": id_dict["pitcher_id_mlb"],
"pitcher_name": id_dict["pitcher_name"],
"batter_id_bbref": id_dict["batter_id_bbref"],
"batter_id_mlb": id_dict["batter_id_mlb"],
"batter_name": id_dict["batter_name"],
"pitchfx": removed_pfx,
}
self.all_removed_pfx[inning_id][ab_id] = at_bat_data
def update_boxscore_with_combined_data(self):
updated_innings_list = [self.update_inning_with_combined_data(inning) for inning in self.boxscore.innings_list]
(pitch_stats_away, pitch_stats_home) = self.update_all_pitch_stats()
(bat_stats_away, bat_stats_home) = self.update_all_bat_stats()
away_team_data = self.boxscore.away_team_data.as_dict()
away_team_data.pop("__bbref_boxscore_team_data__", None)
away_team_data.pop("batting_stats", None)
away_team_data.pop("pitching_stats", None)
away_team_data["batting_stats"] = bat_stats_away
away_team_data["pitching_stats"] = pitch_stats_away
home_team_data = self.boxscore.home_team_data.as_dict()
home_team_data.pop("__bbref_boxscore_team_data__", None)
home_team_data.pop("batting_stats", None)
| |
len(x) - 1:
break
update_1 = False
update_2 = False
update_3 = False
change_1_0 = max(0, status_1[0] - 1)
change_1_1 = max(0, status_1[1] - 1)
change_2_0 = max(0, (status_2[0] - 1) if status_1[1] != 0 else status_2[0])
change_2_1 = max(0, (status_2[1] - 1) if status_1[1] != 0 else status_2[1])
change_3_0 = max(0, (status_3[0] - 1) if status_2[1] != 0 else status_3[0])
change_3_1 = max(0, (status_3[1] - 1) if status_2[1] != 0 else status_3[1])
status_1[0] = change_1_0
status_1[1] = change_1_1
status_2[0] = change_2_0
status_2[1] = change_2_1
status_3[0] = change_3_0
status_3[1] = change_3_1
idx += 1
if status_1 == [0, 0]:
update_1 = True
if status_2 == [0, 0] and (status_1[1] != 0 or update_1):
update_2 = True
if status_3 == [0, 0] and (status_1[1] != 0 or update_1) and (status_2[1] != 0 or update_2):
update_3 = True
if update_1:
block_pattern[0].append(x[idx][0])
status_1 = [x[idx][0] + 1, 1 if x[idx][0] == 0 else x[idx][0]]
if update_2:
block_pattern[1].append(x[idx][1])
status_2 = [x[idx][1] + 1, 1 if x[idx][1] == 0 else x[idx][1]]
if update_3:
block_pattern[2].append(x[idx][2])
status_3 = [x[idx][2] + 1, 1 if x[idx][2] == 0 else x[idx][2]]
return block_pattern
def block_pattern_to_seq(block_pattern):
x = []
idx_1 = 0
idx_2 = 0
idx_3 = 0
matching = [0]
matching_counter = 0
status_1 = [block_pattern[0][idx_1] + 1, 1 if block_pattern[0][idx_1] == 0 else block_pattern[0][idx_1]]
status_2 = [block_pattern[1][idx_2] + 1, 1 if block_pattern[1][idx_2] == 0 else block_pattern[1][idx_2]]
status_3 = [block_pattern[2][idx_3] + 1, 1 if block_pattern[2][idx_3] == 0 else block_pattern[2][idx_3]]
while True:
x.append([block_pattern[0][idx_1], block_pattern[1][idx_2], block_pattern[2][idx_3]])
if idx_3 == len(block_pattern[2]) - 1:
break
matching_counter += 1
update_1 = False
update_2 = False
update_3 = False
change_1_0 = max(0, status_1[0] - 1)
change_1_1 = max(0, status_1[1] - 1)
change_2_0 = max(0, (status_2[0] - 1) if status_1[1] != 0 else status_2[0])
change_2_1 = max(0, (status_2[1] - 1) if status_1[1] != 0 else status_2[1])
change_3_0 = max(0, (status_3[0] - 1) if status_2[1] != 0 else status_3[0])
change_3_1 = max(0, (status_3[1] - 1) if status_2[1] != 0 else status_3[1])
status_1[0] = change_1_0
status_1[1] = change_1_1
status_2[0] = change_2_0
status_2[1] = change_2_1
status_3[0] = change_3_0
status_3[1] = change_3_1
if status_1 == [0, 0]:
idx_1 += 1
update_1 = True
if status_2 == [0, 0] and (status_1[1] != 0 or update_1):
idx_2 += 1
update_2 = True
if status_3 == [0, 0] and (status_1[1] != 0 or update_1) and (status_2[1] != 0 or update_2):
idx_3 += 1
update_3 = True
matching.append(matching_counter)
if update_1:
status_1 = [block_pattern[0][idx_1] + 1, 1 if block_pattern[0][idx_1] == 0 else block_pattern[0][idx_1]]
if update_2:
status_2 = [block_pattern[1][idx_2] + 1, 1 if block_pattern[1][idx_2] == 0 else block_pattern[1][idx_2]]
if update_3:
status_3 = [block_pattern[2][idx_3] + 1, 1 if block_pattern[2][idx_3] == 0 else block_pattern[2][idx_3]]
return x, matching
def signals_to_bp(signals, n_Rwaves):
limit = np.where(np.array(signals[0]) == 1)[0][-1] + 1
candidate1 = []
candidate2 = []
candidate3 = []
counter = 0
sub1 = []
for i in range(limit):
if signals[0][i] == 1:
counter += 1
if i == limit - 1:
sub1.append(counter)
if signals[0][i] == 0:
sub1.append(counter)
counter = 0
if max(sub1) <= 7 and min(sub1) >= 1:
candidate1.append(sub1)
sub2 = []
for i in range(limit):
if i == limit - 1:
sub2.append(1)
break
if signals[0][i] == 1 and signals[0][i+1] == 0:
sub2.append(1)
if signals[0][i] == 1 and signals[0][i+1] == 1:
sub2.append(0)
if sub2 not in candidate1:
candidate1.append(sub2)
sub3 = copy.deepcopy(sub2)
sub3[-1] = 0
if sub3 not in candidate1:
candidate1.append(sub3)
idx_1 = np.where(np.array(signals[0]) == 1)[0]
counter = 0
sub1 = []
vary = False
for i in range(len(idx_1)):
if signals[1][idx_1[i]] == 1:
counter += 1
if i == len(idx_1) - 1:
sub1.append(counter)
vary = True
if signals[1][idx_1[i]] == 0:
sub1.append(counter)
counter = 0
if not vary:
if max(sub1) <= 7 and min(sub1) >= 1:
candidate2.append(sub1)
if vary:
if len(sub1) > 1 and max(sub1) <= 7 and min(sub1) >= 1:
low_limit = np.amax([1, sub1[-2] - 3])
up_limit = np.amin([7, sub1[-2] + 3])
valid_range = np.linspace(
low_limit, up_limit, up_limit - low_limit + 1, dtype='int16')
for val in valid_range:
if val >= sub1[-1]:
sub_alt = copy.deepcopy(sub1[:-1])
sub_alt += [val]
candidate2.append(sub_alt)
if len(sub1) == 1 and max(sub1) <= 7 and min(sub1) >= 1:
low_limit = sub1[0]
up_limit = 7
valid_range = np.linspace(
low_limit, up_limit, up_limit - low_limit + 1, dtype='int16')
for val in valid_range:
sub_alt = copy.deepcopy(sub1[:-1])
sub_alt += [val]
candidate2.append(sub_alt)
sub2 = []
alt = True
for i in range(len(idx_1)):
if i == len(idx_1) - 1 and signals[1][idx_1[i]] == 1:
sub2.append(1)
break
if i == len(idx_1) - 1 and signals[1][idx_1[i]] == 0:
alt = False
break
if signals[1][idx_1[i]] == 1 and signals[1][idx_1[i+1]] == 0:
sub2.append(1)
if signals[1][idx_1[i]] == 1 and signals[1][idx_1[i+1]] == 1:
sub2.append(0)
if sub2 not in candidate2:
candidate2.append(sub2)
if alt:
sub3 = copy.deepcopy(sub2)
sub3[-1] = 0
if sub3 not in candidate2:
candidate2.append(sub3)
idx_2 = np.where(np.array(signals[1]) == 1)[0]
sub2 = []
alt = True
for i in range(len(idx_2)):
if i == len(idx_2) - 1 and signals[2][idx_2[i]] == 1:
sub2.append(1)
break
if i == len(idx_2) - 1 and signals[2][idx_2[i]] == 0:
alt = False
break
if signals[2][idx_2[i]] == 1 and signals[2][idx_2[i+1]] == 0:
sub2.append(1)
if signals[2][idx_2[i]] == 1 and signals[2][idx_2[i+1]] == 1:
sub2.append(0)
if sub2 not in candidate3:
candidate3.append(sub2)
if alt:
sub3 = copy.deepcopy(sub2)
sub3[-1] = 0
if sub3 not in candidate3:
candidate3.append(sub3)
res = []
for i in range(len(candidate1)):
for j in range(len(candidate2)):
for k in range(len(candidate3)):
bp, bp_type = check_block_pattern_alt(
[candidate1[i], candidate2[j], candidate3[k]], n_Rwaves)
if len(bp) != 0:
res.append((bp, bp_type))
return res
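# signals_to_bp appears to work in two stages: it first derives candidate block patterns for each
# of the three conduction levels directly from the binary signals (allowing a few alternatives
# where the trailing block is ambiguous), then tries every combination and keeps only those that
# check_block_pattern_alt accepts for the given number of R waves, returning (pattern, type) pairs.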
def correct_bp(bp, bp_type, n_Rwaves):
bp_res = copy.deepcopy(bp)
if bp_type == "1":
if sum(bp_res[1]) > n_Rwaves:
bp_res[1][-1] -= abs(sum(bp_res[1]) - n_Rwaves)
if bp_type == "2a":
if sum(bp_res[1]) > n_Rwaves:
bp_res[1][-1] -= abs(sum(bp_res[1]) - n_Rwaves)
if bp_type == "2b":
if sum(bp_res[1]) == 2 * n_Rwaves and len(bp_res[0]) == 2 * n_Rwaves - 1 + len(bp_res[1]) - 1:
bp_res[1][-1] -= 1
return bp_res
if sum(bp_res[1]) > 2 * n_Rwaves:
if sum(bp_res[1]) - bp_res[1][-1] < 2 * n_Rwaves - 1:
if len(bp_res[0]) == 2 * n_Rwaves + len(bp_res[1]) - 1:
bp_res[1][-1] -= abs(sum(bp_res[1]) - 2 * n_Rwaves)
return bp_res
if len(bp_res[0]) == 2 * n_Rwaves - 1 + len(bp_res[1]) - 1:
bp_res[1][-1] -= abs(sum(bp_res[1]) - (2 * n_Rwaves - 1))
return bp_res
if sum(bp_res[1]) - bp_res[1][-1] == 2 * n_Rwaves - 1:
bp_res[1][-1] -= abs(sum(bp_res[1]) - 2 * n_Rwaves)
return bp_res
def bp_to_signals(bp, bp_type, n_Rwaves, fill=True):
if bp_type == "1":
lvl1 = []
for b in bp[0]:
lvl1 += [1]
lvl2 = []
for b in bp[1]:
lvl2 += [1 for i in range(b)] + [0]
lvl3 = []
for b in bp[2]:
lvl3 += [1]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "2a":
lvl1 = []
for b in bp[0]:
lvl1 += [1 for i in range(b)] + [0]
lvl2 = []
for b in bp[1]:
lvl2 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl1) == 0)[0]
for idx_i in idx:
lvl2.insert(idx_i, 0)
lvl3 = []
for b in bp[2]:
lvl3 += [1]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "2b":
lvl1 = []
for b in bp[0]:
lvl1 += [1]
lvl2 = []
for b in bp[1]:
lvl2 += [1 for i in range(b)] + [0]
lvl3 = []
for b in bp[2]:
lvl3 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == "2c":
lvl1 = []
for b in bp[0]:
lvl1 += [1 for i in range(b)] + [0]
lvl2 = []
for b in bp[1]:
if b == 0:
lvl2 += [1]
else:
lvl2 += [1 for i in range(b)] + [0]
idx = np.where(np.array(lvl1) == 0)[0]
for idx_i in idx:
lvl2.insert(idx_i, 0)
lvl3 = []
for b in bp[2]:
lvl3 += [1]
idx = np.where(np.array(lvl2) == 0)[0]
for idx_i in idx:
lvl3.insert(idx_i, 0)
if bp_type == | |
import discord
from discord.ext import commands
from discord.ext.commands import cooldown,BucketType
from aiohttp import request
from discord.ext.commands import MemberConverter
import aiohttp
import asyncio
import wikipedia
from howdoi import howdoi
import base64
import urllib.parse
from cogs.usefullTools.dbIntegration import *
from googletrans import Translator
from platform import python_version
import psutil
from psutil import Process, virtual_memory
from datetime import datetime, timedelta
from time import time
class GeneralCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Avatar fetcher
@commands.command(aliases=['av'])
@cooldown(1, 5, BucketType.channel)
async def avatar(self, ctx, member, override=None):
if member[0] == '<' and member[1] == '@':
converter = MemberConverter()
member = await converter.convert(ctx, member)
elif member.isdigit():
member = int(member)
else:
pass
members = await ctx.guild.fetch_members().flatten()
multiple_member_array = []
if isinstance(member, discord.Member):
for members_list in members:
if member.name.lower() in members_list.name.lower():
multiple_member_array.append(members_list)
else:
pass
elif isinstance(member, int):
for member_list in members:
if member_list.id == member:
multiple_member_array.append(member_list)
else:
pass
else:
for members_list in members:
if member.lower() in members_list.name.lower():
multiple_member_array.append(members_list)
else:
pass
if isinstance(member, str):  # original test "member is discord.Member" could never be true
if member.lower() == 'me' and override == 'override':  # apparent intent: "$avatar me override" shows the caller's own avatar
embed = discord.Embed(colour=0x0000ff)
embed.set_image(url=f'{ctx.author.avatar_url}')
await ctx.send(embed=embed)
elif len(multiple_member_array) == 1:
if multiple_member_array[0].name == multiple_member_array[0].display_name:
embed = discord.Embed(title=f'{multiple_member_array[0]}',colour=0x0000ff)
elif multiple_member_array[0].name != multiple_member_array[0].display_name:
embed = discord.Embed(title=f'{multiple_member_array[0]}({multiple_member_array[0].display_name})',colour=0x0000ff)
embed.set_image(url=f'{multiple_member_array[0].avatar_url}')
await ctx.send(embed=embed)
elif len(multiple_member_array) > 1:
multiple_member_array_duplicate_array = []
for multiple_member_array_duplicate in multiple_member_array:
if len(multiple_member_array_duplicate_array) < 10:
multiple_member_array_duplicate_array.append(multiple_member_array_duplicate.name)
else:
break
embed = discord.Embed(
title=f'Search for {member}\nFound multiple results (Max 10)',
description=f'\n'.join(multiple_member_array_duplicate_array),
colour=0x808080
)
await ctx.send(embed=embed)
else:
await ctx.send(f'The member `{member}` does not exist!')
# Avatar fetcher: Error handling
@avatar.error
async def avatar_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
embed = discord.Embed(colour=0x0000ff)
embed.set_image(url=f'{ctx.author.avatar_url}')
await ctx.send(embed=embed)
elif isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Userinfo
@commands.command(aliases=['ui'])
@cooldown(1, 5, BucketType.channel)
async def userinfo(self, ctx, member):
if member[0] == '<' and member[1] == '@':
converter = MemberConverter()
member = await converter.convert(ctx, member)
elif member.isdigit():
member = int(member)
members = await ctx.guild.fetch_members().flatten()
multiple_member_array = []
if isinstance(member, discord.Member):
for members_list in members:
if member.name.lower() in members_list.name.lower():
multiple_member_array.append(members_list)
else:
pass
elif isinstance(member, int):
for member_list in members:
if member_list.id == member:
multiple_member_array.append(member_list)
else:
pass
else:
for members_list in members:
if member.lower() in members_list.name.lower():
multiple_member_array.append(members_list)
else:
pass
if len(multiple_member_array) == 1:
roles = []
for role in multiple_member_array[0].roles:
roles.append(role)
embed = discord.Embed(
colour = 0x0000ff,
)
embed.set_author(name=f'User Info - {multiple_member_array[0]}')
embed.set_thumbnail(url=multiple_member_array[0].avatar_url)
embed.set_footer(text='made by CABREX with ❤')
embed.add_field(name='ID:', value=multiple_member_array[0].id)
embed.add_field(name='Member Name:', value=multiple_member_array[0])
embed.add_field(name='Member Nickname:', value=multiple_member_array[0].display_name)
embed.add_field(name='Created at: ', value=multiple_member_array[0].created_at.strftime('%a, %#d %B %Y, %I:%M %p UTC'))
embed.add_field(name='Joined at:', value=multiple_member_array[0].joined_at.strftime('%a, %#d %B %Y, %I:%M %p UTC'))
if len(roles) == 1:
embed.add_field(name=f'Roles ({len(roles) - 1})', value='**NIL**')
else:
embed.add_field(name=f'Roles ({len(roles) - 1})', value=' '.join([role.mention for role in roles if role.name != '@everyone']))
embed.add_field(name='Bot?', value=multiple_member_array[0].bot)
await ctx.send(embed=embed)
elif len(multiple_member_array) > 1:
multiple_member_array_duplicate_array = []
for multiple_member_array_duplicate in multiple_member_array:
if len(multiple_member_array_duplicate_array) < 10:
multiple_member_array_duplicate_array.append(multiple_member_array_duplicate.name)
else:
break
embed = discord.Embed(
title=f'Search for {member}\nFound multiple results (Max 10)',
description=f'\n'.join(multiple_member_array_duplicate_array),
colour=0x808080
)
await ctx.send(embed=embed)
else:
await ctx.send(f'The member `{member}` does not exist!')
# Userinfo: Error handling
@userinfo.error
async def userinfo_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send('```\n$userinfo {member_name}\n ^^^^^^^^^^^^^\nMissing Required Argument member_name\n```')
elif isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
elif isinstance(error, discord.errors.Forbidden):
await ctx.send('I am Forbidden from doing this command, please check if `server members intent` is enabled')
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check the console for traceback')
raise error
# Server info
@commands.command(aliases=['si'])
@cooldown(1, 4, BucketType.channel)
async def serverinfo(self, ctx):
count = 0
members = await ctx.guild.fetch_members().flatten()
for people in members:
if people.bot:
count = count + 1
else:
pass
embed = discord.Embed(
title = f'{ctx.guild.name} info',
colour = 0x0000ff
)
embed.set_thumbnail(url=ctx.guild.icon_url)
embed.add_field(name='Owner name:', value=f'<@{ctx.guild.owner_id}>')
embed.add_field(name='Server ID:', value=ctx.guild.id)
embed.add_field(name='Server region:', value=ctx.guild.region)
embed.add_field(name='Members:', value=ctx.guild.member_count)
embed.add_field(name='bots:', value=count)
embed.add_field(name='Humans:', value=ctx.guild.member_count - count)
embed.add_field(name='Number of roles:', value=len(ctx.guild.roles))
embed.add_field(name='Number of boosts:', value=ctx.guild.premium_subscription_count)
embed.add_field(name='Text Channels:', value=len(ctx.guild.text_channels))
embed.add_field(name='Voice Channels:', value=len(ctx.guild.voice_channels))
embed.add_field(name='Categories:', value=len(ctx.guild.categories))
embed.add_field(name='Created On:', value=ctx.guild.created_at.strftime('%a, %#d %B %Y, %I:%M %p UTC'))
await ctx.send(embed=embed)
# Serverinfo: Error handling
@serverinfo.error
async def serverinfo_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
raise error
else:
await ctx.send(f"An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX.")
raise error
# Servercount
@commands.command(name='servercount', aliases=['sc'])
@cooldown(1, 1, BucketType.channel)
async def servercount(self, ctx):
member_count = 0
for guild in self.bot.guilds:
member_count += guild.member_count
await ctx.send(f'Present in `{len(self.bot.guilds)}` servers, moderating `{member_count}` members')
# Servercount: Error handling
@servercount.error
async def sc_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Wikipedia support
@commands.command(name='wikipedia', aliases=['whatis', 'wiki'])
@cooldown(1, 2,BucketType.channel)
async def wiki(self, ctx, *, query=None):
if query is not None:
r = wikipedia.page(query)
embed = discord.Embed(
title = r.title,
description = r.summary[0 : 2000],
colour = 0x808080
)
async with ctx.typing():
await asyncio.sleep(2)
await ctx.send(embed=embed)
else:
await ctx.send(f"Your query is empty {ctx.author.mention}!\nEnter something!")
# Wikipedia: Error handling
@wiki.error
async def wiki_error(self, ctx, error):
if isinstance(error, wikipedia.exceptions.DisambiguationError):
await ctx.send(f'There are many articles that match your query, please be more specific {ctx.author.mention}')
elif isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
else:
await ctx.send(f'An error has occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Howdoi stackoverflow API
@commands.command(name='howdoi')
@cooldown(1, 2, BucketType.channel)
async def howdoi(self, ctx, *, query=None):
if query is not None:
parser = howdoi.get_parser()
arguments = vars(parser.parse_args(query.split(' ')))
embed = discord.Embed(
title = f'how to {query}',
description = howdoi.howdoi(arguments)
)
async with ctx.typing():
await asyncio.sleep(2)
await ctx.channel.send(embed=embed)
else:
await ctx.send(f'Your query is empty, please ask a question {ctx.author.mention}')
# Howdoi: Error Handling
@howdoi.error
async def howdoi_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check the console for traceback')
raise error
# Morse code cypher
@commands.command(name='cypher', aliases=['morse'])
@cooldown(1, 2, BucketType.channel)
async def cypher(self, ctx, *, message):
MORSE_DICT = { 'A':'.-', 'B':'-...',
'C':'-.-.', 'D':'-..', 'E':'.',
'F':'..-.', 'G':'--.', 'H':'....',
'I':'..', 'J':'.---', 'K':'-.-',
'L':'.-..', 'M':'--', 'N':'-.',
'O':'---', 'P':'.--.', 'Q':'--.-',
'R':'.-.', 'S':'...', 'T':'-',
'U':'..-', 'V':'...-', 'W':'.--',
'X':'-..-', 'Y':'-.--', 'Z':'--..',
'1':'.----', '2':'..---', '3':'...--',
'4':'....-', '5':'.....', '6':'-....',
'7':'--...', '8':'---..', '9':'----.',
'0':'-----', ', ':'--..--', '.':'.-.-.-',
'?':'..--..', '/':'-..-.', '-':'-....-',
'(':'-.--.', ')':'-.--.-'}
cipher = ''
for letter in message.upper():
if letter != ' ':
cipher += MORSE_DICT[letter] + ' '
else:
cipher += ' '
await ctx.send(f'Here is your cyphered text:\n```\n{cipher}\n```')
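# Example invocation (assuming the "$" prefix used in this cog's help text):
# "$cypher sos" replies with "... --- ..." since S is "..." and O is "---" in MORSE_DICT above.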
# Morse code cypher: Error handling
@cypher.error
async def cypher_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
elif isinstance(error, commands.BadArgument):
await ctx.send('What do you want to cypher?')
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Base64 encoding
@commands.command(name='base64')
@cooldown(1, 2, BucketType.channel)
async def base64(self, ctx, message, iterations=1):
if iterations <= 20:
message_bytecode = message.encode('ascii')
for i in range(iterations):
message_bytecode = base64.b64encode(message_bytecode)
base64_message = message_bytecode.decode('ascii')
await ctx.send(f'Here is the base64 encoded version encoded {iterations} time(s):\n```\n{base64_message}\n```')
else:
await ctx.send(f"Maximum number of iterations possible are 20, **{iterations}** number of ierations not allowed")
# Base64 encoding: Error handling
@base64.error
async def base64_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send('What are the arguments?')
elif isinstance(error, commands.BadArgument):
await ctx.send("Please enter your text to be encode in quotes")
elif isinstance(error, base64.binascii.Error):
await ctx.send("Please enter a valid base64 encoded message to decrypt {ctx.author.display_name}")
elif isinstance(error, commands.ExpectedClosingQuoteError):
await ctx.send("You didnt close the quotes!")
elif isinstance(error, commands.InvalidEndOfQuotedStringError):
await ctx.send("Too many quotes!")
elif isinstance(error, commands.UnexpectedQuoteError):
await ctx.send("Unexpected quote in non-quoted string")
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Base64 decoding
@commands.command(name='dbase64')
@cooldown(1, 2, BucketType.channel)
async def base64_decode(self, ctx, message):
message_bytecode = message.encode('ascii')
decode_bytecode = base64.b64decode(message_bytecode)
base64_message = decode_bytecode.decode('ascii')
await ctx.send(f'Here is the base64 decoded version:\n```\n{base64_message}\n```')
# Base64 decoding: Error handling
@base64_decode.error
async def base64_decode_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send('What are the arguments?')
elif isinstance(error, commands.BadArgument):
await ctx.send("Please enter your text to be encode in quotes")
elif isinstance(error, base64.binascii.Error):  # binascii itself is never imported here, and base64.binascii.Error is the same class
await ctx.send("Please enter a valid base64 encoded message to decrypt {ctx.author.display_name}")
elif isinstance(error, UnicodeDecodeError):
await ctx.send("Please enter a valid base64 encoded message to decrypt {ctx.author.display_name}")
elif isinstance(error, commands.ExpectedClosingQuoteError):
await ctx.send("You didnt close the quotes!")
elif isinstance(error, commands.InvalidEndOfQuotedStringError):
await ctx.send("Too many quotes!")
elif isinstance(error, commands.UnexpectedQuoteError):
await ctx.send("Unexpected quote in non-quoted string")
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# QR Code generator
@commands.command(name='qrcode')
@cooldown(1, 5, BucketType.channel)
async def qr_code_generator(self, ctx, *, message=None):
if message is not None:
embed = discord.Embed(
title = 'Here is your encoded text',
colour = 0x01a901
)
query = urllib.parse.quote(message, safe='')
url = f'http://api.qrserver.com/v1/create-qr-code/?data={query}'
embed.set_image(url=url)
await ctx.send(embed=embed)
else:
await ctx.send("Please enter a message to qrcode encode it")
# QR Code generator: Error handling
@qr_code_generator.error
async def qr_code_generator_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# QR Code reader
@commands.command(name='qrdecode')
@cooldown(1, 5, BucketType.channel)
async def qr_code_decode(self, ctx, message):
encoded_url = urllib.parse.quote(message, safe='')
url = f'http://api.qrserver.com/v1/read-qr-code/?fileurl={encoded_url}&format=json'
async with request("GET", url, headers={}) as response:
if response.status == 200:
data = await response.json()
symbol = data[0]["symbol"]
if symbol[0]["data"] is not None:
await ctx.send(f'Here is the decoded qr code:\n```\n{symbol[0]["data"]}\n```')
else:
await ctx.send(f'An error occured: **{symbol[0]["error"]}**')
# QR Code reader: Error handling
@qr_code_decode.error
async def qr_code_decode_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Translator
@commands.command(name='translate')
@cooldown(1, 5, BucketType.channel)
async def translator(self, ctx, source_language: str = 'en', destination_language: str = 'en', *, message):
translator = Translator()
translation = translator.translate(
message, dest=destination_language, src=source_language
)
embed = discord.Embed(
title="Translation",
description=f"Sentence : **{message}**\n\nTranslation : **{translation.text}**\n\nType : **{translation.src} > {translation.dest}**",
color=0x008000,
)
await ctx.send(embed=embed)
# Translator: Error handling
@translator.error
async def translator_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send("What do you want to translate?")
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Prefix changer
@commands.command(name='prefix')
@cooldown(1, 5, BucketType.guild)
@commands.has_permissions(administrator=True)
async def prefix(self, ctx, prefix: str):
if len(prefix) <= 4:
if not any(c.isdigit() for c in prefix):
insert_prefix(ctx.guild.id, prefix)
await ctx.send(f"Prefix of this server has been changed to **{prefix}** successfully!")
else:
await ctx.send("Integers are not allowed in prefixes")
else:
await ctx.send(f"A prefix must have only 4 or lesser charecters, **{len(prefix)}** is not allowed")
# Prefix changer Error handling
@prefix.error
async def prefix_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(error)
elif isinstance(error, commands.CheckFailure):
await ctx.send(f"Only administrators can use this command, {ctx.author.mention}")
elif isinstance(error, commands.MissingRequiredArgument):
prefix = fetch_prefix(ctx.guild.id)["prefix"]
await ctx.send(f"```\n{prefix}prefix <prefix>\n\nMissing required argument prefix\n```")
else:
await ctx.send(f'An error occured \n```\n{error}\n```\nPlease check console for traceback, or raise an issue to CABREX')
raise error
# Overwrite all guilds with r$ as prefix
@commands.command(name='overrideprefix')
@commands.has_permissions(administrator=True)
@cooldown(1, 10, BucketType.guild)
async def | |
# Repository: ibobak/shap
import pandas as pd
import numpy as np
import scipy as sp
import sys
import warnings
import copy
import operator
import sklearn
from slicer import Slicer, Alias, Obj
# from ._order import Order
from .utils._general import OpChain
# slicer confuses pylint...
# pylint: disable=no-member
op_chain_root = OpChain("shap.Explanation")
class MetaExplanation(type):
""" This metaclass exposes the Explanation object's methods for creating template op chains.
"""
def __getitem__(cls, item):
return op_chain_root.__getitem__(item)
@property
def abs(cls):
""" Element-wize absolute value op.
"""
return op_chain_root.abs
@property
def identity(cls):
""" A no-op.
"""
return op_chain_root.identity
@property
def argsort(cls):
""" Numpy style argsort.
"""
return op_chain_root.argsort
@property
def sum(cls):
""" Numpy style sum.
"""
return op_chain_root.sum
@property
def max(cls):
""" Numpy style max.
"""
return op_chain_root.max
@property
def min(cls):
""" Numpy style min.
"""
return op_chain_root.min
@property
def mean(cls):
""" Numpy style mean.
"""
return op_chain_root.mean
@property
def sample(cls):
""" Numpy style sample.
"""
return op_chain_root.sample
@property
def hclust(cls):
""" Hierarchial clustering op.
"""
return op_chain_root.hclust
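# The metaclass above lets op chains be built on the class itself before any data exists, e.g.
# shap.Explanation.abs.mean(0) yields an unevaluated OpChain template; Explanation.__getitem__
# below applies such a template to a concrete Explanation (via OpChain.apply) when it is used
# as an index.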
class Explanation(metaclass=MetaExplanation):
""" A slicable set of parallel arrays representing a SHAP explanation.
"""
def __init__( # pylint: disable=too-many-arguments
self,
values,
base_values=None,
data=None,
display_data=None,
instance_names=None,
feature_names=None,
output_names=None,
output_indexes=None,
lower_bounds=None,
upper_bounds=None,
error_std=None,
main_effects=None,
hierarchical_values=None,
clustering=None,
compute_time=None
):
self.op_history = []
self.compute_time = compute_time
# cloning. TODOsomeday: better cloning :)
if issubclass(type(values), Explanation):
e = values
values = e.values
base_values = e.base_values
data = e.data
self.output_dims = compute_output_dims(values, base_values, data, output_names)
values_shape = _compute_shape(values)
if output_names is None and len(self.output_dims) == 1:
output_names = [f"Output {i}" for i in range(values_shape[self.output_dims[0]])]
if len(_compute_shape(feature_names)) == 1: # TODOsomeday: should always be an alias once slicer supports per-row aliases
if len(values_shape) >= 1 and len(feature_names) == values_shape[0]:
feature_names = Alias(list(feature_names), 0)
elif len(values_shape) >= 2 and len(feature_names) == values_shape[1]:
feature_names = Alias(list(feature_names), 1)
if len(_compute_shape(output_names)) == 1: # TODOsomeday: should always be an alias once slicer supports per-row aliases
output_names = Alias(list(output_names), self.output_dims[0])
# if len(values_shape) >= 1 and len(output_names) == values_shape[0]:
# output_names = Alias(list(output_names), 0)
# elif len(values_shape) >= 2 and len(output_names) == values_shape[1]:
# output_names = Alias(list(output_names), 1)
if output_names is not None and not isinstance(output_names, Alias):
l = len(_compute_shape(output_names))
if l == 0:
pass
elif l == 1:
output_names = Obj(output_names, self.output_dims)
elif l == 2:
output_names = Obj(output_names, [0] + list(self.output_dims))
else:
raise ValueError("shap.Explanation does not yet support output_names of order greater than 3!")
self._s = Slicer(
values=values,
base_values=base_values if hasattr(base_values, "__len__") else Obj(base_values, [0] + list(self.output_dims)),
data=list_wrap(data),
display_data=list_wrap(display_data),
instance_names=None if instance_names is None else Alias(instance_names, 0),
feature_names=feature_names,
output_names=output_names,
output_indexes=None if output_indexes is None else (self.output_dims, output_indexes),
lower_bounds=list_wrap(lower_bounds),
upper_bounds=list_wrap(upper_bounds),
error_std=list_wrap(error_std),
main_effects=list_wrap(main_effects),
hierarchical_values=list_wrap(hierarchical_values),
clustering=None if clustering is None else Obj(clustering, [0])
)
@property
def shape(self):
""" Compute the shape over potentially complex data nesting.
"""
return _compute_shape(self._s.values)
@property
def values(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.values
@values.setter
def values(self, new_values):
self._s.values = new_values
@property
def base_values(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.base_values
@base_values.setter
def base_values(self, new_base_values):
self._s.base_values = new_base_values
@property
def data(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.data
@data.setter
def data(self, new_data):
self._s.data = new_data
@property
def display_data(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.display_data
@display_data.setter
def display_data(self, new_display_data):
if issubclass(type(new_display_data), pd.DataFrame):
new_display_data = new_display_data.values
self._s.display_data = new_display_data
@property
def instance_names(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.instance_names
@property
def output_names(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.output_names
@output_names.setter
def output_names(self, new_output_names):
self._s.output_names = new_output_names
@property
def output_indexes(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.output_indexes
@property
def feature_names(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.feature_names
@feature_names.setter
def feature_names(self, new_feature_names):
self._s.feature_names = new_feature_names
@property
def lower_bounds(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.lower_bounds
@property
def upper_bounds(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.upper_bounds
@property
def error_std(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.error_std
@property
def main_effects(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.main_effects
@main_effects.setter
def main_effects(self, new_main_effects):
self._s.main_effects = new_main_effects
@property
def hierarchical_values(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.hierarchical_values
@hierarchical_values.setter
def hierarchical_values(self, new_hierarchical_values):
self._s.hierarchical_values = new_hierarchical_values
@property
def clustering(self):
""" Pass-through from the underlying slicer object.
"""
return self._s.clustering
@clustering.setter
def clustering(self, new_clustering):
self._s.clustering = new_clustering
def cohorts(self, cohorts):
""" Split this explanation into several cohorts.
Parameters
----------
cohorts : int or array
If this is an integer then we auto build that many cohorts using a decision tree. If this is
an array then we treat that as an array of cohort names/ids for each instance.
"""
if isinstance(cohorts, int):
return _auto_cohorts(self, max_cohorts=cohorts)
if isinstance(cohorts, (list, tuple, np.ndarray)):
cohorts = np.array(cohorts)
return Cohorts(**{name: self[cohorts == name] for name in np.unique(cohorts)})
raise Exception("The given set of cohort indicators is not recognized! Please give an array or int.")
def __repr__(self):
""" Display some basic printable info, but not everything.
"""
out = ".values =\n"+self.values.__repr__()
if self.base_values is not None:
out += "\n\n.base_values =\n"+self.base_values.__repr__()
if self.data is not None:
out += "\n\n.data =\n"+self.data.__repr__()
return out
def __getitem__(self, item):
""" This adds support for OpChain indexing.
"""
if not isinstance(item, tuple):
item = (item,)
# convert any OpChains or magic strings
for pos, t in enumerate(item): # pylint: disable=too-many-nested-blocks
orig_t = t
if issubclass(type(t), OpChain):
t = t.apply(self)
if issubclass(type(t), (np.int64, np.int32)): # because slicer does not like numpy indexes
t = int(t)
elif issubclass(type(t), np.ndarray):
t = [int(v) for v in t] # slicer wants lists not numpy arrays for indexing
elif issubclass(type(t), Explanation):
t = t.values
elif isinstance(t, str):
# work around for 2D output_names since they are not yet slicer supported
output_names_dims = []
if "output_names" in self._s._objects:
output_names_dims = self._s._objects["output_names"].dim
if pos != 0 and pos in output_names_dims and len(output_names_dims) == 2:
new_values = []
new_base_values = []
new_data = []
new_self = copy.deepcopy(self)
for i, v in enumerate(self.values):
for j, s in enumerate(self.output_names[i]):
if s == t:
new_values.append(np.array(v[:,j]))
new_data.append(np.array(self.data[i]))
new_base_values.append(self.base_values[i][j])
new_self = copy.deepcopy(self)
new_self.values = np.array(new_values)
new_self.base_values = np.array(new_base_values)
new_self.data = np.array(new_data)
new_self.output_names = t
new_self.feature_names = np.array(new_data)
new_self.clustering = None
# work around for 2D feature_names since they are not yet slicer supported
feature_names_dims = []
if "feature_names" in self._s._objects:
feature_names_dims = self._s._objects["feature_names"].dim
if pos != 0 and pos in feature_names_dims and len(feature_names_dims) == 2:
new_values = []
new_data = []
for i, val_i in enumerate(self.values):
for s,v,d in zip(self.feature_names[i], val_i, self.data[i]):
if s == t:
new_values.append(v)
new_data.append(d)
new_self = copy.deepcopy(self)
new_self.values = new_values
new_self.data = new_data
new_self.feature_names = t
new_self.clustering = None
return new_self
if issubclass(type(t), (np.int8, np.int16, np.int32, np.int64)):
t = int(t)
if t is not orig_t:
tmp = list(item)
tmp[pos] = t
item = tuple(tmp)
# call slicer for the real work
new_self = copy.copy(self)
new_self._s = self._s.__getitem__(item)
new_self.op_history.append({
"name": "__getitem__",
"args": (item,),
"prev_shape": self.shape
})
return new_self
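    # Hedged indexing sketch (not from the original source): __getitem__ accepts
    # plain ints/slices as well as feature-name or output-name strings (handled
    # by the workarounds above). "shap_values" is a hypothetical name.
    #
    #   first_ten = shap_values[:10]          # first 10 explained instances
    #   one_feature = shap_values[:, "Age"]   # select a single feature by name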
def __len__(self):
return self.shape[0]
def __copy__(self):
new_exp = Explanation(
self.values,
self.base_values,
self.data,
self.display_data,
self.instance_names,
self.feature_names,
self.output_names,
self.output_indexes,
self.lower_bounds,
self.upper_bounds,
self.error_std,
self.main_effects,
self.hierarchical_values,
self.clustering
)
new_exp.op_history = copy.copy(self.op_history)
return new_exp
def _apply_binary_operator(self, other, binary_op, op_name):
new_exp = self.__copy__()
new_exp.op_history = copy.copy(self.op_history)
new_exp.op_history.append({
"name": op_name,
"args": (other,),
"prev_shape": self.shape
})
if isinstance(other, Explanation):
new_exp.values = binary_op(new_exp.values, other.values)
if new_exp.data is not None:
new_exp.data = binary_op(new_exp.data, other.data)
if new_exp.base_values is not None:
new_exp.base_values = binary_op(new_exp.base_values, other.base_values)
else:
new_exp.values = binary_op(new_exp.values, other)
if new_exp.data is not None:
new_exp.data = binary_op(new_exp.data, other)
if new_exp.base_values is not None:
new_exp.base_values = binary_op(new_exp.base_values, other)
return new_exp
def __add__(self, other):
return self._apply_binary_operator(other, operator.add, "__add__")
def __radd__(self, other):
return self._apply_binary_operator(other, operator.add, "__add__")
def __sub__(self, other):
return self._apply_binary_operator(other, operator.sub, "__sub__")
def __rsub__(self, other):
return self._apply_binary_operator(other, operator.sub, "__sub__")
def __mul__(self, other):
return self._apply_binary_operator(other, operator.mul, "__mul__")
def __rmul__(self, other):
return self._apply_binary_operator(other, operator.mul, "__mul__")
def __truediv__(self, other):
return self._apply_binary_operator(other, operator.truediv, "__truediv__")
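    # Hedged arithmetic sketch (not from the original source): the operator
    # overloads above allow elementwise math on Explanations, e.g. averaging two
    # hypothetical runs:
    #
    #   averaged = (shap_values_run1 + shap_values_run2) / 2
    #   shifted = shap_values_run1 - 0.5   # scalars broadcast over values/data/base_values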
# @property
# def abs(self):
# """ Element-size absolute value operator.
# """
# new_self = copy.copy(self)
# new_self.values = np.abs(new_self.values)
# new_self.op_history.append({
# "name": "abs",
# "prev_shape": self.shape
# })
# return new_self
def | |
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""dbutil.py - PostgreSQL database utilities for testing.
This module provides easy access to the test database server, and
provides a way to create, load, save and drop databases from this server.
It also provides wrapper classes for psycopg2 database connections that
implement utility queries as methods.
"""
from __future__ import absolute_import
import os
import sys
import logging
import random
import subprocess
import re
import psycopg2
#
# Root directory for test resources.
#
TEST_RESOURCES_ROOT = '/g/data1/v10/test_resources'
#
# Setup information for the test server. This might be better off loaded
# from a config file, but this will do for now. The password is kept
# in a .pgpass file to avoid saving it in versioned files. This is likely
# a better solution than recording the password either here or in a config
# file.
#
TESTSERVER_PARAMS = {
'name': 'test_server',
'host': '172.16.58.3',
'port': '6432',
'user': 'cube_tester',
'superuser': 'cube_admin'
}
#
# Database connection constants. These would be better off being defaults
# for items that can be overridden by a configuration file.
#
CONNECT_TIMEOUT = 60
MAINTENANCE_DB = 'postgres'
TEMPLATE_DB = 'template0'
USE_PGBOUNCER = True
PGBOUNCER_DB = 'pgbouncer'
#
# Random string constants. These set the parameters for random strings
# appended to database names by the random_name utility function. The intent
# is to make temporary database names (most likely) unique to avoid clashes.
# The current format is 9 decimal digits.
#
RANDOM_STR_MIN = 1
RANDOM_STR_MAX = 999999999
RANDOM_STR_FORMAT = "%09d"
#
# Server class
#
class Server(object):
"""Abstraction of a database server.
Gathers all the parameters that describe a server or how to work
with it, and provides services that use this information."""
def __init__(self, params):
self.name = params['name']
self.host = params['host']
self.port = params['port']
self.user = params['user']
self.superuser = params['superuser']
def connect(self, dbname, superuser=False, autocommit=True):
"""Create a pscopg2 connection to a database and return it.
dbname: The database to connect to.
superuser: Set to True to connect as the superuser, otherwise
connect as the user.
autocommit: Set to False to turn off autocommit, otherwise
autocommit will be turned on."""
user = (self.superuser if superuser else self.user)
dsn = ("dbname=%s host=%s port=%s user=%s connect_timeout=%s" %
(dbname, self.host, self.port, user, CONNECT_TIMEOUT))
conn = psycopg2.connect(dsn)
conn.autocommit = autocommit
return conn
def exists(self, dbname):
"""Returns True if the named database exists on the server."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
result = maint_conn.exists(dbname)
finally:
maint_conn.close()
return result
def dblist(self):
"""Returns a list of the databases on the server."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
result = maint_conn.dblist()
finally:
maint_conn.close()
return result
def load(self, dbname, save_dir, save_file):
"""Load the contents of a database from a file.
The database should be empty, and based off template0 or
equivalent. This method calls the psql command to do the load."""
save_path = os.path.join(save_dir, save_file)
load_cmd = ["psql",
"--dbname=%s" % dbname,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port,
"--file=%s" % save_path]
try:
subprocess.check_output(load_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
# Make sure error output is in the error message.
message = ("%s: problem calling %s:\n%s" %
(__name__, err.cmd[0], err.output))
message = message + "\nCommand: " + " ".join(load_cmd)
raise AssertionError(message)
def save(self, dbname, save_dir, save_file, table=None):
"""Save the contents of a database to a file.
This method calls the pg_dump command to do the save. This
dump is in sql script format so use psql to reload."""
save_path = os.path.join(save_dir, save_file)
save_cmd = ["pg_dump",
"--dbname=%s" % dbname,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port,
"--file=%s" % save_path]
if table:
save_cmd.append("--table=%s" % table)
try:
subprocess.check_output(save_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
#Make sure error output is in the error message.
message = ("%s: problem calling %s:\n%s" %
(__name__, err.cmd[0], err.output))
raise AssertionError(message)
def copy_table_between_databases(self, dbname1, dbname2, table_name):
"""Copy a table from one database to another on the same server.
This method pipes the output of pg_dump to psql."""
dump_cmd = ["pg_dump",
"--dbname=%s" % dbname1,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port,
"--table=%s" % table_name]
load_cmd = ["psql",
"--dbname=%s" % dbname2,
"--username=%s" % self.superuser,
"--host=%s" % self.host,
"--port=%s" % self.port
]
try:
ps_dump = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
subprocess.check_output(load_cmd,
stdin=ps_dump.stdout,
stderr=subprocess.STDOUT)
ps_dump.wait()
except subprocess.CalledProcessError as err:
#Make sure error output is in the error message.
message = ("%s: problem calling %s:\n%s" %
(__name__, err.cmd[0], err.output))
raise AssertionError(message)
def drop(self, dbname):
"""Drop the named database.
Connections are closed explicitly with try/finally blocks,
since they do not seem to be closed automatically in the
case of exceptions and this causes problems.
If pgbouncer is in use a pgbouncer pause command needs to
be issued before dropping the database. This will wait
until active transactions are complete."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
if maint_conn.exists(dbname):
if USE_PGBOUNCER:
bouncer_conn = BouncerWrapper(
self.connect(PGBOUNCER_DB, superuser=True))
try:
bouncer_conn.pause(dbname)
maint_conn.drop(dbname)
finally:
bouncer_conn.close()
else:
maint_conn.drop(dbname)
finally:
maint_conn.close()
def create(self, dbname, save_dir=None, save_file=None,
template_db=TEMPLATE_DB):
"""Creates and loads a database from a file.
This method does a clean create and load of the named database
from the file 'savefile'. It drops an old database of the same
name if necessary.
It uses template_db as the template database, which is copied
to create the new database.
If save_dir or save_file are None (or not specified), no
save file is loaded.
Connections are closed explicitly with try/finally blocks,
since they do not seem to be closed automatically in the
case of exceptions and this causes problems.
If pgbouncer is in use a pgbouncer pause command needs to
be issued before dropping the database. This will wait
until active transactions are complete. The pgbouncer
resume command is issued once the database is (re)created.
This is needed to prevent connection attempts to the new database
from hanging or returning errors if pgbouncer had pools set
up on the old database."""
maint_conn = MaintenanceWrapper(
self.connect(MAINTENANCE_DB, superuser=True))
try:
# Create the database, dropping it first if needed.
if USE_PGBOUNCER:
bouncer_conn = BouncerWrapper(
self.connect(PGBOUNCER_DB, superuser=True))
try:
if maint_conn.exists(dbname):
bouncer_conn.pause(dbname)
maint_conn.drop(dbname)
# To be used as a template, template_db must have
# no current connections.
bouncer_conn.kill(template_db)
maint_conn.create(dbname, template_db)
bouncer_conn.resume(dbname)
finally:
bouncer_conn.close()
else:
if maint_conn.exists(dbname):
maint_conn.drop(dbname)
maint_conn.create(dbname, template_db)
# Load the new database from the save file if necessary
if save_file is not None or save_dir is not None:
self.load(dbname, save_dir, save_file)
# Run ANALYSE on the newly loaded database
db_conn = ConnectionWrapper(self.connect(dbname, superuser=True))
try:
db_conn.analyse()
finally:
db_conn.close()
# All done
finally:
maint_conn.close()
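# Hedged usage sketch (not part of the original module): how the Server class
# above is typically exercised in a test. The database name and dump file are
# hypothetical; TESTSERVER_PARAMS and TEST_RESOURCES_ROOT are defined above.
def _example_server_roundtrip():
    """Illustrative only: create a scratch database from a dump, then drop it."""
    server = Server(TESTSERVER_PARAMS)
    dbname = 'example_db_000000001'
    server.create(dbname, TEST_RESOURCES_ROOT, 'example_dump.sql')
    try:
        assert server.exists(dbname)
    finally:
        server.drop(dbname)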
#
# Connection wrappers.
#
class ConnectionWrapper(object):
"""Generic connection wrapper, inherited by the specific wrappers.
This is a wrapper for a psycopg2 database connection. It
passes on unknown attribute references to the wrapped connection
using __getattr__. The specific wrappers that inherit from this
implement queries and operations on the connection (self.conn)
as methods.
Some utility methods are implemented here. database_name is
useful for testing and error messages. analyse is | |
# Variable names start with ## variable_name variable_text, format
# The format is integer, number, text, boolean, integer choice, text choice, rfile
# Then some help text starting with "# "
# Then a list of possible value#explain with the default first
# Then a blank line to end.
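# Illustrative (hypothetical) entry in quisk_conf_defaults.py matching the
# layout described above -- an "## name text, format" line, help text lines,
# one or more value lines, and a blank line to end the item:
#     ## hardware_file_type   Hardware file path, text
#     # This file controls your radio hardware.
#     #hardware_file_type = 'quisk_hardware_model.py'   # Standard sound card
#     (blank line ends the item)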
self.format4name = {}
self.format4name['hardware_file_type'] = 'text'
re_AeqB = re.compile(r"^#?(\w+)\s*=\s*([^#]+)#*(.*)") # item values "a = b"
section = None
data_name = None
fp = open("quisk_conf_defaults.py", "rb")
for line in fp:
line = line.strip()
if not line:
data_name = None
continue
if line[0:27] == '################ Receivers ':
section = 'Receivers'
args = line[27:].split(',', 1)
rxname = args[0].strip()
section_data = []
self.receiver_data.append((rxname, section_data))
elif line[0:17] == '################ ':
args = line[17:].split(None, 2)
section = args[0]
if section in ('Keys', 'Colors', 'Obsolete'):
section = None
continue
rxname = None
section_data = []
self.sections.append((section, section_data))
if not section:
continue
if line[0:3] == '## ': # item_name item_text, format
args = line[3:].split(None, 1)
data_name = args[0]
args = args[1].split(',', 1)
dspl = args[0].strip()
fmt = args[1].strip()
value_list = []
if data_name in self.format4name:
if self.format4name[data_name] != fmt:
print ("Inconsistent format for", data_name, self.format4name[data_name], fmt)
else:
self.format4name[data_name] = fmt
section_data.append([data_name, dspl, fmt, '', value_list])
if not data_name:
continue
mo = re_AeqB.match(line)
if mo:
if data_name != mo.group(1):
print ("Parse error for", data_name)
continue
value = mo.group(2).strip()
expln = mo.group(3).strip()
if value[0] in ('"', "'"):
value = value[1:-1]
elif value == '{': # item is a dictionary
value = getattr(conf, data_name)
elif value == '[': # item is a list
value = getattr(conf, data_name)
if expln:
value_list.append("%s # %s" % (value, expln))
else:
value_list.append(value)
elif line[0:2] == '# ':
section_data[-1][3] = section_data[-1][3] + line[2:] + ' '
fp.close()
class ConfigHelp(wx.html.HtmlWindow): # The "Help with Radios" first-level page
"""Create the help screen for the configuration tabs."""
def __init__(self, parent):
wx.html.HtmlWindow.__init__(self, parent, -1, size=(win_width, 100))
if "gtk2" in wx.PlatformInfo:
self.SetStandardFonts()
self.SetFonts("", "", [10, 12, 14, 16, 18, 20, 22])
# read in text from file help_conf.html in the directory of this module
self.LoadFile('help_conf.html')
class RadioNotebook(wx.Notebook): # The second-level notebook for each radio name
def __init__(self, parent, radio_name):
wx.Notebook.__init__(self, parent)
font = wx.Font(conf.config_font_size, wx.FONTFAMILY_SWISS, wx.NORMAL,
wx.FONTWEIGHT_NORMAL, face=conf.quisk_typeface)
self.SetFont(font)
self.radio_name = radio_name
self.pages = []
def MakePages(self):
if self.pages:
return
radio_name = self.radio_name
page = RadioHardware(self, radio_name)
self.AddPage(page, "Hardware")
self.pages.append(page)
page = RadioSound(self, radio_name)
self.AddPage(page, "Sound")
self.pages.append(page)
for section, names in local_conf.sections:
if section in ('Sound', 'Bands'): # There is a special page for these sections
continue
page = RadioSection(self, radio_name, section, names)
self.AddPage(page, section)
self.pages.append(page)
page = RadioBands(self, radio_name)
self.AddPage(page, "Bands")
self.pages.append(page)
def NewName(self, new_name):
self.radio_name = new_name
for page in self.pages:
page.radio_name = new_name
class ComboCtrl(wx.combo.ComboCtrl):
def __init__(self, parent, value, choices, no_edit=False):
self.value = value
self.choices = choices[:]
self.handler = None
self.height = parent.quisk_height
if no_edit:
wx.combo.ComboCtrl.__init__(self, parent, -1, style=wx.CB_READONLY)
else:
wx.combo.ComboCtrl.__init__(self, parent, -1, style=wx.TE_PROCESS_ENTER)
self.GetTextCtrl().Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter)
self.ctrl = ListBoxComboPopup(choices, parent.font)
self.SetPopupControl(self.ctrl)
self.SetText(value)
self.SetSizes()
def SetItems(self, lst):
self.ctrl.SetItems(lst)
self.choices = lst[:]
self.SetSizes()
def SetSizes(self):
charx = self.GetCharWidth()
wm = charx
w, h = self.GetTextExtent(self.value)
if wm < w:
wm = w
for ch in self.choices:
w, h = self.GetTextExtent(ch)
if wm < w:
wm = w
wm += charx * 5
self.SetSizeHints(wm, self.height, 9999, self.height)
def SetSelection(self, n):
try:
text = self.choices[n]
except IndexError:
self.SetText('')
self.value = ''
else:
self.ctrl.SetSelection(n)
self.SetText(text)
self.value = text
def OnTextEnter(self, event=None):
if event:
event.Skip()
if self.value != self.GetValue():
self.value = self.GetValue()
if self.handler:
ok = self.handler(self)
def OnKillFocus(self, event):
event.Skip()
self.OnTextEnter(event)
def OnListbox(self):
self.OnTextEnter()
class ListBoxComboPopup(wx.ListBox, wx.combo.ComboPopup):
def __init__(self, choices, font):
wx.combo.ComboPopup.__init__(self)
self.choices = choices
self.font = font
self.lbox = None
def Create(self, parent):
self.lbox = wx.ListBox(parent, choices=self.choices, style=wx.LB_SINGLE)
self.lbox.SetFont(self.font)
self.lbox.Bind(wx.EVT_MOTION, self.OnMotion)
self.lbox.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
return True
def SetItems(self, lst):
self.choices = lst[:]
self.lbox.Set(self.choices)
def SetSelection(self, n):
self.lbox.SetSelection(n)
def GetStringValue(self):
try:
return self.choices[self.lbox.GetSelection()]
except IndexError:
pass
return ''
def GetAdjustedSize(self, minWidth, prefHeight, maxHeight):
chary = self.lbox.GetCharHeight()
return (minWidth, chary * len(self.choices) * 15 / 10 + chary)
def OnLeftDown(self, event):
event.Skip()
self.Dismiss()
self.GetCombo().OnListbox()
def OnMotion(self, event):
event.Skip()
item = self.lbox.HitTest(event.GetPosition())
if item >= 0:
self.lbox.SetSelection(item)
def GetControl(self):
return self.lbox
class BaseWindow(ScrolledPanel):
def __init__(self, parent):
ScrolledPanel.__init__(self, parent)
self.font = wx.Font(conf.config_font_size, wx.FONTFAMILY_SWISS, wx.NORMAL,
wx.FONTWEIGHT_NORMAL, face=conf.quisk_typeface)
self.SetFont(self.font)
self.row = 1
self.charx = self.GetCharWidth()
self.chary = self.GetCharHeight()
self.quisk_height = self.chary * 14 / 10
# GBS
self.gbs = wx.GridBagSizer(2, 2)
self.gbs.SetEmptyCellSize((self.charx, self.charx))
self.SetSizer(self.gbs)
self.gbs.Add((self.charx, self.charx), (0, 0))
def MarkCols(self):
for col in range(1, self.num_cols):
c = wx.StaticText(self, -1, str(col % 10))
self.gbs.Add(c, (self.row, col))
self.row += 1
def NextRow(self, row=None):
if row is None:
self.row += 1
else:
self.row = row
def AddTextL(self, col, text, span=None):
c = wx.StaticText(self, -1, text)
if col < 0:
pass
elif span is None:
self.gbs.Add(c, (self.row, col), flag=wx.ALIGN_CENTER_VERTICAL)
else:
self.gbs.Add(c, (self.row, col), span=(1, span), flag=wx.ALIGN_CENTER_VERTICAL)
return c
def AddTextCHelp(self, col, text, help_text, span=None):
bsizer = wx.BoxSizer(wx.HORIZONTAL)
txt = wx.StaticText(self, -1, text)
bsizer.Add(txt, flag=wx.ALIGN_CENTER_VERTICAL)
btn = wx.Button(self, -1, "..")
btn.quisk_help_text = help_text
btn.quisk_caption = text
h = self.quisk_height + 2
btn.SetSizeHints(h, h, h, h)
btn.Bind(wx.EVT_BUTTON, self._BTnHelp)
bsizer.Add(btn, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=self.charx)
if col < 0:
pass
elif span is None:
self.gbs.Add(bsizer, (self.row, col), flag = wx.ALIGN_CENTER)
else:
self.gbs.Add(bsizer, (self.row, col), span=(1, span), flag = wx.ALIGN_CENTER)
return bsizer
def AddBoxSizer(self, col, span):
bsizer = wx.BoxSizer(wx.HORIZONTAL)
self.gbs.Add(bsizer, (self.row, col), span=(1, span))
return bsizer
def AddColSpacer(self, col, width): # add a width spacer to row 0
self.gbs.Add((width * self.charx, 1), (0, col)) # width is in characters
def AddRadioButton(self, col, text, span=None, start=False):
if start:
c = wx.RadioButton(self, -1, text, style=wx.RB_GROUP)
else:
c = wx.RadioButton(self, -1, text)
if col < 0:
pass
elif span is None:
self.gbs.Add(c, (self.row, col), flag=wx.ALIGN_CENTER_VERTICAL)
else:
self.gbs.Add(c, (self.row, col), span=(1, span), flag=wx.ALIGN_CENTER_VERTICAL)
return c
def AddCheckBox(self, col, text, handler=None):
btn = wx.CheckBox(self, -1, text)
h = self.quisk_height + 2
btn.SetSizeHints(-1, h, -1, h)
if col >= 0:
self.gbs.Add(btn, (self.row, col))
if self.radio_name == "ConfigFileRadio":
btn.Enable(False)
noname_enable.append(btn)
if handler:
btn.Bind(wx.EVT_CHECKBOX, handler)
return btn
def AddPushButton(self, col, text, border=0):
#btn = wx.Button(self, -1, text, style=wx.BU_EXACTFIT)
btn = wx.lib.buttons.GenButton(self, -1, text)
btn.SetBezelWidth(2)
btn.SetUseFocusIndicator(False)
h = self.quisk_height + 2
btn.SetSizeHints(-1, h, -1, h)
if col >= 0:
self.gbs.Add(btn, (self.row, col), flag=wx.RIGHT|wx.LEFT, border=border*self.charx)
if self.radio_name == "ConfigFileRadio":
btn.Enable(False)
noname_enable.append(btn)
return btn
def AddPushButtonR(self, col, text, border=0):
btn = self.AddPushButton(-1, text, border=0)
if col >= 0:
self.gbs.Add(btn, (self.row, col), flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.LEFT, border=border*self.charx)
return btn
def AddComboCtrl(self, col, value, choices, right=False, no_edit=False, span=None, border=1):
cb = ComboCtrl(self, value, choices, no_edit)
if col < 0:
pass
elif span is None:
self.gbs.Add(cb, (self.row, col), flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT|wx.LEFT, border=border*self.charx)
else:
self.gbs.Add(cb, (self.row, col), span=(1, span), flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT|wx.LEFT, border=border*self.charx)
if self.radio_name == "ConfigFileRadio":
cb.Enable(False)
noname_enable.append(cb)
return cb
def AddComboCtrlTx(self, col, text, value, choices, right=False, no_edit=False):
c = wx.StaticText(self, -1, text)
if col >= 0:
self.gbs.Add(c, (self.row, col))
cb = self.AddComboCtrl(col + 1, value, choices, right, no_edit)
else:
cb = self.AddComboCtrl(col, value, choices, right, no_edit)
return c, cb
def AddTextComboHelp(self, col, text, value, choices, help_text, no_edit=False, border=2, span_text=1, span_combo=1):
txt = wx.StaticText(self, -1, text)
self.gbs.Add(txt, (self.row, col), span=(1, span_text), flag=wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, border=self.charx)
col += span_text
cb = self.AddComboCtrl(-1, value, choices, False, no_edit)
if no_edit:
l = len(value)
for i in range(len(choices)):
if value == choices[i][0:l]:
cb.SetSelection(i)
break
else:
print ("Failure to set value for", text, value, choices)
self.gbs.Add(cb, (self.row, col), span=(1, span_combo),
flag=wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.RIGHT,
border=self.charx*2/10)
col += span_combo
btn = wx.Button(self, -1, "..")
btn.quisk_help_text = help_text
btn.quisk_caption = text
h = self.quisk_height + 2
btn.SetSizeHints(h, h, h, h)
self.gbs.Add(btn, (self.row, col), flag=wx.ALIGN_CENTER_VERTICAL|wx.RIGHT, border=self.charx*border)
btn.Bind(wx.EVT_BUTTON, self._BTnHelp)
return txt, cb, btn
def _BTnHelp(self, event):
btn = event.GetEventObject()
dlg = wx.MessageDialog(self, btn.quisk_help_text, btn.quisk_caption, style=wx.OK|wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnChange(self, ctrl):
value = ctrl.GetValue()
self.OnChange2(ctrl, value)
def OnChange2(self, ctrl, value):
name = ctrl.quisk_data_name
fmt4 = local_conf.format4name[name][0:4]
if self.FormatOK(value, fmt4):
radio_dict = local_conf.GetRadioDict(self.radio_name)
radio_dict[name] = value
local_conf.settings_changed = True
def FormatOK(self, value, fmt4): # Check formats integer and number
i1 = value.find('#')
try:
if fmt4 == 'inte':
if i1 > 0:
v = int(value[0:i1], base=0)
else:
v = int(value, base=0)
elif fmt4 == 'numb':
if i1 > 0:
v = float(value[0:i1])
else:
v = | |
burstraw = np.zeros_like(tarr)
f_default = self.default_frequency
for segment in self._burst:
t0, t1 = segment[0]
if start <= t0 and t1 <= end:
t_shift = self.time_global(t0)-t0
amp, phase, freq, env = segment[1]
envarr = np.zeros_like(tarr)
if env == "rec":
freq += f_default
envarr[np.argmin(np.abs(tarr-t0)):np.argmin(np.abs(tarr-t1))+1] = amp
burstraw += envarr*np.cos(2.*np.pi*freq*(tarr+t_shift) + phase)
elif env in ("gauss", "deriv-gauss"):
freq += f_default
sig, center = (t1-t0)/4., (t1+t0)/2.
if env == "gauss":
envarr=amp*np.exp(-(tarr-center)**2./(2.*sig**2.))
elif env == "deriv-gauss":
envarr=amp*(-(tarr-center)/(sig))*np.exp(-(tarr-center)**2./(2.*sig**2.))
envarr[:np.argmin(np.abs((tarr-center)+2*sig))] = 0.
envarr[np.argmin(np.abs((tarr-center)-2.*sig))+1:] = 0.
burstraw += envarr*np.cos(2.*np.pi*freq*(tarr+t_shift) + phase)
elif env == "chirp":
t = tarr[np.argmin(np.abs(tarr-t0)):np.argmin(np.abs(tarr-t1))]
osc = amp*chirp(t = t-t[0], t1 = t[-1]-t[0], f0 = f_default - 0.5*freq, f1 = f_default + 0.5*freq, phi = 180.*phase/np.pi)
pre, tail =np.zeros_like(tarr[:np.argmin(np.abs(tarr-t0))]), np.zeros_like(tarr[np.argmin(np.abs(tarr-t1)):])
burstraw += np.concatenate((pre, osc, tail))
elif not (t1 <= start or end <= t0):
raise Exception('Individual bursts have to be in a single waveform.')
return tarr, pulseraw + burstraw
def _check_inputs(self, **kwargs):
pos_inputs, burst_inputs = [], []
if kwargs['duration'] < 0:
raise Exception("Duration cannot be negative.")
for pos_key in ['at', 'in', 'to', '_from']:
if pos_key in kwargs:
if hasattr(kwargs[pos_key], '__iter__'):
raise Exception("More than one values are given to specify the single-ch output.")
pos_inputs.append(kwargs[pos_key])
if len(pos_inputs) > 1:
raise Exception("Unable to interpret multiply specified positions.")
for burst_key in ['amp', 'phase', 'freq', 'env']:
if burst_key in kwargs:
if hasattr(kwargs[burst_key], '__iter__'):
raise Exception("More than one values are given to specify the single-ch output.")
burst_inputs.append(kwargs[burst_key])
else:
burst_inputs.append({'env':'rec', 'phase' :0}.get(burst_key, np.nan))
return kwargs['duration'], pos_inputs, burst_inputs
def send_wfms(self, **kwargs):
self.instr.send_wfms(ch_id = self.ch_id, **kwargs)
def load_seq(self, **kwargs):
self.instr.load_seq(ch_id = self.ch_id, **kwargs)
def t_arr_concat(self):
return np.arange(0, self.pulse_time, self.t_sample)+0.5*self.t_sample
def make_iterable(inputs, repeat_len = 1):
return inputs if hasattr(inputs, '__iter__') else [inputs]*repeat_len
def reshape(params):
sorted_params = sorted([(k, np.asarray(param)) for k, param in enumerate(params)],
key = lambda p: len(p[1].shape), reverse = True)
reshaped = [None]*len(params)
if len(params) > 2:
j = -1
for k, param in sorted_params:
reshaped[k] = param if j == -1 else reshape((reshaped[j], param))[1]
j = k
elif len(params) == 2:
k_large, param_large = sorted_params[0]
k_small, param_small = sorted_params[1]
reshaped[k_large] = param_large
dim_delta = len(param_large.shape) - len(param_small.shape)
if dim_delta:
extra = ((1,) if len(param_small.shape) > 0 else ())
reshaped[k_small] = np.tile(param_small, param_large.shape[:dim_delta] + extra)
else:
reshaped[k_small] = param_small
if not reshaped[0].shape == reshaped[1].shape:
print(reshaped[k_large].shape, reshaped[k_small].shape)
raise Exception('Too complicated to reshape properly')
return reshaped
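# Hedged example (not from the original source): reshape() tiles the
# lower-dimensional parameter against the higher-dimensional one, e.g.
#   reshape(([1., 2., 3.], 5.))  ->  [array([1., 2., 3.]), array([5., 5., 5.])]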
def auto_division(num, minimum = 1000):
num = int(round(num,0))
unit, _num = 1, num
while _num%2 == 0 and unit < minimum:
unit, _num = unit*2, _num/2
if unit < minimum:
_num, _minimum = int(round(num/unit,0)), int(ceil(float(minimum)/float(unit)))
for n in range(_minimum, _num +1):
if _num%n == 0:
unit = n*unit
break
if unit < minimum:
unit = num
return unit, num/unit
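# Hedged example (not from the original source): auto_division() splits a sample
# count into (unit, repeats) with unit >= minimum, e.g.
#   auto_division(4096, minimum=1000)  ->  (1024, 4)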
class waveform(object):
def __init__(self, ch_list):
self.ch_list = [ch for elem in ch_list for ch in elem.ch_list]
for i, ch in enumerate(self.ch_list):
if ch in self.ch_list[i+1:]:
raise Exception("{Ch} is multiply used.".format(Ch = ch.name))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.section(new = False)
@property
def t_sample(self):
return [ch.t_sample for ch in self.ch_list]
@t_sample.setter
def t_sample(self, newval):
for ch, val in zip(self.ch_list, make_iterable(newval, repeat_len = len(self.ch_list))):
ch.t_sample = val
@property
def default_value(self):
return [ch.default_value for ch in self.ch_list]
@default_value.setter
def default_value(self, newval):
for ch, val in zip(self.ch_list, make_iterable(newval, repeat_len = len(self.ch_list))):
ch.default_value = val
@property
def default_frequency(self):
return [ch.default_frequency for ch in self.ch_list]
@default_frequency.setter
def default_frequency(self, newval):
for ch, val in zip(self.ch_list, make_iterable(newval, repeat_len = len(self.ch_list))):
ch.default_frequency = val
def dwell(self, duration, **kwargs):
for i, ch in enumerate(self.ch_list):
ch.dwell(duration = duration, **self._ch_kwargs(i, **kwargs))
def ramp(self, duration, **kwargs):
for i, ch in enumerate(self.ch_list):
ch.ramp(duration = duration, **self._ch_kwargs(i, **kwargs))
def excurse(self, duration, **kwargs):
for i, ch in enumerate(self.ch_list):
ch.excurse(duration = duration, **self._ch_kwargs(i, **kwargs))
def compensate(self, duration, **kwargs):
return [ch.compensate(duration = duration, **self._ch_kwargs(i, **kwargs)) for i, ch in enumerate(self.ch_list)]
def burst(self, duration, **kwargs):
for i, ch in enumerate(self.ch_list):
ch.burst(duration = duration, **self._ch_kwargs(i, **kwargs))
def sync(self):
latest = max([ch.time_global() for ch in self.ch_list])
for ch in self.ch_list:
ch.keep_up(time = latest)
def section(self, **kwargs):
self.sync()
div = [kwargs.get('division', True), kwargs.get('repeat',1) == 1] + [ch.dividable() for ch in self.ch_list]
kwargs['division'] = all(div) # True only if all conditions are met
for ch in self.ch_list:
ch.section(**kwargs)
def refresh(self):
for ch in self.ch_list:
ch.refresh()
def compose(self):
self.section(new = False)
for ch in self.ch_list:
ch.compose() #compose each ch first
length_list = [ch.time_global() for ch in self.ch_list]
if max(length_list) != min(length_list):
print(length_list)
raise Exception("Waveform lengths are different.")
def send_wfms(self, **kwargs):
for ch in self.ch_list:
kwargs['id_list'] = [c2.ch_id for c2 in self.ch_list if c2.instr == ch.instr]
ch.send_wfms(**kwargs)
def load_seq(self, **kwargs):
for ch in self.ch_list:
kwargs['id_list'] = [c2.ch_id for c2 in self.ch_list if c2.instr == ch.instr]
ch.load_seq(**kwargs)
def _ch_kwargs(self, ch_num, **kwargs):
ch_kw = kwargs
for key in kwargs:
if key == 'duration':
if kwargs['duration'] < 0.:
raise Exception("Duration cannot be negative.")
else:
kwargs[key] = make_iterable(kwargs[key], repeat_len = len(self.ch_list))
if len(kwargs[key]) != len(self.ch_list):
raise Exception("%s must contain %d points."%(key,len(self.ch_list)))
ch_kw[key] = kwargs[key][ch_num]
return ch_kw
def show(self, **kwargs):
fig, axarr = plt.subplots(len(self.ch_list), sharex=True, figsize = kwargs.get('figsize', None))
axarr = [axarr,] if len(self.ch_list) == 1 else axarr
mode = 'stack' if not kwargs.get('flatten', False) else 'flatten'
for i, ch in enumerate(self.ch_list):
wfm_list = ch.scaled_waveform_list if kwargs.get('scaled', True) else ch.waveform_list
ymax = max([max(wfm) for wfm in wfm_list])
ymin = min([min(wfm) for wfm in wfm_list])
ypos = ymax + 0.1* (ymax - ymin)
if mode == 'stack':
for tarr, wfm, seq_dict in zip(ch.t_array_list, wfm_list, ch.seq.data):
t = np.insert(tarr, [0, len(tarr)], [tarr[0]-0.5*ch.t_sample, tarr[-1]+0.5*ch.t_sample])
w = np.insert(wfm, [0, len(wfm)], [wfm[0], wfm[-1]])
axarr[i].step(t, w, where = 'mid')
axarr[i].axvline(x = t[-1], color = 'k', alpha = 1. if np.isinf(seq_dict['repeat']) else 0.5)
exp = 'x {repeat}'.format(**seq_dict)
axarr[i].text(x = (t[0] + t[-1])/2., y = ypos, s = exp, ha = 'center', va = 'top' )
if mode == 'flatten':
tarr_flatten, wfm_flatten = ch.flatten_waves(scaled = kwargs.get('scaled', False))
axarr[i].step(tarr_flatten, wfm_flatten)
time_global = 0.
for seq_dict in ch.seq.data:
for j in range(int(seq_dict['repeat_1'])):
duration = seq_dict['end'] - seq_dict['start']
axarr[i].axvline(x = time_global, color = 'k', alpha = 0.5)
time_global += duration
if not ymax == ymin:
axarr[i].set_ylim([ymin - 0.1* (ymax - ymin), ymax + 0.1* (ymax - ymin)])
axarr[i].set_ylabel(ch.name)
try:
fig.patch.set_alpha(1.0);fig.patch.set_facecolor('w');plt.tight_layout()
except:
pass
def format_MAGIC1000(self):
#Tektronix AWGs
main = major_channel(self.ch_list)
ch, mk1, mk2 = self.ch_list
defaults = ch.default_value, mk1.default_value, mk2.default_value
magic_file_list, len_list, name_list = [], [], []
for n, main_wfm in enumerate(main.waveform_list):
ch_wfm = ch.waveform_list[n] if len(ch.waveform_list) > n else np.zeros(0)
mk1_wfm = mk1.waveform_list[n] if len(mk1.waveform_list) > n else np.zeros(0)
mk2_wfm = mk2.waveform_list[n] if len(mk2.waveform_list) > n else np.zeros(0)
ch_wfm = np.append(ch_wfm, defaults[0]*np.ones(len(main_wfm)-len(ch_wfm)))
mk1_wfm = np.clip(np.append(mk1_wfm, defaults[1]*np.ones(len(main_wfm)-len(mk1_wfm))), 0., 1.)
mk2_wfm = np.clip(np.append(mk2_wfm, defaults[2]*np.ones(len(main_wfm)-len(mk2_wfm))), 0., 1.)
if min(ch_wfm) < -1. or 1. < max(ch_wfm):
raise Exception('Output out of range.')
trailer = ('CLOCK %13.10e\n' % (1e+9/main.t_sample)).replace("+","")
data = ''
for p in range(len(ch_wfm)):
w, m1, m2 = ch_wfm[p], mk1_wfm[p], mk2_wfm[p]
data += pack('<fB', w, int(round(m1+2*m2,0)))
magic_file_list.append('MAGIC 1000\n' + IEEE_block_format(data) + trailer)
len_list.append(len(main_wfm))
name_list.append(main.seq.data[n]['name'])
return magic_file_list, len_list, name_list
def format_TekWFM(self, format = 'real'):
#Tektronix AWGs 5000 and 7000 series
main = major_channel(self.ch_list)
ch, mk1, mk2 = self.ch_list
defaults = ch.default_value, mk1.default_value, mk2.default_value
file_list, len_list, name_list = [], [], []
for n, main_wfm in enumerate(main.waveform_list):
ch_wfm = ch.waveform_list[n] if len(ch.waveform_list) > n else np.zeros(0)
mk1_wfm = mk1.waveform_list[n] if len(mk1.waveform_list) > n else np.zeros(0)
mk2_wfm = mk2.waveform_list[n] if len(mk2.waveform_list) > n else np.zeros(0)
ch_wfm = np.append(ch_wfm, defaults[0]*np.ones(len(main_wfm)-len(ch_wfm)))
mk1_wfm = np.clip(np.append(mk1_wfm, defaults[1]*np.ones(len(main_wfm)-len(mk1_wfm))), 0., 1.)
mk2_wfm = np.clip(np.append(mk2_wfm, defaults[2]*np.ones(len(main_wfm)-len(mk2_wfm))), 0., 1.)
if min(ch_wfm) < -1. or 1. < max(ch_wfm):
raise Exception('Output out of range.')
data = ''
wvmk = np.clip((ch_wfm+1.)*(2**13)-1., 0, 2**14-1)+ (mk1_wfm+2*mk2_wfm)*(2**14)
for p in wvmk:
data += pack('<h', p)
file_list.append(IEEE_block_format(data))
len_list.append(len(main_wfm))
name_list.append(main.seq.data[n]['name'])
return file_list, len_list, name_list
class AWG_instr(container):
def __setattr__(self, name, value):
if isinstance(value, waveform_channel) and not value.name:
value.name = self.name + '.' + name if self.name else name
super(container, self).__setattr__(name, value)
def | |
<gh_stars>1-10
import copy
class Space:
def __init__(self, schema = {}, name = "myspace"):
"""
schema must contain the names of the dimensions in the space as keys;
furthermore, we need type checkers and class instantiators for each dimension.
It suffices for the values to be dtype objects.
"""
self.name = name
self.dimensions = []
self.metrics = []
# self.dimensions = list(schema.keys())
for key in schema.keys():
try:
dtype = schema[key]
dim = Dimension(key, dtype, lambda arg, dtype=dtype: dtype(arg)) # bind dtype per iteration to avoid the late-binding closure bug
setattr(self, key, dim)
self.dimensions.append(key)
except:
print(Warning("One or more dimensions in schema underspecified"))
def point(self, args):
return Point(self,args)
def set_name(self, name):
self.name = name
def append_dimension(self, key, dtype, init=None):
if init==None:
init=dtype
if key in self.dimensions:
Warning(key+" dimension already in this space")
else:
self.dimensions.append(key)
dim = Dimension(key,dtype,init)
setattr(self, key, dim)
def append_metric(self, key, metric):
if type(metric)==Metric:
metric.set_space(self)
else:
metric = Metric(metric, self, description="autogenerated metric object")
if key in self.metrics:
Warning(key+" metric already in this space")
else:
self.metrics.append(key)
setattr(self, key, metric)
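# Hedged usage sketch (not part of the original module): building a Space and a
# Point in it. The dimension names and values are hypothetical.
def _example_space_and_point():
    """Illustrative only."""
    space = Space(schema={"population": int, "price": float}, name="toy_space")
    point = space.point({"population": 100, "price": 1.5})
    return space, point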
def cartersian(space1,space2):
dims1 = space1.dimensions
dims2 = space2.dimensions
#dims = dims1+dims2
#print(dims)
name1 = space1.name
name2 = space2.name
name = name1+str(" X ")+name2
metrics1 = space1.metrics
metrics2 = space2.metrics
#metrics = metrics1 + metrics2
#print(metrics)
space = Space(name=name)
for dim in dims1:
d = getattr(space1,dim)
space.append_dimension(dim,d.dtype,d.init)
for met in metrics1:
m = getattr(space1,met)
space.append_metric(met,m)
for dim in dims2:
d = getattr(space2,dim)
space.append_dimension(dim,d.dtype,d.init)
for met in metrics2:
m = getattr(space2,met)
space.append_metric(met,m)
return space
def spacewise_cartesian(spaces):
base = Space(name="cartesian produce of spaces "+str(spaces))
for space in spaces:
base = cartersian(base, space)
return base
def pointwise_cartesian(points):
    # combine the spaces of the points, then make a new point in the combined space
    spaces = [p.space for p in points]
    space = spacewise_cartesian(spaces)
    args = {}
    for p in points:
        for d in p.space.dimensions:
            args[d] = getattr(p, d)
    point = Point(space, args)
    return point
def space_from_point(point):
return copy.deepcopy(point.space)
class Dimension:
def __init__(self, name, dtype, init):
self.name = name
self.dtype = dtype
self.init = init
class Point:
def __init__(self, space, args_dict):
self.space = space
for key in space.dimensions:
dim = getattr(space,key)
init = dim.init
args = args_dict[key]
if type(args)==dict:
value = init(**args)
elif type(args)==tuple:
#print(*args)
value = init(*args)
else:
value = init(args)
setattr(self, key, value)
def set_space(self, space):
self.space= space
def copy(self):
#first make a clean deep copy
point = copy.deepcopy(self)
#then make sure to set the space back to same parent space
point.set_space(self.space)
return point
class Trajectory:
def __init__(self, point, dynamics = None, params = None):
"""
A Trajectory is an ordered sequence of points in a space
input point must be of class Point
"""
self.space = point.space
self.points= [point]
self.params = params
if dynamics == None:
self.dynamics = Dynamics(self.space, Block(self.space, self.space, lambda point: point))
else:
self.dynamics = dynamics
self.length = 1
def set_params(self, params):
self.params=params
def append_point(self,point):
if point.space == self.space:
self.points.append(point)
self.length +=1
else:
Warning("input point not in the right space")
def append_points(self,points):
for point in points:
self.append_point(point)
def set_dynamics(self, dynamics):
self.dynamics = dynamics
def apply_dynamics(self, iterations=1):
step = self.dynamics.step
for _ in range(iterations):
p = self.points[-1].copy()
if self.params ==None:
point = step(p)
else:
point = step(p,self.params)
self.append_point(point)
class Dynamics:
"""
Dynamics is a map from a space to itself
initialized as an identity map
"""
def __init__(self, space, block=None):
if block == None:
block = Block(space, space, lambda point: point)
block.set_description('This Block encodes dynamics for statespace '+str(space))
self.block = block
self.space = space
self.step = self.block.map
def set_step(self, func):
self.block.set_func(func)
self.step = self.block.map
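# Hedged usage sketch (not part of the original module): attaching a simple
# update rule to a Trajectory via Dynamics. The space, rule and names are
# hypothetical.
def _example_trajectory():
    """Illustrative only: double x on every step."""
    space = Space(schema={"x": float}, name="line")
    dyn = Dynamics(space)
    dyn.set_step(lambda point: space.point({"x": point.x * 2.0}))
    traj = Trajectory(space.point({"x": 1.0}), dynamics=dyn)
    traj.apply_dynamics(iterations=3)
    return [p.x for p in traj.points]   # -> [1.0, 2.0, 4.0, 8.0]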
class Metric:
def __init__(self, func, space=None ,description = "my metric"):
self.description = description
self.eval = func
self.space = space
def set_func(self, func):
self.eval = func
def set_space(self, space):
self.space = space
def set_description(self, description):
self.description = description
class Block:
"""
the point of these Blocks is to take an input
in the domain and map it to an output in the codomain
usage:
point_in_codomain = block.map(point_in_domain)
"""
def __init__(self, domain, codomain, func, paramspace=None, description=None):
    # avoid a shared mutable default argument: give each Block its own empty parameter Space
    self.paramspace = paramspace if paramspace is not None else Space()
    self.params = self.paramspace.point({})
self.description = description
if type(domain)==Space:
self.domain = domain
else:
Warning("domain must be a Space")
if type(codomain)==Space:
self.codomain = codomain
else:
Warning("codomain must be a Space")
self.map = func
def set_params(self, point, override = False):
if override:
self.paramspace = point.space
self.params = point
else:
if self.paramspace == point.space:
self.params = point
else:
args = {}
for d in self.paramspace.dimensions:
args[d] = getattr(point, d)
self.params = self.paramspace.point(args)
def set_domain(self,space):
if type(space)==Space:
self.domain = space
else:
Warning("domain must be a Space")
def set_codomain(self,space):
if type(space)==Space:
self.codomain = space
else:
Warning("codomain must be a Space")
def set_func(self, func):
self.map = func
def set_description(self, description):
self.description= description
def compose(self, block):
"""
pt_in_codomain_of_self = self.map(block.map(pt_in_domain_of_block))
"""
func = lambda point: self.map(block.map(point))
description = "made by composition; collapsed space is called '"+str(self.domain.name)+"'"
return Block(block.domain, self.codomain, func, description=description )
def copy(self):
domain = self.domain
codomain = self.codomain
func = self.map
description = "copy of block: "+str(self)
return Block(domain,codomain,func, description=description)
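# Hedged usage sketch (not part of the original module): composing two Blocks.
# The space, dimension name and functions are hypothetical.
def _example_block_composition():
    """Illustrative only: double x, then shift it by one."""
    space = Space(schema={"x": float}, name="line")
    double = Block(space, space, lambda point: space.point({"x": point.x * 2.0}))
    shift = Block(space, space, lambda point: space.point({"x": point.x + 1.0}))
    composed = shift.compose(double)   # composed.map(p) == shift.map(double.map(p))
    return composed.map(space.point({"x": 3.0})).x   # -> 7.0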
def parallel(blocks):
# | ->[ ] -->|
# -->| ->[ ] -->| x | -->
# | ->[ ] -->|
N = len(blocks)
check = 1
for n in range(N-1):
check *= int(blocks[n].domain==blocks[n+1].domain)
if check:
domain = blocks[0].domain
codomain = spacewise_cartesian([b.codomain for b in blocks])
def func(point):
# assumes point in domain
points = []
for b in blocks:
output = b.map(point)
points.append(output)
return pointwise_cartesian(points)
block = Block(domain,codomain, func)
return block
else:
print(Warning("domains of parallel blocks do not match"))
def chain(blocks):
# runs left to right
# domain->[ ] -> [ ] -> [ ]->codomain
# domain = blocks[0].domain
# codomain = blocks[-1].codomain
# revese the order of the list since composition works in the opposite direction
N = len(blocks)
block = blocks[N-1]
for n in range(N-2,-1,-1):
new = blocks[n]
# getting the compositions to chain in reverse
# was a huge pain, edit with care
#print(n)
#print(new.codomain == block.domain)
#print("")
block = block.compose(new)
description = "chain compose of "+str(blocks)
block.set_description(description)
return block
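# Hedged usage sketch (not part of the original module): chain() runs Blocks
# left to right, so the first Block in the list is applied first. Names are
# hypothetical.
def _example_chain():
    """Illustrative only: (x * 2) + 1 applied in list order."""
    space = Space(schema={"x": float}, name="line")
    double = Block(space, space, lambda point: space.point({"x": point.x * 2.0}))
    shift = Block(space, space, lambda point: space.point({"x": point.x + 1.0}))
    pipeline = chain([double, shift])
    return pipeline.map(space.point({"x": 3.0})).x   # -> 7.0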
#class Stage:
### work in progress below
# systems will be composed of multistage dynamics
# from here we can work out way back to
# simulations
# and eventually
# experiments
class System():
def __init__(self, statespace, paramspace):
"""
this is a generalized dynamical system
statespace is a space
paramspace is a space
stages is a list of dynamics
if you have a system you can more easily make
instances of dynamics by composing policies and mechanism
"""
self.statespace = statespace
self.paramspace = paramspace
self.stages = []
def set_statespace(self,space):
self.statespace = space
def set_paramspace(self,space):
self.paramspace = space
def append_stage(self,dynamics):
self.stages.append(dynamics)
def insert_stage(self, dynamics, index):
self.stages.insert(index, dynamics)
### plan to have Systems generate "trajectories of trajectories"
### where the inner lists loops through substeps or stages (each of which are dynamics)
### where the outer list contains the ordering of timesteps
### stages seems like a better term than substep
class Stage(Dynamics):
def __init__(self, system, policies=None, mechanisms=None, block=None):
    # keep a reference to the parent system (used by append_policy/append_mechanism)
    # and avoid shared mutable default arguments
    self.system = system
    self.policies = policies if policies is not None else []
    self.mechanisms = mechanisms if mechanisms is not None else []
    self.inputSpace = spacewise_cartesian([m.domain for m in self.mechanisms])
    super().__init__(system.statespace, block=block)
def update_inputSpace(self):
self.inputSpace = spacewise_cartesian([m.domain for m in self.mechanisms])
def append_policy(self, policy):
self.update_inputSpace()
policy.set_codomain(self.inputSpace)
policy.set_domain(self.system.statespace)
for obs in policy.observables:
if obs in self.system.statespace.dimensions:
pass
else:
print(Warning('observable not in system statespace'))
self.policies.append(policy)
def append_mechanism(self, mechanism):
mechanism.set_codomain(self.system.statespace)
if mechanism.dimension in self.system.statespace.dimensions:
pass
else:
print(Warning('output dimension not in system statespace'))
self.mechanisms.append(mechanism)
self.update_inputSpace()
def update_step(self, updateDescription=False):
inputMap = parallel(self.policies)
# inputMap.codomain == self.inputSpace
stateUpdateMap = parallel(self.mechanisms)
stateUpdateMap.set_domain(inputMap.codomain)
if updateDescription:
inputs = [d for d in inputMap.codomain.dimensions]
states_updated = [d for d in stateUpdateMap.codomain.dimensions]
stateUpdateMap.set_description("inputs = "+str(inputs)+" and states updated = "+str(states_updated))
block = stateUpdateMap.compose(inputMap)
# combine policies
# combine mechanisms
# combines policies with mechanisms
# results in a statespace->statespace map
###
self.set_step(block)
class Mechanism(Block):
def __init__(self, domain, codomain, dimension, func, description=None):
super().__init__(domain, codomain, func, description=description)
self.dimension = dimension
class Policy(Block):
def __init__(self, domain, codomain, func, description=None, observables =[]):
super().__init__(domain, codomain, func, description=description)
self.observables = observables
def set_observables(self, observables):
| |
their rotten smoke?",
"Tis not enough that through the cloud thou break,",
"To dry the rain on my storm-beaten face,",
"For no man well of such a salve can speak,",
"That heals the wound, and cures not the disgrace:",
"Nor can thy shame give physic to my grief;",
"Though thou repent, yet I have still the loss:",
"The offender's sorrow lends but weak relief",
"To him that bears the strong offence's cross.",
"Ah! but those tears are pearl which thy love sheds,",
"And they are rich and ransom all ill deeds."),
("Sonnet 35",
"No more be grieved atthat which thou hast done:",
"Roses have thorns, and silver fountains mud:",
"Clouds and eclipses stain both moon and sun,",
"And loathsome canker lives in sweetest bud.",
"All men make faults, and even I in this,",
"Authorizing thy trespass with compare,",
"Myself corrupting, salving thy amiss,",
"Excusing thy sins more than thy sins are;",
"For to thy sensual fault I bring in sense,",
"Thy adverse party is thy advocate,",
"And 'gainst myself a lawful plea commence:",
"Such civil war is in my love and hate,",
"That I an accessary needs must be,",
"To that sweet thief which sourly robs from me."),
("Sonnet 36",
"Let me confess that we two must be twain,",
"Although our undivided loves are one:",
"So shall those blots that do with me remain,",
"Without thy help, by me be borne alone.",
"In our two loves there is but one respect,",
"Though in our lives a separable spite,",
"Which though it alter not love's sole effect,",
"Yet doth it steal sweet hours from love's delight.",
"I may not evermore acknowledge thee,",
"Lest my bewailed guilt should do thee shame,",
"Nor thou with public kindness honour me,",
"Unless thou take that honour from thy name:",
"But do not so, I love thee in such sort,",
"As thou being mine, mine is thy good report."),
("Sonnet 37",
"As a decrepit father takes delight",
"To see his active child do deeds of youth,",
"So I, made lame by Fortune's dearest spite,",
"Take all my comfort of thy worth and truth;",
"For whether beauty, birth, or wealth, or wit,",
"Or any of these all, or all, or more,",
"Entitled in thy parts, do crowned sit,",
"I make my love engrafted to this store:",
"So then I am not lame, poor, nor despised,",
"Whilst that this shadow doth such substance give",
"That I in thy abundance am sufficed,",
"And by a part of all thy glory live.",
"Look what is best, that best I wish in thee:",
"This wish I have; then ten times happy me!"),
("Sonnet 38",
"How can my muse want subject to invent,",
"While thou dost breathe, that pour'st into my verse",
"Thine own sweet argument, too excellent",
"For every vulgar paper to rehearse?",
"O! give thy self the thanks, if aught in me",
"Worthy perusal stand against thy sight;",
"For who's so dumb that cannot write to thee,",
"When thou thy self dost give invention light?",
"Be thou the tenth Muse, ten times more in worth",
"Than those old nine which rhymers invocate;",
"And he that calls on thee, let him bring forth",
"Eternal numbers to outlive long date.",
"If my slight muse do please these curious days,",
"The pain be mine, but thine shall be the praise."),
("Sonnet 39",
"O! how thy worth with manners may I sing,",
"When thou art all the better part of me?",
"What can mine own praise to mine own self bring?",
"And what is't but mine own when I praise thee?",
"Even for this, let us divided live,",
"And our dear love lose name of single one,",
"That by this separation I may give",
"That due to thee which thou deserv'st alone.",
"O absence! what a torment wouldst thou prove,",
"Were it not thy sour leisure gave sweet leave,",
"To entertain the time with thoughts of love,",
"Which time and thoughts so sweetly doth deceive,",
"And that thou teachest how to make one twain,",
"By praising him here who doth hence remain."),
("Sonnet 40",
"Take all my loves, my love, yea take them all;",
"What hast thou then more than thou hadst before?",
"No love, my love, that thou mayst true love call;",
"All mine was thine, before thou hadst this more.",
"Then, if for my love, thou my love receivest,",
"I cannot blame thee, for my love thou usest;",
"But yet be blam'd, if thou thy self deceivest",
"By wilful taste of what thyself refusest.",
"I do forgive thy robbery, gentle thief,",
"Although thou steal thee all my poverty:",
"And yet, love knows it is a greater grief",
"To bear love's wrong, than hate's known injury.",
"Lascivious grace, in whom all ill well shows,",
"Kill me with spites yet we must not be foes."),
("Sonnet 41",
"Those pretty wrongs that liberty commits,",
"When I am sometime absent from thy heart,",
"Thy beauty, and thy years full well befits,",
"For still temptation follows where thou art.",
"Gentle thou art, and therefore to be won,",
"Beauteous thou art, therefore to be assailed;",
"And when a woman woos, what woman's son",
"Will sourly leave her till he have prevailed?",
"Ay me! but yet thou mightst my seat forbear,",
"And chide thy beauty and thy straying youth,",
"Who lead thee in their riot even there",
"Where thou art forced to break a twofold truth:",
" Hers by thy beauty tempting her to thee,",
" Thine by thy beauty being false to me."),
("Sonnet 42",
"That thou hast her it is not all my grief,",
"And yet it may be said I loved her dearly;",
"That she hath thee is of my wailing chief,",
"A loss in love that touches me more nearly.",
"Loving offenders thus I will excuse ye:",
"Thou dost love her, because thou know'st I love her;",
"And for my sake even so doth she abuse me,",
"Suffering my friend for my sake to approve her.",
"If I lose thee, my loss is my love's gain,",
"And losing her, my friend hath found that loss;",
"Both find each other, and I lose both twain,",
"And both for my sake lay on me this cross:",
"But here's the joy; my friend and I are one;",
"Sweet flattery! then she loves but me alone."),
("Sonnet 43",
"When most I wink, then do mine eyes best see,",
"For all the day they view things unrespected;",
"But when I sleep, in dreams they look on thee,",
"And darkly bright, are bright in dark directed.",
"Then thou, whose shadow shadows doth make bright,",
"How would thy shadow's form form happy show",
"To the clear day with thy much clearer light,",
"When to unseeing eyes thy shade shines so!",
"How would, I say, mine eyes be blessed made",
"By looking on thee in the living day,",
"When in dead night thy fair imperfect shade",
"Through heavy sleep on sightless eyes doth stay!",
"All days are nights to see till I see thee,",
"And nights bright days when dreams do show thee me."),
("Sonnet 44",
"If the dull substance of my flesh were thought,",
"Injurious distance should not stop my way;",
"For then despite of space I would be brought,",
"From limits far remote, where thou dost stay.",
"No matter then although my foot did stand",
"Upon the farthest earth removed from thee;",
"For nimble thought can jump both sea and land",
"As soon as think the place where he would be.",
"But ah! thought kills me that I am not thought,",
"To leap large lengths of miles when thou art gone,",
"But that, so much of earth and water wrought,",
"I must attend time's leisure with my moan,",
"Receiving nought by elements so slow",
"But heavy tears, badges of either's woe."),
| |
DEBUG = False
import sys
import re
import threading
import os
import time
if True:
from tkinter import *
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import tkinter.font as tkFont
_next_method_name = '__next__'
import Pmw
from pymol.wizard import cleanup
from pmg_tk.Setting import Setting
from pmg_tk.SetEditor import SetEditor
from pmg_tk.ColorEditor import ColorEditor
from pmg_tk.skins import PMGSkin
from .builder import Builder
import pymol._gui
import traceback
root = None
def encode(s):
# obsolete since removal of py2
return s
def split_tk_file_list(pattern):
filenames = []
while True:
pattern = pattern.strip()
if not pattern:
break
sep = None
if pattern[0] == '{':
pattern = pattern[1:]
sep = '}'
a = pattern.split(sep, 1)
filenames.append(a[0])
pattern = a[1] if len(a) == 2 else ''
return filenames
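# Hedged example (not from the original source): Tk hands back multi-selections
# as a Tcl-style list where braces protect names containing spaces, e.g.
#   split_tk_file_list('{my file.pdb} other.pdb')  ->  ['my file.pdb', 'other.pdb']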
def asksaveasfilename(*args, **kwargs):
filename = tkFileDialog.asksaveasfilename(*args, **kwargs)
return encode(filename)
def askopenfilename(*args, **kwargs):
filename = tkFileDialog.askopenfilename(*args, **kwargs)
if not filename:
return filename
multiple = kwargs.get('multiple', 0)
if not multiple:
filename = [filename]
elif not isinstance(filename, (list, tuple)):
filename = split_tk_file_list(filename)
filename = map(os.path.normpath, filename)
filename = map(encode, filename)
filename = list(filename)
if not multiple:
return filename[0]
return filename
def _darwin_browser_open(url):
os.popen("open "+url,'r').read()
def darwin_browser_open(url):
t = threading.Thread(target=_darwin_browser_open,args=(url,))
t.setDaemon(1)
t.start()
def _doAsync(self_cmd,cmmd,dirty=0):
self_cmd.do(cmmd) # force strict ordering of commands
if dirty:
self_cmd.dirty()
def _def_ext(ext): # platform-specific default extension handling
if sys.platform != 'win32':
ext = None # default extensions don't work right under X11/Tcl/Tk
return ext
class Normal(PMGSkin, pymol._gui.PyMOLDesktopGUI):
pad = ' ' # extra space in menus
appname = 'The PyMOL Molecular Graphics System'
appversion = '0.0.0.0' # will be set in __init__
copyright = ('Copyright (C) 2003-%d\n' % (time.localtime().tm_year,) +
'Schrodinger LLC.\n'+
'All rights reserved.')
contactweb = 'http://www.pymol.org'
contactemail = '<EMAIL>'
# responsible for setup and takedown of the normal skin
def _inc_fontsize(self, delta, font):
size = font.cget('size')
sign = -1 if size < 0 else 1
size = max(5, abs(size) + delta)
font.configure(size=size * sign)
def inc_fontsize(self, delta=1):
for name in tkFont.names():
self._inc_fontsize(delta, tkFont.nametofont(name))
def inc_fontsize_dialog(self):
dialog = Toplevel(self.root)
grid = dialog
kw = {'row': 0, 'sticky': 'w', 'padx': 5, 'pady': 5}
col = getattr(iter(range(5)), _next_method_name)
Button(grid, text=' - ', command=lambda: self.inc_fontsize(-1)).grid(column=col(), **kw)
Button(grid, text=' + ', command=lambda: self.inc_fontsize( 1)).grid(column=col(), **kw)
Label(grid, text='All GUI Font Sizes').grid(column=col(), **kw)
kw['row'] = 1
col = getattr(iter(range(5)), _next_method_name)
Button(grid, text=' - ', command=lambda: self._inc_fontsize(-1, self.fixedfont)).grid(column=col(), **kw)
Button(grid, text=' + ', command=lambda: self._inc_fontsize( 1, self.fixedfont)).grid(column=col(), **kw)
Label(grid, text='Output Font Size').grid(column=col(), **kw)
dialog.title('GUI Font Size')
@property
def initialdir(self):
'''
Be in sync with cd/pwd on the console until the first file has been
browsed, then remember the last directory.
'''
return self._initialdir or os.getcwd()
@initialdir.setter
def initialdir(self, value):
self._initialdir = value
def cd_dialog(self):
self.cmd.cd(encode(tkFileDialog.askdirectory(
title="Change Working Directory",
initialdir=self.initialdir)) or '.', quiet=0)
def createDataArea(self):
# Create data area where data entry widgets are placed.
self.dataArea = self.app.createcomponent('dataarea',
(), None,
Frame, (self.app._hull,),
relief=SUNKEN,
bd=1)
self.dataArea.pack(side=LEFT, fill=BOTH, expand=YES,
padx=1, pady=1)
def destroyDataArea(self):
self.app.destroycomponent('dataarea')
def createCommandArea(self):
# Create a command area for application-wide buttons.
self.commandFrame = self.app.createcomponent('commandframe', (), None,
Frame,(self.app._hull,),relief=SUNKEN,bd=1)
self.commandFrame.place(width=500)
self.commandFrame.pack(side=TOP,
expand=NO,
fill=BOTH,
padx=1,
pady=1)
def destroyCommandArea(self):
self.app.destroycomponent('commandframe')
def createMessageBar(self):
self.messageBar = Pmw.MessageBar(self.commandFrame, entry_width = 25,
entry_relief='sunken', entry_borderwidth=1) #, labelpos = 'w')
self.abortButton=Button(self.commandFrame,
text='Abort',highlightthickness=0,
command=lambda s=self:self.abort(),padx=0,pady=0)
self.abortButton.pack(side=RIGHT,fill=BOTH,expand=YES)
self.abortButton=Button(self.commandFrame,
text='Rebuild',highlightthickness=0,
# state=DISABLED,
command=lambda s=self:self.rebuild(),padx=0,pady=0)
self.abortButton.pack(side=RIGHT,fill=BOTH,expand=YES)
self.messageBar.pack(side=BOTTOM, anchor=W, fill=X, expand=1)
self.balloon.configure(statuscommand = self.messageBar.helpmessage)
def destroyMessageBar(self):
self.messageBar.destroy()
def get_current_session_file(self):
session_file = self.cmd.get_setting_text("session_file")
session_file = session_file.replace("\\","/") # always use unix-like path separators
return session_file
def set_current_session_file(self, session_file):
session_file = session_file.replace("\\","/") # always use unix-like path separators
self.cmd.set("session_file",session_file)
def confirm_quit(self,e=None):
if self.cmd.get_setting_boolean("session_changed"):
session_file = self.get_current_session_file()
if session_file != '':
message = "Save the current session '%s'?"%os.path.split(session_file)[1]
else:
message = "Save the current session?"
check = tkMessageBox._show("Save Session", message,
tkMessageBox.QUESTION, tkMessageBox.YESNOCANCEL)
if check==tkMessageBox.YES:
if self.session_save():
self.quit_app()
elif check==tkMessageBox.NO:
self.quit_app()
else:
self.quit_app()
def quit_app(self):
self.cmd.log_close()
self.cmd.quit() # avoid logging this - it is inconvenient...
def buttonAdd(self,frame,text,cmmd):
newBtn=Button(frame,
text=text,highlightthickness=0,
command=cmmd,padx=0,pady=0)
newBtn.pack(side=LEFT,fill=BOTH,expand=YES)
return newBtn
def get_view(self):
self.cmd.get_view(2, quiet=0)
try:
str = self.cmd.get_view(3,quiet=1)
self.root.clipboard_clear()
self.root.clipboard_append(str)
self.last_view = str
self.app.selection_clear()
self.app.selection_own()
self.app.selection_handle(lambda a,b,s=self:s.last_view)
print(" PyMOL: Viewing matrix copied to clipboard.")
except:
traceback.print_exc()
def createButtons(self):
self.buttonArea = Frame(self.root)
self.buttonArea.pack(side=TOP, anchor=W)
row1 = self.app.createcomponent('row1', (), None,
Frame,self.commandFrame,bd=0)
row1.pack(side=TOP,fill=BOTH,expand=YES)
btn_reset = self.buttonAdd(row1,'Reset',lambda s=self: s.cmd.do("_ reset"))
btn_reset = self.buttonAdd(row1,'Zoom',lambda s=self: s.cmd.do("_ zoom animate=-1"))
btn_orient = self.buttonAdd(row1,'Orient',lambda s=self: s.cmd.do("_ orient animate=1"))
btn_rtrace = self.buttonAdd(row1,'Draw',lambda s=self: s.cmd.do("_ draw"))
btn_rtrace = self.buttonAdd(row1,'Ray',lambda s=self: s.cmd.do("_ ray async=1"))
row2 = self.app.createcomponent('row2', (), None,
Frame,self.commandFrame,bd=0)
row2.pack(side=TOP,fill=BOTH,expand=YES)
btn_unpick = self.buttonAdd(row2,'Unpick',lambda s=self: s.cmd.do("_ unpick"))
btn_hidesele = self.buttonAdd(row2,'Deselect', lambda: self.cmd.do("_ deselect"))
btn_reset = self.buttonAdd(row2,'Rock',lambda s=self: s.cmd.do("_ rock"))
btn_getview = self.buttonAdd(row2,'Get View',lambda s=self: s.get_view()) # doesn't get logged
row3 = self.app.createcomponent('row3', (), None,
Frame,self.commandFrame,bd=0)
row3.pack(side=TOP,fill=BOTH,expand=YES)
btn_rewind = self.buttonAdd(row3,'|<',lambda s=self: s.cmd.do("_ rewind"))
btn_back = self.buttonAdd(row3,'<',lambda s=self: s.cmd.do("_ backward"))
btn_stop = self.buttonAdd(row3,'Stop',lambda s=self: s.cmd.do("_ mstop"))
btn_play = self.buttonAdd(row3,'Play',lambda s=self: s.cmd.do("_ mplay"))
btn_forward = self.buttonAdd(row3,'>',lambda s=self: s.cmd.do("_ forward"))
btn_last = self.buttonAdd(row3,'>|',lambda s=self: s.cmd.do("_ ending"))
btn_ccache = self.buttonAdd(row3,'MClear',lambda s=self: s.cmd.do("_ mclear"))
row4 = self.app.createcomponent('row4', (), None,
Frame,self.commandFrame,bd=0)
row4.pack(side=TOP,fill=BOTH,expand=YES)
self.cmdB = self.buttonAdd(row4,'Command',
lambda s=self:
s.toggleFrame(s.cmdFrame))
self.buildB = self.buttonAdd(row4,'Builder',
lambda s=self:
s.toggleFrame(s.buildFrame))
self.volB = self.buttonAdd(row4, 'Volume',
self.newVolumeFrame)
# initialize disabled
# self.volB.config(state=DISABLED)
def newVolumeFrame(self):
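        # Open the volume panel directly if exactly one volume object exists;
        # otherwise pop up a scrolled list box next to the pointer to pick one.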
volumes = self.cmd.get_names_of_type("object:volume", public=1)
if not volumes:
return
if len(volumes) == 1:
self.cmd.volume_panel(volumes[0])
return
def callback():
sels = listbox.getcurselection()
if sels:
self.cmd.volume_panel(sels[0])
window.destroy()
title = 'Select a volume object'
window = Toplevel(self.app.root)
window.title(title)
listbox = Pmw.ScrolledListBox(window,
labelpos='nw',
label_text=title,
items=volumes,
selectioncommand=callback)
listbox.pack(padx=5, pady=5)
x, y = window.winfo_pointerxy()
window.geometry('+%d+%d' % (x - 20, y - 20))
def destroyButtonArea(self):
self.app.destroycomponent('row1')
self.app.destroycomponent('row2')
self.app.destroycomponent('row3')
self.app.destroycomponent('row4')
self.buttonArea.destroy()
def my_show(self,win,center=1):
win.show()
def my_withdraw(self,win):
if sys.platform!='linux2':
win.withdraw()
else:
win.destroy()
def my_activate(self,win,center=1,focus=None):
if sys.platform!='linux2':
win.activate()
else: # autocenter, deiconify, and run mainloop
# this is a workaround for a bug in the
# interaction between Tcl/Tk and common Linux
# window managers (namely KDE/Gnome) which causes
# an annoying 1-2 second delay in opening windows!
if center:
tw = win.winfo_reqwidth()+100
th = win.winfo_reqheight()+100
vw = win.winfo_vrootwidth()
vh = win.winfo_vrootheight()
x = max(0,(vw-tw)/2)
                y = max(0,(vh-th)/2)
win.geometry(newGeometry="+%d+%d"%(x,y))
win.deiconify()
if focus!=None:
focus.focus_set()
win.mainloop()
def my_deactivate(self,win):
if sys.platform!='linux2':
win.deactivate()
else: # autocenter, deiconify, and run mainloop
win.destroy()
def doAsync(self,cmmd):
t = threading.Thread(target=_doAsync,args=(self.cmd,cmmd))
t.setDaemon(1)
t.start()
def command_get(self):
return self.command.get()
def command_set(self, v):
return self.command.set(v)
def command_set_cursor(self, i):
self.entry.icursor(i)
def dump(self,event):
print(dir(event))
print(event.keysym, event.keycode)
def createConsole(self):
self.command = StringVar()
self.lineCount = 0
self._setup_history()
self.cmdFrame = Frame(self.dataArea)
self.buildFrame = Builder(self.app, self.dataArea)
self.toggleFrame(self.cmdFrame,startup=1)
self.entryFrame = Frame(self.cmdFrame)
self.entryFrame.pack(side=BOTTOM,expand=NO,fill=X)
self.entry_label = Label(self.entryFrame, text="PyMOL>", padx=1, pady=1, justify=RIGHT)
self.entry_label.pack(side=LEFT,expand=NO,fill=X)
self.entry = Entry(self.entryFrame, justify=LEFT, width=70,
textvariable=self.command)
self.entry.pack(side=LEFT,expand=YES,fill=X)
self.output = Pmw.ScrolledText(self.cmdFrame)
self.output.pack(side=TOP, fill=BOTH, expand=YES)
self.entry.bind('<Return>', lambda e, s=self:
(s.doTypedCommand(s.command.get()), s.command.set('')))
self.entry.bind('<Tab>', lambda e, s=self: s.complete(e))
self.entry.bind('<Up>', lambda e, s=self: s.back())
self.entry.bind('<Down>', lambda e, s=self: s.forward())
self.entry.bind('<Control-Up>', lambda e: self.back_search())
self.root.protocol("WM_DELETE_WINDOW", lambda s=self: s.confirm_quit())
self.log_file = "log.pml"
# self.entry = self.app.createcomponent('entry', (), None,
# Entry,
# (self.dataArea,),
# justify=LEFT,
# width=50,
### textvariable=self.command)
text = self.output.component('text')
self.text = text
if sys.platform.startswith('win'):
self.font = 'lucida console' # only available on windows
self.my_fw_font=(self.font,8)
self.fixedfont.configure(family=self.font, size=self.my_fw_font[1])
else:
text.tk.call('tk','scaling',1)
self.font = 'fixed' # should be available on any X11-based platform
self.my_fw_font=(self.font,10)
if sys.platform == 'darwin':
self.fixedfont.configure(size=11)
text.configure(width=74)
self.balloon.bind(self.entry, '''Command Input Area
Get the list of commands by hitting <TAB>
Get the list of arguments for one command with a question mark:
PyMOL> color ?
Read the online help for a command with "help":
PyMOL> help color
Get autocompletion for many arguments by hitting <TAB>
PyMOL> color ye<TAB> (will autocomplete "yellow")
''')
if self.app.allow_after:
self.output.after(100,self.update_feedback)
self.output.after(100,self.update_menus)
self.output.pack(side=BOTTOM,expand=YES,fill=BOTH)
self.app.bind(self.entry, 'Command Input Area')
self.app.bind_all('<F1>',lambda e,s=self: s.cmd.do("cmd._special(1,0,0)"))
self.app.bind_all('<F2>',lambda e,s=self: s.cmd.do("cmd._special(2,0,0)"))
self.app.bind_all('<F3>',lambda e,s=self: s.cmd.do("cmd._special(3,0,0)"))
self.app.bind_all('<F4>',lambda e,s=self: s.cmd.do("cmd._special(4,0,0)"))
self.app.bind_all('<F5>',lambda e,s=self: s.cmd.do("cmd._special(5,0,0)"))
self.app.bind_all('<F6>',lambda e,s=self: s.cmd.do("cmd._special(6,0,0)"))
self.app.bind_all('<F7>',lambda e,s=self: s.cmd.do("cmd._special(7,0,0)"))
self.app.bind_all('<F8>',lambda e,s=self: s.cmd.do("cmd._special(8,0,0)"))
self.app.bind_all('<F9>',lambda e,s=self: s.cmd.do("cmd._special(9,0,0)"))
self.app.bind_all('<F10>',lambda e,s=self: s.cmd.do("cmd._special(10,0,0)"))
self.app.bind_all('<F11>',lambda e,s=self: s.cmd.do("cmd._special(11,0,0)"))
self.app.bind_all('<F12>',lambda e,s=self: s.cmd.do("cmd._special(12,0,0)"))
self.app.bind_all('<Control-F1>',lambda e,s=self: s.cmd.do("cmd._special(1,0,0,2)"))
self.app.bind_all('<Control-F2>',lambda e,s=self: s.cmd.do("cmd._special(2,0,0,2)"))
self.app.bind_all('<Control-F3>',lambda e,s=self: s.cmd.do("cmd._special(3,0,0,2)"))
self.app.bind_all('<Control-F4>',lambda e,s=self: s.cmd.do("cmd._special(4,0,0,2)"))
self.app.bind_all('<Control-F5>',lambda e,s=self: s.cmd.do("cmd._special(5,0,0,2)"))
self.app.bind_all('<Control-F6>',lambda e,s=self: s.cmd.do("cmd._special(6,0,0,2)"))
self.app.bind_all('<Control-F7>',lambda e,s=self: s.cmd.do("cmd._special(7,0,0,2)"))
self.app.bind_all('<Control-F8>',lambda e,s=self: s.cmd.do("cmd._special(8,0,0,2)"))
self.app.bind_all('<Control-F9>',lambda e,s=self: s.cmd.do("cmd._special(9,0,0,2)"))
self.app.bind_all('<Control-F10>',lambda e,s=self: s.cmd.do("cmd._special(10,0,0,2)"))
self.app.bind_all('<Control-F11>',lambda e,s=self: s.cmd.do("cmd._special(11,0,0,2)"))
self.app.bind_all('<Control-F12>',lambda e,s=self: s.cmd.do("cmd._special(12,0,0,2)"))
self.entry.bind('<Prior>',lambda e,s=self: s.cmd.do("cmd._special(104,0,0)"))
self.entry.bind('<Next>',lambda e,s=self: s.cmd.do("cmd._special(105,0,0)"))
self.entry.bind('<Control-Prior>',lambda e,s=self: s.cmd.do("cmd._special(104,0,0,2)"))
self.entry.bind('<Control-Next>',lambda e,s=self: s.cmd.do("cmd._special(105,0,0,2)"))
self.entry.bind('<Home>',lambda e,s=self: s.cmd.do("cmd._special(106,0,0)"))
self.entry.bind('<End>',lambda e,s=self: s.cmd.do("cmd._special(107,0,0)"))
def update_feedback(self):
feedback = self.cmd._get_feedback(self.cmd)
if feedback!=None:
self.text.configure(state='normal')
for a in feedback:
self.output.insert(END,"\n")
self.output.insert(END,a)
self.output.see(END)
                self.lineCount = self.lineCount + 1
about three or more feature comparisons? For this purpose we can use the pair grid plot. It also looks very cool :)
# And we discover one more thing: **radius_worst**, **perimeter_worst** and **area_worst** are correlated, as can be seen in the pair grid plot. We will definitely use these discoveries for feature selection.
# %% _cell_guid="3bda33fe-daf9-4f74-acbc-d9d3c8fc83d9" _execution_state="idle" _uuid="381ecb55ced22383c96320ced2299f5da37ce4b6"
sns.set(style="white")
df = x.loc[:, ['radius_worst', 'perimeter_worst', 'area_worst']]
g = sns.PairGrid(df, diag_sharey=False)
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_upper(plt.scatter)
g.map_diag(sns.kdeplot, lw=3)
# %% [markdown] _cell_guid="c9ef0921-19bc-4d40-aa72-0bc2333bd4b4" _execution_state="idle" _uuid="f8355f83ac16e414e3b88e67b77d33ef31c3574d"
# Up to this point, we have already made some comments and discoveries about the data. If you like what we did, I am sure the swarm plot will open the pub's door :)
# %% [markdown] _cell_guid="03abd05a-d67a-4bfc-b951-6394de8c6fc9" _execution_state="idle" _uuid="c3807ef7f6e17b33ae383349bdee7ebfced2a847"
# In the swarm plot, I will split the features into three parts, like the violin plot, so that the plot does not become too complex in appearance.
# %% _cell_guid="ef378d49-8aed-4b9e-96e8-e7d2458fdd89" _execution_state="idle" _uuid="85a2413b70c1b3d69f26a2c122c22d55f930e774"
sns.set(style="whitegrid", palette="muted")
data_dia = y
data = x
data_n_2 = (data - data.mean()) / (data.std()) # standardization
data = pd.concat([y, data_n_2.iloc[:, 0:10]], axis=1)
data = pd.melt(data,
id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
tic = time.time()
sns.swarmplot(x="features", y="value", hue="diagnosis", data=data)
plt.xticks(rotation=90)
# %% _cell_guid="428c75b6-b5d0-47e3-a568-17b5d1896c0c" _execution_state="idle" _uuid="75dfd5e9e50adceb1d42dd000ce779e79b069cce"
data = pd.concat([y, data_n_2.iloc[:, 10:20]], axis=1)
data = pd.melt(data,
id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.swarmplot(x="features", y="value", hue="diagnosis", data=data)
plt.xticks(rotation=90)
# %% _cell_guid="ee64bbc8-0431-482a-b08f-cdca43e41390" _execution_state="idle" _uuid="209e9e9120d6e889696d2d1190e663b5c1885a82"
data = pd.concat([y, data_n_2.iloc[:, 20:31]], axis=1)
data = pd.melt(data,
id_vars="diagnosis",
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.swarmplot(x="features", y="value", hue="diagnosis", data=data)
toc = time.time()
plt.xticks(rotation=90)
print("swarm plot time: ", toc - tic, " s")
# %% [markdown] _cell_guid="5c9efa5e-4938-477e-ab9a-4b084cc0b870" _execution_state="idle" _uuid="d23e2f9b040a92feb0d7ceb8e01e74c758f5dbc3"
# They look cool, right? And you can see the variance more clearly. Let me ask you a question: **in these three plots, which feature looks clearest in terms of classification?** In my opinion, **area_worst** in the last swarm plot shows malignant and benign separated, not totally but mostly. However, **smoothness_se** in swarm plot 2 shows malignant and benign mixed, so it is hard to classify using this feature.
# %% [markdown] _cell_guid="c4c68f34-e876-4e5a-a4a7-09c07381425a" _execution_state="idle" _uuid="b46f98eb7ca8d36dc7bf1516895599524bab694d"
# **What if we want to observe all correlations between features?** Yes, you are right: the answer is the heatmap, an old but powerful plot method.
# %% _cell_guid="9e1e7d8a-bbf2-4aab-90e7-78d4c4ccf416" _execution_state="idle" _uuid="0eeb70ddffc8ac332ee076f2f6b2833a6ffddd2d"
#correlation map
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# %% [markdown] _cell_guid="8ee2e02b-9fc6-42f7-8a83-f6dd77df6c11" _execution_state="idle" _uuid="b4e3afecb204a8262330d22e4539554b2af7975a"
# Well, finally we are in the pub, so let's choose our drinks in the feature selection part using the heatmap (correlation matrix).
# %% [markdown] _cell_guid="6786734a-40a9-46b6-a13a-97ee9c569636" _execution_state="idle" _uuid="84b145dd0c13a3a0d4dd4f9b9fd1bd782e11fcf8"
# <a id='5'></a>
# ## Feature Selection and Random Forest Classification
# Today our purpose is to try new cocktails. For example, we are finally in the pub and we want to try different tastes. Therefore, we need to compare the ingredients of the drinks. If one of them includes lemon, after drinking it we need to eliminate the other drinks that include lemon, so as to experience very different tastes.
# %% [markdown] _cell_guid="c7b2df4e-270e-4c94-8789-177f5e90ac46" _execution_state="idle" _uuid="a042df90ef7138d6f101463e93936119176bdc0d"
# In this part we will select features with different methods: feature selection with correlation, univariate feature selection, recursive feature elimination (RFE), recursive feature elimination with cross-validation (RFECV) and tree-based feature selection. We will use random forest classification to train our model and make predictions.
# %% [markdown] _cell_guid="94d217e3-b2b3-4016-b72e-f8d521d17af7" _execution_state="idle" _uuid="39003c7b75f265bf0826f407433e65923c4dd017"
# <a id='6'></a>
# ### 1) Feature selection with correlation and random forest classification
# %% [markdown] _cell_guid="785bd27a-30d9-4e08-a864-cde7e5630aad" _execution_state="idle" _uuid="1e6ef08c98cb4bf0dedf275e4c08fae743bb3801"
# As can be seen in the heatmap figure, **radius_mean, perimeter_mean and area_mean** are correlated with each other, so we will use only **area_mean**. If you ask how I chose **area_mean** as the feature to use, well, actually there is no correct answer; I just looked at the swarm plots and **area_mean** looks clear to me, but we cannot make an exact choice among correlated features without trying. So let's find the other correlated features and check the accuracy with a random forest classifier.
# %% [markdown] _cell_guid="eea2971b-b703-4e1b-b048-128501506f33" _execution_state="idle" _uuid="acde9c0b406d72122473f8292d641a9fcb8a8682"
# **compactness_mean, concavity_mean and concave points_mean** are correlated with each other, therefore I only choose **concavity_mean**. Apart from these, **radius_se, perimeter_se and area_se** are correlated, and I only use **area_se**. **radius_worst, perimeter_worst and area_worst** are correlated, so I use **area_worst**. **compactness_worst, concavity_worst and concave points_worst** are correlated, so I use **concavity_worst**. **compactness_se, concavity_se and concave points_se** are correlated, so I use **concavity_se**. **texture_mean and texture_worst** are correlated, and I use **texture_mean**. Finally, **area_worst and area_mean** are correlated, and I use **area_mean**.
#
#
#
# %% _cell_guid="ef8d06df-bfcc-4e9a-a3ba-5016ec0c5bd5" _execution_state="idle" _uuid="117f3e858e806f3f26a68dadf3fc89d471010156"
drop_list1 = [
'perimeter_mean', 'radius_mean', 'compactness_mean', 'concave points_mean',
'radius_se', 'perimeter_se', 'radius_worst', 'perimeter_worst',
'compactness_worst', 'concave points_worst', 'compactness_se',
'concave points_se', 'texture_worst', 'area_worst'
]
x_1 = x.drop(drop_list1, axis=1) # do not modify x, we will use it later
x_1.head()
# %% [markdown] _cell_guid="6de99062-7a5a-4b70-879c-54d6c8a4a7e2" _execution_state="idle" _uuid="1ab3852ed7fbeba8718e6722e8a40521033bdf29"
# After dropping the correlated features, as can be seen in the correlation matrix below, there are no more highly correlated features. Actually, I know (and you can see) that there is still a correlation value of 0.9, but let's see together what happens if we do not drop it.
# %% _cell_guid="733f0784-4a3f-410c-a220-f98591825f2e" _execution_state="idle" _uuid="eec5424039036e1af43ba0795b76393805308f97"
#correlation map
f, ax = plt.subplots(figsize=(14, 14))
sns.heatmap(x_1.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
# %% [markdown] _cell_guid="f6551fe9-f3a8-4738-ace3-b0427be4aeb4" _execution_state="idle" _uuid="0eaf4ae8e33c2d6352a862953c1fe5eecf46ed27"
# Well, we chose our features, but **did we choose correctly?** Let's use random forest and find the accuracy for the chosen features.
# %% _cell_guid="111af932-96f8-4105-8deb-ba1172edd203" _execution_state="idle" _uuid="c7a6af60a44959f81593d788934a49c9259d8b43"
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.metrics import accuracy_score
# split data train 70 % and test 30 %
x_train, x_test, y_train, y_test = train_test_split(x_1,
y,
test_size=0.3,
random_state=42)
#random forest classifier with n_estimators=10 (default)
clf_rf = RandomForestClassifier(random_state=43)
clr_rf = clf_rf.fit(x_train, y_train)
ac = accuracy_score(y_test, clf_rf.predict(x_test))
print('Accuracy is: ', ac)
cm = confusion_matrix(y_test, clf_rf.predict(x_test))
sns.heatmap(cm, annot=True, fmt="d")
# %% [markdown] _cell_guid="1503384d-ca2b-4b52-82b5-f1131b014269" _execution_state="idle" _uuid="21cda299619940f7b22acc9a804ee56bff71d3e7"
# Accuracy is almost 95% and, as can be seen in the confusion matrix, we make only a few wrong predictions.
# Now let's see other feature selection methods to find better results.
# %% [markdown] _cell_guid="3eed9ac3-e601-4e16-85bc-26a1a6fff850" _execution_state="idle" _uuid="decd86422aee506b061c905e8573abb3612734e4"
# <a id='7'></a>
# ### 2) Univariate feature selection and random forest classification
# In univariate feature selection, we will use SelectKBest, which removes all but the k highest scoring features.
# <http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectKBest.html#sklearn.feature_selection.SelectKBest>
# %% [markdown] _cell_guid="f053659d-9dfe-4858-a220-ef327df3bc36" _execution_state="idle" _uuid="f681583a7b20e4fb86e557b910021a263573cf18"
# In this method we need to choose how many features we will use. For example, should k (the number of features) be 5, 10 or 15? The answer can only be found by trying, or intuitively. I do not try all combinations; I only choose k = 5 and find the best 5 features.
# %% _cell_guid="4f43c8bd-48f7-4ed9-aa2d-6aa8a29c0c58" _execution_state="idle" _uuid="8159f9efb106f1219dc4e8c2a340399b88f224d8" jupyter={"outputs_hidden": true}
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# find best scored 5 features
select_feature = SelectKBest(chi2, k=5).fit(x_train, y_train)
# %% _cell_guid="c9684618-06fe-4b0a-835f-ceea46da397c" _execution_state="idle" _uuid="d9dcd1495cbf33c190a0d1211df4bac5e79bc4e5"
print('Score list:', select_feature.scores_)
print('Feature list:', x_train.columns)
# %% [markdown] _cell_guid="b0c426ef-3072-4f6e-bf24-b5d927a98316" _execution_state="idle" _uuid="0f46ea1a9b1282a377549406d1c5e093380954b6"
# The best 5 features for classification are **area_mean, area_se, texture_mean, concavity_worst and concavity_mean**. So let's see what happens if we use only these 5 best-scored features.
# %% _cell_guid="efc70e04-bc9c-4f93-bcd3-b1d7160d0d5c" _execution_state="idle" _uuid="9a2bd21537f7c600f3c9baaf833c001084d6ba00"
x_train_2 = select_feature.transform(x_train)
x_test_2 = select_feature.transform(x_test)
#random forest classifier with n_estimators=10 (default)
clf_rf_2 = RandomForestClassifier()
clr_rf_2 = clf_rf_2.fit(x_train_2, y_train)
ac_2 = accuracy_score(y_test, clf_rf_2.predict(x_test_2))
print('Accuracy is: ', ac_2)
cm_2 = confusion_matrix(y_test, clf_rf_2.predict(x_test_2))
sns.heatmap(cm_2, annot=True, fmt="d")
# %% [markdown] _cell_guid="d8888dc1-b50b-46b4-b202-4e33c2630406" _execution_state="idle" _uuid="575005da62c41d12bbb3999b3e26148e12930ce3"
# Accuracy is almost 96% and, as can be seen in the confusion matrix, we make only a few wrong predictions. What we have done up to now is choose features according to the correlation matrix and according to the SelectKBest method. Although we use only 5 features in the SelectKBest method, the accuracies look similar.
# Now let's see other feature selection methods to find better results.
# %% [markdown] _cell_guid="702ad2b3-5b12-4d15-93b1-e7d62dfd1040" _execution_state="idle" _uuid="7a3c3050dd9d694e52962c7c712b1ea16aab6fdf"
# <a id='8'></a>
# ### 3) Recursive feature elimination (RFE) with random forest
# <http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.RFE.html>
# Basically, it uses one of the classification methods (random forest in our example) and assigns a weight to each feature. The features whose absolute weights are the smallest are pruned from the current set of features. That procedure is recursively repeated on the pruned set until the desired number of features is reached.
# %% [markdown] _cell_guid="8a34a801-c568-4598-8a07-85fc20ad0386" _execution_state="idle" _uuid="3ea45c46bb231c767160fe13ad3b21a70f0d0375"
# Like the previous method, we will use 5 features. However, which 5 features will we use? We will choose them with the RFE method.
# %% _cell_guid="8df88bb5-8003-4696-9efe-63ebf8d609a5" _execution_state="idle" _uuid="c384a5240d1c1e9e2a6750e5d218dadaf24d2035"
from sklearn.feature_selection import RFE
# Create the RFE object and rank each pixel
clf_rf_3 = RandomForestClassifier()
rfe = RFE(estimator=clf_rf_3, n_features_to_select=5, step=1)
rfe = rfe.fit(x_train, y_train)
# %% _cell_guid="51d63d0b-4e00-4dc1-816c-809287b60806" _execution_state="idle" _uuid="29ba35a98954d0ae686ce46295179d1f1a27b74c"
print('Chosen best 5 feature by rfe:', x_train.columns[rfe.support_])
# %% [markdown] _cell_guid="92aa6013-3e16-4005-ab1b-b7ce53e78bd3" _execution_state="idle" _uuid="ce670f778a661e8ddc3b7b21a43ccb48a551581a"
# The 5 best features chosen by RFE are **texture_mean, area_mean, concavity_mean, area_se and concavity_worst**. They are exactly the same as in the previous (SelectKBest) method. Therefore we do not need to calculate the accuracy again. In short, we can say that we made a good feature selection with the RFE and SelectKBest methods.
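# %% [markdown]
# A minimal sketch of the recursive feature elimination with cross-validation (RFECV) step listed in the outline above, assuming the same x_train/y_train split and a RandomForestClassifier (the names clf_rf_4 and rfecv are new local names introduced here). Unlike plain RFE, RFECV chooses the number of features itself via cross-validation.
# %%
from sklearn.feature_selection import RFECV
clf_rf_4 = RandomForestClassifier()
rfecv = RFECV(estimator=clf_rf_4, step=1, cv=5, scoring='accuracy')
rfecv = rfecv.fit(x_train, y_train)
print('Optimal number of features:', rfecv.n_features_)
print('Best features:', x_train.columns[rfecv.support_])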
test_result:
phone_disabled_tooltip = phone_test.tooltip
msg = create_pie_menu_message(sim, None, reference_id, None, failure_tooltip=phone_disabled_tooltip(sim))
break
if msg is None:
shift_held = bool(shift)
context = client.create_interaction_context(sim, shift_held=shift_held)
can_queue_interactions = sim.queue is None or sim.queue.can_queue_visible_interaction()
if can_queue_interactions:
pie_menu_action = PieMenuActions.SHOW_PIE_MENU
choice_menu = ChoiceMenu(sim)
choice_menu.add_potential_aops(None, context, sim.potential_phone_interactions(context))
client.set_choices(choice_menu)
else:
pie_menu_action = PieMenuActions.INTERACTION_QUEUE_FULL_TOOLTIP
choice_menu = None
msg = create_pie_menu_message(sim, choice_menu, reference_id, pie_menu_action)
distributor = Distributor.instance()
distributor.add_event(Consts_pb2.MSG_PHONE_MENU_CREATE, msg, True)
with telemetry_helper.begin_hook(writer, TELEMETRY_HOOK_CREATE_PIE_MENU, sim=sim) as hook:
hook.write_int('piid', reference_id)
hook.write_string('kind', 'phone')
return len(msg.items)
def create_pie_menu_message(sim, choice_menu, reference_id, pie_menu_action, target=None, failure_tooltip=None, suppress_front_page=False):
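    # Build the PieMenuCreate protocol message sent to the client: either a
    # disabled tooltip (explicit failure, active fire, or full interaction
    # queue) or the list of pie menu items with names, icons, tooltips,
    # category keys and mood-based coloring.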
msg = interaction_protocol.PieMenuCreate()
msg.sim = sim.id if sim is not None else 0
msg.client_reference_id = reference_id
msg.server_reference_id = 0
msg.supress_social_front_page = suppress_front_page
if failure_tooltip is not None:
msg.disabled_tooltip = failure_tooltip
return msg
if not choice_menu:
fire_service = services.get_fire_service()
if fire_service.fire_is_active:
msg.disabled_tooltip = fire_service.INTERACTION_UNAVAILABLE_DUE_TO_FIRE_TOOLTIP()
return msg
if pie_menu_action == PieMenuActions.INTERACTION_QUEUE_FULL_TOOLTIP:
msg.disabled_tooltip = PieMenuActions.INTERACTION_QUEUE_FULL_STR(sim)
return msg
create_tokens(msg.category_tokens, sim, target, None if target is None else target.get_stored_sim_info())
if choice_menu:
resolver = InteractionResolver(None, None, target, next(iter(choice_menu))[1].context)
else:
resolver = SingleActorAndObjectResolver(sim, target, source='create_pie_menu_message')
if sim is not None:
(icon_override, parent_override, blacklist_icon_tags, blacklist_parent_tags) = sim.get_actor_new_pie_menu_icon_and_parent_name(None, resolver)
else:
icon_override = None
parent_override = None
blacklist_icon_tags = set()
blacklist_parent_tags = set()
if choice_menu is not None:
msg.server_reference_id = choice_menu.revision
club_service = services.get_club_service()
tutorial_service = services.get_tutorial_service()
for (option_id, item) in choice_menu:
aop = item.aop
aop_affordance = aop.affordance
if tutorial_service is not None and not tutorial_service.is_affordance_visible(aop_affordance):
continue
if sim is None:
modifier_tooltip = None
else:
(modifier_visibility, modifier_tooltip) = sim.test_pie_menu_modifiers(aop_affordance)
if not modifier_visibility:
continue
with ProtocolBufferRollback(msg.items) as item_msg:
item_msg.id = aop.aop_id
context = item.context
allow_global_icon_overrides = not blacklist_icon_tags & aop_affordance.interaction_category_tags
allow_global_parent_overrides = not blacklist_parent_tags & aop_affordance.interaction_category_tags
logger.debug('%3d: %s' % (option_id, aop))
name = aop_affordance.get_name(aop.target, context, **aop.interaction_parameters)
(name_override_tunable, name_override_result) = aop_affordance.get_name_override_tunable_and_result(target=aop.target, context=context)
if parent_override is not None:
if allow_global_parent_overrides:
name = parent_override(sim, name)
pie_menu_icon = aop_affordance.get_pie_menu_icon_info(context=context, **aop.interaction_parameters) if icon_override is None else None
category_key = item.category_key
ignore_pie_menu_icon_override = aop_affordance.is_rally_interaction and pie_menu_icon is not None
if name_override_tunable is not None:
if name_override_tunable.new_pie_menu_icon is not None:
if not ignore_pie_menu_icon_override:
pie_menu_icon = name_override_tunable.new_pie_menu_icon(resolver)
if name_override_tunable.new_pie_menu_category is not None:
category_key = name_override_tunable.new_pie_menu_category.guid64
if name_override_tunable.parent_name is not None:
if not (parent_override is None or not allow_global_parent_overrides):
name = name_override_tunable.parent_name(sim, name)
if _show_interaction_tuning_name:
affordance_tuning_name = str(aop_affordance.__name__)
name = InteractionCommandsTuning.INTERACTION_TUNING_NAME(name, affordance_tuning_name)
item_msg.score = aop.content_score if aop.content_score is not None else 0
if _show_front_page_score:
name = InteractionCommandsTuning.INTERACTION_FRONT_PAGE_SCORING(name, str(item_msg.score))
item_msg.loc_string = name
tooltip = modifier_tooltip or item.result.tooltip
if tooltip is not None:
tooltip = aop_affordance.create_localized_string(tooltip, context=context, target=aop.target, **aop.interaction_parameters)
item_msg.disabled_text = tooltip
else:
if tutorial_service is not None:
tooltip = tutorial_service.get_disabled_affordance_tooltip(aop_affordance)
if tooltip is not None:
tooltip = aop_affordance.create_localized_string(tooltip, context=context, target=aop.target, **aop.interaction_parameters)
item_msg.disabled_text = tooltip
else:
success_tooltip = aop_affordance.get_display_tooltip(override=name_override_tunable, context=context, target=aop.target, **aop.interaction_parameters)
if success_tooltip is not None:
item_msg.success_tooltip = success_tooltip
if icon_override is not None and allow_global_icon_overrides:
item_msg.icon_infos.append(create_icon_info_msg(IconInfoData(icon_resource=icon_override)))
elif pie_menu_icon is not None:
item_msg.icon_infos.append(create_icon_info_msg(pie_menu_icon))
if category_key is not None:
item_msg.category_key = category_key
if item.result.icon is not None:
item_msg.icon_infos.append(create_icon_info_msg(IconInfoData(icon_resource=item.result.icon)))
if aop.show_posture_incompatible_icon:
item_msg.icon_infos.append(create_icon_info_msg(IconInfoData(icon_resource=PieMenuActions.POSTURE_INCOMPATIBLE_ICON)))
if club_service is not None and sim is not None:
(encouragement, _) = club_service.get_interaction_encouragement_status_and_rules_for_sim_info(sim.sim_info, aop)
if encouragement == ClubRuleEncouragementStatus.ENCOURAGED:
item_msg.icon_infos.append(create_icon_info_msg(IconInfoData(icon_resource=club_tuning.ClubTunables.PIE_MENU_INTERACTION_ENCOURAGED_ICON)))
elif encouragement == ClubRuleEncouragementStatus.DISCOURAGED:
item_msg.icon_infos.append(create_icon_info_msg(IconInfoData(icon_resource=club_tuning.ClubTunables.PIE_MENU_INTERACTION_DISCOURAGED_ICON)))
handle_pie_menu_item_coloring(item_msg, item, sim, aop, name_override_result)
for visual_target in aop_affordance.visual_targets_gen(aop.target, context, **aop.interaction_parameters):
if visual_target is not None:
item_msg.target_ids.append(visual_target.id)
item_msg.pie_menu_priority = aop_affordance.pie_menu_priority
return msg
def handle_pie_menu_item_coloring(item_msg, item, sim, choice, name_override_result):
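    # Tint the pie menu item by mood: prefer the away-action Sim's current
    # mood when the away action lists it, otherwise fall back to the active
    # Sim's mood when the item is influenced by the active mood.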
mood_result = None
mood_intensity_result = None
away_action = choice.interaction_parameters.get('away_action')
away_action_sim_info = choice.interaction_parameters.get('away_action_sim_info')
if away_action is not None:
away_action_sim_current_mood = away_action_sim_info.get_mood()
if away_action_sim_current_mood in away_action.mood_list:
mood_result = away_action_sim_current_mood
mood_intensity_result = away_action_sim_info.get_mood_intensity()
elif item.result.influence_by_active_mood or name_override_result.influence_by_active_mood:
mood_result = sim.get_mood()
mood_intensity_result = sim.get_mood_intensity()
if mood_result is not None:
item_msg.mood = mood_result.guid64
item_msg.mood_intensity = mood_intensity_result
@sims4.commands.Command('interactions.select', command_type=sims4.commands.CommandType.Live)
def select_choice(choice_id:int, reference_id:int=0, _connection=None):
client = services.client_manager().get(_connection)
return client.select_interaction(choice_id, reference_id)
@sims4.commands.Command('interactions.queue')
def display_queue(sim_id:int=None, _connection=None):
output = Output(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) if sim_info is not None else None
if sim is None:
output('Invalid Sim id {0:08x}'.format(sim_id))
return False
output('Super Interaction State: (num = {0})'.format(len(sim.si_state)))
for si in sim.si_state.sis_actor_gen():
output(' * {}'.format(str(si)))
for subi in si.queued_sub_interactions_gen():
output(' - {}'.format(str(subi)))
output('Interaction Queue State: (num = {0})'.format(len(sim.queue)))
for si in sim.queue:
output(' * {}'.format(str(si)))
output('Running: %s' % sim.queue.running)
@sims4.commands.Command('qa.interactions.list', command_type=sims4.commands.CommandType.Automation)
def display_queue_automation(sim_id:int=None, _connection=None):
output = sims4.commands.AutomationOutput(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) if sim_info is not None else None
if sim is None:
output('SimInteractionData; SimId:None')
return False
if sim.queue.running is None:
output('SimInteractionData; SimId:%d, SICount:%d, RunningId:None' % (sim.id, len(sim.si_state)))
else:
output('SimInteractionData; SimId:%d, SICount:%d, RunningId:%d, RunningClass:%s' % (sim.id, len(sim.si_state), sim.queue.running.id, sim.queue.running.__class__.__name__))
for si in sim.si_state.sis_actor_gen():
output('SimSuperInteractionData; Id:%d, Class:%s' % (si.id, si.__class__.__name__))
@sims4.commands.Command('interactions.reevaluate_head')
def reevaluate_head(sim_id:int=None, _connection=None):
output = sims4.commands.AutomationOutput(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance() if sim_info is not None else None
if sim is None:
output('SimInteractionData; SimId:None')
return False
for interaction in sim.queue:
if interaction.is_super:
interaction.transition = None
sim.queue._get_head()
@sims4.commands.Command('qa.interactions.enable_sim_interaction_logging', command_type=sims4.commands.CommandType.Automation)
def enable_sim_interaction_logging(sim_id:int=None, _connection=None):
output = sims4.commands.AutomationOutput(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) if sim_info is not None else None
if sim is None:
output('SimInteractionToggleOn; SimId:None')
return False
sim.interaction_logging = True
output('[AreaInstanceInteraction] SimInteractionToggleOn; SimId:%d, Logging:%d' % (sim.id, sim.interaction_logging))
@sims4.commands.Command('qa.interactions.disable_sim_interaction_logging', command_type=sims4.commands.CommandType.Automation)
def disable_sim_interaction_logging(sim_id:int=None, _connection=None):
output = sims4.commands.AutomationOutput(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) if sim_info is not None else None
if sim is None:
output('SimInteractionToggleOff; SimId:None')
return False
sim.interaction_logging = False
output('[AreaInstanceInteraction] SimInteractionToggleOff; SimId:%d, Logging:%d' % (sim.id, sim.interaction_logging))
@sims4.commands.Command('qa.interactions.enable_sim_transition_path_logging', command_type=sims4.commands.CommandType.Automation)
def enable_sim_transition_path_logging(sim_id:int=None, _connection=None):
output = sims4.commands.AutomationOutput(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) if sim_info is not None else None
if sim is None:
output('SimTransitionPathToggleOn; SimId:None')
return False
sim.transition_path_logging = True
output('[AreaInstanceInteraction] SimTransitionPathToggleOn; SimId:%d, Logging:%d' % (sim.id, sim.interaction_logging))
@sims4.commands.Command('qa.interactions.disable_sim_transition_path_logging', command_type=sims4.commands.CommandType.Automation)
def disable_sim_transition_path_logging(sim_id:int=None, _connection=None):
output = sims4.commands.AutomationOutput(_connection)
if sim_id is None:
client = services.client_manager().get(_connection)
sim = _active_sim(client)
else:
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS) if sim_info is not None else None
if sim is None:
output('SimTransitionPathToggleOff; SimId:None')
return False
sim.transition_path_logging = False
output('[AreaInstanceInteraction] SimTransitionPathToggleOff; SimId:%d, Logging:%d' % (sim.id, sim.interaction_logging))
@sims4.commands.Command('interactions.display_outcomes')
def display_outcomes(sim_id:int=None, _connection=None):
sim_info = services.sim_info_manager().get(sim_id)
sim = sim_info.get_sim_instance() if sim_info is not None else None
client = services.client_manager().get(_connection)
if sim is None:
sim = _active_sim(client)
for si in sim.si_state.sis_actor_gen():
sims4.commands.output('Outcome for {} = {}'.format(si.affordance, si.global_outcome_result), _connection)
def send_reject_response(client, sim, context_handle, cancel_reason):
reject_msg = protocols.ServerResponseFailed()
reject_msg.handle = context_handle
reject_msg.reason = cancel_reason
distributor = Distributor.instance()
distributor.add_op_with_no_owner(GenericProtocolBufferOp(Operation.SIM_SERVER_RESPONSE_FAILED, reject_msg))
logger.debug(' sending reject msg')
def cancel_common(interaction_id:int, context_handle:int=None, _connection=None, user_canceled=False):
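    # Shared cancel path: cancel the interaction with the given id for the
    # active Sim, falling back to a pending continuation with the same id;
    # if cancellation fails and a context handle was supplied, notify the
    # client with a ServerResponseFailed rejection.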
client = services.client_manager().get(_connection)
sim = _active_sim(client)
interaction = sim.find_interaction_by_id(interaction_id)
if interaction is None:
continuation = sim.find_continuation_by_id(interaction_id)
if continuation is not None:
continuation.cancel_user(cancel_reason_msg='User canceled the interaction.')
return True
if interaction.cancel_user(cancel_reason_msg='Command interactions.cancel_si'):
return True
if context_handle is not None:
send_reject_response(client, sim, context_handle, protocols.ServerResponseFailed.REJECT_CLIENT_CANCEL_SUPERINTERACTION)
return False
@sims4.commands.Command('interactions.force_inertial', command_type=sims4.commands.CommandType.Automation)
def interaction_force_inertial(opt_target:OptionalTargetParam=None, _connection=None):
sim = get_optional_target(opt_target, _connection)
if sim is None:
return False
for si in sim.si_state:
si.force_inertial = True
@sims4.commands.Command('interactions.cancel', command_type=sims4.commands.CommandType.Live)
def cancel_mixer_interaction(interaction_id:int, mixer_id:int, server_ref:int, context_handle:int=None, _connection=None):
logger.debug('cancel_sub_interaction {0}', interaction_id)
client = services.client_manager().get(_connection)
sim = _active_sim(client)
interaction = sim.find_sub_interaction_by_aop_id(interaction_id, mixer_id)
if interaction is not None and sim.queue.running != interaction:
return interaction.cancel_user(cancel_reason_msg='Command interactions.cancel')
return False
@sims4.commands.Command('interactions.cancel_si', command_type=sims4.commands.CommandType.Live)
def cancel_super_interaction(super_interaction_id:int, context_handle:int=None, _connection=None):
logger.debug('cancel_super_interaction {0}', super_interaction_id)
if False and _mixer_lock:
return False
return cancel_common(super_interaction_id, context_handle, _connection, user_canceled=True)
@sims4.commands.Command('interactions.run_first')
def first_interaction(target_id:int=None, _connection=None):
target = None
if target_id is not None:
target = services.object_manager().get(target_id)
client = services.client_manager().get(_connection)
sim = _active_sim(client)
if target is None:
target = sim
context = client.create_interaction_context(sim)
affordances = list(target.potential_interactions(context))
if affordances:
logger.debug('Running affordance: {0}', affordances[0])
return affordances[0].test_and_execute(context)
return False
@sims4.commands.Command('interactions.push', command_type=sims4.commands.CommandType.Live)
def push_interaction(affordance:TunableInstanceParam(sims4.resources.Types.INTERACTION), opt_target:RequiredTargetParam=None, opt_sim:OptionalTargetParam=None, priority=Priority.High, _connection=None):
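    # Push a super affordance onto the (optionally specified) Sim from the
    # pie menu source at the given priority, refusing when the visible
    # interaction queue is full.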
target = opt_target.get_target() if opt_target is not None else None
sim = get_optional_target(opt_sim, _connection)
client = services.client_manager().get(_connection)
priority = Priority(priority)
if not sim.queue.can_queue_visible_interaction():
sims4.commands.output('Interaction queue is full, cannot add anymore interactions.', _connection)
return False
else:
context = InteractionContext(sim, InteractionContext.SOURCE_PIE_MENU, priority, client=client, pick=None)
result = sim.push_super_affordance(affordance, target, context)
if not result:
output = sims4.commands.Output(_connection)
output('Failed to push: {}'.format(result))
return False
return True
@sims4.commands.Command('interactions.push_all_sims')
def push_interaction_on_all_sims(affordance:TunableInstanceParam(sims4.resources.Types.INTERACTION), opt_target:RequiredTargetParam=None, _connection=None):
target = opt_target.get_target() if opt_target is not None else None
client = services.client_manager().get(_connection)
for sim_info in client.selectable_sims:
sim = sim_info.get_sim_instance()
if sim is not None:
context = InteractionContext(sim, InteractionContext.SOURCE_PIE_MENU, Priority.High, client=client, pick=None)
sim.push_super_affordance(affordance, target, context)
| |
objs_dict['float'].transform(X_new)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_full_ks(data_full_ks):
objs_dict, X, X_expected = data_full_ks
X_new = objs_dict['object'].transform(X)
X_new = objs_dict['int'].transform(X_new)
X_new = objs_dict['float'].transform(X_new)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_full_pd_np(data_full):
objs_dict, X, X_expected = data_full
X_new = objs_dict['object'].transform_numpy(X.to_numpy())
X_new = objs_dict['int'].transform_numpy(X_new)
X_new = objs_dict['float'].transform_numpy(X_new)
X_new = pd.DataFrame(X_new, columns=['A', 'B', 'C', 'D', 'E', 'F'])
assert_frame_equal(X_new, X_expected.astype(object))
@pytest.mark.koalas
def test_full_ks_np(data_full_ks):
objs_dict, X, X_expected = data_full_ks
X_new = objs_dict['object'].transform_numpy(X.to_numpy())
X_new = objs_dict['int'].transform_numpy(X_new)
X_new = objs_dict['float'].transform_numpy(X_new)
X_new = pd.DataFrame(X_new, columns=['A', 'B', 'C', 'D', 'E', 'F'])
assert_frame_equal(X_new, X_expected.astype(object))
def test_imputers_columns_pd():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform(X)
X_new = obj_int_B.transform(X_new)
X_new = obj_float_C.transform(X_new)
X_new = obj_float_D.transform(X_new)
X_new = obj_object_E.transform(X_new)
X_new = obj_object_F.transform(X_new)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_imputers_columns_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform(X)
X_new = obj_int_B.transform(X_new)
X_new = obj_float_C.transform(X_new)
X_new = obj_float_D.transform(X_new)
X_new = obj_object_E.transform(X_new)
X_new = obj_object_F.transform(X_new)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_imputers_columns_pd_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform_numpy(X.to_numpy())
X_new = obj_int_B.transform_numpy(X_new)
X_new = obj_float_C.transform_numpy(X_new)
X_new = obj_float_D.transform_numpy(X_new)
X_new = obj_object_E.transform_numpy(X_new)
X_new = obj_object_F.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
@pytest.mark.koalas
def test_imputers_columns_ks_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -99.0, -999.0, -9999.0, 'missing', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int_A = IntImputer(
strategy='constant', value=-9, columns=['A']).fit(X)
obj_int_B = IntImputer(
strategy='constant', value=-99, columns=['B']).fit(X)
obj_float_C = FloatImputer(
strategy='constant', value=-999., columns=['C']).fit(X)
obj_float_D = FloatImputer(
strategy='constant', value=-9999., columns=['D']).fit(X)
obj_object_E = ObjectImputer(
strategy='constant', value='missing', columns=['E']).fit(X)
obj_object_F = ObjectImputer(
strategy='constant', value='MISSING', columns=['F']).fit(X)
X_new = obj_int_A.transform_numpy(X.to_numpy())
X_new = obj_int_B.transform_numpy(X_new)
X_new = obj_float_C.transform_numpy(X_new)
X_new = obj_float_D.transform_numpy(X_new)
X_new = obj_object_E.transform_numpy(X_new)
X_new = obj_object_F.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
def test_imputers_num_pd():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform(X)
X_new = obj_object.transform(X_new)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_imputers_num_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform(X)
X_new = obj_object.transform(X_new)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_imputers_num_pd_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform_numpy(X.to_numpy())
X_new = obj_object.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
@pytest.mark.koalas
def test_imputers_num_ks_np():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X = ks.from_pandas(X)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, -9.0, -9.0, 'MISSING', 'MISSING']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_num = NumericsImputer(
strategy='constant', value=-9.).fit(X)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X)
X_new = obj_num.transform_numpy(X.to_numpy())
X_new = obj_object.transform_numpy(X_new)
assert_frame_equal(
pd.DataFrame(X_new, columns=list('ABCDEF')),
X_expected.astype(object))
def test_num_np():
X = pd.DataFrame({'A': [0, 1, np.nan]})
obj = NumericsImputer(strategy='mean').fit(X)
assert obj.transform_numpy(X.to_numpy()).tolist() == [[0.0], [1.0], [0.5]]
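# Editor's note: with strategy='mean' the NaN in column A is replaced by
# mean([0, 1]) == 0.5, which is the value asserted above.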
def test_imputers_strategy():
X = pd.DataFrame([])
with pytest.raises(TypeError):
_ = FloatImputer(strategy=0)
with pytest.raises(TypeError):
_ = NumericsImputer(strategy=0)
with pytest.raises(TypeError):
_ = IntImputer(strategy='constant', value='a').fit(X)
with pytest.raises(TypeError):
_ = FloatImputer(strategy='constant', value='a').fit(X)
with pytest.raises(TypeError):
_ = NumericsImputer(strategy='constant', value='a').fit(X)
with pytest.raises(TypeError):
_ = ObjectImputer(strategy='constant', value=1).fit(X)
with pytest.raises(ValueError):
_ = IntImputer(strategy='').fit(X)
with pytest.raises(ValueError):
_ = FloatImputer(strategy='').fit(X)
with pytest.raises(ValueError):
_ = NumericsImputer(strategy='').fit(X)
with pytest.raises(ValueError):
_ = ObjectImputer(strategy='').fit(X)
with pytest.raises(ValueError):
_ = FloatImputer(strategy='most_frequent').fit(X)
with pytest.raises(ValueError):
_ = NumericsImputer(strategy='most_frequent').fit(X)
with pytest.raises(ValueError):
_ = ObjectImputer(strategy='mean').fit(X)
with pytest.raises(ValueError):
_ = ObjectImputer(strategy='median').fit(X)
with pytest.raises(ValueError):
_ = ObjectImputer(strategy='constant').fit(X)
with pytest.raises(ValueError):
_ = FloatImputer(strategy='constant').fit(X)
with pytest.raises(ValueError):
_ = NumericsImputer(strategy='constant').fit(X)
with pytest.raises(ValueError):
_ = IntImputer(strategy='constant').fit(X)
with pytest.raises(ValueError):
_ = ObjectImputer(strategy='abc').fit(X)
def test_compute_strategy():
with pytest.raises(ValueError):
X = pd.DataFrame(
np.arange(9).reshape(3, 3) + .1, columns=list('qwe'))
X.iloc[:, 0] = np.nan
_ = FloatImputer(strategy='mean').fit(X)
def test_imputers_input_data():
with pytest.raises(TypeError):
_ = FloatImputer(strategy='mean').fit(np.array([[]]))
with pytest.raises(TypeError):
_ = IntImputer(strategy='most_frequent').fit(np.array([[]]))
with pytest.raises(TypeError):
_ = ObjectImputer(strategy='most_frequent').fit(np.array([[]]))
with pytest.raises(TypeError):
_ = ObjectImputer(strategy='most_frequent', columns='a')
def test_imputers_transform_input_data():
with pytest.raises(TypeError):
_ = FloatImputer(strategy='mean').fit_transform(np.array([]))
with pytest.raises(TypeError):
_ = IntImputer(strategy='most_frequent').fit(
np.array([])).transform(np.array([]))
with pytest.raises(TypeError):
_ = ObjectImputer(strategy='most_frequent').transform(np.array([]))
def test_warnings_empty_columns(data):
objs_dict, X_dict, X_expected_dict = data
with pytest.warns(Warning):
obj = FloatImputer(strategy='mean')
obj.fit(X_dict['int'])
with pytest.warns(Warning):
obj = IntImputer(strategy='mean')
obj.fit(X_dict['float'])
with pytest.warns(Warning):
obj = ObjectImputer(strategy='most_frequent')
obj.fit(X_dict['int'])
with pytest.warns(Warning):
obj = NumericsImputer(strategy='mean')
obj.fit(X_dict['object'])
def test_empty_columns_float():
X = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
obj = FloatImputer(strategy='mean')
_ = obj.fit(X)
assert_frame_equal(obj.transform(X.copy()), X)
assert np.allclose(obj.transform_numpy(X.to_numpy()),
X.to_numpy(), equal_nan=True)
def test_empty_columns_int():
X = pd.DataFrame({'A': [0.1, 1, 1, np.nan], 'B': [3.1, 4, 4, np.nan]})
obj = IntImputer(strategy='mean')
_ = obj.fit(X)
assert_frame_equal(obj.transform(X.copy()), X)
assert np.allclose(obj.transform_numpy(X.to_numpy()),
X.to_numpy(), equal_nan=True)
def test_empty_columns_object():
X = pd.DataFrame({'A': | |
# gpyrn/covfunc.py
"""
Covariance functions to use on the GPRN
"""
from gpyrn.meanfunc import array_input
import numpy as np
class covFunction():
"""
A base class for covariance functions (kernels) used for nodes and weights
in the GPRN.
"""
def __init__(self, *args):
""" Puts all kernel arguments in an array pars """
self.pars = np.array(args, dtype=float)
# self.pars[self.pars > 1e50] = 1e50
def __call__(self, r, t1=None, t2=None):
"""
r = t - t'
        Not sure if this is a good approach, since it will make our lives
        harder when defining certain non-stationary kernels, e.g. the linear
        kernel.
"""
raise NotImplementedError
def __repr__(self):
""" Representation of each kernel instance """
if hasattr(self, '_param_names'):
pars = ', '.join(
[f'{p}={v}' for p, v in zip(self._param_names, self.pars)])
else:
pars = ', '.join(map(str, self.pars))
return f"{self.__class__.__name__}({pars})"
def get_parameters(self):
return self.pars
@array_input
def set_parameters(self, p):
msg = f'too few parameters for kernel {self.__class__.__name__}'
assert len(p) >= self.pars.size, msg
if len(p) > self.pars.size:
p = list(p)
self.pars = np.array(p[:self.pars.size], dtype=float)
for _ in range(self.pars.size):
p.pop(0)
return np.array(p)
else:
self.pars = p
def __add__(self, b):
return Sum(self, b)
def __radd__(self, b):
return self.__add__(b)
def __mul__(self, b):
return Multiplication(self, b)
def __rmul__(self, b):
return self.__mul__(b)
class _operator(covFunction):
""" To allow operations between two kernels """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
self.kerneltype = 'complex'
self.pars = np.r_[self.k1.pars, self.k2.pars]
class Sum(_operator):
""" To allow the sum of kernels """
def __call__(self, r):
return self.k1(r) + self.k2(r)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Multiplication(_operator):
""" To allow the multiplication of kernels """
def __call__(self, r):
return self.k1(r) * self.k2(r)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
##### Constant #################################################################
class Constant(covFunction):
"""
This kernel returns the square of its constant argument c
Parameters
----------
c: float
Constant
"""
_param_names = 'c',
_tag = 'C'
def __init__(self, c):
super(Constant, self).__init__(c)
def __call__(self, r):
c = self.pars[0]
return np.full_like(r, c**2)
##### White Noise ##############################################################
class WhiteNoise(covFunction):
"""
Definition of the white noise kernel.
Parameters
----------
wn: float
White noise amplitude
"""
_param_names = 'wn',
_tag = 'WN'
def __init__(self, wn):
super(WhiteNoise, self).__init__(wn)
def __call__(self, r):
wn = self.pars[0]
if r.ndim == 2 and r[0, :].shape == r[:, 0].shape:
return wn**2 * np.diag(np.diag(np.ones_like(r)))
return np.full_like(r, wn**2)
##### Squared exponential ######################################################
class SquaredExponential(covFunction):
"""
Squared Exponential kernel, also known as radial basis function or RBF
kernel in other works.
Parameters
----------
theta: float
Amplitude
ell: float
Length-scale
"""
_param_names = 'theta', 'ell'
_tag = 'SE'
def __init__(self, theta, ell):
super(SquaredExponential, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * np.exp(-0.5 * r**2 / self.pars[1]**2)
##### Periodic #################################################################
class Periodic(covFunction):
"""
Definition of the periodic kernel.
Parameters
----------
theta: float
Amplitude
P: float
Period
lp: float
        Length scale
"""
_param_names = 'theta', 'P', 'lp'
_tag = 'P'
def __init__(self, theta, P, lp):
super(Periodic, self).__init__(theta, P, lp)
def __call__(self, r):
θ, P, lp = self.pars
return θ**2 * np.exp(-2 * np.sin(np.pi * np.abs(r) / P)**2 / lp**2)
##### Quasi Periodic ###########################################################
class QuasiPeriodic(covFunction):
"""
This kernel is the product between the exponential sine squared kernel
and the squared exponential kernel, commonly known as the quasi-periodic
kernel
Parameters
----------
theta: float
Amplitude
le: float
Evolutionary time scale
P: float
Kernel periodicity
lp: float
Length scale of the periodic component
"""
_param_names = 'theta', 'le', 'P', 'lp'
_tag = 'QP'
def __init__(self, theta, le, P, lp):
super(QuasiPeriodic, self).__init__(theta, le, P, lp)
def __call__(self, r):
θ, le, P, lp = self.pars
return θ**2 * np.exp(-2 * np.sin(np.pi * np.abs(r) / P)**2 / lp**2 - \
r**2 / (2 * le**2))
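

# Illustrative sketch (editor addition): QuasiPeriodic equals the product of a
# Periodic kernel and an amplitude-one SquaredExponential, which the composition
# operators defined earlier reproduce term by term.
def _quasi_periodic_as_product(theta, le, P, lp):
    """Composite kernel equivalent to QuasiPeriodic(theta, le, P, lp)."""
    return Periodic(theta, P, lp) * SquaredExponential(1.0, le)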
##### Rational Quadratic #######################################################
class RationalQuadratic(covFunction):
"""
Definition of the rational quadratic kernel.
Parameters
----------
theta: float
Amplitude of the kernel
alpha: float
Amplitude of large and small scale variations
ell: float
        Characteristic length scale defining the kernel "smoothness"
"""
_param_names = 'theta', 'alpha', 'ell'
_tag = 'RQ'
    def __init__(self, theta, alpha, ell):
super(RationalQuadratic, self).__init__(theta, alpha, ell)
def __call__(self, r):
θ, α, ell = self.pars
return θ**2 * (1 + 0.5 * r**2 / (α * ell**2))**(-α)
##### RQP kernel ###############################################################
class RQP(covFunction):
"""
Definition of the product between the exponential sine squared kernel and
    the rational quadratic kernel, which we call the RQP kernel. As alpha
    increases this kernel approaches the QuasiPeriodic kernel, and in the
    limit alpha -> infinity the rational quadratic factor becomes a squared
    exponential, so the QuasiPeriodic kernel is recovered exactly.
Parameters
----------
theta: float
Amplitude
alpha: float
Alpha of the rational quadratic kernel
ell_e: float
Aperiodic length scale
P: float
Periodic repetitions of the kernel
ell_p: float
Periodic length scale
"""
    _param_names = 'theta', 'alpha', 'ell_e', 'P', 'ell_p'
_tag = 'RQP'
def __init__(self, theta, alpha, ell_e, P, ell_p):
super(RQP, self).__init__(theta, alpha, ell_e, P, ell_p)
def __call__(self, r):
θ, α, ℓe, P, ℓp = self.pars
return θ**2 * np.exp(-2 * np.sin(np.pi * np.abs(r) / P)**2 /
ℓp**2) * (1 + r**2 / (2 * α * ℓe**2))**(-α)
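

# Editor's note: for fixed r, (1 + r**2 / (2 * alpha * ell_e**2))**(-alpha)
# tends to exp(-r**2 / (2 * ell_e**2)) as alpha -> infinity, so the RQP kernel
# converges to the QuasiPeriodic kernel in that limit, as stated above.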
##### Cosine ###################################################################
class COSINE(covFunction):
"""
Definition of the cosine kernel
Parameters
----------
theta: float
Amplitude
P: float
Period
"""
_param_names = 'theta', 'P'
_tag = 'COS'
def __init__(self, theta, P):
super(COSINE, self).__init__(theta, P)
def __call__(self, r):
return self.pars[0]**2 * np.cos(2 * np.pi * np.abs(r) / self.pars[1])
##### Laplacian ##############################################################
class Laplacian(covFunction):
"""
Definition of the Laplacian kernel
Parameters
----------
theta: float
Amplitude
ell: float
        Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'LAP'
def __init__(self, theta, ell):
super(Laplacian, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * np.exp(-np.abs(r) / self.pars[1])
##### Exponential ##############################################################
class Exponential(covFunction):
"""
Definition of the exponential kernel
Parameters
----------
theta: float
Amplitude
ell: float
        Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'EXP'
def __init__(self, theta, ell):
super(Exponential, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * np.exp(-np.abs(r) / self.pars[1])
##### Matern 3/2 ###############################################################
class Matern32(covFunction):
"""
    Definition of the Matern 3/2 kernel. This kernel arises when setting
    v=3/2 in the Matern family of kernels.
Parameters
----------
theta: float
Amplitude
ell: float
        Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'M32'
def __init__(self, theta, ell):
super(Matern32, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * (
1.0 + np.sqrt(3.0) * np.abs(r) / self.pars[1]) * np.exp(
-np.sqrt(3.0) * np.abs(r) / self.pars[1])
#### Matern 5/2 ################################################################
class Matern52(covFunction):
"""
    Definition of the Matern 5/2 kernel. This kernel arises when setting v=5/2
    in the Matern family of kernels.
Parameters
----------
theta: float
Amplitude
ell: float
        Characteristic length scale
"""
_param_names = 'theta', 'ell'
_tag = 'M52'
def __init__(self, theta, ell):
super(Matern52, self).__init__(theta, ell)
def __call__(self, r):
return self.pars[0]**2 * (
1.0 +
(3 * np.sqrt(5) * self.pars[1] * np.abs(r) + 5 * np.abs(r)**2) /
(3 * self.pars[1]**2)) * np.exp(
-np.sqrt(5.0) * np.abs(r) / self.pars[1])
#### Linear ####################################################################
class Linear(covFunction):
"""
Definition of the Linear kernel
Parameters
----------
theta: float
Amplitude (should we even have an amplitude???)
c: float
Constant
"""
def __init__(self, theta, c):
super(Linear, self).__init__(theta, c)
self.tag = 'LIN'
self.theta = theta
self.c = c
def __call__(self, r, t1, t2):
return (t1 - self.pars[1]) * (t2 - self.pars[1])
##### Gamma-exponential ########################################################
class GammaExp(covFunction):
"""
Definition of the gamma-exponential kernel
Parameters
----------
theta: float
Amplitude
gamma: float
        Shape parameter (0 < gamma <= 2)
    l: float
        Length scale
Returns
-------
"""
def __init__(self, theta, gamma, l):
super(GammaExp, self).__init__(theta, gamma, l)
self.tag = 'GammaExp'
self.theta = theta
self.gamma = gamma
self.l = l
def __call__(self, r):
        return self.pars[0]**2 * np.exp(-(np.abs(r) / self.pars[2])**self.pars[1])
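

# Editor's note: special cases of GammaExp follow directly from the expression
# above: gamma=1 gives theta**2 * exp(-|r|/l), i.e. the Exponential/Laplacian
# kernel, and gamma=2 gives theta**2 * exp(-(r/l)**2), a squared exponential
# with length scale l / sqrt(2).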
##### Polynomial ###############################################################
class Polynomial(covFunction):
"""
Definition of the polynomial kernel
Parameters
----------
theta: float
Amplitude ???
a: float
Real value > 0
    b: float
Real value >= 0
c: int
Integer value
Returns
-------
"""
def __init__(self, theta, a, b, c):
super(Polynomial, self).__init__(theta, a, b, c)
self.tag = 'POLY'
self.theta = theta
self.a = a
self.b = b
self.c = c
def __call__(self, r, t1, t2):
return (self.pars[1] * t1 * t2 + self.pars[2])**self.pars[3]
##### Piecewise ################################################################
class Piecewise(covFunction):
"""
WARNING: EXPERIMENTAL KERNEL
Parameters
----------
"""
def __init__(self, eta):
super(Piecewise, self).__init__(eta)
self.eta = eta
self.type = 'unknown'
def __call__(self, | |
return self.copy()
else:
result = self._new_like_me(_get_common_dtype(self, other))
return self._axpbz(1, other, result)
__radd__ = __add__
def __sub__(self, other):
"""Substract an array from an array or a scalar from an array."""
if isinstance(other, GPUArray):
result = self._new_like_me(_get_common_dtype(self, other))
return self._axpbyz(1, other, -1, result)
else:
if other == 0:
return self.copy()
else:
# create a new array for the result
result = self._new_like_me(_get_common_dtype(self, other))
return self._axpbz(1, -other, result)
def __rsub__(self, other):
"""Substracts an array by a scalar or an array::
x = n - self
"""
# other must be a scalar
result = self._new_like_me(_get_common_dtype(self, other))
return self._axpbz(-1, other, result)
def __iadd__(self, other):
if isinstance(other, GPUArray):
return self._axpbyz(1, other, 1, self)
else:
return self._axpbz(1, other, self)
def __isub__(self, other):
if isinstance(other, GPUArray):
return self._axpbyz(1, other, -1, self)
else:
return self._axpbz(1, -other, self)
def __neg__(self):
result = self._new_like_me()
return self._axpbz(-1, 0, result)
def __mul__(self, other):
if isinstance(other, GPUArray):
result = self._new_like_me(_get_common_dtype(self, other))
return self._elwise_multiply(other, result)
else:
result = self._new_like_me(_get_common_dtype(self, other))
return self._axpbz(other, 0, result)
def __rmul__(self, scalar):
result = self._new_like_me(_get_common_dtype(self, scalar))
return self._axpbz(scalar, 0, result)
def __imul__(self, other):
if isinstance(other, GPUArray):
return self._elwise_multiply(other, self)
else:
return self._axpbz(other, 0, self)
def __div__(self, other):
"""Divides an array by an array or a scalar::
x = self / n
"""
if isinstance(other, GPUArray):
result = self._new_like_me(_get_common_dtype(self, other))
return self._div(other, result)
else:
if other == 1:
return self.copy()
else:
# create a new array for the result
result = self._new_like_me(_get_common_dtype(self, other))
return self._axpbz(1/other, 0, result)
__truediv__ = __div__
def __rdiv__(self, other):
"""Divides an array by a scalar or an array::
x = n / self
"""
# create a new array for the result
result = self._new_like_me(_get_common_dtype(self, other))
return self._rdiv_scalar(other, result)
__rtruediv__ = __rdiv__
def __idiv__(self, other):
"""Divides an array by an array or a scalar::
x /= n
"""
if isinstance(other, GPUArray):
return self._div(other, self)
else:
if other == 1:
return self
else:
return self._axpbz(1/other, 0, self)
__itruediv__ = __idiv__
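    # Illustrative usage sketch (editor addition): each operator above allocates
    # a result with _new_like_me (or writes in place for the i* variants) and
    # dispatches to an elementwise kernel, so e.g. with pycuda.autoinit active:
    #   a = to_gpu(np.arange(6, dtype=np.float32))
    #   b = (2 * a + 1) / a          # scalar ops via _axpbz, array ops via _div
    #   a += b                       # in-place via _axpbyz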
def fill(self, value, stream=None):
"""fills the array with the specified value"""
func = elementwise.get_fill_kernel(self.dtype)
func.prepared_async_call(self._grid, self._block, stream,
value, self.gpudata, self.mem_size)
return self
def bind_to_texref(self, texref, allow_offset=False):
return texref.set_address(self.gpudata, self.nbytes,
allow_offset=allow_offset) / self.dtype.itemsize
def bind_to_texref_ext(self, texref, channels=1, allow_double_hack=False,
allow_complex_hack=False, allow_offset=False):
if not self.flags.forc:
raise RuntimeError("only contiguous arrays may "
"be used as arguments to this operation")
if self.dtype == np.float64 and allow_double_hack:
if channels != 1:
raise ValueError(
"'fake' double precision textures can "
"only have one channel")
channels = 2
fmt = drv.array_format.SIGNED_INT32
read_as_int = True
elif self.dtype == np.complex64 and allow_complex_hack:
if channels != 1:
raise ValueError(
"'fake' complex64 textures can "
"only have one channel")
channels = 2
fmt = drv.array_format.UNSIGNED_INT32
read_as_int = True
elif self.dtype == np.complex128 and allow_complex_hack:
if channels != 1:
raise ValueError(
"'fake' complex128 textures can "
"only have one channel")
channels = 4
fmt = drv.array_format.SIGNED_INT32
read_as_int = True
else:
fmt = drv.dtype_to_array_format(self.dtype)
read_as_int = np.integer in self.dtype.type.__mro__
offset = texref.set_address(self.gpudata, self.nbytes,
allow_offset=allow_offset)
texref.set_format(fmt, channels)
if read_as_int:
texref.set_flags(texref.get_flags() | drv.TRSF_READ_AS_INTEGER)
return offset/self.dtype.itemsize
def __len__(self):
"""Return the size of the leading dimension of self."""
if len(self.shape):
return self.shape[0]
else:
            raise TypeError("scalar has no len()")
def __abs__(self):
"""Return a `GPUArray` of the absolute values of the elements
of `self`.
"""
result = self._new_like_me()
if self.dtype == np.float32:
fname = "fabsf"
elif self.dtype == np.float64:
fname = "fabs"
else:
fname = "abs"
if issubclass(self.dtype.type, np.complexfloating):
from pytools import match_precision
out_dtype = match_precision(np.dtype(np.float64), self.dtype)
result = self._new_like_me(out_dtype)
else:
out_dtype = self.dtype
func = elementwise.get_unary_func_kernel(fname, self.dtype,
out_dtype=out_dtype)
func.prepared_async_call(self._grid, self._block, None,
self.gpudata, result.gpudata, self.mem_size)
return result
def _pow(self, other, new):
"""
        Implement the pow operator.

        With new=True a new array is allocated for the result; with new=False
        the operation is performed in place (used by __ipow__).
"""
if isinstance(other, GPUArray):
if not self.flags.forc or not other.flags.forc:
raise RuntimeError("only contiguous arrays may "
"be used as arguments to this operation")
assert self.shape == other.shape
if new:
result = self._new_like_me(_get_common_dtype(self, other))
else:
result = self
func = elementwise.get_pow_array_kernel(
self.dtype, other.dtype, result.dtype)
func.prepared_async_call(self._grid, self._block, None,
self.gpudata, other.gpudata, result.gpudata,
self.mem_size)
return result
else:
if not self.flags.forc:
raise RuntimeError("only contiguous arrays may "
"be used as arguments to this operation")
if new:
result = self._new_like_me()
else:
result = self
func = elementwise.get_pow_kernel(self.dtype)
func.prepared_async_call(self._grid, self._block, None,
other, self.gpudata, result.gpudata,
self.mem_size)
return result
def __pow__(self, other):
"""pow function::
example:
            array = pow(array, 2)
            array = pow(array, 4)
            array = pow(array, array)
"""
return self._pow(other,new=True)
def __ipow__(self, other):
"""ipow function::
example:
array **= 4
array **= array
"""
return self._pow(other,new=False)
def reverse(self, stream=None):
"""Return this array in reversed order. The array is treated
as one-dimensional.
"""
if not self.flags.forc:
raise RuntimeError("only contiguous arrays may "
"be used as arguments to this operation")
result = self._new_like_me()
func = elementwise.get_reverse_kernel(self.dtype)
func.prepared_async_call(self._grid, self._block, stream,
self.gpudata, result.gpudata,
self.mem_size)
return result
def astype(self, dtype, stream=None):
if not self.flags.forc:
raise RuntimeError("only contiguous arrays may "
"be used as arguments to this operation")
if dtype == self.dtype:
return self.copy()
result = self._new_like_me(dtype=dtype)
func = elementwise.get_copy_kernel(dtype, self.dtype)
func.prepared_async_call(self._grid, self._block, stream,
result.gpudata, self.gpudata,
self.mem_size)
return result
def reshape(self, *shape, **kwargs):
"""Gives a new shape to an array without changing its data."""
# Python 2.x compatibility: use kwargs instead of named 'order' keyword
order = kwargs.pop("order", "C")
# TODO: add more error-checking, perhaps
if not self.flags.forc:
raise RuntimeError("only contiguous arrays may "
"be used as arguments to this operation")
if isinstance(shape[0], tuple) or isinstance(shape[0], list):
shape = tuple(shape[0])
same_contiguity = ((order == "C" and self.flags.c_contiguous) or
(order == "F" and self.flags.f_contiguous))
if shape == self.shape and same_contiguity:
return self
if -1 in shape:
shape = list(shape)
idx = shape.index(-1)
size = -reduce(lambda x, y: x * y, shape, 1)
shape[idx] = self.size // size
if -1 in shape[idx:]:
raise ValueError("can only specify one unknown dimension")
shape = tuple(shape)
size = reduce(lambda x, y: x * y, shape, 1)
if size != self.size:
raise ValueError("total size of new array must be unchanged")
return GPUArray(
shape=shape,
dtype=self.dtype,
allocator=self.allocator,
base=self,
gpudata=int(self.gpudata),
order=order)
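    # Editor's note: like numpy.reshape, a single -1 entry is inferred above as
    # self.size // (product of the specified dimensions), e.g. a.reshape(2, -1)
    # on a 6-element array yields shape (2, 3).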
def ravel(self):
return self.reshape(self.size)
def view(self, dtype=None):
if dtype is None:
dtype = self.dtype
old_itemsize = self.dtype.itemsize
itemsize = np.dtype(dtype).itemsize
from pytools import argmin2
min_stride_axis = argmin2(
(axis, abs(stride))
for axis, stride in enumerate(self.strides))
if self.shape[min_stride_axis] * old_itemsize % itemsize != 0:
raise ValueError("new type not compatible with array")
new_shape = (
self.shape[:min_stride_axis]
+ (self.shape[min_stride_axis] * old_itemsize // itemsize,)
+ self.shape[min_stride_axis+1:])
new_strides = (
self.strides[:min_stride_axis]
+ (self.strides[min_stride_axis] * itemsize // old_itemsize,)
+ self.strides[min_stride_axis+1:])
return GPUArray(
shape=new_shape,
dtype=dtype,
allocator=self.allocator,
strides=new_strides,
base=self,
gpudata=int(self.gpudata))
def squeeze(self):
"""
Returns a view of the array with dimensions of
length 1 removed.
"""
new_shape = tuple([dim for dim in self.shape if dim > 1])
new_strides = tuple([self.strides[i]
for i, dim in enumerate(self.shape) if dim > 1])
return GPUArray(
shape=new_shape,
dtype=self.dtype,
allocator=self.allocator,
strides=new_strides,
base=self,
gpudata=int(self.gpudata))
def transpose(self, axes=None):
"""Permute the dimensions of an array.
:arg axes: list of ints, optional.
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
:returns: :class:`GPUArray` A view of the array with its axes permuted.
.. versionadded:: 2015.2
"""
if axes is None:
axes = range(self.ndim-1, -1, -1)
if len(axes) != len(self.shape):
raise ValueError("axes don't match array")
new_shape = [self.shape[axes[i]] for i in range(len(axes))]
new_strides = [self.strides[axes[i]] for i in range(len(axes))]
return GPUArray(shape=tuple(new_shape),
dtype=self.dtype,
allocator=self.allocator,
base=self.base or self,
gpudata=self.gpudata,
strides=tuple(new_strides))
@property
def T(self): # noqa
"""
.. versionadded:: 2015.2
"""
return self.transpose()
# {{{ slicing
def __getitem__(self, index):
"""
.. versionadded:: 2013.1
"""
if not isinstance(index, tuple):
index = (index,)
new_shape = []
new_offset = 0
new_strides = []
seen_ellipsis = False
index_axis = 0
array_axis = 0
while index_axis < len(index):
index_entry = index[index_axis]
if array_axis > len(self.shape):
raise IndexError("too many axes in index")
if isinstance(index_entry, slice):
start, stop, idx_stride = index_entry.indices(
self.shape[array_axis])
array_stride = self.strides[array_axis]
new_shape.append((abs(stop-start)-1)//abs(idx_stride)+1)
new_strides.append(idx_stride*array_stride)
new_offset += array_stride*start
index_axis += 1
array_axis += 1
elif isinstance(index_entry, (int, np.integer)):
array_shape = self.shape[array_axis]
if index_entry < 0:
index_entry += array_shape
if not (0 <= index_entry < array_shape):
raise IndexError(
"subindex in axis %d | |
# tf_agents/networks/network.py
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base extension to Keras network to simplify copy operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tensorflow.keras import layers # pylint: disable=unused-import
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import common
from tf_agents.utils import object_identity
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.keras.utils import layer_utils # TF internal
from tensorflow.python.training.tracking import base # TF internal
from tensorflow.python.util import tf_decorator # TF internal
from tensorflow.python.util import tf_inspect # TF internal
# pylint:enable=g-direct-tensorflow-import
class _NetworkMeta(abc.ABCMeta):
"""Meta class for Network object.
We mainly use this class to capture all args to `__init__` of all `Network`
instances, and store them in `instance._saved_kwargs`. This in turn is
used by the `instance.copy` method.
"""
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Network class.
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
RuntimeError: if the class __init__ has *args in its signature.
"""
if baseclasses[0] == tf.keras.layers.Layer:
# This is just Network below. Return early.
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
init = attrs.get("__init__", None)
if not init:
# This wrapper class does not define an __init__. When someone creates
# the object, the __init__ of its parent class will be called. We will
# call that __init__ instead separately since the parent class is also a
# subclass of Network. Here just create the class and return.
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
arg_spec = tf_inspect.getargspec(init)
if arg_spec.varargs is not None:
raise RuntimeError(
"%s.__init__ function accepts *args. This is not allowed." %
classname)
def _capture_init(self, *args, **kwargs):
"""Captures init args and kwargs and stores them into `_saved_kwargs`."""
if len(args) > len(arg_spec.args) + 1:
# Error case: more inputs than args. Call init so that the appropriate
# error can be raised to the user.
init(self, *args, **kwargs)
# Convert to a canonical kwarg format.
kwargs = tf_inspect.getcallargs(init, self, *args, **kwargs)
kwargs.pop("self")
init(self, **kwargs)
# Avoid auto tracking which prevents keras from tracking layers that are
# passed as kwargs to the Network.
with base.no_automatic_dependency_tracking_scope(self):
setattr(self, "_saved_kwargs", kwargs)
attrs["__init__"] = tf_decorator.make_decorator(init, _capture_init)
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_NetworkMeta)
class Network(tf.keras.layers.Layer):
"""Base extension to Keras network to simplify copy operations."""
def __init__(self, input_tensor_spec, state_spec, name=None):
"""Creates an instance of `Network`.
Args:
input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
input observations.
state_spec: A nest of `tensor_spec.TensorSpec` representing the state
needed by the network. Use () if none.
name: (Optional.) A string representing the name of the network.
"""
super(Network, self).__init__(name=name)
common.check_tf1_allowed()
# Required for summary() to work.
self._is_graph_network = False
self._input_tensor_spec = input_tensor_spec
self._state_spec = state_spec
@property
def state_spec(self):
return self._state_spec
@property
def input_tensor_spec(self):
"""Returns the spec of the input to the network of type InputSpec."""
return self._input_tensor_spec
def create_variables(self, **kwargs):
if not self.built:
random_input = tensor_spec.sample_spec_nest(
self.input_tensor_spec, outer_dims=(1,))
random_state = tensor_spec.sample_spec_nest(
self.state_spec, outer_dims=(1,))
step_type = tf.fill((1,), time_step.StepType.FIRST)
self.__call__(
random_input, step_type=step_type, network_state=random_state,
**kwargs)
@property
def variables(self):
if not self.built:
raise ValueError(
"Network has not been built, unable to access variables. "
"Please call `create_variables` or apply the network first.")
return super(Network, self).variables
@property
def trainable_variables(self):
if not self.built:
raise ValueError(
"Network has not been built, unable to access variables. "
"Please call `create_variables` or apply the network first.")
return super(Network, self).trainable_variables
@property
def layers(self):
"""Get the list of all (nested) sub-layers used in this Network."""
return list(_filter_empty_layer_containers(self._layers))
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Arguments:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
"""
if index is not None and name is not None:
raise ValueError("Provide only a layer name or a layer index.")
if index is not None:
if len(self.layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self.layers)) +
" layers.")
else:
return self.layers[index]
if name is not None:
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name + ".")
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Args:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
if not self.built:
raise ValueError("This model has not yet been built. "
"Build the model first by calling `build()` or "
"`__call__()` with some data, or `create_variables()`.")
layer_utils.print_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
def copy(self, **kwargs):
"""Create a shallow copy of this network.
**NOTE** Network layer weights are *never* copied. This method recreates
the `Network` instance with the same arguments it was initialized with
(excepting any new kwargs).
Args:
**kwargs: Args to override when recreating this network. Commonly
overridden args include 'name'.
Returns:
A shallow copy of this network.
"""
return type(self)(**dict(self._saved_kwargs, **kwargs))
def __call__(self, inputs, *args, **kwargs):
"""A wrapper around `Network.call`.
A typical `call` method in a class subclassing `Network` looks like this:
```python
def call(self,
observation,
step_type=None,
network_state=(),
training=False):
...
return outputs, new_network_state
```
In this case, we will validate the first argument (`observation`)
against `self.input_tensor_spec`.
If a `network_state` kwarg is given it is also validated against
`self.state_spec`. Similarly, the return value
of the `call` method is expected to be a tuple/list with 2 values:
`(output, new_state)`; we validate `new_state` against `self.state_spec`.
Args:
      inputs: The inputs to `self.call`, matching `self.input_tensor_spec`.
*args: Additional arguments to `self.call`.
**kwargs: Additional keyword arguments to `self.call`.
Returns:
A tuple `(outputs, new_network_state)`.
"""
tf.nest.assert_same_structure(inputs, self.input_tensor_spec)
network_state = kwargs.get("network_state", None)
if network_state is not None:
tf.nest.assert_same_structure(network_state, self.state_spec)
outputs, new_state = super(Network, self).__call__(inputs, *args, **kwargs)
tf.nest.assert_same_structure(new_state, self.state_spec)
return outputs, new_state
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This method makes up for the missing method (b/143631010) of the same name
in `keras.Network`, which is needed when calling `Network.summary()`. This
method is a no op. If a Network wants to check the consistency of trainable
weights, see `keras.Model._check_trainable_weights_consistency` as a
reference.
"""
# TODO(b/143631010): If recognized and fixed, remove this entire method.
return
def get_initial_state(self, batch_size=None):
"""Returns an initial state usable by the network.
Args:
batch_size: Tensor or constant: size of the batch dimension. Can be None
        in which case no batch dimension is added.
Returns:
A nested object of type `self.state_spec` containing properly
initialized Tensors.
"""
return self._get_initial_state(batch_size)
def _get_initial_state(self, batch_size):
"""Returns the initial state of the policy network.
Args:
batch_size: A constant or Tensor holding the batch size. Can be None, in
which case the state will not have a batch dimension added.
Returns:
A nest of zero tensors matching the spec of the policy network state.
"""
return tensor_spec.zero_spec_nest(
self._state_spec,
outer_dims=None if batch_size is None else [batch_size])
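

# Illustrative sketch (editor addition, not part of tf_agents): a minimal
# Network subclass. Because _NetworkMeta captures the constructor kwargs into
# `_saved_kwargs`, calling `net.copy(name='clone')` rebuilds an equivalent,
# freshly initialized network without re-specifying the input spec.
class _ExampleIdentityNetwork(Network):
  """Stateless network that passes observations through unchanged."""

  def __init__(self, input_tensor_spec, name='ExampleIdentityNetwork'):
    super(_ExampleIdentityNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec, state_spec=(), name=name)

  def call(self, observation, step_type=None, network_state=(), training=False):
    return observation, network_state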
class DistributionNetwork(Network):
"""Base class for networks which generate Distributions as their output."""
def __init__(self, input_tensor_spec, state_spec, output_spec, name):
super(DistributionNetwork, self).__init__(
input_tensor_spec=input_tensor_spec, state_spec=state_spec, | |
'http://www.die-boersenformel.com/',
# Why: #2649 in Alexa global
'http://www.watchcartoononline.com/',
# Why: #2650 in Alexa global
'http://www.abclocal.go.com/',
# Why: #2651 in Alexa global
'http://www.techrepublic.com/',
# Why: #2652 in Alexa global
'http://www.just-fuck.com/',
# Why: #2653 in Alexa global
'http://www.camster.com/',
# Why: #2654 in Alexa global
'http://www.akairan.com/',
# Why: #2655 in Alexa global
'http://www.yeslibertin.com/',
# Why: #2656 in Alexa global
'http://www.abc.go.com/',
# Why: #2657 in Alexa global
'http://www.searchtherightwords.com/',
# Why: #2658 in Alexa global
'http://www.scotiabank.com/',
# Why: #2659 in Alexa global
'http://www.justclick.ru/',
# Why: #2660 in Alexa global
'http://www.douguo.com/',
# Why: #2661 in Alexa global
'http://www.discover.com/',
# Why: #2662 in Alexa global
'http://www.britishairways.com/',
# Why: #2663 in Alexa global
'http://www.mobafire.com/',
# Why: #2664 in Alexa global
'http://www.gi-akademie.ning.com/',
# Why: #2666 in Alexa global
'http://www.desirulez.net/',
# Why: #2667 in Alexa global
'http://www.qiushibaike.com/',
# Why: #2668 in Alexa global
'http://www.moonbasa.com/',
# Why: #2669 in Alexa global
'http://www.all.biz/',
# Why: #2670 in Alexa global
'http://www.tbs.co.jp/',
# Why: #2671 in Alexa global
'http://www.springer.com/',
# Why: #2672 in Alexa global
'http://www.emai.com/',
# Why: #2673 in Alexa global
'http://www.deadspin.com/',
# Why: #2674 in Alexa global
'http://www.hulkshare.com/',
# Why: #2675 in Alexa global
'http://www.fast-torrent.ru/',
# Why: #2676 in Alexa global
'http://www.oriflame.com/',
# Why: #2677 in Alexa global
'http://www.imgchili.net/',
# Why: #2678 in Alexa global
'http://www.mega-juegos.mx/',
# Why: #2679 in Alexa global
'http://www.gyazo.com/',
# Why: #2680 in Alexa global
'http://www.persianv.com/',
# Why: #2681 in Alexa global
'http://www.adk2.com/',
# Why: #2682 in Alexa global
'http://www.ingbank.pl/',
# Why: #2683 in Alexa global
'http://www.nationalconsumercenter.com/',
# Why: #2684 in Alexa global
'http://www.xxxkinky.com/',
# Why: #2685 in Alexa global
'http://www.mywot.com/',
# Why: #2686 in Alexa global
'http://www.gaymaletube.com/',
# Why: #2687 in Alexa global
'http://www.1tv.ru/',
# Why: #2688 in Alexa global
'http://www.manutd.com/',
# Why: #2689 in Alexa global
'http://www.merchantcircle.com/',
# Why: #2691 in Alexa global
'http://www.canalblog.com/',
# Why: #2692 in Alexa global
'http://www.capitalone360.com/',
# Why: #2693 in Alexa global
'http://www.tlbb8.com/',
# Why: #2694 in Alexa global
'http://www.softonic.fr/',
# Why: #2695 in Alexa global
'http://www.ccavenue.com/',
# Why: #2696 in Alexa global
'http://www.vector.co.jp/',
# Why: #2697 in Alexa global
'http://www.tyroodr.com/',
# Why: #2698 in Alexa global
'http://exam8.com/',
# Why: #2699 in Alexa global
'http://www.allmusic.com/',
# Why: #2700 in Alexa global
'http://www.stubhub.com/',
# Why: #2701 in Alexa global
'http://www.arcor.de/',
# Why: #2702 in Alexa global
'http://www.yolasite.com/',
# Why: #2703 in Alexa global
'http://www.haraj.com.sa/',
# Why: #2704 in Alexa global
'http://www.mypopup.ir/',
# Why: #2705 in Alexa global
'http://www.memurlar.net/',
# Why: #2706 in Alexa global
'http://www.smugmug.com/',
# Why: #2707 in Alexa global
'http://www.filefactory.com/',
# Why: #2708 in Alexa global
'http://www.fantasti.cc/',
# Why: #2709 in Alexa global
'http://www.bokra.net/',
# Why: #2710 in Alexa global
'http://www.goarticles.com/',
# Why: #2711 in Alexa global
'http://www.empowernetwork.com/2Se8w/',
# Why: #2712 in Alexa global
'http://www.moneysavingexpert.com/',
# Why: #2713 in Alexa global
'http://www.donga.com/',
# Why: #2714 in Alexa global
'http://www.lastminute.com/',
# Why: #2715 in Alexa global
'http://www.xkcd.com/',
# Why: #2716 in Alexa global
'http://www.sou300.com/',
# Why: #2717 in Alexa global
'http://www.magnovideo.com/',
# Why: #2718 in Alexa global
'http://www.inquirer.net/',
# Why: #2719 in Alexa global
'http://www.phoenix.edu/',
# Why: #2721 in Alexa global
'http://www.videogenesis.com/',
# Why: #2722 in Alexa global
'http://www.thestar.com/',
# Why: #2723 in Alexa global
'http://www.tripadvisor.es/',
# Why: #2724 in Alexa global
'http://www.blankrefer.com/',
# Why: #2725 in Alexa global
'http://www.yle.fi/',
# Why: #2726 in Alexa global
'http://www.beamtele.com/',
# Why: #2727 in Alexa global
'http://www.oanda.com/',
# Why: #2728 in Alexa global
'http://www.yaplog.jp/',
# Why: #2729 in Alexa global
'http://www.iheart.com/',
# Why: #2730 in Alexa global
'http://www.google.co.tz/',
# Why: #2731 in Alexa global
'http://www.stargazete.com/',
# Why: #2732 in Alexa global
'http://www.bossip.com/',
# Why: #2733 in Alexa global
'http://www.defaultsear.ch/',
# Why: #2734 in Alexa global
'http://www.thaiseoboard.com/',
# Why: #2735 in Alexa global
'http://www.qinbei.com/',
# Why: #2736 in Alexa global
'http://www.ninisite.com/',
# Why: #2737 in Alexa global
'http://www.j.gs/',
# Why: #2738 in Alexa global
'http://www.xinmin.cn/',
# Why: #2739 in Alexa global
'http://www.nos.nl/',
# Why: #2740 in Alexa global
'http://www.qualtrics.com/',
# Why: #2741 in Alexa global
'http://www.kommersant.ru/',
# Why: #2743 in Alexa global
'http://www.urban-rivals.com/',
# Why: #2744 in Alexa global
'http://www.computerbild.de/',
# Why: #2745 in Alexa global
'http://www.fararu.com/',
# Why: #2746 in Alexa global
'http://www.menshealth.com/',
# Why: #2747 in Alexa global
'http://www.jobstreet.com/',
# Why: #2749 in Alexa global
'http://www.rbcroyalbank.com/',
# Why: #2750 in Alexa global
'http://www.inmotionhosting.com/',
# Why: #2751 in Alexa global
'http://www.surveyrouter.com/',
# Why: #2752 in Alexa global
'http://www.kankanews.com/',
# Why: #2753 in Alexa global
'http://www.aol.de/',
# Why: #2754 in Alexa global
'http://www.bol.com/',
# Why: #2755 in Alexa global
'http://www.datpiff.com/',
# Why: #2757 in Alexa global
'http://mplife.com/',
# Why: #2758 in Alexa global
'http://www.sale-fire.com/',
# Why: #2759 in Alexa global
'http://www.inbox.lv/',
# Why: #2760 in Alexa global
'http://www.offeratum.com/',
# Why: #2761 in Alexa global
'http://www.pandora.tv/',
# Why: #2762 in Alexa global
'http://www.eltiempo.com/',
# Why: #2763 in Alexa global
'http://www.indiarailinfo.com/',
# Why: #2764 in Alexa global
'http://www.solidtrustpay.com/',
# Why: #2765 in Alexa global
'http://www.warthunder.ru/',
# Why: #2766 in Alexa global
'http://www.kuronekoyamato.co.jp/',
# Why: #2767 in Alexa global
'http://www.novamov.com/',
# Why: #2768 in Alexa global
'http://www.folkd.com/',
# Why: #2769 in Alexa global
'http://www.envato.com/',
# Why: #2770 in Alexa global
'http://www.wetpaint.com/',
# Why: #2771 in Alexa global
'http://www.tempo.co/',
# Why: #2772 in Alexa global
'http://www.howtogeek.com/',
# Why: #2773 in Alexa global
'http://www.foundationapi.com/',
# Why: #2774 in Alexa global
'http://www.zjol.com.cn/',
# Why: #2775 in Alexa global
'http://www.care2.com/',
# Why: #2776 in Alexa global
'http://www.bendibao.com/',
# Why: #2777 in Alexa global
'http://www.mazika2day.com/',
# Why: #2779 in Alexa global
'http://www.asda.com/',
# Why: #2780 in Alexa global
'http://www.nowvideo.ch/',
# Why: #2781 in Alexa global
'http://www.hiapk.com/',
# Why: #2782 in Alexa global
'http://17u.com/',
# Why: #2783 in Alexa global
'http://www.tutu.ru/',
# Why: #2784 in Alexa global
'http://www.ncdownloader.com/',
# Why: #2785 in Alexa global
'http://www.warez-bb.org/',
# Why: #2786 in Alexa global
'http://www.jsoftj.com/',
# Why: #2787 in Alexa global
'http://www.batepapo.uol.com.br/',
# Why: #2788 in Alexa global
'http://www.xmarks.com/',
# Why: #2789 in Alexa global
'http://www.36kr.com/',
# Why: #2790 in Alexa global
'http://www.runetki.com/',
# Why: #2791 in Alexa global
'http://www.quoka.de/',
# Why: #2792 in Alexa global
'http://www.heureka.cz/',
# Why: #2793 in Alexa global
'http://www.sbisec.co.jp/',
# Why: #2794 in Alexa global
'http://www.monografias.com/',
# Why: #2796 in Alexa global
'http://www.zhenai.com/',
# Why: #2797 in Alexa global
'http://www.4porn.com/',
# Why: #2798 in Alexa global
'http://www.antena3.com/',
# Why: #2799 in Alexa global
'http://lintas.me/',
# Why: #2800 in Alexa global
'http://www.seroundtable.com/',
# Why: #2802 in Alexa global
'http://www.e1.ru/',
# Why: #2803 in Alexa global
'http://www.berkeley.edu/',
# Why: #2804 in Alexa global
'http://www.officedepot.com/',
# Why: #2805 in Alexa global
'http://www.myflorida.com/',
# Why: #2806 in Alexa global
'http://www.parispornmovies.com/',
# Why: #2807 in Alexa global
'http://www.uniqlo.com/',
# Why: #2808 in Alexa global
'http://www.topky.sk/',
# Why: #2809 in Alexa global
'http://www.lumovies.com/',
# Why: #2810 in Alexa global
'http://www.buysellads.com/',
# Why: #2811 in Alexa global
'http://www.stirileprotv.ro/',
# Why: #2812 in Alexa global
'http://www.scottrade.com/',
# Why: #2813 in Alexa global
'http://www.tiboo.cn/',
# Why: #2814 in Alexa global
'http://www.mmtrends.net/',
# Why: #2815 in Alexa global
'http://www.wholesale-dress.net/',
# Why: #2816 in Alexa global
'http://www.metacritic.com/',
# Why: #2817 in Alexa global
'http://www.pichunter.com/',
# Why: #2818 in Alexa global
'http://www.moneybookers.com/',
# Why: #2819 in Alexa global
'http://www.idealista.com/',
# Why: #2820 in Alexa global
'http://www.buzzle.com/',
# Why: #2821 in Alexa global
'http://www.rcom.co.in/',
# Why: #2822 in Alexa global
'http://www.weightwatchers.com/',
# Why: #2823 in Alexa global
'http://www.itv.com/',
# Why: #2824 in Alexa global
'http://www.inilah.com/',
# Why: #2825 in Alexa global
'http://www.vic.gov.au/',
# Why: #2826 in Alexa global
'http://www.prom.ua/',
# Why: #2827 in Alexa global
'http://www.with2.net/',
# Why: #2828 in Alexa global
'http://www.suumo.jp/',
# Why: #2830 in Alexa global
'http://www.doodle.com/',
# Why: #2831 in Alexa global
'http://www.trafficbroker.com/',
# Why: #2832 in Alexa global
'http://www.h33t.com/',
# Why: #2833 in Alexa global
'http://www.avaaz.org/',
# Why: #2834 in Alexa global
'http://www.maultalk.com/',
# Why: #2835 in Alexa global
'http://www.bmo.com/',
# Why: #2836 in Alexa global
'http://www.nerdbux.com/',
# Why: #2837 in Alexa global
'http://www.abnamro.nl/',
# Why: #2838 in Alexa global
'http://www.didigames.com/',
# Why: #2839 in Alexa global
'http://www.pornorama.com/',
# Why: #2840 in Alexa global
'http://www.forumotion.com/',
# Why: #2841 in Alexa | |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Python client library of Tkrzw-RPC
#
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#--------------------------------------------------------------------------------------------------
import grpc
import pathlib
import sys
import threading
import time
sys.path.append(str(pathlib.Path(__file__).parent))
from . import tkrzw_rpc_pb2
from . import tkrzw_rpc_pb2_grpc
def _StrGRPCError(error):
code_name = str(error.code())
delim_pos = code_name.find(".")
if delim_pos >= 0:
code_name = code_name[delim_pos + 1:]
details = error.details()
if details:
return code_name + ": " + details
return code_name
def _MakeStatusFromProto(proto_status):
return Status(proto_status.code, proto_status.message)
def _SetStatusFromProto(status, proto_status):
status.Set(proto_status.code, proto_status.message)
def _MakeBytes(obj):
if isinstance(obj, bytes):
return obj
if not isinstance(obj, str):
obj = str(obj)
return obj.encode('utf-8')
class Status:
"""
Status of operations.
"""
SUCCESS = 0
"""Success."""
UNKNOWN_ERROR = 1
"""Generic error whose cause is unknown."""
SYSTEM_ERROR = 2
"""Generic error from underlying systems."""
NOT_IMPLEMENTED_ERROR = 3
"""Error that the feature is not implemented."""
PRECONDITION_ERROR = 4
"""Error that a precondition is not met."""
INVALID_ARGUMENT_ERROR = 5
"""Error that a given argument is invalid."""
CANCELED_ERROR = 6
"""Error that the operation is canceled."""
NOT_FOUND_ERROR = 7
"""Error that a specific resource is not found."""
PERMISSION_ERROR = 8
"""Error that the operation is not permitted."""
INFEASIBLE_ERROR = 9
"""Error that the operation is infeasible."""
DUPLICATION_ERROR = 10
"""Error that a specific resource is duplicated."""
BROKEN_DATA_ERROR = 11
"""Error that internal data are broken."""
NETWORK_ERROR = 12
"""Error caused by networking failure."""
APPLICATION_ERROR = 13
"""Generic error caused by the application logic."""
def __init__(self, code=SUCCESS, message=""):
"""
Sets the code and the message.
:param code: The status code. This can be omitted and then SUCCESS is set.
    :param message: An arbitrary status message. This can be omitted, in which case an empty string is set.
"""
self.code = code
self.message = message
def __repr__(self):
"""
Returns a string representation of the object.
:return: The string representation of the object.
"""
if self.message:
return "<tkrzw_rpc.Status: " + self.CodeName(self.code) + ": " + self.message + ">"
return "<tkrzw_rpc.Status: " + self.CodeName(self.code) + ">"
def __str__(self):
"""
Returns a string representation of the content.
:return: The string representation of the content.
"""
if self.message:
return self.CodeName(self.code) + ": " + self.message
return self.CodeName(self.code)
def __eq__(self, rhs):
"""
Returns true if the given object is equivalent to this object.
:return: True if the given object is equivalent to this object.
This supports comparison between a status object and a status code number.
"""
if isinstance(rhs, type(self)):
return self.code == rhs.code
if isinstance(rhs, int):
return self.code == rhs
return False
def Set(self, code=SUCCESS, message=""):
"""
Sets the code and the message.
:param code: The status code. This can be omitted and then SUCCESS is set.
    :param message: An arbitrary status message. This can be omitted, in which case an empty string is set.
"""
self.code = code
self.message = message
def Join(self, rht):
"""
Assigns the internal state from another status object only if the current state is success.
    :param rht: The status object.
"""
if self.code == self.SUCCESS:
self.code = rht.code
self.message = rht.message
def GetCode(self):
"""
Gets the status code.
:return: The status code.
"""
return self.code
def GetMessage(self):
"""
Gets the status message.
:return: The status message.
"""
return self.message
def IsOK(self):
"""
Returns true if the status is success.
:return: True if the status is success, or False on failure.
"""
return self.code == self.SUCCESS
def OrDie(self):
"""
Raises an exception if the status is not success.
:raise StatusException: An exception containing the status object.
"""
if self.code != self.SUCCESS:
raise StatusException(self)
@classmethod
def CodeName(cls, code):
"""
Gets the string name of a status code.
    :param code: The status code.
:return: The name of the status code.
"""
if code == cls.SUCCESS: return "SUCCESS"
if code == cls.UNKNOWN_ERROR: return "UNKNOWN_ERROR"
if code == cls.SYSTEM_ERROR: return "SYSTEM_ERROR"
if code == cls.NOT_IMPLEMENTED_ERROR: return "NOT_IMPLEMENTED_ERROR"
if code == cls.PRECONDITION_ERROR: return "PRECONDITION_ERROR"
if code == cls.INVALID_ARGUMENT_ERROR: return "INVALID_ARGUMENT_ERROR"
if code == cls.CANCELED_ERROR: return "CANCELED_ERROR"
if code == cls.NOT_FOUND_ERROR: return "NOT_FOUND_ERROR"
if code == cls.PERMISSION_ERROR: return "PERMISSION_ERROR"
if code == cls.INFEASIBLE_ERROR: return "INFEASIBLE_ERROR"
if code == cls.DUPLICATION_ERROR: return "DUPLICATION_ERROR"
if code == cls.BROKEN_DATA_ERROR: return "BROKEN_DATA_ERROR"
if code == cls.NETWORK_ERROR: return "NETWORK_ERROR"
if code == cls.APPLICATION_ERROR: return "APPLICATION_ERROR"
return "unknown"
class StatusException(RuntimeError):
"""
Exception to convey the status of operations.
"""
def __init__(self, status):
"""
Sets the status.
:param status: The status object.
"""
self.status = status
def __repr__(self):
"""
    Returns a string representation of the object.
:return: The string representation of the object.
"""
return "<tkrzw_rpc.StatusException: " + str(self.status) + ">"
def __str__(self):
"""
    Returns a string representation of the content.
:return: The string representation of the content.
"""
return str(self.status)
def GetStatus(self):
"""
Gets the status object
:return: The status object.
"""
return self.status
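

# Illustrative sketch (editor addition): the intended interplay between Status
# and StatusException defined above -- Join adopts the first failure, and OrDie
# converts a non-success status into an exception.
def _example_status_handling():
  first = Status(Status.NOT_FOUND_ERROR, "missing record")
  combined = Status()
  combined.Join(first)
  try:
    combined.OrDie()
  except StatusException as exception:
    return str(exception.GetStatus())
  return "no error"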
class RemoteDBM:
"""
Remote database manager.
  All operations are thread-safe; multiple threads can access the same database
  concurrently. The SetDBMIndex method affects all threads, so it should be
  called before the object is shared. This class implements the iterable
  protocol, so an instance is usable with a "for-in" loop.
"""
ANY_DATA = b"\x00[ANY]\x00"
"""The special bytes value for no-operation or any data."""
def __init__(self):
"""
Does nothing especially.
"""
self.channel = None
self.stub = None
self.timeout = None
self.dbm_index = 0
def __repr__(self):
"""
    Returns a string representation of the object.
:return: The string representation of the object.
"""
expr = "connected" if self.channel else "not connected"
return "<tkrzw_rpc.RemoteDBM: " + hex(id(self)) + ": " + expr + ">"
def __str__(self):
"""
    Returns a string representation of the content.
:return: The string representation of the content.
"""
expr = "connected" if self.channel else "not connected"
return "RemoteDBM: " + hex(id(self)) + ": " + expr
def __len__(self):
"""
Gets the number of records, to enable the len operator.
:return: The number of records on success, or 0 on failure.
"""
if not self.channel:
return 0
request = tkrzw_rpc_pb2.CountRequest()
request.dbm_index = self.dbm_index
try:
response = self.stub.Count(request, timeout=self.timeout)
except grpc.RpcError as error:
return 0
return response.count
def __contains__(self, key):
"""
Checks if a record exists or not, to enable the in operator.
:param key: The key of the record.
:return: True if the record exists, or False if not. No exception is raised for missing records.
"""
if not self.channel:
raise StatusException(Status(Status.PRECONDITION_ERROR, "not opened connection"))
request = tkrzw_rpc_pb2.GetRequest()
request.dbm_index = self.dbm_index
request.key = _MakeBytes(key)
request.omit_value = True
try:
response = self.stub.Get(request, timeout=self.timeout)
except grpc.RpcError as error:
raise StatusException(Status(Status.NETWORK_ERROR, _StrGRPCError(error)))
return response.status.code == Status.SUCCESS
def __getitem__(self, key):
"""
Gets the value of a record, to enable the [] operator.
:param key: The key of the record.
:return: The value of the matching record. An exception is raised for missing records. If the given key is a string, the returned value is also a string. Otherwise, the return value is bytes.
:raise StatusException: An exception containing the status object.
"""
if not self.channel:
raise StatusException(Status(Status.PRECONDITION_ERROR, "not opened connection"))
request = tkrzw_rpc_pb2.GetRequest()
request.dbm_index = self.dbm_index
request.key = _MakeBytes(key)
try:
response = self.stub.Get(request, timeout=self.timeout)
except grpc.RpcError as error:
raise StatusException(Status(Status.NETWORK_ERROR, _StrGRPCError(error)))
if response.status.code != Status.SUCCESS:
raise StatusException(_MakeStatusFromProto(response.status))
if isinstance(key, str):
return response.value.decode("utf-8", "replace")
return response.value
def __setitem__(self, key, value):
"""
Sets a record of a key and a value, to enable the []= operator.
:param key: The key of the record.
:param value: The value of the record.
:raise StatusException: An exception containing the status object.
"""
if not self.channel:
raise StatusException(Status(Status.PRECONDITION_ERROR, "not opened connection"))
request = tkrzw_rpc_pb2.SetRequest()
request.dbm_index = self.dbm_index
request.key = _MakeBytes(key)
request.value = _MakeBytes(value)
request.overwrite = True
try:
response = self.stub.Set(request, | |
    # coefficient (visible)
expfac = F.expression(
'sqrt(ameanv) * akb * F',
{'ameanv': ameanv, 'akb': akb, 'F': F})
xnum = F.expression(
'(rbcpyv * rbcpyv - 1.0) * exp(-expfac)',
{'rbcpyv': rbcpyv, 'expfac': expfac})
xden = F.expression(
'(rbcpyv * rsoilv - 1.0) + '
'rbcpyv * (rbcpyv - rsoilv) * exp(-2.0 * expfac)',
{'rbcpyv': rbcpyv, 'rsoilv': rsoilv, 'expfac': expfac})
# Eq 15.11
taubtv = F.expression('xnum / xden', {'xnum': xnum, 'xden': xden})
# print('\nexpfac: {:>20.14f}'.format(utils.image_value(expfac).values()[0]))
# print('rbcpyv: {:>20.14f}'.format(utils.image_value(rbcpyv).values()[0]))
# print('rsoilv: {:>20.14f}'.format(utils.image_value(rsoilv).values()[0]))
# print('xnum: {:>20.14f}'.format(utils.image_value(xnum).values()[0]))
# print('xden: {:>20.14f}'.format(utils.image_value(xden).values()[0]))
# print('taubtv: {:>20.14f}'.format(utils.image_value(taubtv).values()[0]))
# Direct beam+scattered canopy transmission coefficient (NIR)
expfac = F.expression(
'sqrt(ameann) * akb * F',
{'ameann': ameann, 'akb': akb, 'F': F})
xnum = F.expression(
'(rbcpyn * rbcpyn - 1.0) * exp(-expfac)',
{'rbcpyn': rbcpyn, 'expfac': expfac})
xden = F.expression(
'(rbcpyn * rsoiln - 1.0) + '
'rbcpyn * (rbcpyn - rsoiln) * exp(-2.0 * expfac)',
{'rbcpyn': rbcpyn, 'rsoiln': rsoiln, 'expfac': expfac})
# Eq 15.11
taubtn = F.expression('xnum / xden', {'xnum': xnum, 'xden': xden})
# print('\nexpfac: {:>20.14f}'.format(utils.image_value(expfac).values()[0]))
# print('rbcpyn: {:>20.14f}'.format(utils.image_value(rbcpyn).values()[0]))
# print('rsoiln: {:>20.14f}'.format(utils.image_value(rsoiln).values()[0]))
# print('xnum: {:>20.14f}'.format(utils.image_value(xnum).values()[0]))
# print('xden: {:>20.14f}'.format(utils.image_value(xden).values()[0]))
# print('taubtn: {:>20.14f}'.format(utils.image_value(taubtn).values()[0]))
# Shortwave radiation components
tausolar = F.expression(
'fvis * (difvis * taudv + dirvis * taubtv) + '
'fnir * (difnir * taudn + dirnir * taubtn)',
{'difnir': difnir, 'difvis': difvis,
'dirnir': dirnir, 'dirvis': dirvis,
'fnir': fnir, 'fvis': fvis,
'taubtn': taubtn, 'taubtv': taubtv,
'taudn': taudn, 'taudv': taudv})
# print('tausolar: {}'.format(utils.image_value(tausolar).values()[0]))
# print('Rs_1: {}'.format(utils.image_value(Rs_1).values()[0]))
Rs_c = Rs_1.expression(
'Rs_1 * (1.0 - tausolar)', {'Rs_1': Rs_1, 'tausolar': tausolar})
Rs_s = Rs_1.expression(
'Rs_1 * tausolar', {'Rs_1': Rs_1, 'tausolar': tausolar})
# print('\nRs_c: {:>20.14f}'.format(utils.image_value(Rs_c).values()[0]))
# print('Rs_s: {:>20.14f}'.format(utils.image_value(Rs_s).values()[0]))
# print('albedo_c: {:>20.14f}'.format(utils.image_value(albedo_c).values()[0]))
# print('albedo_s: {:>20.14f}'.format(utils.image_value(albedo_s).values()[0]))
return Rs_c, Rs_s, albedo_c, albedo_s
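# Hedged illustration (editor's sketch, not used by the model): the same
# shortwave partition as above with plain floats instead of ee.Image objects.
# The input values are invented for demonstration.
def _sketch_shortwave_partition(Rs_1=800.0, tausolar=0.35):
    """Split incoming shortwave Rs_1 [W m-2] into canopy and soil terms."""
    Rs_c = Rs_1 * (1.0 - tausolar)   # intercepted by the canopy
    Rs_s = Rs_1 * tausolar           # transmitted to the soil surface
    return Rs_c, Rs_s                # (520.0, 280.0) for the default inputs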
def compute_G0(Rn, Rn_s, albedo, ndvi, t_rise, t_end, time, EF_s):
"""
Parameters
----------
Rn : ee.Image
Rn_s : ee.Image
albedo : ee.Image
ndvi : ee.Image
t_rise : ee.Image
t_end : ee.Image
time :
EF_s :
Returns
-------
G0 : ee.Image
"""
w = EF_s.expression('1 / (1 + (EF_s / 0.5) ** 8.0)', {'EF_s': EF_s})
# Maximum fraction of Rn,s that become G0
# (0.35 for dry soil and 0.31 for wet soil)
c_g = w.expression('(w * 0.35) + ((1 - w) * 0.31)', {'w': w})
t_g = w.expression('(w * 100000.0) + ((1 - w) * 74000.0)', {'w': w})
t_noon = t_rise.expression(
'0.5 * (t_rise + t_end)', {'t_rise': t_rise, 't_end': t_end})
t_g0 = t_noon.expression(
'(time - t_noon) * 3600.0', {'time': time, 't_noon': t_noon})
G0 = Rn_s.expression(
'c_g * cos(2 * pi * (t_g0 + 10800.0) / t_g) * Rn_s',
{'c_g': c_g, 'pi': math.pi, 'Rn_s': Rn_s, 't_g': t_g, 't_g0': t_g0})
water_mask = ndvi.lte(0).And(albedo.lte(0.05))
G0 = G0.where(water_mask, Rn.multiply(0.5))
return G0
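# Hedged illustration (editor's sketch, not used by the model): the diurnal
# soil heat flux shape applied in compute_G0, written with plain floats.
# All numbers below are illustrative only.
def _sketch_G0(Rn_s=200.0, c_g=0.33, t_g=100000.0, time_hr=14.0, t_noon_hr=12.5):
    """G0 = c_g * cos(2*pi*(t_g0 + 10800) / t_g) * Rn_s, t_g0 in seconds past solar noon."""
    import math
    t_g0 = (time_hr - t_noon_hr) * 3600.0
    return c_g * math.cos(2.0 * math.pi * (t_g0 + 10800.0) / t_g) * Rn_s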
def compute_resistance(u, T_s, T_c, hc, F, d0, z0m, z0h, z_u, z_t, xl,
leaf, leaf_s, leaf_c, fm, fh, fm_h):
"""
Parameters
----------
u : ee.Image
T_s : ee.Image
T_c : ee.Image
hc : ee.Image
F : ee.Image
Input is LAI?
d0
z0m
z0h
z_u
z_t
xl
leaf
leaf_s
leaf_c
fm
fh
fm_h
Returns
-------
r_ah
r_s
r_x
u_attr
"""
# Free convective velocity constant for r_s modelling
c_a = 0.004
# Empirical constant for r_s modelling
c_b = 0.012
# Empirical constant for r_s modelling
# (new formulation Kustas and Norman, 1999)
c_c = 0.0025
# Parameter for canopy boundary-layer resistance
# (C=90 Grace '81, C=175 Cheubouni 2001, 144 Li '98)
C = 175.
# Computation of friction velocity and aerodynamic resistance
u_attr = u \
.expression(
'0.41 * u / ((log((z_u - d0) / z0m)) - fm)',
{'d0': d0, 'fm': fm, 'u': u, 'z0m': z0m, 'z_u': z_u})
u_attr = u_attr.where(u_attr.eq(0), 10)
u_attr = u_attr.where(u_attr.lte(0), 0.01)
r_ah = u.expression(
'((log((z_t - d0) / z0h)) - fh) / u_attr / 0.41',
{'d0': d0, 'fh': fh, 'u_attr': u_attr, 'z0h': z0h, 'z_t': z_t})
# CGM - The second conditional will overwrite the first one?
r_ah = r_ah.where(r_ah.eq(0), 500)
r_ah = r_ah.where(r_ah.lte(1.0), 1.0)
# DEADBEEF
# r_ah[r_ah == 0] = 500.
# r_ah[r_ah <= 1.] = 1.
# Computation of the resistance of the air between soil and canopy space
u_c = u.expression(
'u_attr / 0.41 * ((log((hc - d0) / z0m)) - fm_h)',
{'d0': d0, 'fm_h': fm_h, 'hc': hc, 'u_attr': u_attr, 'z0m': z0m})
u_c = u_c.where(u_c.lte(0), 0.1)
u_s = u.expression(
'u_c * exp(-leaf * (1 - (0.05 / hc)))',
{'hc': hc, 'leaf': leaf, 'u_c': u_c})
r_ss = u.expression(
'1.0 / (c_a + (c_b * (u_c * exp(-leaf_s * (1.0 - (0.05 / hc))))))',
{'c_a': c_a, 'c_b': c_b, 'hc': hc, 'leaf_s': leaf_s, 'u_c': u_c})
r_s1 = T_s.expression(
'1.0 / ((((abs(T_s - T_c)) ** (1.0 / 3.0)) * c_c) + (c_b * u_s))',
{'c_b': c_b, 'c_c': c_c, 'T_c': T_c, 'T_s': T_s, 'u_s': u_s})
r_s2 = u.expression(
'1.0 / (c_a + (c_b * u_s))', {'c_a': c_a, 'c_b': c_b, 'u_s': u_s})
r_s = u.expression(
'(((r_ss - 1.0) / 0.09 * (F - 0.01)) + 1.0)', {'F': F, 'r_ss': r_ss})
# Linear function between 0 (bare soil) and the value at F=0.1
r_s = r_s.where(F.gt(0.1), r_s1)
r_s = r_s.where(T_s.subtract(T_c).abs().lt(1), r_s2)
# Use "new" formula only for high DT values
# Use "new" formula only for partial coverage (lai<3)
r_s = r_s.where(F.gt(3), r_s2)
# Computation of the canopy boundary layer resistance
u_d = u.expression(
'u_c * exp(-leaf_c * (1 - ((d0 + z0m) / hc)))',
{'d0': d0, 'hc': hc, 'leaf_c': leaf_c, 'u_c': u_c, 'z0m': z0m})
u_d = u_d.where(u_d.lte(0), 100)
r_x = u.expression(
'C / F * ((xl / u_d) ** 0.5)', {'C': C, 'F': F, 'u_d': u_d, 'xl': xl})
r_x = r_x.where(u_d.eq(100), 0.1)
return r_ah, r_s, r_x, u_attr
def compute_u_attr(u, d0, z0m, z_u, fm):
"""Friction Velocity
Parameters
----------
u : ee.Image
d0
z0m
z_u
fm
Returns
-------
u_attr
"""
u_attr = u.expression(
'0.41 * u / ((log((z_u - d0) / z0m)) - fm)',
{'d0': d0, 'fm': fm, 'u': u, 'z0m': z0m, 'z_u': z_u})
u_attr = u_attr.where(u_attr.eq(0), 10)
u_attr = u_attr.where(u_attr.lte(0), 0.01)
return u_attr
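# Hedged illustration (editor's sketch, not used by the model): the log-profile
# friction velocity computed by compute_u_attr, with scalar inputs. The values
# below (10 m wind, 0.65 m displacement height, neutral stability fm=0) are
# illustrative only.
def _sketch_u_attr(u=3.0, z_u=10.0, d0=0.65, z0m=0.1, fm=0.0):
    import math
    return 0.41 * u / (math.log((z_u - d0) / z0m) - fm)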
def compute_r_ah(u_attr, d0, z0h, z_t, fh):
"""
Parameters
----------
u_attr : ee.Image
d0
z0h
z_t
fh
Returns
-------
r_ah
"""
r_ah = u_attr.expression(
'((log((z_t - d0) / z0h)) - fh) / u_attr / 0.41',
{'d0': d0, 'fh': fh, 'u_attr': u_attr, 'z0h': z0h, 'z_t': z_t})
# CGM - The second conditional will overwrite the first one?
r_ah = r_ah.where(r_ah.eq(0), 500)
r_ah = r_ah.where(r_ah.lte(1.0), 1.0)
return r_ah
def compute_r_s(u_attr, T_s, T_c, hc, F, d0, z0m, leaf, leaf_s, fm_h):
"""
Parameters
----------
u_attr : ee.Image
T_s : ee.Image
Soil temperature (Kelvin).
T_c : ee.Image
Canopy temperature (Kelvin).
hc : ee.Image
F : ee.Image
Input is LAI?
d0
z0m
leaf
leaf_s
fm_h
Returns
-------
r_s
"""
# Free convective velocity constant for r_s modelling
c_a = 0.004
# Empirical constant for r_s modelling
c_b = 0.012
# Empirical constant for r_s modelling
# (new formulation Kustas and Norman, 1999)
c_c = 0.0025
# Computation of the resistance of the air between soil and canopy space
u_c = u_attr.expression(
'u_attr / 0.41 * ((log((hc - d0) / z0m)) - fm_h)',
{'d0': d0, 'fm_h': fm_h, 'hc': hc, 'u_attr': u_attr, 'z0m': z0m})
u_c = u_c.where(u_c.lte(0), 0.1)
u_s = u_attr.expression(
'u_c * exp(-leaf * (1 - (0.05 / hc)))',
{'hc': hc, 'leaf': leaf, 'u_c': u_c})
r_ss = u_attr.expression(
'1.0 / (c_a + (c_b * (u_c * exp(-leaf_s * (1.0 - (0.05 / hc))))))',
{'c_a': c_a, 'c_b': c_b, 'hc': hc, 'leaf_s': leaf_s, 'u_c': u_c})
r_s1 = T_s.expression(
'1.0 / ((((abs(T_s - T_c)) ** (1.0 / 3.0)) * c_c) + (c_b * Us))',
{'c_b': c_b, 'c_c': c_c, 'T_c': T_c, 'T_s': T_s, 'Us': u_s})
r_s2 = u_attr.expression(
'1.0 / (c_a + (c_b * Us))', {'c_a': c_a, 'c_b': c_b, 'Us': u_s})
r_s = u_attr.expression(
'(((r_ss - 1.0) / 0.09 * (F - 0.01)) + 1.0)', {'F': F, 'r_ss': r_ss})
# Linear function between 0 (bare soil) and the value at F=0.1
r_s = r_s.where(F.gt(0.1), r_s1)
r_s = r_s.where(T_s.subtract(T_c).abs().lt(1), r_s2)
# Use "new" formula only for high DT values
# Use "new" formula only for partial coverage (lai<3)
r_s = r_s.where(F.gt(3), r_s2)
return r_s
# coding: spec
from delfick_project.app import CliParser, Ignore, BadOption
from delfick_project.errors_pytest import assertRaises
from contextlib import contextmanager
from itertools import combinations
from unittest import mock
import sys
import re
import os
describe "CliParser":
@contextmanager
def swapped_env(self, **swapped):
originals = {}
try:
for name, val in swapped.items():
originals[name] = os.environ.get(name, Ignore)
os.environ[name] = val
yield
finally:
for name in swapped:
original = originals[name]
if original is Ignore and name in os.environ:
del os.environ[name]
elif original is not Ignore:
os.environ[name] = original
it "takes in description, positional_replacements and environment_defaults":
description = mock.Mock(name="description")
environment_defaults = mock.Mock(name="environment_defaults")
positional_replacements = mock.Mock(name="positional_replacements")
parser = CliParser(
description,
environment_defaults=environment_defaults,
positional_replacements=positional_replacements,
)
assert parser.description is description
assert parser.environment_defaults is environment_defaults
assert parser.positional_replacements is positional_replacements
it "defaults positional_replacements to an empty array":
assert CliParser(None).positional_replacements == []
it "defaults environment_defaults to an empty dictionary":
assert CliParser(None).environment_defaults == {}
describe "parse_args":
it "splits, makes, parses and checks the args":
argv = mock.Mock(name="argv")
args_obj = mock.Mock(name="args_obj")
other_args = mock.Mock(name="other_args")
defaults = mock.Mock(name="defaults")
positional_replacements = mock.Mock(name="positional_replacements")
parser = mock.Mock(name="parser")
parsed = mock.Mock(name="parsed")
parser.parse_args.return_value = parsed
split_args = mock.Mock(name="split_args", return_value=(args_obj, other_args, defaults))
make_parser = mock.Mock(name="make_parser", return_value=parser)
check_args = mock.Mock(name="check_args")
cli_parser = CliParser("", positional_replacements)
with mock.patch.multiple(
cli_parser, split_args=split_args, make_parser=make_parser, check_args=check_args
):
assert cli_parser.parse_args(argv) == (parsed, other_args)
make_parser.assert_called_once_with(defaults)
parser.parse_args.assert_called_once_with(args_obj)
check_args.assert_called_once_with(argv, defaults, positional_replacements)
it "works":
class Parser(CliParser):
def specify_other_args(slf, parser, defaults):
parser.add_argument("--task", help="specify the task", **defaults["--task"])
parser.add_argument("--blah", help="I don't know", **defaults["--blah"])
parser.add_argument("--meh", help="I don't know")
parser = Parser("", [("--task", "list_tasks"), "--blah"], {})
parsed, other_args = parser.parse_args(
["whatever", "tree", "--meh", "bus", "--", "--blah", "fire"]
)
assert other_args == "--blah fire"
assert parsed.task == "whatever"
assert parsed.blah == "tree"
assert parsed.meh == "bus"
it "works in the error case":
class Parser(CliParser):
def specify_other_args(slf, parser, defaults):
parser.add_argument("--task", help="specify the task", **defaults["--task"])
parser = Parser("", ["--task"], {})
with assertRaises(
BadOption,
"Please don't specify an option as a positional argument and as a --flag",
argument="--task",
position=1,
):
parsed, other_args = parser.parse_args(
["whatever", "--task", "whatever", "--", "--task", "fire"]
)
describe "check_args":
it "complains if it finds something both has a default and is in args and positional_replacements":
positional_replacements = ["--task", "--env"]
defaults = {"--task": {}, "--env": {"default": "prod"}}
parser = CliParser("")
parser.check_args([], defaults, positional_replacements)
assert True, "That definitely should not have failed"
with assertRaises(
BadOption,
"Please don't specify an option as a positional argument and as a --flag",
argument="--env",
position=2,
):
parser.check_args(
["list_tasks", "dev", "--env", "staging"], defaults, positional_replacements
)
describe "interpret_args":
it "can categorize based on categories and names of args":
class Parser(CliParser):
def specify_other_args(slf, parser, defaults):
parser.add_argument("--one", dest="my_app_one")
parser.add_argument("--two", dest="my_app_two")
parser.add_argument("--other")
parser = Parser("")
args_obj, args_dict, extra = parser.interpret_args(
[
"--one",
"1",
"--two",
"2",
"--other",
"3",
"--logging-program",
"my-app",
"--syslog-address",
"/dev/log",
],
["my_app"],
)
assert extra == ""
assert args_obj.my_app_one == "1"
assert args_obj.my_app_two == "2"
assert args_obj.other == "3"
assert args_obj.logging_program == "my-app"
assert args_dict == {
"my_app": {"one": "1", "two": "2"},
"other": "3",
"silent": False,
"debug": False,
"verbose": False,
"version": False,
"logging_program": "my-app",
"syslog_address": "/dev/log",
"json_console_logs": False,
"tcp_logging_address": "",
"udp_logging_address": "",
"logging_handler_file": None,
}
it "Doesn't complain about flagged values in positional placement":
class Parser(CliParser):
def specify_other_args(slf, parser, defaults):
parser.add_argument("--one", **defaults["--one"])
parser.add_argument("--two", **defaults["--two"])
parser.add_argument("--three", **defaults["--three"])
parser = Parser("", ["--one", "--two", ("--three", "dflt")], {})
parsed, args_dict, extra = parser.interpret_args(
["whatever", "--three", "whatever2", "--two", "stuff"]
)
assert parsed.one == "whatever"
assert parsed.two == "stuff"
assert parsed.three == "whatever2"
it "does complain about flagged values combined with positional placement":
class Parser(CliParser):
def specify_other_args(slf, parser, defaults):
parser.add_argument("--one", **defaults["--one"])
parser.add_argument("--two", **defaults["--two"])
parser.add_argument("--three", **defaults["--three"])
parser = Parser("", ["--one", "--two", ("--three", "dflt")], {})
with assertRaises(
BadOption,
"Please don't specify an option as a positional argument and as a --flag",
argument="--two",
position=2,
):
parser.interpret_args(["whatever", "trees", "whatever2", "--two", "stuff"])
describe "make_defaults":
it "has no defaults if there are no positional_replacements or environment_defaults":
parser = CliParser("")
defaults = parser.make_defaults([], [], {})
assert defaults == {}
argv = ["one", "two", "--three"]
defaults = parser.make_defaults(argv, [], {})
assert defaults == {}
assert argv == ["one", "two", "--three"]
it "maps argv positionals to positional_replacements and takes those from argv":
argv = ["one", "two", "three"]
positional_replacements = ["--task", "--env", "--stack"]
parser = CliParser("")
defaults = parser.make_defaults(argv, positional_replacements, {})
assert argv == []
assert defaults == {
"--task": {"default": "one"},
"--env": {"default": "two"},
"--stack": {"default": "three"},
}
it "ignores positional_replacements after a -flag":
argv = ["one", "two", "-three"]
positional_replacements = ["--task", "--env", "--stack"]
parser = CliParser("")
defaults = parser.make_defaults(argv, positional_replacements, {})
assert argv == ["-three"]
assert defaults == {
"--task": {"default": "one"},
"--env": {"default": "two"},
"--stack": {},
}
it "finds environment variables from environment_defaults as defaults":
argv = []
environment_defaults = {"CONFIG_LOCATION": "--config"}
parser = CliParser("")
somewhere = "/some/nice/config.yml"
with self.swapped_env(CONFIG_LOCATION=somewhere):
defaults = parser.make_defaults(argv, [], environment_defaults)
assert argv == []
assert defaults == {"--config": {"default": somewhere}}
it "uses default from environment if flag in positional_replacements":
argv = []
environment_defaults = {"CONFIG_LOCATION": "--config"}
positional_replacements = ["--config"]
parser = CliParser("")
somewhere = "/some/nice/config.yml"
with self.swapped_env(CONFIG_LOCATION=somewhere):
defaults = parser.make_defaults(argv, positional_replacements, environment_defaults)
assert argv == []
assert defaults == {"--config": {"default": somewhere}}
it "overrides default from environment_defaults with value from argv if in positional_replacements":
argv = ["a/better/place.yml"]
environment_defaults = {"CONFIG_LOCATION": "--config"}
positional_replacements = ["--config"]
parser = CliParser("")
somewhere = "/some/nice/config.yml"
with self.swapped_env(CONFIG_LOCATION=somewhere):
defaults = parser.make_defaults(argv, positional_replacements, environment_defaults)
assert argv == []
assert defaults == {"--config": {"default": "a/better/place.yml"}}
it "environment_defaults overrides positional_replacements default":
argv = []
environment_defaults = {"CONFIG_LOCATION": "--config"}
positional_replacements = [("--config", "a/nicer/place.yml")]
parser = CliParser("")
somewhere = "/some/nice/config.yml"
with self.swapped_env(CONFIG_LOCATION=somewhere):
defaults = parser.make_defaults(argv, positional_replacements, environment_defaults)
assert argv == []
assert defaults == {"--config": {"default": somewhere}}
it "environment_defaults default value overrides positional_replacements default":
argv = []
environment_defaults = {"CONFIG_LOCATION": ("--config", "the/best/place.yml")}
positional_replacements = [("--config", "a/nicer/place.yml")]
parser = CliParser("")
defaults = parser.make_defaults(argv, positional_replacements, environment_defaults)
assert argv == []
assert defaults == {"--config": {"default": "the/best/place.yml"}}
it "can have defaults for positional_replacements":
argv = []
positional_replacements = [("--task", "list_tasks")]
parser = CliParser("")
defaults = parser.make_defaults(argv, positional_replacements, {})
assert argv == []
assert defaults == {"--task": {"default": "list_tasks"}}
it "can have defaults for environment_defaults":
argv = []
environment_defaults = {"SOMETHING": ("--something", "something")}
parser = CliParser("")
defaults = parser.make_defaults(argv, [], environment_defaults)
assert argv == []
assert defaults == {"--something": {"default": "something"}}
describe "split_args":
it "returns args before and after -- and calls make_defaults":
dflts = mock.Mock(name="dflts")
make_defaults = mock.Mock(name="make_defaults", return_value=dflts)
description = mock.Mock(name="description")
environment_defaults = mock.Mock(name="environment_defaults")
positional_replacements = mock.Mock(name="positional_replacements")
parser = CliParser(description, positional_replacements, environment_defaults)
with mock.patch.object(parser, "make_defaults", make_defaults):
args, other_args, defaults = parser.split_args(["a", "b", "c", "--", "d", "e", "f"])
assert args == ["a", "b", "c"]
assert other_args == "d e f"
assert defaults is dflts
make_defaults.assert_called_once_with(
["a", "b", "c"], positional_replacements, environment_defaults
)
it "returns other_args as empty if there is no --":
args, other_args, defaults = CliParser("").split_args(["a", "b", "c"])
assert args == ["a", "b", "c"]
assert other_args == ""
assert defaults == {}
it "sets args as an empty list if args is just from --":
args, other_args, defaults = CliParser("").split_args(["--", "a", "b", "c"])
assert args == []
assert other_args == "a b c"
assert defaults == {}
it "works":
argv = ["dev", "--blah", "1", "--", "and", "stuff"]
args, other_args, defaults = CliParser(
"",
["--env", ("--task", "list_tasks")],
{"CONFIG_LOCATION": ("--config", "somewhere")},
).split_args(argv)
assert args == ["--blah", "1"]
assert other_args == "and stuff"
assert defaults == {
"--env": {"default": "dev"},
"--task": {"default": "list_tasks"},
"--config": {"default": "somewhere"},
}
describe "make_parser":
it "calls specify_other_args with the parser":
parser = mock.Mock(name="parser")
defaults = {"--silent": {"default": False}}
description = mock.Mock(name="description")
FakeArgumentParser = mock.Mock(name="ArgumentParser", return_value=parser)
called = []
class Parser(CliParser):
def specify_other_args(slf, parser, defaults):
called.append((parser, defaults))
with mock.patch("argparse.ArgumentParser", FakeArgumentParser):
assert Parser(description).make_parser(defaults) is parser
assert called == [(parser, defaults)]
FakeArgumentParser.assert_called_once_with(description=description)
it "specifies verbose, silent and debug":
parser = CliParser("").make_parser({})
args_obj = parser.parse_args([])
assert args_obj.verbose is False
assert args_obj.silent is False
assert args_obj.debug is False
args_obj = parser.parse_args(["--verbose"])
assert args_obj.verbose is True
assert args_obj.silent is False
assert args_obj.debug is False
args_obj = parser.parse_args(["--silent"])
assert args_obj.verbose is False
assert args_obj.silent is True
assert args_obj.debug is False
args_obj = parser.parse_args(["--debug"])
assert args_obj.verbose is False
assert args_obj.silent is False
assert args_obj.debug is True
it "can have silent | |
['CIV12', 'C', (2,3),'AN'],
['CIV13', 'C', 35,'AN'],
['CIV14', 'C', (3,3),'AN'],
['CIV15', 'C', (2,3),'AN'],
['CIV16', 'C', 35,'AN'],
['CIV17', 'C', 80,'AN'],
],
'CL': [
['BOTSID', 'M', 3,'AN'],
['CL01', 'M', (2,5),'AN'],
],
'CL1': [
['BOTSID', 'M', 3,'AN'],
['CL101', 'C', 1,'AN'],
['CL102', 'C', 1,'AN'],
['CL103', 'C', 2,'AN'],
['CL104', 'C', 1,'AN'],
],
'CLD': [
['BOTSID', 'M', 3,'AN'],
['CLD01', 'M', 5,'R'],
['CLD02', 'M', 10,'R'],
['CLD03', 'C', (5,5),'AN'],
['CLD04', 'C', 8,'R'],
['CLD05', 'C', (2,2),'AN'],
],
'CLI': [
['BOTSID', 'M', 3,'AN'],
['CLI01', 'C', (2,2),'AN'],
['CLI02', 'C', (2,2),'AN'],
['CLI03', 'C', 11,'AN'],
['CLI04', 'C', 45,'AN'],
['CLI05', 'C', 2,'AN'],
['CLI06', 'C', (2,2),'AN'],
],
'CLM': [
['BOTSID', 'M', 3,'AN'],
['CLM01', 'M', 38,'AN'],
['CLM02', 'C', 15,'R'],
['CLM03', 'C', 2,'AN'],
['CLM04', 'C', 2,'AN'],
['CLM05', 'C', 2,'AN'],
['CLM06', 'C', 2,'AN'],
['CLM07', 'C', 1,'AN'],
['CLM08', 'C', 1,'AN'],
['CLM09', 'C', 1,'AN'],
['CLM10', 'C', 1,'AN'],
['CLM11', 'C', 1,'AN'],
['CLM12', 'C', 1,'AN'],
['CLM13', 'C', (2,3),'AN'],
['CLM14', 'C', (2,3),'AN'],
['CLM15', 'C', (2,3),'AN'],
['CLM16', 'C', (2,2),'AN'],
['CLM17', 'C', (2,3),'AN'],
['CLM18', 'C', (2,3),'AN'],
['CLM19', 'C', 1,'AN'],
['CLM20', 'C', 3,'AN'],
['CLM21', 'C', 1,'AN'],
['CLM22', 'C', 1,'AN'],
['CLM23', 'C', 2,'AN'],
['CLM24', 'C', 1,'AN'],
['CLM25', 'C', (2,2),'AN'],
],
'CLP': [
['BOTSID', 'M', 3,'AN'],
['CLP01', 'M', 38,'AN'],
['CLP02', 'M', 2,'AN'],
['CLP03', 'M', 15,'R'],
['CLP04', 'M', 15,'R'],
['CLP05', 'C', 15,'R'],
['CLP06', 'C', 2,'AN'],
['CLP07', 'C', 30,'AN'],
['CLP08', 'C', 2,'AN'],
['CLP09', 'C', 1,'AN'],
['CLP10', 'C', 2,'AN'],
['CLP11', 'C', 4,'AN'],
],
'CM': [
['BOTSID', 'M', 3,'AN'],
['CM01', 'M', (2,10),'AN'],
['CM02', 'M', 1,'AN'],
['CM03', 'M', (2,24),'AN'],
['CM04', 'C', (6,6),'DT'],
['CM05', 'C', 17,'AN'],
['CM06', 'C', (2,4),'AN'],
['CM07', 'C', (2,4),'AN'],
['CM08', 'C', (6,6),'DT'],
['CM09', 'C', (2,28),'AN'],
['CM10', 'C', 4,'AN'],
['CM11', 'C', (2,14),'AN'],
['CM12', 'C', (2,30),'AN'],
['CM13', 'C', (2,2),'AN'],
['CM14', 'C', (2,3),'AN'],
],
'CMA': [
['BOTSID', 'M', 3,'AN'],
['CMA01', 'M', (2,2),'AN'],
['CMA02', 'M', (2,2),'AN'],
['CMA03', 'M', 30,'AN'],
['CMA04', 'M', (6,6),'DT'],
['CMA05', 'M', 2,'R'],
['CMA06', 'C', 3,'AN'],
['CMA07', 'C', 12,'AN'],
['CMA08', 'C', (3,3),'AN'],
['CMA09', 'C', 8,'AN'],
['CMA10', 'C', 30,'AN'],
['CMA11', 'C', (2,2),'AN'],
],
'CN1': [
['BOTSID', 'M', 3,'AN'],
['CN101', 'M', (2,2),'AN'],
['CN102', 'C', 15,'R'],
['CN103', 'C', 6,'R'],
['CN104', 'C', 30,'AN'],
['CN105', 'C', 6,'R'],
['CN106', 'C', 30,'AN'],
],
'COB': [
['BOTSID', 'M', 3,'AN'],
['COB01', 'C', 1,'AN'],
['COB02', 'C', 30,'AN'],
['COB03', 'C', 1,'AN'],
],
'CON': [
['BOTSID', 'M', 3,'AN'],
['CON01', 'M', (2,2),'AN'],
['CON02', 'M', 30,'AN'],
['CON03', 'M', (2,2),'AN'],
],
'CPR': [
['BOTSID', 'M', 3,'AN'],
['CPR01', 'M', (3,3),'AN'],
['CPR02', 'M', (6,6),'DT'],
['CPR03', 'M', 14,'R'],
['CPR04', 'M', (2,2),'AN'],
['CPR05', 'C', 1,'AN'],
],
'CR1': [
['BOTSID', 'M', 3,'AN'],
['CR101', 'C', (2,2),'AN'],
['CR102', 'C', 10,'R'],
['CR103', 'C', 1,'AN'],
['CR104', 'C', 1,'AN'],
['CR105', 'C', (2,2),'AN'],
['CR106', 'C', 15,'R'],
['CR107', 'C', 35,'AN'],
['CR108', 'C', 35,'AN'],
['CR109', 'C', 80,'AN'],
['CR110', 'C', 80,'AN'],
],
'CR2': [
['BOTSID', 'M', 3,'AN'],
['CR201', 'C', 9,'R'],
['CR202', 'C', 15,'R'],
['CR203', 'C', (2,3),'AN'],
['CR204', 'C', (2,3),'AN'],
['CR205', 'C', (2,2),'AN'],
['CR206', 'C', 15,'R'],
['CR207', 'C', 15,'R'],
['CR208', 'C', 1,'AN'],
['CR209', 'C', 1,'AN'],
['CR210', 'C', 80,'AN'],
['CR211', 'C', 80,'AN'],
],
'CR3': [
['BOTSID', 'M', 3,'AN'],
['CR301', 'C', 1,'AN'],
['CR302', 'C', (2,2),'AN'],
['CR303', 'C', 15,'R'],
['CR304', 'C', 1,'AN'],
['CR305', 'C', 80,'AN'],
],
'CR4': [
['BOTSID', 'M', 3,'AN'],
['CR401', 'M', 1,'AN'],
['CR402', 'C', 1,'AN'],
['CR403', 'C', (2,2),'AN'],
['CR404', 'C', 15,'R'],
['CR405', 'C', (2,2),'AN'],
['CR406', 'C', 15,'R'],
['CR407', 'C', 1,'AN'],
['CR408', 'C', (2,2),'AN'],
['CR409', 'C', 15,'R'],
['CR410', 'C', (2,2),'AN'],
['CR411', 'C', 8,'R'],
['CR412', 'C', (2,2),'AN'],
['CR413', 'C', 10,'R'],
['CR414', 'C', 15,'R'],
['CR415', 'C', 80,'AN'],
['CR416', 'C', 1,'AN'],
['CR417', 'C', 1,'AN'],
['CR418', 'C', 15,'R'],
['CR419', 'C', 15,'R'],
['CR420', 'C', 80,'AN'],
['CR421', 'C', 15,'R'],
['CR422', 'C', 10,'R'],
['CR423', 'C', 15,'R'],
['CR424', 'C', 15,'R'],
['CR425', 'C', 10,'R'],
['CR426', 'C', 15,'R'],
['CR427', 'C', 10,'R'],
['CR428', 'C', 15,'R'],
['CR429', 'C', 80,'AN'],
],
'CR5': [
['BOTSID', 'M', 3,'AN'],
['CR501', 'C', 1,'AN'],
['CR502', 'C', 15,'R'],
['CR503', 'C', 1,'AN'],
['CR504', 'C', 1,'AN'],
['CR505', 'C', 80,'AN'],
['CR506', 'C', 15,'R'],
['CR507', 'C', 15,'R'],
['CR508', 'C', 15,'R'],
['CR509', 'C', 80,'AN'],
['CR510', 'C', 15,'R'],
['CR511', 'C', 15,'R'],
['CR512', 'C', 1,'AN'],
['CR513', 'C', 1,'AN'],
['CR514', 'C', 1,'AN'],
['CR515', 'C', 1,'AN'],
['CR516', 'C', 15,'R'],
['CR517', 'C', 1,'AN'],
],
'CR8': [
['BOTSID', 'M', 3,'AN'],
['CR801', 'M', 1,'AN'],
['CR802', 'M', 1,'AN'],
['CR803', 'M', (6,6),'DT'],
['CR804', 'M', (6,6),'DT'],
['CR805', 'M', 30,'AN'],
['CR806', 'M', 30,'AN'],
['CR807', 'M', 30,'AN'],
['CR808', 'M', 1,'AN'],
['CR809', 'M', 1,'AN'],
],
'CRC': [
['BOTSID', 'M', 3,'AN'],
['CRC01', 'M', (2,2),'AN'],
['CRC02', 'M', 1,'AN'],
['CRC03', 'M', (2,2),'AN'],
['CRC04', 'C', (2,2),'AN'],
['CRC05', 'C', (2,2),'AN'],
['CRC06', 'C', (2,2),'AN'],
['CRC07', 'C', (2,2),'AN'],
],
'CRI': [
['BOTSID', 'M', 3,'AN'],
['CRI01', 'C', (3,3),'AN'],
['CRI02', 'C', 2,'AN'],
['CRI03', 'C', (2,3),'AN'],
['CRI04', 'C', 1,'AN'],
['CRI05', 'C', 1,'AN'],
],
'CRO': [
['BOTSID', 'M', 3,'AN'],
['CRO01', 'M', (2,3),'AN'],
['CRO02', 'M', 35,'AN'],
['CRO03', 'M', (2,2),'AN'],
['CRO04', 'M', 30,'AN'],
['CRO05', 'M', 2,'AN'],
],
'CRS': [
['BOTSID', 'M', 3,'AN'],
['CRS01', 'M', 1,'AN'],
['CRS02', 'C', 1,'AN'],
['CRS03', 'C', 15,'R'],
['CRS04', 'C', 15,'R'],
['CRS05', 'C', 3,'AN'],
['CRS06', 'C', 3,'AN'],
['CRS07', 'C', 1,'AN'],
['CRS08', 'C', 2,'AN'],
['CRS09', 'C', 1,'AN'],
['CRS10', 'C', 2,'AN'],
['CRS11', 'C', (2,17),'AN'],
['CRS12', 'C', 15,'R'],
['CRS13', 'C', (2,2),'AN'],
['CRS14', 'C', 35,'AN'],
['CRS15', 'C', 30,'AN'],
['CRS16', 'C', 35,'AN'],
['CRS17', 'C', 15,'R'],
['CRS18', 'C', 15,'R'],
['CRS19', 'C', (6,6),'DT'],
['CRS20', 'C', (2,2),'AN'],
],
'CRT': [
['BOTSID', 'M', 3,'AN'],
['CRT01', 'M', (2,2),'AN'],
['CRT02', 'C', (2,2),'AN'],
['CRT03', 'C', (2,2),'AN'],
['CRT04', 'C', (2,2),'AN'],
['CRT05', 'C', 2,'AN'],
['CRT06', 'C', 2,'AN'],
['CRT07', 'C', (2,2),'AN'],
['CRT08', 'C', (2,2),'AN'],
['CRT09', 'C', 45,'AN'],
['CRT10', 'C', (2,2),'AN'],
],
'CS': [
['BOTSID', 'M', 3,'AN'],
['CS01', 'C', 30,'AN'],
['CS02', 'C', 8,'AN'],
['CS03', 'C', 30,'AN'],
['CS04', 'C', (2,2),'AN'],
['CS05', 'C', 30,'AN'],
['CS06', 'C', 22,'AN'],
['CS07', 'C', (2,10),'AN'],
['CS08', 'C', (2,2),'AN'],
['CS09', 'C', 10,'R'],
['CS10', 'C', 10,'R'],
['CS11', 'C', 15,'R'],
['CS12', 'C', (2,2),'AN'],
['CS13', 'C', (2,10),'AN'],
['CS14', 'C', (2,2),'AN'],
['CS15', 'C', 14,'R'],
['CS16', 'C', (2,2),'AN'],
['CS17', 'C', 1,'AN'],
['CS18', 'C', 1,'AN'],
],
'CSB': [
['BOTSID', 'M', 3,'AN'],
['CSB01', 'M', (2,4),'AN'],
['CSB02', 'C', 32,'AN'],
],
'CSF': [
['BOTSID', 'M', 3,'AN'],
['CSF01', 'M', (2,2),'AN'],
['CSF02', 'C', 6,'R'],
['CSF03', 'C', 9,'R'],
],
'CSH': [
['BOTSID', 'M', 3,'AN'],
['CSH01', 'C', 2,'AN'],
['CSH02', 'C', 1,'AN'],
['CSH03', 'C', (2,9),'N2'],
['CSH04', 'C', 35,'AN'],
['CSH05', 'C', (6,6),'DT'],
['CSH06', 'C', (2,2),'AN'],
['CSH07', 'C', (2,10),'AN'],
['CSH08', 'C', 2,'AN'],
['CSH09', 'C', 10,'R'],
],
'CSI': [
['BOTSID', 'M', 3,'AN'],
['CSI01', 'M', (2,2),'AN'],
['CSI02', 'M', (3,3),'AN'],
['CSI03', 'M', (2,3),'AN'],
['CSI04', 'M', 35,'AN'],
],
'CSM': [
['BOTSID', 'M', 3,'AN'],
['CSM01', 'M', (3,4),'AN'],
['CSM02', 'M', (4,16),'AN'],
['CSM03', 'M', (4,16),'AN'],
],
'CSS': [
['BOTSID', 'M', 3,'AN'],
['CSS01', 'M', (2,2),'AN'],
['CSS02', 'M', (2,2),'AN'],
['CSS03', 'M', 3,'R'],
['CSS04', 'C', 3,'R'],
['CSS05', 'C', 3,'R'],
['CSS06', 'C', 3,'R'],
['CSS07', 'C', 3,'R'],
],
'CST': [
['BOTSID', 'M', 3,'AN'],
['CST01', 'M', (3,3),'AN'],
['CST02', 'M', 15,'R'],
['CST03', 'C', (2,2),'AN'],
['CST04', 'C', 15,'R'],
],
'CSU': [
['BOTSID', 'M', 3,'AN'],
['CSU01', 'C', 35,'AN'],
['CSU02', 'C', 30,'AN'],
['CSU03', 'C', (2,3),'AN'],
['CSU04', 'C', 35,'AN'],
['CSU05', 'C', (2,3),'AN'],
['CSU06', 'C', 35,'AN'],
],
'CTB': [
['BOTSID', 'M', 3,'AN'],
['CTB01', 'M', (2,2),'AN'],
['CTB02', 'C', 80,'AN'],
['CTB03', 'C', (2,2),'AN'],
['CTB04', 'C', 15,'R'],
['CTB05', 'C', 2,'AN'],
['CTB06', 'C', 15,'N2'],
],
'CTC': [
['BOTSID', 'M', 3,'AN'],
['CTC01', 'M', (2,4),'AN'],
['CTC02', 'M', (2,4),'AN'],
['CTC03', 'M', 1,'AN'],
['CTC04', 'M', (2,2),'AN'],
['CTC05', 'M', (2,2),'AN'],
['CTC06', 'M', (2,2),'R'],
['CTC07', 'M', (2,2),'R'],
['CTC08', 'M', (2,2),'AN'],
['CTC09', 'C', (2,2),'R'],
['CTC10', 'C', (2,2),'R'],
['CTC11', 'C', (2,2),'AN'],
['CTC12', 'C', 2,'AN'],
],
'CTP': [
['BOTSID', 'M', 3,'AN'],
['CTP01', 'C', (2,2),'AN'],
['CTP02', 'C', (3,3),'AN'],
['CTP03', 'C', 14,'R'],
['CTP04', 'C', 15,'R'],
['CTP05', 'C', (2,2),'AN'],
['CTP06', 'C', (3,3),'AN'],
['CTP07', 'C', 10,'R'],
['CTP08', 'C', 15,'R'],
['CTP09', 'C', (2,2),'AN'],
],
'CTT': [
['BOTSID', 'M', 3,'AN'],
['CTT01', 'M', 6,'R'],
['CTT02', 'C', 10,'R'],
['CTT03', 'C', 10,'R'],
['CTT04', 'C', (2,2),'AN'],
['CTT05', 'C', 8,'R'],
['CTT06', 'C', (2,2),'AN'],
['CTT07', 'C', 80,'AN'],
],
'CUR': [
['BOTSID', 'M', 3,'AN'],
['CUR01', 'M', (2,2),'AN'],
['CUR02', 'M', (3,3),'AN'],
['CUR03', 'C', (4,6),'R'],
['CUR04', 'C', (2,2),'AN'],
['CUR05', 'C', (3,3),'AN'],
['CUR06', 'C', (3,3),'AN'],
['CUR07', 'C', (3,3),'AN'],
['CUR08', 'C', (6,6),'DT'],
['CUR09', 'C', (4,8),'TM'],
['CUR10', 'C', (3,3),'AN'],
['CUR11', 'C', (6,6),'DT'],
['CUR12', 'C', (4,8),'TM'],
['CUR13', 'C', (3,3),'AN'],
['CUR14', 'C', (6,6),'DT'],
['CUR15', 'C', (4,8),'TM'],
['CUR16', 'C', (3,3),'AN'],
['CUR17', 'C', (6,6),'DT'],
['CUR18', 'C', (4,8),'TM'],
['CUR19', 'C', (3,3),'AN'],
['CUR20', 'C', (6,6),'DT'],
['CUR21', 'C', (4,8),'TM'],
],
'CV': [
['BOTSID', 'M', 3,'AN'],
['CV01', 'M', 1,'AN'],
['CV02', 'C', (2,2),'AN'],
['CV03', 'C', 1,'AN'],
['CV04', 'C', 1,'AN'],
['CV05', 'C', 15,'R'],
['CV06', 'C', 1,'AN'],
['CV07', 'C', 15,'R'],
['CV08', 'C', 15,'R'],
['CV09', 'C', 15,'R'],
['CV10', 'C', 15,'R'],
['CV11', 'C', 15,'R'],
['CV12', 'C', 15,'R'],
['CV13', 'C', 15,'R'],
['CV14', 'C', 1,'AN'],
],
'CYC': [
['BOTSID', 'M', 3,'AN'],
['CYC01', 'M', (2,2),'R'],
['CYC02', 'M', (2,2),'R'],
['CYC03', 'M', (2,2),'AN'],
['CYC04', 'M', 3,'R'],
['CYC05', 'M', (6,9),'AN'],
['CYC06', 'M', (2,4),'AN'],
['CYC07', 'C', (7,7),'AN'],
],
'D8': [
['BOTSID', 'M', 3,'AN'],
['D801', 'M', (2,4),'AN'],
],
'D9': [
['BOTSID', 'M', 3,'AN'],
['D901', 'C', 5,'AN'],
['D902', 'M', (2,30),'AN'],
['D903', 'M', (2,2),'AN'],
['D904', 'C', (2,3),'AN'],
['D905', 'C', 5,'AN'],
['D906', 'C', (2,30),'AN'],
['D907', 'C', (2,2),'AN'],
['D908', 'C', (6,9),'AN'],
['D909', 'C', (3,9),'AN'],
],
'DAD': [
['BOTSID', 'M', 3,'AN'],
['DAD01', 'M', 2,'AN'],
| |
"""Generated message classes for gkehub version v1alpha1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'gkehub'
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs:

    {
      "audit_configs": [
        {
          "service": "allServices",
          "audit_log_configs": [
            {
              "log_type": "DATA_READ",
              "exempted_members": ["user:<EMAIL>"]
            },
            {"log_type": "DATA_WRITE"},
            {"log_type": "ADMIN_READ"}
          ]
        },
        {
          "service": "fooservice.googleapis.com",
          "audit_log_configs": [
            {"log_type": "DATA_READ"},
            {
              "log_type": "DATA_WRITE",
              "exempted_members": ["user:<EMAIL>"]
            }
          ]
        }
      ]
    }

For fooservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts <EMAIL> from DATA_READ logging,
and <EMAIL> from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
exemptedMembers: A string attribute.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
exemptedMembers = _messages.StringField(2, repeated=True)
service = _messages.StringField(3)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example:
    {
      "audit_log_configs": [
        {
          "log_type": "DATA_READ",
          "exempted_members": ["user:<EMAIL>"]
        },
        {"log_type": "DATA_WRITE"}
      ]
    }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
<EMAIL> from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
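# Hedged usage sketch (editor's addition; this module is autogenerated, so the
# example is illustrative only): building the first AuditConfig from the
# docstring above with these generated classes. The member address is invented.
#
#   cfg = AuditConfig(
#       service='allServices',
#       auditLogConfigs=[
#           AuditLogConfig(
#               logType=AuditLogConfig.LogTypeValueValuesEnum.DATA_READ,
#               exemptedMembers=['user:someone@example.com'],
#           ),
#           AuditLogConfig(logType=AuditLogConfig.LogTypeValueValuesEnum.DATA_WRITE),
#       ],
#   )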
class AuthorizationLoggingOptions(_messages.Message):
r"""Authorization-related information used by Cloud Audit Logging.
Enums:
PermissionTypeValueValuesEnum: The type of the permission that was
checked.
Fields:
permissionType: The type of the permission that was checked.
"""
class PermissionTypeValueValuesEnum(_messages.Enum):
r"""The type of the permission that was checked.
Values:
PERMISSION_TYPE_UNSPECIFIED: Default. Should not be used.
ADMIN_READ: A read of admin (meta) data.
ADMIN_WRITE: A write of admin (meta) data.
DATA_READ: A read of standard data.
DATA_WRITE: A write of standard data.
"""
PERMISSION_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
ADMIN_WRITE = 2
DATA_READ = 3
DATA_WRITE = 4
permissionType = _messages.EnumField('PermissionTypeValueValuesEnum', 1)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: Unimplemented. The condition that is associated with this
binding. NOTE: an unsatisfied condition will not allow user access via
current binding. Different bindings, including their conditions, are
examined independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `<EMAIL>`
. * `serviceAccount:{emailid}`: An email address that represents a
service account. For example, `my-other-
<EMAIL>`. * `group:{emailid}`: An email address
that represents a Google group. For example, `<EMAIL>`.
* `domain:{domain}`: A Google Apps domain name that represents all the
users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class CancelOperationRequest(_messages.Message):
r"""The request message for Operations.CancelOperation."""
class CloudAuditOptions(_messages.Message):
r"""Write a Cloud Audit log
Enums:
LogNameValueValuesEnum: The log_name to populate in the Cloud Audit
Record.
Fields:
authorizationLoggingOptions: Information used by the Cloud Audit Logging
pipeline.
logName: The log_name to populate in the Cloud Audit Record.
"""
class LogNameValueValuesEnum(_messages.Enum):
r"""The log_name to populate in the Cloud Audit Record.
Values:
UNSPECIFIED_LOG_NAME: Default. Should not be used.
ADMIN_ACTIVITY: Corresponds to "cloudaudit.googleapis.com/activity"
DATA_ACCESS: Corresponds to "cloudaudit.googleapis.com/data_access"
"""
UNSPECIFIED_LOG_NAME = 0
ADMIN_ACTIVITY = 1
DATA_ACCESS = 2
authorizationLoggingOptions = _messages.MessageField('AuthorizationLoggingOptions', 1)
logName = _messages.EnumField('LogNameValueValuesEnum', 2)
class Condition(_messages.Message):
r"""A condition to be met.
Enums:
IamValueValuesEnum: Trusted attributes supplied by the IAM system.
OpValueValuesEnum: An operator to apply the subject with.
SysValueValuesEnum: Trusted attributes supplied by any service that owns
resources and uses the IAM system for access control.
Fields:
iam: Trusted attributes supplied by the IAM system.
op: An operator to apply the subject with.
svc: Trusted attributes discharged by the service.
sys: Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
values: The objects of the condition.
"""
class IamValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by the IAM system.
Values:
NO_ATTR: Default non-attribute.
AUTHORITY: Either principal or (if present) authority selector.
ATTRIBUTION: The principal (even if an authority selector is present),
which must only be used for attribution, not authorization.
SECURITY_REALM: Any of the security realms in the IAMContext (go
/security-realms). When used with IN, the condition indicates "any of
the request's realms match one of the given values; with NOT_IN, "none
of the realms match any of the given values". Note that a value can
be: - 'self' (i.e., allow connections from clients that are in the
same security realm) - a realm (e.g., 'campus-abc') - a realm group
(e.g., 'realms-for-borg-cell-xx', see: go/realm-groups) A match is
determined by a realm group membership check performed by a
RealmAclRep object (go/realm-acl-howto). It is not permitted to grant
access based on the *absence* of a realm, so realm conditions can only
be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
APPROVER: An approver (distinct from the requester) that has authorized
this request. When used with IN, the condition indicates that one of
the approvers associated with the request matches the specified
principal, or is a member of the specified group. Approvers can only
grant additional access, and are thus only used in a strictly positive
context (e.g. ALLOW/IN or DENY/NOT_IN).
JUSTIFICATION_TYPE: What types of justifications have been supplied with
this request. String values should match enum names from
tech.iam.JustificationType, e.g. "MANUAL_STRING". It is not permitted
to grant access based on the *absence* of a justification, so
justification conditions can only be used in a "positive" context
(e.g., ALLOW/IN or DENY/NOT_IN). Multiple justifications, e.g., a
Buganizer ID and a manually-entered reason, are normal and supported.
CREDENTIALS_TYPE: What type of credentials have been supplied with this
request. String values should match enum names from
security_loas_l2.CredentialsType - currently, only
CREDS_TYPE_EMERGENCY is supported. It is not permitted to grant access
based on the *absence* of a credentials type, so the conditions can
only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
"""
NO_ATTR = 0
AUTHORITY = 1
ATTRIBUTION = 2
SECURITY_REALM = 3
APPROVER = 4
JUSTIFICATION_TYPE = 5
CREDENTIALS_TYPE = 6
class OpValueValuesEnum(_messages.Enum):
r"""An operator to apply the subject with.
Values:
NO_OP: Default no-op.
EQUALS: DEPRECATED. Use IN instead.
NOT_EQUALS: DEPRECATED. Use NOT_IN instead.
IN: The condition is true if the subject (or any element of it if it is
a set) matches any of the supplied values.
NOT_IN: The condition is true if the subject (or every element of it if
it is a set) matches none of the supplied values.
DISCHARGED: Subject is discharged
"""
NO_OP = 0
EQUALS = 1
NOT_EQUALS = 2
IN = 3
NOT_IN = 4
DISCHARGED = 5
class SysValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by any service that owns resources | |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_accounts
# Purpose: Identify the existence of a given account on various sites thanks
# to Micah Hoffman's (https://github.com/WebBreacher) list.
#
# Author: <NAME> <<EMAIL>>
#
# Created: 18/02/2015
# Copyright: (c) <NAME> 2015
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import random
import threading
import time
from queue import Empty as QueueEmpty
from queue import Queue
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_accounts(SpiderFootPlugin):
meta = {
'name': "Account Finder",
'summary': "Look for possible associated accounts on nearly 200 websites like Ebay, Slashdot, reddit, etc.",
'useCases': ["Footprint", "Passive"],
'categories': ["Social Media"]
}
# Default options
opts = {
"ignorenamedict": True,
"ignoreworddict": True,
"musthavename": True,
"userfromemail": True,
"permutate": False,
"usernamesize": 4,
"_maxthreads": 20
}
# Option descriptions
optdescs = {
"ignorenamedict": "Don't bother looking up names that are just stand-alone first names (too many false positives).",
"ignoreworddict": "Don't bother looking up names that appear in the dictionary.",
"musthavename": "The username must be mentioned on the social media page to consider it valid (helps avoid false positives).",
"userfromemail": "Extract usernames from e-mail addresses at all? If disabled this can reduce false positives for common usernames but for highly unique usernames it would result in missed accounts.",
"permutate": "Look for the existence of account name permutations. Useful to identify fraudulent social media accounts or account squatting.",
"usernamesize": "The minimum length of a username to query across social media sites. Helps avoid false positives for very common short usernames.",
"_maxthreads": "Maximum threads"
}
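# Hedged usage sketch (editor's addition): SpiderFoot passes user overrides for
# the options above into setup(); the values below are illustrative only.
#
#   module = sfp_accounts()
#   module.setup(sf, userOpts={
#       "permutate": True,      # also probe look-alike account names
#       "usernamesize": 6,      # skip very short usernames
#       "_maxthreads": 10,
#   })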
results = None
reportedUsers = list()
siteResults = dict()
sites = list()
errorState = False
distrustedChecked = False
lock = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.commonNames = list()
self.reportedUsers = list()
self.errorState = False
self.distrustedChecked = False
self.__dataSource__ = "Social Media"
self.lock = threading.Lock()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
self.commonNames = set(self.sf.dictnames())
self.words = set(self.sf.dictwords())
content = self.sf.cacheGet("sfaccounts", 48)
if content is None:
url = "https://raw.githubusercontent.com/WebBreacher/WhatsMyName/master/web_accounts_list.json"
data = self.sf.fetchUrl(url, useragent="SpiderFoot")
if data['content'] is None:
self.error(f"Unable to fetch {url}")
self.errorState = True
return
content = data['content']
self.sf.cachePut("sfaccounts", content)
try:
self.sites = [site for site in json.loads(content)['sites'] if site['valid']]
except Exception as e:
self.error(f"Unable to parse social media accounts list: {e}")
self.errorState = True
return
def watchedEvents(self):
return ["EMAILADDR", "DOMAIN_NAME", "HUMAN_NAME", "USERNAME"]
def producedEvents(self):
return ["USERNAME", "ACCOUNT_EXTERNAL_OWNED",
"SIMILAR_ACCOUNT_EXTERNAL"]
def checkSite(self, name, site):
if 'check_uri' not in site:
return
url = site['check_uri'].format(account=name)
if 'pretty_uri' in site:
ret_url = site['pretty_uri'].format(account=name)
else:
ret_url = url
retname = f"{site['name']} (Category: {site['category']})\n<SFURL>{ret_url}</SFURL>"
res = self.sf.fetchUrl(
url,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
noLog=True,
verify=False
)
if not res['content']:
with self.lock:
self.siteResults[retname] = False
return
if res['code'] != site.get('account_existence_code'):
with self.lock:
self.siteResults[retname] = False
return
if site.get('account_existence_string') not in res['content']:
with self.lock:
self.siteResults[retname] = False
return
if self.opts['musthavename']:
if name.lower() not in res['content'].lower():
self.debug(f"Skipping {site['name']} as username not mentioned.")
with self.lock:
self.siteResults[retname] = False
return
# Some sites can't handle periods so treat bob.abc and bob as the same
# TODO: fix this once WhatsMyName has support for usernames with '.'
if "." in name:
firstname = name.split(".")[0]
if firstname + "<" in res['content'] or firstname + '"' in res['content']:
with self.lock:
self.siteResults[retname] = False
return
with self.lock:
self.siteResults[retname] = True
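# Hedged illustration (editor's addition): the shape of a single
# WhatsMyName-style site entry consumed by checkSite() above. Field values
# are invented; only the key names are taken from the code.
#
#   {
#       "name": "ExampleSite",
#       "category": "social",
#       "valid": True,
#       "check_uri": "https://example.com/users/{account}",
#       "pretty_uri": "https://example.com/{account}",
#       "account_existence_code": "200",
#       "account_existence_string": "Profile of",
#   }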
def checkSites(self, username, sites=None):
def processSiteQueue(username, queue):
try:
while True:
site = queue.get(timeout=0.1)
try:
self.checkSite(username, site)
except Exception as e:
self.debug(f'Thread {threading.current_thread().name} exception: {e}')
except QueueEmpty:
return
startTime = time.monotonic()
# results will be collected in siteResults
self.siteResults = {}
sites = self.sites if sites is None else sites
# load the queue
queue = Queue()
for site in sites:
queue.put(site)
# start the scan threads
threads = []
for i in range(min(len(sites), self.opts['_maxthreads'])):
thread = threading.Thread(
name=f'sfp_accounts_scan_{i}',
target=processSiteQueue,
args=(username, queue))
thread.start()
threads.append(thread)
# wait for all scan threads to finish
while threads:
threads.pop(0).join()
duration = time.monotonic() - startTime
scanRate = len(sites) / duration
self.debug(f'Scan statistics: name={username}, count={len(self.siteResults)}, duration={duration:.2f}, rate={scanRate:.0f}')
return [site for site, found in self.siteResults.items() if found]
def generatePermutations(self, username):
permutations = list()
prefixsuffix = ['_', '-']
replacements = {
'a': ['4', 's'],
'b': ['v', 'n'],
'c': ['x', 'v'],
'd': ['s', 'f'],
'e': ['w', 'r'],
'f': ['d', 'g'],
'g': ['f', 'h'],
'h': ['g', 'j', 'n'],
'i': ['o', 'u', '1'],
'j': ['k', 'h', 'i'],
'k': ['l', 'j'],
'l': ['i', '1', 'k'],
'm': ['n'],
'n': ['m'],
'o': ['p', 'i', '0'],
'p': ['o', 'q'],
'r': ['t', 'e'],
's': ['a', 'd', '5'],
't': ['7', 'y', 'z', 'r'],
'u': ['v', 'i', 'y', 'z'],
'v': ['u', 'c', 'b'],
'w': ['v', 'vv', 'q', 'e'],
'x': ['z', 'y', 'c'],
'y': ['z', 'x'],
'z': ['y', 'x'],
'0': ['o'],
'1': ['l'],
'2': ['5'],
'3': ['e'],
'4': ['a'],
'5': ['s'],
'6': ['b'],
'7': ['t'],
'8': ['b'],
'9': []
}
pairs = {
'oo': ['00'],
'll': ['l1l', 'l1l', '111', '11'],
'11': ['ll', 'lll', 'l1l', '1l1']
}
# Generate a set with replacements, then
# add suffixes and prefixes.
pos = 0
for c in username:
if c not in replacements:
continue
if len(replacements[c]) == 0:
continue
npos = pos + 1
for xc in replacements[c]:
newuser = username[0:pos] + xc + username[npos:len(username)]
permutations.append(newuser)
pos += 1
# Search for common double-letter replacements
for p in pairs:
if p in username:
for r in pairs[p]:
permutations.append(username.replace(p, r))
# Search for prefixed and suffixed usernames
for c in prefixsuffix:
permutations.append(username + c)
permutations.append(c + username)
# Search for double character usernames
pos = 0
for c in username:
permutations.append(username[0:pos] + c + c + username[(pos + 1):len(username)])
pos += 1
return list(set(permutations))
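# Editor's note (hedged example): for a username like "bob" the tables above
# yield look-alike handles such as "b0b", "vob", "bob_" and "_bob"; the exact
# set depends on the replacement and prefix/suffix tables.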
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
users = list()
if self.errorState:
return
self.debug(f"Received event, {eventName}, from {srcModuleName}")
# Skip events coming from me unless they are USERNAME events
if eventName != "USERNAME" and srcModuleName == "sfp_accounts":
self.debug(f"Ignoring {eventName}, from self.")
return
if eventData in list(self.results.keys()):
return
self.results[eventData] = True
# If being called for the first time, let's see how trusted the
# sites are by attempting to fetch a garbage user.
if not self.distrustedChecked:
# Check if a state cache exists first, to not have to do this all the time
content = self.sf.cacheGet("sfaccounts_state_v2", 72)
if content:
if content != "None": # "None" is written to the cached file when no sites are distrusted
delsites = list()
for line in content.split("\n"):
if line == '':
continue
delsites.append(line)
self.sites = [d for d in self.sites if d['name'] not in delsites]
else:
randpool = 'abcdefghijklmnopqrstuvwxyz1234567890'
randuser = ''.join([random.SystemRandom().choice(randpool) for x in range(10)])
res = self.checkSites(randuser)
if res:
delsites = list()
for site in res:
sitename = site.split(" (Category:")[0]
self.debug(f"Distrusting {sitename}")
delsites.append(sitename)
self.sites = [d for d in self.sites if d['name'] not in delsites]
else:
# The caching code needs *some* content
delsites = "None"
self.sf.cachePut("sfaccounts_state_v2", delsites)
self.distrustedChecked = True
if eventName == "HUMAN_NAME":
names = [eventData.lower().replace(" ", ""), eventData.lower().replace(" ", ".")]
for name in names:
users.append(name)
if eventName == "DOMAIN_NAME":
kw = self.sf.domainKeyword(eventData, self.opts['_internettlds'])
if not kw:
return
users.append(kw)
if eventName == "EMAILADDR" and self.opts['userfromemail']:
name = eventData.split("@")[0].lower()
users.append(name)
if eventName == "USERNAME":
users.append(eventData)
for user in set(users):
if user in self.opts['_genericusers'].split(","):
self.debug(f"{user} is a generic account name, skipping.")
continue
if self.opts['ignorenamedict'] and user in self.commonNames:
self.debug(f"{user} is found in our name dictionary, skipping.")
continue
if self.opts['ignoreworddict'] and user in self.words:
self.debug(f"{user} is found in our word dictionary, skipping.")
continue
if user not in self.reportedUsers and eventData != user:
if len(user) < self.opts['usernamesize']:
self.debug(f"{user} is too short, skipping.")
continue
evt = SpiderFootEvent("USERNAME", user, self.__name__, event)
self.notifyListeners(evt)
self.reportedUsers.append(user)
# Only look up accounts when we've received a USERNAME event (possibly from
# ourselves), since we want them to have gone through some verification by
# this module, and we don't want duplicates (one based on EMAILADDR and another
# based on USERNAME).
if eventName == "USERNAME":
res = self.checkSites(user)
for site in res:
evt = SpiderFootEvent(
"ACCOUNT_EXTERNAL_OWNED",
site,
self.__name__,
event
)
self.notifyListeners(evt)
if self.opts['permutate']:
permutations = self.generatePermutations(user)
for puser in permutations:
res = self.checkSites(puser)
for site in res:
evt = SpiderFootEvent(
    "SIMILAR_ACCOUNT_EXTERNAL",
    site,
    self.__name__,
    event
)
self.notifyListeners(evt)
image2 = self._vtf_importer.stage(
params["$bumpmap2"], vmt_data.param_open_texture("$bumpmap2"), 'Non-Color'
)
if "$bumptransform2" in params:
transform = vmt_data.param_as_transform("$bumptransform2")
if transform.scale != (1, 1) or transform.rotate != 0 or transform.translate != (0, 0):
texture_inputs["$bumpmap2"] = _TransformedTextureInput(
transform.scale, transform.rotate, transform.translate, interpolation
)
texture_inputs["$bumpmap2"].setimage(image2)
if vmt_data.param_flag("$addbumpmaps"):
# FIXME: mixing textures is not a correct way of combining normal maps
if "$bumpdetailscale1" in params:
bumpamount1 = vmt_data.param_as_float("$bumpdetailscale1")
else:
bumpamount1 = 1
if "$bumpdetailscale2" in params:
bumpamount2 = vmt_data.param_as_float("$bumpdetailscale2")
else:
bumpamount2 = 1
blend_fac = bumpamount2 / (bumpamount1 + bumpamount2)
blended = _BlendedTextureInput(blend_fac, texture_inputs["$bumpmap"], texture_inputs["$bumpmap2"])
else:
blended = _BlendedTextureInput(blend_input, texture_inputs["$bumpmap"], texture_inputs["$bumpmap2"])
texture_inputs["$bumpmap"] = blended
self._shader_dict['Normal'].input = texture_inputs["$bumpmap"].color
if vmt_data.param_flag("$ssbump"):
self._shader_dict['Normal'].append(_SsbumpToNormalMaterialNode())
else:
if not self.simple:
self._shader_dict['Normal'].append(_DXNormalMapConverterMaterialNode())
self._shader_dict['Normal'].append(_NormalMapMaterialNode())
elif not self.simple and ("$detail" in params and "$detailblendmode" in params
and vmt_data.param_as_int("$detailblendmode") == 10):
dimage = self._vtf_importer.stage(params["$detail"], vmt_data.param_open_texture("$detail"), 'Non-Color')
scale = (1, 1)
if "$detailscale" in params:
try:
scale_x, scale_y, _ = vmt_data.param_as_vec3("$detailscale")
except vmt.VMTParseException:
try:
scale_x, scale_y = vmt_data.param_as_vec2("$detailscale")
except vmt.VMTParseException:
scale_x = scale_y = vmt_data.param_as_float("$detailscale")
scale = (scale[0] * scale_x, scale[1] * scale_y)
if "$detailtexturetransform" in params:
transform = vmt_data.param_as_transform("$detailtexturetransform")
scale = (scale[0] * transform.scale[0], scale[1] * transform.scale[1])
rotate = transform.rotate
translate = transform.translate
else:
rotate = 0
translate = (0, 0)
if scale != (1, 1) or rotate != 0 or translate != (0, 0):
texture_inputs["$detail"] = _TransformedTextureInput(scale, rotate, translate, interpolation)
texture_inputs["$detail"].setimage(dimage)
self._shader_dict['Normal'].input = texture_inputs["$detail"].color
self._shader_dict['Normal'].append(_SsbumpToNormalMaterialNode())
if vmt_data.param_flag("$translucent"):
self.blend_method = 'BLEND'
self.shadow_method = 'HASHED'
if "$basetexture" in params:
self._shader_dict['Alpha'].input = texture_inputs["$basetexture"].alpha
if not self.simple and "$alpha" in params:
self._shader_dict['Alpha'].append(_MultiplyMaterialNode(vmt_data.param_as_float("$alpha")))
elif vmt_data.param_flag("$alphatest"):
self.blend_method = 'CLIP'
self.shadow_method = 'CLIP'
if "$basetexture" in params:
self._shader_dict['Alpha'].input = texture_inputs["$basetexture"].alpha
if "$alphatestreference" in params:
self.alpha_reference = vmt_data.param_as_float("$alphatestreference")
elif "$allowalphatocoverage" in params:
self.blend_method = 'HASHED'
if not self.simple and "$alpha" in params:
self._shader_dict['Alpha'].append(_MultiplyMaterialNode(vmt_data.param_as_float("$alpha")))
elif not self.simple and vmt_data.param_flag("$vertexalpha"):
self.blend_method = 'BLEND'
self.shadow_method = 'HASHED'
self._shader_dict['Alpha'].input = vertex_col_input.alpha
if not self.simple and "$alpha" in params:
self._shader_dict['Alpha'].append(_MultiplyMaterialNode(vmt_data.param_as_float("$alpha")))
elif "$alpha" in params:
self._shader_dict['Alpha'].const = vmt_data.param_as_float("$alpha")
self.blend_method = 'BLEND'
self.shadow_method = 'HASHED'
if "$masks1" in params:
image = self._vtf_importer.stage(params["$masks1"], vmt_data.param_open_texture("$masks1"), 'Non-Color')
texture_inputs["$masks1"].setimage(image)
masks1 = True
else:
masks1 = False
if vmt_data.param_flag("$phong") or vmt_data.shader == "character":
if vmt_data.param_flag("$basemapluminancephongmask") and "$basetexture" in params:
self._shader_dict['Specular'].input = texture_inputs["$basetexture"].color
elif not self.simple and vmt_data.param_flag("$basemapalphaphongmask") and "$basetexture" in params:
self._shader_dict['Specular'].input = texture_inputs["$basetexture"].alpha
elif not self.simple and masks1:
self._shader_dict['Specular'].input = texture_inputs["$masks1"].channels.g
elif not self.simple and "$bumpmap" in params:
self._shader_dict['Specular'].input = texture_inputs["$bumpmap"].alpha
if "$phongexponent" in params:
if not self.simple and "$phongexponent2" in params:
self._shader_dict['Roughness'].input = _BlendedConstantInput(
blend_input,
((150 - vmt_data.param_as_float("$phongexponent")) / 150) * 0.66,
((150 - vmt_data.param_as_float("$phongexponent2")) / 150) * 0.66,
).const
else:
self._shader_dict['Roughness'].const = (
(150 - vmt_data.param_as_float("$phongexponent")) / 150
) * 0.66
elif not self.simple and "$phongexponenttexture" in params:
image = self._vtf_importer.stage(
params["$phongexponenttexture"],
vmt_data.param_open_texture("$phongexponenttexture"),
'Non-Color'
)
texture_inputs["$phongexponenttexture"].setimage(image)
self._shader_dict['Roughness'].input = texture_inputs["$phongexponenttexture"].channels.r
self._shader_dict['Roughness'].append(_InvertMaterialNode())
self._shader_dict['Roughness'].append(_MultiplyMaterialNode(0.66))
if vmt_data.param_flag("$phongalbedotint"):
self._shader_dict['Specular Tint'].input = texture_inputs["$phongexponenttexture"].channels.g
else:
self._shader_dict['Roughness'].const = 0.6
elif "$envmap" in params:
if not self.simple and ((vmt_data.param_flag("$basealphaenvmapmask")
or vmt_data.param_flag("$basealphaenvmask"))
and "$basetexture" in params):
self._shader_dict['Specular'].input = texture_inputs["$basetexture"].alpha
self._shader_dict['Specular'].append(_InvertMaterialNode())
elif not self.simple and vmt_data.param_flag("$normalmapalphaenvmapmask") and "$bumpmap" in params:
self._shader_dict['Specular'].input = texture_inputs["$bumpmap"].alpha
elif not self.simple and (vmt_data.param_flag("$envmapmaskintintmasktexture")
and "$tintmasktexture" in params):
self._shader_dict['Specular'].input = texture_inputs["$tintmasktexture"].channels.r
elif "$envmapmask" in params:
image = self._vtf_importer.stage(
params["$envmapmask"], vmt_data.param_open_texture("$envmapmask"), 'Non-Color'
)
if "$envmapmasktransform" in params:
transform = vmt_data.param_as_transform("$envmapmasktransform")
if transform.scale != (1, 1) or transform.rotate != 0 or transform.translate != (0, 0):
texture_inputs["$envmapmask"] = _TransformedTextureInput(
transform.scale, transform.rotate, transform.translate, interpolation
)
texture_inputs["$envmapmask"].setimage(image)
self._shader_dict['Specular'].input = texture_inputs["$envmapmask"].color
if not self.simple and "$envmaptint" in params:
tint = vmt_data.param_as_color("$envmaptint")
self._shader_dict['Specular'].append(_MultiplyMaterialNode(sum(tint) / 3))
elif "$envmaptint" in params:
tint = vmt_data.param_as_color("$envmaptint")
self._shader_dict['Specular'].const = sum(tint) / 3
else:
self._shader_dict['Specular'].const = 0.8
self._shader_dict['Roughness'].const = 0.1
elif vmt_data.shader == "unlitgeneric" or vmt_data.param_flag("%compilenolight"):
self._shader_dict['Specular'].const = 0.0
self._shader_dict['Roughness'].const = 1.0
else:
self._shader_dict['Specular'].const = 0.1
self._shader_dict['Roughness'].const = 0.9
if not self.simple and masks1:
self._shader_dict['Metallic'].input = texture_inputs["$masks1"].channels.b
self._shader_dict['Metallic'].append(_InvertMaterialNode())
elif "$metalness" in params:
self._shader_dict['Metallic'].const = vmt_data.param_as_float("$metalness")
selfillum_input = None
if not self.simple and vmt_data.param_flag("$selfillum_envmapmask_alpha") and "$envmapmask" in params:
selfillum_input = texture_inputs["$envmapmask"].alpha
elif vmt_data.param_flag("$selfillum"):
if "$selfillummask" in params:
image = self._vtf_importer.stage(
params["$selfillummask"], vmt_data.param_open_texture("$selfillummask"), 'Non-Color'
)
texture_inputs["$selfillummask"].setimage(image)
selfillum_input = texture_inputs["$selfillummask"].color
elif not self.simple and "$basetexture" in params:
selfillum_input = texture_inputs["$basetexture"].alpha
if selfillum_input is not None:
if not self.simple:
self._shader_dict['Emission'].input = texture_inputs["$basetexture"].color
self._shader_dict['Emission'].append(_MultiplyRGBMaterialNode(selfillum_input, 1))
else:
self._shader_dict['Emission'].input = selfillum_input
def get_size(self) -> Tuple[int, int]:
if self.size_reference is None:
return 1, 1
return self.size_reference.get_image().size
def get_material(self) -> bpy.types.Material:
if self._material is None:
self._material = bpy.data.materials.new(self.name)
self._material.vmt_data.nodraw = self.nodraw
return self._material
def set_material(self, material: bpy.types.Material) -> None:
self._material = material
material.name = self.name
self._material.vmt_data.nodraw = self.nodraw
def build(self) -> bpy.types.Material:
material = self.get_material()
material.use_nodes = True
material.blend_method = self.blend_method
material.shadow_method = self.shadow_method
material.alpha_threshold = self.alpha_reference
material.use_backface_culling = self.cull
nt = material.node_tree
nt.nodes.clear()
pos_ref = _PosRef()
out_node: Node = nt.nodes.new('ShaderNodeOutputMaterial')
out_node.location = pos_ref.loc()
pos_ref.x -= 300
if self.nodraw:
shader_node: Node = nt.nodes.new('ShaderNodeBsdfTransparent')
elif self.water:
material.use_screen_refraction = True
shader_node = nt.nodes.new('ShaderNodeBsdfGlass')
shader_node.inputs['IOR'].default_value = 1.333
shader_node.inputs['Roughness'].default_value = 0.3
else:
shader_node = nt.nodes.new('ShaderNodeBsdfPrincipled')
shader_node.location = pos_ref.loc()
pos_ref.x -= 100
nt.links.new(shader_node.outputs['BSDF'], out_node.inputs['Surface'])
if self.nodraw:
return material
required_inputs: Dict[_MaterialInputBase, None] = {} # Waiting for ordered sets
paths_pos_ref = pos_ref.copy()
path_end_pos_x = 0
for socket_name in self._shader_dict:
paths_pos_ref.y = min(paths_pos_ref.y, self._shader_dict[socket_name].min_start_y)
path_pos_ref = paths_pos_ref.copy()
required_inputs.update(map(
lambda x: (x, None),
self._shader_dict[socket_name].connect_path(nt, shader_node.inputs[socket_name], path_pos_ref)
))
if path_pos_ref.x < path_end_pos_x:
path_end_pos_x = path_pos_ref.x
paths_pos_ref.y -= self._shader_dict[socket_name].dimension_y()
created_inputs: Set[_MaterialInputBase] = set()
input_pos_ref = pos_ref.copy()
input_pos_ref.x = path_end_pos_x - 100
for material_input in required_inputs:
if material_input in created_inputs:
continue
dimension_y = material_input.full_dimension_y(created_inputs)
material_input.full_create(nt, input_pos_ref.copy(), created_inputs)
input_pos_ref.y -= dimension_y
for socket_name in self._shader_dict:
self._shader_dict[socket_name].connect_inputs(nt)
return material
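# The build() method above follows the usual Blender node-tree recipe: enable
# nodes, clear the tree, create an output node and a shader node, position them,
# and link the shader's BSDF output to the material surface. A minimal
# standalone sketch of that recipe (illustrative only; it assumes it runs inside
# Blender where bpy is available, and is not used by the importer itself):
def _example_minimal_principled_material(name="example_material"):
    import bpy  # already imported by this module; repeated so the sketch stands alone
    material = bpy.data.materials.new(name)
    material.use_nodes = True
    nt = material.node_tree
    nt.nodes.clear()
    out_node = nt.nodes.new('ShaderNodeOutputMaterial')
    shader_node = nt.nodes.new('ShaderNodeBsdfPrincipled')
    shader_node.location = (-300, 0)
    # connect the BSDF output to the material surface, as build() does above
    nt.links.new(shader_node.outputs['BSDF'], out_node.inputs['Surface'])
    return material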
class StagedMaterial():
def __init__(self, importer: 'VMTImporter', name: str,
builder: Optional[_MaterialBuilder], reused: Optional[bpy.types.Material] = None) -> None:
self.name = name
self.builder = builder
self.reused = reused
self._vmt_importer = importer
def get_material(self) -> bpy.types.Material:
if self.reused is not None:
return self.reused
if self.builder is None:
raise Exception("a builder was not specified for non-reused staged material")
return self.builder.get_material()
def set_material(self, material: bpy.types.Material) -> None:
if self.builder is not None:
self.builder.set_material(material)
@staticmethod
def from_existing(importer: 'VMTImporter', material: bpy.types.Material) -> 'StagedMaterial':
return StagedMaterial(importer, material.vmt_data.full_name, None, material)
def _fallback_material(material_name: str, truncated_name: str) -> VMTData:
return VMTData(1, 1, fallback_material(material_name, truncated_name))
class VMTImporter():
def __init__(self, verbose: bool = False, simple: bool = False,
interpolation: str = 'Linear', cull: bool = False,
editor_materials: bool = False,
reuse_old: bool = True, reuse_old_images: bool = True) -> None:
self.verbose = verbose
self.simple = simple
self.interpolation = interpolation
self.cull = cull
self.editor_materials = editor_materials
self.reuse_old = reuse_old
self.progress_callback: Callable[[int, int], None] = lambda current, total: None
self.texture_progress_callback: Callable[[int, int], None] = lambda current, total: None
self._nodraw_cache: Dict[str, bool] = {}
self._precache: Dict[str, _MaterialBuilder] = {}
self._cache: Dict[str, VMTData] = {}
self._vtf_importer = import_vtf.VTFImporter(reuse_old=reuse_old_images)
self._staging: Dict[str, StagedMaterial] = {}
self._loaded: Dict[str, StagedMaterial] = {}
self.reusable_amount = 0
self.importable_amount = 0
self.invalid_amount = 0
def is_nodraw(self, material_name: str, vmt_data: Callable[[], vmt.VMT]) -> bool:
material_name = material_name.lower()
truncated_name = truncate_name(material_name)
if material_name in self._nodraw_cache:
return self._nodraw_cache[material_name]
if self.reuse_old and truncated_name in bpy.data.materials:
material: bpy.types.Material = bpy.data.materials[truncated_name]
if material.use_nodes and len(material.node_tree.nodes) != 0:
is_nodraw = material.vmt_data.nodraw
self._nodraw_cache[material_name] = is_nodraw
return is_nodraw
try:
builder = _MaterialBuilder(self._vtf_importer, truncated_name, vmt_data(),
simple=self.simple, interpolation=self.interpolation,
cull=self.cull, editor_materials=self.editor_materials)
except FileNotFoundError:
print(f"[WARNING] MATERIAL {material_name} NOT FOUND")
self._cache[material_name] = _fallback_material(material_name, truncated_name)
is_nodraw = is_invisible_tool((material_name,))
except vmt.VMTParseException as err:
print(f"[WARNING] MATERIAL {material_name} IS INVALID")
if self.verbose:
traceback.print_exception(type(err), err, err.__traceback__)
self._cache[material_name] = _fallback_material(material_name, truncated_name)
is_nodraw = is_invisible_tool((material_name,))
else:
self._precache[material_name] = builder
is_nodraw = builder.nodraw
self._nodraw_cache[material_name] = is_nodraw
return is_nodraw
def stage(self, material_name: str, vmt_data: Callable[[], vmt.VMT]) -> StagedMaterial:
material_name = material_name.lower()
truncated_name = truncate_name(material_name)
if material_name in self._staging:
return self._staging[material_name]
if material_name in self._loaded:
return self._loaded[material_name]
if material_name in self._cache:
# material was not found in nodraw check
self._staging[material_name] = StagedMaterial.from_existing(self, self._cache[material_name].material)
self.invalid_amount += 1
return self._staging[material_name]
if self.verbose:
print(f"[VERBOSE] Staging material {material_name}")
if self.reuse_old and truncated_name in bpy.data.materials:
material: bpy.types.Material = bpy.data.materials[truncated_name]
if material.use_nodes and len(material.node_tree.nodes) != 0:
self._staging[material_name] = StagedMaterial.from_existing(self, material)
self.reusable_amount += 1
return self._staging[material_name]
try:
if material_name in self._precache:
builder = self._precache[material_name]
else:
builder = _MaterialBuilder(self._vtf_importer, truncated_name, vmt_data(),
simple=self.simple, interpolation=self.interpolation,
cull=self.cull, editor_materials=self.editor_materials)
self._nodraw_cache[material_name] = builder.nodraw
except FileNotFoundError:
print(f"[WARNING] MATERIAL {material_name} NOT FOUND")
data = _fallback_material(material_name, truncated_name)
self._cache[material_name] = data
self._staging[material_name] = StagedMaterial.from_existing(self, data.material)
self.invalid_amount += 1
<filename>py_dataset/libdataset.py
#!/usr/bin/env python3
#
# libdataset.py is a ctypes wrapper for our libdataset.go C-shared library.
# It is used to test our dataset functions exported from the C-shared
# library libdataset.so, libdataset.dylib or libdataset.dll.
#
# @author <NAME>, <<EMAIL>>
#
# Copyright (c) 2019, Caltech
# All rights not granted herein are expressly reserved by Caltech.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys, platform
import os
import json
from ctypes import CDLL, c_char_p, c_int, c_bool
# Figure out shared library extension
go_basename = 'lib/libdataset'
ext = '.so'
if sys.platform.startswith('win'):
ext = '.dll'
if sys.platform.startswith('darwin'):
# M1 Macs use a special dylib
if platform.processor() == 'arm':
ext = '-arm.dylib'
else:
ext = '.dylib'
if sys.platform.startswith('linux'):
ext = '.so'
# Find our shared library and load it
dir_path = os.path.dirname(os.path.realpath(__file__))
libdataset = CDLL(os.path.join(dir_path, go_basename+ext))
#
# Setup our Go functions to be nicely wrapped
#
# NOTE: we use a horrible hack in this library. It is a royal pain
# to pass dynamic dataset structures between C and Python let alone
# between Go and C. As a result I've chosen the easy programming path
# of passing JSON source between the code spaces. This has proven
# simple, reliable and *inefficient* in memory usage. I've opted for
# reliability and simplicity. RSD, 2020-03-18
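# A minimal sketch of what that convention looks like from the Python side
# (a hypothetical helper, not part of libdataset itself): plain strings are
# encoded to UTF-8 bytes before crossing into the shared library, and JSON
# text coming back is decoded into Python objects.
def _example_collection_keys(collection_name):
    src = libdataset.keys(collection_name.encode('utf-8'))
    if not src:
        return []
    return json.loads(src.decode('utf-8'))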
# error_clear() clears the error values
#
# It takes no args and returns no value.
# error_message() returns the error messages aggregated
# in previously invoked shared library functions.
#
# Return: error message text (string)
libdataset.error_message.restype = c_char_p
# use_strict_dotpath() sets the state of the strict dotpath
# interpretation. Strict dot paths expect a period at the
# beginning, non-strict will prefix a period automatically.
# This is useful where you're using labels in a report
# (the period is annoying) and also in generating transformed
# object attributes (the period is useful).
#
# Args: is True (1) or False (0)
libdataset.use_strict_dotpath.argtypes = [ c_bool ]
# Return: True (1) or False (0)
libdataset.use_strict_dotpath.restype = c_bool
# dataset_version() returns the version number of the libdataset
# used.
#
# Return: semver (string)
libdataset.dataset_version.restype = c_char_p
# is_verbose() returns the state of the verbose flag.
#
# Returns: True (1) or False (0)
libdataset.is_verbose.restype = c_bool
# verbose_on() sets the verbose flag to True.
#
# Returns: True (1) or False (0)
libdataset.verbose_on.restype = c_bool
# verbose_off() sets the verbose flag to False
#
# Returns: True (1) or False (0)
libdataset.verbose_off.restype = c_bool
# init_collection() creates a dataset collection.
#
# Args: collection_name (string)
libdataset.init_collection.argtypes = [ c_char_p ]
# Returns: True (1) or False (0)
libdataset.init_collection.restype = c_bool
# is_collection_open() checks to see if the collection
# is already open and in the list of open collections.
#
# Args: collection_name (string)
libdataset.is_collection_open.argtypes = [ c_char_p ]
# Returns: True (1) or False (0)
libdataset.is_collection_open.restype = c_bool
# collections() returns a list of opened collections.
#
# Returns: string (names of collections)
libdataset.collections.restype = c_char_p
# open_collection() explicitly opens a collection.
#
# Args: collection_name (string)
libdataset.open_collection.argtypes = [ c_char_p ]
# Returns: True (1) or False (0)
libdataset.open_collection.restype = c_bool
# close_collection() closes a previously opened collection.
# Most libdataset commands auto-magically open the collection.
#
# Args: collection_name (string)
libdataset.close_collection.argtypes = [ c_char_p ]
# Returns: True (1) or False (0)
libdataset.close_collection.restype = c_bool
# close_all_collections closes all opened collections.
#
# Returns: True (1) or False (0)
libdataset.close_all_collections.restype = c_bool
# create_object() creates a JSON object in a collection.
#
# Args: collection_name (string), key (string), value (JSON source)
libdataset.create_object.argtypes = [ c_char_p, c_char_p, c_char_p ]
# Returns: True (1) or False (0)
libdataset.create_object.restype = c_bool
# read_object() retrieves a JSON object from a collection.
#
# Args: collection_name (string), key (string), clean_object (bool)
libdataset.read_object.argtypes = [ c_char_p, c_char_p, c_bool ]
# Returns: string (JSON source)
libdataset.read_object.restype = c_char_p
# read_object_list() returns a list of objects for the provided keys
# in the collection.
#
# Args: collection_name (string), keys (list of key strings AS JSON),
# clean_object (bool)
libdataset.read_object_list.argtypes = [ c_char_p, c_char_p, c_bool ]
# Returns: string (JSON source)
libdataset.read_object_list.restype = c_char_p
# update_object() updates an object in the collection given a key
# and new object.
#
# Args: collection_name (string), key (string), value (JSON source)
libdataset.update_object.argtypes = [ c_char_p, c_char_p, c_char_p ]
# Returns: True (1) or False (0)
libdataset.update_object.restype = c_bool
# delete_object() removes an object from a collection.
#
# Args: collection_name (string), key (string)
libdataset.delete_object.argtypes = [ c_char_p, c_char_p ]
# Returns: True (1), False (0)
libdataset.delete_object.restype = c_bool
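# A hedged usage sketch of the CRUD bindings declared above (collection and key
# names below are placeholders, and the collection is assumed to already exist,
# e.g. via init_collection()): create an object, read it back, update it, then
# delete it, surfacing error_message() when a call reports failure.
def _example_crud_roundtrip(c_name=b"things.ds", key=b"t1"):
    if not libdataset.create_object(c_name, key, json.dumps({"one": 1}).encode('utf-8')):
        raise RuntimeError(libdataset.error_message().decode('utf-8'))
    obj = json.loads(libdataset.read_object(c_name, key, False).decode('utf-8'))
    obj["two"] = 2
    if not libdataset.update_object(c_name, key, json.dumps(obj).encode('utf-8')):
        raise RuntimeError(libdataset.error_message().decode('utf-8'))
    return libdataset.delete_object(c_name, key)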
# key_exists() tests for a key in a collection.
#
# Args: collection_name (string), key (string)
libdataset.key_exists.argtypes = [ c_char_p, c_char_p ]
# Returns: True (1), False (0)
libdataset.key_exists.restype = c_bool
# keys() returns a list of all keys in a collection.
#
# Args: collection_name (string)
libdataset.keys.argtypes = [ c_char_p ]
# Returns: string (JSON source)
libdataset.keys.restype = c_char_p
# count_objects() returns the number of objects in a collection.
#
# Args: collection_name (string)
libdataset.count_objects.argtypes = [ c_char_p ]
# Returns: integer (int)
libdataset.count_objects.restype = c_int
# NOTE: this diverges from cli and reflects low level dataset organization
#
# import_csv - import a CSV file into a collection
# syntax: COLLECTION CSV_FILENAME ID_COL
#
# options that should support sensible defaults:
#
# UseHeaderRow (bool, 1 true, 0 false)
# Overwrite (bool, 1 true, 0 false)
# Args: collection_name (string), csv_filename (string), id_column_no (int), use_header_row (bool), overwrite (bool)
libdataset.import_csv.argtypes = [ c_char_p, c_char_p, c_int, c_bool, c_bool ]
# Returns: True (1), False (0)
libdataset.import_csv.restype = c_bool
# export_csv() exports a dataset collection objects into a CSV file
# based on the contents of a frame.
#
# Args: collection_name (string), frame_name (string), csv_filename (string)
libdataset.export_csv.argtypes = [ c_char_p, c_char_p, c_char_p ]
# Returns: True (1), False (0)
libdataset.export_csv.restype = c_bool
# sync_receive_csv() retrieves data in a CSV file and updates a collection
# using a frame.
#
# Args: collection_name (string), frame_name (string), csv_filename (string), overwrite (bool)
libdataset.sync_recieve_csv.argtypes = [ c_char_p, c_char_p, c_char_p, c_bool]
# Returns: True (1) or False (0)
libdataset.sync_recieve_csv.restype = c_bool
# sync_send_csv() updates a CSV file based on the objects in a collection
# using a frame.
#
# Args: collection_name (string), frame_name (string), csv_filename (string), overwrite (bool)
libdataset.sync_send_csv.argtypes = [ c_char_p, c_char_p, c_char_p, c_bool ]
# Returns: True (1) or False (0)
libdataset.sync_send_csv.restype = c_bool
# collection_exists() returns True if a collection exists, False otherwise.
# NOTE: this binding has gone by a different name in other releases of
# libdataset; collection_exists() is the name used here.
#
# Returns: True (1) or False (0)
libdataset.collection_exists.restype = c_bool
# list_objects() returns a list of objects for a list of keys.
#
# Args: collection_name (string), key list (JSON array source)
libdataset.list_objects.argtypes = [ c_char_p, c_char_p ]
# Returns: string (JSON Array of Objects source)
libdataset.list_objects.restype = c_char_p
# object_path() returns the file system path to an object in a
# collection.
#
# Args: collection_name (string), key (string)
libdataset.object_path.argtypes = [ c_char_p, c_char_p ]
# Return: string
libdataset.object_path.restype = c_char_p
# check_collection() checks a collection for structural errors.
#
# Args: collection_name (string)
libdataset.check_collection.argtypes = [ c_char_p ]
# Returns: True (1) or False (0)
libdataset.check_collection.restype = c_bool
# repair_collection() tries to repair a collection when it has
# structural errors.
#
# Args: collection_name (string)
libdataset.repair_collection.argtypes = [ c_char_p ]
# Returns: True (1) or False (0)
libdataset.repair_collection.restype = c_bool
# attach() adds a file to a JSON object record.
#
# Args: collection_name (string), key (string), semver (string), filenames (string)
libdataset.attach.argtypes = [ c_char_p, c_char_p, c_char_p, c_char_p ]
# Returns: True (1) or False (0)
libdataset.attach.restype = c_bool
# attachments() lists the files attached to a JSON object record.
<filename>ops/dataset.py
# Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# <NAME>*, <NAME>, <NAME>
# {jilin, <EMAIL>, <EMAIL>
import torch.utils.data as data
from PIL import Image
import os
import numpy as np
from numpy.random import randint
# intensities = ['BL1', 'PA1', 'PA2', 'PA3', 'PA4']
dict_biovid = [None, None,
{'BL1': 0, 'PA4': 1}, {'BL1': 0, 'PA3': 1, 'PA4': 2},
{'BL1': 0, 'PA2': 1, 'PA3': 2, 'PA4': 3},
{'BL1': 0, 'PA1': 1, 'PA2': 2, 'PA3': 3, 'PA4': 4}]
no_biovid = [37, 38, 41, 42, 43, 50, 57, 68] # Not used in biomedical signals due to poor signal quality
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
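# A small illustration of what VideoRecord wraps: one row of a TSN-style list
# file, [path, num_frames, label] (the row below is made up for the example).
def _example_video_record():
    record = VideoRecord(["videos/clip_0001", "120", "3"])
    return record.path, record.num_frames, record.label  # ("videos/clip_0001", 120, 3)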
class VideoRecordIPN(object):
def __init__(self, row, idn=1):
self._data = row
self._idn = idn
@property
def path(self):
return self._data[0]
@property
def st_frame(self):
return int(self._data[3])
@property
def en_frame(self):
return int(self._data[4])
@property
def num_frames(self):
return int(self._data[-1])
@property
def label(self):
return int(self._data[2])-int(self._idn)
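# TSNDataSet below picks self.num_segments frame indices per clip. For training
# (_sample_indices, "normal sample") the idea is: split the clip into equal
# segments and draw one random frame from each. A simplified standalone sketch
# of that sampling step, independent of the class:
def _example_segment_sampling(num_frames, num_segments):
    average_duration = num_frames // num_segments
    if average_duration > 0:
        offsets = np.multiply(list(range(num_segments)), average_duration) + \
            randint(average_duration, size=num_segments)
    else:
        offsets = np.zeros((num_segments,), dtype=int)
    return offsets + 1  # frame numbering in the list files is 1-based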
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.jpg', transform=None,
random_shift=True, test_mode=False, remove_missing=False,
dense_sample=False, twice_sample=False, dense_window=False, full_sample=False,
ipn=False, ipn_no_class=1):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.remove_missing = remove_missing
self.dense_sample = dense_sample # using dense sample as I3D
self.twice_sample = twice_sample # twice sample for more validation
self.dense_window = dense_window
self.full_sample = full_sample
self.ipn = ipn
self.id_noc = ipn_no_class
if self.dense_sample:
print('=> Using dense sample for the dataset...')
if self.twice_sample:
print('=> Using twice sample for the dataset...')
if self.dense_window:
print('=> Using dense window sample for the dataset...')
if self.full_sample:
print('=> Using full sample for the dataset...')
if self.modality == 'RGBDiff':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list()
def _load_image(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff':
if self.image_tmpl == '{}_{:06d}.jpg':
file_name = self.image_tmpl.format(directory, idx)
return [Image.open(os.path.join(self.root_path, directory, file_name)).convert('RGB')]
elif self.image_tmpl == '{}/{}_{:04d}.jpg':
file_name = self.image_tmpl.format(directory, directory, idx)
return [Image.open(os.path.join(self.root_path, directory.split('-')[0], file_name)).convert('RGB')]
else:
try:
return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert('RGB')]
except Exception:
print('error loading image:', os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
return [Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')]
elif self.modality == 'Flow':
if self.image_tmpl == 'flow_{}_{:05d}.jpg': # ucf
x_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('x', idx))).convert(
'L')
y_img = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format('y', idx))).convert(
'L')
elif self.image_tmpl == '{:06d}-{}_{:05d}.jpg': # something v1 flow
x_img = Image.open(os.path.join(self.root_path, '{:06d}'.format(int(directory)), self.image_tmpl.
format(int(directory), 'x', idx))).convert('L')
y_img = Image.open(os.path.join(self.root_path, '{:06d}'.format(int(directory)), self.image_tmpl.
format(int(directory), 'y', idx))).convert('L')
else:
try:
# idx_skip = 1 + (idx-1)*5
flow = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(idx))).convert(
'RGB')
except Exception:
print('error loading flow file:',
os.path.join(self.root_path, directory, self.image_tmpl.format(idx)))
flow = Image.open(os.path.join(self.root_path, directory, self.image_tmpl.format(1))).convert('RGB')
# the input flow file is RGB image with (flow_x, flow_y, blank) for each channel
flow_x, flow_y, _ = flow.split()
x_img = flow_x.convert('L')
y_img = flow_y.convert('L')
return [x_img, y_img]
elif self.modality in ['RGB-flo', 'RGB-seg']:
if self.modality.split('-')[1] == 'flo':
sensor = 'flow'
ext = 'jpg'
elif self.modality.split('-')[1] == 'seg':
sensor = 'segment_five'
ext = 'png'
file_name = self.image_tmpl.format(directory, idx)
imgC = Image.open(os.path.join(self.root_path, directory, file_name)).convert('RGB')
imgD = Image.open(os.path.join(self.root_path.replace('frames',sensor), directory, file_name.replace('jpg',ext))).convert('RGB')
if self.modality.split('-')[1] == 'seg':
return [imgC, self._ipn_fassd(imgD.convert('L'))]
else:
return [imgC, imgD]
def _ipn_fassd(self, img, pix_val=190):
imgS = np.asarray(img)
imgT = imgS.copy()
imgT[imgS==pix_val] = 0
imgT[imgT>0] = 255
imgT = np.uint8(imgT)
return Image.fromarray(np.concatenate([np.expand_dims(imgT, 2),np.expand_dims(imgT, 2),np.expand_dims(imgT, 2)], axis=2))
def _parse_biovid(self, directory, frames=138):
folder_list = os.listdir(os.path.join(self.root_path, directory))
folder_list.sort()
bio_labels = dict_biovid[self.id_noc]
# print(list(bio_labels.keys()))
out_list = []
for i, path in enumerate(folder_list):
if path.split('-')[1] not in list(bio_labels.keys()):
continue
out_list.append([path, frames, bio_labels[path.split('-')[1]]])
return out_list
def _parse_list(self):
# make a function that generates all the items from one subject's folder,
# which would produce lists in a form that the VideoRecord class can use
# check the frame number is large >3:
if self.ipn:
tmp = [x.strip().split(',') for x in open(self.list_file)]
if self.id_noc > 1:
tmp = [item for item in tmp if int(item[2]) > self.id_noc-1]
self.video_list = [VideoRecordIPN(item, self.id_noc) for item in tmp]
elif self.image_tmpl == '{}/{}_{:04d}.jpg':
val_ids_raw = self.list_file.split(',')[1]
val_ids = [int(item) for item in val_ids_raw.split('.')]
main_folder_list = os.listdir(self.root_path)
main_folder_list.sort()
if self.list_file.split(',')[0] == 'train':
print('generating training list of {} subjects...'.format(len(main_folder_list)-len(val_ids)-len(no_biovid)))
tmp = []
for i, item in enumerate(main_folder_list):
if i in no_biovid:
continue
if i not in val_ids:
tmp += self._parse_biovid(item)
else:
print('validating BioVid with {} subjects:'.format(len(val_ids)))
print(' {}'.format([main_folder_list[item] for item in val_ids]))
tmp = []
for val_id in val_ids:
tmp += self._parse_biovid(main_folder_list[val_id])
self.video_list = [VideoRecord(item) for item in tmp]
else:
tmp = [x.strip().split(' ') for x in open(self.list_file)]
if not self.test_mode or self.remove_missing:
tmp = [item for item in tmp if int(item[1]) >= 3]
self.video_list = [VideoRecord(item) for item in tmp]
if self.image_tmpl == '{:06d}-{}_{:05d}.jpg':
for v in self.video_list:
v._data[1] = int(v._data[1]) / 2
print('video clips:%d' % (len(self.video_list)))
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
if self.dense_sample: # i3d dense sample
sample_pos = max(1, 1 + record.num_frames - 64)
t_stride = 64 // self.num_segments
start_idx = 0 if sample_pos == 1 else np.random.randint(0, sample_pos - 1)
offsets = [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
else: # normal sample
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration,
size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
if self.dense_sample: # i3d dense sample
sample_pos = max(1, 1 + record.num_frames - 64)
t_stride = 64 // self.num_segments
start_idx = 0 if sample_pos == 1 else np.random.randint(0, sample_pos - 1)
offsets = [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
else:
if record.num_frames > self.num_segments + self.new_length - 1:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
if self.dense_sample:
# # Original:
# sample_pos = max(1, 1 + record.num_frames - 64)
# t_stride = 64 // self.num_segments
# start_list = np.linspace(0, sample_pos - 1, num=10, dtype=int)
# Proposed:
chunks = record.num_frames//self.num_segments
t_stride = max(1, chunks // self.num_segments)
sample_pos = max(1, 1 + record.num_frames - t_stride*self.num_segments)
start_list = np.linspace(0, sample_pos - 1, num=10, dtype=int)
offsets = []
for start_idx in start_list.tolist():
offsets += [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
elif self.dense_window:
chunks = record.num_frames//self.num_segments
t_stride = max(1, chunks // self.num_segments)
sample_pos = max(1, 1 + record.num_frames - t_stride*self.num_segments)
start_list = np.linspace(0, sample_pos - 1, num=chunks+1, dtype=int)
offsets = []
for start_idx in start_list.tolist():
offsets += [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]
return np.array(offsets) + 1
elif self.twice_sample:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)] +
[int(tick * x) for x in range(self.num_segments)])
return offsets + 1
elif self.full_sample:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = []
for start_idx in range(int(tick+1)):
offsets += [int(start_idx + tick * x) for x in range(self.num_segments)]
return np.array(offsets) + 1
else:
tick = (record.num_frames - self.new_length + 1) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segments)])
return offsets + 1
def __getitem__(self, index):
record = self.video_list[index]
# check this is a legit video folder
if self.image_tmpl == 'flow_{}_{:05d}.jpg':
file_name = self.image_tmpl.format('x', 1)
full_path = os.path.join(self.root_path, record.path, file_name)
elif self.image_tmpl == '{:06d}-{}_{:05d}.jpg':
file_name = self.image_tmpl.format(int(record.path), 'x', 1)
full_path = os.path.join(self.root_path, '{:06d}'.format(int(record.path)), file_name)
elif self.image_tmpl == '{}_{:06d}.jpg':
file_name = self.image_tmpl.format(record.path, record.st_frame)
full_path = os.path.join(self.root_path, record.path, file_name)
elif self.image_tmpl == '{}/{}_{:04d}.jpg':
file_name = self.image_tmpl.format(record.path, record.path, 1)
full_path = os.path.join(self.root_path, record.path.split('-')[0], file_name)
else:
file_name = self.image_tmpl.format(1)
full_path = os.path.join(self.root_path, record.path, file_name)
while not os.path.exists(full_path):
print('################## Not Found:', os.path.join(self.root_path, record.path, file_name))
index = np.random.randint(len(self.video_list))
record = self.video_list[index]
if self.image_tmpl == 'flow_{}_{:05d}.jpg':
file_name = self.image_tmpl.format('x', 1)
full_path = os.path.join(self.root_path, record.path, file_name)
elif self.image_tmpl == '{:06d}-{}_{:05d}.jpg':
| |
import typing
import os
import sys
import select
import io
import warnings
import logging
from collections import namedtuple
import functools
import shlex
import subprocess
import google.auth
import paramiko
import shutil
import time
import numpy as np
import pandas as pd
import requests
import hashlib
def isatty(*streams: typing.IO) -> bool:
"""
Returns true if all of the provided streams are ttys
"""
for stream in streams:
try:
if not (hasattr(stream, 'fileno') and os.isatty(stream.fileno())):
return False
except io.UnsupportedOperation:
return False
return True
class ArgumentHelper(dict):
"""
Helper class for setting arguments to slurm commands
Used only to handle keyword arguments to console commands
Should not be responsible for positionals
"""
def __init__(self, *flags: str, **params: typing.Any):
"""
Creates a new ArgumentHelper
Flags can be passed as positional arguments
Parameters can be passed as keyword arguments
"""
object.__setattr__(self, 'defaults', {})
object.__setattr__(self, 'flags', [item for item in flags])
object.__setattr__(self, 'params', {k:v for k,v in params.items()})
for key, val in [*self.params.items()]:
if val is True:
self.flags.append(key)
del self.params[key]
def __repr__(self) -> str:
return '<ArgumentHelper{}>'.format(
self.commandline
)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
def __getitem__(self, name):
if name in self.params:
return self.params[name]
if name in self.flags:
return True
if name in self.defaults:
return self.defaults[name]
def __setitem__(self, name, value):
if value is True:
self.flags.append(name)
elif value is False:
object.__setattr__(self, 'flags', [flag for flag in self.flags if flag != name])
else:
self.params[name] = value
def __delitem__(self, name):
if name in self.params:
del self.params[name]
elif name in self.flags:
object.__setattr__(self, 'flags', [flag for flag in self.flags if flag != name])
else:
raise KeyError("No such argument {}".format(name))
@staticmethod
def translate(flag) -> str:
"""Converts acceptable python strings to command line args"""
return flag.replace('_', '-')
@property
def commandline(self) -> str:
"""Expands the arguments to command line form"""
return '{short_prespace}{short_flags}{short_params}{long_flags}{params}'.format(
short_prespace=' -' if len([f for f in self.flags if len(f) == 1]) else '',
short_flags=''.join(flag for flag in self.flags if len(flag)==1),
short_params=''.join(
' -{}={}'.format(self.translate(key), shlex.quote(value))
for key, value in self.params.items()
if len(key) == 1
),
long_flags=''.join(' --{}'.format(self.translate(flag)) for flag in self.flags if len(flag) > 1),
params=''.join(
' --{}={}'.format(self.translate(key), shlex.quote(value))
for key, value in self.params.items()
if len(key) > 1
)
)
def setdefaults(self, **kwargs: typing.Any):
self.defaults.update(kwargs)
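# A short usage sketch of ArgumentHelper (values here are illustrative): bare
# positional strings become flags, single-character keywords become short
# options, longer keywords become long options, and underscores are translated
# to dashes by translate().
def _example_argument_helper():
    args = ArgumentHelper('verbose', 'x', n='1', job_name='canine')
    # args.commandline -> something like " -x -n=1 --verbose --job-name=canine"
    return args.commandline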
def make_interactive(channel: paramiko.Channel) -> typing.Tuple[int, typing.BinaryIO, typing.BinaryIO]:
"""
Manages an interactive command
Takes in a paramiko.Channel shared by stdin, stdout, stderr of a currently running command
The current interpreter stdin is duplicated and written to the command's stdin
The command's stdout and stderr are written to the interpreter's stdout and stderr
and also buffered in a ByteStream for later reading
Returns (exit status, Stdout buffer, Stderr buffer)
"""
infd = sys.stdin.fileno()
channelfd = channel.fileno()
poll = select.poll()
poll.register(infd, select.POLLIN+select.POLLPRI+select.POLLERR+select.POLLHUP)
poll.register(channelfd, select.POLLIN+select.POLLPRI+select.POLLERR+select.POLLHUP)
stdout = io.BytesIO()
stderr = io.BytesIO()
while not channel.exit_status_ready():
for fd, event in poll.poll(0.5):
if fd == infd and event & (select.POLLIN + select.POLLPRI):
# Text available on python stdin
channel.send(os.read(infd, 4096))
if channel.recv_ready():
content = channel.recv(4096)
sys.stdout.write(content.decode())
sys.stdout.flush()
stdout.write(content)
if channel.recv_stderr_ready():
content = channel.recv_stderr(4096)
sys.stderr.write(content.decode())
sys.stderr.flush()
stderr.write(content)
if channel.recv_ready():
content = channel.recv(4096)
sys.stdout.write(content.decode())
sys.stdout.flush()
stdout.write(content)
if channel.recv_stderr_ready():
content = channel.recv_stderr(4096)
sys.stderr.write(content.decode())
sys.stderr.flush()
stderr.write(content)
stdout.seek(0,0)
stderr.seek(0,0)
return channel.recv_exit_status(), stdout, stderr
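# A hedged sketch of how make_interactive() might be driven (host and command
# are placeholders): open a session channel on an established paramiko
# transport, request a pty, start the command, then hand the shared channel to
# make_interactive() so local stdin is forwarded and output is echoed/buffered.
def _example_interactive_command(host="login-node.example.com", cmd="top"):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host)
    channel = client.get_transport().open_session()
    channel.get_pty()
    channel.exec_command(cmd)
    rc, stdout, stderr = make_interactive(channel)
    client.close()
    return rc, stdout.read(), stderr.read()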
def get_default_gcp_zone():
try:
response = requests.get(
'http://metadata.google.internal/computeMetadata/v1/instance/zone',
headers={
'Metadata-Flavor': 'Google'
}
)
if response.status_code == 200:
return os.path.basename(response.text)
except requests.exceptions.ConnectionError:
pass
# not on GCE instance, check env
try:
response = subprocess.run('gcloud config get-value compute/zone', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if response.returncode == 0 and b'(unset)' not in response.stdout.strip():
return response.stdout.strip().decode()
except subprocess.CalledProcessError:
pass
# gcloud config not happy, just return default
return 'us-central1-a'
__DEFAULT_GCP_PROJECT__ = None
def get_default_gcp_project():
"""
Returns the currently configured default project
"""
global __DEFAULT_GCP_PROJECT__
try:
if __DEFAULT_GCP_PROJECT__ is None:
__DEFAULT_GCP_PROJECT__ = google.auth.default()[1]
except google.auth.exceptions.GoogleAuthError:
warnings.warn(
"Unable to load gcloud credentials. Some features may not function properly",
stacklevel=1
)
return __DEFAULT_GCP_PROJECT__
def check_call(cmd:str, rc: int, stdout: typing.Optional[typing.BinaryIO] = None, stderr: typing.Optional[typing.BinaryIO] = None):
"""
Checks that the rc is 0
If not, flush stdout and stderr streams and raise a CalledProcessError
"""
if rc != 0:
if stdout is not None:
sys.stdout.write(stdout.read().decode())
sys.stdout.flush()
if stderr is not None:
sys.stderr.write(stderr.read().decode())
sys.stderr.flush()
raise subprocess.CalledProcessError(rc, cmd)
predefined_mtypes = {
# cost / CPU in each of the predefined tracks
'n1-standard': (0.0475, 0.01),
'n1-highmem': (0.0592, 0.0125),
'n1-highcpu': (0.03545, 0.0075),
'n2-standard': (0.4855, 0.01175),
'n2-highmem': (0.0655, 0.01585),
'n2-highcpu': (0.03585, 0.00865),
'm1-ultramem': (0.1575975, 0.0332775),
'm2-ultramem': (0.2028173077, 0), # no preemptible version
'c2-standard': (0.0522, 0.012625)
}
CustomPricing = namedtuple('CustomPricing', [
'cpu_cost',
'mem_cost',
'ext_cost',
'preempt_cpu_cost',
'preempt_mem_cost',
'preempt_ext_cost'
])
custom_mtypes = {
# cost / CPU, extended memory cost, preemptible cpu, preemptible extended memory
'n1-custom': CustomPricing(
0.033174, 0.004446, 0.00955,
0.00698, 0.00094, 0.002014 #extends > 6.5gb/core
),
'n2-custom': CustomPricing(
0.033174, 0.004446, 0.00955,
0.00802, 0.00108, 0.002310 # extends > 7gb/core
)
}
fixed_cost = {
# For fixed machine types, direct mapping of cost
'm1-megamem-96': (10.6740, 2.26),
'f1-micro': (0.0076, 0.0035),
'g1-small': (0.0257, 0.007)
}
gpu_pricing = {
'nvidia-tesla-t4': (0.95, 0.29),
'nvidia-tesla-p4': (0.60, 0.216),
'nvidia-tesla-v100': (2.48, 0.74),
'nvidia-tesla-p100': (1.46, 0.43),
'nvidia-tesla-k80': (0.45, 0.135)
}
@functools.lru_cache()
def _get_mtype_cost(mtype: str) -> typing.Tuple[float, float]:
"""
Returns the hourly cost of a google VM based on machine type.
Returns a tuple of (non-preemptible cost, preemptible cost)
"""
if mtype in fixed_cost:
return fixed_cost[mtype]
components = mtype.split('-')
if len(components) < 3:
raise ValueError("mtype {} not in expected format".format(mtype))
track = '{}-{}'.format(components[0], components[1])
if 'custom' in mtype:
# (n1-|n2-)?custom-(\d+)-(\d+)(-ext)?
if components[0] == 'custom':
components = ['n1'] + components
if len(components) not in {4, 5}:
raise ValueError("Custom mtype {} not in expected format".format(mtype))
cores = int(components[2])
mem = int(components[3]) / 1024
if track == 'n1-custom':
reg_mem = min(mem, cores * 6.5)
else:
reg_mem = min(mem, cores * 8)
ext_mem = mem - reg_mem
price_model = custom_mtypes[track]
return (
(price_model.cpu_cost * cores) + (price_model.mem_cost * reg_mem) + (price_model.ext_cost * ext_mem),
(price_model.preempt_cpu_cost * cores) + (price_model.preempt_mem_cost * reg_mem) + (price_model.preempt_ext_cost * ext_mem)
)
if track not in predefined_mtypes:
raise ValueError("mtype family {} not defined".format(track))
cores = int(components[2])
return (
predefined_mtypes[track][0] * cores,
predefined_mtypes[track][1] * cores
)
def gcp_hourly_cost(mtype: str, preemptible: bool = False, ssd_size: int = 0, hdd_size: int = 0, gpu_type: typing.Optional[str] = None, gpu_count: int = 0) -> float:
"""
Gets the hourly cost of a GCP VM based on its machine type and disk size.
Does not include any sustained usage discounts. Actual pricing may vary based
on compute region
"""
mtype_cost, preemptible_cost = _get_mtype_cost(mtype)
return (
(preemptible_cost if preemptible else mtype_cost) +
(0.00023287671232876715 * ssd_size) +
(0.00005479452055 * hdd_size) +
(
0 if gpu_type is None or gpu_count < 1
else (gpu_pricing[gpu_type][1 if preemptible else 0] * gpu_count)
)
)
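# Worked example using the tables above: an n1-standard-4 VM is
# 4 * 0.0475 = 0.19 per hour on demand (4 * 0.01 = 0.04 preemptible), and a
# 100 GB HDD adds about 100 * 0.00005479452055 ≈ 0.0055 per hour, so the call
# below returns roughly 0.1955.
def _example_hourly_cost():
    return gcp_hourly_cost('n1-standard-4', preemptible=False, hdd_size=100)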
# rmtree_retry removed in favor of AbstractTransport.rmtree
from threading import Lock
write_lock = Lock()
read_lock = Lock()
def pandas_write_hdf5_buffered(df: pd.DataFrame, key: str, buf: io.BufferedWriter):
"""
Write a Pandas dataframe in HDF5 format to a buffer.
"""
## Without this lock, concurrent writes raise
## HDF5ExtError("Unable to open/create file '/dev/null'"):
## unable to truncate a file which is already open
with write_lock:
with pd.HDFStore(
"/dev/null",
mode = "w",
driver = "H5FD_CORE",
driver_core_backing_store = 0
) as store:
store["results"] = df
buf.write(store._handle.get_file_image())
def pandas_read_hdf5_buffered(key: str, buf: io.BufferedReader) -> pd.DataFrame:
"""
Read a Pandas dataframe in HDF5 format from a buffer.
"""
## Without this lock, job avoidance breaks when starting two jobs simultaneously!!
with read_lock:
with pd.HDFStore(
"dummy_hdf5",
mode = "r",
driver = "H5FD_CORE",
driver_core_backing_store = 0,
driver_core_image = buf.read()
) as store:
return store[key]
def base32(buf: bytes):
"""
Convert a byte array into a base32 encoded string
"""
table = np.array(list("abcdefghijklmnopqrstuvwxyz012345"))
bits = np.unpackbits(np.frombuffer(buf, dtype = np.uint8))
bits = np.pad(bits, (0, 5 - (len(bits) % 5)), constant_values = 0).reshape(-1, 5)
return "".join(table[np.ravel(bits@2**np.c_[4:-1:-1])])
def sha1_base32(buf: bytes, n: int = None):
"""
Return a base32 representation of the first n bytes of SHA1(buf).
If n = None, the entire buffer will be encoded.
"""
return base32(hashlib.sha1(buf).digest()[slice(0, n)])
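# Example usage: derive a short, filesystem-friendly identifier from arbitrary
# bytes (the exact string depends on the SHA1 digest of the input).
def _example_content_id(data=b"canine"):
    return sha1_base32(data, 12)  # base32 of the first 12 bytes of SHA1(data)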
## Hook for get external logging module
CANINE_GET_LOGGER_HOOK = None
class canine_logging:
@staticmethod
def set_get_logger_hook(func):
global CANINE_GET_LOGGER_HOOK
CANINE_GET_LOGGER_HOOK = func
@staticmethod
def log(level, msg, *args, **kwargs):
if not CANINE_GET_LOGGER_HOOK:
return print(msg)
else:
return CANINE_GET_LOGGER_HOOK().log(level, msg, *args, **kwargs)
@staticmethod
def info(msg):
if not CANINE_GET_LOGGER_HOOK:
return print(msg)
else:
return CANINE_GET_LOGGER_HOOK().info(msg)
## Increased logging level. By default, we want to log our stuff with info1.
##
is only one player in the category. Add a dummy None player
full_set.add(None)
full_list = list(full_set)
for category_combo in itertools.combinations(full_list, int(math.ceil(len(full_list)/2.0))):
yield (category_combo, tuple(full_set.difference(category_combo)))
def generate_team_combinations(deviation_categories_dict, total_players, prune_search=False):
# generate all valid cross-category 2-team picks via a filtered cartesian product of valid in-category combos.
generators = collections.OrderedDict()
for deviation_category, player_stats in deviation_categories_dict.items():
generators[deviation_category] = generate_category_combo_sets(player_stats)
# feed the generators into the cartesian product generator
for teams_combo in (itertools.product(*generators.values())):
running_delta = 0
valid_combo = True
# strip out dummy/None players
strip_none = lambda ps: tuple(p for p in ps if p is not None)
teams_combo = tuple((strip_none(team_category[0]), strip_none(team_category[1])) for team_category in teams_combo)
counted_players = sum(len(team_category) for team_category in itertools.chain.from_iterable(teams_combo))
if prune_search_space:
for team_category in teams_combo:
# filter to disallow bias on same team in 2 adjacent skill categories
players_a, players_b = team_category
category_delta = len(players_b) - len(players_a)
if abs(category_delta) >= 2:
valid_combo = False
break
running_delta += category_delta
if abs(running_delta) >= 2:
valid_combo = False
break
if valid_combo:
yield teams_combo
def worst_case_search_space_combo_count(players):
players_count = len(players)
return nchoosek(players_count, int(math.ceil(players_count/2.0)))
def search_optimal_team_combinations(teams_generator):
# iterate through the generated teams, using heuristics to rate them and keep the top N
for teams_combo in teams_generator:
teams = tuple(tuple(itertools.chain.from_iterable(team)) for team in zip(*teams_combo))
yield teams
def analyze_teams(teams):
return BalancePrediction(teams[0], teams[1])
total_players = len(players)
teams_combos_generator = generate_team_combinations(deviation_categories, total_players, prune_search=prune_search_space)
max_iterations = worst_case_search_space_combo_count(players)
max_iteration_digits = int(math.log10(max_iterations)+1)
results = FixedSizePriorityQueue(max_results)
for i, teams in enumerate(search_optimal_team_combinations(teams_combos_generator)):
balance_prediction = analyze_teams(teams)
assert isinstance(balance_prediction, BalancePrediction)
match_prediction = balance_prediction.generate_match_prediction(player_stats)
abs_balance_distance = abs(match_prediction.distance)
results.add_item((abs_balance_distance, match_prediction, teams))
if verbose:
combo_desc = str(i+1).ljust(max_iteration_digits, " ")
print "Combo %s : %s" % (combo_desc, describe_balanced_team_combo(teams[0], teams[1], match_prediction))
# This step seems heavyweight if we are to return a lot of results, so max_results should always be small.
# convert it back into a list of players
result_combos = []
for result in results.nsmallest():
teams_as_players = []
(abs_balance_distance, match_prediction, teams) = result
for team in teams:
teams_as_players.append(tuple(player_stats[pid].player for pid in team))
team_combo = BalancedTeamCombo(teams_tup=tuple(teams_as_players), match_prediction=match_prediction)
result_combos.append(team_combo)
return result_combos
SwitchOperation = collections.namedtuple("SwitchOperation", ["players_affected",
"players_moved_from_a_to_b", "players_moved_from_b_to_a"])
SwitchProposal = collections.namedtuple("SwitchProposal", ["switch_operation", "balanced_team_combo"])
def get_proposed_team_combo_moves(team_combo_1, team_combo_2):
# team_combo_1 is current, team_combo_2 is a proposed combination
assert len(team_combo_1) == 2 and len(team_combo_2) == 2
team1a, team1b = set(team_combo_1[0]), set(team_combo_1[1])
if isinstance(team_combo_2, BalancedTeamCombo):
team2a, team2b = set(team_combo_2.teams_tup[0]), set(team_combo_2.teams_tup[1])
else:
team2a, team2b = set(team_combo_2[0]), set(team_combo_2[1])
assert team1a.union(team1b) == team2a.union(team2b), "inconsistent input data"
assert not team1a.intersection(team1b), "inconsistent input data"
assert not team2a.intersection(team2b), "inconsistent input data"
players_moved_from_a_to_b = team2a.difference(team1a)
players_moved_from_b_to_a = team2b.difference(team1b)
players_affected = players_moved_from_a_to_b.union(players_moved_from_b_to_a)
return SwitchOperation(players_affected=players_affected,
players_moved_from_a_to_b=players_moved_from_a_to_b,
players_moved_from_b_to_a=players_moved_from_b_to_a)
def describe_switch_operation(switch_op, team_names=None):
assert isinstance(switch_op, SwitchOperation)
left_team_desc = ""
right_team_desc = ""
if team_names:
assert len(team_names) == 2
left_team_desc = "%s " % team_names[0]
right_team_desc = " %s" % team_names[1]
def get_names(player_set):
s = []
for i, player in enumerate(sorted(list(player_set), key=lambda p: p.elo, reverse=True)):
if i != 0:
s.append(", ")
s.append("%s(%d)" % (player.name, player.elo))
return "".join(s)
out = []
if switch_op.players_moved_from_a_to_b:
out.append("%s --->%s" % (get_names(switch_op.players_moved_from_a_to_b), right_team_desc))
if switch_op.players_moved_from_a_to_b and switch_op.players_moved_from_b_to_a:
out.append(" | ")
if switch_op.players_moved_from_b_to_a:
out.append("%s<--- %s" % (left_team_desc, get_names(switch_op.players_moved_from_b_to_a)))
return "".join(out)
def generate_switch_proposals(teams, verbose=False, max_results=5):
# add 1 to max results, because if the input teams are optimal, then they will come as a result.
players = []
[[players.append(p) for p in team_players] for team_players in teams]
balanced_team_combos = balance_players_by_skill_variance(players,
verbose=verbose,
prune_search_space=True,
max_results=max_results+1)
switch_proposals = []
for balanced_combo in balanced_team_combos:
switch_op = get_proposed_team_combo_moves(teams, balanced_combo)
assert isinstance(switch_op, SwitchOperation)
assert isinstance(balanced_combo, BalancedTeamCombo)
if not switch_op.players_affected:
# no change
continue
switch_proposals.append(SwitchProposal(switch_operation=switch_op, balanced_team_combo=balanced_combo))
return switch_proposals
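# A hedged usage sketch of generate_switch_proposals(). The player objects are
# stand-ins for whatever player/stat types the surrounding module uses; anything
# exposing the attributes the balancer reads (e.g. name and elo) follows the
# same flow.
def _example_switch_proposals(team_red_players, team_blue_players):
    proposals = generate_switch_proposals((team_red_players, team_blue_players),
                                          max_results=3)
    return [describe_switch_operation(p.switch_operation, team_names=("RED", "BLUE"))
            for p in proposals]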
class Unstaker(object):
"""
This class encapsulates a set of unstak balancing suggestions, and data related to current server
operations on these suggestions. It can be seen as a finite state machine managing these steps:
- STARTGEN: Invalidation of old suggestions. (e.g. vote enacted, teams change, new match).
- GENERATING: Generation of new suggestions (possibly a long running operation).
- STOREGEN: Recording the generated results (multiple choices of balancing)
- PRESENTGEN: Presentation of the group of suggestions, ready for selection.
- VOTECHOICE: Accepting democratic player votes for selecting the balance suggestion.
(It can be forced by admin).
- RESETCHOICE: An admin nominated transition from VOTECHOICE to PRESENTGEN.
(not part of the standard flow).
- PLAYERCONFIRMATION: Waiting for nominated switch players to confirm unanimous agreement.
(It can be forced by admin).
- EXECUTESWITCH: Perform the swap action. After this we are back at STARTGEN.
When PRESENTGEN occurs, the options are listed in descending order of predicted fitness.
In other words, the calculated best balanced option is presented first.
A natural consequence of this structure is that we can encode an admin forced balance operation
("unstak") as a forced progression through all FSM steps assuming all players voted the first
choice in VOTECHOICE, followed by all players agreeing in PLAYERCONFIRMATION. So a balance operation
can simply set a bit that auto progresses through all states.
There are a few complexities to bear in mind when thinking about unstak balancing compared to the
existing balance operation:
- unstak will try to balance mismatched odd-even teams (n vs n+1).
- legacy balance will only attempt to balance teams with matching player counts (n vs n).
- unstak can suggest player switches that can involve a single player or up to half of all players.
- legacy balance will only suggest a switch between player pairs.
- unstak tries to match "skill distribution shape" of the teams, and not just aggregated values.
- legacy balance can consider a fully uniform team vs a highly skewed team as balanced,
unstak will not.
- As an example of a skewed vs uniform matching: Team A has 6 players around 1400 skillrating
(normal distribution). Team B has players at [2200, 1950, 1800, 1100, 750, 600] skillratings.
Both teams have the same skillrating average (1400) and sum. However, while Team B has a
chance of winning, the load on the top 3 players is large, due to the anchoring effect of
the bottom 3 players on Team B. From experience, it can work, but it is most commonly a
frustrating experience for all members of the skewed team, especially if Team A works together.
Teamwork is a lot less effective for Team B due to skill disparity and focusing effects.
The "shape matching" property of unstak addresses this, but could be considered a disadvantage,
because sometimes you can have interesting matches with skewed players, but this is rare.
These differences are basically due to the fact that legacy balance uses a naive hill-climbing
style algorithm using player pair switches for iterative improvements (locally optimal solutions).
In contrast, unstak tries to completely re-assemble teams by first categorizing players based on
relative stat deviations, and then performing an exhaustive search between these categories and
using a set of heuristics to keep the top N results. The search space is drastically reduced compared
to a naive (N choose K) search by restricting to combinations which contain subsets of players in
the same "skill deviation group" to be equally spread in a way that is non-consequetively biased
across adjacent deviation groups. This allows it to find a globally optimal solution (satisfying
the hueristic) in a smaller search space than a pure brute force that returns "shape matched" results.
There is a very small chance that the heuristic-based global optimum lies outside of the trimmed
search space, but that would probably be explained by a deficiency in the heuristic and would also
probably represent a low quality match.
Therefore, unstak is generally a more involved and expensive operation due to its exhaustive search approach
and may require being run as a delayed/background operation since it may take more than one frame to complete.
"""
STARTGEN = 0
GENERATING = 1
STOREGEN = 2
PRESENTGEN = 3
VOTECHOICE = 4
RESETCHOICE = 5
PLAYERCONFIRMATION = 6
EXECUTESWITCH = 7
def __init__(self):
self.state = self.STARTGEN
self.switch_proposals = []
<filename>asr1k_neutron_l3/plugins/l3/service_plugins/l3_extension_adapter.py
# Copyright 2017 SAP SE
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import time
from neutron.db import dns_db
from neutron.db import extraroute_db
from neutron.db import l3_gwmode_db as l3_db
from neutron_lib.db import api as db_api
from oslo_log import helpers as log_helpers
from oslo_log import log
from asr1k_neutron_l3.common import asr1k_constants as constants
from asr1k_neutron_l3.common import cache_utils
from asr1k_neutron_l3.common import utils
from asr1k_neutron_l3.common.instrument import instrument
from asr1k_neutron_l3.extensions import asr1koperations as asr1k_ext
from asr1k_neutron_l3.plugins.db import asr1k_db
from asr1k_neutron_l3.plugins.db import models as asr1k_models
from asr1k_neutron_l3.plugins.l3.rpc import ask1k_l3_notifier
from asr1k_neutron_l3.plugins.l3.schedulers import asr1k_scheduler_db
from asr1k_neutron_l3.plugins.l3.service_plugins.initializer import Initializer
LOG = log.getLogger(__name__)
class L3RpcNotifierMixin(object):
"""Mixin class to add rpc notifier attribute to db_base_plugin_v2."""
@property
def l3_rpc_notifier(self):
if not hasattr(self, '_l3_rpc_notifier') or \
not isinstance(self._l3_rpc_notifier, ask1k_l3_notifier.ASR1KAgentNotifyAPI):
self._l3_rpc_notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return self._l3_rpc_notifier
@l3_rpc_notifier.setter
def l3_rpc_notifier(self, value):
self._l3_rpc_notifier = value
@log_helpers.log_method_call
def notify_router_updated(self, context, router_id,
operation=None):
if router_id:
self.l3_rpc_notifier.routers_updated(
context, [router_id], operation)
@log_helpers.log_method_call
def notify_routers_updated(self, context, router_ids,
operation=None, data=None):
if router_ids:
self.l3_rpc_notifier.routers_updated(
context, router_ids, operation, data)
@log_helpers.log_method_call
def notify_router_deleted(self, context, router_id):
self.l3_rpc_notifier.router_deleted(context, router_id)
@log_helpers.log_method_call
def notify_router_sync(self, context, router_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.router_sync(context, router_id)
@log_helpers.log_method_call
def notify_router_teardown(self, context, router_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.router_teardown(context, router_id)
@log_helpers.log_method_call
def notify_router_validate(self, context, router_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.router_validate(context, router_id)
@log_helpers.log_method_call
def notify_network_sync(self, context, network_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.network_sync(context, network_id)
@log_helpers.log_method_call
def notify_network_validate(self, context, network_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.network_validate(context, network_id)
@log_helpers.log_method_call
def notify_interface_statistics(self, context, router_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.interface_statistics(context, router_id)
@log_helpers.log_method_call
def notify_show_orphans(self, context, host):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.show_orphans(context, host)
@log_helpers.log_method_call
def notify_delete_orphans(self, context, host):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.delete_orphans(context, host)
@log_helpers.log_method_call
def notify_list_devices(self, context, host):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.list_devices(context, host)
@log_helpers.log_method_call
def notify_show_device(self, context, host, device_id):
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.show_device(context, host, device_id)
@log_helpers.log_method_call
def notify_agent_init_config(self, context, host, router_infos):
LOG.debug('agent_initial_config')
notifier = ask1k_l3_notifier.ASR1KAgentNotifyAPI()
return notifier.agent_init_config(context, host, router_infos)
class ASR1KPluginBase(l3_db.L3_NAT_db_mixin,
asr1k_scheduler_db.AZASR1KL3AgentSchedulerDbMixin, extraroute_db.ExtraRoute_db_mixin,
dns_db.DNSDbMixin, L3RpcNotifierMixin, asr1k_ext.DevicePluginBase):
def __init__(self):
self.db = asr1k_db.get_db_plugin()
def get_agent_for_router(self, context, router_id):
"""Returns all hosts to send notification about router update"""
agents = self.list_l3_agents_hosting_router(context, router_id)
agents_list = agents.get('agents', [])
if len(agents_list) == 1:
return agents_list[0]
else:
LOG.error('get host for router: there should be one and only one agent, got {}'.format(agents_list))
def get_host_for_router(self, context, router_id):
agent = self.get_agent_for_router(context, router_id)
if agent is not None:
return agent.get('host')
def get_hosts_for_network(self, context, network_id):
return self.db.get_asr1k_hosts_for_network(context, network_id)
def _ensure_second_dot1q(self, context):
session = db_api.get_writer_session()
extra_atts = session.query(asr1k_models.ASR1KExtraAttsModel).all()
second_dot1qs = []
for extra_att in extra_atts:
second_dot1qs.append(extra_att.second_dot1q)
for extra_att in extra_atts:
if extra_att.second_dot1q == 0:
for x in range(asr1k_db.MIN_SECOND_DOT1Q, asr1k_db.MAX_SECOND_DOT1Q):
if x not in second_dot1qs:
extra_att.second_dot1q = x
second_dot1qs.append(x)
break
with context.session.begin(subtransactions=True):
entry = session.query(asr1k_models.ASR1KExtraAttsModel).filter_by(router_id=extra_att.router_id,
agent_host=extra_att.agent_host,
port_id=extra_att.port_id,
segment_id=extra_att.segment_id
).first()
if entry:
entry.update(extra_att)
@instrument()
@log_helpers.log_method_call
def get_sync_data(self, context, router_ids=None, active=None, host=None):
if host is not None:
host_router_ids = self.db.get_all_router_ids(context, host)
router_ids = [r for r in router_ids if r in host_router_ids]
if not bool(router_ids):
return []
extra_atts = self._get_extra_atts(context, router_ids, host)
router_atts = self._get_router_atts(context, router_ids)
try:
routers = super(ASR1KPluginBase, self).get_sync_data(context, router_ids=router_ids, active=active)
except TypeError:
# We may have a race in the L3/L2 scavengers, let's back off and try again
time.sleep(.25)
routers = super(ASR1KPluginBase, self).get_sync_data(context, router_ids=router_ids, active=active)
if not bool(routers):
routers = []
for router_id in router_ids:
routers.append({'id': router_id, constants.ASR1K_ROUTER_ATTS_KEY: router_atts.get(router_id, {})})
for router in routers:
extra_att = extra_atts.get(router['id'])
if extra_att is None:
if host is None:
LOG.debug("Not including router {} in sync its extra atts are missing.".format(router['id']))
else:
LOG.debug("Not including router {} in sync its extra atts are missing for host {}."
"".format(router['id'], host))
continue
router[constants.ASR1K_EXTRA_ATTS_KEY] = extra_att
router_att = router_atts.get(router['id'], {})
router[constants.ASR1K_ROUTER_ATTS_KEY] = router_att
# Make sure the gateway IPs all have prefixes and are sorted consistently.
# This is to prevent problems when we have to assign to the nat pool, because we
# need a consistent order from neutron and we can't change the
# pool on the active device and it has (currently) to be different from
# the interface device.
gw_info = router.get('external_gateway_info', None)
gw_port = router.get('gw_port', None)
if gw_port is not None:
ips = gw_port.get('fixed_ips', [])
prefixes = {}
if bool(ips):
for ip in ips:
prefix = ip.get('prefixlen', None)
subnet_id = ip.get('subnet_id', None)
if prefix is not None and subnet_id is not None:
prefixes[subnet_id] = prefix
for ip in ips:
if ip.get('prefixlen', None) is None:
prefix = prefixes.get(ip.get('subnet_id', None))
if prefix is not None:
ip['prefixlen'] = prefix
gw_port['fixed_ips'] = sorted(ips, key=lambda k: k.get('ip_address'))
if gw_info is not None:
gw_info['external_fixed_ips'] = gw_port['fixed_ips']
rt_import = []
rt_export = []
bgpvpns = self.db.get_bgpvpns_by_router_id(context, router['id'])
router["bgpvpn_advertise_extra_routes"] = True
if bgpvpns:
adv_mode = self.db.get_bgpvpn_advertise_extra_routes_by_router_id(context, router['id'])
router["bgpvpn_advertise_extra_routes"] = adv_mode
for bgpvpn in bgpvpns:
if bgpvpn.route_targets:
rt_import += bgpvpn.route_targets.split(",")
rt_export += bgpvpn.route_targets.split(",")
if bgpvpn.import_targets:
rt_import += bgpvpn.import_targets.split(",")
if bgpvpn.export_targets:
rt_export += bgpvpn.export_targets.split(",")
router["rt_export"] = list(set(rt_export))
router["rt_import"] = list(set(rt_import))
return routers
def get_deleted_router_atts(self, context):
return self.db.get_deleted_router_atts(context)
def _get_device_info(self, context, host):
return self.db.get_device_info(context, host)
def _get_extra_atts(self, context, router_ids, host=None):
extra_atts = self.db.get_extra_atts_for_routers(context, router_ids, host=host)
return_dict = {}
for extra_att in extra_atts:
router_id = extra_att.get('router_id')
if return_dict.get(router_id) is None:
return_dict[router_id] = {}
if host is None:
return_dict[router_id][extra_att.get('port_id')] = extra_att
else:
if host == extra_att.get('agent_host'):
return_dict[router_id][extra_att.get('port_id')] = extra_att
return return_dict
def _get_router_atts(self, context, router_ids):
router_atts = self.db.get_router_atts_for_routers(context, router_ids)
return_dict = {}
for router_att in router_atts:
if return_dict.get(router_att.get('router_id')) is None:
return_dict[router_att.get('router_id')] = {}
return_dict[router_att.get('router_id')] = router_att
return return_dict
@log_helpers.log_method_call
def create_router(self, context, router):
result = super(ASR1KPluginBase, self).create_router(context, router)
asr1k_db.RouterAttsDb.ensure(context, result.get('id'))
return result
@log_helpers.log_method_call
def update_router(self, context, id, router):
result = super(ASR1KPluginBase, self).update_router(context, id, router)
asr1k_db.RouterAttsDb.ensure(context, result.get('id'))
return result
@log_helpers.log_method_call
def get_router(self, context, id, fields=None):
return super(ASR1KPluginBase, self).get_router(context, id, fields)
def _add_router_to_cache(self, context, router_id):
LOG.debug("Adding router %s to internal router cache", router_id)
host = self.get_host_for_router(context, [router_id])
routers = self.get_sync_data(context, [router_id])
if not routers:
LOG.warning("Could not add router %s to internal router cache: get_sync_data came up empty", router_id)
return
cache_utils.cache_deleted_router(host, router_id, routers[0])
@log_helpers.log_method_call
def delete_router(self, context, id):
self._add_router_to_cache(context, id)
return super(ASR1KPluginBase, self).delete_router(context, id)
@log_helpers.log_method_call
def add_router_to_l3_agent(self, context, agent_id, router_id):
result = super(ASR1KPluginBase, self).add_router_to_l3_agent(context, agent_id, router_id)
asr1k_db.RouterAttsDb.ensure(context, router_id)
return result
@log_helpers.log_method_call
def remove_router_from_l3_agent(self, context, agent_id, router_id):
self._add_router_to_cache(context, router_id)
return super(ASR1KPluginBase, self).remove_router_from_l3_agent(context, agent_id, router_id)
@log_helpers.log_method_call
def add_router_interface(self, context, router_id, interface_info=None):
return super(ASR1KPluginBase, self).add_router_interface(context, router_id, interface_info)
@log_helpers.log_method_call
def remove_router_interface(self, context, router_id, interface_info):
return super(ASR1KPluginBase, self).remove_router_interface(context, router_id, interface_info)
def validate(self, context, id, fields=None):
result = self.notify_router_validate(context, id)
return {'diffs': result}
def sync(self, context, id, fields=None):
result = self.notify_router_sync(context, id)
return {'device': {'network_id': result}}
def validate_network(self, context, id, fields=None):
result = self.notify_network_validate(context, id)
return {'diffs': result}
def sync_network(self, context, id, fields=None):
result = self.notify_network_sync(context, id)
return {'device': {'network_id': result}}
def orphans(self, context, dry_run=True):
result = self.notify_router_sync(context, dry_run)
return result
def get_config(self, context, id):
router_atts = self._get_router_atts(context, [id])
extra_atts = self._get_extra_atts(context, [id])
atts = extra_atts.get(id, None)
result = OrderedDict({'id': id, 'rd': None})
if len(router_atts) > 0:
att = router_atts.get(id, None)
if att is not None:
result['rd'] = att.get('rd')
ports = []
if atts is not None:
for port_id in atts.keys():
port = OrderedDict({'port_id': port_id})
att = atts.get(port_id)
if att is not None:
port['segment_id'] = att.segment_id
port['segmentation_id'] = att.segmentation_id
port['second_dot1q'] = att.second_dot1q
port['external_service_instance'] = att.segmentation_id
port['loopback_service_instance'] = utils.to_bridge_domain(att.second_dot1q)
port['bridge_domain'] = utils.to_bridge_domain(att.second_dot1q)
port['deleted_l2'] = att.deleted_l2
port['deleted_l3'] = att.deleted_l3
ports.append(port)
result['ports'] = ports
return dict(result)
def ensure_config(self, context, id):
asr1k_db.RouterAttsDb.ensure(context, id)
ports = self.db.get_router_ports(context, id)
for port in ports:
segment = self.db.get_router_segment_for_port(context, id, port.get('id'))
asr1k_db.ExtraAttsDb.ensure(id, port, segment, clean_old=True)
return self.get_config(context, id)
def interface_statistics(self, context, id, fields=None):
result = self.notify_interface_statistics(context, id)
return {'interface_statistics': result}
def teardown(self, context, id, fields=None):
result = self.notify_router_teardown(context, id)
return {'device': {'id': result}}
def show_orphans(self, context, host):
result = self.notify_show_orphans(context, host)
return result
def delete_orphans(self, context, host):
result = self.notify_delete_orphans(context, host)
return result
def list_devices(self, context, host):
result = self.notify_list_devices(context, host)
device_info = self.db.get_device_info(context, host)
for id in result:
device = result.get(id)
self._add_device_enabled(device_info, device)
import ast
import base64
import json
from datetime import datetime
import dicttoxml
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render
from django.views import View
import MySQLdb as db
# The main view that handles Private API requests
import redis
from api.models import APIRequest, APIKey
from core.models import Resource, ResourceParameter, ResourceHeader, ResourceDataSourceColumn, DatabaseColumn, Database, \
ResourceDataSourceFilter, DatabaseTable, ResourceParentChildRelationship, BlockedIP, ResourceDataBind, \
ResourceUserGroup, ResourceTextSource
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_REAL_IP')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
class RequestHandlerPrivate(View):
# First handle GET requests
def get(self, request):
# The first thing we need to do is check to see if the header information has been sent for authorisation etc.
if 'HTTP_AUTHORIZATION' in request.META:
# Access GET params: print(request.GET)
# The key was provided so check it. First we need to base64 decode the key.
# Extract the key from the string. Base64decode, remove the last colon, and decode to utf-8 rather than bytes
api_key = base64.b64decode(request.META['HTTP_AUTHORIZATION'].split('Basic ', 1)[1])[:-1].decode('utf-8')
# Look up API key
try:
api_key = APIKey.objects.get(key=api_key)
# Check to see if the requesters IP is blocked
ip = get_client_ip(request)
try:
blocked_ip = BlockedIP.objects.get(ip_address=ip)
response = json.dumps({
'error': {
'message': 'Requests from this IP are blocked',
'type': 'blocked_ip'
}
})
# This means it does exist so send a return message.
api_request = APIRequest(
authentication_type='KEY',
type=request.method,
resource=request.META['HTTP_RESOURCE'],
url=request.get_full_path(),
status='403 ERR',
ip_address=get_client_ip(request),
source=request.META['HTTP_USER_AGENT'],
api_key=api_key,
response_to_user=response
)
api_request.save()
return HttpResponse(response, content_type='application/json', status=403)
except:
# If it doesn't exist then just pass and continue
pass
# API Key is found. Now check for a resource.
if 'HTTP_RESOURCE' in request.META:
# Now that we know they provided a resource, let's check to see if it exists.
try:
resource = Resource.objects.get(
name=request.META['HTTP_RESOURCE'],
project=api_key.project,
request_type='GET'
)
# Check to see if the resource is "turned off"
if not resource.status:
response = {
'error': {
'message': 'The owner of this resource has it disabled/off. Check back later as it may be enabled/turned on',
'type': 'endpoint_off'
}
}
if resource.response_format == 'JSON':
return HttpResponse(json.dumps(response), content_type='application/json')
elif resource.response_format == 'XML':
return HttpResponse(dicttoxml.dicttoxml(response), content_type='application/xml')
# Now we have the API Key. Let's make sure that the groups match.
user_groups = ResourceUserGroup.objects.all().filter(resource=resource)
# We only check user groups if any are defined and the api_key is not a master key
if user_groups and 'rb_mstr_key_' not in api_key.key:
# If the api_key has a user group attached to it
# If it doesn't ignore it
if api_key.user_group:
# Check to see if that user_group is in the set user_groups. If not then say permission denied
in_group = False
for user_group in user_groups:
if api_key.user_group == user_group.user_group:
in_group = True
if in_group is False:
response = {
'error': {
'message': 'Permission Denied. Your API Key/User Group doesn\'t allow you to access that resource.',
'type': 'permission_denied'
}
}
if resource.response_format == 'JSON':
response = json.dumps(response)
elif resource.response_format == 'XML':
response = dicttoxml.dicttoxml(response)
# Permission is denied, so record the request and respond
api_request = APIRequest(
authentication_type='KEY',
type=request.method,
resource=request.META['HTTP_RESOURCE'],
url=request.get_full_path(),
status='403 ERR',
ip_address=get_client_ip(request),
source=request.META['HTTP_USER_AGENT'],
api_key=api_key,
response_to_user=response
)
api_request.save()
if resource.response_format == 'JSON':
return HttpResponse(response, content_type='application/json', status=403)
elif resource.response_format == 'XML':
return HttpResponse(response,
content_type='application/xml', status=403)
# The resource does exist! Now we need to go through the request and check to see
# if what is required has been sent.
# Create a resource_request object that holds all data as we move further through
resource_request = {}
# HEADERS CHECK
# We need to check and see if there are headers.
resource_headers = ResourceHeader.objects.all().filter(resource=resource)
# Create a list of the provided headers and their values
provided_headers = []
# If there are headers
if resource_headers:
# Loop through each header and check to see if it exists in the request
for header in resource_headers:
# Check to see if that one is present. HTTP_+header name with dashes replaced with underscores.
if 'HTTP_' + header.key.upper().replace('-', '_') in request.META:
# Does exist.
single_header_object = {
'obj': header,
'provided_value': request.META['HTTP_' + header.key.upper().replace('-', '_')]
}
# Append it to the users provided headers
provided_headers.append(single_header_object)
else:
response = {
'error': {
'message': 'Your request is missing a required header. Missing header is: ' + header.key,
'type': 'missing_header'
}
}
if resource.response_format == 'JSON':
response = json.dumps(response)
elif resource.response_format == 'XML':
response = dicttoxml.dicttoxml(response)
# This means a required header is not provided so record it and respond
api_request = APIRequest(
authentication_type='KEY',
type=request.method,
resource=request.META['HTTP_RESOURCE'],
url=request.get_full_path(),
status='400 ERR',
ip_address=get_client_ip(request),
source=request.META['HTTP_USER_AGENT'],
api_key=api_key,
response_to_user=response
)
api_request.save()
if resource.response_format == 'JSON':
return HttpResponse(response, content_type='application/json', status=400)
elif resource.response_format == 'XML':
return HttpResponse(response,
content_type='application/xml', status=400)
# If we got here we either have a list with no headers or a list with headers that have values.
# Either way, if the incorrect values were given we would not be here.
resource_request['headers'] = provided_headers
# GET PARAMETER CHECK
# Now we have looked at headers, lets do the same with GET parameters.
resource_parameters = ResourceParameter.objects.all().filter(resource=resource)
# Create a list of provided resources
provided_parameters = []
# If there are resource parameters, i.e GET URL parameters
if resource_parameters:
# Loop through each parameter
for parameter in resource_parameters:
# Check to see if that was provided
if parameter.key in request.GET:
# It is provided so lets get it.
single_parameter_obj = {
'obj': parameter,
'provided_value': request.GET[parameter.key]
}
# Append it to the list of provided parameters
provided_parameters.append(single_parameter_obj)
else:
response = {
'error': {
'message': 'Your request is missing a required GET parameter. Missing parameter is: ' + parameter.key,
'type': 'missing_parameter'
}
}
if resource.response_format == 'JSON':
response = json.dumps(response)
elif resource.response_format == 'XML':
response = dicttoxml.dicttoxml(response)
# This means a required GET parameter is not provided so record it and respond
api_request = APIRequest(
authentication_type='KEY',
type=request.method,
resource=request.META['HTTP_RESOURCE'],
url=request.get_full_path(),
status='400 ERR',
ip_address=get_client_ip(request),
source=request.META['HTTP_USER_AGENT'],
api_key=api_key,
response_to_user=response
)
api_request.save()
if resource.response_format == 'JSON':
return HttpResponse(response, content_type='application/json', status=400)
elif resource.response_format == 'XML':
return HttpResponse(response,
content_type='application/xml', status=400)
# Like with headers, if we have gotten here we have analysed the correct GET parameters
resource_request['parameters'] = provided_parameters
# Value to determine if the resulting response should be saved to Redis
need_to_be_cached = False
# Let us check to see if the user has caching on. If they do then we can check Redis for cached results
if resource.project.caching:
# Now that we know that caching is enabled, check to see if there have been any stores of this cache today
# Let's connect to Redis
r = redis.Redis(host='127.0.0.1', port=6379, db=0)
# Now we have a connection, check to see if there is a cache with today's timestamp
if r.get(datetime.now().strftime('%Y-%m-%d:%H')):
# So there is a cache from the last hour. This means that we now need to basically look
# through all requests to this resource and see if any of them have happened in this hour
# of this day. If they have then we must let SQL do its thing, else we can just return the
# Redis cache.
day = datetime.today().strftime('%d')
month = datetime.today().strftime('%m')
year = datetime.today().strftime('%Y')
hour = datetime.today().strftime('%H')
# Get all POST requests
post_requests = APIRequest.objects.all().filter(
type='POST',
resource=resource.name,
date__day=day,
date__month=month,
date__year=year,
date__hour=hour
)
# Get all DELETE requests
delete_requests = APIRequest.objects.all().filter(
type='DELETE',
resource=resource.name,
date__day=day,
date__month=month,
date__year=year,
date__hour=hour
)
# So if neither have entries then we can return the Redis result
# Any future checks on whether or not the data has been modified
# would go here
if not post_requests and not delete_requests:
# Now that we know there hasn't been POSTs or DELETEs to any other resource with the same name, we still
# have to find out if any tables have been involved in POSTs that are included in this resource.
# First get the tables.
# We can do this here while we have access to that objects
data_source_columns = ResourceDataSourceColumn.objects.all().filter(
resource=resource)
tables_included = []
for data_source_column in data_source_columns:
# Get the the table from that column
table = DatabaseColumn.objects.get(id=int(data_source_column.column_id)).table
if table not in tables_included:
tables_included.append(table)
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.util import stringutils
from synapse.util.async import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.metrics import measure_func
from synapse.types import get_domain_from_id, RoomStreamToken
from twisted.internet import defer
from ._base import BaseHandler
import logging
logger = logging.getLogger(__name__)
class DeviceHandler(BaseHandler):
def __init__(self, hs):
super(DeviceHandler, self).__init__(hs)
self.hs = hs
self.state = hs.get_state_handler()
self.federation_sender = hs.get_federation_sender()
self.federation = hs.get_replication_layer()
self._edu_updater = DeviceListEduUpdater(hs, self)
self.federation.register_edu_handler(
"m.device_list_update", self._edu_updater.incoming_device_list_update,
)
self.federation.register_query_handler(
"user_devices", self.on_federation_query_user_devices,
)
hs.get_distributor().observe("user_left_room", self.user_left_room)
@defer.inlineCallbacks
def check_device_registered(self, user_id, device_id,
initial_device_display_name=None):
"""
If the given device has not been registered, register it with the
supplied display name.
If no device_id is supplied, we make one up.
Args:
user_id (str): @user:id
device_id (str | None): device id supplied by client
initial_device_display_name (str | None): device display name from
client
Returns:
str: device id (generated if none was supplied)
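
Illustrative usage (from within another inlineCallbacks function; the
user id and display name below are placeholders, not real values):

    device_id = yield device_handler.check_device_registered(
        user_id="@alice:example.com",
        device_id=None,
        initial_device_display_name="Alice's phone",
    )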
"""
if device_id is not None:
new_device = yield self.store.store_device(
user_id=user_id,
device_id=device_id,
initial_device_display_name=initial_device_display_name,
)
if new_device:
yield self.notify_device_update(user_id, [device_id])
defer.returnValue(device_id)
# if the device id is not specified, we'll autogen one, but loop a few
# times in case of a clash.
attempts = 0
while attempts < 5:
device_id = stringutils.random_string(10).upper()
new_device = yield self.store.store_device(
user_id=user_id,
device_id=device_id,
initial_device_display_name=initial_device_display_name,
)
if new_device:
yield self.notify_device_update(user_id, [device_id])
defer.returnValue(device_id)
attempts += 1
raise errors.StoreError(500, "Couldn't generate a device ID.")
@defer.inlineCallbacks
def get_devices_by_user(self, user_id):
"""
Retrieve the given user's devices
Args:
user_id (str):
Returns:
defer.Deferred: list[dict[str, X]]: info on each device
"""
device_map = yield self.store.get_devices_by_user(user_id)
ips = yield self.store.get_last_client_ip_by_device(
user_id, device_id=None
)
devices = device_map.values()
for device in devices:
_update_device_from_client_ips(device, ips)
defer.returnValue(devices)
@defer.inlineCallbacks
def get_device(self, user_id, device_id):
""" Retrieve the given device
Args:
user_id (str):
device_id (str):
Returns:
defer.Deferred: dict[str, X]: info on the device
Raises:
errors.NotFoundError: if the device was not found
"""
try:
device = yield self.store.get_device(user_id, device_id)
except errors.StoreError:
raise errors.NotFoundError
ips = yield self.store.get_last_client_ip_by_device(
user_id, device_id,
)
_update_device_from_client_ips(device, ips)
defer.returnValue(device)
@defer.inlineCallbacks
def delete_device(self, user_id, device_id):
""" Delete the given device
Args:
user_id (str):
device_id (str):
Returns:
defer.Deferred:
"""
try:
yield self.store.delete_device(user_id, device_id)
except errors.StoreError, e:
if e.code == 404:
# no match
pass
else:
raise
yield self.store.user_delete_access_tokens(
user_id, device_id=device_id,
delete_refresh_tokens=True,
)
yield self.store.delete_e2e_keys_by_device(
user_id=user_id, device_id=device_id
)
yield self.notify_device_update(user_id, [device_id])
@defer.inlineCallbacks
def delete_devices(self, user_id, device_ids):
""" Delete several devices
Args:
user_id (str):
device_ids (str): The list of device IDs to delete
Returns:
defer.Deferred:
"""
try:
yield self.store.delete_devices(user_id, device_ids)
except errors.StoreError, e:
if e.code == 404:
# no match
pass
else:
raise
# Delete access tokens and e2e keys for each device. Not optimised, as it is
# not considered part of a critical path.
for device_id in device_ids:
yield self.store.user_delete_access_tokens(
user_id, device_id=device_id,
delete_refresh_tokens=True,
)
yield self.store.delete_e2e_keys_by_device(
user_id=user_id, device_id=device_id
)
yield self.notify_device_update(user_id, device_ids)
@defer.inlineCallbacks
def update_device(self, user_id, device_id, content):
""" Update the given device
Args:
user_id (str):
device_id (str):
content (dict): body of update request
Returns:
defer.Deferred:
"""
try:
yield self.store.update_device(
user_id,
device_id,
new_display_name=content.get("display_name")
)
yield self.notify_device_update(user_id, [device_id])
except errors.StoreError, e:
if e.code == 404:
raise errors.NotFoundError()
else:
raise
@measure_func("notify_device_update")
@defer.inlineCallbacks
def notify_device_update(self, user_id, device_ids):
"""Notify that a user's device(s) has changed. Pokes the notifier, and
remote servers if the user is local.
"""
users_who_share_room = yield self.store.get_users_who_share_room_with_user(
user_id
)
hosts = set()
if self.hs.is_mine_id(user_id):
hosts.update(get_domain_from_id(u) for u in users_who_share_room)
hosts.discard(self.server_name)
position = yield self.store.add_device_change_to_streams(
user_id, device_ids, list(hosts)
)
room_ids = yield self.store.get_rooms_for_user(user_id)
yield self.notifier.on_new_event(
"device_list_key", position, rooms=room_ids,
)
if hosts:
logger.info("Sending device list update notif to: %r", hosts)
for host in hosts:
self.federation_sender.send_device_messages(host)
@measure_func("device.get_user_ids_changed")
@defer.inlineCallbacks
def get_user_ids_changed(self, user_id, from_token):
"""Get list of users that have had the devices updated, or have newly
joined a room, that `user_id` may be interested in.
Args:
user_id (str)
from_token (StreamToken)
"""
room_ids = yield self.store.get_rooms_for_user(user_id)
# First we check if any devices have changed
changed = yield self.store.get_user_whose_devices_changed(
from_token.device_list_key
)
# Then work out if any users have since joined
rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)
stream_ordering = RoomStreamToken.parse_stream_token(
from_token.room_key).stream
possibly_changed = set(changed)
for room_id in rooms_changed:
# Fetch the current state at the time.
try:
event_ids = yield self.store.get_forward_extremeties_for_room(
room_id, stream_ordering=stream_ordering
)
except errors.StoreError:
# we have purged the stream_ordering index since the stream
# ordering: treat it the same as a new room
event_ids = []
current_state_ids = yield self.store.get_current_state_ids(room_id)
# special-case for an empty prev state: include all members
# in the changed list
if not event_ids:
for key, event_id in current_state_ids.iteritems():
etype, state_key = key
if etype != EventTypes.Member:
continue
possibly_changed.add(state_key)
continue
# mapping from event_id -> state_dict
prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
# If there has been any change in membership, include them in the
# possibly changed list. We'll check if they are joined below,
# and we're not toooo worried about spuriously adding users.
for key, event_id in current_state_ids.iteritems():
etype, state_key = key
if etype != EventTypes.Member:
continue
# check if this member has changed since any of the extremities
# at the stream_ordering, and add them to the list if so.
for state_dict in prev_state_ids.values():
prev_event_id = state_dict.get(key, None)
if not prev_event_id or prev_event_id != event_id:
possibly_changed.add(state_key)
break
users_who_share_room = yield self.store.get_users_who_share_room_with_user(
user_id
)
# Take the intersection of the users whose devices may have changed
# and those that actually still share a room with the user
defer.returnValue(users_who_share_room & possibly_changed)
@defer.inlineCallbacks
def on_federation_query_user_devices(self, user_id):
stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
defer.returnValue({
"user_id": user_id,
"stream_id": stream_id,
"devices": devices,
})
@defer.inlineCallbacks
def user_left_room(self, user, room_id):
user_id = user.to_string()
room_ids = yield self.store.get_rooms_for_user(user_id)
if not room_ids:
# We no longer share rooms with this user, so we'll no longer
# receive device updates. Mark this in DB.
yield self.store.mark_remote_user_device_list_as_unsubscribed(user_id)
def _update_device_from_client_ips(device, client_ips):
ip = client_ips.get((device["user_id"], device["device_id"]), {})
device.update({
"last_seen_ts": ip.get("last_seen"),
"last_seen_ip": ip.get("ip"),
})
class DeviceListEduUpdater(object):
"Handles incoming device list updates from federation and updates the DB"
def __init__(self, hs, device_handler):
self.store = hs.get_datastore()
self.federation = hs.get_replication_layer()
self.clock = hs.get_clock()
self.device_handler = device_handler
self._remote_edu_linearizer = Linearizer(name="remote_device_list")
# user_id -> list of updates waiting to be handled.
self._pending_updates = {}
# Recently seen stream ids. We don't bother keeping these in the DB,
# but they're useful to have around to reduce the number of spurious
# resyncs.
self._seen_updates = ExpiringCache(
cache_name="device_update_edu",
clock=self.clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
iterable=True,
)
@defer.inlineCallbacks
def incoming_device_list_update(self, origin, edu_content):
"""Called on incoming device list update from federation. Responsible
for parsing the EDU and adding to pending updates list.
"""
user_id = edu_content.pop("user_id")
device_id = edu_content.pop("device_id")
stream_id = str(edu_content.pop("stream_id")) # They may come as ints
prev_ids = edu_content.pop("prev_id", [])
prev_ids = [str(p) for p in prev_ids] # They may come as ints
if get_domain_from_id(user_id) != origin:
# TODO: Raise?
logger.warning("Got device list update edu for %r from %r", user_id, origin)
return
room_ids = yield self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
return
self._pending_updates.setdefault(user_id, []).append(
(device_id, stream_id, prev_ids, edu_content)
)
yield self._handle_device_updates(user_id)
@measure_func("_incoming_device_list_update")
@defer.inlineCallbacks
def _handle_device_updates(self, user_id):
"Actually handle pending updates."
with (yield self._remote_edu_linearizer.queue(user_id)):
pending_updates = self._pending_updates.pop(user_id, [])
if not pending_updates:
# This can happen since we batch updates
return
# Given a list of updates we check if we need to resync. This
# happens if we've missed updates.
resync = yield self._need_to_do_resync(user_id, pending_updates)
if resync:
# Fetch all devices for the user.
origin = get_domain_from_id(user_id)
try:
result = yield self.federation.query_user_devices(origin, user_id)
except NotRetryingDestination:
# TODO: Remember that we are now out of sync and try | |
1 on edge 1 and
# point 2 on edge 2, and assign.
sp_12 = len_1
if len_1 > len_2:
sp_12 = len_2
nearest[p1, p2] = sp_12
if symmetric:
# Mirror the upper and lower triangle when symmetric.
nearest[p2,p1] = nearest[p1,p2]
# Populate the main diagonal when symmetric.
if symmetric:
if fill_diagonal == None:
np.fill_diagonal(nearest, np.nan)
else:
np.fill_diagonal(nearest, fill_diagonal)
return nearest
def nearestneighbordistances(self, sourcepattern, destpattern=None, n_processes=None):
"""
Compute the interpattern nearest neighbor distances or the intrapattern
nearest neighbor distances between a source pattern and a destination pattern.
Parameters
----------
sourcepattern: str
The key of a point pattern snapped to the network.
destpattern: str
(Optional) The key of a point pattern snapped to the network.
n_processes: int, str
(Optional) Specify the number of cores to utilize.
Default is 1 core. Use (int) to specify an exact number of cores.
Use ("all") to request all available cores.
Returns
-------
nearest: ndarray (n,2)
With column[:,0] containing the id of the nearest neighbor and
column [:,1] containing the distance.
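Example
-------
Illustrative only; assumes the PySAL example shapefiles and a point
pattern snapped under the name 'crimes':
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes')
>>> nn = ntw.nearestneighbordistances('crimes')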
"""
if not sourcepattern in self.pointpatterns.keys():
raise KeyError("Available point patterns are {}".format(self.pointpatterns.keys()))
if not hasattr(self,'alldistances'):
self.node_distance_matrix(n_processes)
pt_indices = self.pointpatterns[sourcepattern].points.keys()
dist_to_node = self.pointpatterns[sourcepattern].dist_to_node
nearest = np.zeros((len(pt_indices), 2), dtype=np.float32)
nearest[:,1] = np.inf
if destpattern == None:
destpattern = sourcepattern
searchpts = copy.deepcopy(pt_indices)
searchnodes = {}
for s in searchpts:
e1, e2 = dist_to_node[s].keys()
searchnodes[s] = (e1, e2)
for p1 in pt_indices:
# Get the source nodes and dist to source nodes.
source1, source2 = searchnodes[p1]
sdist1, sdist2 = dist_to_node[p1].values()
searchpts.remove(p1)
for p2 in searchpts:
dest1, dest2 = searchnodes[p2]
ddist1, ddist2 = dist_to_node[p2].values()
source1_to_dest1 = sdist1 + self.alldistances[source1][0][dest1] + ddist1
source1_to_dest2 = sdist1 + self.alldistances[source1][0][dest2] + ddist2
source2_to_dest1 = sdist2 + self.alldistances[source2][0][dest1] + ddist1
source2_to_dest2 = sdist2 + self.alldistances[source2][0][dest2] + ddist2
if source1_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest1
if source1_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest1
if source1_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source1_to_dest2
if source1_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source1_to_dest2
if source2_to_dest1 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest1
if source2_to_dest1 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest1
if source2_to_dest2 < nearest[p1, 1]:
nearest[p1, 0] = p2
nearest[p1, 1] = source2_to_dest2
if source2_to_dest2 < nearest[p2, 1]:
nearest[p2, 0] = p1
nearest[p2, 1] = source2_to_dest2
return nearest
def NetworkF(self, pointpattern, nsteps=10, permutations=99,
threshold=0.2, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained F-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the F-function is computed. (Default 0)
upperbound: float
The upper bound at which the F-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkF: object
A network F class instance.
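Example
-------
Illustrative only; assumes a point pattern snapped under the name 'crimes':
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.snapobservations(ps.examples.get_path('crimes.shp'), 'crimes')
>>> fres = ntw.NetworkF(ntw.pointpatterns['crimes'], permutations=99)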
"""
return NetworkF(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkG(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained G-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the G-function is computed. (Default 0)
upperbound: float
The upper bound at which the G-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkG: object
A network G class instance.
"""
return NetworkG(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def NetworkK(self, pointpattern, nsteps=10, permutations=99,
threshold=0.5, distribution='uniform',
lowerbound=None, upperbound=None):
"""
Computes a network constrained K-Function
Parameters
----------
pointpattern: object
A PySAL point pattern object.
nsteps: int
The number of steps at which the count of the nearest neighbors
is computed.
permutations: int
The number of permutations to perform (default 99).
threshold: float
The level at which significance is computed.
-- 0.5 would be 97.5% and 2.5%
distribution: str
The distribution from which random points are sampled:
-- uniform or poisson
lowerbound: float
The lower bound at which the K-function is computed. (Default 0)
upperbound: float
The upper bound at which the K-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
NetworkK: object
A network K class instance.
"""
return NetworkK(self, pointpattern, nsteps=nsteps,
permutations=permutations,threshold=threshold,
distribution=distribution,lowerbound=lowerbound,
upperbound=upperbound)
def segment_edges(self, distance):
"""
Segment all of the edges in the network at either a fixed distance or a fixed
number of segments.
Parameters
-----------
distance: float
The distance at which edges are split.
Returns
-------
sn: object
PySAL Network Object.
Example
-------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> n200 = ntw.segment_edges(200.0)
>>> len(n200.edges)
688
"""
sn = Network()
sn.adjacencylist = copy.deepcopy(self.adjacencylist)
sn.edge_lengths = copy.deepcopy(self.edge_lengths)
sn.edges = set(copy.deepcopy(self.edges))
sn.node_coords = copy.deepcopy(self.node_coords)
sn.node_list = copy.deepcopy(self.node_list)
sn.nodes = copy.deepcopy(self.nodes)
sn.pointpatterns = copy.deepcopy(self.pointpatterns)
sn.in_shp = self.in_shp
current_node_id = max(self.nodes.values())
newedges = set()
removeedges = set()
for e in sn.edges:
length = sn.edge_lengths[e]
interval = distance
totallength = 0
currentstart = startnode = e[0]
endnode = e[1]
# If the edge will be segmented remove the current edge from the adjacency list.
if interval < length:
sn.adjacencylist[e[0]].remove(e[1])
sn.adjacencylist[e[1]].remove(e[0])
sn.edge_lengths.pop(e, None)
removeedges.add(e)
else:
continue
while totallength < length:
currentstop = current_node_id
if totallength + interval > length:
currentstop = endnode
interval = length - totallength
totallength = length
else:
current_node_id += 1
currentstop = current_node_id
totallength += interval
# Compute the new node coordinate.
newx, newy = self._newpoint_coords(e, totallength)
# Update node_list.
if currentstop not in sn.node_list:
sn.node_list.append(currentstop)
# Update nodes and node_coords.
sn.node_coords[currentstop] = newx, newy
sn.nodes[(newx, newy)] = currentstop
# Update the adjacency list.
sn.adjacencylist[currentstart].append(currentstop)
sn.adjacencylist[currentstop].append(currentstart)
# Add the new edge to the edge dict.
# Iterating over this so we need to add after iterating.
newedges.add(tuple(sorted([currentstart, currentstop])))
# Modify edge_lengths.
sn.edge_lengths[tuple(sorted([currentstart, currentstop]))] = interval
# Increment the start to the stop.
currentstart = currentstop
sn.edges.update(newedges)
sn.edges.difference_update(removeedges)
sn.edges = list(sn.edges)
# Update the point pattern snapping.
for instance in sn.pointpatterns.itervalues():
sn._snap_to_edge(instance)
return sn
def savenetwork(self, filename):
"""
Save a network to disk as a binary file
Parameters
----------
filename: str
The filename where the network should be saved. This should be a full
path or the file will be saved wherever this method is called from.
Example
--------
>>> ntw = ps.Network(ps.examples.get_path('streets.shp'))
>>> ntw.savenetwork('mynetwork.pkl')
"""
with open(filename, 'wb') as networkout:
cPickle.dump(self, networkout, protocol=2)
@staticmethod
def loadnetwork(filename):
with open(filename, 'rb') as networkin:
self = cPickle.load(networkin)
return self
class PointPattern():
"""
A stub point pattern class used to store a point pattern. This class is monkey patched
with network specific attributes when the points are snapped to a network.
In the future this class may be replaced with a generic point
pattern class.
Parameters
----------
shapefile: str
The input shapefile.
idvariable: str
Field in the shapefile to use as an id variable.
attribute: bool
{False, True}
A flag to indicate whether all attributes are tagged to this class.
Attributes
----------
points: dict
Keys are the point ids.
Values are the coordinates.
npoints: int
The number of points.
"""
def __init__(self, shapefile, idvariable=None, attribute=False):
self.points = {}
self.npoints = 0
if idvariable:
ids = get_ids(shapefile, idvariable)
else:
ids = None
pts = ps.open(shapefile)
# Get attributes if requested
if attribute == True:
dbname = os.path.splitext(shapefile)[0] + '.dbf'
db = ps.open(dbname)
else:
db = None
| |
# google/cloud/bigquery/v2/bigquery-v2-py/google/cloud/bigquery_v2/services/model_service/async_client.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_v2.types import encryption_config
from google.cloud.bigquery_v2.types import model
from google.cloud.bigquery_v2.types import model as gcb_model
from google.cloud.bigquery_v2.types import model_reference
from google.cloud.bigquery_v2.types import standard_sql
from google.protobuf import wrappers_pb2 # type: ignore
from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport
from .client import ModelServiceClient
class ModelServiceAsyncClient:
""""""
_client: ModelServiceClient
DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ModelServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(ModelServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path)
common_project_path = staticmethod(ModelServiceClient.common_project_path)
parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path)
common_location_path = staticmethod(ModelServiceClient.common_location_path)
parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ModelServiceAsyncClient: The constructed client.
"""
return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ModelServiceAsyncClient: The constructed client.
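
Example (illustrative; the key file path is a placeholder):

    client = ModelServiceAsyncClient.from_service_account_file(
        "/path/to/service-account.json")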
"""
return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ModelServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ModelServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ModelServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the model service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ModelServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ModelServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def get_model(self,
request: model.GetModelRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
model_id: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.Model:
r"""Gets the specified model resource by model ID.
Args:
request (:class:`google.cloud.bigquery_v2.types.GetModelRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the requested
model.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the requested
model.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model_id (:class:`str`):
Required. Model ID of the requested
model.
This corresponds to the ``model_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.Model:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, model_id])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.GetModelRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if model_id is not None:
request.model_id = model_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_model,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("project_id", request.project_id),
("dataset_id", request.dataset_id),
("model_id", request.model_id),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_models(self,
request: model.ListModelsRequest = None,
*,
project_id: str = None,
dataset_id: str = None,
max_results: wrappers_pb2.UInt32Value = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> model.ListModelsResponse:
r"""Lists all models in the specified dataset. Requires
the READER dataset role. After retrieving the list of
models, you can get information about a particular model
by calling the models.get method.
Args:
request (:class:`google.cloud.bigquery_v2.types.ListModelsRequest`):
The request object.
project_id (:class:`str`):
Required. Project ID of the models to
list.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset_id (:class:`str`):
Required. Dataset ID of the models to
list.
This corresponds to the ``dataset_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
max_results (:class:`google.protobuf.wrappers_pb2.UInt32Value`):
The maximum number of results to
return in a single response page.
Leverage the page tokens to iterate
through the entire collection.
This corresponds to the ``max_results`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_v2.types.ListModelsResponse:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, dataset_id, max_results])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = model.ListModelsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if dataset_id is not None:
request.dataset_id = dataset_id
if max_results is not None:
request.max_results = max_results
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_models,
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("project_id", request.project_id),
("dataset_id", request.dataset_id),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
<reponame>calbach/curation<filename>data_steward/analytics/table_metrics/Table_Metrics_part_1.py
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #!pip install --upgrade google-cloud-bigquery[pandas]
# -
from google.cloud import bigquery
client = bigquery.Client()
# %load_ext google.cloud.bigquery
# %reload_ext google.cloud.bigquery
# +
#######################################
print('Setting everything up...')
#######################################
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import matplotlib.pyplot as plt
DATASET = ''  # placeholder: set to the dataset to analyze before running the queries below
plt.style.use('ggplot')
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.options.display.max_colwidth = 999
def cstr(s, color='black'):
return "<text style=color:{}>{}</text>".format(color, s)
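# cstr() only builds the markup string; a notebook would typically render it
# with, e.g., IPython.display.HTML(cstr('failed', color='red')) - illustrative
# usage, not part of the original notebook.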
print('done.')
# +
dic = {'src_hpo_id': ["trans_am_essentia", "saou_ummc", "seec_miami", "seec_morehouse", "seec_emory", "uamc_banner",
"pitt", "nyc_cu", "ipmc_uic", "trans_am_spectrum", "tach_hfhs", "nec_bmc", "cpmc_uci", "nec_phs",
"nyc_cornell", "ipmc_nu", "nyc_hh", "ipmc_uchicago", "aouw_mcri", "syhc", "cpmc_ceders",
"seec_ufl", "saou_uab", "trans_am_baylor", "cpmc_ucsd", "ecchc", "chci", "aouw_uwh", "cpmc_usc",
"hrhc", "ipmc_northshore", "chs", "cpmc_ucsf", "jhchc", "aouw_mcw", "cpmc_ucd", "ipmc_rush"],
'HPO': ["Essentia Health Superior Clinic", "University of Mississippi", "SouthEast Enrollment Center Miami",
"SouthEast Enrollment Center Morehouse", "SouthEast Enrollment Center Emory", "Banner Health",
"University of Pittsburgh", "Columbia University Medical Center", "University of Illinois Chicago",
"Spectrum Health", "Henry Ford Health System", "Boston Medical Center", "UC Irvine",
"Partners HealthCare", "Weill Cornell Medical Center", "Northwestern Memorial Hospital",
"Harlem Hospital", "University of Chicago", "Marshfield Clinic", "San Ysidro Health Center",
"Cedars-Sinai", "University of Florida", "University of Alabama at Birmingham", "Baylor", "UC San Diego",
"Eau Claire Cooperative Health Center", "Community Health Center, Inc.",
"UW Health (University of Wisconsin Madison)", "University of Southern California", "HRHCare",
"NorthShore University Health System", "Cherokee Health Systems", "UC San Francisco",
"Jackson-Hinds CHC", "Medical College of Wisconsin", "UC Davis", "Rush University"]}
site_df = pd.DataFrame(data=dic)
site_df
# -
# # There should not be duplicate rows.
# ## visit_occurrence table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, visit_concept_id, visit_start_date, visit_start_datetime, visit_end_date, visit_end_datetime,
visit_type_concept_id, provider_id, care_site_id, visit_source_value, visit_source_concept_id,
admitting_source_concept_id, admitting_source_value, discharge_to_concept_id,
discharge_to_source_value, preceding_visit_occurrence_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_visit_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_visit_occurrence`) AS t2
ON
t1.visit_occurrence_id=t2.visit_occurrence_id
WHERE
t1.visit_concept_id!=0 AND t1.visit_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
visit_occurrence = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'visit_occurrence'}).sort_values(["visit_occurrence"]).set_index("src_hpo_id")
visit_occurrence = visit_occurrence.reset_index()
visit_occurrence
# ## condition_occurrence table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, condition_concept_id, condition_start_date, condition_start_datetime, condition_end_date,
condition_end_datetime, condition_type_concept_id, stop_reason, provider_id, visit_occurrence_id,
condition_source_value, condition_source_concept_id, condition_status_source_value, condition_status_concept_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_condition_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS t2
ON
t1.condition_occurrence_id=t2.condition_occurrence_id
WHERE
t1.condition_concept_id!=0 AND t1.condition_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
condition_occurrence = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'condition_occurrence'}).sort_values(["condition_occurrence"]).set_index("src_hpo_id")
condition_occurrence = condition_occurrence.reset_index()
condition_occurrence
# ## drug_exposure table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, drug_concept_id, drug_exposure_start_date,drug_exposure_start_datetime,
drug_exposure_end_date,drug_exposure_end_datetime, verbatim_end_date, drug_type_concept_id,
stop_reason, refills, quantity,
days_supply, sig, route_concept_id, lot_number, provider_id, visit_occurrence_id, drug_source_value,
drug_source_concept_id, route_source_value, dose_unit_source_value,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_drug_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_drug_exposure`) AS t2
ON
t1.drug_exposure_id=t2.drug_exposure_id
WHERE
t1.drug_concept_id!=0 AND t1.drug_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22
HAVING
COUNT(*) > 1
ORDER BY
1,2,3
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
drug_exposure = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'drug_exposure'}).sort_values(["drug_exposure"]).set_index("src_hpo_id")
drug_exposure = drug_exposure.reset_index()
drug_exposure
# ## measurement table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, measurement_concept_id, measurement_date, measurement_datetime, measurement_type_concept_id,
operator_concept_id, value_as_number, value_as_concept_id, unit_concept_id, range_low,
range_high, provider_id, visit_occurrence_id,
measurement_source_value, measurement_source_concept_id, unit_source_value, value_source_value,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_measurement` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_measurement`) AS t2
ON
t1.measurement_id=t2.measurement_id
WHERE
t1.measurement_concept_id!=0 AND t1.measurement_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18
HAVING
COUNT(*) > 1
ORDER BY
1,2,3
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
measurement = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'measurement'}).sort_values(["measurement"]).set_index("src_hpo_id")
measurement = measurement.reset_index()
measurement
# ## procedure_occurrence
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, procedure_concept_id, procedure_date, procedure_datetime, procedure_type_concept_id, modifier_concept_id,
quantity, provider_id, visit_occurrence_id, procedure_source_value, procedure_source_concept_id, qualifier_source_value,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
WHERE
t1.procedure_concept_id!=0 AND t1.procedure_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
procedure_occurrence = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'procedure_occurrence'}).sort_values(["procedure_occurrence"]).set_index("src_hpo_id")
procedure_occurrence = procedure_occurrence.reset_index()
procedure_occurrence
# ## observation table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, observation_concept_id, observation_date, observation_datetime, observation_type_concept_id, value_as_number,
value_as_string, value_as_concept_id, qualifier_concept_id, unit_concept_id, provider_id, visit_occurrence_id,
observation_source_value, observation_source_concept_id, unit_source_value, qualifier_source_value, value_source_concept_id,
value_source_value, questionnaire_response_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_observation` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_observation`) AS t2
ON
t1.observation_id=t2.observation_id
WHERE
t1.observation_concept_id!=0 AND t1.observation_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
observation = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'observation'}).sort_values(["observation"]).set_index("src_hpo_id")
observation = observation.reset_index()
observation
# ## provider table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
provider_name, NPI, DEA, specialty_concept_id, care_site_id, year_of_birth,
gender_concept_id, provider_source_value, specialty_source_value,
specialty_source_concept_id, gender_source_value, gender_source_concept_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_provider` AS t1
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9,10,11,12
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
# ## device_exposure table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
src_hpo_id,
person_id, device_concept_id, device_exposure_start_date, device_exposure_start_datetime, device_exposure_end_date,
device_exposure_end_datetime, device_type_concept_id, unique_device_id, quantity, provider_id,
visit_occurrence_id, device_source_value, device_source_concept_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_device_exposure` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_device_exposure`) AS t2
ON
t1.device_exposure_id=t2.device_exposure_id
WHERE
t1.device_concept_id!=0 AND t1.device_concept_id IS NOT NULL AND
t1.person_id!=0 and t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6,7,8,9,10,11,12,13,14
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
device_exposure = foreign_key_df.groupby(['src_hpo_id']).size().reset_index().rename(
columns={0: 'device_exposure'}).sort_values(["device_exposure"]).set_index("src_hpo_id")
device_exposure = device_exposure.reset_index()
device_exposure
# ## death table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
death_date, death_datetime, death_type_concept_id, cause_concept_id, cause_source_value, cause_source_concept_id,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_death` AS t1
WHERE
t1.death_date IS NOT NULL AND t1.person_id IS NOT NULL
GROUP BY
1,2,3,4,5,6
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5,6
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
# ## care_site table
# +
######################################
print('Getting the data from the database...')
######################################
foreign_key_df = pd.io.gbq.read_gbq('''
SELECT
place_of_service_concept_id, location_id, place_of_service_source_value,
care_site_name, care_site_source_value,
COUNT(*) as cnt
FROM
`{}.unioned_ehr_care_site` AS t1
GROUP BY
1,2,3,4,5
HAVING
COUNT(*) > 1
ORDER BY
1,2,3,4,5
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET),
dialect='standard')
print(foreign_key_df.shape[0], 'records received.')
# -
foreign_key_df.head()
# ## Sites combined
sites_success = pd.merge(visit_occurrence, condition_occurrence, how='outer', on='src_hpo_id')
sites_success = pd.merge(sites_success, drug_exposure, how='outer', on='src_hpo_id')
sites_success = pd.merge(sites_success, measurement, how='outer', on='src_hpo_id')
sites_success = pd.merge(sites_success, procedure_occurrence, how='outer', on='src_hpo_id')
sites_success = pd.merge(sites_success, device_exposure, how='outer', on='src_hpo_id')
sites_success = pd.merge(sites_success, observation, how='outer', on='src_hpo_id')
sites_success = pd.merge(sites_success, site_df, how='outer', on='src_hpo_id')
sites_success = sites_success.fillna(0)
sites_success[["visit_occurrence", "condition_occurrence", "drug_exposure", "measurement", "procedure_occurrence",
"device_exposure", "observation"]] \
= sites_success[["visit_occurrence", "condition_occurrence", "drug_exposure", "measurement", "procedure_occurrence",
"device_exposure", "observation"]].astype(int)
sites_success
sites_success.to_csv("data\\duplicates.csv")
# # 20. Dataframe (row for each hpo_id) Condition_occurrence table, condition_source_concept_id field
condition_concept_df = pd.io.gbq.read_gbq('''
WITH
data1 AS (
SELECT
src_hpo_id,
COUNT(*) AS condition_total_row
FROM
`{}.unioned_ehr_condition_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS t2
ON
t1.condition_occurrence_id=t2.condition_occurrence_id
GROUP BY
1
),
data2 AS (
SELECT
src_hpo_id,
COUNT(*) AS condition_well_defined_row
FROM
`{}.unioned_ehr_condition_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS t2
ON
t1.condition_occurrence_id=t2.condition_occurrence_id
INNER JOIN
`{}.concept` as t3
ON
t3.concept_id = t1.condition_concept_id
WHERE
t3.domain_id="Condition" and t3.standard_concept="S"
GROUP BY
1
),
data3 AS (
SELECT
src_hpo_id,
COUNT(*) AS condition_total_zero
FROM
`{}.unioned_ehr_condition_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_condition_occurrence`) AS t2
ON
t1.condition_occurrence_id=t2.condition_occurrence_id
INNER JOIN
`{}.concept` as t3
ON
t3.concept_id = t1.condition_concept_id
WHERE
(t3.concept_id=0 or t3.concept_id is null)
GROUP BY
1
)
SELECT
data1.src_hpo_id,
condition_well_defined_row,
condition_total_row,
round(100*(condition_well_defined_row/condition_total_row),1) as condition_success_rate
FROM
data1
LEFT OUTER JOIN
data2
ON
data1.src_hpo_id=data2.src_hpo_id
LEFT OUTER JOIN
data3
ON
data1.src_hpo_id=data3.src_hpo_id
ORDER BY
1 DESC
'''.format(DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET, DATASET,
DATASET, DATASET),
dialect='standard'
)
condition_concept_df.shape
condition_concept_df = condition_concept_df.fillna(0)
condition_concept_df
# # 21. Dataframe (row for each hpo_id) Procedure_occurrence table, procedure_source_concept_id field
procedure_concept_df = pd.io.gbq.read_gbq('''
WITH
data1 AS (
SELECT
src_hpo_id,
COUNT(*) AS procedure_total_row
FROM
`{}.unioned_ehr_procedure_occurrence` AS t1
INNER JOIN
(SELECT
DISTINCT *
FROM
`{}._mapping_procedure_occurrence`) AS t2
ON
t1.procedure_occurrence_id=t2.procedure_occurrence_id
GROUP BY
1
),
data2 AS (
"""
var, name, freq = fields
print name
outputMeter = ops.Meter(model)
outputMeter.setMeterFileOnly(False)
outputMeter.setName(name.strip())
outputMeter.setReportingFrequency(freq.strip())
def setOutputs(self, simulationOutputs, model):
if simulationOutputs == []:
return
else:
for output in simulationOutputs:
try:
# remove comment
outstr = output.split("!")[0].strip()
# remove ; from the end
finalout = outstr.replace(";", "", 1)
# split into fields
fields = finalout.split(",")
print fields
if fields[0].strip().lower() == "output:variable":
self.setOutputVariable(fields, model)
elif fields[0].strip().lower() == "output:meter":
self.setOutputMeter(fields, model)
elif fields[0].strip().lower() == "outputcontrol:table:style":
pass
#self.setOutputControl(fields, model)
else:
msg = fields[0] + " is missing from the outputs!"
#ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, msg)
except Exception, e:
print e
pass
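# A sketch of the idf-style strings setOutputs() expects, following the usual
# EnergyPlus syntax (the values below are illustrative only):
#   simulationOutputs = [
#       "Output:Variable,*,Zone Mean Air Temperature,hourly;",
#       "Output:Meter,Electricity:Facility,monthly;",
#   ]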
class RunOPS(object):
def __init__(self, model, weatherFilePath = r"C:\EnergyPlusV8-1-0\WeatherData\USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"):
self.weatherFile = weatherFilePath # just for batch file as an alternate solution
self.EPPath = ops.Path(r"C:\EnergyPlusV8-1-0\EnergyPlus.exe")
self.epwFile = ops.Path(weatherFilePath)
self.iddFile = ops.Path(r"C:\EnergyPlusV8-1-0\Energy+.idd")
self.model = model
def osmToidf(self, workingDir, projectName, osmPath):
# create a new folder to run the analysis
projectFolder = os.path.join(workingDir, projectName)
try: os.mkdir(projectFolder)
except: pass
idfFolder = os.path.join(projectFolder)
idfFilePath = ops.Path(os.path.join(projectFolder, "ModelToIdf", "in.idf"))
forwardTranslator = ops.EnergyPlusForwardTranslator()
workspace = forwardTranslator.translateModel(self.model)
# remove the current object
tableStyleObjects = workspace.getObjectsByType(ops.IddObjectType("OutputControl_Table_Style"))
for obj in tableStyleObjects: obj.remove()
tableStyle = ops.IdfObject(ops.IddObjectType("OutputControl_Table_Style"))
tableStyle.setString(0, "CommaAndHTML")
workspace.addObject(tableStyle)
workspace.save(idfFilePath, overwrite = True)
"""
CHarriman added code to always add monthly reports to idf for ease of use in SQL
on Nov 8 2014
"""
#Monthly code added based on
#git site:https://github.com/NREL/OpenStudio/blob/develop/openstudiocore/src/runmanager/lib/EnergyPlusPreProcessJob.cpp#L202
makeMonthly = True
if makeMonthly:
self.writeIDFWithMonthly(idfFilePath)
#DBPath = ops.Path(os.path.join(projectFolder, projectName + "_osmToidf.db"))
# start run manager
#rm = ops.RunManager(DBPath, True, True)
# create workflow
#wf = ops.Workflow("EnergyPlus")
# put in queue and let it go
#rm.enqueue(wf.create(ops.Path(projectFolder), osmPath, self.epwFile), True)
#rm.setPaused(False)
#while rm.workPending():
# time.sleep(.5)
# print "Converting osm to idf ..."
#rm.Dispose() # don't remove this as Rhino will crash if you don't dispose run manager
return idfFolder, idfFilePath
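# Illustrative usage of the translation step above (paths and names are
# hypothetical):
#   runner = RunOPS(model, r"C:\EnergyPlusV8-1-0\WeatherData\myLocation.epw")
#   idfFolder, idfFilePath = runner.osmToidf(workingDir, "myProject", osmPath)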
def writeIDFWithMonthly(self, idfFilePath):
print "Making Monthly SQL reading possible."
print idfFilePath
fi = open(str(idfFilePath),'r')
fi.seek(0)
prepare=False
count = 0
lines=[]
for line in fi:
if line.strip() != 'Output:SQLite,':
if (prepare):
count+=1;
if (count==2):
lines.append("\n")
lines.append("Output:Table:Monthly," + "\n")
lines.append(" Building Energy Performance - Electricity, !- Name"+ "\n")
lines.append(" 2, !- Digits After Decimal"+ "\n")
lines.append(" InteriorLights:Electricity, !- Variable or Meter 1 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 1"+ "\n")
lines.append(" ExteriorLights:Electricity, !- Variable or Meter 2 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 2"+ "\n")
lines.append(" InteriorEquipment:Electricity, !- Variable or Meter 3 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 3"+ "\n")
lines.append(" ExteriorEquipment:Electricity, !- Variable or Meter 4 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 4"+ "\n")
lines.append(" Fans:Electricity, !- Variable or Meter 5 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 5"+ "\n")
lines.append(" Pumps:Electricity, !- Variable or Meter 6 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 6"+ "\n")
lines.append(" Heating:Electricity, !- Variable or Meter 7 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 7"+ "\n")
lines.append(" Cooling:Electricity, !- Variable or Meter 8 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 8"+ "\n")
lines.append(" HeatRejection:Electricity, !- Variable or Meter 9 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 9"+ "\n")
lines.append(" Humidifier:Electricity, !- Variable or Meter 10 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 10"+ "\n")
lines.append(" HeatRecovery:Electricity,!- Variable or Meter 11 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 11"+ "\n")
lines.append(" WaterSystems:Electricity,!- Variable or Meter 12 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 12"+ "\n")
lines.append(" Cogeneration:Electricity,!- Variable or Meter 13 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 13"+ "\n")
lines.append(" Refrigeration:Electricity,!- Variable or Meter 14 Name"+ "\n")
lines.append(" SumOrAverage; !- Aggregation Type for Variable or Meter 14"+ "\n")
"""
lines.append("\n")
lines.append("Output:Table:Monthly," + "\n")
lines.append(" Building Energy Performance - Water, !- Name"+ "\n")
lines.append(" 2, !- Digits After Decimal"+ "\n")
lines.append(" InteriorLights:Water, !- Variable or Meter 1 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 1"+ "\n")
lines.append(" ExteriorLights:Water, !- Variable or Meter 2 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 2"+ "\n")
lines.append(" InteriorEquipment:Water, !- Variable or Meter 3 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 3"+ "\n")
lines.append(" ExteriorEquipment:Water, !- Variable or Meter 4 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 4"+ "\n")
lines.append(" Fans:Water, !- Variable or Meter 5 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 5"+ "\n")
lines.append(" Pumps:Water, !- Variable or Meter 6 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 6"+ "\n")
lines.append(" Heating:Water, !- Variable or Meter 7 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 7"+ "\n")
lines.append(" Cooling:Water, !- Variable or Meter 8 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 8"+ "\n")
lines.append(" HeatRejection:Water, !- Variable or Meter 9 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 9"+ "\n")
lines.append(" Humidifier:Water, !- Variable or Meter 10 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 10"+ "\n")
lines.append(" HeatRecovery:Water,!- Variable or Meter 11 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 11"+ "\n")
lines.append(" WaterSystems:Water,!- Variable or Meter 12 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 12"+ "\n")
lines.append(" Cogeneration:Water,!- Variable or Meter 13 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 13"+ "\n")
lines.append(" Refrigeration:Water,!- Variable or Meter 14 Name"+ "\n")
lines.append(" SumOrAverage; !- Aggregation Type for Variable or Meter 14"+ "\n")
"""
lines.append("\n")
lines.append("Output:Table:Monthly,"+ "\n")
lines.append(" Building Energy Performance - Natural Gas, !- Name"+ "\n")
lines.append(" 2, !- Digits After Decimal"+ "\n")
lines.append(" InteriorEquipment:Gas, !- Variable or Meter 1 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 1"+ "\n")
lines.append(" ExteriorEquipment:Gas, !- Variable or Meter 2 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 2"+ "\n")
lines.append(" Heating:Gas, !- Variable or Meter 3 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 3"+ "\n")
lines.append(" Cooling:Gas, !- Variable or Meter 4 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 4"+ "\n")
lines.append(" WaterSystems:Gas, !- Variable or Meter 5 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 5"+ "\n")
lines.append(" Cogeneration:Gas, !- Variable or Meter 6 Name"+ "\n")
lines.append(" SumOrAverage; !- Aggregation Type for Variable or Meter 6"+ "\n")
lines.append("\n")
lines.append("Output:Table:Monthly,"+ "\n")
lines.append(" Building Energy Performance - District Heating, !- Name"+ "\n")
lines.append(" 2, !- Digits After Decimal"+ "\n")
lines.append(" InteriorLights:DistrictHeating, !- Variable or Meter 1 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 1"+ "\n")
lines.append(" ExteriorLights:DistrictHeating, !- Variable or Meter 2 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 2"+ "\n")
lines.append(" InteriorEquipment:DistrictHeating, !- Variable or Meter 3 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 3"+ "\n")
lines.append(" ExteriorEquipment:DistrictHeating, !- Variable or Meter 4 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 4"+ "\n")
lines.append(" Fans:DistrictHeating, !- Variable or Meter 5 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 5"+ "\n")
lines.append(" Pumps:DistrictHeating, !- Variable or Meter 6 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 6"+ "\n")
lines.append(" Heating:DistrictHeating, !- Variable or Meter 7 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 7"+ "\n")
lines.append(" Cooling:DistrictHeating, !- Variable or Meter 8 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 8"+ "\n")
lines.append(" HeatRejection:DistrictHeating, !- Variable or Meter 9 Name"+ "\n")
lines.append(" SumOrAverage, !- Aggregation Type for Variable or Meter 9"+ "\n")
lines.append(" Humidifier:DistrictHeating, !- Variable or Meter 10 Name"+ "\n")
lines.append("       SumOrAverage, !- Aggregation Type for Variable or Meter 10"+ "\n")
# wcurve.py - basic arithmetic on elliptic curves in short Weierstrass form.
#
# Copyright (c) 2010-2011 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This package implements basic arithmetic operations such as point addition and
single-scalar multiplication on elliptic curves in short Weierstrass form.
Example::
import wcurve, random
# Instantiate secp256r1 aka nistp256r1 standardized curve
curve = wcurve.secp256r1_curve()
# Generate a new secret value
sk = random.SystemRandom().randint(1, curve.n - 1)
# Compute the public key associated with the previous secret
pk = sk * curve.base_point
# Get its affine coordinates
pkx, pky = pk.to_affine()
Internally, curve points are represented in Jacobian coordinates. There's
currently no optimized implementation for the double scalar multiplication
operation; it is merely the addition of two independent single-scalar
multiplications.
The primary goal of this code is to keep things simple and offer a pure
Python standalone interface to some of the most widely used curves.
As implemented, single-scalar multiplications are not protected against DPA
and some types of fault attacks. However, the exponentiation algorithms are
regular, without dummy operations, and conditional branching instructions are
avoided.
Besides the usual scalar multiplication algorithm transparently used when
:py:func:`secp256r1_curve` is instantiated, another algorithm is implemented.
This one uses *infective computations* techniques [2]_ to prevent an attacker
from extracting useful information from a wrong scalar-multiplication result.
This algorithm is automatically used when a :py:func:`secp256r1_curve_infective`
curve is instantiated. For more details on *infective computations* read the
docstring of :py:meth:`JacobianPoint.scalar_multiplication_infective`.
.. note::
functions, classes and methods prefixed with _ in the source code are private
to this module; they are not intended to be called from external client code.
"""
import copy
import random
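# A minimal sketch of the infective-computation variant described in the module
# docstring, assuming it mirrors the secp256r1_curve() calling convention:
#
#   curve = wcurve.secp256r1_curve_infective()
#   sk = random.SystemRandom().randint(1, curve.n - 1)
#   pk = sk * curve.base_point   # scalar_multiplication_infective is used here
#   pkx, pky = pk.to_affine()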
__author__ = "<NAME> (<EMAIL>)"
__version__ = "0.1.9"
def _check_integer_type(val):
"""
Check val is an integer, raise an exception if it is not the case.
"""
try:
import numbers
if isinstance(val, numbers.Integral):
return True
except:
if isinstance(val, int) or isinstance(val, long):
return True
raise TypeError("Invalid type %s, expected integral type." % type(val))
def _bit_length(x):
"""
Returns the bit length of |x|.
"""
x = abs(x)
# Randomly pre-shift x so the loop length does not reveal its exact bit length.
r = random.SystemRandom().randint(0, 64)
x = x << r
# See comment in JacobianPoint.scalar_multiplication().
if not x:
return 0
n = -r
while x:
n += 1
x >>= 1
return n
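# For example, _bit_length(10) == 4 and _bit_length(0) == 0, matching Python's
# int.bit_length(); the random pre-shift only disguises how many loop
# iterations are spent on the value itself.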
def _cond_swap_values(swap, u, v):
"""
Conditionally swap u and v: return them swapped if swap=1 and unchanged if
swap=0. The value of swap must be exclusively 0 or 1.
"""
swap_diff = (-swap) & (u ^ v)
return u ^ swap_diff, v ^ swap_diff
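# For example, _cond_swap_values(1, 3, 5) == (5, 3) while
# _cond_swap_values(0, 3, 5) == (3, 5); the mask (-swap) & (u ^ v) performs the
# swap without a data-dependent branch.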
class _ZpZArithmetic:
def __init__(self, p, order):
"""
order = euler_phi(p)
"""
self.p = p
self.order = order
@staticmethod
def create_from_curve(curve):
if hasattr(curve, 'phi_p'):
return _ZpZArithmetic(curve.p, curve.phi_p)
return _FpArithmetic(curve.p)
def exp(self, g, k, k_num_bits):
"""
Returns g^k mod self.p with |k|=k_num_bits.
"""
return self._exp(g, k, k_num_bits, self.p)
def _exp(self, g, k, k_num_bits, n):
"""
Montgomery Ladder. Compute g^k mod n with |k|=k_num_bits.
"""
r0 = 1
r1 = g
while k_num_bits >= 0:
cur_bit = (k >> k_num_bits) & 1
r0, r1 = _cond_swap_values(1 - cur_bit, r0, r1)
r0 = r0 * r1 % n
r1 = r1 ** 2 % n
r0, r1 = _cond_swap_values(1 - cur_bit, r0, r1)
k_num_bits -= 1
return r0
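# For k >= 1 this ladder returns the same value as the built-in pow(), e.g.
# self._exp(5, 13, _bit_length(13), 23) == pow(5, 13, 23) == 21; the fixed
# per-bit work pattern is what keeps the exponentiation regular.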
def inverse(self, g):
"""
Returns the inverse of g mod p. This method does not check that the element
is invertible; that is the responsibility of the caller.
"""
return self._inverse(g, self.order, self.p)
def _inverse(self, g, order, p):
"""
Returns inverse of g mod p.
"""
e = order - 1
r = self._exp(g, e, _bit_length(e), p)
# Could not get a null result if the element had been invertible.
if not r: # equiv. to 'not r == 0'
raise ValueError("%d has no inverse mod %d." % (g, p))
return r
class _FpArithmetic(_ZpZArithmetic):
def __init__(self, p):
"""
p is prime
"""
_ZpZArithmetic.__init__(self, p, p - 1)
def _crt(lst, modulus):
"""
Compute a list of crts sharing the same modulus.
"""
prod = 1
for m in modulus:
prod *= m
ldiv = tuple(map(lambda m: prod // m, modulus))
linv = tuple(map(lambda x, y: _FpArithmetic(y).inverse(x), ldiv, modulus))
def _sum(a):
t = sum(map(lambda x, y, z: x * y * z, a, linv, ldiv))
return t % prod
return tuple(map(_sum, lst))
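# For example, _crt([(2, 3)], (3, 5)) == (8,): 8 is the unique value modulo 15
# congruent to 2 mod 3 and to 3 mod 5. The moduli must be prime here because
# _FpArithmetic is used for the inversions.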
class _CoZArithmetic:
"""
Co-Z arithmetic from "Co-Z Addition Formulae and Binary Ladders on Elliptic
Curves", <NAME> and <NAME> and <NAME>. The zaddu, zaddc
and dblu formulas are copied from Appendix A and Section 4.3 of this
paper.
"""
def __init__(self, curve):
"""
You shouldn't have to instantiate this class directly.
"""
self.curve = curve
def zaddu(self, p, q):
"""
Point addition with update.
(R,P)=ZADDU(P,Q) where R=P+Q=(X3:Y3:Z3) and P=(h2X1:h3Y1:Z3)
with Z3=hZ1 for some h!=0
"""
assert p.z % self.curve.p == q.z % self.curve.p
t1 = p.x; t2 = p.y; t3 = p.z; t4 = q.x; t5 = q.y;
t6 = t1 - t4
t3 = t3 * t6 % self.curve.p # z3
t6 = t6 ** 2 % self.curve.p # c
t1 = t1 * t6 % self.curve.p # w1
t6 = t6 * t4 % self.curve.p # w2
t5 = t2 - t5
t4 = t5 ** 2 % self.curve.p # d
t4 = t4 - t1
t4 = (t4 - t6) % self.curve.p # x3
t6 = t1 - t6
t2 = t2 * t6 % self.curve.p # a1
t6 = t1 - t4
t5 = t5 * t6 % self.curve.p
t5 = (t5 - t2) % self.curve.p # y3
return (JacobianPoint(t4, t5, t3, self.curve),
JacobianPoint(t1, t2, t3, self.curve))
def zaddc(self, p, q):
"""
Conjugate point addition.
(R,S)=ZADDC(P,Q) where R=P+Q=(X3:Y3:Z3) and S=P-Q=(X3:Y3:Z3)
"""
assert p.z % self.curve.p == q.z % self.curve.p
t1 = p.x; t2 = p.y; t3 = p.z; t4 = q.x; t5 = q.y;
t6 = t1 - t4
t3 = t3 * t6 % self.curve.p
t6 = t6 ** 2 % self.curve.p
t7 = t1 * t6 % self.curve.p
t6 = t6 * t4 % self.curve.p
t1 = t2 + t5
t4 = t1 ** 2 % self.curve.p
t4 = t4 - t7
t4 = (t4 - t6) % self.curve.p
t1 = t2 - t5
t1 = t1 ** 2 % self.curve.p
t1 = t1 - t7
t1 = (t1 - t6) % self.curve.p
t6 = t6 - t7
t6 = t6 * t2 % self.curve.p
t2 = t2 - t5
t5 = 2 * t5
t5 = t2 + t5
t7 = t7 - t4
t5 = t5 * t7 % self.curve.p
t5 = (t5 + t6) % self.curve.p
t7 = t4 + t7
t7 = t7 - t1
t2 = t2 * t7 % self.curve.p
t2 = (t2 + t6) % self.curve.p
return (JacobianPoint(t1, t2, t3, self.curve),
JacobianPoint(t4, t5, t3, self.curve))
def dblu(self, p):
"""
Initial point doubling (requires z=1).
(2P,P) = DBLU(P)
"""
assert p.z % self.curve.p == 1
t1 = p.x; t2 = p.y;
t4 = t1 ** 2 % self.curve.p # b
t5 = 3 * t4
t5 = t5 + self.curve.a
email_flow_folder, **kwargs): # noqa: E501
"""Insert email flow folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_flow_folder(storefront_oid, email_flow_folder, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailFlowFolder email_flow_folder: Email flow folder (required)
:return: EmailFlowFolderResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_email_flow_folder_with_http_info(storefront_oid, email_flow_folder, **kwargs) # noqa: E501
else:
(data) = self.insert_email_flow_folder_with_http_info(storefront_oid, email_flow_folder, **kwargs) # noqa: E501
return data
def insert_email_flow_folder_with_http_info(self, storefront_oid, email_flow_folder, **kwargs): # noqa: E501
"""Insert email flow folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_flow_folder_with_http_info(storefront_oid, email_flow_folder, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailFlowFolder email_flow_folder: Email flow folder (required)
:return: EmailFlowFolderResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'email_flow_folder'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method insert_email_flow_folder" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `insert_email_flow_folder`") # noqa: E501
# verify the required parameter 'email_flow_folder' is set
if ('email_flow_folder' not in params or
params['email_flow_folder'] is None):
raise ValueError("Missing the required parameter `email_flow_folder` when calling `insert_email_flow_folder`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'email_flow_folder' in params:
body_params = params['email_flow_folder']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/{storefront_oid}/email/flow_folders', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EmailFlowFolderResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def insert_email_list(self, storefront_oid, email_list, **kwargs): # noqa: E501
"""Insert email list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_list(storefront_oid, email_list, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailList email_list: Email list (required)
:return: EmailListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_email_list_with_http_info(storefront_oid, email_list, **kwargs) # noqa: E501
else:
(data) = self.insert_email_list_with_http_info(storefront_oid, email_list, **kwargs) # noqa: E501
return data
def insert_email_list_with_http_info(self, storefront_oid, email_list, **kwargs): # noqa: E501
"""Insert email list # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_list_with_http_info(storefront_oid, email_list, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailList email_list: Email list (required)
:return: EmailListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'email_list'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method insert_email_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `insert_email_list`") # noqa: E501
# verify the required parameter 'email_list' is set
if ('email_list' not in params or
params['email_list'] is None):
raise ValueError("Missing the required parameter `email_list` when calling `insert_email_list`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'email_list' in params:
body_params = params['email_list']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/{storefront_oid}/email/lists', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EmailListResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def insert_email_list_segment_folder(self, storefront_oid, email_list_segment_folder, **kwargs): # noqa: E501
"""Insert email campaign folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_list_segment_folder(storefront_oid, email_list_segment_folder, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailListSegmentFolder email_list_segment_folder: Email campaign folder (required)
:return: EmailListSegmentFolderResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_email_list_segment_folder_with_http_info(storefront_oid, email_list_segment_folder, **kwargs) # noqa: E501
else:
(data) = self.insert_email_list_segment_folder_with_http_info(storefront_oid, email_list_segment_folder, **kwargs) # noqa: E501
return data
def insert_email_list_segment_folder_with_http_info(self, storefront_oid, email_list_segment_folder, **kwargs): # noqa: E501
"""Insert email campaign folder # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_list_segment_folder_with_http_info(storefront_oid, email_list_segment_folder, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailListSegmentFolder email_list_segment_folder: Email campaign folder (required)
:return: EmailListSegmentFolderResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'email_list_segment_folder'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method insert_email_list_segment_folder" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'storefront_oid' is set
if ('storefront_oid' not in params or
params['storefront_oid'] is None):
raise ValueError("Missing the required parameter `storefront_oid` when calling `insert_email_list_segment_folder`") # noqa: E501
# verify the required parameter 'email_list_segment_folder' is set
if ('email_list_segment_folder' not in params or
params['email_list_segment_folder'] is None):
raise ValueError("Missing the required parameter `email_list_segment_folder` when calling `insert_email_list_segment_folder`") # noqa: E501
collection_formats = {}
path_params = {}
if 'storefront_oid' in params:
path_params['storefront_oid'] = params['storefront_oid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'email_list_segment_folder' in params:
body_params = params['email_list_segment_folder']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartBrowserApiKey', 'ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/storefront/{storefront_oid}/email/list_segment_folders', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EmailListSegmentFolderResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def insert_email_postcard(self, storefront_oid, email_commseq_postcard, **kwargs): # noqa: E501
"""Insert email postcard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_postcard(storefront_oid, email_commseq_postcard, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailCommseqPostcard email_commseq_postcard: Email postcard (required)
:return: EmailCommseqPostcardResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_email_postcard_with_http_info(storefront_oid, email_commseq_postcard, **kwargs) # noqa: E501
else:
(data) = self.insert_email_postcard_with_http_info(storefront_oid, email_commseq_postcard, **kwargs) # noqa: E501
return data
def insert_email_postcard_with_http_info(self, storefront_oid, email_commseq_postcard, **kwargs): # noqa: E501
"""Insert email postcard # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_email_postcard_with_http_info(storefront_oid, email_commseq_postcard, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int storefront_oid: (required)
:param EmailCommseqPostcard email_commseq_postcard: Email postcard (required)
:return: EmailCommseqPostcardResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['storefront_oid', 'email_commseq_postcard'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method insert_email_postcard" % key
)
params[key] = val
del params['kwargs']
# verify the required
<filename>qutip/fortran/mcsolve_f90.py
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Original Code by <NAME> (2012): github.com/arnelg/qutipf90mc
###############################################################################
import numpy as np
from qutip import *
from qutip.fortran import qutraj_run as qtf90
from qutip.odeconfig import odeconfig
from qutip.qobj import Qobj
from qutip.mcsolve import _mc_data_config
from qutip.odeoptions import Odeoptions
from qutip.odedata import Odedata
from qutip.settings import debug
if debug:
import inspect
import os
# Working precision
wpr = np.dtype(np.float64)
wpc = np.dtype(np.complex128)
def mcsolve_f90(H, psi0, tlist, c_ops, e_ops, ntraj=None,
options=Odeoptions(), sparse_dms=True, serial=False,
ptrace_sel=[], calc_entropy=False):
"""
Monte Carlo wave-function solver with a Fortran 90 backend.
Usage is identical to qutip.mcsolve for problems without explicit
time dependence, with some optional input:
Parameters
----------
H : qobj
System Hamiltonian.
psi0 : qobj
Initial state vector
tlist : array_like
Times at which results are recorded.
ntraj : int
Number of trajectories to run.
c_ops : array_like
``list`` or ``array`` of collapse operators.
e_ops : array_like
``list`` or ``array`` of operators for calculating expectation values.
options : Odeoptions
Instance of ODE solver options.
sparse_dms : boolean
If averaged density matrices are returned, they will be stored as
sparse (Compressed Row Format) matrices during computation if
sparse_dms = True (default), and dense matrices otherwise. Dense
matrices might be preferable for smaller systems.
serial : boolean
If True (default is False) the solver will not make use of the
multiprocessing module, and simply run in serial.
ptrace_sel: list
This optional argument specifies a list of components to keep when
returning a partially traced density matrix. This can be convenient for
large systems where memory becomes a problem but where you are only
interested in parts of the density matrix.
calc_entropy : boolean
If ptrace_sel is specified, calc_entropy=True will have the solver
return the averaged entropy over trajectories in results.entropy. This
can be interpreted as a measure of entanglement. See Phys. Rev. Lett.
93, 120408 (2004), Phys. Rev. A 86, 022310 (2012).
Returns
-------
results : Odedata
Object storing all results from simulation.
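Examples
--------
A minimal sketch (illustrative only; the Hamiltonian, collapse and expectation
operators below are made up for demonstration):
>>> H = 2 * np.pi * 0.1 * sigmax()
>>> psi0 = basis(2, 0)
>>> tlist = np.linspace(0, 10, 100)
>>> result = mcsolve_f90(H, psi0, tlist, [np.sqrt(0.05) * sigmam()],
...                      [sigmaz()], ntraj=250)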
"""
if ntraj is None:
ntraj = options.ntraj
if psi0.type != 'ket':
raise Exception("Initial state must be a state vector.")
odeconfig.options = options
# set num_cpus to the value given in qutip.settings
# if none in Odeoptions
if not odeconfig.options.num_cpus:
odeconfig.options.num_cpus = qutip.settings.num_cpus
# set initial value data
if options.tidy:
odeconfig.psi0 = psi0.tidyup(options.atol).full()
else:
odeconfig.psi0 = psi0.full()
odeconfig.psi0_dims = psi0.dims
odeconfig.psi0_shape = psi0.shape
# set general items
odeconfig.tlist = tlist
if isinstance(ntraj, (list, np.ndarray)):
raise Exception("ntraj as list argument is not supported.")
else:
odeconfig.ntraj = ntraj
# ntraj_list = [ntraj]
# set norm finding constants
odeconfig.norm_tol = options.norm_tol
odeconfig.norm_steps = options.norm_steps
if not options.rhs_reuse:
odeconfig.soft_reset()
# no time dependence
odeconfig.tflag = 0
# no gui
odeconfig.options.gui = False
# check for collapse operators
if len(c_ops) > 0:
odeconfig.cflag = 1
else:
odeconfig.cflag = 0
# Configure data
_mc_data_config(H, psi0, [], c_ops, [], [], e_ops, options, odeconfig)
# Load Monte Carlo class
mc = _MC_class()
# Set solver type
if (options.method == 'adams'):
mc.mf = 10
elif (options.method == 'bdf'):
mc.mf = 22
else:
if debug:
print('Unrecognized method for ode solver, using "adams".')
mc.mf = 10
# store ket and density matrix dims and shape for convenience
mc.psi0_dims = psi0.dims
mc.psi0_shape = psi0.shape
mc.dm_dims = (psi0 * psi0.dag()).dims
mc.dm_shape = (psi0 * psi0.dag()).shape
# use sparse density matrices during computation?
mc.sparse_dms = sparse_dms
# run in serial?
mc.serial_run = serial or (ntraj == 1)
# are we doing a partial trace for returned states?
mc.ptrace_sel = ptrace_sel
if (ptrace_sel != []):
if debug:
print("ptrace_sel set to " + str(ptrace_sel))
print("We are using dense density matrices during computation " +
"when performing partial trace. Setting sparse_dms = False")
print("This feature is experimental.")
mc.sparse_dms = False
mc.dm_dims = psi0.ptrace(ptrace_sel).dims
mc.dm_shape = psi0.ptrace(ptrace_sel).shape
if (calc_entropy):
if (ptrace_sel == []):
if debug:
print("calc_entropy = True, but ptrace_sel = []. Please set " +
"a list of components to keep when calculating average " +
"entropy of reduced density matrix in ptrace_sel. " +
"Setting calc_entropy = False.")
calc_entropy = False
mc.calc_entropy = calc_entropy
# construct output Odedata object
output = Odedata()
# Run
mc.run()
output.states = mc.sol.states
output.expect = mc.sol.expect
output.col_times = mc.sol.col_times
output.col_which = mc.sol.col_which
if (hasattr(mc.sol, 'entropy')):
output.entropy = mc.sol.entropy
output.solver = 'Fortran 90 Monte Carlo solver'
# simulation parameters
output.times = odeconfig.tlist
output.num_expect = odeconfig.e_num
output.num_collapse = odeconfig.c_num
output.ntraj = odeconfig.ntraj
return output
class _MC_class():
def __init__(self):
self.cpus = odeconfig.options.num_cpus
self.nprocs = self.cpus
self.sol = Odedata()
self.mf = 10
# If returning density matrices, return as sparse or dense?
self.sparse_dms = True
# Run in serial?
self.serial_run = False
self.ntraj = odeconfig.ntraj
self.ntrajs = []
self.seed = None
self.psi0_dims = None
self.psi0_shape = None
self.dm_dims = None
self.dm_shape = None
self.unravel_type = 2
self.ptrace_sel = []
self.calc_entropy = False
def parallel(self):
from multiprocessing import Process, Queue, JoinableQueue
if debug:
print(inspect.stack()[0][3])
self.ntrajs = []
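        # Distribute self.ntraj trajectories as evenly as possible over the
        # available CPUs, then drop workers that would receive zero trajectories.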
for i in range(self.cpus):
self.ntrajs.append(min(int(np.floor(float(self.ntraj)
/ self.cpus)),
self.ntraj - sum(self.ntrajs)))
cnt = sum(self.ntrajs)
while cnt < self.ntraj:
for i in range(self.cpus):
self.ntrajs[i] += 1
cnt += 1
if (cnt >= self.ntraj):
break
self.ntrajs = np.array(self.ntrajs)
self.ntrajs = self.ntrajs[np.where(self.ntrajs > 0)]
self.nprocs = len(self.ntrajs)
sols = []
processes = []
resq = JoinableQueue()
resq.join()
if debug:
print("Number of cpus: " + str(self.cpus))
print("Trying to start " + str(self.nprocs) + " process(es).")
print("Number of trajectories for each process: " + str(self.ntrajs))
for i in range(self.nprocs):
p = Process(target=self.evolve_serial,
args=((resq, self.ntrajs[i], i, self.seed * (i + 1)),))
p.start()
processes.append(p)
cnt = 0
while True:
try:
sols.append(resq.get())
resq.task_done()
cnt += 1
if (cnt >= self.nprocs):
break
except KeyboardInterrupt:
break
except:
pass
resq.join()
for proc in processes:
try:
proc.join()
except KeyboardInterrupt:
if debug:
print("Cancel thread on keyboard interrupt")
proc.terminate()
proc.join()
resq.close()
return sols
def serial(self):
if debug:
print(inspect.stack()[0][3])
self.nprocs = 1
self.ntrajs = [self.ntraj]
if debug:
print("Running in serial.")
print("Number of trajectories: " + str(self.ntraj))
sol = self.evolve_serial((0, self.ntraj, 0, self.seed))
return [sol]
def run(self):
if debug:
print(inspect.stack()[0][3])
from numpy.random import random_integers
if (odeconfig.c_num == 0):
# force one trajectory if no collapse operators
odeconfig.ntraj = 1
self.ntraj = 1
# Set unravel_type to 1 to integrate without collapses
self.unravel_type = 1
if (odeconfig.e_num == 0):
# If we are returning states, and there are no
# collapse operators, set average_states to False to return
# ket vectors instead of density matrices
odeconfig.options.average_states = False
# generate a random seed, useful if e.g. running with MPI
self.seed = random_integers(1e8)
if (self.serial_run):
            # run in serial
# ##
# Copyright 2016 <NAME>, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ##
# -*- coding: utf-8 -*-
""" Upload Component Command for rdmc """
import os
import sys
import json
import time
import shutil
from random import choice
from string import ascii_lowercase
from argparse import ArgumentParser
import ctypes
from ctypes import c_char_p, c_int, c_uint32
from six.moves import input
import redfish.hpilo.risblobstore2 as risblobstore2
from redfish.ris.rmc_helper import InvalidPathError
from rdmc_base_classes import RdmcCommandBase, add_login_arguments_group
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, Encryption, UploadError, \
    InvalidCommandLineError, InvalidFileInputError, IncompatibleiLOVersionError, TimeOutError
def human_readable_time(seconds):
""" Returns human readable time
:param seconds: Amount of seconds to parse.
    :type seconds: int, float, or numeric str.
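    Example (illustrative):
        human_readable_time(3725) returns "1 hour(s) 2 minute(s) 5 second(s) "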
"""
seconds = int(seconds)
    # use floor division so the breakdown stays integral on Python 3 as well
    hours = seconds // 3600
    seconds = seconds % 3600
    minutes = seconds // 60
    seconds = seconds % 60
return str(hours) + " hour(s) " + str(minutes) + \
" minute(s) " + str(seconds) + " second(s) "
class UploadComponentCommand(RdmcCommandBase):
""" Constructor """
def __init__(self, rdmcObj):
RdmcCommandBase.__init__(self, \
name='uploadcomp', \
            usage='uploadcomp [OPTIONS]\n\n\tRun to upload the component onto ' \
                'the iLO Repository\n\n\tUpload component to the iLO ' \
'repository.\n\texample: uploadcomp --component <path> ' \
'--compsig <path_to_signature>\n\n\tFlash the component ' \
'instead of add to the iLO repository.\n\texample: ' \
'uploadcomp --component <binary_path> --update_target ' \
'--update_repository', \
summary='Upload components/binary to the iLO Repository.', \
aliases=['Uploadcomp'], \
argparser=ArgumentParser())
self.definearguments(self.parser)
self._rdmc = rdmcObj
self.typepath = rdmcObj.app.typepath
self.lobobj = rdmcObj.commands_dict["LoginCommand"](rdmcObj)
self.logoutobj = rdmcObj.commands_dict["LogoutCommand"](rdmcObj)
self.fwpkgprepare = rdmcObj.commands_dict["FwpkgCommand"].preparefwpkg
def run(self, line):
""" Wrapper function for upload command main function
:param line: string of arguments passed in
:type line: str.
"""
try:
(options, _) = self._parse_arglist(line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.uploadcommandvalidation(options)
fwpkg = False
if options.component.endswith('.fwpkg'):
fwpkg = True
comp, loc, ctype = self.fwpkgprepare(self, options.component)
if ctype == 'C':
options.component = comp[0]
else:
options.component = os.path.join(loc, comp[0])
if self.typepath.defs.isgen9:
raise IncompatibleiLOVersionError(\
'iLO Repository commands are only available on iLO 5.')
filestoupload = self._check_and_split_files(options)
if self.componentvalidation(options, filestoupload):
start_time = time.time()
ret = ReturnCodes.FAILED_TO_UPLOAD_COMPONENT
if 'blobstore' in self._rdmc.app.current_client.base_url:
ret = self.uploadlocally(filestoupload, options)
else:
ret = self.uploadfunction(filestoupload, options)
sys.stdout.write("%s\n" % human_readable_time(time.time() - start_time))
if len(filestoupload) > 1:
path, _ = os.path.split((filestoupload[0])[1])
shutil.rmtree(path)
elif fwpkg:
shutil.rmtree(loc)
if options.logout:
self.logoutobj.run("")
else:
ret = ReturnCodes.SUCCESS
return ret
def componentvalidation(self, options, filelist):
""" Check for duplicate component in repository
:param options: command line options
:type options: list.
        :param filelist: list of files to be uploaded (multiple files will be
                         generated for items over 32 MB in size)
:type filelist: list of strings
"""
validation = True
prevfile = None
path = '/redfish/v1/UpdateService/ComponentRepository/?$expand=.'
results = self._rdmc.app.get_handler(path, service=True, silent=True)
results = results.dict
if 'Members' in results and results['Members']:
for comp in results['Members']:
for filehndl in filelist:
if comp['Filename'].upper() == unicode(filehndl[0]).upper()\
and not options.forceupload and prevfile != filehndl[0].upper():
ans = input("A component with the same name (%s) has " \
"been found. Would you like to upload and "\
"overwrite this file? (y/n)" % comp['Filename'])
if ans.lower() == 'n':
sys.stdout.write('Upload stopped by user due to filename conflict.'\
' If you would like to bypass this check include the'\
' "--forceupload" flag.\n')
validation = False
break
if comp['Filename'].upper() == unicode(filehndl[0]).upper()\
and prevfile != filehndl[0].upper() and comp['Locked']:
sys.stdout.write('Component is currently locked by a taskqueue task or '\
'installset. Remove any installsets or taskqueue tasks '\
'containing the file and try again.\n')
validation = False
break
prevfile = str(comp['Filename'].upper())
return validation
def _check_and_split_files(self, options):
""" Check and split the file to upload on to iLO Repository
:param options: command line options
:type options: list.
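        Illustrative shape of the returned list (inferred from the method body;
        the temporary cache directory name is randomly generated):
            [(ilo_filename, path_on_disk, compsig_path, section_index), ...]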
"""
def check_file_wr(filename, rw):
try:
fd = open(filename, rw)
fd.close()
except IOError:
raise InvalidFileInputError("The file \'%s\' could not be opened for upload" % \
filename)
maxcompsize = 32 * 1024 * 1024
size = os.path.getsize(options.component)
filelist = []
# Lets get the component filename
_, filename = os.path.split(options.component)
check_file_wr(os.path.normpath(options.component), 'r')
# This is to upload the binary directly to flash scenario
if not options.componentsig:
if not self.findcompsig(filename):
return [(filename, options.component, options.componentsig, 0)]
if size > maxcompsize:
sys.stdout.write("Component is more than 32MB in size. ")
sys.stdout.write("Component size = %s\n" % str(size))
section = 1
sigpath, _ = os.path.split(options.componentsig)
check_file_wr(os.path.normpath(options.componentsig), 'r')
filebasename = filename[:filename.rfind('.')]
tempfoldername = "bmn" + ''.join(choice(ascii_lowercase) for i in range(12))
tempdir = os.path.join(self._rdmc.app.config.get_cachedir(), tempfoldername)
sys.stdout.write("Spliting component. Temporary " \
"cache directory at %s\n" % tempdir)
if not os.path.exists(tempdir):
os.makedirs(tempdir)
with open(options.component, 'rb') as component:
while True:
data = component.read(maxcompsize)
if len(data) != 0:
sectionfilename = filebasename + "_part" + str(section)
sectionfilepath = os.path.join(tempdir, sectionfilename)
sectioncompsigpath = os.path.join(sigpath, sectionfilename + ".compsig")
writefile = open(sectionfilepath, 'wb')
writefile.write(data)
writefile.close()
item = (filename, sectionfilepath, sectioncompsigpath, section - 1)
filelist.append(item)
section += 1
if len(data) != maxcompsize:
break
return filelist
else:
return [(filename, options.component, options.componentsig, 0)]
def uploadfunction(self, filelist, options=None):
""" Main upload command worker function
:param filelist: List of files to upload.
:type filelist: list.
:param options: command line options
:type options: list.
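        Illustrative shape of the multipart payload posted for each file section
        (mirrors the body below; actual values depend on the command options):
            parameters = {'UpdateRepository': ..., 'UpdateTarget': ...,
                          'ETag': 'sum<filename>', 'Section': 0}
            data = [('sessionKey', <key>), ('parameters', json.dumps(parameters)),
                    ('file', (<name>, <bytes>, 'application/octet-stream'))]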
"""
# returns a tuple with the state and the result dict
state, result = self.get_update_service_state()
if (state != "COMPLETED" and
state != "COMPLETE" and
state != "ERROR" and
state != "IDLE"):
sys.stdout.write("iLO UpdateService is busy. Please try again.")
return ReturnCodes.UPDATE_SERVICE_BUSY
sessionkey = self._rdmc.app.current_client.session_key
etag = ""
hpe = result['Oem']['Hpe']
urltosend = "/cgi-bin/uploadFile"
if 'PushUpdateUri' in hpe:
urltosend = hpe['PushUpdateUri']
elif 'HttpPushUri' in result:
urltosend = result['HttpPushUri']
else:
return ReturnCodes.FAILED_TO_UPLOAD_COMPONENT
for item in filelist:
ilo_upload_filename = item[0]
ilo_upload_compsig_filename = ilo_upload_filename[\
:ilo_upload_filename.rfind('.')] + ".compsig"
componentpath = item[1]
compsigpath = item[2]
_, filename = os.path.split(componentpath)
if not etag:
etag = "sum" + filename.replace('.', '')
etag = etag.replace('-', '')
etag = etag.replace('_', '')
section_num = item[3]
parameters = {'UpdateRepository': options.update_repository, \
'UpdateTarget': options.update_target, \
'ETag': etag, 'Section': section_num}
data = [('sessionKey', sessionkey), ('parameters', json.dumps(parameters))]
if not compsigpath:
compsigpath = self.findcompsig(componentpath)
if compsigpath:
with open(compsigpath, 'rb') as fle:
output = fle.read()
data.append(('compsig', (ilo_upload_compsig_filename, output, \
'application/octet-stream')))
output = None
with open(componentpath, 'rb') as fle:
output = fle.read()
data.append(('file', (ilo_upload_filename, output, 'application/octet-stream')))
res = self._rdmc.app.post_handler(str(urltosend), data, response=True, \
headers={'Cookie': 'sessionKey=' + sessionkey})
if res.status != 200:
return ReturnCodes.FAILED_TO_UPLOAD_COMPONENT
else:
sys.stdout.write("Component " + filename + \
" uploaded successfully\n")
if not self.wait_for_state_change():
# Failed to upload the component.
raise UploadError("Error while processing the component.")
return ReturnCodes.SUCCESS
def wait_for_state_change(self, wait_time=420):
""" Wait for the iLO UpdateService to a move to terminal state.
:param options: command line options
:type options: list.
:param wait_time: time to wait on upload
:type wait_time: int.
"""
total_time = 0
spinner = ['|', '/', '-', '\\']
state = ""
sys.stdout.write("Waiting for iLO UpdateService to finish processing the component\n")
while total_time < wait_time:
state, _ = self.get_update_service_state()
if state == "ERROR":
return False
elif state != "COMPLETED" and state != "IDLE" and state != "COMPLETE":
# Lets try again after 8 seconds
count = 0
# fancy spinner
while count <= 32:
sys.stdout.write('Updating: %s\r' % spinner[count % 4])
time.sleep(0.25)
count += 1
total_time += 8
else:
break
if total_time > wait_time:
raise TimeOutError("UpdateService in " + state + " state for " + str(wait_time) + "s")
return True
def get_update_service_state(self):
""" Get the current UpdateService state
:param options: command line options
:type options: list.
"""
path = "/redfish/v1/UpdateService"
results = self._rdmc.app.get_handler(path, service=True, silent=True)
if results and results.status == 200 and results.dict:
output = results.dict
if self._rdmc.opts.verbose:
sys.stdout.write("UpdateService state = " + \
(output['Oem']['Hpe']['State']).upper() + "\n")
return (output['Oem']['Hpe']['State']).upper(), results.dict
else:
return 'UNKNOWN', {}
def findcompsig(self, comppath):
"""Try to find compsig if not included
:param comppath: Path of file to find compsig for.
:type comppath: str.
"""
compsig = ''
cutpath = comppath.split(os.sep)
_file = cutpath[-1]
        _file_rev
(self == other)
class next_row_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype70, _size67) = iprot.readListBegin()
for _i71 in xrange(_size67):
_elem72 = Cell()
_elem72.read(iprot)
self.success.append(_elem72)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('next_row_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter73 in self.success:
iter73.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class next_row_as_arrays_args:
"""
Attributes:
- scanner
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'scanner', None, None, ), # 1
)
def __init__(self, scanner=None,):
self.scanner = scanner
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.scanner = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('next_row_as_arrays_args')
if self.scanner != None:
oprot.writeFieldBegin('scanner', TType.I64, 1)
oprot.writeI64(self.scanner)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
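# A minimal serialization sketch (not part of the generated service code): the
# *_args / *_result structs above can be round-tripped through a TMemoryBuffer
# using the plain TBinaryProtocol already imported by this module. The helper
# name below is hypothetical.
def _roundtrip_next_row_as_arrays_args(scanner_id):
    """Encode and decode a next_row_as_arrays_args struct; returns the scanner id."""
    write_buf = TTransport.TMemoryBuffer()
    next_row_as_arrays_args(scanner=scanner_id).write(TBinaryProtocol.TBinaryProtocol(write_buf))
    read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
    decoded = next_row_as_arrays_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(read_buf))
    return decoded.scanner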
class next_row_as_arrays_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.LIST,(TType.STRING,None)), None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype77, _size74) = iprot.readListBegin()
for _i78 in xrange(_size74):
_elem79 = []
(_etype83, _size80) = iprot.readListBegin()
for _i84 in xrange(_size80):
_elem85 = iprot.readString();
_elem79.append(_elem85)
iprot.readListEnd()
self.success.append(_elem79)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('next_row_as_arrays_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.LIST, len(self.success))
for iter86 in self.success:
oprot.writeListBegin(TType.STRING, len(iter86))
for iter87 in iter86:
oprot.writeString(iter87)
oprot.writeListEnd()
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class next_row_serialized_args:
"""
Attributes:
- scanner
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'scanner', None, None, ), # 1
)
def __init__(self, scanner=None,):
self.scanner = scanner
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.scanner = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('next_row_serialized_args')
if self.scanner != None:
oprot.writeFieldBegin('scanner', TType.I64, 1)
oprot.writeI64(self.scanner)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class next_row_serialized_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = ClientException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('next_row_serialized_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.e != None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_row_args:
"""
Attributes:
- ns
- table_name
- row
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'ns', None, None, ), # 1
(2, TType.STRING, 'table_name', None, None, ), # 2
(3, TType.STRING, 'row', None, None, ), # 3
)
def __init__(self, ns=None, table_name=None, row=None,):
self.ns = ns
self.table_name = table_name
self.row = row
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.ns = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.table_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.row = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_row_args')
if self.ns != None:
oprot.writeFieldBegin('ns', TType.I64, 1)
oprot.writeI64(self.ns)
oprot.writeFieldEnd()
if self.table_name != None:
oprot.writeFieldBegin('table_name', TType.STRING, 2)
oprot.writeString(self.table_name)
oprot.writeFieldEnd()
if self.row != None:
oprot.writeFieldBegin('row', TType.STRING, 3)
oprot.writeString(self.row)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_row_result:
"""
Attributes:
- success
- e
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(Cell, Cell.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'e', (ClientException, ClientException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype91, _size88) = iprot.readListBegin()
for _i92 in xrange(_size88):
_elem93 = Cell()
_elem93.read(iprot)
self.success.append(_elem93)
iprot.readListEnd()
else:
iprot.skip(ftype)
      elif fid ==
import sys
sys.path.append('/home/george2/Raise/ProgramRepair/CodeSeer/projects/src/main/python')
from CodeJam.Y14R5P1.kusano.A import *
def func_423b915d9ce740ba94f31ad435e988c2(D):
a -= 1
B += D[a]
return B
def func_b94e6667fda54c809e409e83d306253b(D):
a -= 1
B += D[a]
return a
def func_01f4c9980c1e402db83c6b2b44957ea7(D, a):
B += D[a]
A -= D[a]
return A
def func_a6da5619e76d4b5b899ae8c98b2a8895(D, a):
B += D[a]
A -= D[a]
return B
def func_48c6027e0d8f49bcb228a602fd904ca2(D):
a -= 1
B += D[a]
A -= D[a]
return B
def func_72f1e170c35643878cee7af17f621942(D):
a -= 1
B += D[a]
A -= D[a]
return A
def func_00e15a6009724540b1e8c65086c6d98d(D):
a -= 1
B += D[a]
A -= D[a]
return a
def func_ca0ad3a583b84900ae17904df72d19ff(D, a):
B -= D[a]
A += D[a]
return B
def func_a73f62a8aaa541148bd6b4a43ca354b3(D, a):
B -= D[a]
A += D[a]
return A
def func_91fea46230c44582b41482c58c57a419(D):
A += D[a]
a += 1
return A
def func_a3989d71f18d4e5c9e40284adbe9f35b(D):
A += D[a]
a += 1
return a
def func_1f157e3e7b0f47e1975c0532cb0e6261(C, B, A):
a += 1
t = max(A, B, C)
return a
def func_226808347d7d4d479eb30ebab4af1533(C, B, A):
a += 1
t = max(A, B, C)
return t
def func_e959cf105f9d4ba49a4e520762b40dc2(D):
B -= D[a]
A += D[a]
a += 1
return B
def func_51cc323a577b4f66b43a43f525231061(D):
B -= D[a]
A += D[a]
a += 1
return A
def func_24493df319fa46949023b48a7293ab0c(D):
B -= D[a]
A += D[a]
a += 1
return a
def func_84115b907e8b45eca7fcbcf566584a30(C, B, D):
A += D[a]
a += 1
t = max(A, B, C)
return t
def func_a871538e9489492a912b0e85bb526a17(C, B, D):
A += D[a]
a += 1
t = max(A, B, C)
return A
def func_d9f777c452154c96aef0c14e1fc564dc(C, B, D):
A += D[a]
a += 1
t = max(A, B, C)
return a
def func_bab03939161f42d098797010f9d79207(C, D):
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
return t
def func_7a1a0ca356414b05ba5071f9a62daaea(C, D):
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
return B
def func_571a239af28d4d5694c5d45bce687b04(C, D):
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
return a
def func_6efed0d2d6cf491aaacbca184552e776(C, D):
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
return A
def func_c2352deb110a442188b2c76a643abc18(D):
b += 1
C -= D[b]
return C
def func_02faa767a67646f6ab5e465012c29197(D):
b += 1
C -= D[b]
return b
def func_8620ab6842af46f4bf11c3744eaf3d25(b, D):
C -= D[b]
B += D[b]
return C
def func_e756efaa56434464a2046204173de711(b, D):
C -= D[b]
B += D[b]
return B
def func_57cbfd0df9a44afca326c0ca7c064031(C, b, D, A):
B += D[b]
p = max(A, B, C)
return B
def func_717ac3ae6ef04ca78327f3d4411a95a0(C, b, D, A):
B += D[b]
p = max(A, B, C)
return p
def func_87d62cd5a7d9423da8e178433d11d0bd(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return t
def func_729f4b0abdee45db92697ad60f22de43(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return B
def func_224443b73bf94833951150b9c0ac39d6(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return A
def func_046e3efa7fa4462f844b092d96dac21a(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return a
def func_bb0e4f90a1d24f84a234dc3da71019cf(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return p
def func_d57a3651beb14ede861a92d8e43841bd(C, b, D):
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return B
def func_cd1127704f9f4e1e9955f038cf61d483(C, b, D):
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return a
def func_0b04e22065404608b3ff4537fcebd37b(C, b, D):
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return A
def func_e15be5cd479f4d02854035982ffe0a59(C, b, D):
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return p
def func_b0ffc645b8fb4cd38b1f208ee5ba1bd9(C, b, D):
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return t
def func_d68e7071a5634c2e9b44bcdd8af173b0(C, b, D):
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return ans
def func_94b93592fa214040b0ee2e0724c2c401(D):
b += 1
C -= D[b]
B += D[b]
return B
def func_add4a515daa14ce798537566a9dc9cb2(D):
b += 1
C -= D[b]
B += D[b]
return C
def func_4522067fe54a4a62aa1e715ea4165da7(D):
b += 1
C -= D[b]
B += D[b]
return b
def func_093e85d8dfb84c5195be05ad7e63fa96(b, D, A):
C -= D[b]
B += D[b]
p = max(A, B, C)
return p
def func_e5ac5214bc254084b78cd58ab3edc207(b, D, A):
C -= D[b]
B += D[b]
p = max(A, B, C)
return C
def func_91342846479b47fcaa0fccc8e5941249(b, D, A):
C -= D[b]
B += D[b]
p = max(A, B, C)
return B
def func_06aee82aae7a435e833abc86781dd193(C, b, D):
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return A
def func_b0e69cf895814692be2e6194658be459(C, b, D):
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return B
def func_4693be6469624743a699ce6802c68bec(C, b, D):
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return a
def func_a16820b104c24240a6775804f8ea3fb1(C, b, D):
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return t
def func_b4bb4e9f3b4946b6a152524ced22a653(C, b, D):
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
return p
def func_5045ca50c3b54f33a6342da74145fbff(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return p
def func_d565540fc63340caa481d7287cdb79e6(C, b, D):
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
return ans
def func_ead070062a6c4ec2b22cffb9686c5e1a(C, b, D):
    p
1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j * t_values[22]))
+ (R24 / (1 + w * 1j * t_values[23]))
+ (R25 / (1 + w * 1j * t_values[24]))
+ (R26 / (1 + w * 1j * t_values[25]))
+ (R27 / (1 + w * 1j * t_values[26]))
+ (R28 / (1 + w * 1j * t_values[27]))
+ (R29 / (1 + w * 1j * t_values[28]))
+ (R30 / (1 + w * 1j * t_values[29]))
+ (R31 / (1 + w * 1j * t_values[30]))
+ (R32 / (1 + w * 1j * t_values[31]))
+ (R33 / (1 + w * 1j * t_values[32]))
+ (R34 / (1 + w * 1j * t_values[33]))
+ (R35 / (1 + w * 1j * t_values[34]))
+ (R36 / (1 + w * 1j * t_values[35]))
+ (R37 / (1 + w * 1j * t_values[36]))
+ (R38 / (1 + w * 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
+ (R44 / (1 + w * 1j * t_values[43]))
+ (R45 / (1 + w * 1j * t_values[44]))
+ (R46 / (1 + w * 1j * t_values[45]))
+ (R47 / (1 + w * 1j * t_values[46]))
+ (R48 / (1 + w * 1j * t_values[47]))
+ (R49 / (1 + w * 1j * t_values[48]))
+ (R50 / (1 + w * 1j * t_values[49]))
+ (R51 / (1 + w * 1j * t_values[50]))
+ (R52 / (1 + w * 1j * t_values[51]))
+ (R53 / (1 + w * 1j * t_values[52]))
+ (R54 / (1 + w * 1j * t_values[53]))
)
def KK_RC55_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
R11 = params["R11"]
R12 = params["R12"]
R13 = params["R13"]
R14 = params["R14"]
R15 = params["R15"]
R16 = params["R16"]
R17 = params["R17"]
R18 = params["R18"]
R19 = params["R19"]
R20 = params["R20"]
R21 = params["R21"]
R22 = params["R22"]
R23 = params["R23"]
R24 = params["R24"]
R25 = params["R25"]
R26 = params["R26"]
R27 = params["R27"]
R28 = params["R28"]
R29 = params["R29"]
R30 = params["R30"]
R31 = params["R31"]
R32 = params["R32"]
R33 = params["R33"]
R34 = params["R34"]
R35 = params["R35"]
R36 = params["R36"]
R37 = params["R37"]
R38 = params["R38"]
R39 = params["R39"]
R40 = params["R40"]
R41 = params["R41"]
R42 = params["R42"]
R43 = params["R43"]
R44 = params["R44"]
R45 = params["R45"]
R46 = params["R46"]
R47 = params["R47"]
R48 = params["R48"]
R49 = params["R49"]
R50 = params["R50"]
R51 = params["R51"]
R52 = params["R52"]
R53 = params["R53"]
R54 = params["R54"]
R55 = params["R55"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * 1j * t_values[1]))
+ (R3 / (1 + w * 1j * t_values[2]))
+ (R4 / (1 + w * 1j * t_values[3]))
+ (R5 / (1 + w * 1j * t_values[4]))
+ (R6 / (1 + w * 1j * t_values[5]))
+ (R7 / (1 + w * 1j * t_values[6]))
+ (R8 / (1 + w * 1j * t_values[7]))
+ (R9 / (1 + w * 1j * t_values[8]))
+ (R10 / (1 + w * 1j * t_values[9]))
+ (R11 / (1 + w * 1j * t_values[10]))
+ (R12 / (1 + w * 1j * t_values[11]))
+ (R13 / (1 + w * 1j * t_values[12]))
+ (R14 / (1 + w * 1j * t_values[13]))
+ (R15 / (1 + w * 1j * t_values[14]))
+ (R16 / (1 + w * 1j * t_values[15]))
+ (R17 / (1 + w * 1j * t_values[16]))
+ (R18 / (1 + w * 1j * t_values[17]))
+ (R19 / (1 + w * 1j * t_values[18]))
+ (R20 / (1 + w * 1j * t_values[19]))
+ (R21 / (1 + w * 1j * t_values[20]))
+ (R22 / (1 + w * 1j * t_values[21]))
+ (R23 / (1 + w * 1j * t_values[22]))
+ (R24 / (1 + w * 1j * t_values[23]))
+ (R25 / (1 + w * 1j * t_values[24]))
+ (R26 / (1 + w * 1j * t_values[25]))
+ (R27 / (1 + w * 1j * t_values[26]))
+ (R28 / (1 + w * 1j * t_values[27]))
+ (R29 / (1 + w * 1j * t_values[28]))
+ (R30 / (1 + w * 1j * t_values[29]))
+ (R31 / (1 + w * 1j * t_values[30]))
+ (R32 / (1 + w * 1j * t_values[31]))
+ (R33 / (1 + w * 1j * t_values[32]))
+ (R34 / (1 + w * 1j * t_values[33]))
+ (R35 / (1 + w * 1j * t_values[34]))
+ (R36 / (1 + w * 1j * t_values[35]))
+ (R37 / (1 + w * 1j * t_values[36]))
+ (R38 / (1 + w * 1j * t_values[37]))
+ (R39 / (1 + w * 1j * t_values[38]))
+ (R40 / (1 + w * 1j * t_values[39]))
+ (R41 / (1 + w * 1j * t_values[40]))
+ (R42 / (1 + w * 1j * t_values[41]))
+ (R43 / (1 + w * 1j * t_values[42]))
+ (R44 / (1 + w * 1j * t_values[43]))
+ (R45 / (1 + w * 1j * t_values[44]))
+ (R46 / (1 + w * 1j * t_values[45]))
+ (R47 / (1 + w * 1j * t_values[46]))
+ (R48 / (1 + w * 1j * t_values[47]))
+ (R49 / (1 + w * 1j * t_values[48]))
+ (R50 / (1 + w * 1j * t_values[49]))
+ (R51 / (1 + w * 1j * t_values[50]))
+ (R52 / (1 + w * 1j * t_values[51]))
+ (R53 / (1 + w * 1j * t_values[52]))
+ (R54 / (1 + w * 1j * t_values[53]))
+ (R55 / (1 + w * 1j * t_values[54]))
)
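# A minimal vectorized sketch (not one of this module's KK_RCxx_fit functions):
# every KK_RCxx_fit above evaluates Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k) with a
# fixed, unrolled number of RC elements. The hypothetical helper below computes the
# same quantity for len(t_values) elements, assuming `params` behaves like a mapping
# with keys "Rs", "R1", "R2", ... as used throughout this module.
def KK_RC_generic_fit(params, w, t_values):
    """Generic -RC- Kramers-Kronig fit for an arbitrary number of RC elements."""
    resistances = [params["R%d" % (k + 1)] for k in range(len(t_values))]
    return params["Rs"] + sum(
        R_k / (1 + w * 1j * tau_k) for R_k, tau_k in zip(resistances, t_values)
    )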
def KK_RC56_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = |