ext
stringclasses 9
values | sha
stringlengths 40
40
| content
stringlengths 3
1.04M
|
---|---|---|
py | 7dfb00544334ecab24d9880b2b932bd8168f9523 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .b2_c_tenant import *
from .get_b2_c_tenant import *
from .get_guest_usage import *
from .guest_usage import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
    # Static type checkers see real imports so the subpackage symbols resolve.
    import pulumi_azure_native.azureactivedirectory.v20190101preview as __v20190101preview
    v20190101preview = __v20190101preview
    import pulumi_azure_native.azureactivedirectory.v20200501preview as __v20200501preview
    v20200501preview = __v20200501preview
else:
    # At runtime the subpackages are loaded lazily to keep import time low.
    v20190101preview = _utilities.lazy_import('pulumi_azure_native.azureactivedirectory.v20190101preview')
    v20200501preview = _utilities.lazy_import('pulumi_azure_native.azureactivedirectory.v20200501preview')
|
py | 7dfb009372afca4413394772f4ce5da3bfcc10bd | from random import shuffle
from procgame.dmd import GroupedLayer, TextLayer
from procgame.game import Mode
from crimescenes import CrimeSceneShots
from timer import TimedMode
class UltimateChallenge(Mode):
    """Wizard mode or start of supergame.

    Runs the four dark judge modes (Fear, Mortis, Death, Fire) in sequence,
    followed by Celebration.  Progress is persisted per player in the
    'challenge_mode' player state so a player resumes where he left off.
    """

    def __init__(self, game, priority):
        super(UltimateChallenge, self).__init__(game, priority)
        # Sub-modes run at priority + 1 so they sit above this container mode.
        self.fear = Fear(game, self.priority + 1)
        self.mortis = Mortis(game, self.priority + 1)
        self.death = Death(game, self.priority + 1)
        self.fire = Fire(game, self.priority + 1)
        self.celebration = Celebration(game, self.priority + 1)
        self.mode_list = [self.fear, self.mortis, self.death, self.fire, self.celebration]
        # The four judge modes advance the challenge when they end;
        # Celebration ends the whole challenge instead.
        for mode in self.mode_list[0:4]:
            mode.exit_callback = self.judge_level_ended
        self.celebration.exit_callback = self.end_challenge

    def mode_started(self):
        # Resume from the level stored for this player (0 = Fear on first entry).
        self.active_mode = self.game.getPlayerState('challenge_mode', 0)
        self.game.coils.resetDropTarget.pulse(30)
        self.continue_after_drain = False
        self.start_level()

    def mode_stopped(self):
        # when Celebration was awarded, the next challenge starts from the beginning
        self.game.setPlayerState('challenge_mode', self.active_mode if self.active_mode < 4 else 0)
        self.game.remove_modes([self.mode_list[self.active_mode]])

    def start_level(self):
        # Start (or resume) the currently active sub-mode.
        self.game.enable_flippers(True)
        if self.game.num_balls_requested() == 0:
            # serve one ball in the shooter lane and wait for player to plunge
            self.game.base_play.auto_plunge = False
            self.game.launch_balls(1)
        self.game.modes.add(self.mode_list[self.active_mode])
        self.game.update_lamps()
        self.game.sound.play_music('mode', loops=-1)
        self.mode_list[self.active_mode].ready()

    def judge_level_ended(self, success=True):
        # Exit callback for the four judge modes: let all balls drain
        # intentionally; evt_ball_drained() decides what happens next.
        self.game.ball_save.disable()
        self.game.sound.fadeout_music()
        # drain intentionally
        # delay ball search, hopefully we can drain quietly before ball search triggers
        self.game.enable_flippers(False)
        self.game.base_play.boring.pause()
        self.game.ball_search.reset(None)
        # success will start next level, failure will end player's turn
        self.continue_after_drain = success

    def evt_ball_drained(self):
        if self.continue_after_drain:
            if self.game.num_balls_requested() == 0:
                # all balls are down: safe to advance to the next level
                self.continue_after_drain = False
                self.next_level()
            # abort the event to ignore this drain
            return True

    def next_level(self):
        # all balls have intentionally drained, move to the next mode
        self.game.remove_modes([self.mode_list[self.active_mode]])
        self.active_mode += 1  # next mode
        self.start_level()

    def end_challenge(self):
        # go back to regular play after Celebration
        self.game.remove_modes([self])
        self.game.update_lamps()
        self.exit_callback()

    def update_lamps(self):
        self.game.lamps.ultChallenge.enable()
        self.game.disable_drop_lamps()

    def sw_popperR_active_for_300ms(self, sw):
        # kick the ball out of the right popper with a flasher show
        self.game.base_play.flash_then_pop('flashersRtRamp', 'popperR', 20)
class ChallengeBase(TimedMode):
    """Base class for all wizard modes.

    Provides the shared plumbing: serving/launching balls, an optional
    countdown timer, an optional ball save, and drop target resets.
    """

    def __init__(self, game, priority, initial_time, instructions, num_shots_required, num_balls, ball_save_time):
        # The mode name shown on the display is simply the subclass name.
        name = self.__class__.__name__
        super(ChallengeBase, self).__init__(game, priority, 0, name, instructions, num_shots_required)
        self.initial_time = initial_time  # countdown length in seconds; 0 disables the timer
        self.num_balls = num_balls  # balls in play for this mode (>1 = multiball)
        self.ball_save_time = ball_save_time  # ball save length in seconds; 0 disables it

    def mode_started(self):
        super(ChallengeBase, self).mode_started()
        self.started = False  # becomes True once the first ball is in play
        if self.initial_time > 0:
            # display initial time without starting countdown
            self.timer_update(self.initial_time)
        if self.num_balls > 1:
            self.game.addPlayerState('multiball_active', 0x4)

    def mode_stopped(self):
        super(ChallengeBase, self).mode_stopped()
        if self.num_balls > 1:
            self.game.addPlayerState('multiball_active', -0x4)

    def ready(self):
        # Called by UltimateChallenge once this mode has been added.
        if self.game.switches.popperR.is_active():
            # we were started from regular mode
            # put the ball back in play and start the timer if applicable
            self.game.base_play.flash_then_pop('flashersRtRamp', 'popperR', 20)
            self.start()
        else:
            # wait for the player to plunge the ball
            self.game.sound.play_music('ball_launch', loops=-1)

    def start(self):
        # the first ball is now in play (popped from popperR or plunged by player)
        # launch remaining balls for the mode (if applicable)
        self.game.base_play.auto_plunge = True
        balls_to_launch = self.num_balls - self.game.num_balls_requested()
        if balls_to_launch > 0:
            self.game.launch_balls(balls_to_launch)
        if self.ball_save_time > 0:
            self.game.ball_save_start(time=self.ball_save_time, now=True, allow_multiple_saves=True)
        if self.initial_time > 0:
            self.start_timer(self.initial_time)
        self.started = True

    def sw_shooterR_inactive_for_900ms(self, sw):
        # the player plunged the first ball: start the mode proper
        if not self.started:
            self.start()

    def sw_leftRampToLock_active(self, sw):
        # no ball locks during wizard modes: kick the ball straight back out
        self.game.deadworld.eject_balls(1)

    def sw_dropTargetJ_active_for_250ms(self, sw):
        self.drop_target_active()

    def sw_dropTargetU_active_for_250ms(self, sw):
        self.drop_target_active()

    def sw_dropTargetD_active_for_250ms(self, sw):
        self.drop_target_active()

    def sw_dropTargetG_active_for_250ms(self, sw):
        self.drop_target_active()

    def sw_dropTargetE_active_for_250ms(self, sw):
        self.drop_target_active()

    def drop_target_active(self):
        # Default: drop targets play no role in the mode, raise them again.
        # Subclasses override this when drop targets matter.
        self.reset_drops()

    def reset_drops(self):
        self.game.coils.resetDropTarget.pulse(30)
class DarkJudge(ChallengeBase):
    """Base class for dark judge wizard modes."""

    def __init__(self, game, priority, initial_time, instructions, num_shots_required, num_balls, ball_save_time):
        super(DarkJudge, self).__init__(game, priority, initial_time, instructions, num_shots_required, num_balls, ball_save_time)
        # Taunt voice clips are named after the mode, e.g. 'fear - taunt'.
        self.taunt_sound = self.name.lower() + ' - taunt'
        self.text_layer = TextLayer(128/2, 7, self.game.fonts['tiny7'], 'center', opaque=True)
        wait_layer = TextLayer(128/2, 20, self.game.fonts['tiny7'], 'center', opaque=False).set_text('Wait, balls draining...')
        # Shown when the mode finishes, while remaining balls drain.
        self.finish_layer = GroupedLayer(128, 32, [self.text_layer, wait_layer])

    def mode_stopped(self):
        super(DarkJudge, self).mode_stopped()
        self.layer = None

    def expired(self):
        # mode timer ran out: the player failed the level
        self.finish(success=False)

    def taunt(self):
        # play the judge's taunt, then reschedule itself every 20s
        self.game.sound.play_voice(self.taunt_sound)
        self.delay(name='taunt', event_type=None, delay=20, handler=self.taunt)

    def check_for_completion(self):
        if self.num_shots == self.num_shots_required:
            self.finish(True)

    def finish(self, success):
        # End the level, show the result and notify UltimateChallenge.
        self.cancel_delayed('taunt')
        self.stop_timer()
        self.game.update_lamps()
        # NOTE: parses as (self.name + ' Defeated') if success else 'You lose'
        self.text_layer.set_text(self.name + ' Defeated' if success else 'You lose')
        self.layer = self.finish_layer
        if success:
            self.game.score(100000)
            self.game.addPlayerState('num_dark_judges', 1)
        self.exit_callback(success)
class Fear(DarkJudge):
    """ Fear wizard mode
    Judge Fear is reigning terror on the city.
    Shoot alternating ramps then subway
    1 ball with temporary ball save.
    Timer is short and resets with every successful shot
    """

    def __init__(self, game, priority):
        # seconds allowed per shot; the timer restarts after every made shot
        self.time_for_shot = game.user_settings['Gameplay']['Time for Fear shot']
        ball_save_time = game.user_settings['Gameplay']['Fear ballsave time']
        super(Fear, self).__init__(game, priority, initial_time=self.time_for_shot, instructions='Shoot lit ramps then subway',
                    num_shots_required=5, num_balls=1, ball_save_time=ball_save_time)

    def mode_started(self):
        super(Fear, self).mode_started()
        self.mystery_lit = True  # mystery doubles the shot timer once per mode
        # phase progression: 'ramps' -> 'subway' -> 'finished'
        self.state = 'ramps'
        self.active_ramp = 'left'  # which ramp is currently lit

    def update_lamps(self):
        # flash the Fear flasher while the mode is still running
        schedule = 0x80808080 if self.state != 'finished' else 0
        self.game.coils.flasherFear.schedule(schedule=schedule, cycle_seconds=0, now=True)
        style = 'on' if self.mystery_lit else 'off'
        self.game.drive_lamp('mystery', style)
        # light only the currently active ramp's flasher
        schedule = 0x00030003 if self.state == 'ramps' and self.active_ramp == 'left' else 0
        self.game.coils.flasherPursuitL.schedule(schedule=schedule, cycle_seconds=0, now=True)
        schedule = 0x00030003 if self.state == 'ramps' and self.active_ramp == 'right' else 0
        self.game.coils.flasherPursuitR.schedule(schedule=schedule, cycle_seconds=0, now=True)
        # in the subway phase the D drop target must be down to open the subway
        style = 'medium' if self.state == 'subway' and self.game.switches.dropTargetD.is_inactive() else 'off'
        self.game.drive_lamp('dropTargetD', style)
        style = 'medium' if self.state == 'subway' else 'off'
        for lamp in ['pickAPrize', 'awardSafecracker', 'awardBadImpersonator', 'multiballJackpot']:
            self.game.drive_lamp(lamp, style)

    def sw_mystery_active(self, sw):
        if self.state != 'finished':
            self.game.sound.play('mystery')
            if self.mystery_lit:
                # one-time award: double the time allowed for the current shot
                self.mystery_lit = False
                self.reset_timer(2 * self.time_for_shot)
                self.game.update_lamps()

    def sw_leftRampExit_active(self, sw):
        if self.state == 'ramps' and self.active_ramp == 'left':
            self.ramp_shot_hit()

    def sw_rightRampExit_active(self, sw):
        if self.state == 'ramps' and self.active_ramp == 'right':
            self.ramp_shot_hit()

    def ramp_shot_hit(self):
        # A lit ramp was hit: score, then either enter the subway phase
        # (last ramp done) or light the other ramp, and restart the timer.
        if self.num_shots < self.num_shots_required - 1:
            self.game.score(10000)
            self.incr_num_shots()
            self.game.lampctrl.play_show('shot_hit', False, self.game.update_lamps)
            if self.num_shots == self.num_shots_required - 1:
                self.state = 'subway'
            else:
                # switch ramp
                self.active_ramp = 'right' if self.active_ramp == 'left' else 'left'
            self.reset_timer(self.time_for_shot)
        self.game.update_lamps()

    def sw_dropTargetD_inactive_for_400ms(self, sw):
        # in the subway phase the D target must stay down; knock it back down
        if self.state == 'subway':
            self.game.coils.tripDropTarget.pulse(60)

    def sw_dropTargetD_active_for_250ms(self, sw):
        if self.state == 'ramps':
            self.reset_drops()
        else:
            self.game.update_lamps()

    def reset_drops(self):
        # never raise the drops during the subway phase (D must stay down)
        if self.state != 'subway':
            super(Fear, self).reset_drops()

    def sw_subwayEnter1_closed(self, sw):
        self.subway_hit()

    # Ball might jump over first switch. Use 2nd switch as a catch all.
    def sw_subwayEnter2_closed(self, sw):
        if self.num_shots < self.num_shots_required:
            self.subway_hit()

    def subway_hit(self):
        # the final shot: only counts during the subway phase
        if self.state == 'subway':
            self.incr_num_shots()
            self.game.lampctrl.play_show('shot_hit', False, self.game.update_lamps)
            self.game.score(10000)
            self.finish(success=True)

    def finish(self, success):
        self.state = 'finished'
        super(Fear, self).finish(success)
class Mortis(DarkJudge):
    """Mortis wizard mode.

    Judge Mortis is spreading disease throughout the city.
    Shoot each of the five lit shots once.
    2 ball multiball with temporary ball save; there is no timer,
    the mode ends when the last ball is lost.
    """

    def __init__(self, game, priority):
        ball_save_time = game.user_settings['Gameplay']['Mortis ballsave time']
        super(Mortis, self).__init__(game, priority, initial_time=0, instructions='Shoot lit shots',
                    num_shots_required=5, num_balls=2, ball_save_time=ball_save_time)
        # One lamp per shot, in shot-index order (0..4).
        self.lamp_names = ['mystery', 'perp1G', 'perp3G', 'perp5G', 'stopMeltdown']

    def mode_started(self):
        super(Mortis, self).mode_started()
        # 1 = shot still required, 0 = already made
        self.targets = [1] * 5

    def timer_update(self, time):
        # do not show a timer countdown on display
        pass

    def update_lamps(self):
        # Flash the Mortis flasher while at least one shot remains.
        flasher_schedule = 0 if not any(self.targets) else 0x80808080
        self.game.coils.flasherMortis.schedule(schedule=flasher_schedule, cycle_seconds=0, now=True)
        # Light the lamp of every shot that is still required.
        for lamp_name, remaining in zip(self.lamp_names, self.targets):
            self.game.drive_lamp(lamp_name, 'medium' if remaining else 'off')

    def sw_mystery_active(self, sw):
        self.switch_hit(0)

    def sw_topRightOpto_active(self, sw):
        # ball came around outer left loop
        if self.game.switches.leftRollover.time_since_change() < 1:
            self.switch_hit(1)

    def sw_popperR_active_for_300ms(self, sw):
        self.switch_hit(2)

    def sw_rightRampExit_active(self, sw):
        self.switch_hit(3)

    def sw_captiveBall2_active(self, sw):
        # make it easier with captiveBall2 instead of captiveBall3
        self.switch_hit(4)

    def switch_hit(self, index):
        # Record a shot if it was still lit, then check for completion.
        if not self.targets[index]:
            return
        self.targets[index] = 0
        self.incr_num_shots()
        self.game.lampctrl.play_show('shot_hit', False, self.game.update_lamps)
        self.game.score(10000)
        self.check_for_completion()
class Death(DarkJudge, CrimeSceneShots):
    """ Death wizard mode
    Judge Death is on a murder spree.
    Shoot crime scene shots quickly before they relight.
    1 ball with temporary ball save.
    Timer is long and always counts down.
    """

    def __init__(self, game, priority):
        initial_time = game.user_settings['Gameplay']['Time for Death']
        ball_save_time = game.user_settings['Gameplay']['Death ballsave time']
        # seconds before an already-made shot relights (separate from mode timer)
        self.time_for_shot = game.user_settings['Gameplay']['Time for Death shot']
        super(Death, self).__init__(game, priority, initial_time=initial_time, instructions='Shoot lit shots quickly',
                    num_shots_required=5, num_balls=1, ball_save_time=ball_save_time)
        self.shot_order = [4, 2, 0, 3, 1]  # from easiest to hardest

    def mode_started(self):
        super(Death, self).mode_started()
        self.shot_timer = self.time_for_shot  # per-shot relight countdown
        self.active_shots = [1, 1, 1, 1, 1]  # 1 = shot lit / still required
        self.reset_drops()

    def update_lamps(self):
        # flash the Death flasher while any shot is still lit
        schedule = 0x80808080 if any(self.active_shots) else 0
        self.game.coils.flasherDeath.schedule(schedule=schedule, cycle_seconds=0, now=True)
        for shot in range(0, 5):
            style = 'off' if self.active_shots[shot] == 0 else 'medium'
            self.game.drive_lamp('perp' + str(shot + 1) + 'W', style)

    def switch_hit(self, index):
        # Crime scene shot hit (dispatched by CrimeSceneShots).
        if self.active_shots[index]:
            self.active_shots[index] = 0
            self.incr_num_shots()
            self.game.lampctrl.play_show('shot_hit', False, self.game.update_lamps)
            self.game.score(10000)
            self.shot_timer = self.time_for_shot  # making a shot restarts the relight countdown
            self.check_for_completion()

    def decrement_timer(self):
        super(Death, self).decrement_timer()
        if self.shot_timer > 0:
            self.shot_timer -= 1
        else:
            # per-shot time ran out: relight a previously made shot
            self.shot_timer = self.time_for_shot
            self.add_shot()

    def add_shot(self):
        # Relight the easiest shot that has already been made.
        for shot in self.shot_order:
            if not self.active_shots[shot]:
                self.active_shots[shot] = 1
                self.decr_num_shots()
                break
        self.game.update_lamps()

    def finish(self, success):
        # disable all shots, also disables all shot lamps
        self.active_shots = [0, 0, 0, 0, 0]
        super(Death, self).finish(success)
class Fire(DarkJudge, CrimeSceneShots):
    """Fire wizard mode.

    Judge Fire is lighting fires all over Mega City One.
    Shoot each crime scene shot twice.
    4 ball multiball with no ball save; mystery adds two more balls once.
    There is no timer, the mode ends when the last ball is lost.
    """

    def __init__(self, game, priority):
        super(Fire, self).__init__(game, priority, initial_time=0, instructions='Shoot lit shots twice',
                    num_shots_required=10, num_balls=4, ball_save_time=0)
        # Lamp style indexed by the number of hits still required (0, 1 or 2).
        self.lamp_styles = ['off', 'medium', 'fast']

    def mode_started(self):
        super(Fire, self).mode_started()
        self.mystery_lit = True
        # each of the five crime scene shots must be hit twice
        self.shots_required = [2] * 5

    def timer_update(self, time):
        # do not show a timer countdown on display
        pass

    def update_lamps(self):
        self.game.enable_gi(False)
        mode_running = self.num_shots < self.num_shots_required
        self.game.coils.flasherFire.schedule(schedule=0x80808080 if mode_running else 0, cycle_seconds=0, now=True)
        for index, hits_left in enumerate(self.shots_required):
            self.game.drive_lamp('perp' + str(index + 1) + 'R', self.lamp_styles[hits_left])

    def sw_mystery_active(self, sw):
        self.game.sound.play('mystery')
        if self.mystery_lit:
            # one-time award: two extra balls
            self.mystery_lit = False
            self.game.set_status('ADD 2 BALLS')
            self.game.launch_balls(2)
            self.game.update_lamps()

    def switch_hit(self, index):
        # Count a hit on a crime scene shot that still needs hits.
        if self.shots_required[index] <= 0:
            return
        self.shots_required[index] -= 1
        self.incr_num_shots()
        self.game.lampctrl.play_show('shot_hit', False, self.game.update_lamps)
        self.game.score(10000)
        self.check_for_completion()

    def finish(self, success):
        self.mystery_lit = False
        super(Fire, self).finish(success)
class Celebration(ChallengeBase, CrimeSceneShots):
    """ Final multiball wizard mode after all dark judges have been defeated
    All shots score.
    6 ball multiball with temporary ball save.
    No timer, mode ends when a single ball remains.
    """

    def __init__(self, game, priority):
        ball_save_time = game.user_settings['Gameplay']['Celebration ballsave time']
        super(Celebration, self).__init__(game, priority, initial_time=0, instructions='All shots score',
                    num_shots_required=0, num_balls=6, ball_save_time=ball_save_time)

    def mode_started(self):
        super(Celebration, self).mode_started()
        # This player reached the end of supergame, his next ball is regular play
        # do this early now in case the game tilts
        self.game.setPlayerState('supergame', False)

    def update_lamps(self):
        # rotate 0xFFFF0000 pattern to all 32 bit positions
        lamp_schedules = [(0xFFFF0000 >> d)|(0xFFFF0000 << (32 - d)) & 0xFFFFFFFF for d in range (0, 32)]
        shuffle(lamp_schedules)
        i = 0
        # every feature lamp gets a random rotation of the pattern;
        # GI strings and cabinet buttons are left alone
        for lamp in self.game.lamps:
            if lamp.name not in ['gi01', 'gi02', 'gi03', 'gi04', 'gi05', 'startButton', 'buyIn', 'drainShield', 'superGame', 'judgeAgain']:
                lamp.schedule(schedule=lamp_schedules[i%32], cycle_seconds=0, now=False)
                i += 1

    # By default, Ultimate Challenge modes ignore evt_ball_drained,
    # so BasePlay.drain_callback() will end the mode when the number of balls reaches 0
    # That's how multiball Challenge modes continue to run on a single ball.
    # (RegularPlay.evt_ball_drained() ends multiball modes on the last ball
    # but remember RegularPlay does not run when UltimateChallenge is running.)
    # Celebration is the only multiball Challenge mode that ends on the last ball in play
    # therefore it has to trap evt_ball_drained and implement that behavior itself.
    def evt_ball_drained(self):
        # The trough does not expect a multiball to start from 0 balls and gets confused,
        # It calls the end multiball callback when launching the first ball
        # thinking we got down to 1 ball when in fact we are going up to 6 balls.
        # The work-around is to implement the end multiball callback ourselves
        if (self.started and self.game.num_balls_requested() == 1):
            # down to just one ball, revert to regular play
            self.exit_callback()
        # else celebration continues until we are really down to the last ball

    # shots 0 to 4 are crime scene shots (dispatched by CrimeSceneShots)
    def evt_shooterL_active_500ms(self):
        self.switch_hit(5)

    def sw_mystery_active(self, sw):
        self.switch_hit(6)

    def sw_leftScorePost_active(self, sw):
        self.switch_hit(7)

    def sw_leftRampExit_active(self, sw):
        self.switch_hit(8)

    def drop_target_active(self):
        self.switch_hit(9)
        # only raise the drop targets once the whole bank is down
        if (self.game.switches.dropTargetJ.is_active() and
                self.game.switches.dropTargetU.is_active() and
                self.game.switches.dropTargetD.is_active() and
                self.game.switches.dropTargetG.is_active() and
                self.game.switches.dropTargetE.is_active()):
            self.reset_drops()

    def sw_subwayEnter2_closed(self, sw):
        self.switch_hit(10)

    def sw_centerRampExit_active(self, sw):
        self.switch_hit(11)

    def sw_rightTopPost_active(self, sw):
        self.switch_hit(12)

    def sw_threeBankTargets_active(self, sw):
        self.switch_hit(13)

    def sw_captiveBall1_active(self, sw):
        self.switch_hit(14)

    def sw_captiveBall2_active(self, sw):
        self.switch_hit(15)

    def sw_captiveBall3_active(self, sw):
        self.switch_hit(16)

    def switch_hit(self, index):
        # every shot scores; num_shots_required is 0 so the mode never "completes"
        self.game.score(10000)
        self.incr_num_shots()

    def update_status(self):
        self.status_layer.set_text('Shots made: ' + str(self.num_shots))
|
py | 7dfb00f963a01dacebacbf80575005702b7a94f8 | __author__ = 'alexander'
from lingvodoc.views.v2.sociolinguistics import check_socio
from lingvodoc.exceptions import CommonException
from lingvodoc.models import (
Client,
DBSession,
User,
UserBlobs,
ObjectTOC
)
from lingvodoc.utils.creation import create_object
from lingvodoc.utils.verification import check_client_id
from lingvodoc.views.v2.utils import (
get_user_by_client_id,
)
from lingvodoc.scripts.convert_rules import rules
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPConflict,
HTTPFound,
HTTPInternalServerError,
HTTPNotFound,
HTTPOk,
HTTPForbidden,
HTTPUnauthorized
)
from pyramid.renderers import render_to_response
from pyramid.response import Response
from pyramid.security import authenticated_userid
from pyramid.view import view_config
from sqlalchemy.exc import IntegrityError
import logging
import os
import random
import string
import sys
import time
log = logging.getLogger(__name__)
@view_config(route_name='convert', renderer='json', request_method='POST')
def convert(request):  # TODO: test when convert in blobs will be needed
    """Convert a stored user blob to another format.

    Expects a JSON body with 'out_type', 'client_id' and 'object_id'.
    Downloads the blob content, writes it to a uniquely named scratch file
    and runs the first matching converter from convert_rules.rules.
    Returns {'content': ...} on success or {'error': ...} with an
    appropriate HTTP status otherwise.
    """
    import requests
    try:
        variables = {'auth': request.authenticated_userid}
        req = request.json_body
        client = DBSession.query(Client).filter_by(id=variables['auth']).first()
        if not client:
            raise KeyError("Invalid client id (not registered on server). Try to logout and then login.")
        user = DBSession.query(User).filter_by(id=client.user_id).first()
        if not user:
            raise CommonException("This client id is orphaned. Try to logout and then login once more.")
        out_type = req['out_type']
        client_id = req['client_id']
        object_id = req['object_id']
        blob = DBSession.query(UserBlobs).filter_by(client_id=client_id, object_id=object_id).first()
        if not blob:
            raise KeyError("No such file")
        r = requests.get(blob.content)
        if not r:
            raise CommonException("Cannot access file")
        content = r.content
        try:
            n = 10
            # unique scratch file name; keep the blob's extension so
            # converters can recognize the input format
            filename = time.ctime() + ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits)
                                              for c in range(n))
            extension = os.path.splitext(blob.content)[1]
            # BUG FIX: the scratch file is created at filename + extension, so
            # every later file operation must use the same full path (the old
            # code called getsize()/remove() on 'filename' alone, a path that
            # never exists, raising OSError from the finally block).
            full_path = filename + extension
            # 'with' guarantees the handle is closed even if write() fails
            with open(full_path, 'wb') as f:
                f.write(content)
        except Exception as e:
            request.response.status = HTTPInternalServerError.code
            return {'error': str(e)}
        try:
            data_type = blob.data_type
            for rule in rules:
                if data_type == rule.in_type and out_type == rule.out_type:
                    if extension in rule.in_extensions:
                        if os.path.getsize(full_path) / 1024 / 1024.0 < rule.max_in_size:
                            content = rule.convert(full_path, req.get('config'), rule.converter_config)
                            if sys.getsizeof(content) / 1024 / 1024.0 < rule.max_out_size:
                                request.response.status = HTTPOk.code
                                return {'content': content}
            # no rule matched or a size limit was exceeded
            raise KeyError("Cannot access file")
        except Exception as e:
            request.response.status = HTTPInternalServerError.code
            return {'error': str(e)}
        finally:
            # always clean up the scratch file, even on early return
            os.remove(full_path)
    except KeyError as e:
        request.response.status = HTTPBadRequest.code
        return {'error': str(e)}
    except IntegrityError as e:
        request.response.status = HTTPInternalServerError.code
        return {'error': str(e)}
    except CommonException as e:
        request.response.status = HTTPConflict.code
        return {'error': str(e)}
@view_config(route_name='upload_user_blob', renderer='json', request_method='POST')
def upload_user_blob(request):  # TODO: remove blob Object
    """Store an uploaded file as a new UserBlobs record.

    Reads the multipart 'blob' field plus 'data_type' (and optionally
    'client_id' / 'object_id') from the POST body, saves the file via
    create_object() and returns the new record's identifiers and content
    location.  Sociolinguistics blobs are validated with check_socio().
    """
    variables = {'auth': authenticated_userid(request)}
    filename = request.POST['blob'].filename
    input_file = request.POST['blob'].file

    class Object(object):
        # ad-hoc attribute holder for the blob metadata (see TODO above)
        pass

    client_id = variables['auth']
    client = DBSession.query(Client).filter_by(id=variables['auth']).first()
    if not client:
        raise KeyError("Invalid client id (not registered on server). Try to logout and then login.")
    user = DBSession.query(User).filter_by(id=client.user_id).first()
    if not user:
        raise CommonException("This client id is orphaned. Try to logout and then login once more.")
    if request.POST.get('client_id', None):
        # allow the owner of the client id (or the admin, user id 1) to override it
        if check_client_id(authenticated=variables['auth'], client_id=request.POST['client_id']) or user.id == 1:
            client_id = request.POST['client_id']
        else:
            # BUG FIX: the old code assigned the exception *class* to
            # status_code; set the numeric HTTP status instead.
            request.response.status = HTTPBadRequest.code
            return {'error': 'client_id from another user'}
    blob = Object()
    blob.client_id = client_id
    blob.data_type = request.POST['data_type']
    blob.filename = filename
    # 'user' was already looked up for this client above; reuse it instead of
    # re-querying Client and User with identical filters.
    current_user = user
    object_id = request.POST.get('object_id', None)
    blob_object = UserBlobs(object_id=object_id,
                            client_id=blob.client_id,
                            name=filename,
                            data_type=blob.data_type,
                            user_id=current_user.id)
    current_user.userblobs.append(blob_object)
    blob_object.real_storage_path, blob_object.content = create_object(request, input_file, blob_object, blob.data_type,
                                                                       blob.filename, json_input=False)
    if blob.data_type == "sociolinguistics":
        try:
            check_socio(blob_object.real_storage_path)
        except Exception as e:
            request.response.status = HTTPBadRequest.code
            return {"error": str(e)}
    DBSession.add(blob_object)
    DBSession.flush()
    request.response.status = HTTPOk.code
    return {"client_id": blob_object.client_id, "object_id": blob_object.object_id, "content": blob_object.content}
@view_config(route_name='get_user_blob', renderer='json', request_method='GET')
def get_user_blob(request):  # TODO: test
    """Return metadata and content location of a single user blob, or 404."""
    variables = {'auth': authenticated_userid(request)}  # kept from original; result unused
    client_id = request.matchdict.get('client_id')
    object_id = request.matchdict.get('object_id')
    blob = DBSession.query(UserBlobs).filter_by(client_id=client_id, object_id=object_id).first()
    if blob is None:
        request.response.status = HTTPNotFound.code
        return {'error': 'No such blob in the system'}
    request.response.status = HTTPOk.code
    return {'name': blob.name, 'content': blob.content, 'data_type': blob.data_type,
            'client_id': blob.client_id, 'object_id': blob.object_id, 'created_at': blob.created_at}
@view_config(route_name='delete_user_blob', renderer='json', request_method='DELETE')
def delete_user_blob(request):
    """Delete a user blob record; physical file removal is best-effort."""
    user = get_user_by_client_id(authenticated_userid(request))
    if user is None:
        request.response.status = HTTPUnauthorized.code
        return {'error': "Guests can not delete resources."}
    client_id = request.matchdict.get('client_id')
    object_id = request.matchdict.get('object_id')
    if user != get_user_by_client_id(client_id):
        request.response.status = HTTPForbidden.code
        return {'error': "That file doesn't belong to you."}
    blob = DBSession.query(UserBlobs).filter_by(client_id=client_id, object_id=object_id).first()
    if blob is None:
        request.response.status = HTTPNotFound.code
        return {'error': 'No such blob in the system'}
    file_location = blob.real_storage_path
    DBSession.delete(blob)
    # remove the matching table-of-contents entry as well
    toc_entry = DBSession.query(ObjectTOC).filter_by(client_id=blob.client_id,
                                                     object_id=blob.object_id).one()
    DBSession.delete(toc_entry)
    request.response.status = HTTPOk.code
    try:
        os.unlink(file_location)
    except:
        # NOTE: intentionally not an error
        return {"warning": "File can not be deleted physically; deleting from DMBS only."}
    return
@view_config(route_name='list_user_blobs', renderer='json', request_method='GET')
def list_user_blobs(request):  # TODO: test
    """List the current user's blobs, or all blobs of a globally listable type."""
    variables = {'auth': authenticated_userid(request)}
    allowed_global_types = ["sociolinguistics"]
    client = DBSession.query(Client).filter_by(id=variables['auth']).first()
    data_type = request.params.get('data_type')
    is_global = request.params.get('is_global')
    # Pick the query matching the requested scope; forbidden scopes bail early.
    if not data_type:
        query = DBSession.query(UserBlobs).filter_by(user_id=client.user_id)
    elif not is_global:
        query = DBSession.query(UserBlobs).filter_by(user_id=client.user_id, data_type=data_type)
    elif data_type in allowed_global_types:
        query = DBSession.query(UserBlobs).filter_by(data_type=data_type)
    else:
        request.response.status = HTTPForbidden.code
        return {"error": "You can not list that data type globally."}
    user_blobs = query.all()
    request.response.status = HTTPOk.code
    return [{'name': blob.name, 'content': blob.content, 'data_type': blob.data_type,
             'client_id': blob.client_id, 'object_id': blob.object_id, 'created_at': blob.created_at}
            for blob in user_blobs]
@view_config(route_name='blob_upload', renderer='templates/user_upload.pt', request_method='GET')
def blob_upload_get(request):
    """Render the blob upload page; anonymous visitors are redirected to login."""
    client_id = authenticated_userid(request)
    user = get_user_by_client_id(client_id)
    if user is None:
        response = Response()
        return HTTPFound(location=request.route_url('login'), headers=response.headers)
    variables = {
        'client_id': client_id,
        'user': user,
        'dictionary_client_id': request.matchdict.get('dictionary_client_id'),
        'dictionary_object_id': request.matchdict.get('dictionary_object_id'),
        'perspective_client_id': request.matchdict.get('perspective_client_id'),
        'perspective_object_id': request.matchdict.get('perspective_object_id'),
    }
    return render_to_response('templates/user_upload.pt', variables, request=request)
py | 7dfb02412621dce34fdd0e4f94c6ad393bcdcd69 | #! /usr/bin/env python
from socket import *

# UDP listener: print every datagram received on localhost:10000.
HOST = 'localhost'
PORT = 10000


def main():
    """Bind a UDP socket and print incoming (data, address) pairs forever."""
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.bind((HOST, PORT))
    try:
        while 1:
            data = sock.recvfrom(1024)
            print(data)
    finally:
        # BUG FIX: sock.close() was unreachable after the infinite loop;
        # release the socket even when the loop exits via KeyboardInterrupt.
        sock.close()


if __name__ == '__main__':
    main()
|
py | 7dfb026ffc48ec08f116ca7efac2f53c789b1516 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6320, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Type of the optional per-request 'cls' callback: it receives the pipeline
# response, the deserialized result and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ClustersOperations:
"""ClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~avs_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        resource_group_name: str,
        private_cloud_name: str,
        **kwargs
    ) -> AsyncIterable["_models.ClusterList"]:
        """List clusters in a private cloud.

        List clusters in a private cloud.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param private_cloud_name: Name of the private cloud.
        :type private_cloud_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ClusterList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~avs_client.models.ClusterList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ClusterList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # caller-supplied mappings override the defaults
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-01-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the HTTP request for the first page (no next_link) or a
            # continuation page (next_link is the absolute URL from the service).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('ClusterList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, mapping non-200 statuses to ARM errors.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters'}  # type: ignore
async def get(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
**kwargs
) -> "_models.Cluster":
"""Get a cluster by name in a private cloud.
Get a cluster by name in a private cloud.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: Name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~avs_client.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
cluster: "_models.Cluster",
**kwargs
) -> "_models.Cluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cluster, 'Cluster')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Cluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
cluster: "_models.Cluster",
**kwargs
) -> AsyncLROPoller["_models.Cluster"]:
"""Create or update a cluster in a private cloud.
Create or update a cluster in a private cloud.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: The name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:param cluster: A cluster in the private cloud.
:type cluster: ~avs_client.models.Cluster
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~avs_client.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
private_cloud_name=private_cloud_name,
cluster_name=cluster_name,
cluster=cluster,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
cluster_update: "_models.ClusterUpdate",
**kwargs
) -> "_models.Cluster":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cluster_update, 'ClusterUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Cluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
cluster_update: "_models.ClusterUpdate",
**kwargs
) -> AsyncLROPoller["_models.Cluster"]:
"""Update a cluster in a private cloud.
Update a cluster in a private cloud.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: Name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:param cluster_update: The cluster properties to be updated.
:type cluster_update: ~avs_client.models.ClusterUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~avs_client.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
private_cloud_name=private_cloud_name,
cluster_name=cluster_name,
cluster_update=cluster_update,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-01-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
private_cloud_name: str,
cluster_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a cluster in a private cloud.
Delete a cluster in a private cloud.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param private_cloud_name: Name of the private cloud.
:type private_cloud_name: str
:param cluster_name: Name of the cluster in the private cloud.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
private_cloud_name=private_cloud_name,
cluster_name=cluster_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'privateCloudName': self._serialize.url("private_cloud_name", private_cloud_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AVS/privateClouds/{privateCloudName}/clusters/{clusterName}'} # type: ignore
|
py | 7dfb027dd11e26eaf08ab0f30923c60c1602c9f2 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .upload_resource import UploadResource
from .download_resource import DownloadResource
from .thumbnail_resource import ThumbnailResource
from .identicon_resource import IdenticonResource
from .filepath import MediaFilePaths
from twisted.web.resource import Resource
import logging
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class MediaRepositoryResource(Resource):
    """File uploading and downloading.

    Uploads are POSTed to a resource which returns a token which is used to GET
    the download::

        => POST /_matrix/media/v1/upload HTTP/1.1
           Content-Type: <media-type>
           Content-Length: <content-length>

           <media>

        <= HTTP/1.1 200 OK
           Content-Type: application/json

           { "content_uri": "mxc://<server-name>/<media-id>" }

        => GET /_matrix/media/v1/download/<server-name>/<media-id> HTTP/1.1

        <= HTTP/1.1 200 OK
           Content-Type: <media-type>
           Content-Disposition: attachment;filename=<upload-filename>

           <media>

    Clients can get thumbnails by supplying a desired width and height and
    thumbnailing method::

        => GET /_matrix/media/v1/thumbnail/<server_name>
                /<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1

        <= HTTP/1.1 200 OK
           Content-Type: image/jpeg or image/png

           <thumbnail>

    The thumbnail methods are "crop" and "scale". "scale" tries to return an
    image where either the width or the height is smaller than the requested
    size. The client should then scale and letterbox the image if it needs to
    fit within a given rectangle. "crop" tries to return an image where the
    width and height are close to the requested size and the aspect matches
    the requested size. The client should scale the image if it needs to fit
    within a given rectangle.
    """

    def __init__(self, hs):
        Resource.__init__(self)
        filepaths = MediaFilePaths(hs.config.media_store_path)
        # Mount each media sub-resource under its URL segment.
        for segment, child in (
            ("upload", UploadResource(hs, filepaths)),
            ("download", DownloadResource(hs, filepaths)),
            ("thumbnail", ThumbnailResource(hs, filepaths)),
            ("identicon", IdenticonResource()),
        ):
            self.putChild(segment, child)
|
py | 7dfb02cc5e42d353d2f9b553bcd4e143beed53cf | #!/usr/bin/env python2.4
#
# trie_tests.py
""" Script for running trie tests. """
import sys
import os
import os.path
import tempfile
import subprocess
import time
# Path of the trie binary under test, plus source-tree locations taken from
# the build environment (empty string when unset).
trie = "./trie"
srcdir = os.getenv("SRCDIR", "")
topsrcdir = os.getenv("TOPSRCDIR", "")
# The shared Lydia test harness lives in <topsrcdir>/testsuite.
sys.path.append(os.path.join(topsrcdir, "testsuite"))
try:
    from lydia_tests import *
except ImportError:
    print "You must set the TOPSRCDIR environment variable"
    print "to the location of your Lydia source tree"
    sys.exit(1)
def create_default_input(test):
    """Create a temporary input file holding the default trie commands.

    The file contains the two commands ("fm", "quit") used for tests that do
    not supply their own input file. Returns the path of the created file;
    the caller is responsible for removing it (run_trie_command does so).
    """
    # tempfile.mkstemp — unlike the race-prone, insecure mktemp used before —
    # atomically creates the file and returns an open descriptor for it.
    handle, tmp_filename = tempfile.mkstemp(
        '-tmp.input',
        'trie-test-%s-' % os.path.basename(test.model),
        '/tmp/')
    tmp_file = os.fdopen(handle, 'w')
    tmp_file.writelines(["fm\n", "quit\n"])
    tmp_file.close()
    return tmp_filename
def run_trie_command(test):
    """Run the trie tool on one test case and capture its output.

    Uses test.input as stdin when the test provides one; otherwise a
    temporary default input file is created and removed afterwards.

    Returns a tuple (exit status, stdout lines, stderr lines, wall time).
    """
    temporary_infile = None
    try:
        infile = open(test.input)
    except AttributeError:
        # The test case has no 'input' attribute: fall back to the default commands.
        temporary_infile = create_default_input(test)
        infile = open(temporary_infile)
    starttime = time.time()
    pipe = subprocess.Popen([trie, test.model, test.observation],
                            stdin=infile,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output, errors = pipe.communicate()
    stoptime = time.time()
    deltatime = stoptime - starttime
    result = pipe.returncode
    # A negative return code means the child was terminated by a signal.
    if result < 0:
        raise ToolCrashError(output+errors)
    infile.close()
    try:
        os.remove(temporary_infile)
    except TypeError:
        # temporary_infile is still None: the test supplied its own input file.
        pass
    # Strip the model's directory from diagnostics so expected output is
    # location-independent.
    errors = errors.replace(test.model, os.path.basename(test.model))
    return (result,
            output.splitlines(True),
            errors.splitlines(True),
            deltatime)
if __name__ == "__main__":
    # Delegate test discovery and execution to the shared Lydia harness.
    run_tests(srcdir, run_trie_command)
|
py | 7dfb03031413f91bdab02fc441e34a8acb4d6309 | # coding:utf-8
import random
import string
from flanker.addresslib import address
from mock import patch
from nose.tools import assert_equal, assert_not_equal
from nose.tools import nottest
from ... import skip_if_asked
# All candidate local parts are validated against the gmail.com domain.
DOMAIN = '@gmail.com'
# Exchanger host returned by the mocked MX lookup below.
SAMPLE_MX = 'sample.gmail-smtp-in.l.google.com'
# Characters allowed in an address atom (letters, digits and atext specials —
# presumably per RFC 5322 atext; confirm against flanker's grammar).
ATOM_STR = string.ascii_letters + string.digits + '!#$%&\'*+-/=?^_`{|}~'
@nottest
def mock_exchanger_lookup(arg, metrics=False):
    """Stand-in for address.mail_exchanger_lookup that avoids real DNS.

    Always reports SAMPLE_MX as the exchanger, with zeroed timing metrics.
    """
    timings = dict.fromkeys(('mx_lookup', 'dns_lookup', 'mx_conn'), 0)
    return (SAMPLE_MX, timings)
def test_exchanger_lookup():
    '''
    Test if exchanger lookup is occurring correctly. If this simple test
    fails that means custom grammar was hit. Then the rest of the tests
    can be mocked. Should always be run during deployment, can be skipped
    during development.
    '''
    skip_if_asked()

    # very simple test that should fail Gmail custom grammar
    addr_string = '!mailgun' + DOMAIN
    addr = address.validate_address(addr_string)
    assert_equal(addr, None)
def test_gmail_pass():
    """Addresses satisfying Gmail's custom local-part grammar must validate."""
    with patch.object(address, 'mail_exchanger_lookup') as mock_method:
        # Skip real DNS: every MX lookup resolves to SAMPLE_MX instantly.
        mock_method.side_effect = mock_exchanger_lookup

        # valid length range
        for i in range(6, 31):
            localpart = ''.join(random.choice(string.ascii_letters) for x in range(i))
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)

        # start must be letter or num
        for i in string.ascii_letters + string.digits:
            localpart = str(i) + 'aaaaa'
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)

        # end must be letter or number
        for i in string.ascii_letters + string.digits:
            localpart = 'aaaaa' + str(i)
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)

        # must be letter, num, or dots
        for i in string.ascii_letters + string.digits + '.':
            localpart = 'aaa' + str(i) + '000'
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)

        # non-consecutive dots (.) within an address are legal
        for localpart in ['a.aaaaa', 'aa.aaaa', 'aaa.aaa','aa.aa.aa']:
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)

        # everything after plus (+) is ignored
        for localpart in ['aaaaaa+', 'aaaaaa+tag', 'aaaaaa+tag+tag','aaaaaa++tag', 'aaaaaa+' + ATOM_STR]:
            addr = address.validate_address(localpart + DOMAIN)
            assert_not_equal(addr, None)
def test_gmail_fail():
    """Addresses violating Gmail's custom local-part grammar must be rejected."""
    with patch.object(address, 'mail_exchanger_lookup') as mock_method:
        # Skip real DNS: every MX lookup resolves to SAMPLE_MX instantly.
        mock_method.side_effect = mock_exchanger_lookup

        # invalid length range (shorter than 6 or longer than 30 characters).
        # BUGFIX: the original used `range(0, 6) + range(31, 40)`, which only
        # works on Python 2 (range no longer returns a list on Python 3).
        for i in list(range(0, 6)) + list(range(31, 40)):
            localpart = ''.join(random.choice(string.ascii_letters) for x in range(i))
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid start char (must start with letter)
        for i in string.punctuation:
            localpart = str(i) + 'aaaaa'
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid end char (must end with letter or digit)
        for i in string.punctuation:
            localpart = 'aaaaa' + str(i)
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid chars (must be letter, num, or dot)
        invalid_chars = string.punctuation.replace('.', '')
        for i in invalid_chars:
            localpart = 'aaa' + str(i) + '000'
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # invalid consecutive dots (.)
        for localpart in ['aaaaaa......', '......aaaaaa', 'aaa......aaa', 'aa...aa...aa']:
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)

        # tagged local parts whose base (the part before '+') is itself invalid
        for localpart in ['+t1', 'a+t1', 'aa+', 'aaa+t1', 'aaaa+t1+t2', 'aaaaa++t1']:
            addr = address.validate_address(localpart + DOMAIN)
            assert_equal(addr, None)
py | 7dfb03fe443d4990010441954705f526b0384c63 | from pytorch.losses.imports import *
from system.imports import *
@accepts(dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def load_loss(system_dict):
    """Instantiate the torch criterion named in system_dict["local"]["criterion"].

    Reads the hyper-parameters stored under
    system_dict["hyper-parameters"]["loss"]["params"] and replaces
    system_dict["local"]["criterion"] with the constructed torch.nn loss.

    Args:
        system_dict (dict): Monk system state dictionary.

    Returns:
        dict: The same system_dict, with the criterion instantiated.
    """
    name = system_dict["local"]["criterion"]
    params = system_dict["hyper-parameters"]["loss"]["params"]
    if name == "softmaxcrossentropy":
        system_dict["local"]["criterion"] = torch.nn.CrossEntropyLoss(
            weight=params["weight"],
            size_average=params["size_average"],
            ignore_index=params["ignore_index"],
            reduce=params["reduce"],
            reduction=params["reduction"])
    elif name == "nll":
        system_dict["local"]["criterion"] = torch.nn.NLLLoss(
            weight=params["weight"],
            size_average=params["size_average"],
            ignore_index=params["ignore_index"],
            reduce=params["reduce"],
            reduction=params["reduction"])
    elif name == "poissonnll":
        # BUGFIX: the original passed params["log_input"] for 'full',
        # 'size_average' and 'eps' as well (copy-paste error); each keyword now
        # reads its own parameter.
        # NOTE(review): assumes the params dict carries 'full', 'size_average'
        # and 'eps' keys for this loss — confirm against the loss-registration code.
        system_dict["local"]["criterion"] = torch.nn.PoissonNLLLoss(
            log_input=params["log_input"],
            full=params["full"],
            size_average=params["size_average"],
            eps=params["eps"],
            reduce=params["reduce"],
            reduction=params["reduction"])
    elif name == "binarycrossentropy":
        system_dict["local"]["criterion"] = torch.nn.BCELoss(
            weight=params["weight"],
            size_average=params["size_average"],
            reduce=params["reduce"],
            reduction=params["reduction"])
    elif name == "binarycrossentropywithlogits":
        system_dict["local"]["criterion"] = torch.nn.BCEWithLogitsLoss(
            weight=params["weight"],
            size_average=params["size_average"],
            reduce=params["reduce"],
            reduction=params["reduction"],
            pos_weight=params["pos_weight"])
    return system_dict
py | 7dfb04995fa8193e04aba6d5e509b9334ffa5a4d | def f():
"""
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
""" |
py | 7dfb0533492fead9c6039ea6191742c9edae8a5e | """
Copyright (C) 2019 Authors of gHHC
This file is part of "hyperbolic_hierarchical_clustering"
http://github.com/nmonath/hyperbolic_hierarchical_clustering
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import tensorflow as tf
from absl import logging
from ghhc.model.ghhc import squared_euclidean_cdist
from sklearn.cluster import AgglomerativeClustering
tf.enable_eager_execution()
def euc_dist_batched(x_i, nodes, batch_size=1000):
    """Assemble the full pairwise squared-Euclidean distance matrix in tiles.

    Only a batch_size x batch_size tile is handed to the device kernel at a
    time, so arbitrarily large inputs fit in memory.
    """
    n_rows = x_i.shape[0]
    n_cols = nodes.shape[0]
    out = np.zeros((n_rows, n_cols), np.float32)
    for row_start in range(0, n_rows, batch_size):
        logging.log_every_n_seconds(logging.INFO, 'euc_dist_batched processed %s of %s', 5, row_start, n_rows)
        row_block = x_i[row_start:(row_start + batch_size), :]
        for col_start in range(0, n_cols, batch_size):
            logging.log_every_n_seconds(logging.INFO, 'euc_dist_batched processed %s of %s', 5, col_start, n_cols)
            col_block = nodes[col_start:(col_start + batch_size), :]
            tile = squared_euclidean_cdist(row_block, col_block).numpy()
            out[row_start:(row_start + batch_size), col_start:(col_start + batch_size)] = tile
    return out
def afkmc2(data, k, m=20):
    """Implementation of Fast and Provably Good Seedings for k-Means https://las.inf.ethz.ch/files/bachem16fast.pdf

    Selects k seed points: the first center is drawn uniformly, and each
    subsequent center is picked by running a length-m Metropolis-Hastings
    chain over a proposal mixing D^2 weights (distance to the first center)
    with a uniform term.

    Args:
        data: (n, d) array of candidate points.
        k: number of centers to select.
        m: chain length per center (longer approximates exact D^2 sampling).

    Returns:
        (k, d) float32 array of selected centers (rows of `data`).
    """
    n = data.shape[0]
    # First center: uniform over the data set.
    c1 = np.random.randint(data.shape[0])
    c1_vec = np.expand_dims(data[c1], 0)
    # Proposal q: 0.5 * normalized D^2 + 0.5 * uniform (sums to 1).
    q_nom = np.squeeze(euc_dist_batched(c1_vec, data))
    q_denom = np.sum(q_nom)
    q = 0.5 * q_nom / q_denom + 1.0 / (2.0 * n)
    indices = np.arange(n)
    # Centers chosen so far (row 0 is the uniform draw).
    c_i_minus_1 = np.zeros((k, data.shape[1]), dtype=np.float32)
    c_i_minus_1[0, :] = c1_vec
    for i in range(1, k):
        logging.log_every_n_seconds(logging.INFO, 'afkmc2 processed %s of %s', 5, i, k)
        # Initial chain state drawn from the proposal; d_x is its squared
        # distance to the nearest already-selected center.
        x_ind = np.random.choice(indices, p=q)
        x = np.expand_dims(data[x_ind], 0)
        d_x = np.min(np.squeeze(euc_dist_batched(x, c_i_minus_1[:i])))
        for j in range(1, m):
            # MH step: accept candidate y w.p. (d(y) q(x)) / (d(x) q(y)).
            y_ind = np.random.choice(indices, p=q)
            y = np.expand_dims(data[y_ind], 0)
            d_y = np.min(np.squeeze(euc_dist_batched(y, c_i_minus_1[:i])))
            if ((d_y * q[x_ind]) / (d_x * q[y_ind])) > np.random.rand():
                # NOTE(review): x_ind is not updated on acceptance, so
                # q[x_ind] keeps referring to the initial chain state in
                # later iterations -- verify against Algorithm 1 of the paper.
                x = y
                d_x = d_y
        c_i_minus_1[i] = x
    return c_i_minus_1
def init_from_rand_and_hac(data, k, scale):
    """Pick random points for leaves, find internals with HAC heuristic.

    Layout of the returned k nodes (k must be even): slot 0 is a single
    scaled data point, the next k/2 slots are randomly sampled leaves, the
    remaining slots are internal points from average-linkage HAC over those
    leaves. Everything is scaled and norm-clipped to stay inside the ball.

    Args:
        data: (n, d) array of input points.
        k: total number of nodes to initialize (must be even).
        scale: multiplier applied before norm-clipping.

    Returns:
        (k, d) float32 array with per-row L2 norm clipped to 0.80.
    """
    centers = random_pts(data, int(k / 2), 1.0)
    # A HAC tree over j leaves has j - 1 internal merges.
    hac_pts = init_from_hac(centers, centers.shape[0] - 1)
    res = np.zeros((k, data.shape[1]), dtype=np.float32)
    assert k % 2 == 0
    # Slot 0: one randomly chosen (scaled) data point.
    res[0] += scale * data[np.random.randint(data.shape[0])]
    res[1:centers.shape[0] + 1, :] = centers
    res[centers.shape[0] + 1:, :] = hac_pts
    # Clip per-row norms so all embeddings remain strictly inside the ball.
    res = tf.clip_by_norm(scale * res, 0.80, axes=[1]).numpy()
    return res
def init_from_afkmc2_and_hac(data, k):
    """Pick leaves using afkmc2, find internals with HAC heuristic.

    Same node layout as init_from_rand_and_hac, but the k/2 leaves are
    chosen with assumption-free k-MC^2 seeding instead of uniform sampling,
    and the scale is fixed at 0.65.

    Args:
        data: (n, d) array of input points.
        k: total number of nodes to initialize (must be even).

    Returns:
        (k, d) float32 array with per-row L2 norm clipped to 0.80.
    """
    centers = afkmc2(data, int(k / 2))
    hac_pts = init_from_hac(centers, centers.shape[0] - 1)
    res = np.zeros((k, data.shape[1]), dtype=np.float32)
    assert k % 2 == 0
    res[0] += 0.65 * data[np.random.randint(data.shape[0])]
    res[1:centers.shape[0] + 1, :] = centers
    res[centers.shape[0] + 1:, :] = hac_pts
    res = tf.clip_by_norm(0.65 * res, 0.80, axes=[1]).numpy()
    return res
def hac_scaling_factor(n):
    """Monotone decay weights for the n merges of a HAC tree.

    Entry i equals log2(n + 1 - i) / log2(n + 1): 1.0 for the earliest
    merge and approaching 0 for the final (root) merge.
    """
    remaining = (n + 1) - np.arange(n)
    return np.log2(remaining) / np.log2(n + 1)
def init_from_hac(data, k):
    """Derive k internal-node positions from an average-linkage HAC tree.

    Runs agglomerative clustering over `data`, computes the normalized mean
    of the leaves under each merge, and returns the LAST k merges (the ones
    nearest the root), shrunk toward the origin by hac_scaling_factor so
    nodes closer to the root sit closer to the origin.

    Args:
        data: (n, d) array of leaf positions.
        k: number of internal nodes to return (k <= n - 1).

    Returns:
        (k, d) array of internal-node positions.
    """
    agg = AgglomerativeClustering(n_clusters=1, linkage='average')
    agg.fit(data)
    # internals[i] accumulates the SUM of all leaf vectors under merge i;
    # counts[i] is the number of leaves, so internals / counts is the mean.
    internals = np.zeros((data.shape[0] - 1, data.shape[1]), dtype=np.float32)
    counts = np.zeros((data.shape[0] - 1), dtype=np.float32)
    children = agg.children_

    def get_vector_for_idx(idx):
        # Indices < n are leaves; indices >= n refer to earlier merges.
        if idx < data.shape[0]:
            return data[idx]
        else:
            return internals[idx - data.shape[0]]

    def get_count_for_idx(idx):
        # A leaf counts as 1; a merge carries its accumulated leaf count.
        if idx < data.shape[0]:
            return 1
        else:
            return counts[idx - data.shape[0]]

    # sklearn lists merges bottom-up and children >= n always reference
    # earlier rows, so one forward pass fills every accumulator.
    for i in range(0, children.shape[0]):
        internals[i, :] = get_vector_for_idx(children[i, 0]) + get_vector_for_idx(children[i, 1])
        counts[i] = get_count_for_idx(children[i, 0]) + get_count_for_idx(children[i, 1])
    mean_internals = internals / np.expand_dims(counts, 1)
    normalized_internals = mean_internals / np.linalg.norm(mean_internals, axis=1, keepdims=True)
    # The last k merges are the ones nearest the root of the HAC tree.
    selected_internals = normalized_internals[-k:, :]
    sf = hac_scaling_factor(data.shape[0])[-k:]
    return selected_internals * np.expand_dims(sf, 1)
def random_pts(data, n, scale):
    """Sample n distinct rows of `data` uniformly (without replacement), scaled."""
    chosen_rows = np.random.choice(data.shape[0], size=n, replace=False)
    sampled = data[chosen_rows, :]
    return scale * sampled
def random(data, n, scale):
    """Draw n standard-normal points with the same dimensionality as `data`.

    `data` is consulted only for its column count; its values are unused.
    (The name mirrors the module's other initializers but shadows the
    stdlib `random` module within this file.)
    """
    dim = data.shape[1]
    draws = np.random.randn(n, dim).astype(np.float32)
    return draws * scale
|
py | 7dfb056feeb8eed546b181327452ebb048dd183c | # Make the classes below importable from the `.api` subpackage directly.
from .enrich import EnrichAPI
from .inspect import InspectAPI
from .response import ResponseAPI
from .intel import IntelAPI
from .commands import CommandsAPI
|
py | 7dfb0679075e2db1d026a471d41d912febe26deb | from telegram_bot_sdk.telegram_objects.inputMessageContent import InputMessageContent
class InputTextMessageContent(InputMessageContent):
    """This class represents the content of a text message to be sent as the result of an inline query

    :param message_text: Text of the message to be sent, 1-4096 characters
    :type message_text: str
    :param parse_mode: Optional: Send *Markdown* or *HTML*, if you want Telegram apps to show bold, italic, fixed-width\
        text or inline URLs in your bot's message
    :type parse_mode: str
    :param disable_web_page_preview: Optional: Disables link preview for links in the sent message
    :type disable_web_page_preview: bool
    """
    def __init__(self, *, message_text, parse_mode=None, disable_web_page_preview=None):
        super().__init__()
        self.message_text = message_text
        # None for the optional fields means "use Telegram's default".
        self.parse_mode = parse_mode
        self.disable_web_page_preview = disable_web_page_preview
|
py | 7dfb083d1a02e644e8a372b52684a71c2215ee01 | # BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LancuhPad (REV D)
import os

# toolchains options
ARCH = 'arm'
CPU = 'cortex-m4'
CROSS_TOOL = 'gcc'

# The RT-Thread build environment may override the toolchain choice.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')

#device options

# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = 'D:/ArdaArmTools/GNUARM_4.9_2015q1/bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    PLATFORM = 'iar'
    EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.2'

# The environment may also override the toolchain installation directory.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'debug'
#BUILD = 'release'

# NOTE(review): the BSP header mentions TI EK-TM4C1294XL, yet the linker
# scripts/map files below are named stm32_* -- presumably copied from an
# STM32 BSP; confirm the intended target files.
if PLATFORM == 'gcc':
    # tool-chains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    # Cortex-M4F, hard-float ABI with single-precision FPU; per-function /
    # per-data sections let the linker garbage-collect unused code.
    DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2 -g'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    # Produce a flat binary and print section sizes after every link.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'

elif PLATFORM == 'armcc':
    # toolchains
    CC = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'

    DEVICE = ' --cpu Cortex-M4.fp '
    CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99' # -D' + PART_TYPE
    AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --scatter "stm32_rom.sct" --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --strict'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
    LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'

    # Build against the Keil MicroLIB C library to reduce footprint.
    CFLAGS += ' -D__MICROLIB '
    AFLAGS += ' --pd "__MICROLIB SETA 1" '
    LFLAGS += ' --library_type=microlib '
    EXEC_PATH += '/arm/armcc/bin/'

    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'

    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'

elif PLATFORM == 'iar':
    # toolchains
    CC = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'

    DEVICE = '-Dewarm' # + ' -D' + PART_TYPE

    # Disable IAR optimizations that interfere with RT-Thread debugging.
    CFLAGS = DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --endian=little'
    CFLAGS += ' --cpu=Cortex-M4'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=VFPv4_sp'
    CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' --silent'

    AFLAGS = DEVICE
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M4'
    AFLAGS += ' --fpu VFPv4_sp'
    AFLAGS += ' -S'

    if BUILD == 'debug':
        CFLAGS += ' --debug'
        CFLAGS += ' -On'
    else:
        CFLAGS += ' -Oh'

    LFLAGS = ' --config "stm32_rom.icf"'
    LFLAGS += ' --entry __iar_program_start'
    #LFLAGS += ' --silent'

    EXEC_PATH = EXEC_PATH + '/arm/bin/'
    POST_ACTION = ''
py | 7dfb08bfd88d1a99c8cb387301481ee51033a08a | from marshmallow import fields, post_load
from qikfiller.schemas.lists import (
BaseCollectionObject, BaseCollectionSchema, BaseSchema, register_class,
)
from qikfiller.schemas.lists.task import TaskSchema
class ClientSchema(BaseSchema):
    """Marshmallow schema for one client record returned by the QikTimes API."""

    # Name of the registered class that deserialized data is loaded into.
    LOAD_INTO = 'Client'

    id = fields.Integer(required=True)
    name = fields.String(required=True)
    owner_id = fields.Integer(allow_none=True)
    owner_name = fields.String(allow_none=True)
    custom_fields = fields.List(fields.String())
    tasks = fields.Nested(TaskSchema, many=True)

    @post_load
    def to_obj(self, data):
        # Flatten the custom_fields list into a single pipe-delimited string
        # before constructing the target object; a missing key is fine.
        try:
            data["custom_fields"] = '|'.join(data["custom_fields"])
        except KeyError:
            pass
        return super(ClientSchema, self).to_obj(data)
class ClientsSchema(BaseCollectionSchema):
    """Schema for the top-level collection payload: {"clients": [...]}."""

    LOAD_INTO = 'Clients'

    clients = fields.Nested(ClientSchema, many=True)
@register_class
class Clients(BaseCollectionObject):
    """Collection wrapper for Client objects, (de)serialized via ClientsSchema."""

    _SCHEMA = ClientsSchema
|
py | 7dfb08c2065a0cba3f034c83e9a910affb294153 | """
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2021 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
from liberaforms import ma
from liberaforms.models.media import Media
from liberaforms.utils import utils
from liberaforms.utils.utils import human_readable_bytes
class MediaSchema(ma.SQLAlchemySchema):
    """Serializer for Media rows: URLs, human-readable size, localized date."""

    class Meta:
        model = Media

    id = ma.Integer()
    created = ma.Method('get_created')
    file_name = ma.auto_field()
    file_size = ma.Method('get_file_size')
    image_url = ma.Method('get_image_url')
    thumbnail_url = ma.Method('get_thumbnail_url')
    alt_text = ma.auto_field()

    def get_created(self, obj):
        # Render the stored UTC timestamp in the configured timezone as YYYY-MM-DD.
        return utils.utc_to_g_timezone(obj.created).strftime("%Y-%m-%d")

    def get_image_url(self, obj):
        return obj.get_url()

    def get_thumbnail_url(self, obj):
        return obj.get_thumbnail_url()

    def get_file_size(self, obj):
        # e.g. 2048 -> "2 KB".
        return human_readable_bytes(obj.file_size)
|
py | 7dfb093ff19c2695f9bca286263e4d7b58ab9f6d | """
Inverts an invertible n x n matrix -- i.e., given an n x n matrix A, returns
an n x n matrix B such that AB = BA = In, the n x n identity matrix.
For a 2 x 2 matrix, inversion is simple using the cofactor equation. For
larger matrices, this is a four step process:
1. calculate the matrix of minors: create an n x n matrix by considering each
position in the original matrix in turn. Exclude the current row and column
and calculate the determinant of the remaining matrix, then place that value
in the current position's equivalent in the matrix of minors.
2. create the matrix of cofactors: take the matrix of minors and multiply
alternate values by -1 in a checkerboard pattern.
3. adjugate: hold the top left to bottom right diagonal constant, but swap all
other values over it.
4. multiply the adjugated matrix by 1 / the determinant of the original matrix
This code combines steps 1 and 2 into one method to reduce traversals of the
matrix.
Possible edge cases: will not work for 0x0 or 1x1 matrix, though these are
trivial to calculate without use of this file.
"""
import fractions
def invert_matrix(m):
    """Invert an n x n matrix.

    Invalid inputs are reported by printing a message and returning a 1x1
    sentinel matrix (kept for backward compatibility with existing callers):
    [[-1]] not a matrix, [[-2]] not square, [[-3]] smaller than 2x2,
    [[-4]] singular.

    The 2x2 case uses the cofactor formula with float arithmetic; larger
    matrices use exact Fraction arithmetic via the adjugate method.
    """
    # Error conditions
    if not array_is_matrix(m):
        print("Invalid matrix: array is not a matrix")
        return [[-1]]
    if len(m) != len(m[0]):
        print("Invalid matrix: matrix is not square")
        return [[-2]]
    if len(m) < 2:
        print("Invalid matrix: matrix is too small")
        return [[-3]]

    # Hoisted: the original computed the determinant twice (once for the
    # singularity check, once for the multiplier) -- it is an expensive
    # recursive cofactor expansion, so compute it exactly once.
    det = get_determinant(m)
    if det == 0:
        print("Invalid matrix: matrix is square, but singular (determinant = 0)")
        return [[-4]]

    if len(m) == 2:
        # Simple case: inverse of [[a, b], [c, d]] is 1/det * [[d, -b], [-c, a]].
        multiplier = 1 / det
        inverted = [[multiplier] * len(m) for _ in range(len(m))]
        inverted[0][1] = inverted[0][1] * -1 * m[0][1]
        inverted[1][0] = inverted[1][0] * -1 * m[1][0]
        inverted[0][0] = multiplier * m[1][1]
        inverted[1][1] = multiplier * m[0][0]
        return inverted

    # General case. Steps 1+2 (minors + cofactor signs) are combined inside
    # get_matrix_of_minors to avoid an extra traversal.
    m_of_minors = get_matrix_of_minors(m)
    multiplier = fractions.Fraction(1, det)
    # Steps 3+4: transpose across the main diagonal and scale by 1/det.
    return transpose_and_multiply(m_of_minors, multiplier)
def get_determinant(m):
    """Determinant of an n x n matrix (n >= 2) by cofactor expansion along row 0."""
    if len(m) == 2:
        # Trivial 2x2 case: ad - bc.
        return (m[0][0] * m[1][1]) - (m[0][1] * m[1][0])
    # Alternating-sign expansion: the sign of column i is (-1)**i.
    return sum(
        ((-1) ** i) * m[0][i] * get_determinant(get_minor(m, 0, i))
        for i in range(len(m))
    )
def get_matrix_of_minors(m):
    """Matrix of minors with cofactor ("checkerboard") signs already applied."""
    size = len(m)
    return [
        [((-1) ** (r + c)) * get_determinant(get_minor(m, r, c)) for c in range(size)]
        for r in range(size)
    ]
def get_minor(m, row, col):
    """
    Return the minor of m at (row, col): a new matrix equal to m with
    `row` and `col` removed. The input matrix is not modified.
    """
    # Idiomatic rewrite of the manual build-and-extend loop: each kept row
    # is the concatenation of its parts before and after `col`.
    return [r[:col] + r[col + 1:] for i, r in enumerate(m) if i != row]
def transpose_and_multiply(m, multiplier=1):
    """Transpose `m` in place across the main diagonal, scaling every entry.

    Mutates and returns `m`; with the default multiplier of 1 this is a
    plain in-place transpose.
    """
    for r in range(len(m)):
        # Only visit the lower triangle (including the diagonal) and swap
        # each pair in one tuple assignment.
        for c in range(r + 1):
            upper = m[c][r] * multiplier
            lower = m[r][c] * multiplier
            m[r][c], m[c][r] = upper, lower
    return m
def array_is_matrix(m):
    """Return True if m is a non-empty list of rows that all share one length."""
    # Idiomatic rewrite: truthiness check plus all() instead of a manual loop.
    if not m:
        return False
    width = len(m[0])
    return all(len(row) == width for row in m)
|
py | 7dfb09eba138ee5255f3331f54ce5e0768ef621f | # -*- coding: utf-8 -*-
"""
# @file name : create_module.py
# @author : tingsongyu
# @date : 2019-09-18 10:08:00
# @brief : 学习模型创建学习
"""
import os
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torch.optim as optim
from matplotlib import pyplot as plt
from model.lenet import LeNet
from tools.my_dataset import RMBDataset
def set_seed(seed=1):
    """Seed the python, NumPy and PyTorch RNGs for reproducible runs.

    Args:
        seed: value applied to every generator.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed every CUDA device (the original seeded only the current one);
    # this is a harmless no-op on CPU-only machines.
    torch.cuda.manual_seed_all(seed)
set_seed()  # fix the random seed for reproducibility
rmb_label = {"1": 0, "100": 1}

# Hyper-parameters
MAX_EPOCH = 10
BATCH_SIZE = 16
LR = 0.01
log_interval = 10
val_interval = 1

# ============================ step 1/5 data ============================

split_dir = os.path.join("..", "..", "data", "rmb_split")
train_dir = os.path.join(split_dir, "train")
valid_dir = os.path.join(split_dir, "valid")

# ImageNet channel statistics used for input normalization.
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]

train_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

valid_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

# Build Dataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# Build DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================

net = LeNet(classes=2)
net.initialize_weights()

# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss()  # choose the loss function

# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)  # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)  # LR decay schedule

# ============================ step 5/5 training ============================
train_curve = list()
valid_curve = list()

for epoch in range(MAX_EPOCH):

    loss_mean = 0.
    correct = 0.
    total = 0.

    net.train()
    for i, data in enumerate(train_loader):

        # forward
        inputs, labels = data
        outputs = net(inputs)

        # backward
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()

        # update weights
        optimizer.step()

        # accumulate classification statistics
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).squeeze().sum().numpy()

        # log training progress
        loss_mean += loss.item()
        train_curve.append(loss.item())
        if (i+1) % log_interval == 0:
            loss_mean = loss_mean / log_interval
            print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total))
            loss_mean = 0.

    scheduler.step()  # update the learning rate

    # validate the model
    if (epoch+1) % val_interval == 0:

        correct_val = 0.
        total_val = 0.
        loss_val = 0.
        net.eval()
        with torch.no_grad():
            for j, data in enumerate(valid_loader):
                inputs, labels = data
                outputs = net(inputs)
                loss = criterion(outputs, labels)

                _, predicted = torch.max(outputs.data, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).squeeze().sum().numpy()

                loss_val += loss.item()

            valid_curve.append(loss_val)
            # BUG FIX: the validation line previously printed the TRAINING
            # accuracy (correct / total); it now uses the validation counters.
            # (Note: loss_val is the summed, not averaged, epoch loss.)
            print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val, correct_val / total_val))

train_x = range(len(train_curve))
train_y = train_curve

train_iters = len(train_loader)
# valid_curve holds one value per epoch; map those points onto the
# per-iteration x-axis used by the training curve.
valid_x = np.arange(1, len(valid_curve)+1) * train_iters*val_interval
valid_y = valid_curve

plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')

plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()

# ============================ inference ============================

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.join(BASE_DIR, "test_data")

test_data = RMBDataset(data_dir=test_dir, transform=valid_transform)
valid_loader = DataLoader(dataset=test_data, batch_size=1)

for i, data in enumerate(valid_loader):
    # forward
    inputs, labels = data
    outputs = net(inputs)
    _, predicted = torch.max(outputs.data, 1)

    rmb = 1 if predicted.numpy()[0] == 0 else 100
    print("模型获得{}元".format(rmb))
|
py | 7dfb0a00fe1307ae8f5b63765d82c97e17404d00 | """List of muted accounts for server process."""
import logging
from time import perf_counter as perf
from urllib.request import urlopen
import ujson as json
log = logging.getLogger(__name__)
def _read_url(url):
    """Fetch `url` and return the raw response body as bytes."""
    # Close the connection deterministically instead of leaking it until GC
    # (the original never closed the urlopen response object).
    with urlopen(url) as response:
        return response.read()
class Mutes:
    """Singleton tracking muted accounts."""

    _instance = None
    # Source URL for the irredeemables list (set per instance in __init__).
    url = None
    accounts = set() # list/irredeemables
    blist = set() # list/any-blacklist
    blist_map = dict() # cached account-list map
    # perf() timestamp of the last successful load; drives the hourly refresh.
    fetched = None

    @classmethod
    def instance(cls):
        """Get the shared instance."""
        assert cls._instance, 'set_shared_instance was never called'
        return cls._instance

    @classmethod
    def set_shared_instance(cls, instance):
        """Set the global/shared instance."""
        cls._instance = instance

    def __init__(self, url):
        """Initialize a muted account list by loading from URL"""
        self.url = url
        if url:
            self.load()

    def load(self):
        """Reload all accounts from irredeemables endpoint and global lists."""
        self.accounts = set(_read_url(self.url).decode('utf8').split())
        #jsn = _read_url('http://blacklist.usesteem.com/blacklists')
        # NOTE(review): the remote blacklist fetch above is disabled, so
        # blist stays empty; it is also rebuilt here as a dict although the
        # class-level default is a set -- confirm which type is intended.
        self.blist = dict() #set(json.loads(jsn))
        self.blist_map = dict()
        log.warning("%d muted, %d blacklisted", len(self.accounts), len(self.blist))
        self.fetched = perf()

    @classmethod
    def all(cls):
        """Return the set of all muted accounts from singleton instance."""
        return cls.instance().accounts

    @classmethod
    def lists(cls, name, rep):
        """Return blacklists the account belongs to."""
        assert name
        inst = cls.instance()

        # update hourly
        if perf() - inst.fetched > 3600:
            inst.load()

        if name not in inst.blist_map:
            out = []
            if name in inst.blist:
                # Lazily fetch this account's blacklist memberships and cache them.
                url = 'http://blacklist.usesteem.com/user/' + name
                lists = json.loads(_read_url(url))
                out.extend(lists['blacklisted'])

            if name in inst.accounts:
                if 'irredeemables' not in out:
                    out.append('irredeemables')

            # Reputation-based pseudo-lists for very low reputation accounts.
            if int(rep) < 1:
                out.append('reputation-0')
            elif int(rep) == 1:
                out.append('reputation-1')

            inst.blist_map[name] = out

        return inst.blist_map[name]
|
py | 7dfb0a2b01aa80dc96b8458966c3a577c133665c | # Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test functions in compute_blue.py."""
import tempfile
import unittest
import compute_bleu
class ComputeBleuTest(unittest.TestCase):
def _create_temp_file(self, text):
w = tempfile.NamedTemporaryFile(delete=False)
w.write(text)
w.close()
return w.name
def test_bleu_same(self):
ref = self._create_temp_file("test 1 two 3\nmore tests!")
hyp = self._create_temp_file("test 1 two 3\nmore tests!")
uncased_score = compute_bleu.bleu_wrapper(ref, hyp, False)
cased_score = compute_bleu.bleu_wrapper(ref, hyp, True)
self.assertEqual(100, uncased_score)
self.assertEqual(100, cased_score)
def test_bleu_same_different_case(self):
ref = self._create_temp_file("Test 1 two 3\nmore tests!")
hyp = self._create_temp_file("test 1 two 3\nMore tests!")
uncased_score = compute_bleu.bleu_wrapper(ref, hyp, False)
cased_score = compute_bleu.bleu_wrapper(ref, hyp, True)
self.assertEqual(100, uncased_score)
self.assertLess(cased_score, 100)
def test_bleu_different(self):
ref = self._create_temp_file("Testing\nmore tests!")
hyp = self._create_temp_file("Dog\nCat")
uncased_score = compute_bleu.bleu_wrapper(ref, hyp, False)
cased_score = compute_bleu.bleu_wrapper(ref, hyp, True)
self.assertLess(uncased_score, 100)
self.assertLess(cased_score, 100)
def test_bleu_tokenize(self):
s = "Test0, 1 two, 3"
tokenized = compute_bleu.bleu_tokenize(s)
self.assertEqual(["Test0", ",", "1", "two", ",", "3"], tokenized)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
py | 7dfb0a858042e15e163b70a4fc4d35133bf93c05 | #!/usr/bin/env python
#
# Copyright 2017 team1@course_bigdata, Saint Joseph's University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module includes view controller class for the query c:
- On average which URL produced the best story in 2010?
"""
# built-in libs
import os
# google/webapp2 libs
import webapp2
from google.appengine.ext.webapp import template
# homemade ones
import hacker_news as hacker
from settings import MAX_RESULT_COUNT
class LowestStoryScore(webapp2.RequestHandler):
    """Handler rendering the stories with the lowest scores (Query B tab)."""

    def post(self):
        # Fetch the result rows plus the total row count from BigQuery.
        rows, count = hacker.get_lowest_story_score()
        template_path = os.path.join(os.path.dirname(__file__), 'index.html')
        context = {
            'active_tab': 'QueryB',
            'total_count': count,
            'values': rows if rows else None
        }
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(template.render(template_path, context))
|
py | 7dfb0b76bd62ebaf268b4cbed03a8dab7c18cb99 | """
Utility class for handling experiment file locations (models, metrics) and finding best and last epochs.
Notes:
This is used inside trainers but can also be used standalone for analyzing results.
"""
import glob
import json
import os
import shutil
import time
from pathlib import Path
from typing import List, Union
import numpy as np
from nntrainer.trainer_configs import BaseTrainerState
from nntrainer.utils import TrainerPathConst
class ExperimentFilesHandler:
    """
    Helper to handle with file locations, metrics etc.

    Args:
        model_type: Experiment type (retrieval, captioning, ...)
        exp_group: Experiment group.
        exp_name: Experiment name.
        run_name: Name of a single run.
        log_dir: Save directory for experiments.
    """

    def __init__(
            self, model_type: str, exp_group: str, exp_name: str, run_name: str, *,
            log_dir: str = TrainerPathConst.DIR_EXPERIMENTS):
        self.exp_group: str = exp_group
        self.exp_name: str = exp_name
        self.run_name: str = run_name
        self.model_type: str = model_type
        # Base layout: log_dir / model_type / exp_group / "{exp_name}_{run_name}"
        self.path_base: Path = Path(log_dir) / self.model_type / self.exp_group / "{}_{}".format(
            self.exp_name, self.run_name)
        self.path_logs = self.path_base / TrainerPathConst.DIR_LOGS
        self.path_models = self.path_base / TrainerPathConst.DIR_MODELS
        self.path_metrics = self.path_base / TrainerPathConst.DIR_METRICS
        self.path_tensorb = self.path_base / TrainerPathConst.DIR_TB
        self.path_embeddings = self.path_base / TrainerPathConst.DIR_EMBEDDINGS

    def setup_dirs(self, *, reset: bool = False) -> None:
        """
        Make sure all directories exist, delete them if a reset is requested.

        Args:
            reset: Delete this experiment.
        """
        if reset:
            # delete base path
            shutil.rmtree(self.path_base, ignore_errors=True)
            time.sleep(0.1)  # this avoids "cannot create dir that exists" on windows
        # create all paths
        for path in self.path_logs, self.path_models, self.path_metrics, self.path_tensorb:
            os.makedirs(path, exist_ok=True)

    def get_existing_checkpoints(self) -> List[int]:
        """
        Get a sorted list of all existing checkpoint (epoch) numbers.

        Returns:
            List of checkpoint numbers.
        """
        # get list of existing trainerstate filenames
        list_of_files = glob.glob(str(self.get_trainerstate_file("*")))
        # extract epoch numbers from those filenames
        ep_nums = sorted([int(a.split(f"{TrainerPathConst.FILE_PREFIX_TRAINERSTATE}_")[-1].split(".json")[0])
                          for a in list_of_files])
        return ep_nums

    def find_best_epoch(self) -> int:
        """
        Find the best epoch out of existing checkpoint data.

        Returns:
            Best epoch or -1 if no epochs are found.
        """
        ep_nums = self.get_existing_checkpoints()
        if len(ep_nums) == 0:
            # no checkpoints found
            return -1

        # read trainerstate of the last epoch (contains all info needed to find the best epoch)
        temp_state = BaseTrainerState.create_from_file(self.get_trainerstate_file(ep_nums[-1]))

        if len(temp_state.infos_val_epochs) == 0:
            # no validation has been done, assume last epoch is best
            return ep_nums[-1]

        # read the flags for each epoch that state whether that was a good or bad epoch
        # the last good epoch is the best one
        where_res = np.where(temp_state.infos_val_is_good)[0]
        best_idx = where_res[-1]
        best_epoch = temp_state.infos_val_epochs[best_idx]
        return best_epoch

    def find_last_epoch(self) -> int:
        """
        Find the last epoch out of existing checkpoint data.

        Returns:
            Last epoch or -1 if no epochs are found.
        """
        ep_nums = self.get_existing_checkpoints()
        if len(ep_nums) == 0:
            # no checkpoints found
            return -1
        # return last epoch
        return ep_nums[-1]

    def get_existing_metrics(self) -> List[int]:
        """
        Get a sorted list of checkpoint numbers that have epoch metrics on disk.

        Returns:
            List of checkpoint numbers.
        """
        # get list of existing epoch-metrics filenames
        list_of_files = glob.glob(str(self.get_metrics_epoch_file("*")))

        # extract epoch numbers from those filenames
        ep_nums = sorted([int(a.split(f"{TrainerPathConst.FILE_PREFIX_METRICS_EPOCH}_")[-1].split(".json")[0])
                          for a in list_of_files])
        return ep_nums

    # ---------- File definitions. ----------
    # Parameter epoch allows str to create glob filenames with "*".

    def get_models_file(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing the model.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_models / f"{TrainerPathConst.FILE_PREFIX_MODEL}_{epoch}.pth"

    def get_models_file_ema(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing the model EMA weights.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_models / f"{TrainerPathConst.FILE_PREFIX_MODELEMA}_{epoch}.pth"

    def get_optimizer_file(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing the optimizer state.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_models / f"{TrainerPathConst.FILE_PREFIX_OPTIMIZER}_{epoch}.pth"

    def get_data_file(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing additional run data for the given epoch.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_models / f"{TrainerPathConst.FILE_PREFIX_DATA}_{epoch}.pth"

    def get_trainerstate_file(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing the state of the trainer. This is needed for correctly resuming training.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_models / f"{TrainerPathConst.FILE_PREFIX_TRAINERSTATE}_{epoch}.json"

    def get_metrics_step_file(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing step-based metrics.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_metrics / f"{TrainerPathConst.FILE_PREFIX_METRICS_STEP}_{epoch}.json"

    def get_metrics_epoch_file(self, epoch: Union[int, str]) -> Path:
        """
        Get file path for storing epoch-based metrics.

        Args:
            epoch: Epoch.

        Returns:
            Path
        """
        return self.path_metrics / f"{TrainerPathConst.FILE_PREFIX_METRICS_EPOCH}_{epoch}.json"

    def get_profile_file(self) -> Path:
        """
        Load profiling results for this experiment, if present.

        NOTE(review): despite the name and the declared Path return type,
        this returns the PARSED JSON content of profiles/<group>/<name>.json
        when that file exists, and None otherwise -- confirm intent.
        The file handle opened here is left for the GC to close.

        Returns:
            Parsed profile data, or None if no profile file exists.
        """
        profile_dir = Path("profiles") / self.exp_group
        pro_file = profile_dir / (self.exp_name + ".json")
        if pro_file.is_file():
            return json.load(pro_file.open("rt", encoding="utf8"))
        return None
|
py | 7dfb0c3927609b4259e6a139532d2ecbe9019e36 | # Copyright 2021 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import multiprocessing
import time
from datetime import datetime, timedelta
from multiprocessing import Process
from sys import platform
from typing import List
import pandas as pd
import pytest
from _pytest.nodes import Item
from feast import FeatureStore
from tests.data.data_creator import create_dataset
from tests.integration.feature_repos.integration_test_repo_config import (
IntegrationTestRepoConfig,
)
from tests.integration.feature_repos.repo_configuration import (
FULL_REPO_CONFIGS,
GO_CYCLE_REPO_CONFIGS,
GO_REPO_CONFIGS,
REDIS_CLUSTER_CONFIG,
REDIS_CONFIG,
Environment,
TestData,
construct_test_environment,
construct_universal_test_data,
)
logger = logging.getLogger(__name__)
def pytest_configure(config):
    """Register custom pytest markers and pick a multiprocessing start method.

    Bug fix: ``sys.platform`` is ``"win32"`` on Windows (never ``"windows"``),
    so the original comparison sent Windows into the ``fork`` branch, and
    fork is not available there.
    """
    if platform in ["darwin", "win32"]:
        # fork is unavailable on Windows and unsafe on macOS -> use spawn.
        multiprocessing.set_start_method("spawn")
    else:
        multiprocessing.set_start_method("fork")
    config.addinivalue_line(
        "markers", "integration: mark test that has external dependencies"
    )
    config.addinivalue_line("markers", "benchmark: mark benchmarking tests")
    config.addinivalue_line(
        "markers", "universal: mark tests that use the universal feature repo"
    )
    config.addinivalue_line(
        "markers", "goserver: mark tests that use the go feature server"
    )
    config.addinivalue_line(
        "markers", "goserverlifecycle: mark tests that use the go feature server"
    )
def pytest_addoption(parser):
    """Declare the command-line flags that gate the optional test groups."""
    flag_help = [
        ("--integration", "Run tests with external dependencies"),
        ("--benchmark", "Run benchmark tests"),
        ("--universal", "Run universal tests"),
        ("--goserver", "Run tests that use the go feature server"),
        ("--goserverlifecycle", "Run tests on go feature server lifecycle"),
    ]
    # All five options are plain boolean switches that default to off.
    for flag, help_text in flag_help:
        parser.addoption(flag, action="store_true", default=False, help=help_text)
def pytest_collection_modifyitems(config, items: List[Item]):
    """Filter the collected tests according to the custom command-line flags.

    "integration" and "benchmark" act as two-way gates: without their flag
    those tests are removed, with it ONLY those tests are kept.  The remaining
    markers only narrow the selection when their flag is present.
    """

    def marked(marker: str):
        return [item for item in items if marker in item.keywords]

    def keep_only(selected) -> None:
        items.clear()
        items.extend(selected)

    for marker in ("integration", "benchmark"):
        selected = marked(marker)
        if config.getoption(f"--{marker}") is True:
            keep_only(selected)
        else:
            for item in selected:
                items.remove(item)

    for marker in ("universal", "goserver", "goserverlifecycle"):
        if config.getoption(f"--{marker}") is True:
            keep_only(marked(marker))
@pytest.fixture
def simple_dataset_1() -> pd.DataFrame:
    """Five sample rows keyed by integer join keys.

    Timestamps are millisecond-rounded; the first row is stamped "now" and
    the rest trail it by 4, 3, 2 and 1 hours respectively.
    """
    ts = pd.Timestamp(datetime.utcnow()).round("ms")
    hour_offsets = [0, 4, 3, 2, 1]
    return pd.DataFrame.from_dict(
        {
            "id_join_key": [1, 2, 1, 3, 3],
            "float_col": [0.1, 0.2, 0.3, 4, 5],
            "int64_col": [1, 2, 3, 4, 5],
            "string_col": ["a", "b", "c", "d", "e"],
            "ts_1": [ts - timedelta(hours=h) for h in hour_offsets],
        }
    )
@pytest.fixture
def simple_dataset_2() -> pd.DataFrame:
    """Five sample rows keyed by string join keys.

    Identical layout to simple_dataset_1 except that the join key column
    contains strings instead of integers.
    """
    ts = pd.Timestamp(datetime.utcnow()).round("ms")
    hour_offsets = [0, 4, 3, 2, 1]
    return pd.DataFrame.from_dict(
        {
            "id_join_key": ["a", "b", "c", "d", "e"],
            "float_col": [0.1, 0.2, 0.3, 4, 5],
            "int64_col": [1, 2, 3, 4, 5],
            "string_col": ["a", "b", "c", "d", "e"],
            "ts_1": [ts - timedelta(hours=h) for h in hour_offsets],
        }
    )
def start_test_local_server(repo_path: str, port: int):
    """Child-process entry point that serves the feature store over HTTP.

    Blocks forever in serve(); intended to be launched via
    multiprocessing.Process(daemon=True) and killed by the parent fixture's
    finalizer (see the `environment` fixture below).
    """
    fs = FeatureStore(repo_path)
    fs.serve("localhost", port, no_access_log=True)
@pytest.fixture(
    params=FULL_REPO_CONFIGS, scope="session", ids=[str(c) for c in FULL_REPO_CONFIGS]
)
def environment(request, worker_id: str):
    """Session-scoped test environment, parametrized over FULL_REPO_CONFIGS.

    When the python feature server is enabled on a local provider, a server
    subprocess is started and given a moment to come up.  The finalizer tears
    the store down and kills the subprocess if it is still alive.
    """
    env = construct_test_environment(request.param, worker_id=worker_id)
    server_proc = Process(
        target=start_test_local_server,
        args=(env.feature_store.repo_path, env.get_local_server_port()),
        daemon=True,
    )
    if env.python_feature_server and env.test_repo_config.provider == "local":
        server_proc.start()
        time.sleep(3)  # give the server a moment to bind before tests hit it

    def cleanup():
        env.feature_store.teardown()
        if server_proc.is_alive():
            server_proc.kill()

    request.addfinalizer(cleanup)
    return env
@pytest.fixture(
    params=GO_REPO_CONFIGS, scope="session", ids=[str(c) for c in GO_REPO_CONFIGS]
)
def go_environment(request, worker_id: str):
    """Session-scoped environment for tests that exercise the go feature server."""
    env = construct_test_environment(request.param, worker_id=worker_id)

    def cleanup():
        env.feature_store.teardown()
        # The go server runs out-of-process; make sure it does not outlive the session.
        if env.feature_store._go_server:
            env.feature_store._go_server.kill_go_server_explicitly()

    request.addfinalizer(cleanup)
    return env
@pytest.fixture(
    params=GO_CYCLE_REPO_CONFIGS,
    scope="session",
    ids=[str(c) for c in GO_CYCLE_REPO_CONFIGS],
)
def go_cycle_environment(request, worker_id: str):
    """Session-scoped environment for go feature server lifecycle tests."""
    env = construct_test_environment(request.param, worker_id=worker_id)
    request.addfinalizer(lambda: env.feature_store.teardown())
    return env
@pytest.fixture(
    params=[REDIS_CONFIG, REDIS_CLUSTER_CONFIG],
    scope="session",
    ids=[str(c) for c in [REDIS_CONFIG, REDIS_CLUSTER_CONFIG]],
)
def local_redis_environment(request, worker_id):
    """Session-scoped environment backed by a local redis / redis-cluster online store."""
    env = construct_test_environment(
        IntegrationTestRepoConfig(online_store=request.param), worker_id=worker_id
    )
    request.addfinalizer(lambda: env.feature_store.teardown())
    return env
@pytest.fixture(scope="session")
def universal_data_sources(request, environment) -> TestData:
    """Universal test data built on top of the parametrized `environment` fixture."""
    request.addfinalizer(lambda: environment.data_source_creator.teardown())
    return construct_universal_test_data(environment)
@pytest.fixture(scope="session")
def redis_universal_data_sources(request, local_redis_environment):
    """Universal test data built on top of the local redis environment."""
    request.addfinalizer(lambda: local_redis_environment.data_source_creator.teardown())
    return construct_universal_test_data(local_redis_environment)
@pytest.fixture(scope="session")
def go_data_sources(request, go_environment):
    """Universal test data built on top of the go feature server environment."""
    request.addfinalizer(lambda: go_environment.data_source_creator.teardown())
    return construct_universal_test_data(go_environment)
@pytest.fixture(scope="session")
def e2e_data_sources(environment: Environment, request):
    """A (dataframe, data source) pair for end-to-end tests.

    The frame's "ts_1" column is mapped onto the data source's "ts" field.
    """
    df = create_dataset()
    source = environment.data_source_creator.create_data_source(
        df, environment.feature_store.project, field_mapping={"ts_1": "ts"},
    )
    request.addfinalizer(lambda: environment.data_source_creator.teardown())
    return df, source
|
py | 7dfb0cf84fd8e5753ba7b4f79d22e0c5e29ec787 | import tesuract
import unittest
import numpy as np
import warnings, pdb
from time import time
import pytest
import sklearn
from sklearn.datasets import make_friedman1
from sklearn.model_selection import cross_val_score
# Package root: strip the trailing "__init__.py" (11 characters) from the module
# path so test data files can be located inside the installed package.
relpath = tesuract.__file__[:-11]  # ignore the __init__.py specification
print(relpath)
# Silence sklearn's ConvergenceWarning chatter from the CV fitters used below.
from warnings import simplefilter
from sklearn.exceptions import ConvergenceWarning
simplefilter("ignore", category=ConvergenceWarning)
# regression test for multi output pca regressor
@pytest.mark.regression
class TestRegressionWrapper(unittest.TestCase):
    """Regression tests for the scalar PCE regressor and RegressionWrapperCV tuner."""

    @classmethod
    def setUpClass(cls):
        # One shared Friedman #1 benchmark data set for every test in the class.
        cls.X, cls.y = make_friedman1(n_samples=100, n_features=6, random_state=1239)

    def test_simple_pce_scalar_fit(self):
        print("Fitting 8th order polynomial...")
        model = tesuract.PCEReg(order=6)
        model.fit(self.X, self.y)
        print("done fitting!")

    def test_cross_val_score_of_pce_reg_class(self):
        # Plain sklearn cross-validation over the PCE estimator.
        estimator = tesuract.PCEReg(order=7)
        score = cross_val_score(
            estimator, self.X, self.y, scoring="r2", verbose=1, n_jobs=1
        ).mean()
        self.pce_score_1 = score
        print("PCE score is {0:.3f}".format(score))

    def test_mregcv_interface_w_single_parameter_choice(self):
        t0 = time()
        grid = {
            "order": 6,
            "mindex_type": "total_order",
            "fit_type": "ElasticNetCV",
        }
        # A degenerate "grid" with one fixed choice still goes through the tuner.
        wrapper = tesuract.RegressionWrapperCV(
            regressor="pce", reg_params=grid, n_jobs=1, scorer="r2", verbose=1
        )
        wrapper.fit(self.X, self.y)
        print("Hyper-parameter CV PCE score is {0:.3f}".format(wrapper.best_score_))
        print("Total time is ", time() - t0)

    def test_mregcv_w_simple_parameter_grid(self):
        t0 = time()
        grid = {
            "order": list(range(1, 8)),
            "mindex_type": ["total_order"],
            "fit_type": ["linear", "LassoCV"],
        }
        # Hyper-parameter tune the PCE regression class across the small grid.
        wrapper = tesuract.RegressionWrapperCV(
            regressor="pce", reg_params=grid, n_jobs=4, scorer="r2", verbose=1
        )
        wrapper.fit(self.X, self.y)
        print("Hyper-parameter CV PCE score is {0:.3f}".format(wrapper.best_score_))
        print("Total time is ", time() - t0)

    def test_mregcv_w_advanced_param_grid(self):
        t0 = time()
        grid = [
            {
                "order": list(range(1, 8)),
                "mindex_type": ["total_order"],
                "fit_type": ["linear", "ElasticNetCV"],
                "fit_params": [
                    {"alphas": np.logspace(-8, 4, 20), "max_iter": 10000, "tol": 1e-4}
                ],
            },
            {
                "order": list(range(1, 8)),
                "mindex_type": ["total_order", "hyperbolic"],
                "fit_type": ["LassoCV"],
                "fit_params": [
                    {"alphas": np.logspace(-8, 4, 20), "max_iter": 10000, "tol": 1e-4}
                ],
            },
        ]
        wrapper = tesuract.RegressionWrapperCV(
            regressor="pce", reg_params=grid, n_jobs=-1, scorer="r2", verbose=1
        )
        wrapper.fit(self.X, self.y)
        print("Hyper-parameter CV PCE score is {0:.3f}".format(wrapper.best_score_))
        print("Total time is ", time() - t0)
# regression test for multi output pca regressor
@pytest.mark.regression
class TestMRegressionWrapper(unittest.TestCase):
    """Regression tests for the multi-output (PCA target transform) regression wrapper."""

    @classmethod
    def setUpClass(self):
        # ESM training data shipped with the package under tests/data/.
        self.X = np.loadtxt(relpath + "/tests/data/X_train_ESM.txt")
        self.Y = np.loadtxt(relpath + "/tests/data/Y_train_ESM.txt")

    def test_mregcv_with_esm_data(self):
        # Minimal grid: first-order PCE with a plain linear fit.
        pce_grid = [
            {
                "order": list(range(1, 2)),
                "mindex_type": ["total_order"],
                "fit_type": ["linear"],
                "fit_params": [
                    {"alphas": np.logspace(-8, 4, 10), "max_iter": 10000, "tol": 1e-1}
                ],
            }
        ]
        # Targets are compressed to two whitened principal components.
        target_transform = tesuract.preprocessing.PCATargetTransform(
            n_components=2,
            whiten=True,
            exp_var_cutoff=0.5,
        )
        regmodel = tesuract.MRegressionWrapperCV(
            regressor="pce",
            reg_params=pce_grid,
            target_transform=target_transform,
            target_transform_params={},
            n_jobs=-1,
            verbose=1,
        )
        regmodel.fit(self.X, self.Y)

    def test_mregcv_with_list_regressor_initiation(self):
        # Same as above, but regressor/reg_params are passed as one-element lists.
        pce_grid = [
            {
                "order": list(range(1, 2)),
                "mindex_type": ["total_order"],
                "fit_type": ["linear"],
            }
        ]
        target_transform = tesuract.preprocessing.PCATargetTransform(
            n_components=2,
            whiten=True,
            exp_var_cutoff=0.5,
        )
        regmodel = tesuract.MRegressionWrapperCV(
            regressor=["pce"],
            reg_params=[pce_grid],
            target_transform=target_transform,
            target_transform_params={},
            n_jobs=-1,
            verbose=1,
        )
        regmodel.fit(self.X, self.Y)

    def test_mregcv_with_auto_pca_target_transform_w_cv_score(self):
        # n_components="auto" lets the transform pick the component count from
        # the explained-variance cutoff.
        pce_grid = [
            {
                "order": list(range(1, 2)),
                "mindex_type": ["total_order"],
                "fit_type": ["LassoCV"],
            }
        ]
        target_transform = tesuract.preprocessing.PCATargetTransform(
            n_components="auto",
            whiten=True,
            exp_var_cutoff=0.5,
        )
        regmodel = tesuract.MRegressionWrapperCV(
            regressor="pce",
            reg_params=pce_grid,
            target_transform=target_transform,
            target_transform_params={},
            n_jobs=-1,
            verbose=1,
        )
        regmodel.fit(self.X, self.Y)
        # Clone and compute the cv score of full model
        cv_score, surrogate_clone = regmodel.compute_cv_score(
            X=self.X, y=self.Y, scoring="r2"
        )
        print("Mean CV score:", cv_score)
|
py | 7dfb0f239b61bd4feb3b7884adf0f22ed9e5aea2 | """
A module listing useful classes and errors that can be used in future programs
Also is used as BMPY template code
"""
import abc
import inspect
import math
import random
import sys
from typing import Union, Any, Optional, Callable, Type
class ExecutionError(Exception):
    """Raised from inside exec()'d code purely as a signal to abort the exec at that point."""
class UnderflowError(Exception):
    """The counterpart of the built-in OverflowError, for values that become too small."""
class LengthError(Exception):
    """Raised when a length is invalid (a more specific alternative to ValueError)."""
class OccurrenceError(Exception):
    """Raised when a piece of data occurs too many or too few times."""
class NumError(Exception):
    """A numeric-specific replacement for ValueError."""
class ArgumentError(Exception):
    """Raised when an invalid argument is supplied."""
class SizeError(Exception):
    """A two-dimensional counterpart of LengthError, for invalid sizes/shapes."""
class Constants:
    """
    A namespace class of useful constants (never instantiated)

    Class variables defined below:
        lows ([str]): Lowercase letters a-z
        ups ([str]): Uppercase letters A-Z
        symbols ([str]): Common keyboard symbols
        digits ([int]): The single digits 0-9
        e (float): Euler's constant (equivalent to Exp().GetValue())
        pi (float): The circle constant (equivalent to Pi().GetValue())

    Instance variables defined below:
    """
    # Derive the alphabet from character codes instead of spelling it out.
    lows = [chr(code) for code in range(ord("a"), ord("z") + 1)]
    ups = [letter.upper() for letter in lows]
    symbols = ["!", "£", "$", "%", "^", "&", "*", "(", ")", "_", "+", "-", "=", "¬", "`", "|", "\\", "\"", "'", ";", ":", "[",
               "]", "{", "}", "#", "~", "/", "?", ".", ">", "<", ","]
    digits = list(range(10))
    e = math.e
    pi = math.pi
class Object(abc.ABC):
    """
    An **abstract** base class to represent a mathematical expression ('pi' expression, 'e' expression, surd, or
    trigonometric expression)

    Class variables defined below:
        _usages ([[str, ...], ...]) (protected): How the constructor can be called (assigned by subclasses)
        _max_arg_length (int) (protected): The maximum number of arguments the constructor can take
        _min_arg_length (int) (protected): The minimum number of arguments the constructor can take
        _default_values ({str: int or None}) (protected): The default values for each instance variable

    Instance variables defined below:
        mul (int or float or Object or Fraction) (default 1): The multiplier of the Object
        add (int or float or Object or Fraction) (default 0): Any value added on the end of the Object
        power (int) (default 1): The power the Object is being raised to before multiplication or addition
        display (bool): Whether to display the full Object
    """
    # NOTE: these are annotations only; each concrete subclass must assign them.
    _usages: list[list[str]]
    _max_arg_length: int
    _min_arg_length: int
    _default_values: dict[str, Optional[int]]

    @abc.abstractmethod
    def __init__(self, *args: Union[int, float, "Object", "Fraction"], **values: Union[int, float, "Object", "Fraction"]):
        """
        An **abstract** method to construct necessary arguments for subclasses
        :param args: int or float or Object or Fraction (multi-value) => The values used for subclasses instance variables
        :param values: int or float or Object or Fraction (multi-value, called by reference) => Any overwritten values (used so that a
        single instance variable can be set without adding values for all others)
        :raises TypeError: If type of any value is incorrect (not in Types()) or power isn't an integer
        :raises ValueError: If usage is incorrect or key in values already has been set
        :raises AttributeError: If a key in values isn't an instance variable of the subclass
        """
        self.mul: Union[int, float, Object, Fraction] = 1
        self.add: Union[int, float, Object, Fraction] = 0
        self.power: int = 1
        self.display: bool = True
        Object.typeCheck(*list(args), *list(values.values()))
        for instancevar in values:
            if instancevar not in type(self)._usages[-1]:
                raise AttributeError(f"Unknown attribute '{instancevar}'")
        self._argLengthCheck(args)
        # Map positional args onto attribute names via the usage whose arity matches,
        # falling back to subclass defaults for anything not supplied.
        kwargs = {}
        for usage in type(self)._usages:
            if len(usage) == len(args):
                for varname, varvalue in zip(usage, args):
                    kwargs[varname] = varvalue
            else:
                for varname in usage:
                    if kwargs.get(varname) is None:
                        kwargs[varname] = type(self)._default_values[varname]
        for attr, value in kwargs.items():
            setattr(self, attr, value)
        # Keyword overrides are only legal for attributes still holding their default.
        for attr in type(self)._usages[-1]:
            if values.get(attr) is None:
                continue
            value = values[attr]
            if getattr(self, attr) != type(self)._default_values[attr]:
                raise ValueError(f"Cannot override previously given argument '{attr}'")
            setattr(self, attr, value)
        if not isinstance(self.power, int):
            raise TypeError("Cannot raise to a non-integer power")

    @abc.abstractmethod
    def __str__(self) -> Union[str, tuple[str, str, str]]:
        """
        An **abstract** method for string conversion of Object's subclasses
        :return: str => Used when full value cannot be displayed. (str,str,str) => Used when full value can be
        displayed: the (mul, add, power) pieces, in that order, for the subclass to assemble.
        """
        if self.mul == 0:
            return str(self.add)
        if not self.display:
            val = self.mul + self.add
            if int(val) == val:
                val = int(val)
            return str(val)
        mul = f"{self.mul:-}" if self.mul != 1 else ""
        add = f"{self.add:+}" if self.add != 0 else ""
        power = f"^{self.power:-}" if self.power != 1 else ""
        return mul, add, power

    def __repr__(self) -> str:
        """
        Method to return a string representation of the Object
        :return: str => The evaluateable representation
        """
        return f"{type(self).__name__}({','.join([repr(getattr(self, attr)) for attr in type(self)._usages[-1]])})"

    def __int__(self) -> int:
        """
        Method to return an integer version of the Object
        :return: int => The integer version of the value of the Object
        """
        return int(self.GetValue())

    def __float__(self) -> float:
        """
        Method to return a floating point version of the Object
        :return: float => The decimal version of the value of the Object
        """
        return self.GetValue()

    def __add__(self, other: Union[int, float, "Object", "Fraction"]) -> "Object":
        """
        A method to add an Object and another value (Object+value)
        :param other: int or float or Object or Fraction => The value to add
        :return: Object => The subclass used, with the values altered
        """
        Object.typeCheck(other)
        return self._Op("+", other, "add")

    def __radd__(self, other: Union[int, float, "Object", "Fraction"]) -> "Object":
        """
        A method to add an Object and another value (value+Object)
        :param other: int or float or Object or Fraction => The value to add
        :return: Object => The subclass used, with the values altered
        """
        return self + other

    def __sub__(self, other: Union[int, float, "Object", "Fraction"]) -> "Object":
        """
        A method to subtract a value from an Object (Object-value)
        :param other: int or float or Object or Fraction => The value to subtract
        :return: Object => The subclass used, with the values altered
        """
        Object.typeCheck(other)
        return self + (-other)

    def __rsub__(self, other: Union[int, float, "Object", "Fraction"]) -> "Object":
        """
        A method to subtract an Object from another value (value-Object)
        :param other: int or float or Object or Fraction => The value to subtract from
        :return: Object => The subclass used, with the values altered
        """
        return -(self - other)

    @abc.abstractmethod
    def __mul__(self, other: Union[int, float, "Object", "Fraction"]) -> "Object":
        """
        An **abstract** method to multiply an Object and another value (Object*value)
        :param other: int or float or Object or Fraction => The value to multiply
        :return: Object => The subclass used, with the values altered
        """
        Object.typeCheck(other)
        if not isinstance(other, Object):
            return other * self
        if (self.add == 0 and other.add != 0) or (self.add != 0 and other.add == 0):
            return other * self
        # Expand (a+b)(c+d) FOIL-style; sum() starts from 0 and uses __radd__.
        First = (self - self.add) * (other - other.add)
        Outer = (self - self.add) * (other)
        Inner = (self) * (other - other.add)
        Last = (self.add) * (other.add)
        FOIL = First, Outer, Inner, Last
        return sum(FOIL)

    def __rmul__(self, other: Union[int, float, "Object", "Fraction"]) -> "Object":
        """
        A method to multiply an Object and another value (value*Object)
        :param other: int or float or Object or Fraction => The value to multiply
        :return: Object => The subclass used, with the values altered
        """
        Object.typeCheck(other)
        if isinstance(other, Object):
            return self * other
        return self._Op("*", other, "add", "mul")

    def __truediv__(self, other: Union[int, float, "Object", "Fraction"]) -> "Fraction":
        """
        A method to divide an Object and another value (Object/value)
        :param other: int or float or Object or Fraction => The value to divide by
        :return: Fraction => The division
        """
        Object.typeCheck(other)
        return Fraction(self, other)

    def __rtruediv__(self, other: Union[int, float, "Object", "Fraction"]) -> "Fraction":
        """
        A method to divide another value by an Object (value/Object)
        :param other: int or float or Object or Fraction => The value that is being divided by
        :return: Fraction => The division
        """
        return (self / other).Reciprocal()

    def __pow__(self, power: Union[int, float, "Fraction"], modulo: Optional[int] = None) -> Union["Object", float]:
        """
        A method to raise an Object by a power (Object**power)
        :param power: int or float or Fraction => The power to raise it to
        :param modulo: int or None => The value to mod it by afterwards
        :return: Object or float => An Object if the modulo is None, or a float if the modulo isn't None
        """
        if modulo is None:
            Object.typeCheck(power)
            if isinstance(power, Fraction):
                return Surd(self, power = power.denominator) ** power.numerator
            elif isinstance(power, float):
                return self ** Fraction(*Fraction.Find(power))
            # NOTE(review): for integer power <= 0 the loop body never runs and
            # self is returned unchanged — confirm whether that is intended.
            value = self
            for x in range(power - 1):
                value *= self
            return value
        return self.__pow__(power) % modulo

    def __rpow__(self, other: Union[int, float, "Object", "Fraction"]) -> None:
        """
        A method to raise something to the power of an Object (power**Object)
        :param other: int or float or Object or Fraction => The number that isn't the power
        :raise: TypeError => If called
        """
        raise TypeError(f"Cannot raise to power of type '{type(self).__name__}'")

    def __mod__(self, other: int) -> float:
        """
        A method to modulo an Object by a value (Object%value)
        :param other: int => The value
        :return: float => the value of the Object modded by the value
        """
        return self.GetValue() % other

    def __rmod__(self, other: Union[int, float, "Object", "Fraction"]) -> None:
        """
        A method to modulo by an Object (value%Object)
        :param other: int or float or Object or Fraction => The number that isn't the modulo
        :raise: TypeError => If called
        """
        raise TypeError(f"Cannot modulo by type '{type(self).__name__}'")

    def __floordiv__(self, other: Union[int, float, "Object", "Fraction"]) -> int:
        """
        A method to use the floor division of an Object by another value (Object//value)
        :param other: int or float or Object or Fraction => The value to divide by
        :return: int => the division rounded down
        """
        return int(self / other)

    def __rfloordiv__(self, other: Union[int, float, "Object", "Fraction"]) -> int:
        """
        A method to use the floor division of another value by an Object (value//Object)
        :param other: int or float or Object or Fraction => The value that is being divided by
        :return: int => The division rounded down
        """
        return int(other / self)

    def __eq__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
        """
        A method to equate an Object and a value
        :param other: int or float or Object or Fraction => The value to compare to
        :return: bool => Whether they're equal
        """
        if isinstance(other, (Fraction, Object)):
            return self.GetValue() == other.GetValue()
        return self.GetValue() == other

    def __ne__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
        """
        A method to inequate an Object and a value
        :param other: int or float or Object or Fraction => The value to compare to
        :return: bool => Whether they're unequal
        """
        return not self == other

    def __lt__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
        """
        A method to compare an Object and a value by less than (Object < value)
        :param other: int or float or Object or Fraction => The value to compare to
        :return: bool => Whether the Object is less than the value
        """
        if isinstance(other, (Fraction, Object)):
            return self.GetValue() < other.GetValue()
        return self.GetValue() < other

    def __gt__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
        """
        A method to compare an Object and a value by greater than (Object > value)
        :param other: int or float or Object or Fraction => The value to compare to
        :return: bool => Whether the Object is greater than the value
        """
        if isinstance(other, (Fraction, Object)):
            return self.GetValue() > other.GetValue()
        return self.GetValue() > other

    def __le__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
        """
        A method to compare an Object and a value by less than or equal to (Object <= value)
        :param other: int or float or Object or Fraction => The value to compare to
        :return: bool => Whether the Object is less than the value or equal to it
        """
        return not self > other

    def __ge__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
        """
        A method to compare an Object and a value by greater than or equal to (Object >= value)
        :param other: int or float or Object or Fraction => The value to compare to
        :return: bool => Whether the Object is greater than the value or equal to it
        """
        return not self < other

    def __abs__(self) -> "Object":
        """
        A method to return the absolute value of an Object
        :return: Object => The positive value of the Object
        """
        # Removed a leftover debugging print that leaked to stdout on every
        # abs() of a negative Object.
        if self < 0:
            return -self
        # _Op("*", 1) with no keys is an identity operation, i.e. a copy.
        return self._Op("*", 1)

    def __neg__(self) -> "Object":
        """
        A method to return the negated value of an Object
        :return: Object => The Object with it's sign flipped
        """
        return -1 * self

    @abc.abstractmethod
    def GetValue(self) -> tuple[float, float]:
        """
        An **abstract** method to get the value of the Object
        :return: (float, float) => The multiple of the Object, and any value added on the end. Both of these are converted to decimals
        """
        mul = self.mul.GetValue() if isinstance(self.mul, (Object, Fraction)) else float(self.mul)
        add = self.add.GetValue() if isinstance(self.add, (Object, Fraction)) else float(self.add)
        return mul, add

    def Conjugate(self) -> "Object":
        """
        A method to return the conjugate of an Object (an Object where the additive value is negated)
        :return: Object => The subclass used with the add negated
        """
        return self._Op("*", -1, "add")

    def conjugate(self) -> None:
        """A method to transform the Object into its conjugate"""
        self.add *= -1

    def _Op(self, op: str, other: Union[int, float, "Object", "Fraction"], *keys: str) -> "Object":
        """
        A method to find every value of the Object, then perform an operation on certain instance variables
        (Can be used as a copy if op is '*', other is 1, and keys is unspecified)
        :param op: str => The operation to perform
        :param other: int or float or Object or Fraction => The value to perform the operation by
        :param keys: str (multi-value) => The instance variables to perform the operation on
        :return: Object => A new Object where the values have been altered
        """
        args = {}
        for var in type(self)._usages[-1]:
            args[var] = getattr(self, var)
        for key in keys:
            args[key] = eval(f"{args[key]} {op} {other}")
        return type(self)(*args.values())

    def _argLengthCheck(self, args: tuple) -> None:
        """
        A method to check the length of any argument given
        :param args: (Any) => The tuple to check
        :raise ValueError: If length isn't between class variable '_min_arg_length' and '_max_arg_length'
        """
        if not (type(self)._min_arg_length <= len(args) <= type(self)._max_arg_length):
            error = "\n"
            for usage in type(self)._usages:
                # BUG FIX: was type(self.__name__), which is always <class 'str'>;
                # the message should show the subclass name.
                error += f"\t{type(self).__name__}({','.join(usage)})\n"
            raise ValueError(f"Available usages: {error}")

    @staticmethod
    def Types() -> tuple[type, type, type, type]:
        """
        A method to return the allowed types
        :return (type, type, type, type): the types an Object deems 'numeric'
        """
        return (int, float, Object, Fraction)

    @staticmethod
    def Usage() -> list[list[str]]:
        """
        A method to return the class variable '_usages' (as it's protected)

        NOTE(review): Object._usages is only annotated, never assigned, so calling
        this on the base class raises AttributeError — confirm whether it was
        meant to read the calling subclass's _usages instead.
        :return [[str,]]: the class variable '_usages'
        """
        return Object._usages

    @staticmethod
    def typeCheck(*checks: Any) -> None:
        """
        A method to check the type of any argument given
        :param checks: Any (multi-value) => The operation to perform
        :raise TypeError: If types aren't in Types()
        """
        for arg in checks:
            if not isinstance(arg, Object.Types()):
                types = [ty.__name__ for ty in Object.Types()]
                raise TypeError(f"Expected types ({','.join(types)}), got type '{type(arg).__name__}'")
class Surd(Object):
    """
    A class to represent a Surd (a root that doesn't cancel nicely, such as root 2),
    i.e. mul * (power-th root of root) + add

    Class variables defined below:

    Instance variables defined below:
        root (int or float or Fraction or Object): The value inside the Surd

    See help(Object) for more information on class and instance variables
    """
    _usages = [
        ["root"],
        ["mul", "root"],
        ["mul", "root", "add"],
        ["mul", "root", "add", "power"]
    ]
    _max_arg_length = 4
    _min_arg_length = 1
    # power defaults to 2, i.e. a square root
    _default_values = {"mul":1, "root":None, "add":0, "power":2}

    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]):
        """
        A method to construct necessary arguments
        :param args: int or float or Object or Fraction (multi-value) => The values used for instance variables
        :param values: int or float or Object or Fraction (multi-value, called by reference) => Any overwritten values (used so that a
        single instance variable can be set without adding values for all others)
        :raises ValueError: If root is negative or power is less than 2
        """
        self.root: Union[int, float, Object, Fraction] = 0
        super().__init__(*args, **values)
        if self.root < 0:
            raise ValueError("Cannot take the square root of a negative number")
        if self.power < 1:
            raise ValueError("Cannot have negative root power")
        if self.power == 1:
            raise ValueError("Indeterminate root power")
        # root 1 collapses to a plain number, so the full surd form isn't shown
        self.display = self.root != 1
        self.simplify()

    def __str__(self) -> str:
        """
        Converts surd to a string
        :return: str => a neatly formatted string containing all necessary information
        """
        trial = super().__str__()
        # Object.__str__ returns a plain string when the full form can't be shown
        if not isinstance(trial, tuple):
            return trial
        mul, add, _ = trial
        # the root's own power is shown before the radical; 2 (square root) is implied
        power = "(" + (f"{self.power:-}" if self.power != 2 else "")
        root = str(self.root) + ")"
        return "(" + mul + power + "√" + root + add + ")"

    def __mul__(self, other: Union[int, float, Object, "Fraction"]) -> "Surd":
        """
        A method to multiply a Surd and another value (Surd*value)
        :param other: int or float or Object or Fraction => The value to multiply
        :return: Surd => The Surd used, with the values altered
        """
        # Two addless surds multiply by nesting: simplify() will merge the
        # multipliers and roots; anything else falls back to Object's FOIL.
        if type(other) == Surd and self.add == 0 == other.add:
            other: Surd
            other_surd = Surd(self.mul * other.mul, other.root, 0, other.power)
            return Surd(other_surd, self.root, self.add, self.power)
        return super().__mul__(other)

    def GetValue(self) -> float:
        """
        A method to return the value of the surd
        :return: float => The final value
        """
        mul, add = super().GetValue()
        val = math.pow(self.root.GetValue() if isinstance(self.root, (Object, Fraction)) else self.root, 1 / self.power)
        return mul * val + add

    def simplify(self) -> None:
        """
        A method to simplify a Surd, so that:
            Surd(m1, r, 0, p) * Surd(m2, r, 0, p) = Surd(r*m1, 1, 0, p)
            Surd(m1, r1, 0, p) * Surd(m2, r2, 0, p) = Surd(m1, r*m1, 0, p)
            Surd(m1, Surd(m2, r, 0, p1), a, p2) = Surd(m1 * Surd(1, m2, 0, p2), r, a, p2*p1)
            Surd(m, r, a, p) where r**(1/p) is a whole number = Surd(m * r**(1/p), 1, a, p)
            Surd(m1, r, Surd(m2, r, a, p), p) = Surd(m1 + m2, r, a, p)
            Surd(m, r, a, p) where r can be split into r1*r2, where r1**(1/p) is a whole number or r2**(1/p) is a whole number = Surd(
            m*r1, r2, a, p)
        """
        # NOTE: the steps below are order-sensitive — each rewrite feeds the next.
        # Step 1: merge a surd multiplier of the same power into this surd.
        if isinstance(self.mul, Surd) and self.mul.power == self.power and self.add == 0 == self.mul.add:
            if self.mul.root == self.root:
                self.mul = self.root * self.mul.mul
                self.root = 1
            else:
                self.root *= self.mul.root
                self.mul = self.mul.mul
        # Step 2: flatten a nested surd root by multiplying the powers.
        if isinstance(self.root, Surd) and self.root.add == 0:
            self.mul *= Surd(self.root.mul, power = self.power)
            self.power *= self.root.power
            self.root = self.root.root
        # Step 3: extract the whole root entirely if it is a perfect power.
        if int(math.pow(self.root, 1 / self.power)) == math.pow(self.root, 1 / self.power):
            self.mul *= math.pow(self.root, 1 / self.power)
            self.root = 1
        # Step 4: collect like surds that were added together.
        if isinstance(self.add, Surd) and self.add.power == self.power and self.root == self.add.root:
            self.mul += self.add.mul
            self.add = self.add.add
        # Step 5: pull any perfect-power factor x out of the root (root = x*y),
        # keeping the largest remaining factor y inside.
        # NOTE(review): range() requires an integer root here — presumably the
        # root is always integral by this point; confirm.
        for x in range(2, self.root):
            for y in range(self.root, 1, -1):
                if x * y == self.root:
                    value = math.pow(x, 1 / self.power)
                    if int(value) == value:
                        self.mul *= int(value)
                        self.root = y
                        return
class Exp(Object):
    """
    A class to represent an expression using Euler's constant ('e'),
    i.e. mul * e**power + add

    Class variables defined below:

    Instance variables defined below:

    See help(Object) for more information on class and instance variables
    """
    _usages = [
        [],
        ["power"],
        ["mul", "power"],
        ["mul", "power", "add"]
    ]
    _max_arg_length = 3
    _min_arg_length = 0
    _default_values = {"mul":1, "add":0, "power":1}

    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]):
        """
        A method to construct necessary arguments
        :param args: int or float or Object or Fraction (multi-value) => The values used for instance variables
        :param values: int or float or Object or Fraction (multi-value, called by reference) => Any overwritten values (used so that a
        single instance variable can be set without adding values for all others)
        """
        super().__init__(*args, **values)
        # e**0 collapses to 1, so the full expression form isn't shown
        self.display = self.power != 0
        self.simplify()

    def __str__(self) -> str:
        """
        Converts expression to a string
        :return: str => a neatly formatted string containing all necessary information
        """
        trial = super().__str__()
        if not isinstance(trial, tuple):
            return trial
        # BUG FIX: Object.__str__ returns the pieces as (mul, add, power); the
        # previous unpacking swapped add and power, printing e.g. "(2e+3^2)"
        # instead of "(2e^2+3)".
        mul, add, power = trial
        return "(" + mul + "e" + power + add + ")"

    def __mul__(self, other: "Union[int,float,Object,Fraction]") -> "Exp":
        """
        Multiplies an expression and another value (Exp*value)
        :param other: int or float or Object or Fraction => the value to multiply
        :return: Exp => returns the new expression
        """
        # Two addless exponentials of the same base multiply by nesting;
        # simplify() then adds the powers. Anything else uses Object's FOIL.
        if type(other) == type(self) and self.add == 0 == other.add:
            other_exp = type(self)(self.mul * other.mul, other.power)
            return type(self)(other_exp, self.power, self.add)
        else:
            return super().__mul__(other)

    def GetValue(self) -> float:
        """
        Returns the value of an Euler expression
        :return: float => the decimal value of the expression
        """
        mul, add = super().GetValue()
        val = math.pow(math.e, self.power)
        return mul * val + add

    def simplify(self) -> None:
        """Converts the expression to its simplest form"""
        # A same-type multiplier contributes its power: e^a * e^b = e^(a+b).
        if type(self.mul) == type(self):
            self.power += self.mul.power
            self.mul = self.mul.mul
        # Collect like terms added together: m1*e^p + m2*e^p = (m1+m2)*e^p.
        if type(self.add) == type(self) and self.add.power == self.power:
            self.mul += self.add.mul
            self.add = self.add.add
class Pi(Exp):
    """
    A class to represent an expression built on pi ('π'),
    of the form mul * π**power + add
    Class variables defined below:
    Instance variables defined below:
    See help(Exp) for more information on class and instance variables
    """
    def __str__(self) -> str:
        """
        Converts the expression to a string
        :return: str => a neatly formatted string containing all necessary information
        """
        # reuse the Exp formatting and swap the constant symbol
        text = super().__str__()
        return text.replace("e", "π")

    def GetValue(self) -> float:
        """
        Evaluates the pi expression
        :return: float => the decimal value of the expression
        """
        # call Object.GetValue directly so Exp does not inject e**power
        mul, add = Object.GetValue(self)
        return mul * math.pow(math.pi, self.power) + add
class Trig(Object):
    """
    An **abstract** base class to represent an expression using a trigonometric function (sin, cos, tan),
    of the form mul * func(theta)**power + add
    Class variables defined below:
    Instance variables defined below:
        _function (str) (protected): The name of the function used
        theta (int or float or Pi or Fraction): The angle used; plain numbers are degrees, Pi expressions are radians
        func (callable): The actual function used
    See help(Object) for more information on class and instance variables
    """
    _usages = [
        ["theta"],
        ["mul", "theta"],
        ["mul", "theta", "add"],
        ["mul", "theta", "add", "power"]
    ]
    _max_arg_length = 4
    _min_arg_length = 1
    _default_values = {"mul":1, "theta":None, "add":0, "power":1}
    @abc.abstractmethod
    def __init__(self, function: str, *args: Union[int, float, Object, "Fraction"],
                 **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Constructs necessary attributes for child classes to use
        :param function: str => the (lowercase) function to use
        :param args: int or float or Object or Fraction (multi-value) => The values used for instance variables
        :param values: int or float or Object or Fraction (multi-value, called by reference) => Any overwritten values (used so that a
        single instance variable can be set without adding values for all others)
        :raises TypeError: If the instance variable theta isn't an int, float, Fraction or Pi
        :raises ValueError: If function isn't a recognised trigonometric function
        """
        self.theta: Union[int, float, Fraction, Pi] = 0
        self.func: Callable[[float], float]
        self._function = function.lower()
        if function in ["sin", "cos", "tan"]:
            self.func = getattr(math, function)
        elif function in ["arcsin", "arccos", "arctan"]:
            # the math module spells the inverses 'asin', 'acos', 'atan'
            self.func = getattr(math, "a" + function[3:])
        elif function in ["cosec", "sec", "cot"]:
            # reciprocal functions are evaluated as 1 / counterpart
            if function == "cot":
                reverse = "tan"
            elif function == "sec":
                reverse = "cos"
            else:
                reverse = "sin"
            self.func = lambda x:1 / getattr(math, reverse)(x)
        elif function in ["arccosec", "arcsec", "arccot"]:
            # inverse reciprocals: 1 / inverse counterpart
            if function == "arccot":
                reverse = "atan"
            elif function == "arcsec":
                reverse = "acos"
            else:
                reverse = "asin"
            self.func = lambda x:1 / getattr(math, reverse)(x)
        else:
            raise ValueError(f"Unknown function type '{function}'")
        super().__init__(*args, **values)
        if not isinstance(self.theta, (int, float, Fraction, Pi)):
            raise TypeError("Theta cannot be non-pi mathematical expression")
        # a zeroth power contributes nothing visible, so hide the term
        self.display = self.power != 0
        self.simplify()
    def __str__(self) -> str:
        """
        Converts expression to a string
        :return: str => a neatly formatted string containing all necessary information
        """
        trial = super().__str__()
        if not isinstance(trial, tuple):
            return trial
        mul, power, add = trial
        return "(" + mul + self._function + power + "(" + str(self.theta) + ")" + add + ")"
    def __mul__(self, other: Union[int, float, Object, "Fraction"]) -> "Trig":
        """
        Multiplies an expression and another value (Trig*value)
        :param other: int or float or Object or Fraction => the value to multiply
        :return: Object => returns the new expression
        """
        if type(other) == type(self) and self.add == 0 == other.add:
            # nest the terms; simplify() merges them when the angles match
            other_trig = type(self)(self.mul * other.mul, other.theta, 0, other.power)
            return type(self)(other_trig, self.theta, self.add, self.power)
        else:
            return super().__mul__(other)
    def GetValue(self) -> float:
        """
        Returns the value of a trigonometric expression
        :return: float => the decimal value of the expression
        :raise ValueError: If the value doesn't exist (e.g. tan(90))
        """
        raiseerr = False
        mul, add = super().GetValue()
        theta = self.theta.GetValue() if isinstance(self.theta, (Object, Fraction)) else float(self.theta)
        # BUG FIX: the old code tested the already-evaluated float against Pi
        # (always False), so Pi-expressed angles — already radians — were passed
        # through math.radians a second time. Inspect the original theta instead.
        if not isinstance(self.theta, Pi):
            # plain numeric angles are treated as degrees
            theta = math.radians(theta)
        try:
            value = math.pow(self.func(theta), self.power)
        except ZeroDivisionError:
            # reciprocal functions divide by a zero of their counterpart
            raiseerr = True
            value = 0.0
        val = str(value)
        if "e" in val:
            # scientific notation: a huge magnitude means the function blew up at
            # an undefined point; a tiny one is rounding noise at a zero
            i = val.index("e")
            if int(val[i + 2:]) > 10:
                if val[i + 1] == "+":
                    raiseerr = True
                else:
                    value = 0.0
        if raiseerr:
            raise ValueError(f"'{self._function}' of '{self.theta}' is undefined")
        return mul * value + add
    def simplify(self) -> None:
        """Simplifies a trigonometric expression by merging like terms with the same angle"""
        # nested multiplier of the same function and angle: powers add
        if type(self.mul) == type(self) and self.mul.theta == self.theta:
            self.power += self.mul.power
            self.mul = self.mul.mul
        # added term with the same function, angle and power: coefficients combine
        if type(self.add) == type(self) and self.add.theta == self.theta and self.add.power == self.power:
            self.mul += self.add.mul
            self.add = self.add.add
class Sin(Trig):
    """
    A class to represent a sine expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'sin' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "sin", *args, **values)
class Cos(Trig):
    """
    A class to represent a cosine expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'cos' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "cos", *args, **values)
class Tan(Trig):
    """
    A class to represent a tangent expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'tan' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "tan", *args, **values)
class Asin(Trig):
    """
    A class to represent an inverse-sine (arcsin) expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'arcsin' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "arcsin", *args, **values)
class Acos(Trig):
    """
    A class to represent an inverse-cosine (arccos) expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'arccos' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "arccos", *args, **values)
class Atan(Trig):
    """
    A class to represent an inverse-tangent (arctan) expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'arctan' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "arctan", *args, **values)
class Cosec(Trig):
    """
    A class to represent a cosecant expression, the reciprocal of Sin (1/Sin)
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'cosec' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "cosec", *args, **values)
class Sec(Trig):
    """
    A class to represent a secant expression, the reciprocal of Cos (1/Cos)
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'sec' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "sec", *args, **values)
class Cot(Trig):
    """
    A class to represent a cotangent expression, the reciprocal of Tan (1/Tan)
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'cot' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "cot", *args, **values)
class Acosec(Trig):
    """
    A class to represent an inverse-cosecant (arccosec) expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'arccosec' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "arccosec", *args, **values)
class Asec(Trig):
    """
    A class to represent an inverse-secant (arcsec) expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'arcsec' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Trig.__init__(self, "arcsec", *args, **values)
class Acot(Trig):
    """
    A class to represent an inverse-cotangent (arccot) expression
    Class variables defined below:
    Instance variables defined below:
    See help(Trig) for more information on class and instance variables
    """
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]) -> None:
        """
        Builds the expression by delegating to Trig with the 'arccot' function
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        # BUG FIX: previously passed "arcsec", so Acot silently evaluated (and
        # printed) as an arc-secant; Trig recognises "arccot" and maps it to 1/atan
        super().__init__("arccot", *args, **values)
class Log(Object):
    """
    A class to represent a log expression, to calculate the power required to find a value
    So that Log in base 10 of 100 (Log10(100)) is 2. Because 10*10 (10**2) is 100
    Class variables defined below:
    Instance variables defined below:
        base (int or float): The base used (the number being raised to the power calculated)
        value (int or float or Fraction): The value (the value of the base raised to the power calculated)
    See help(Object) for more information on class and instance variables
    """
    _usages = [
        ["value"],
        ["value", "base"],
        ["mul", "value", "base"],
        ["mul", "value", "base", "add"],
        ["mul", "value", "base", "add", "power"]
    ]
    _max_arg_length = 5
    _min_arg_length = 1
    _default_values = {"mul":1, "value":None, "base":10, "add":0, "power":1}
    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]):
        """
        A method to construct necessary arguments
        :param args: int or float or Object or Fraction (multi-value) => The values used for instance variables
        :param values: int or float or Object or Fraction (multi-value, called by reference) => Any overwritten values (used so that a
        single instance variable can be set without adding values for all others)
        """
        self.base: Union[int, float] = 0
        self.value: Union[int, float, Fraction] = 0
        super().__init__(*args, **values)
        # validate base/value before attempting any algebraic simplification
        self._setup()
        self.simplify()
    def __str__(self) -> str:
        """
        Converts Log to a string
        :return: str => a neatly formatted string containing all necessary information
        """
        trial = super().__str__()
        if not isinstance(trial, tuple):
            return trial
        mul, power, add = trial
        # base 10 is the conventional default, so it is left implicit
        base = str(self.base) if self.base != 10 else ""
        value = f"({self.value})"
        return "(" + mul + "log" + base + value + power + add + ")"
    def __mul__(self, other: Union[int, float, Object, "Fraction"]) -> "Log":
        """
        Multiplies a Log and another value (Log*value)
        :param other: int or float or Object or Fraction => the value to multiply
        :return: Log => returns the new Log
        """
        # two bare logs in the same base: nest them; simplify() merges matching terms
        if type(other) == type(self) and self.add == 0 == other.add and self.base == other.base:
            other: Log
            other_log = type(self)(self.mul * other.mul, other.value, other.base, 0, other.power)
            return type(self)(other_log, self.value, self.base, self.add, self.power)
        else:
            return super().__mul__(other)
    def GetValue(self) -> float:
        """
        Calculates the power required to get the value from the base, and performs other relevant operations
        :return: float => The final value
        """
        mul, add = super().GetValue()
        # math.log accepts self.value as a Fraction because Fraction defines __float__
        value = math.log(self.value, self.base)
        return mul * value ** self.power + add
    def simplify(self) -> None:
        """Simplifies a Log"""
        # log(x) * log(x) in the same base collapses into one log with a higher power
        if isinstance(self.mul, Log) and self.mul.base == self.base and self.mul.value == self.value:
            self.power += self.mul.power
            self.mul = self.mul.mul
        if isinstance(self.add, Log) and self.add.base == self.base and self.power == self.add.power:
            if self.value == self.add.value:
                # identical log terms: just combine the coefficients
                self.mul += self.add.mul
                self.add = self.add.add
            else:
                # log(a)+log(b) = log(a*b); log(a)-log(b) = log(a/b)
                # NOTE(review): the sign test compares the whole Log term against 0,
                # relying on Object's comparison semantics — confirm it reflects the
                # sign of the added term's coefficient
                if self.add > 0:
                    self.value *= (self.add.value ** self.add.mul)
                else:
                    self.value = Fraction(self.value, self.add.value ** self.add.mul)
                self.add = self.add.add
    def _setup(self) -> None:
        """
        Checks instance variables have correct values
        :raises ValueError: If value is <= 0 or base is <= 1
        :raises TypeError: If value isn't an int, float, or Fraction or base isn't an integer
        """
        # log_b(1) is always 0, so such a term is not displayed
        self.display = self.value != 1
        if self.value <= 0:
            raise ValueError("Cannot find the log of anything negative or 0")
        if self.base < 1:
            raise ValueError("Cannot have negative base")
        if self.base == 1:
            raise ValueError("Indeterminate base")
        if not isinstance(self.base, int):
            raise TypeError("Cannot have non-integer base")
        if not isinstance(self.value, (int, float, Fraction)):
            raise TypeError("Cannot find the power of a mathematical expression")
class Ln(Log):
    """
    A class to represent a 'natural log' expression: a log expression whose base is Euler's constant 'e'
    Class variables defined below:
    Instance variables defined below:
    See help(Log) for more information on class and instance variables
    """
    _usages = [
        ["value"],
        ["mul", "value"],
        ["mul", "value", "add"],
        ["mul", "value", "add", "power"]
    ]
    _max_arg_length = 4
    _min_arg_length = 1
    _default_values = {"mul":1, "value":None, "add":0, "power":1}

    def __init__(self, *args: Union[int, float, Object, "Fraction"], **values: Union[int, float, Object, "Fraction"]):
        """
        Builds the expression from positional and keyword values
        :param args: int or float or Object or Fraction (multi-value) => positional instance-variable values
        :param values: int or float or Object or Fraction (multi-value, called by reference) => keyword overrides for
        individual instance variables
        """
        Object.__init__(self, *args, **values)
        # Log._setup rejects non-integer bases, so validate with a dummy integer
        # base first and only then switch to the real base of e
        self.base = 2
        self._setup()
        self.base = math.e

    def __str__(self) -> str:
        """
        Converts Ln to a string
        :return: str => a neatly formatted string containing all necessary information
        """
        pieces = Object.__str__(self)
        if isinstance(pieces, tuple):
            mul, power, add = pieces
            return f"({mul}ln({self.value}){power}{add})"
        return pieces
class Fraction:
"""
A class to represent a fraction for division, so that 1 divided by 10 is equivalent to 1/10
Class variables defined below:
__upper_limit (int) (private) (default 1000): The highest value to work up to when trying to find a value based off of a float
Instance variables defined below:
numerator (int or Object): The top of the Fraction
denominator (int or Object): The bottom of the Fraction
See help(Object) for more information on class and instance variables
"""
__upper_limit = 1000
def __init__(self, *args: Union[int, float, Object, "Fraction"]):
"""
A method to construct necessary arguments
:param args: int or float or Object or Fraction (multi-value) => The values used for instance variables
:raises ValueError: If length of arguments isn't 1 or 2
:raises TypeCheck: If any argument's type isn't in Object's class attribute 'types'
"""
self.numerator: Union[int, Object]
self.denominator: Union[int, Object]
Object.typeCheck(*args)
if len(args) == 1:
self.numerator = 1
denom = args[0]
if isinstance(denom, Fraction):
self.numerator *= denom.denominator
self.denominator = denom.numerator
elif isinstance(denom, float):
self.numerator, self.denominator = Fraction.Find(denom)
elif len(args) == 2:
self.numerator, self.denominator = args
if isinstance(self.numerator, int) and isinstance(self.denominator, int):
self.simplify()
return
self.simplify()
return
raise ValueError("Usage:\n\tFraction(denominator)\n\tFraction(numerator,denominator)")
def __str__(self) -> str:
"""
A method to convert a Fraction into a string
:return str: The converted string made from the Fraction
"""
if self.denominator == 1:
return str(self.numerator)
return f"({self.numerator} / {self.denominator})"
def __repr__(self) -> str:
"""
Method to return a string representation of the Object
:return: str => The evaluateable representation
"""
return f"Fraction({self.numerator}, {self.denominator})"
def __int__(self) -> int:
"""
Method to convert a Fraction to an integer
:return: int => The value rounded down
"""
return int(self.GetValue())
def __float__(self) -> float:
"""
Method to convert a Fraction to a decimal
:return: float => The value of the Fraction
"""
return self.GetValue()
def __add__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to add a Fraction and another value (Fraction+value)
:param other: int or float or Object or Fraction => The value to add
:return: Fraction => A new Fraction made from the forumla (a/b)+(c/d)=(ad+bc)/(bd)
"""
Object.typeCheck(other)
otherfrac: Fraction = Fraction(other, 1) if not isinstance(other, Fraction) else other
Lnumer = self.numerator * otherfrac.denominator
Rnumer = otherfrac.numerator * self.denominator
denom = self.denominator * otherfrac.denominator
return Fraction(Lnumer * Rnumer, denom)
def __radd__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to add a Fraction and another value (value+Fraction)
:param other: int or float or Object or Fraction => The value to add
:return: Fraction => A new Fraction made from the forumla (a/b)+(c/d)=(ad+bc)/(bd)
"""
return self + other
def __sub__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to subtract a Fraction and another value (Fraction-value)
:param other: int or float or Object or Fraction => The value to subtract
:return: Fraction => A new Fraction made from the forumla (a/b)-(c/d)=(ad-bc)/(bd)
"""
Object.typeCheck(other)
return self + (-other)
def __rsub__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to subtract a Fraction from another value (value-Fraction)
:param other: int or float or Object or Fraction => The value to subtract from
:return: Fraction => A new Fraction made from the forumla (a/b)-(c/d)=(ad-bc)/(bd)
"""
return -(self - other)
def __mul__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to multiply a Fraction and another value (Fraction*value)
:param other: int or float or Object or Fraction => The value to multiply
:return: Fraction => A new Fraction made from the forumla (a/b)*(c/d)=(ac)/(bd)
"""
Object.typeCheck(other)
otherfrac: Fraction = Fraction(other, 1) if not isinstance(other, Fraction) else other
return Fraction(self.numerator * otherfrac.numerator, self.numerator * otherfrac.denominator)
def __rmul__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to multiply a Fraction and another value (value*Fraction)
:param other: int or float or Object or Fraction => The value to multiply
:return: Fraction => A new Fraction made from the forumla (a/b)*(c/d)=(ac)/(bd)
"""
return self * other
def __truediv__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to divide a Fraction and another value (Fraction/value)
:param other: int or float or Object or Fraction => The value to divide by
:return: Fraction => A new Fraction made from the forumla (a/b)/(c/d)=(a/b)*(d/c)=(ad)/(bc)
"""
Object.typeCheck(other)
otherfrac: Fraction = Fraction(other, 1) if not isinstance(other, Fraction) else other
return self * otherfrac.Reciprocal()
def __rtruediv__(self, other: Union[int, float, Object, "Fraction"]) -> "Fraction":
"""
A method to divide a Fraction from another value (value/Fraction)
:param other: int or float or Object or Fraction => The value to divide from
:return: Fraction => A new Fraction made from the forumla (a/b)/(c/d)=(a/b)*(d/c)=(ad)/(bc)
"""
return (self / other).Reciprocal()
def __pow__(self, power: Union[int, float, "Fraction"], modulo: Optional[int] = None) -> Union["Fraction", Surd, float]:
"""
A method to raise a Fraction to a power (Fraction**value)
:param power: int or float or Fraction => The value to raise to
:return: Fraction or Surd or float => A Fraction where each value is raised to the power, for Surds its a Fractional power (
type(power)=Fraction), floats are when modulo isn't None
"""
if modulo is None:
Object.typeCheck("power", power)
if isinstance(power, Fraction):
return Surd(self, power = power.denominator) ** power.numerator
elif isinstance(power, float):
return self ** Fraction(*Fraction.Find(power))
value: Fraction = self
for x in range(power - 1):
value *= self
return value
return self.__pow__(power) % modulo
def __rpow__(self, other: Union[int, float, "Object", "Fraction"]) -> Surd:
"""
A method to raise a value to a Fractional power (value**Fraction)
:param other: int or float or Object or Fraction => The value being raised
:return: Surd => A surd where the root is the value, the power is the denominator of the Fraction, and this Surd is raised to
the power of this Fraction's numerator
"""
return Surd(other, power = self.denominator) ** other.numerator
def __mod__(self, other: int) -> float:
"""
A method to return the remainder when a Fraction is divided by a value
:param other: int => The value to divide by
:return: float => The remainder
"""
return self.GetValue() % other
def __rmod__(self, other: Union[int, float, "Object", "Fraction"]) -> None:
"""
A method to modulo a value by a Fraction
:param other: int or float or Object or Fraction => The value to modulo
:raise TypeError: If called
"""
raise TypeError(f"Cannot modulo by type '{type(self).__name__}'")
def __floordiv__(self, other: Union[int, float, "Object", "Fraction"]) -> int:
"""
A method to use integer division on a Fraction and another value (Fraction//value)
:param other: int or float or Object or Fraction => The value to divide by
:return: int => An integer made from the forumla (a/b)/(c/d)=(a/b)*(d/c)=(ad)/(bc)
"""
return int(self / other)
def __rfloordiv__(self, other: Union[int, float, "Object", "Fraction"]) -> int:
"""
A method to use integer division on a Fraction and another value (Fraction//value)
:param other: int or float or Object or Fraction => The value to divide from
:return: int => An integer made from the forumla (a/b)/(c/d)=(a/b)*(d/c)=(ad)/(bc)
"""
return int(other / self)
def __eq__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
"""
A method to equate a Fraction and a value
:param other: int or float or Object or Fraction => The value to compare to
:return: bool => Whether they're equal
"""
if isinstance(other, (Fraction, Object)):
return self.GetValue() == other.GetValue()
return self.GetValue() == other
def __ne__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
"""
A method to inequate a Fraction and a value
:param other: int or float or Object or Fraction => The value to compare to
:return: bool => Whether they're unequal
"""
return not self == other
def __lt__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
"""
A method to compare a Fraction and a value by less than (Fraction < value)
:param other: int or float or Object or Fraction => The value to compare to
:return: bool => Whether the Object is less than the value
"""
if isinstance(other, (Fraction, Object)):
return self.GetValue() < other.GetValue()
return self.GetValue() < other
def __gt__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
"""
A method to compare a Fraction and a value by greater than (Fraction > value)
:param other: int or float or Object or Fraction => The value to compare to
:return: bool => Whether the Object is greater than the value
"""
if isinstance(other, (Fraction, Object)):
return self.GetValue() > other.GetValue()
return self.GetValue() > other
def __le__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
"""
A method to compare a Fraction and a value by less than or equal to (Fraction <= value)
:param other: int or float or Object or Fraction => The value to compare to
:return: bool => Whether the Object is less than the value or equal to it
"""
return not self > other
def __ge__(self, other: Union[int, float, "Object", "Fraction"]) -> bool:
"""
A method to compare a Fraction and a value by greater than or equal to (Fraction >= value)
:param other: int or float or Object or Fraction => The value to compare to
:return: bool => Whether the Object is greater than the value or equal to it
"""
return not self < other
def __abs__(self) -> "Fraction":
"""
A method to return the absolute value of a Fraction
:return: Fraction => The positive value of the Fraction
"""
if self < 0:
return -self
return self
def __neg__(self) -> "Fraction":
"""
A method to negate a Fraction
:return: Fraction => The positive Fraction if it's negative, and negative if it's positive
"""
return -1 * self
def GetValue(self) -> float:
"""
A method to find the value of a Fraction
:return: float => numerator divided by the denominator
"""
numer = self.numerator.GetValue() if isinstance(self.numerator, Object) else self.numerator
denom = self.denominator.GetValue() if isinstance(self.denominator, Object) else self.denominator
return numer / denom
def Reciprocal(self) -> "Fraction":
"""
A method to find and return the reciprocal of a Fraction (denominator/numerator)
:return: Fraction => a new Fraction wheere the numerator is now the denominator and the denominator is now the numerator
"""
return Fraction(self.denominator, self.numerator)
def reciprocal(self) -> None:
"""A method to turn a Fraction into its reciprocal"""
self.numerator, self.denominator = self.denominator, self.numerator
def simplify(self) -> None:
"""
A method to simplify a Fraction
:raise ZeroDivisionError: If the denominator is 0
"""
if self.denominator == 0:
raise ZeroDivisionError("Cannot divide by 0")
for x in range(2, Fraction.__upper_limit):
while self.numerator % x == 0 and self.denominator % x == 0:
self.numerator %= x
self.denominator %= x
self._setup()
def _setup(self) -> None:
"""
A method to setup a Fraction so that it creates a Fraction (from Objects or floats) and calculates numerator and denominator
:raise TypeError: If the division doesn't create a Fraction
"""
if isinstance(self.numerator, float):
self.numerator = Fraction(*Fraction.Find(self.numerator))
if isinstance(self.denominator, float):
self.denominator = Fraction(*Fraction.Find(self.denominator))
divide = self.numerator / self.denominator
if not isinstance(divide, Fraction):
raise TypeError(
f"Expected type 'Fraction', got type '{type(self).__name__}' with {self.numerator} and {self.denominator}")
self.numerator = divide.numerator
self.denominator = divide.denominator
@staticmethod
def Find(value: float) -> "tuple[Union[Object, int], Union[Object, int]]":
    """
    A method to find the values of a Fraction based off a float
    :param value: float => The value the ratio must reproduce
    :return (Object or int, Object or int): The numerator and denominator
    :raises OverflowError: If the value can't be found based off of the class variable '__upper_limit'
    """
    # fix: the previous implementation built source code strings and exec'd them;
    # the generated code was syntactically invalid (missing newlines, a malformed
    # dict comprehension) and referenced the name-mangled '__upper_limit', so it
    # could never run.  This version searches the same space directly.
    import itertools  # local import: keeps the file's top-level imports untouched
    limit = Fraction.__upper_limit
    # Fast path: a plain integer ratio.
    for numer in range(limit):
        for denom in range(1, limit):
            if numer / denom == value:
                return numer, denom
    # Fall back to every concrete Object subclass defined in this module
    # (excluding the Object and Trig bases, as before).
    members = inspect.getmembers(sys.modules[__name__],
                                 lambda cls: (inspect.isclass(cls) and
                                              Object in inspect.getmro(cls) and
                                              cls not in [Object, Trig]))
    for _, subclass in members:
        fields = subclass.Usage()[-1]
        for denom in range(1, limit):
            # Try every combination of field values for the numerator object,
            # first against an integer denominator, then against an object built
            # from the reversed field values (mirrors the original search order).
            for values in itertools.product(range(1, limit), repeat=len(fields)):
                numer = subclass(*values)
                if numer / denom == value:
                    return numer, denom
                denom_obj = subclass(*reversed(values))
                if numer / denom_obj == value:
                    return numer, denom_obj
    raise OverflowError(f"Cannot find Fraction for value {value}")
@staticmethod
def SetLimit(value: int) -> None:
    """
    Sets the upper limit used when searching for Fraction representations.
    :param value: int => the value to set it to
    :raise ValueError: If value is less than 1000
    """
    minimum = 1000  # the class ships with 1000; never allow shrinking below it
    if value < minimum:
        raise ValueError("Upper limit cannot be below original value of 1000")
    Fraction.__upper_limit = value
class Iter(abc.ABC):
"""
An **abstract** class designed to represent an iterable in BMPY
Class variables defined below:
__types ((type, type, type, type, type, type, type)) (private): The 'iterable types' allowed to combine with
__setup (bool) (private) (default is False): Whether the class has been setup
Instance variables defined below:
_dataset ({int: object}) (protected): The data within the iterable
_index (int) (default 0) (protected): The current index (used when iterating over an Iter)
"""
__setup: bool = False
__types: tuple[type, type, type, type, type, type, type]
@abc.abstractmethod
def __init__(self, *data: Any, split: bool = True):
"""
Sets up the instance variables
:param data: object (multi-value) => The data to use as the iterable's initial data
:param split: bool (default is True) => Whether to split an iterable's content into multiple entries
"""
self._dataset: dict[int, Any] = {}
self._index: int = 0
if not Iter.__setup:
Iter.__types = (list, tuple, set, type({}.keys()), type({}.values()), type({}.items()), Iter)
Iter.__setup = True
if len(data) == 0:
return
if isinstance(data[0], Iter.__types) and len(data) == 1 and split:
used_data = [elem for elem in data[0]]
elif isinstance(data[0], dict) and len(data) == 1 and split:
used_data = [elem for elem in data[0].items()]
elif len(data) != 0:
used_data = list(data)
else:
used_data = []
for i in range(len(used_data)):
self._dataset[i] = used_data[i]
def __len__(self) -> int:
"""
Measures the length of the Iter
:return int: How many items are in the Iter
"""
if 0 not in self._dataset.keys():
return 0
return list(self._dataset.keys())[-1] + 1
def __iter__(self) -> "Iter":
"""
Allows converting an Iter into an Iterable
:return Iter: A copy of the dataset
"""
return self.Copy()
def __next__(self) -> Any:
"""
Advances the Iterable version of the Iter
:return object: The item at the instance variable 'index'
"""
if self._index < len(self):
self._index += 1
return self[self._index - 1]
self._index = 0
raise StopIteration()
def __getitem__(self, index: Union[int, slice]) -> Union["Iter", Any]:
"""
Allows indexing of an Iter
:param index: int or slice => The index to use
:return object or Iter: The item at the specified index (an Iter if index is a slice)
"""
value_range = self.__FindValue(index)
items = []
for i in value_range:
if i < 0:
i += len(self)
try:
items.append(self._dataset[i])
except KeyError:
raise IndexError(f"Index '{i}' out of range")
if len(items) == 0:
if type(self) in [TypedList, FixedTypedList]:
self: TypedList
return type(self)(self._type)
return type(self)()
if isinstance(index, int):
return items[0]
else:
if type(self) in [TypedList, FixedTypedList]:
self: TypedList
return type(self)(self._type, items)
return type(self)(items)
def __setitem__(self, index: Union[int, slice], value: Any) -> None:
"""
Allows setting of values at certain indices
:param index: int or slice => The index to use
:param value: object => The value to use
:raise TypeError: If the index is a slice but value isn't an 'iterable type'
"""
value_range = self.__FindValue(index)
if isinstance(index, slice) and type(value) not in Iter.__types:
raise TypeError("Cannot assign a singular value to a selection of indices")
for i in value_range:
i_original = i
if i < 0:
i += len(self)
try:
self._dataset[i] = (value[i_original] if isinstance(index, slice) else value)
except KeyError:
raise IndexError(f"Index '{i}' out of range")
def __delitem__(self, index: Union[int, slice]) -> None:
"""
Allows deleting of values at certain indices
:param index: int or slice => The index to use
"""
value_range = self.__FindValue(index)
for i in value_range:
if i < 0:
i += len(self)
try:
del self._dataset[i]
except KeyError:
raise IndexError(f"Index '{i}' out of range")
cascade_index = 0
if len(self) == 0:
return
last = list(self._dataset.keys())[-1]
while cascade_index < last:
if cascade_index not in self._dataset.keys():
z = 1
while True:
try:
self._dataset[cascade_index] = self._dataset[cascade_index + z]
del self._dataset[cascade_index + z]
break
except KeyError:
z += 1
cascade_index += 1
def __str__(self) -> str:
"""
Converts the Iter into a string
:return str: The string version of the Iter
"""
string = [str(v) for v in self._dataset.values()]
return "[" + ",".join(string) + "]"
def __repr__(self) -> str:
"""
Converts the Iter into an evaluateable string
:return str: The string representation version of the Iter
"""
string = [repr(x) for x in self._dataset.values()]
return f"{type(self).__name__}({','.join(string)})"
def __add__(self, other: Union[list, tuple, set, "Iter"]) -> "Iter":
"""
Adds two 'iterable types' together (Iter+type)
:param other: (see Iter.__types for allowed types) => The iterable to add
:return Iter: The combined iterables
"""
self.__TypeCheck(other)
return type(self)(*self._dataset.values(), *other)
def __radd__(self, other: Union[list, tuple, set, "Iter"]) -> Union[list, tuple, set, "Iter"]:
"""
Adds two 'iterable types' together (type+Iter)
:param other: (see Iter.__types for allowed types) => The iterable to add
:return (see Iter.__types for allowed types): The combined iterables
"""
return type(other)(self + other)
def __sub__(self, other: Union[list, tuple, set, "Iter"]) -> "Iter":
"""
Subtracts two 'iterable types' together (Iter-type)
:param other: (see Iter.__types for allowed types) => The iterable to subtract
:return Iter: The iterable with the relevant elements removed
"""
copy = self.__TypeCheck(other)
other: Union[list, tuple, set, "Iter"]
for elem in other:
del copy[copy.Index(elem)]
return copy
def __rsub__(self, other: Union[list, tuple, set, "Iter"]) -> Union[list, tuple, set, "Iter"]:
"""
Subtracts two 'iterable types' together (Iter-type)
:param other: (see Iter.__types for allowed types) => The iterable to subtract
:return (see Iter.__types for allowed types): The new iterable with the relevant elements removed
"""
return type(other)(self - other)
def __mul__(self, other: int) -> "Iter":
"""
Multiplies an iterable by a constant (Iter*c)
:param other: int => The amount of copies of the list to have
:return Iter: The iterable duplicated 'other' times
:raise NumError: If other is 0
"""
if other < 0:
raise NumError(f"Cannot multiply type '{type(self).__name__}' by a number less than 0")
if other == 0:
return type(self)()
copy = self.Copy()
for x in range(other - 1):
copy += self.Copy()
return copy
def __rmul__(self, other: int) -> "Iter":
"""
Multiplies an iterable by a constant (c*Iter)
:param other: int => The amount of copies of the list to have
:return Iter: The iterable duplicated 'other' times
:raise NumError: If other is 0
"""
return self * other
def __eq__(self, other: Union[list, tuple, set, "Iter"]) -> bool:
"""
Equates two 'iterable types'
:param other: (see Iter.__types for allowed types) => The iterable to compare
:return bool: Whether each element is the same or not
"""
if len(self) != len(other):
return False
for x in range(len(self)):
if self[x] != other[x]:
return False
return True
def __ne__(self, other: Union[list, tuple, set, "Iter"]) -> bool:
"""
Equates two 'iterable types'
:param other: (see Iter.__types for allowed types) => The iterable to compare
:return bool: If the iterables aren't the same
"""
return not self == other
def __bool__(self) -> bool:
"""
Checks if the Iterable is empty
:return bool: If the iterable has 0 length
"""
return len(self) != 0
def Count(self, item: Any, *, search_for_classes: bool = False, search_through_iterables: bool = True) -> int:
"""
Counts how many times an item appears in the iterable
:param item: object => The item to count
:param search_for_classes: bool (default False) => Whether you're searching for the class as an item, or for instances of the
class
:param search_through_iterables: bool (default True) => Whether you're searching for the iterable as an item, or for all
elements of the iterable
:return int: The number of occurences
"""
occurences = 0
for elem in self:
if type(item) == type and not search_for_classes:
if isinstance(elem, item):
occurences += 1
elif isinstance(item, Iter.__types) and search_through_iterables:
if elem in item:
occurences += 1
else:
if elem == item:
occurences += 1
return occurences
def CountPos(self, item: Any, *, search_for_classes: bool = False, search_through_iterables: bool = True) -> list[int]:
"""
Counts where an item appears in the iterable
:param item: object => The item to count
:param search_for_classes: bool (default False) => Whether you're searching for the class as an item, or for instances of the
class
:param search_through_iterables: bool (default True) => Whether you're searching for the iterable as an item, or for all
elements of the iterable
:return [int,]: The positions of the occurences
"""
positions: list[int] = []
for x in range(len(self)):
elem = self[x]
if type(item) == type and not search_for_classes:
if isinstance(elem, item):
positions.append(x)
elif isinstance(item, Iter.__types) and search_through_iterables:
if elem in item:
positions.append(x)
else:
if elem == item:
positions.append(x)
return positions
def CountAll(self) -> dict[Any, int]:
"""
Counts how many times every item appears in the iterable
:return {object, int}: The number of occurences of every item
"""
occurences: dict[Any, int] = {}
for elem in self:
occurences[elem] = self.Count(elem, search_for_classes = True, search_through_iterables = False)
return occurences
def CountAllPos(self) -> dict[Any, list[int]]:
"""
Counts how many times every item appears in the iterable
:return {object, [int,]}: The position of the occurences of every item
"""
occurences: dict[Any, list[int]] = {}
for elem in self:
occurences[elem] = self.CountPos(elem, search_for_classes = True, search_through_iterables = False)
return occurences
def Copy(self) -> "Iter":
"""
Deep-copies an iterable
:return Iter: The iterable's copy
"""
li = []
if type(self) in [TypedList, FixedTypedList]:
self: TypedList
li.append(self._type)
for elem in self._dataset.values():
try:
li.append(elem.Copy())
except AttributeError:
try:
li.append(elem.copy())
except AttributeError:
li.append(elem)
return type(self)(*li)
def Reverse(self) -> "Iter":
"""
Returns the reversed iterable
:return Iter: The iterable in reverse
"""
temp = self.Copy()
temp.reverse()
return temp
def Index(self, elem: Any, *, occurrence: int = 1) -> int:
"""
Returns the index where the specified element was found
:param elem: Any => the value to search for
:param occurrence: int (keyword only) (default is 1) => the version to find (for things that appear multiple times)
:return: int => The position
:raise TypeError: If occurrence isn't an integer
:raise NumError: If occurrence less than 1 or higher than the number of occurrences
:raise ValueError: If item doesn't appear
"""
if not isinstance(occurrence, int):
raise TypeError("Occurrence count must be an integer")
if occurrence <= 0:
raise NumError("Cannot have occurrence less than 1")
occurrences = self.CountPos(elem, search_for_classes = True, search_through_iterables = False)
if occurrence > len(occurrences):
if occurrence == 1:
raise ValueError(f"Item '{repr(elem)}' does not appear")
raise NumError(f"Item '{repr(elem)}' has less than {str(occurrence)} occurrences")
return occurrences[occurrence - 1]
def reverse(self) -> None:
"""Reverses the iterable"""
self.swap(0, -1)
for x in range(1, len(self) // 2):
self.swap(x, -(x + 1))
def swap(self, pos1: int, pos2: int) -> None:
"""
Swaps the elements at the specified indices
:param pos1: int => The first index to swap
:param pos2: int => The second index to swap
:raise TypeError: If pos1 or pos2 aren't integers
"""
if not isinstance(pos1, int) or not isinstance(pos2, int):
raise TypeError("positions must be integers")
self[pos1], self[pos2] = self[pos2], self[pos1]
def __TypeCheck(self, other: Union[list, tuple, set, "Iter"]) -> "Iter":
"""
A private method to return a copy of the iterable if the type of the parameter is valid
:param other: (see Iter.__types for allowed types) => The parameter to type check
:return Iter: the copy
:raise TypeError: If type check fails
"""
if not isinstance(other, Iter.__types):
types = [ty.__name__ for ty in Iter.__types]
raise TypeError(f"Expected types ({','.join(types)}), got type '{type(other).__name__}' instead")
return self.Copy()
def __FindValue(self, index: Union[int, slice]) -> Union[range, tuple[int]]:
"""
Finds and calculates the value range for a specified index
:param index: int or slice => The index to find a range for
:return range or (int): The range
"""
if isinstance(index, slice):
start = 0 if index.start is None else index.start
stop = len(self) if index.stop is None else index.stop
step = 1 if index.step is None else index.step
return range(start, stop, step)
return index,
@staticmethod
def Types() -> tuple[type, type, type, type, type, type, type]:
"""
Gets the private static attribute '__types'
:return (type, type, type, type, type, type, type): The allowed iterable types"""
return Iter.__types
class List(Iter):
    """
    A class designed to represent a list in BMPY
    Class variables defined below:
    Instance variables defined below:
        _old (List or None) (protected): The old copy of the list (only used when folding)
    See help(Iter) for more information on class and instance variables
    """

    def __init__(self, *data: Any, split: bool = True):
        """
        Sets up the dataset
        :param data: object (multi-value) => The data to use as the iterable's initial data
        :param split: bool (default is True) => Whether to split an iterable's content into multiple entries
        """
        super().__init__(*data, split = split)
        for x in range(len(self)):
            self._change(index = x, elem = self._dataset[x], mode = "init")
        self._old: Optional[List] = None

    def __setitem__(self, index: Union[int, slice], value: Any) -> None:
        """
        Allows setting of values at certain indices
        :param index: int or slice => The index to use
        :param value: object => The value to use
        :raise TypeError: If the index is a slice but value isn't an 'iterable type'
        """
        get = self[index]
        super().__setitem__(index, value)
        self._change(index = index, elem = value, mode = "set", old = get)

    def __delitem__(self, index: Union[int, slice]) -> None:
        """
        Allows deleting of values at certain indices
        :param index: int or slice => The index to use
        """
        get = self[index]
        super().__delitem__(index)
        self._change(index = index, elem = get, mode = "del")

    def Head(self) -> Any:
        """
        Returns the first element of the List
        :return: Any => The first of the element of the List
        """
        return self[0]

    def Tail(self) -> "List":
        """
        Returns the section of the List not included in 'Head'
        :return: Any => The remainder of the list
        """
        return self[1:]

    def Last(self) -> Any:
        """
        Returns the last element of the list
        :return: Any => The last element of the list
        """
        return self[-1]

    def Init(self) -> "List":
        """
        Returns the section of the List not included in 'Last'
        :return: Any => The remainder of the list
        """
        return self[:-1]

    def Display(self, *, sep = ", ") -> str:
        """
        Returns a formatted string, neatly displaying each non-null non-special-character element with a separator
        :param sep: str (keyword only) (default is ", ") => The separator to use
        :return: str => The formatted string
        :raise TypeError: If separator isn't a string
        """
        if not isinstance(sep, str):
            raise TypeError("separator must be string")
        arr = ""
        x = 0
        for elem in self:
            # Special characters (and the final element) are emitted without a
            # trailing separator.
            if elem in ["\'", "\\", "\n", "\r", "\t", "\b", "\f", "", " "] or x == len(self) - 1:
                arr += str(elem)
            else:
                arr += str(elem) + sep
            x += 1
        return arr

    def FoldUsing(self, func: Callable[[Any, Any], Any]) -> "List":
        """
        Combines the List into a List with a singular element, restoring the List afterwards
        :param func: function => the function to apply to each element
        :return: List => The list containing the singular element
        :raise TypeError: If func isn't a function
        :raise ArgumentError: If using func raises an error
        """
        if not (inspect.isbuiltin(func) or inspect.isfunction(func) or inspect.ismethod(func)):
            raise TypeError("Function parameter must be a function or method")
        if self._old is None:
            # fix: was 'type(self)(self)', which passed the instance as the 'ty'
            # argument for typed subclasses; Copy() handles every subclass
            self._old = self.Copy()
        if len(self) == 1:
            ret = self.Copy()
            original = self._old
            self._old = None
            self._reinit(original)  # restore the pre-fold contents
            return ret
        try:
            self[1] = func(self[0], self[1])
        except Exception as err:
            raise ArgumentError(f"Cannot use function on '{type(self).__name__}' due to error: '{err!s}'") from None
        # fix: tail() re-runs __init__, which clears _old; preserve it across the
        # shrink so the final restore still sees the original contents
        saved = self._old
        self.tail()
        self._old = saved
        return self.FoldUsing(func)

    def IsIdentical(self) -> bool:
        """
        Evaluates whether the list is full of identical elements
        :return: bool => Whether the List is full of the same elements
        """
        for x in range(1, len(self)):
            if self[x - 1] != self[x]:
                return False
        return True

    def Pop(self, index: int = -1) -> Any:
        """
        Removes and returns the element at the specified index
        :param index: int => the index of the element to return
        :return: Any => The element to return
        :raise ValueError: If the element specified is not in List
        """
        elem = self[index]
        self.remove(elem)
        return elem

    def Zip(self, *others: Iter, fillvalue: "Any" = None) -> "List":
        """
        Returns a zipped list.
        See help(List.zip) for what a zipped list is
        :param others: iter_ (multi value) => the new iterable to zip through
        :param fillvalue: Any (keyword only) (default is None) => A value to fill of the length of the iterable is shorter than the
        length of the List
        :return: List => The zipped list
        :raise LengthError: If length of any iterable is longer than the length of the List
        """
        temp = self.Copy()
        temp.zip(*others, fillvalue = fillvalue)
        return temp

    def Unzip(self) -> "List":
        """
        Returns an unzipped list.
        See help(List.unzip) for what an unzipped list is
        :return: List => The unzipped list
        """
        temp = self.Copy()
        temp.unzip()
        return temp

    def Exclude(self, elem: Any, *, amount: Union[int, str] = 1) -> "List":
        """
        Returns a list with the specified element removed
        :param elem: Any => the element to exclude
        :param amount: int or str (keyword only) (default is 1) => how many occurrences to drop (any string means 'all')
        :return: List => The list
        :raise ValueError: If the element specified is not in List
        """
        if isinstance(amount, str):
            return self.Exclude(elem, amount = self.Count(elem, search_for_classes = True, search_through_iterables = False))
        temp = self.Copy()
        for x in range(amount):
            temp.remove(elem)
        return temp

    def To(self, ty: Type[Iter]) -> Any:
        """
        Returns another Iter subclass built from the List
        :param ty: type => the Iter subclass to convert to
        :return: Iter => The converted copy
        :raise TypeError: If ty isn't an Iter subclass
        """
        if Iter in ty.mro():
            return ty(self.Copy())
        raise TypeError(f"Type '{ty.__name__}' isn't an iterable type")

    def zip(self, *others: Iter, fillvalue: "Any" = None) -> None:
        """
        Turns this List into a zipped list. A zipped list consists of tuples of elements, where each element of the tuple
        is an element from each list you're zipping
        :param others: iter_ (multi value) => the new iterable to zip through
        :param fillvalue: Any (keyword only) (default is None) => A value to fill of the length of the iterable is shorter than the
        length of the List
        :raise LengthError: If length of any iterable is longer than the length of the List
        """
        i = 0
        for other in others:
            if len(other) > len(self):
                oname = type(other).__name__
                sname = type(self).__name__
                raise LengthError(f"Cannot zip a {oname} with a length greater than the length of the {sname} (at index '{i!s}')")
            i += 1
        for x in range(len(self)):
            tup = []
            for li in others:
                try:
                    tup.append(li[x])
                except IndexError:
                    tup.append(fillvalue)
            self[x] = (self[x], *tup)

    def unzip(self) -> None:
        """Creates an unzipped list. An unzipped list is a list where every element has been reverted to its original
        form (so it is no longer zipped with other iterable's elements)"""
        for x in range(len(self)):
            self[x] = self[x][0]

    def affect(self, func: Callable[[Any], Any]) -> None:
        """
        Transforms the list by applying a function to every element
        :param func: function => the function to apply to every element
        :raise TypeError: If func isn't a function
        """
        if not (inspect.isbuiltin(func) or inspect.isfunction(func) or inspect.ismethod(func)):
            raise TypeError("Function parameter must be a function or method")
        for x in range(len(self)):
            try:
                self[x] = func(self[x])
            except Exception as err:
                raise ArgumentError(f"Cannot use function on '{type(self).__name__}' due to error: '{err!s}'") from None

    def extend(self, other: Union[list, tuple, set, Iter]) -> None:
        """
        Extends this List by adding another one to it
        :param other: list or tuple or iter_ => the iterable to extend by
        :raise TypeError: If other isn't iterable
        """
        if not isinstance(other, Iter.Types()):
            raise TypeError("Parameter must be iterable")
        length = len(self)
        self._dataset = (self + other)._dataset
        for x in range(len(other)):
            elem = other[x]
            # Only re-raise a size violation on the last element (mirrors append).
            try:
                self._change(index = length + x, elem = elem)
            except SizeError as err:
                if x == len(other) - 1:
                    raise err

    def _reinit(self, *data: Any) -> None:
        """
        Re-runs __init__ with the signature the concrete subclass expects
        (typed subclasses take their element type as the first positional argument)
        :param data: Any (multi value) => the data to re-initialise with
        """
        # fix: tail/init/clear used to call self.__init__(...) directly, which
        # passed the data as the 'ty' argument on TypedList subclasses
        if isinstance(self, TypedList):
            self.__init__(self._type, *data)
        else:
            self.__init__(*data)

    def tail(self) -> None:
        """Converts the List into the form returned by 'Tail'"""
        self._reinit(self.Tail())

    def init(self) -> None:
        """Converts the List into the form returned by 'Init'"""
        self._reinit(self.Init())

    def prepend(self, item: "Any") -> None:
        """
        Adds an element onto the start of a list
        :param item: Any => The element to add
        """
        self.reverse()
        self.append(item)
        self.reverse()

    def append(self, item: "Any") -> None:
        """
        Adds an element onto the end of a list
        :param item: Any => The element to add
        """
        self._dataset[len(self)] = item
        self._change(index = len(self) - 1, elem = item)

    def remove(self, *items: "Any") -> None:
        """
        Removes one occurrence of the elements specified from the List
        :param items: Any (multi value) => The elements to remove
        :raise ValueError: If any element specified is not in List
        """
        for item in items:
            if item not in self:
                raise ValueError(repr(item) + " does not appear in " + type(self).__name__)
            index = self.Index(item)
            if index == len(self) - 1:
                self.init()
                continue  # fix: 'return' here skipped every remaining item
            if index == 0:
                self.tail()
                continue  # fix: as above
            del self[index]

    def removeAll(self, *items: "Any") -> None:
        """
        Removes all occurrences of the elements specified from the List
        :param items: Any (multi value) => The elements to remove
        :raise ValueError: If any element specified is not in List
        """
        for item in items:
            if item not in self:
                raise ValueError(repr(item) + " does not appear in " + type(self).__name__)
            self.remove(
                *[item for x in range(self.Count(item, search_through_iterables = False, search_for_classes = True))])

    def sort(self, *, order: str = "asc") -> None:
        """
        Sorts the list into the specified order
        :param order: str (keyword only) => The order to sort into (asc or desc)
        :raise ValueError: if order isn't 'asc' or 'desc'
        """
        if order not in ["asc", "desc"]:
            raise ValueError("Order must be 'asc' or 'desc'")
        self._sorter(0, -1)
        if order == "desc":
            self.reverse()

    def clear(self) -> None:
        """Clears the List so it has length 0"""
        self._reinit()

    def insert(self, pos: int, elem: "Any") -> None:
        """
        Inserts an element at the position specified
        :param pos: int => The position to put the element
        :param elem: Any => The elem to add
        """
        before = self[:pos]
        after = self[pos:]
        before.append(elem)  # subclass guards (type/size) fire on the copy first
        before.extend(after)
        # fix: the rebuilt list was discarded — persist it so insert actually inserts
        self._dataset = before._dataset

    def shuffle(self, *, can_sort: bool = True,
                sort_func: Optional[Callable[[Union[list, tuple, set, Iter]], None]] = None) -> None:
        """
        Shuffles the list into a random order
        :param can_sort: bool (keyword only) (default is True)=> Whether the list is full of sortable elements
        :param sort_func: function (keyword only) (default is None)=> An optional sort function to use, if the list can't be sorted
        :raise ValueError: If sort_func is given while sorting is disabled
        """
        if sort_func is not None and not can_sort:
            raise ValueError(f"Cannot sort using function {sort_func!r} as sorting is disabled")
        if len(self) < 2:
            return
        times_round = random.randint(1, 10)
        # fix: the moduli were drawn from ranges containing 0 (ZeroDivisionError)
        # and, in one branch, from an empty list (IndexError from random.choice)
        sort_mod = random.randint(1, times_round)
        swap_mod = random.randint(1, times_round)
        for x in range(times_round):
            lowest = random.randint(0, len(self) - 1)
            for _ in range(random.randint(1, times_round)):
                highest = random.randint(lowest, len(self) - 1)
                self.swap(lowest, highest)
            if x % sort_mod == 0 and can_sort:
                if sort_func is None:
                    self.sort()
                else:
                    sort_func(self)
            if x % swap_mod == 0:
                # fix: original swapped fixed positions that could exceed the length
                self.swap(random.randint(0, len(self) - 1), random.randint(0, len(self) - 1))

    def deleteCopies(self) -> None:
        """Deletes all the copies in a List, so that it contains one of each element
        (relies on insert() to put the kept occurrence back in place)"""
        i = 0
        while True:
            try:
                elem = self[i]
                index = self.Index(elem)
                self.removeAll(elem)
                self.insert(index, elem)
                i += 1
            except IndexError:
                break

    def splitIntoSubs(self, num_sets: int) -> None:
        """
        Creates the specified amount of subsets from the elements of the List and fills the list with them
        Amount is fixed, while size isn't
        :param num_sets: int => The number of sets to have
        """
        items_per_set = len(self) // num_sets
        self.splitIntoSize(items_per_set)
        # Merge trailing sets until exactly num_sets remain.
        while len(self) > num_sets:
            self[-2] = self[-2] + self[-1]
            self.init()

    def splitIntoSize(self, size: int) -> None:
        """
        Creates the specified amount of subsets from the elements of the List and fills the list with them
        Size is fixed, while amount isn't
        :param size: int => The size of each set
        :raise ValueError: if size is less than 1, the size of each set is longer than the original set, or the size isn't a
        multiple of the length of the set
        """
        if size <= 0:
            raise ValueError("Cannot split into sets with size less than 1")
        elif size > len(self):
            raise ValueError("Cannot split into sets with longer length than original set")
        elif len(self) % size != 0 and size != 1:
            raise ValueError("Cannot create consistent sets of size " + str(size))
        i = 0
        copy = self.Copy()
        while True:
            items = []
            for x in range(size):
                items.append(copy[x])
            copy.remove(*items)
            self[i] = items
            if i < len(self) - 1 and size != 1:
                del self[i + 1]
            i += 1
            if len(copy) == 0:
                break

    def _Partition(self, low: int, high: int) -> int:
        """
        A helper method to swap the elements of a List around based on a determined value
        :param low: int => The starting index to look at
        :param high: int => The end index to look at
        :return int: The middle index looked at
        """
        i = low - 1
        pivot = self[high]
        for j in range(low, high):
            if self[j] <= pivot:
                i += 1
                self[i], self[j] = self[j], self[i]
        self[i + 1], self[high] = self[high], self[i + 1]
        return i + 1

    def _sorter(self, low: int, high: int) -> None:
        """
        A helper method to sort the List (quicksort)
        :param low: int => The starting index to sort from
        :param high: int => The end index to stop sorting at
        """
        if high == -1:
            high = len(self) - 1
        if low < high:
            p = self._Partition(low, high)
            self._sorter(low, p - 1)
            self._sorter(p + 1, high)

    def _change(self, **kwargs: Any) -> None:
        """A function to document changes (only used in subclasses)"""
        pass
class FixedList(List):
    """
    A class designed to represent a tuple in BMPY
    Class variables defined below:
    Instance variables defined below:
        __size (int) (private): The size of the tuple
    See help(List) for more information on class and instance variables
    """

    def __init__(self, *data: Any, split: bool = True):
        """
        Sets up the dataset
        :param data: object (multi-value) => The data to use as the iterable's initial data
        :param split: bool (default is True) => Whether to split an iterable's content into multiple entries
        """
        # -1 is a sentinel: _change() captures the real size on its first call,
        # after the initial data has been loaded, then freezes it.
        self.__size: int = -1
        super().__init__(*data, split = split)

    def __str__(self) -> str:
        """
        Converts the FixedList into a string
        :return str: The string version of the tuple (parentheses instead of brackets)
        """
        string = [str(v) for v in self._dataset.values()]
        return "(" + ",".join(string) + ")"

    def _change(self, **kwargs: Any) -> None:
        """
        A method to document changes: any size-altering mutation is undone and
        rejected with SizeError, keeping the FixedList tuple-like
        :param kwargs: {str:object} => The data used in the change
        Typical kwargs are index, elem and mode
        :raise SizeError: On any disallowed mutation
        """
        if self.__size == -1:
            # First call after construction: lock in the initial length.
            self.__size = len(self)
        if len(self) != self.__size or kwargs.get("mode") is not None:
            if kwargs.get("mode") == "init":
                return
            if kwargs.get("mode") == "set":
                # During a fold (_old set) temporary writes are permitted;
                # otherwise restore the previous value before raising.
                # NOTE(review): a slice index would be used as a dict key here
                # (unhashable) — confirm slice assignment is never expected.
                if self._old is not None:
                    return
                self._dataset[kwargs["index"]] = kwargs["old"]
            elif kwargs.get("mode") == "del":
                # Rebuild the pre-delete contents: prefix + deleted element + suffix.
                before = self[:kwargs["index"]]
                before._dataset[len(before)] = kwargs["elem"]
                self._dataset = (before + self[kwargs["index"]:])._dataset
            else:
                # No mode means append/extend introduced a new slot: remove it.
                del self[kwargs["index"]]
            raise SizeError(f"Cannot change a '{type(self).__name__}'")
class TypedList(List):
    """
    A class designed to represent a list in BMPY, using a more static approach of specifying a type
    Class variables defined below:
    Instance variables defined below:
        _type (type) (protected): The data's type
    See help(List) for more information on class and instance variables
    """

    def __init__(self, ty: type, *data: Any, split: bool = True):
        """
        Sets up the dataset
        :param ty: type => The type the data should be
        :param data: object (multi-value) => The data to use as the iterable's initial data
        :param split: bool (default is True) => Whether to split an iterable's content into multiple entries
        """
        self._type = ty
        super().__init__(*data, split = split)

    def __str__(self) -> str:
        """
        Converts the TypedList into a string
        :return str: The string version of the List, prefixed with the element type
        """
        # fix: removed a dead 'string = [str(v) ...]' list that was built and
        # discarded (super().__str__() already stringifies every element)
        return f"<{self._type.__name__}>" + super().__str__()

    def _change(self, **kwargs: Any) -> None:
        """
        A method to document changes: mutations that introduce an element of the
        wrong type are undone and rejected with TypeError
        :param kwargs: {str:object} => The data used in the change
        Typical kwargs are index, elem and mode
        :raise TypeError: When an off-type element is introduced
        """
        if not isinstance(kwargs["elem"], self._type):
            if kwargs.get("mode") == "init":
                # Bad initial data: discard everything before raising.
                self._dataset = {}
            elif kwargs.get("mode") == "del":
                # Deleting an off-type element is harmless.
                return
            elif kwargs.get("mode") == "set":
                # During a fold (_old set) temporary off-type values are allowed;
                # otherwise restore the previous value before raising.
                if self._old is not None:
                    return
                self._dataset[kwargs["index"]] = kwargs["old"]
            else:
                # No mode means append/extend added the element: remove it.
                del self[kwargs["index"]]
            raise TypeError(f"Expected type '{self._type.__name__}', got type '{type(kwargs['elem']).__name__}' instead")
class FixedTypedList(TypedList, FixedList):
    """
    A class designed to represent a tuple in BMPY, using a more static approach of specifying a type
    Class variables defined below:
    Instance variables defined below:
    See help(TypedList) and help(FixedList) for more information on class and instance variables
    """

    def __init__(self, ty: type, *data: Any, split: bool = True):
        """
        Sets up the dataset
        :param ty: type => The type the data should be
        :param data: object (multi-value) => The data to use as the iterable's initial data
        :param split: bool (default is True) => Whether to split an iterable's content into multiple entries
        """
        # Set the element type first (TypedList._change reads it during init),
        # then run FixedList.__init__ directly so the size sentinel is created;
        # the List/Iter setup happens inside that call.
        self._type = ty
        FixedList.__init__(self, *data, split = split)

    def _change(self, **kwargs: Any):
        """
        A method to document changes
        :param kwargs: {str:object} => The data used in the change
        Typical kwargs are index, elem and mode
        """
        # Explicit (non-super) dispatch: the size guard runs before the type
        # guard, so a size violation raises SizeError even when the element's
        # type is also wrong.
        FixedList._change(self, **kwargs)
        TypedList._change(self, **kwargs)
class Array(TypedList):
    """
    A class designed to represent a fully numerical list (with some added methods so that TypedList(Number,data) has less
    functionality than Array(data))
    Class variables defined below:
    Instance variables defined below:
    See help(TypedList) for more information on class and instance variables
    """
    def __init__(self, *data: Any, split: bool = True):
        """
        Sets up the dataset
        :param data: object (multi-value) => The data to use as the iterable's initial data
        :param split: bool (default is True) => Whether to split an iterable's content into multiple entries
        """
        # Union[int, float, Object, Fraction] doesn't work with isinstance
        # checks, so a single concrete type is passed instead.
        # NOTE(review): passing 'int' means floats/Fractions are rejected on
        # mutation even though the docstrings advertise them -- confirm
        # whether the BMPY 'Number' base class was intended here.
        super().__init__(int, *data, split = split)
    def Max(self) -> Union[int, float, Object, Fraction]:
        """
        Returns the highest value in the Array
        :return: int or float or Object or Fraction => The greatest value in the list
        """
        temp = self.Copy()
        temp.sort()
        return temp[-1]
    def Min(self) -> Union[int, float, Object, Fraction]:
        """
        Returns the lowest value in the Array
        :return: int or float or Object or Fraction => The smallest value in the list
        """
        temp = self.Copy()
        temp.sort()
        return temp[0]
    def Mean(self) -> Fraction:
        """
        Returns the mean of the data
        :return: Fraction => The sum of all parts divided by its length
        """
        return Fraction(self.Fold(), len(self))
    def Median(self) -> Union[Fraction, Union[int, float, Object, Fraction]]:
        """
        Returns the middle value of the Array
        :return: Fraction or int or float or Object => The middle value of the list
        """
        temp = self.Copy()
        # Repeatedly drop the first and last element until one or two remain.
        while len(temp) not in [1, 2]:
            temp.tail()
            temp.init()
        if len(temp) == 2:
            return temp.Mean()
        return temp[0]
    def Mode(self) -> Union[int, float, Object, Fraction]:
        """
        Returns the most common value in the Array
        :return: Fraction or int or float or Object => The most common value
        """
        data = self.CountAll()
        keys = list(data.keys())
        highest = data[keys[0]]
        key = keys[0]
        for k in keys:
            if data[k] > highest:
                highest = data[k]
                key = k
        return key
    def Variance(self) -> Union[Fraction, int, float, Object]:
        """
        Returns the average distance (squared) the values are away from the mean
        :return: Fraction or int or float or Object => The square of the distance each value is away from the mean
        """
        mean = self.Mean()
        diff = Array()
        for data in self:
            diff.append(data - mean)
        diff.affect(lambda x:x ** 2)
        return diff.Mean()
    def StandardDeviation(self) -> Surd:
        """
        The root of the variance
        :return: Surd => The square root of the variance
        """
        return Surd(self.Variance())
    def Percentile(self, percent: int) -> Union[Fraction, int, float, Object]:
        """
        Returns what value the specified percentile sits on (50% is the median)
        :param percent: int => The percentile to find
        :return: Fraction or int or float or Object => The value within the Array
        """
        temp = self.Copy()
        temp.sort()
        # FIX: the 1-based rank is percent/100 MULTIPLIED by the length (the
        # original added them, producing an index past the end of the data).
        start = percent / 100 * len(self)
        if int(start) == start:
            start = int(start)
        if isinstance(start, float):
            # Fractional rank: round to the nearest whole rank and read that
            # element (1-based rank -> 0-based index).
            index = list(str(start)).index(".")
            if int(str(start)[index + 1]) >= 5:
                start = int(str(start)[:index]) + 1
            else:
                start = int(str(start)[:index])
            # FIX: read from the sorted copy, not the unsorted original.
            value = temp[start - 1]
        else:
            # Whole rank: the percentile is the mean of this element and the
            # next.  FIX: use 0-based indices into the sorted copy (the
            # original read elements start and start+1 of the raw dataset).
            value = Fraction(temp[start - 1] + temp[start], 2)
        return value
    def Fold(self) -> Union[Fraction, int, float, Object]:
        """
        Returns the sum of every value in the Array
        :return: Fraction or int or float or Object => The result of adding every single element in the Array together
        """
        return self.FoldUsing(lambda x, y:x + y)[0]
    def Reduce(self) -> Union[Fraction, int, float, Object]:
        """
        Returns the product of every value in the Array
        :return: Fraction or int or float or Object => The result of multiplying every single element in the Array together
        """
        return self.FoldUsing(lambda x, y:x * y)[0]
    def int(self) -> None:
        """Converts every value in the Array to an integer"""
        self.affect(lambda x:int(x))
    def float(self) -> None:
        """Converts every value in the Array to a floating point number"""
        self.affect(lambda x:float(x))
class Set(List):
    """
    A class designed to represent a set in BMPY
    Class variables defined below:
    Instance variables defined below:
    See help(List) for more information on class and instance variables
    """
    def _change(self, **kwargs: Any):
        """Reacts to every mutation by removing duplicate entries, preserving set semantics."""
        # NOTE(review): List._change is not invoked here, so any change
        # bookkeeping done by the superclass is skipped -- confirm intended.
        self.deleteCopies()
class Vector:
    """
    A class designed to emulate a Vector with any amount of dimensions
    Class variables defined below:
    Instance variables defined below:
    _data (Array) (protected): The values at each dimension
    """
    def __init__(self, *data: Union[int, float, Object, Fraction]):
        """
        Sets up the values at each dimension
        :param data: int or float or Object or Fraction (multi-value) => The data to use as the Vector's data
        """
        self._data = Array(data)
    def __len__(self) -> int:
        """
        Returns the number of dimensions a Vector has
        :return: int => The size of the Array containing the data
        """
        return len(self._data)
    def __str__(self) -> str:
        """
        Neatly formats the Vector into a string (one padded "| x |" cell per dimension)
        :return: str => A neatly formatted string containing all the necessary information
        """
        if not bool(self._data):
            return "| 0 |"
        data = self._data.To(List)
        data.affect(lambda x:str(x))
        # Pad every cell to the widest entry so the column lines up.
        length = Array([len(elem) for elem in data]).Max()
        def inner(elem):
            while len(elem) < length:
                elem += " "
            return elem
        data.affect(inner)
        data.affect(lambda x:"| " + x + " |")
        del inner
        return data.Display(sep = "\n")
    def __repr__(self) -> str:
        """
        Creates a string that can be evaluated to produce an identical Vector
        :return: str => A representation of the object
        """
        data = self._data.To(List)
        data.affect(lambda x:repr(x))
        return "Vector(" + ",".join(data) + ")"
    def __add__(self, other: "Vector") -> "Vector":
        """
        Adds two Vectors together (missing dimensions are treated as 0 via Get)
        :param other: Vector => The other Vector to use
        :return Vector: The resulting Vector, where each element is the sum of the corresponding elements
        :raise TypeError: If other isn't a Vector
        """
        if not isinstance(other, Vector):
            raise TypeError("Can only add Vectors together")
        arr = []
        length = len(self) if len(self) > len(other) else len(other)
        for x in range(length):
            arr.append(self.Get(x) + other.Get(x))
        return Vector(*arr)
    def __radd__(self, other: "Vector") -> "Vector":
        """
        Adds two Vectors together (addition commutes, so delegate to __add__)
        :param other: Vector => The other Vector to use
        :return Vector: The resulting Vector, where each element is the sum of the corresponding elements
        :raise TypeError: If other isn't a Vector
        """
        return self + other
    def __sub__(self, other: "Vector") -> "Vector":
        """
        Subtracts a Vector from this Vector
        :param other: Vector => The other Vector to use
        :return Vector: The resulting Vector, where each element is the difference between the corresponding elements
        :raise TypeError: If other isn't a Vector
        """
        return self + (-other)
    def __rsub__(self, other: "Vector") -> "Vector":
        """
        Subtracts this Vector from the left-hand operand
        :param other: Vector => The left-hand operand
        :return Vector: other - self
        :raise TypeError: If other isn't a Vector
        """
        # FIX: reflected subtraction must compute other - self; the original
        # returned self - other, the negation of the correct result.
        return -self + other
    def __mul__(self, other: Union[int, float, Object, Fraction, "Vector"]) -> "Vector":
        """
        Multiplies a Vector by another Vector (cross product)
        :param other: Vector or int or float or Object or Fraction => The other Vector to use. In the case of a constant, send to rmul
        :return Vector: The cross product
        :raise TypeError: If other isn't a Vector or in the '__types' class variable of Object
        """
        if not isinstance(other, Object.Types()) and not isinstance(other, Vector):
            raise TypeError("Cannot multiply Vector by type " + type(other).__name__)
        if isinstance(other, Object.Types()):
            other: Union[int, float, Object, Fraction]
            return other * self
        if len(other) > 3 or len(self) > 3:
            raise LengthError("Cannot cross product a Vector in more than 3 dimensions")
        # The cross product is the cofactor expansion of the determinant
        # | 1 1 1 | (placeholder unit row)
        # | a1 a2 a3 |
        # | b1 b2 b3 |
        matrix_0 = [1, 1, 1]
        matrix_1 = list(self._data)
        matrix_2 = list(other._data)
        for row in [matrix_1, matrix_2]:
            while len(row) < 3:
                row.append(0)
        mat = Matrix(Array(matrix_0), Array(matrix_1), Array(matrix_2))
        return Vector(*[mat.Cofactor(0, x) for x in range(3)])
    def __rmul__(self, other: Union[int, float, Object, Fraction]) -> "Vector":
        """
        Multiplies a Vector by a constant
        :param other: int or float or Object or Fraction => The constant to use
        :return Vector: A new Vector, where each element is this Vector's element multiplied by the constant
        :raise TypeError: If other isn't in the '__types' class variable of Object
        """
        if not isinstance(other, Object.Types()):
            raise TypeError("Cannot multiply Vector by type " + type(other).__name__)
        return Vector(*[elem * other for elem in self._data])
    def __truediv__(self, other: Union[int, float, Object, Fraction]) -> "Vector":
        """
        Divides a Vector by a constant
        :param other: int or float or Object or Fraction => The constant to use
        :return Vector: A new Vector, where each element is this Vector's element divided by the constant
        :raise TypeError: If other isn't in the '__types' class variable of Object
        """
        # NOTE(review): this multiplies by Fraction(other); confirm a
        # single-argument Fraction denotes the reciprocal of 'other',
        # otherwise this multiplies instead of divides.
        return self * Fraction(other)
    def __pow__(self, power: int, modulo: Optional[int] = None) -> Union[int, float, Object, Fraction]:
        """
        Squares a Vector (returns the scalar dot product of the Vector with itself)
        :param power: int => The power to use (should be 2)
        :param modulo: int or None => The modulus to use (should be None, only used to signature match)
        :return int or float or Object or Fraction: The dot product of the Vector with itself
        :raise ValueError: If power isn't 2 or modulo isn't None
        """
        if power != 2:
            raise ValueError("Can only square Vectors")
        if modulo is not None:
            raise ValueError("Cannot mod a Vector")
        return Vector.DotProduct(self, self)
    def __neg__(self) -> "Vector":
        """
        Negates a Vector, turning it negative
        :return Vector: The new Vector where every value is negative
        """
        return -1 * self
    def __eq__(self, other: "Vector") -> bool:
        """
        Equates two Vectors
        :param other: Vector => The other Vector
        :return bool: Whether each value is equal
        """
        return self._data == other._data
    def __ne__(self, other: "Vector") -> bool:
        """
        Equates two Vectors
        :param other: Vector => The other Vector
        :return bool: Whether the Vectors are unequal
        """
        return self._data != other._data
    def Data(self) -> FixedList:
        """
        Returns a copy of the data within the Vector
        :return FixedList: A deep copy of the data, that cannot be modified
        """
        return self._data.Copy().To(FixedList)
    def Copy(self) -> "Vector":
        """
        Returns a copy of the Vector
        :return Vector: A new Vector using the old data
        """
        return Vector(*self.Data())
    def Get(self, index: int) -> "Union[int,float,Object,Fraction]":
        """
        Returns the value of the Vector at the dimension specified (0 if not defined)
        :return: int or float or Object or Fraction => The value
        """
        try:
            return self._data[index]
        except IndexError:
            return 0
    def Magnitude(self) -> Surd:
        """
        Returns the magnitude of the Vector- which is the square of all values summed up and square rooted
        (Vector(3,4) becomes Surd(9+16))
        :return: Surd => The magnitude
        """
        copy = self._data.Copy()
        copy.affect(lambda x:x ** 2)
        return Surd(copy.Fold())
    def Unit(self) -> "Vector":
        """
        Returns a unit Vector (a vector with magnitude 1) based off of this Vector's values
        :return: Vector => The unit vector
        """
        copy = self._data.Copy()
        copy.affect(lambda x:Fraction(x, self.Magnitude()))
        return Vector(*copy)
    def CheckZero(self) -> bool:
        """
        Checks whether this Vector is a zero Vector (has 0 for every element)
        :return: bool => The result of the check
        """
        for elem in self._data:
            if elem != 0:
                return False
        return True
    def CheckUnit(self) -> bool:
        """
        Checks whether this Vector is a unit Vector
        :return: bool => The result of the check
        """
        return self.Magnitude() == 1
    def Transform(self, amount: int) -> "Matrix":
        """
        Transform a Vector by amount specified
        Transformation moves the Vector
        :param amount: int => The amount to transform by
        :return: Matrix => The transformed Vector (converted to a Matrix)
        """
        # FIX: Matrix rows must be Arrays (Matrix.__init__ calls row.Copy()),
        # so each row of the scaling matrix is built as an Array rather than
        # a plain list, which has no Copy method.
        rows = []
        for x in range(len(self)):
            rows.append(Array(*[0 if col != x else amount for col in range(len(self))]))
        return Matrix(*rows) * self
    def Rotate(self, theta: "Union[int,Pi,float,Fraction]") -> "Matrix":
        """
        Rotate a Vector by amount specified
        :param theta: int or float or Pi or Fraction => The angle to rotate by
        :return: Matrix => The rotated Vector (converted to a Matrix)
        """
        return Matrix(Array(Cos(theta), Sin(-theta)), Array(Sin(theta), Cos(-theta))) * self
    def Project(self, theta: "Union[int,Pi,float,Fraction]") -> "Matrix":
        """
        Project a Vector by amount specified
        :param theta: int or float or Pi or Fraction => The angle to project by
        :return: Matrix => The projected Vector (converted to a Matrix)
        """
        return Matrix(Array(Cos(theta, power = 2), Cos(theta) * Sin(theta)),
                      Array(Sin(theta) * Cos(theta), Sin(theta, power = 2))) * self
    def unit(self) -> None:
        """Transforms this Vector into a unit Vector (see help(Vector.Unit) for information on what a unit Vector is"""
        self._data = self.Unit()._data.Copy()
    @staticmethod
    def DotProduct(vector1: "Vector", vector2: "Vector") -> Union[int, float, Object, Fraction]:
        """
        Calculates the dot product of two Vectors (a.b)
        This is the sum of the elements multiplied together, so Vector(2,5).Vector(3,2) becomes 6+10, becomes 16
        :param vector1: Vector => The first vector
        :param vector2: Vector => The second Vector
        :return: int or float or Object or Fraction => The sum of all elements multiplied together
        """
        arr = Array()
        # FIX: the else branch previously returned len(vector1) as well, so
        # the longer-vector length was never actually used.  (Get() pads
        # missing dimensions with 0, so both branches agreed numerically, but
        # the intent is the maximum of the two lengths.)
        length = len(vector1) if len(vector1) > len(vector2) else len(vector2)
        for x in range(length):
            arr.append(vector1.Get(x) * vector2.Get(x))
        return arr.Fold()
    @staticmethod
    def ScalarProjection(vector1: "Vector", vector2: "Vector") -> Union[int, float, Object, Fraction]:
        """
        Calculates the scalar projection of two Vectors
        This is the dot product of one with a unit Vector of the other
        :param vector1: Vector => The first vector
        :param vector2: Vector => The second Vector
        :return: int or float or Object or Fraction => The dot product
        """
        return Vector.DotProduct(vector1, vector2.Unit())
    @staticmethod
    def VectorProjection(vector1: "Vector", vector2: "Vector") -> "Vector":
        """
        Calculates the vector projection of two Vectors
        This is the scalar projection, then multiplied by a unit Vector of one Vector
        :param vector1: Vector => The first vector
        :param vector2: Vector => The second Vector
        :return: Vector => The projection
        """
        return Vector.ScalarProjection(vector1, vector2) * vector1.Unit()
    @staticmethod
    def ScalarTripleProduct(vector1: "Vector", vector2: "Vector", vector3: "Vector") -> Union[int, float, Object, Fraction]:
        """
        Calculates the scalar product of three Vectors
        :param vector1: Vector => The first vector
        :param vector2: Vector => The second Vector
        :param vector3: Vector => The third vector
        :return: int or float or Fraction or Object => The product
        """
        return Vector.DotProduct(vector1, vector2 * vector3)
    @staticmethod
    def VectorTripleProduct(vector1: "Vector", vector2: "Vector", vector3: "Vector") -> "Vector":
        """
        Calculates the Vector product of three Vectors: a x (b x c) = (a.c)b - (a.b)c
        :param vector1: Vector => The first vector
        :param vector2: Vector => The second Vector
        :param vector3: Vector => The third vector
        :return: Vector => The product
        """
        LHS = Vector.DotProduct(vector1, vector3) * vector2
        RHS = Vector.DotProduct(vector1, vector2) * vector3
        return LHS - RHS
class Matrix:
    """
    A class designed to emulate a Matrix with any size
    Class variables defined below:
    Instance variables defined below:
    _data (List) (protected): The rows and columns of the Matrix
    _width (int) (protected): The size of each row
    _height (int) (protected): The number of rows
    """
    def __init__(self, *rows: Array):
        """
        Sets up the rows of the Matrix
        :param rows: Array (multi-value) => The rows to use in the Matrix
        :raise SizeError: If the rows aren't all the same length
        """
        for row in rows:
            if len(row) != len(rows[0]):
                raise SizeError(f"Cannot find consistent width (expected width '{len(rows[0])}', but got width '{len(row)}')")
        self._data = List([row.Copy() for row in rows])
        self._width = len(rows[0])
        self._height = len(rows)
    def __str__(self) -> str:
        """
        Neatly formats the Matrix into a string (one line per row of padded "| x |" cells)
        :return: str => A neatly formatted string containing all the necessary information
        """
        if not bool(self._data):
            return "| 0 |"
        data = List([row.To(List) for row in self._data])
        for x in range(len(data)):
            data[x].affect(lambda v:str(v))
        # Pad every cell to the widest entry across the whole Matrix.
        widths = Array()
        for li in data:
            for elem in li:
                widths.append(len(elem))
        innerlength = widths.Max()
        def inner(elem):
            while len(elem) < innerlength:
                elem += " "
            return elem
        for x in range(len(data)):
            data[x].affect(lambda v:"| " + inner(v) + " |")
        # FIX: the original print()ed every row except the last and returned
        # only the final row; __str__ must be side-effect free and return the
        # full rendering.
        lines = [data[x].Display(sep = " ") for x in range(len(data))]
        del inner
        return "\n".join(lines)
    def __repr__(self) -> str:
        """
        Creates a string that can be evaluated to produce an identical Matrix
        :return: str => A representation of the object
        """
        data = self._data.Copy()
        data.affect(lambda li:repr(li))
        return "Matrix(" + ", ".join(data) + ")"
    def __add__(self, other: "Matrix") -> "Matrix":
        """
        Adds two Matrices together
        :param other: Matrix => The other Matrix to use
        :return Matrix: The matrix where the corresponding elements are summed together
        :raises TypeError: if other isn't a Matrix
        :raises SizeError: if other isn't the same size
        """
        if not isinstance(other, Matrix):
            raise TypeError("Can only add matrices together")
        if self.Size() != other.Size():
            raise SizeError("Can only add matrices of the same size")
        width, height = self.Size()
        data = List()
        for x in range(height):
            data.append(self.Get(x).To(List).Zip(other.Get(x), fillvalue = 0))
        for x in range(height):
            data[x].affect(lambda tu:sum(tu))
            data[x] = data[x].To(Array)
        return Matrix(*data)
    def __sub__(self, other: "Matrix") -> "Matrix":
        """
        Subtracts two Matrices
        :param other: Matrix => The other Matrix to use
        :return Matrix: The matrix where the corresponding elements are the difference between them
        :raises TypeError: if other isn't a Matrix
        :raises SizeError: if other isn't the same size
        """
        return self + (-other)
    def __mul__(self, other: Union["Matrix", Vector, int, float, Object, Fraction]) -> "Matrix":
        """
        Multiplies a Matrix by a Matrix, Vector or constant
        :param other: Matrix or Vector or constant => The right-hand operand
        :return Matrix: The new matrix
        :raises TypeError: if other isn't a Matrix, Vector, int, float, Object, or Fraction
        :raises LengthError: if the operand dimensions don't line up
        """
        if not isinstance(other, Object.Types()) and not isinstance(other, (Matrix, Vector)):
            raise TypeError("Cannot multiply Matrix by type " + type(other).__name__)
        if isinstance(other, Object.Types()):
            other: Union[int, float, Object, Fraction]
            return other * self
        width, height = self.Size()
        if isinstance(other, Vector):
            # FIX: the original called other.Size() before this branch, which
            # raises AttributeError because Vector has no Size method.
            # FIX: a row of this Matrix has 'width' entries, so that is the
            # dimension the Vector must fit inside (was compared to height).
            if width < len(other):
                raise LengthError("Vector is bigger than Matrix!")
            rows = []
            for x in range(height):
                a = Vector(*self.Get(x))
                b = Vector(*[other.Get(i) for i in range(len(other))])
                rows.append(Array(Vector.DotProduct(a, b)))
            return Matrix(*rows)
        owidth, oheight = other.Size()
        # FIX: matrix multiplication requires self's width to equal other's
        # height, and the product has self's height and other's width (the
        # original compared height to owidth and produced a height x width
        # result, which is wrong for rectangular operands).
        if width != oheight:
            raise LengthError("Second Matrix's height does not match the first Matrix's width!")
        rows = []
        for x in range(height):
            build = Array()
            for y in range(owidth):
                a = Vector(*self.Get(x))
                b = Vector(*[other.Get(i, y) for i in range(oheight)])
                build.append(Vector.DotProduct(a, b))
            rows.append(build)
        return Matrix(*rows)
    def __rmul__(self, other: Union[int, float, Object, Fraction, Vector]) -> "Matrix":
        """
        Multiplies a Matrix by a constant
        :param other: int or float or Object or Fraction or Vector => The constant to use
        :return Matrix: The new matrix
        :raise TypeError: if other isn't a int, float, Object, or Fraction
        """
        if isinstance(other, Vector):
            return self * other
        if not isinstance(other, Object.Types()):
            raise TypeError("Cannot multiply Matrix by type " + type(other).__name__)
        data = self._data.Copy()
        for x in range(len(data)):
            data[x].affect(lambda x:x * other)
        return Matrix(*data)
    def __truediv__(self, other: Union[int, float, Object, Fraction]) -> "Matrix":
        """
        Divides a Matrix by a constant
        :param other: int or float or Object or Fraction => The constant to use
        :return Matrix: The new matrix
        :raise TypeError: if other isn't a int, float, Object, or Fraction
        """
        # NOTE(review): as in Vector.__truediv__, confirm Fraction(other)
        # denotes the reciprocal of 'other'.
        return self * Fraction(other)
    def __neg__(self) -> "Matrix":
        """
        Negates a Matrix (minus values become positive, positive values become negative)
        :return: Matrix => The resulting matrix
        """
        return -1 * self
    def __eq__(self, other: "Matrix") -> bool:
        """
        Checks whether two Matrix are equal
        :return: bool => The result of the check
        """
        return self._data == other._data
    def __ne__(self, other: "Matrix") -> bool:
        """
        Checks whether two Matrix are unequal
        :return: bool => The result of the check
        """
        return self._data != other._data
    def Size(self) -> FixedList:
        """
        Returns the size of the Matrix
        :return FixedList: A tuple of width,height
        """
        return FixedList(self._width, self._height)
    def Get(self, *args: int) -> Union[int, float, Object, Fraction, Array]:
        """
        Returns the element at specified row and column, or returns the specified row
        :param args: (multi value) => must be integers, 1 value means get the row, 2 means get the element
        :return: int or float or Fraction or Object or Array => Array if a row, any other value will be an element
        :raise ValueError: If length of args isn't one or two
        """
        if len(args) not in [1, 2]:
            raise ValueError("Must specify an index to get")
        x, *y = args
        if len(y) == 0:
            return self._data[x]
        y = int(y[0])
        return self._data[x][y]
    def Transpose(self) -> "Matrix":
        """
        Returns a Matrix where rows and columns are exchanged ((r,c) becomes (c,r))
        :return: Matrix => The new transposed matrix
        """
        # FIX: the original repeatedly swapped whole rows, which permutes the
        # row order instead of transposing; build the new rows from columns.
        rows = []
        for c in range(self._width):
            rows.append(Array(*[self.Get(r, c) for r in range(self._height)]))
        return Matrix(*rows)
    def Minor(self, x: int, y: int) -> Union[int, float, Object, Fraction]:
        """
        Calculates the minor of an element (the determinant of a new matrix excluding the element's row and column)
        :param x: int => The row
        :param y: int => The column
        :return int or float or Object or Fraction: The determinant of the new matrix
        :raise SizeError: if this Matrix is non-square
        """
        if not self.CheckSquare():
            raise SizeError("Minor isn't defined for non-square Matrices")
        mat: list[Array] = []
        for row in range(self._height):
            rows = Array()
            for col in range(self._width):
                if not (row == x or col == y):
                    rows.append(self.Get(row, col))
            mat.append(rows)
        # Row x collected nothing above, so drop its empty placeholder.
        del mat[x]
        return Matrix(*mat).Determinant()
    def Cofactor(self, x: int, y: int) -> Union[int, float, Object, Fraction]:
        """
        Calculates the cofactor of an element (the minor multiplied by -1^(x+y))
        :param x: int => The row
        :param y: int => The column
        :return int or float or Object or Fraction: The signed minor
        :raise SizeError: if this Matrix is non-square
        """
        return (-1) ** (x + y) * self.Minor(x, y)
    def Adjugate(self) -> "Matrix":
        """
        Creates and returns the transpose of a cofactor Matrix for this Matrix
        A cofactor Matrix is a Matrix where every element is replaced by its cofactor
        :return: Matrix => The cofactor matrix transposed
        """
        copy = self._data.Copy()
        for r in range(self._height):
            for c in range(self._width):
                copy[r][c] = self.Cofactor(r, c)
        return Matrix(*copy).Transpose()
    def Inverse(self) -> "Matrix":
        """
        Creates and returns the inverse Matrix of this matrix
        An inverse matrix is the adjugate Matrix divided by the determinant
        see help(Matrix.Determinant) for what a determinant is
        :return: Matrix => The inverse matrix
        :raise ZeroDivisionError: If determinant is 0
        """
        adj = self.Adjugate()
        det = self.Determinant()
        if det == 0:
            raise ZeroDivisionError("Cannot find inverse of Matrix with 0 determinant")
        for r in range(self._height):
            for c in range(self._width):
                adj._data[r][c] = Fraction(adj.Get(r, c), det)
        return adj
    def Determinant(self) -> Union[int, float, Object, Fraction]:
        """
        Returns the sum of every top row element multiplied by its cofactor (so element 0,0 is multiplied by the cofactor of 0,0
        which is added to 0,1 multiplied by the cofactor of 0,1 so on and so forth, until the end of the top row)
        :return: int or float or Object or Fraction => The top row * cofactor sum
        :raise SizeError: If matrix isn't square
        """
        if not self.CheckSquare():
            raise SizeError("Cannot find determinant of non-square Matrix")
        if self._width == 1:
            return self.Get(0, 0)
        if self._width == 2:
            return (self.Get(0, 0) * self.Get(1, 1)) - (self.Get(0, 1) * self.Get(1, 0))
        terms = Array()
        for i in range(self._width):
            terms.append(self.Get(0, i) * self.Cofactor(0, i))
        return terms.Fold()
    def CheckSquare(self) -> bool:
        """
        Checks whether this matrix is a square (#rows==#columns)
        :return: bool => The result of the check
        """
        return self._width == self._height
    def CheckZero(self) -> bool:
        """
        Checks whether this matrix is a 0 matrix (all elements are 0)
        :return: bool => The result of the check
        """
        for r in range(self._height):
            for c in range(self._width):
                if self.Get(r, c) != 0:
                    return False
        return True
    def CheckDiagonal(self) -> bool:
        """
        Checks whether this matrix is a diagonal matrix (0 except for the diagonals)
        :return: bool => The result of the check
        """
        if not self.CheckSquare():
            return False
        for r in range(self._height):
            for c in range(self._width):
                if r != c and self.Get(r, c) != 0:
                    return False
        return True
    def CheckUnit(self) -> bool:
        """
        Checks whether this matrix is a unit matrix (0 except for the diagonals, which are 1)
        :return: bool => The result of the check
        """
        if not self.CheckDiagonal():
            return False
        for r in range(self._height):
            for c in range(self._width):
                if r == c and self.Get(r, c) != 1:
                    return False
        return True
    def transpose(self) -> None:
        """Changes this matrix to be its transpose"""
        copy = self.Transpose()
        self._data = copy._data
        # FIX: the stored dimensions must follow the data, otherwise a
        # non-square matrix keeps a stale width/height after transposing.
        self._width = copy._width
        self._height = copy._height
    def inverse(self) -> None:
        """
        Changes this matrix to be its inverse
        :raise ZeroDivisionError: If determinant is 0
        """
        copy = self.Inverse()
        self._data = copy._data
    def adjugate(self) -> None:
        """Changes this matrix to be its adjugate"""
        copy = self.Adjugate()
        self._data = copy._data
# Strip helper modules and typing names from the module namespace so they do
# not leak to importers of this module.
# NOTE(review): any annotation referencing Union/Any/Optional/Callable/Type
# that is evaluated at runtime (e.g. typing.get_type_hints) will fail after
# these deletions -- confirm annotations are only ever used as strings.
del abc
del inspect
del math
del random
del sys
del Union
del Any
del Optional
del Callable
del Type
|
py | 7dfb0f3b28e0f3946775a462e8e1d4c4459abed5 | #!/usr/bin/env python
#
# Copyright 2019 International Business Machines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import platform
from os.path import join as pathjoin
from os.path import isdir as isdir
from os.path import isfile as isfile
from ocaccel_utils import which
from ocaccel_utils import SystemCMD
from ocaccel_utils import msg
from os import environ as env
from ocaccel_utils import source
def env_check(options):
    """
    Verify the toolchain and directory layout before an ocaccel flow starts.

    Checks, in order: gcc, the Vivado tool chain (when a model, simulation or
    image build is requested), the selected simulator binary, the OCSE and
    SNAP directory trees, and finally the partial-reconfiguration settings
    sourced from .snap_config.sh.  Failures exit via msg.fail_msg.

    :param options: parsed command-line options; reads no_make_model,
        no_run_sim, make_image, simulator, unit_sim, ocse_root,
        ocaccel_root and interactive, and may rewrite options.image_mode.
    """
    assert sys.version_info >= (2, 6)
    msg.ok_msg_blue("--------> Environment Check")
    # gcc is required for every flow.
    gcc = SystemCMD("gcc")
    gcc . check(existence_critical=True, minimum_version = "4.4.6")
    # Vivado (and an xterm for its GUI/console) is only needed when a model,
    # a simulation run or an image build was requested.
    if not options.no_make_model or not options.no_run_sim or options.make_image:
        vivado = SystemCMD("vivado")
        xterm = SystemCMD("xterm")
        vivado . check(existence_critical=True, minimum_version = "2018.2")
        xterm . check(existence_critical=True)
    if options.simulator.lower() == "xcelium":
        xrun = SystemCMD("xrun")
        xrun . check(existence_critical=True)
    elif options.simulator.lower() == "vcs":
        vcs = SystemCMD("vcs")
        vcs . check(existence_critical=True)
    elif options.simulator.lower() == "nosim":
        pass
    elif options.simulator.lower() == "xsim":
        # xsim is bundled with vivado, no need to check
        pass
    else:
        msg.fail_msg("%s is an unknown simulator! Exiting ... " % options.simulator)
    # The OCSE tree is only needed for a real (non-unit) simulation.
    if options.no_run_sim == False or options.no_make_model == False:
        if options.simulator.lower() != "nosim" and options.unit_sim != True:
            if isdir(pathjoin(options.ocse_root, "ocse")) and\
               isdir(pathjoin(options.ocse_root, "afu_driver")) and\
               isdir(pathjoin(options.ocse_root, "libocxl")):
                msg.ok_msg_blue("OCSE path %s is valid" % options.ocse_root)
            else:
                msg.fail_msg("OCSE path %s is not valid! Exiting ... " % options.ocse_root)
    if isdir(pathjoin(options.ocaccel_root, "actions")) and\
       isdir(pathjoin(options.ocaccel_root, "hardware")) and\
       isdir(pathjoin(options.ocaccel_root, "software")):
        msg.ok_msg_blue("SNAP ROOT %s is valid" % options.ocaccel_root)
    else:
        msg.fail_msg("SNAP ROOT %s is not valid! Exiting ... " % options.ocaccel_root)
    if 'SNAP_ROOT' not in env:
        env['SNAP_ROOT'] = options.ocaccel_root
    # Pull USE_PRFLOW (and friends) into os.environ from the project config.
    source(pathjoin(env['SNAP_ROOT'], '.snap_config.sh'))
    prflow_mode = env['USE_PRFLOW']
    if prflow_mode == "TRUE":
        if options.interactive == True:
            options.image_mode = "cloud_action"
        if options.image_mode == "normal":
            msg.fail_msg("%s mode selected for image build while in PR flow! Exiting ... " % options.image_mode)
        else:
            msg.ok_msg("Partial reconfiguration mode detected")
    else:
        if options.image_mode != "normal":
            msg.fail_msg("%s mode selected for image build while in Normal flow! Exiting ... " % options.image_mode)
        else:
            options.image_mode = "normal"
    msg.ok_msg("Environment check PASSED")
|
py | 7dfb0f870f2697437d70f598ab26f1c1c17b2606 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import uuid
from .connections import resolve_connection
from .job import Job, Status
from .utils import import_attribute, utcnow
from .exceptions import (DequeueTimeout, InvalidJobOperationError,
NoSuchJobError, UnpickleError)
from .compat import total_ordering, string_types, as_text
from redis import WatchError
def get_failed_queue(connection=None):
    """Return a handle to the special failed queue."""
    failed = FailedQueue(connection=connection)
    return failed
def compact(lst):
    """Return a list containing the items of *lst* with all None entries dropped."""
    return [entry for entry in lst if entry is not None]
@total_ordering
class Queue(object):
job_class = Job
DEFAULT_TIMEOUT = 180 # Default timeout seconds.
redis_queue_namespace_prefix = 'rq:queue:'
redis_queues_keys = 'rq:queues'
@classmethod
def all(cls, connection=None):
    """Returns an iterable of all Queues.

    Reads the registry set at cls.redis_queues_keys and builds a Queue per
    member; empty/falsy keys are skipped.
    """
    connection = resolve_connection(connection)
    def to_queue(queue_key):
        return cls.from_queue_key(as_text(queue_key),
                                  connection=connection)
    return [to_queue(rq_key) for rq_key in connection.smembers(cls.redis_queues_keys) if rq_key]
@classmethod
def from_queue_key(cls, queue_key, connection=None):
    """Returns a Queue instance, based on the naming conventions for naming
    the internal Redis keys. Can be used to reverse-lookup Queues by their
    Redis keys.

    :raise ValueError: if queue_key does not start with the
        'rq:queue:' namespace prefix
    """
    prefix = cls.redis_queue_namespace_prefix
    if not queue_key.startswith(prefix):
        raise ValueError('Not a valid RQ queue key: %s' % (queue_key,))
    # Everything after the namespace prefix is the queue name.
    name = queue_key[len(prefix):]
    return cls(name, connection=connection)
# NOTE(review): the parameter name 'async' became a reserved keyword in
# Python 3.7, so this module only parses on older interpreters -- confirm
# the supported Python versions before modernising.
def __init__(self, name='default', default_timeout=None, connection=None,
             async=True, job_class=None):
    """Initialise a queue named *name* on the given Redis connection.

    :param name: queue name; the Redis list key becomes 'rq:queue:<name>'
    :param default_timeout: default job timeout for this queue
    :param connection: Redis connection (resolved from the stack if None)
    :param async: presumably, when False jobs run immediately instead of
        being enqueued -- confirm against the enqueue logic
    :param job_class: Job subclass, or a dotted-path string to import one
    """
    self.connection = resolve_connection(connection)
    prefix = self.redis_queue_namespace_prefix
    self.name = name
    self._key = '%s%s' % (prefix, name)
    self._default_timeout = default_timeout
    self._async = async
    if job_class is not None:
        # Accept either a class object or its dotted import path.
        if isinstance(job_class, string_types):
            job_class = import_attribute(job_class)
        self.job_class = job_class
def __len__(self):
    """Number of jobs currently on the queue (LLEN of the queue key)."""
    return self.count
@property
def key(self):
    """Returns the Redis key for this Queue."""
    return self._key
def empty(self):
    """Removes all messages on the queue.

    The drain runs as a single server-side Lua script so that popping each
    job id and deleting its job hash plus its dependents set happens
    atomically.  Returns the number of jobs removed.
    """
    script = b"""
        local prefix = "rq:job:"
        local q = KEYS[1]
        local count = 0
        while true do
            local job_id = redis.call("lpop", q)
            if job_id == false then
                break
            end
            -- Delete the relevant keys
            redis.call("del", prefix..job_id)
            redis.call("del", prefix..job_id..":dependents")
            count = count + 1
        end
        return count
    """
    script = self.connection.register_script(script)
    return script(keys=[self.key])
def is_empty(self):
"""Returns whether the current queue is empty."""
return self.count == 0
def fetch_job(self, job_id):
try:
return self.job_class.fetch(job_id, connection=self.connection)
except NoSuchJobError:
self.remove(job_id)
def get_job_ids(self, offset=0, length=-1):
"""Returns a slice of job IDs in the queue."""
start = offset
if length >= 0:
end = offset + (length - 1)
else:
end = length
return [as_text(job_id) for job_id in
self.connection.lrange(self.key, start, end)]
def get_jobs(self, offset=0, length=-1):
"""Returns a slice of jobs in the queue."""
job_ids = self.get_job_ids(offset, length)
return compact([self.fetch_job(job_id) for job_id in job_ids])
@property
def job_ids(self):
"""Returns a list of all job IDS in the queue."""
return self.get_job_ids()
@property
def jobs(self):
"""Returns a list of all (valid) jobs in the queue."""
return self.get_jobs()
@property
def count(self):
"""Returns a count of all messages in the queue."""
return self.connection.llen(self.key)
def remove(self, job_or_id, pipeline=None):
"""Removes Job from queue, accepts either a Job instance or ID."""
job_id = job_or_id.id if isinstance(job_or_id, self.job_class) else job_or_id
if pipeline is not None:
pipeline.lrem(self.key, 0, job_id)
return self.connection._lrem(self.key, 0, job_id)
def compact(self):
"""Removes all "dead" jobs from the queue by cycling through it, while
guarantueeing FIFO semantics.
"""
COMPACT_QUEUE = 'rq:queue:_compact:{0}'.format(uuid.uuid4())
self.connection.rename(self.key, COMPACT_QUEUE)
while True:
job_id = as_text(self.connection.lpop(COMPACT_QUEUE))
if job_id is None:
break
if self.job_class.exists(job_id, self.connection):
self.connection.rpush(self.key, job_id)
def push_job_id(self, job_id, pipeline=None):
"""Pushes a job ID on the corresponding Redis queue."""
connection = pipeline if pipeline is not None else self.connection
connection.rpush(self.key, job_id)
def enqueue_call(self, func, args=None, kwargs=None, timeout=None,
result_ttl=None, description=None, depends_on=None,
job_id=None):
"""Creates a job to represent the delayed function call and enqueues
it.
It is much like `.enqueue()`, except that it takes the function's args
and kwargs as explicit arguments. Any kwargs passed to this function
contain options for RQ itself.
"""
timeout = timeout or self._default_timeout
# TODO: job with dependency shouldn't have "queued" as status
job = self.job_class.create(func, args, kwargs, connection=self.connection,
result_ttl=result_ttl, status=Status.QUEUED,
description=description, depends_on=depends_on, timeout=timeout,
id=job_id)
# If job depends on an unfinished job, register itself on it's
# parent's dependents instead of enqueueing it.
# If WatchError is raised in the process, that means something else is
# modifying the dependency. In this case we simply retry
if depends_on is not None:
if not isinstance(depends_on, self.job_class):
depends_on = Job(id=depends_on, connection=self.connection)
with self.connection.pipeline() as pipe:
while True:
try:
pipe.watch(depends_on.key)
if depends_on.get_status() != Status.FINISHED:
job.register_dependency(pipeline=pipe)
job.save(pipeline=pipe)
pipe.execute()
return job
break
except WatchError:
continue
return self.enqueue_job(job)
def enqueue(self, f, *args, **kwargs):
"""Creates a job to represent the delayed function call and enqueues
it.
Expects the function to call, along with the arguments and keyword
arguments.
The function argument `f` may be any of the following:
* A reference to a function
* A reference to an object's instance method
* A string, representing the location of a function (must be
meaningful to the import context of the workers)
"""
if not isinstance(f, string_types) and f.__module__ == '__main__':
raise ValueError('Functions from the __main__ module cannot be processed '
'by workers.')
# Detect explicit invocations, i.e. of the form:
# q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)
timeout = kwargs.pop('timeout', None)
description = kwargs.pop('description', None)
result_ttl = kwargs.pop('result_ttl', None)
depends_on = kwargs.pop('depends_on', None)
job_id = kwargs.pop('job_id', None)
if 'args' in kwargs or 'kwargs' in kwargs:
assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.' # noqa
args = kwargs.pop('args', None)
kwargs = kwargs.pop('kwargs', None)
return self.enqueue_call(func=f, args=args, kwargs=kwargs,
timeout=timeout, result_ttl=result_ttl,
description=description, depends_on=depends_on,
job_id=job_id)
def enqueue_job(self, job, set_meta_data=True):
"""Enqueues a job for delayed execution.
If the `set_meta_data` argument is `True` (default), it will update
the properties `origin` and `enqueued_at`.
If Queue is instantiated with async=False, job is executed immediately.
"""
# Add Queue key set
self.connection.sadd(self.redis_queues_keys, self.key)
if set_meta_data:
job.origin = self.name
job.enqueued_at = utcnow()
if job.timeout is None:
job.timeout = self.DEFAULT_TIMEOUT
job.save()
if self._async:
self.push_job_id(job.id)
else:
job.perform()
job.save()
return job
def enqueue_dependents(self, job):
"""Enqueues all jobs in the given job's dependents set and clears it."""
# TODO: can probably be pipelined
while True:
job_id = as_text(self.connection.spop(job.dependents_key))
if job_id is None:
break
dependent = self.job_class.fetch(job_id, connection=self.connection)
self.enqueue_job(dependent)
def pop_job_id(self):
"""Pops a given job ID from this Redis queue."""
return as_text(self.connection.lpop(self.key))
@classmethod
def lpop(cls, queue_keys, timeout, connection=None):
"""Helper method. Intermediate method to abstract away from some
Redis API details, where LPOP accepts only a single key, whereas BLPOP
accepts multiple. So if we want the non-blocking LPOP, we need to
iterate over all queues, do individual LPOPs, and return the result.
Until Redis receives a specific method for this, we'll have to wrap it
this way.
The timeout parameter is interpreted as follows:
None - non-blocking (return immediately)
> 0 - maximum number of seconds to block
"""
connection = resolve_connection(connection)
if timeout is not None: # blocking variant
if timeout == 0:
raise ValueError('RQ does not support indefinite timeouts. Please pick a timeout value > 0.')
result = connection.blpop(queue_keys, timeout)
if result is None:
raise DequeueTimeout(timeout, queue_keys)
queue_key, job_id = result
return queue_key, job_id
else: # non-blocking variant
for queue_key in queue_keys:
blob = connection.lpop(queue_key)
if blob is not None:
return queue_key, blob
return None
def dequeue(self):
"""Dequeues the front-most job from this queue.
Returns a job_class instance, which can be executed or inspected.
"""
job_id = self.pop_job_id()
if job_id is None:
return None
try:
job = self.job_class.fetch(job_id, connection=self.connection)
except NoSuchJobError as e:
# Silently pass on jobs that don't exist (anymore),
# and continue by reinvoking itself recursively
return self.dequeue()
except UnpickleError as e:
# Attach queue information on the exception for improved error
# reporting
e.job_id = job_id
e.queue = self
raise e
return job
@classmethod
def dequeue_any(cls, queues, timeout, connection=None):
"""Class method returning the job_class instance at the front of the given
set of Queues, where the order of the queues is important.
When all of the Queues are empty, depending on the `timeout` argument,
either blocks execution of this function for the duration of the
timeout or until new messages arrive on any of the queues, or returns
None.
See the documentation of cls.lpop for the interpretation of timeout.
"""
queue_keys = [q.key for q in queues]
result = cls.lpop(queue_keys, timeout, connection=connection)
if result is None:
return None
queue_key, job_id = map(as_text, result)
queue = cls.from_queue_key(queue_key, connection=connection)
try:
job = cls.job_class.fetch(job_id, connection=connection)
except NoSuchJobError:
# Silently pass on jobs that don't exist (anymore),
# and continue by reinvoking the same function recursively
return cls.dequeue_any(queues, timeout, connection=connection)
except UnpickleError as e:
# Attach queue information on the exception for improved error
# reporting
e.job_id = job_id
e.queue = queue
raise e
return job, queue
# Total ordering defition (the rest of the required Python methods are
# auto-generated by the @total_ordering decorator)
def __eq__(self, other): # noqa
if not isinstance(other, Queue):
raise TypeError('Cannot compare queues to other objects.')
return self.name == other.name
def __lt__(self, other):
if not isinstance(other, Queue):
raise TypeError('Cannot compare queues to other objects.')
return self.name < other.name
def __hash__(self):
return hash(self.name)
def __repr__(self): # noqa
return 'Queue(%r)' % (self.name,)
def __str__(self):
return '<Queue \'%s\'>' % (self.name,)
class FailedQueue(Queue):
    # Specialized queue that holds jobs which raised during execution.  It
    # lives under the reserved queue name Status.FAILED.
    def __init__(self, connection=None):
        super(FailedQueue, self).__init__(Status.FAILED, connection=connection)
    def quarantine(self, job, exc_info):
        """Puts the given Job in quarantine (i.e. put it on the failed
        queue).
        This is different from normal job enqueueing, since certain meta data
        must not be overridden (e.g. `origin` or `enqueued_at`) and other meta
        data must be inserted (`ended_at` and `exc_info`).
        """
        job.ended_at = utcnow()
        job.exc_info = exc_info
        # set_meta_data=False preserves the job's original origin/enqueued_at.
        return self.enqueue_job(job, set_meta_data=False)
    def requeue(self, job_id):
        """Requeues the job with the given job ID."""
        try:
            job = self.job_class.fetch(job_id, connection=self.connection)
        except NoSuchJobError:
            # Silently ignore/remove this job and return (i.e. do nothing)
            self.remove(job_id)
            return
        # Delete it from the failed queue (raise an error if that failed)
        if self.remove(job) == 0:
            raise InvalidJobOperationError('Cannot requeue non-failed jobs.')
        # Reset the job state, then hand it back to its originating queue.
        job.set_status(Status.QUEUED)
        job.exc_info = None
        q = Queue(job.origin, connection=self.connection)
        q.enqueue_job(job)
|
py | 7dfb0fa02eb9a0f53fa4baebe08e5d1be347bb96 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsManager
"""
import copy
import datetime
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance_actions
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
# Canned fixture data shared by the test cases below.  host1/host2 each
# carry one compute node, while host3 has an empty compute_node list.
FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
FAKE_SERVICES = [dict(id=1, host='host1',
                      compute_node=[FAKE_COMPUTE_NODES[0]]),
                 dict(id=2, host='host2',
                      compute_node=[FAKE_COMPUTE_NODES[1]]),
                 dict(id=3, host='host3', compute_node=[])]
FAKE_TASK_LOGS = [dict(id=1, host='host1'),
                  dict(id=2, host='host2')]
class CellsManagerClassTestCase(test.TestCase):
    """Test case for CellsManager class.
    fakes.init() builds a fake cell topology; a CellsManager is created for
    one child cell and most tests stub out its MessageRunner (via mox) to
    verify that manager methods delegate correctly and that per-cell
    responses are flattened/annotated the way callers expect.
    """
    def setUp(self):
        super(CellsManagerClassTestCase, self).setUp()
        fakes.init(self)
        # pick a child cell to use for tests.
        self.our_cell = 'grandchild-cell1'
        self.cells_manager = fakes.get_cells_manager(self.our_cell)
        self.msg_runner = self.cells_manager.msg_runner
        self.driver = self.cells_manager.driver
        self.ctxt = 'fake_context'
    def _get_fake_response(self, raw_response=None, exc=False):
        # Helper to build a messaging.Response; exc=True simulates a cell
        # reporting a failure.
        if exc:
            return messaging.Response('fake', test.TestingException(),
                                      True)
        if raw_response is None:
            raw_response = 'fake-response'
        return messaging.Response('fake', raw_response, False)
    def test_get_cell_info_for_neighbors(self):
        self.mox.StubOutWithMock(self.cells_manager.state_manager,
                                 'get_cell_info_for_neighbors')
        self.cells_manager.state_manager.get_cell_info_for_neighbors()
        self.mox.ReplayAll()
        self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
    def test_post_start_hook_child_cell(self):
        self.mox.StubOutWithMock(self.driver, 'start_consumers')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
        self.driver.start_consumers(self.msg_runner)
        context.get_admin_context().AndReturn(self.ctxt)
        self.cells_manager._update_our_parents(self.ctxt)
        self.mox.ReplayAll()
        self.cells_manager.post_start_hook()
    def test_post_start_hook_middle_cell(self):
        # A middle cell also queries its children on startup.
        cells_manager = fakes.get_cells_manager('child-cell2')
        msg_runner = cells_manager.msg_runner
        driver = cells_manager.driver
        self.mox.StubOutWithMock(driver, 'start_consumers')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(msg_runner,
                                 'ask_children_for_capabilities')
        self.mox.StubOutWithMock(msg_runner,
                                 'ask_children_for_capacities')
        driver.start_consumers(msg_runner)
        context.get_admin_context().AndReturn(self.ctxt)
        msg_runner.ask_children_for_capabilities(self.ctxt)
        msg_runner.ask_children_for_capacities(self.ctxt)
        self.mox.ReplayAll()
        cells_manager.post_start_hook()
    def test_update_our_parents(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'tell_parents_our_capabilities')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'tell_parents_our_capacities')
        self.msg_runner.tell_parents_our_capabilities(self.ctxt)
        self.msg_runner.tell_parents_our_capacities(self.ctxt)
        self.mox.ReplayAll()
        self.cells_manager._update_our_parents(self.ctxt)
    def test_schedule_run_instance(self):
        host_sched_kwargs = 'fake_host_sched_kwargs_silently_passed'
        self.mox.StubOutWithMock(self.msg_runner, 'schedule_run_instance')
        our_cell = self.msg_runner.state_manager.get_my_state()
        self.msg_runner.schedule_run_instance(self.ctxt, our_cell,
                                              host_sched_kwargs)
        self.mox.ReplayAll()
        self.cells_manager.schedule_run_instance(self.ctxt,
                host_sched_kwargs=host_sched_kwargs)
    def test_run_compute_api_method(self):
        # Args should just be silently passed through
        cell_name = 'fake-cell-name'
        method_info = 'fake-method-info'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'run_compute_api_method')
        fake_response = self._get_fake_response()
        self.msg_runner.run_compute_api_method(self.ctxt,
                                               cell_name,
                                               method_info,
                                               True).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.run_compute_api_method(
                self.ctxt, cell_name=cell_name, method_info=method_info,
                call=True)
        self.assertEqual('fake-response', response)
    def test_instance_update_at_top(self):
        self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
        self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.instance_update_at_top(self.ctxt,
                                                  instance='fake-instance')
    def test_instance_destroy_at_top(self):
        self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
        self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.instance_destroy_at_top(self.ctxt,
                                                  instance='fake-instance')
    def test_instance_delete_everywhere(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'instance_delete_everywhere')
        self.msg_runner.instance_delete_everywhere(self.ctxt,
                                                   'fake-instance',
                                                   'fake-type')
        self.mox.ReplayAll()
        self.cells_manager.instance_delete_everywhere(
                self.ctxt, instance='fake-instance',
                delete_type='fake-type')
    def test_instance_fault_create_at_top(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'instance_fault_create_at_top')
        self.msg_runner.instance_fault_create_at_top(self.ctxt,
                                                     'fake-fault')
        self.mox.ReplayAll()
        self.cells_manager.instance_fault_create_at_top(
                self.ctxt, instance_fault='fake-fault')
    def test_bw_usage_update_at_top(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'bw_usage_update_at_top')
        self.msg_runner.bw_usage_update_at_top(self.ctxt,
                                               'fake-bw-info')
        self.mox.ReplayAll()
        self.cells_manager.bw_usage_update_at_top(
                self.ctxt, bw_update_info='fake-bw-info')
    def test_heal_instances(self):
        # Healing is configured to sync at most 2 instances per run; two
        # runs are performed to check that the sync position wraps around.
        self.flags(instance_updated_at_threshold=1000,
                   instance_update_num_instances=2,
                   group='cells')
        fake_context = context.RequestContext('fake', 'fake')
        stalled_time = timeutils.utcnow()
        updated_since = stalled_time - datetime.timedelta(seconds=1000)
        def utcnow():
            return stalled_time
        call_info = {'get_instances': 0, 'sync_instances': []}
        instances = ['instance1', 'instance2', 'instance3']
        def get_instances_to_sync(context, **kwargs):
            self.assertEqual(context, fake_context)
            call_info['shuffle'] = kwargs.get('shuffle')
            call_info['project_id'] = kwargs.get('project_id')
            call_info['updated_since'] = kwargs.get('updated_since')
            call_info['get_instances'] += 1
            return iter(instances)
        def instance_get_by_uuid(context, uuid):
            return instances[int(uuid[-1]) - 1]
        def sync_instance(context, instance):
            self.assertEqual(context, fake_context)
            call_info['sync_instances'].append(instance)
        self.stubs.Set(cells_utils, 'get_instances_to_sync',
                get_instances_to_sync)
        self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
                instance_get_by_uuid)
        self.stubs.Set(self.cells_manager, '_sync_instance',
                sync_instance)
        self.stubs.Set(timeutils, 'utcnow', utcnow)
        self.cells_manager._heal_instances(fake_context)
        self.assertEqual(call_info['shuffle'], True)
        self.assertEqual(call_info['project_id'], None)
        self.assertEqual(call_info['updated_since'], updated_since)
        self.assertEqual(call_info['get_instances'], 1)
        # Only first 2
        self.assertEqual(call_info['sync_instances'],
                instances[:2])
        call_info['sync_instances'] = []
        self.cells_manager._heal_instances(fake_context)
        self.assertEqual(call_info['shuffle'], True)
        self.assertEqual(call_info['project_id'], None)
        self.assertEqual(call_info['updated_since'], updated_since)
        self.assertEqual(call_info['get_instances'], 2)
        # Now the last 1 and the first 1
        self.assertEqual(call_info['sync_instances'],
                [instances[-1], instances[0]])
    def test_sync_instances(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'sync_instances')
        self.msg_runner.sync_instances(self.ctxt, 'fake-project',
                                       'fake-time', 'fake-deleted')
        self.mox.ReplayAll()
        self.cells_manager.sync_instances(self.ctxt,
                                          project_id='fake-project',
                                          updated_since='fake-time',
                                          deleted='fake-deleted')
    def test_service_get_all(self):
        responses = []
        expected_response = []
        # 3 cells... so 3 responses.  Each response is a list of services.
        # Manager should turn these into a single list of responses.
        for i in xrange(3):
            cell_name = 'path!to!cell%i' % i
            services = []
            for service in FAKE_SERVICES:
                services.append(copy.deepcopy(service))
                expected_service = copy.deepcopy(service)
                cells_utils.add_cell_to_service(expected_service, cell_name)
                expected_response.append(expected_service)
            response = messaging.Response(cell_name, services, False)
            responses.append(response)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'service_get_all')
        self.msg_runner.service_get_all(self.ctxt,
                                        'fake-filters').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.service_get_all(self.ctxt,
                                                      filters='fake-filters')
        self.assertEqual(expected_response, response)
    def test_service_get_by_compute_host(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'service_get_by_compute_host')
        fake_cell = 'fake-cell'
        fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
                                           False)
        expected_response = copy.deepcopy(FAKE_SERVICES[0])
        cells_utils.add_cell_to_service(expected_response, fake_cell)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        self.msg_runner.service_get_by_compute_host(self.ctxt,
                fake_cell, 'fake-host').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.service_get_by_compute_host(self.ctxt,
                host_name=cell_and_host)
        self.assertEqual(expected_response, response)
    def test_proxy_rpc_to_manager(self):
        self.mox.StubOutWithMock(self.msg_runner,
                                 'proxy_rpc_to_manager')
        fake_response = self._get_fake_response()
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        topic = rpc.queue_get_for(self.ctxt, CONF.compute_topic,
                                  cell_and_host)
        self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
                'fake-host', topic, 'fake-rpc-msg',
                True, -1).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
                topic=topic, rpc_message='fake-rpc-msg', call=True,
                timeout=-1)
        self.assertEqual('fake-response', response)
    def _build_task_log_responses(self, num):
        responses = []
        expected_response = []
        # 3 cells... so 3 responses.  Each response is a list of task log
        # entries. Manager should turn these into a single list of
        # task log entries.
        for i in xrange(num):
            cell_name = 'path!to!cell%i' % i
            task_logs = []
            for task_log in FAKE_TASK_LOGS:
                task_logs.append(copy.deepcopy(task_log))
                expected_task_log = copy.deepcopy(task_log)
                cells_utils.add_cell_to_task_log(expected_task_log,
                                                 cell_name)
                expected_response.append(expected_task_log)
            response = messaging.Response(cell_name, task_logs, False)
            responses.append(response)
        return expected_response, responses
    def test_task_log_get_all(self):
        expected_response, responses = self._build_task_log_responses(3)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, None,
                'fake-name', 'fake-begin',
                'fake-end', host=None, state=None).AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end')
        self.assertEqual(expected_response, response)
    def test_task_log_get_all_with_filters(self):
        expected_response, responses = self._build_task_log_responses(1)
        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
                'fake-name', 'fake-begin', 'fake-end', host='fake-host',
                state='fake-state').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end',
                host=cell_and_host, state='fake-state')
        self.assertEqual(expected_response, response)
    def test_task_log_get_all_with_cell_but_no_host_filters(self):
        expected_response, responses = self._build_task_log_responses(1)
        # Host filter only has cell name.
        cell_and_host = 'fake-cell'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'task_log_get_all')
        self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
                'fake-name', 'fake-begin', 'fake-end', host=None,
                state='fake-state').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.task_log_get_all(self.ctxt,
                task_name='fake-name',
                period_beginning='fake-begin', period_ending='fake-end',
                host=cell_and_host, state='fake-state')
        self.assertEqual(expected_response, response)
    def test_compute_node_get_all(self):
        responses = []
        expected_response = []
        # 3 cells... so 3 responses.  Each response is a list of computes.
        # Manager should turn these into a single list of responses.
        for i in xrange(3):
            cell_name = 'path!to!cell%i' % i
            compute_nodes = []
            for compute_node in FAKE_COMPUTE_NODES:
                compute_nodes.append(copy.deepcopy(compute_node))
                expected_compute_node = copy.deepcopy(compute_node)
                cells_utils.add_cell_to_compute_node(expected_compute_node,
                                                     cell_name)
                expected_response.append(expected_compute_node)
            response = messaging.Response(cell_name, compute_nodes, False)
            responses.append(response)
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_get_all')
        self.msg_runner.compute_node_get_all(self.ctxt,
                hypervisor_match='fake-match').AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_get_all(self.ctxt,
                hypervisor_match='fake-match')
        self.assertEqual(expected_response, response)
    def test_compute_node_stats(self):
        # Stats from all cells are expected to be summed per key.
        raw_resp1 = {'key1': 1, 'key2': 2}
        raw_resp2 = {'key2': 1, 'key3': 2}
        raw_resp3 = {'key3': 1, 'key4': 2}
        responses = [messaging.Response('cell1', raw_resp1, False),
                     messaging.Response('cell2', raw_resp2, False),
                     messaging.Response('cell2', raw_resp3, False)]
        expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_stats')
        self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_stats(self.ctxt)
        self.assertEqual(expected_resp, response)
    def test_compute_node_get(self):
        fake_cell = 'fake-cell'
        fake_response = messaging.Response(fake_cell,
                                           FAKE_COMPUTE_NODES[0],
                                           False)
        expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
        cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
        cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
        self.mox.StubOutWithMock(self.msg_runner,
                                 'compute_node_get')
        self.msg_runner.compute_node_get(self.ctxt,
                'fake-cell', 'fake-id').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.compute_node_get(self.ctxt,
                compute_id=cell_and_id)
        self.assertEqual(expected_response, response)
    def test_actions_get(self):
        fake_uuid = fake_instance_actions.FAKE_UUID
        fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
        fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response('fake-cell', [fake_act], False)
        expected_response = [fake_act]
        self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
        self.msg_runner.actions_get(self.ctxt, 'fake-cell',
                                    'fake-uuid').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
                                                  'fake-uuid')
        self.assertEqual(expected_response, response)
    def test_action_get_by_request_id(self):
        fake_uuid = fake_instance_actions.FAKE_UUID
        fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
        fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response('fake-cell', fake_act, False)
        expected_response = fake_act
        self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
        self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
                'fake-uuid', 'req-fake').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_get_by_request_id(self.ctxt,
                                                               'fake-cell',
                                                               'fake-uuid',
                                                               'req-fake')
        self.assertEqual(expected_response, response)
    def test_action_events_get(self):
        fake_action_id = fake_instance_actions.FAKE_ACTION_ID1
        fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id]
        fake_response = messaging.Response('fake-cell', fake_events, False)
        expected_response = fake_events
        self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
        self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
                                          'fake-action').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
                                                        'fake-action')
        self.assertEqual(expected_response, response)
|
py | 7dfb10a15f89ec3434fe22264f707ddfb9f4e911 | from change_format import ChangeFormat as CF
# Round-trip the chain file: read chain_casl.xml with ChangeFormat and write
# it straight back out under a new name (presumably so the two XML variants
# can be diffed -- verify against ChangeFormat's docs).
chain_test = CF()
chain_test.from_xml(filename="chain_casl.xml")
chain_test.export_to_xml(filename="chain_casl_old.xml")
|
py | 7dfb119908e689aed71b5aec93bc19e02d22dcd3 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy
class PluginApp(AppConfig):
    # Django AppConfig registering the Mollie payment plugin with pretix.
    name = 'pretix_mollie'
    verbose_name = 'Mollie payment integration for pretix'
    class PretixPluginMeta:
        # Metadata pretix reads to list the plugin in its plugin manager.
        name = ugettext_lazy('Mollie payment integration for pretix')
        author = 'Raphael Michel'
        description = ugettext_lazy('Integration for the Mollie payment provider.')
        visible = True
        version = '1.2.1'
    def ready(self):
        # Import signal receivers once the app registry is ready; the import
        # itself performs the registration.
        from . import signals  # NOQA
# Tells older Django versions (<3.2) which AppConfig to use for this package.
default_app_config = 'pretix_mollie.PluginApp'
|
py | 7dfb11d1d7455619e3354f25de4cdb4943fb6f22 | from __future__ import generators
# Some raw iter tests. Some "high-level" iterator tests can be found in
# testvb.py and testOutlook.py
import sys
import unittest
from win32com.client.gencache import EnsureDispatch
from win32com.client import Dispatch
import win32com.server.util
import win32com.test.util
import pythoncom
def yield_iter(iter):
    """Re-yield every item from the iterator *iter* until it is exhausted.

    Used by the tests below to exercise generator-based consumption of COM
    enumerators.  The explicit StopIteration handling keeps this generator
    valid under PEP 479 (Python 3.7+), where a StopIteration escaping a
    generator body is turned into RuntimeError instead of ending iteration.
    (The parameter name shadows the builtin; kept for interface stability.)
    """
    while 1:
        try:
            yield next(iter)
        except StopIteration:
            return
class _BaseTestCase(win32com.test.util.TestCase):
    """Shared iterator tests.
    Subclasses must provide, in setUp: self.object (the COM object under
    test), self.expected_data (the items enumeration should produce) and
    self.iter_factory (a callable returning (object, IEnumVARIANT)).
    This base class itself is excluded from the suite (see suite() below).
    """
    def test_enumvariant_vb(self):
        # Iterate the raw IEnumVARIANT directly.
        ob, iter = self.iter_factory()
        got=[]
        for v in iter:
            got.append(v)
        self.assertEquals(got, self.expected_data)
    def test_yield(self):
        # Same, but consumed through the yield_iter generator wrapper.
        ob, i = self.iter_factory()
        got=[]
        for v in yield_iter(iter(i)):
            got.append(v)
        self.assertEquals(got, self.expected_data)
    def _do_test_nonenum(self, object):
        try:
            for i in object:
                pass
            self.fail("Could iterate over a non-iterable object")
        except TypeError:
            pass # this is expected.
        self.assertRaises(TypeError, iter, object)
        self.assertRaises(AttributeError, getattr, object, "next")
    def test_nonenum_wrapper(self):
        # Check our raw PyIDispatch
        ob = self.object._oleobj_
        try:
            for i in ob:
                pass
            self.fail("Could iterate over a non-iterable object")
        except TypeError:
            pass # this is expected.
        self.assertRaises(TypeError, iter, ob)
        self.assertRaises(AttributeError, getattr, ob, "next")
        # And our Dispatch wrapper
        ob = self.object
        try:
            for i in ob:
                pass
            self.fail("Could iterate over a non-iterable object")
        except TypeError:
            pass # this is expected.
        # Note that as our object may be dynamic, we *do* have a __getitem__
        # method, meaning we *can* call iter() on the object. In this case
        # actual iteration is what fails.
        # So either the 'iter(); will raise a type error, or an attempt to
        # fetch it
        try:
            next(iter(ob))
            self.fail("Expected a TypeError fetching this iterator")
        except TypeError:
            pass
        # And it should never have a 'next' method
        self.assertRaises(AttributeError, getattr, ob, "next")
class VBTestCase(_BaseTestCase):
    # Runs the shared iterator tests against the registered VB test harness
    # COM server ("PyCOMVBTest.Tester").
    def setUp(self):
        def factory():
            # Our VB test harness exposes a property with IEnumVariant.
            ob = self.object.EnumerableCollectionProperty
            for i in self.expected_data:
                ob.Add(i)
            # Get the raw IEnumVARIANT.
            invkind = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET
            iter = ob._oleobj_.InvokeTypes(pythoncom.DISPID_NEWENUM,0,invkind,(13, 10),())
            return ob, iter.QueryInterface(pythoncom.IID_IEnumVARIANT)
        # We *need* generated dispatch semantics, so dynamic __getitem__ etc
        # don't get in the way of our tests.
        self.object = EnsureDispatch("PyCOMVBTest.Tester")
        self.expected_data = [1, "Two", "3"]
        self.iter_factory = factory
    def tearDown(self):
        # Drop the COM reference so the server can be released.
        self.object = None
# Test our client semantics, but using a wrapped Python list object.
# This has the effect of re-using our client specific tests, but in this
# case is exercising the server side.
class SomeObject:
    # Minimal local COM server used by the wrapped-server tests below.
    # _public_methods_ is the win32com policy attribute listing the methods
    # callable from COM clients.
    _public_methods_ = ["GetCollection"]
    def __init__(self, data):
        self.data = data
    def GetCollection(self):
        # Wrap the Python list in a COM collection object, which supports
        # COM enumeration (DISPID_NEWENUM).
        return win32com.server.util.NewCollection(self.data)
class WrappedPythonCOMServerTestCase(_BaseTestCase):
    # Re-uses the client-side iterator tests, but against a wrapped Python
    # COM server (SomeObject) -- effectively exercising the server side.
    def setUp(self):
        def factory():
            ob = self.object.GetCollection()
            flags = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET
            # DISPID_NEWENUM yields the collection's raw enumerator.
            enum = ob._oleobj_.Invoke(pythoncom.DISPID_NEWENUM, 0, flags, 1)
            return ob, enum.QueryInterface(pythoncom.IID_IEnumVARIANT)
        self.expected_data = [1,'Two',3]
        sv = win32com.server.util.wrap(SomeObject(self.expected_data))
        self.object = Dispatch(sv)
        self.iter_factory = factory
    def tearDown(self):
        # Drop the COM reference so the wrapped server can be released.
        self.object = None
def suite():
    """Build a suite of every concrete TestCase class in this module.

    The shared _BaseTestCase is deliberately excluded -- we don't want the
    abstract base class to be run on its own.
    """
    result = unittest.TestSuite()
    for obj in globals().values():
        is_case_class = isinstance(obj, type) and issubclass(obj, unittest.TestCase)
        if is_case_class and obj is not _BaseTestCase:
            result.addTest(unittest.makeSuite(obj))
    return result
if __name__=='__main__':
    # Appending 'suite' makes unittest.main run the suite() factory above,
    # rather than collecting classes itself (which would include the base).
    unittest.main(argv=sys.argv + ['suite'])
|
py | 7dfb135ecde8df6dd29daee4e9b26c93358a31e5 | """Tests for tensorflow.contrib.layers.python.layers.encoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _get_const_var(name, shape, value):
    """Create (or look up) a variable of `shape` initialized to the constant `value`."""
    const_init = init_ops.constant_initializer(value)
    return variable_scope.get_variable(name, shape, initializer=const_init)
if __name__ == '__main__':
    # Manual smoke test using the TF1 graph/session API: build a constant
    # embedding table under scope 'test', then embed a small batch of
    # token ids against it.
    with tf.Session() as sess:
        docs = [[1, 1], [2, 3]]
        with variable_scope.variable_scope('test'):
            v = _get_const_var('embeddings', (4, 3),
                               [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
            # self.assertEqual(v.name, 'test/embeddings:0')
        # emb = encoders.embed_sequence(docs, 4, 3, scope='test', reuse=True)
        # reuse=True makes embed_sequence pick up the 'embeddings' variable
        # created above instead of allocating a fresh one.
        emb = encoders.embed_sequence(docs, 4, 3, scope='test', reuse=True)
        sess.run(variables.global_variables_initializer())
        print(emb.eval())
        #self.assertAllClose(
        #    [[[3., 4., 5.], [3., 4., 5.]], [[6., 7., 8.], [9., 10., 11.]]],
        #    emb.eval())
        #[
        # [[ 3.  4.  5.]
        #  [ 3.  4.  5.]]
        # [[ 6.  7.  8.]
        #  [ 9.  10.  11.]]
        #]
|
py | 7dfb13abbd41e1a8a569a10f6571f2c1ed722539 | #===----------------------------------------------------------------------===##
#
# The LLVM Compiler Infrastructure
#
# This file is dual licensed under the MIT and the University of Illinois Open
# Source Licenses. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===##
import os
import lit.util
import libcxx.util
class CXXCompiler(object):
    """Wrapper around a C++ compiler executable.

    Builds preprocess/compile/link command lines, detects the compiler's
    type and version from its predefined macros, and probes whether
    individual flags are supported.  All ``flags`` parameters now default to
    ``None`` instead of a shared mutable ``[]`` (same semantics, safer idiom).
    """

    def __init__(self, path, flags=None, compile_flags=None, link_flags=None,
                 use_ccache=False):
        """
        path          -- path to the compiler executable
        flags         -- flags used on every invocation
        compile_flags -- extra flags used when compiling
        link_flags    -- extra flags used when linking
        use_ccache    -- prefix non-link invocations with ccache
        """
        self.path = path
        self.flags = list(flags or [])
        self.compile_flags = list(compile_flags or [])
        self.link_flags = list(link_flags or [])
        self.use_ccache = use_ccache
        self.type = None
        self.version = None
        self._initTypeAndVersion()

    def _initTypeAndVersion(self):
        # Determine compiler type and version by inspecting its predefined
        # macros; leaves type/version as None when the probe fails.
        macros = self.dumpMacros()
        if macros is None:
            return
        compiler_type = None
        major_ver = minor_ver = patchlevel = None
        if '__clang__' in macros:
            compiler_type = 'clang'
            # Treat apple's llvm fork differently.
            if '__apple_build_version__' in macros:
                compiler_type = 'apple-clang'
            major_ver = macros['__clang_major__']
            minor_ver = macros['__clang_minor__']
            patchlevel = macros['__clang_patchlevel__']
        elif '__GNUC__' in macros:
            compiler_type = 'gcc'
            major_ver = macros['__GNUC__']
            minor_ver = macros['__GNUC_MINOR__']
            patchlevel = macros['__GNUC_PATCHLEVEL__']
        self.type = compiler_type
        self.version = (major_ver, minor_ver, patchlevel)

    def _basicCmd(self, source_files, out, is_link=False, input_is_cxx=False):
        """Build the command-line prefix shared by all invocation kinds."""
        cmd = []
        if self.use_ccache and not is_link:
            cmd += ['ccache']
        cmd += [self.path]
        if out is not None:
            cmd += ['-o', out]
        if input_is_cxx:
            cmd += ['-x', 'c++']
        if isinstance(source_files, list):
            cmd += source_files
        elif isinstance(source_files, str):
            cmd += [source_files]
        else:
            raise TypeError('source_files must be a string or list')
        return cmd

    def preprocessCmd(self, source_files, out=None, flags=None):
        """Return the command line that runs only the preprocessor (-E)."""
        cmd = self._basicCmd(source_files, out, input_is_cxx=True) + ['-E']
        cmd += self.flags + self.compile_flags + list(flags or [])
        return cmd

    def compileCmd(self, source_files, out=None, flags=None):
        """Return the command line that compiles to an object file (-c)."""
        cmd = self._basicCmd(source_files, out, input_is_cxx=True) + ['-c']
        cmd += self.flags + self.compile_flags + list(flags or [])
        return cmd

    def linkCmd(self, source_files, out=None, flags=None):
        """Return the command line that links already-compiled inputs."""
        cmd = self._basicCmd(source_files, out, is_link=True)
        cmd += self.flags + self.link_flags + list(flags or [])
        return cmd

    def compileLinkCmd(self, source_files, out=None, flags=None):
        """Return the command line that compiles and links in one step."""
        cmd = self._basicCmd(source_files, out, is_link=True)
        cmd += self.flags + self.compile_flags + self.link_flags + list(flags or [])
        return cmd

    def preprocess(self, source_files, out=None, flags=None, env=None, cwd=None):
        """Run the preprocessor; return (cmd, stdout, stderr, returncode)."""
        cmd = self.preprocessCmd(source_files, out, flags)
        out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)
        return cmd, out, err, rc

    def compile(self, source_files, out=None, flags=None, env=None, cwd=None):
        """Compile the sources; return (cmd, stdout, stderr, returncode)."""
        cmd = self.compileCmd(source_files, out, flags)
        out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)
        return cmd, out, err, rc

    def link(self, source_files, out=None, flags=None, env=None, cwd=None):
        """Link the inputs; return (cmd, stdout, stderr, returncode)."""
        cmd = self.linkCmd(source_files, out, flags)
        out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)
        return cmd, out, err, rc

    def compileLink(self, source_files, out=None, flags=None, env=None,
                    cwd=None):
        """Compile and link in one step; return (cmd, stdout, stderr, rc)."""
        cmd = self.compileLinkCmd(source_files, out, flags)
        out, err, rc = lit.util.executeCommand(cmd, env=env, cwd=cwd)
        return cmd, out, err, rc

    def compileLinkTwoSteps(self, source_file, out=None, object_file=None,
                            flags=None, env=None, cwd=None):
        """Compile `source_file` to an object file, then link that object.

        Returns the merged command line, combined stdout/stderr and the
        final return code; stops early if the compile step fails.
        """
        if not isinstance(source_file, str):
            raise TypeError('This function only accepts a single input file')
        flags = list(flags or [])
        if object_file is None:
            # Create, use and delete a temporary object file if none is given.
            with_fn = lambda: libcxx.util.guardedTempFilename(suffix='.o')
        else:
            # Otherwise wrap the filename in a context manager function.
            with_fn = lambda: libcxx.util.nullContext(object_file)
        with with_fn() as object_file:
            cc_cmd, cc_stdout, cc_stderr, rc = self.compile(
                source_file, object_file, flags=flags, env=env, cwd=cwd)
            if rc != 0:
                return cc_cmd, cc_stdout, cc_stderr, rc
            link_cmd, link_stdout, link_stderr, rc = self.link(
                object_file, out=out, flags=flags, env=env, cwd=cwd)
            return (cc_cmd + ['&&'] + link_cmd, cc_stdout + link_stdout,
                    cc_stderr + link_stderr, rc)

    def dumpMacros(self, source_files=None, flags=None, env=None, cwd=None):
        """Return a {macro: value} dict of predefined macros, or None on error."""
        if source_files is None:
            source_files = os.devnull
        flags = ['-dM'] + list(flags or [])
        cmd, out, err, rc = self.preprocess(source_files, flags=flags, env=env,
                                            cwd=cwd)
        if rc != 0:
            return None
        parsed_macros = {}
        lines = [l.strip() for l in out.split('\n') if l.strip()]
        for l in lines:
            # -dM output is a sequence of "#define NAME VALUE" lines.
            assert l.startswith('#define ')
            l = l[len('#define '):]
            macro, _, value = l.partition(' ')
            parsed_macros[macro] = value
        return parsed_macros

    def getTriple(self):
        """Return the target triple reported by the compiler (-dumpmachine)."""
        cmd = [self.path] + self.flags + ['-dumpmachine']
        return lit.util.capture(cmd).strip()

    def hasCompileFlag(self, flag):
        """Return True if compiling with `flag` (or a list of flags) succeeds."""
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        # Add -Werror to ensure that an unrecognized flag causes a non-zero
        # exit code. -Werror is supported on all known compiler types.
        if self.type is not None:
            flags += ['-Werror']
        cmd, out, err, rc = self.compile(os.devnull, out=os.devnull,
                                         flags=flags)
        return rc == 0

    def addCompileFlagIfSupported(self, flag):
        """Append `flag` to compile_flags if the compiler accepts it."""
        if isinstance(flag, list):
            flags = list(flag)
        else:
            flags = [flag]
        if self.hasCompileFlag(flags):
            self.compile_flags += flags
            return True
        else:
            return False

    def addWarningFlagIfSupported(self, flag):
        """
        addWarningFlagIfSupported - Add a warning flag if the compiler
        supports it. Unlike addCompileFlagIfSupported, this function detects
        when "-Wno-<warning>" flags are unsupported. If flag is a
        "-Wno-<warning>" GCC will not emit an unknown option diagnostic unless
        another error is triggered during compilation.
        """
        assert isinstance(flag, str)
        if not flag.startswith('-Wno-'):
            return self.addCompileFlagIfSupported(flag)
        flags = ['-Werror', flag]
        cmd = self.compileCmd('-', os.devnull, flags)
        # Remove '-v' because it will cause the command line invocation
        # to be printed as part of the error output.
        # TODO(EricWF): Are there other flags we need to worry about?
        if '-v' in cmd:
            cmd.remove('-v')
        # Feed "#error" on stdin: compilation always fails, and the stderr
        # text tells us whether the unknown-warning diagnostic fired too.
        out, err, rc = lit.util.executeCommand(cmd, input='#error\n')
        assert rc != 0
        if flag in err:
            return False
        self.compile_flags += [flag]
        return True
|
py | 7dfb1497d335595c1db9c2381aa6579530b415b0 | '''
FQ20 - OMSBA 5061 - Week 10
TW14: World Series
Maroon Team: Chequala Fuller, Tamalyn Tamura, Andrew Nalundasan
Description: Write the WorldSeries class that models a baseball
world series. Make a stab at the simulation, too, if you have time.
'''
import random
class WorldSeries(object):
    """Simulate the World Series of US baseball.

    The first team to win 4 games wins the series.  Each game is won by
    `team1` with probability `p` (and by `team2` otherwise).

    >>> ws = WorldSeries('Boston Red Sox', 'New York Yankees', p=.73)
    >>> # story=True prints the result of each game and the series winner
    >>> bostonWins = ws.simulate(story=True)        # doctest: +SKIP
    Game 1: Boston Red Sox win (Boston 1 games: New York 0 games)
    Game 2: Boston Red Sox win (Boston 2 games: New York 0 games)
    Game 3: New York Yankees win (Boston 2 games: New York 1 games)
    Game 4: Boston Red Sox win (Boston 3 games: New York 1 games)
    Game 5: Boston Red Sox win (Boston 4 games: New York 1 games)
    Boston Red Sox win the series!
    >>> # with the default story=False it just returns whether team1 won
    >>> if ws.simulate():                           # doctest: +SKIP
    ...     print(ws.team1, "won!")
    ... else:
    ...     print(ws.team2, "won!")
    """

    WINS_NEEDED = 4  # first team to this many game wins takes the series

    def __init__(self, team1, team2, p):
        """team1/team2: team names; p: probability that team1 wins any game."""
        self.team1 = team1
        self.team2 = team2
        self.p = p
        # Short city labels used in the per-game story line.
        # NOTE(review): hard-coded to match the docstring example; derive
        # them from the team names if other teams are ever simulated.
        self.loc1 = 'Boston'
        self.loc2 = 'New York'

    def simulate(self, story=False):
        """Play one series; return True if team1 won it, else False.

        Fixes over the original draft: the series stops as soon as a team
        reaches 4 wins (instead of always playing exactly 5 games), `p` is
        the win probability of team1 (as documented), printing only happens
        when story=True, and the result is actually returned.
        """
        wins1 = wins2 = 0
        game = 0
        while wins1 < self.WINS_NEEDED and wins2 < self.WINS_NEEDED:
            game += 1
            if random.random() < self.p:
                wins1 += 1
                winner = self.team1
            else:
                wins2 += 1
                winner = self.team2
            if story:
                print('Game %d: %s win (%s %d games: %s %d games)'
                      % (game, winner, self.loc1, wins1, self.loc2, wins2))
        team1_won = wins1 > wins2
        if story:
            print('%s win the series!' % (self.team1 if team1_won else self.team2))
        return team1_won
|
py | 7dfb14cd7a48e572ce6bf7bb70b5f9cc3d3d88e2 | from sharpy.plans.acts import ActBase
from sc2 import UnitTypeId, AbilityId
from sc2.unit import Unit
class LowerDepots(ActBase):
    """Sinks supply depots when no ground threat is near; raises them when one is."""

    async def execute(self) -> bool:
        structure: Unit
        for structure in self.cache.own(UnitTypeId.SUPPLYDEPOT):
            nearby = self.knowledge.known_enemy_units_mobile.not_flying.closer_than(5, structure.position)
            if not nearby.exists:
                # No enemy ground unit close by: lower the depot so our units can pass.
                self.do(structure(AbilityId.MORPH_SUPPLYDEPOT_LOWER))
        for structure in self.cache.own(UnitTypeId.SUPPLYDEPOTLOWERED):
            nearby = self.knowledge.known_enemy_units_mobile.not_flying.closer_than(5, structure.position)
            if nearby.exists:
                # Enemy ground unit close by: raise the depot to restore the wall.
                self.do(structure(AbilityId.MORPH_SUPPLYDEPOT_RAISE))
        return True
|
py | 7dfb15185b5928b42e0c69caa80b31116a8fea1a | # Generated by Django 2.2.4 on 2019-08-14 09:13
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Money-field refactor for the order app: raw Decimal price columns gain
    # an explicit "_amount" suffix, and both Order and OrderLine get a new
    # `currency` column defaulting to the project-wide currency setting.

    dependencies = [("order", "0071_order_gift_cards")]
    operations = [
        migrations.RenameField(
            model_name="order",
            old_name="shipping_price_gross",
            new_name="shipping_price_gross_amount",
        ),
        migrations.RenameField(
            model_name="order",
            old_name="shipping_price_net",
            new_name="shipping_price_net_amount",
        ),
        migrations.RenameField(
            model_name="order", old_name="total_gross", new_name="total_gross_amount"
        ),
        migrations.RenameField(
            model_name="order", old_name="total_net", new_name="total_net_amount"
        ),
        migrations.RenameField(
            model_name="orderline",
            old_name="unit_price_gross",
            new_name="unit_price_gross_amount",
        ),
        migrations.RenameField(
            model_name="orderline",
            old_name="unit_price_net",
            new_name="unit_price_net_amount",
        ),
        migrations.AddField(
            model_name="order",
            name="currency",
            field=models.CharField(
                default=settings.DEFAULT_CURRENCY,
                max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
            ),
        ),
        migrations.AddField(
            model_name="orderline",
            name="currency",
            field=models.CharField(
                default=settings.DEFAULT_CURRENCY,
                max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
            ),
        ),
    ]
|
py | 7dfb156c699e3e80aeea05ff439705f6e2596a50 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : [email protected]
# @Date : 2020/06/02 20:00
"""
from https://github.com/martsberger/tablepyxl
"""
|
py | 7dfb15cc3c10e59fbf2e8c11dbfe7200c0b4a799 |
import smart_imports
smart_imports.all()
def notify_post_service(answer, recipients_ids):
    """Queue a post-service personal-message notification for every recipient."""
    make_handler = post_service_message_handlers.PersonalMessageHandler
    for account_id in recipients_ids:
        handler = make_handler(message_id=answer.message_id, account_id=account_id)
        post_service_prototypes.MessagePrototype.create(handler)
def new_messages_number_url():
    """Return the url of the new-messages-number API endpoint.

    NOTE(review): NEW_MESSAGES_NUMNER_API_VERSION is the setting's actual
    (misspelled) name in conf; keep it in sync with the settings module.
    """
    return dext_urls.url('accounts:messages:api-new-messages-number',
                         api_version=conf.settings.NEW_MESSAGES_NUMNER_API_VERSION,
                         api_client=django_settings.API_CLIENT)
def send_message(sender_id, recipients_ids, body, asynchronous=False):
    """Send a personal message and notify the post service about it.

    NOTE(review): the parameter was called ``async`` in the original code;
    ``async`` became a reserved keyword in Python 3.7, which turned this
    definition (and any ``async=...`` call) into a SyntaxError, so it is
    renamed here.  Callers passing it by keyword must be updated.
    """
    def callback(answer):
        # Runs once the message is stored: fan the notification out.
        notify_post_service(answer=answer,
                            recipients_ids=recipients_ids)

    # The tt_services API still expects the keyword ``async``; pass it via a
    # dict-splat so this module stays valid Python 3.7+ syntax.
    return tt_services.personal_messages.cmd_send_message(sender_id=sender_id,
                                                          recipients_ids=recipients_ids,
                                                          body=body,
                                                          callback=callback,
                                                          **{'async': asynchronous})
|
py | 7dfb168d7eaa05e289fe837607368dda9641909c | """Test the speed and precision of the sum of many values.
When fitting data with many events or bins (say 1e6 or 1e9 or more), the fit statistic is first
computed per event or bin and then summed.
The sum (and thus the fit statistic and fit results) can become incorrect due to rounding errors
(see e.g. http://en.wikipedia.org/wiki/Kahan_summation_algorithm)
There are clever and slower methods to avoid this problem (like the Kahan summation algorithm),
but they are not readily available in stdlib C / C++ or numpy
(there's an open feature request for numpy though: https://github.com/numpy/numpy/issues/2448)
The simplest solution is to use 128 bit precision for the accumulator in numpy:
In [7]: np.sum([1e10, -1e10, 1e-6] * int(1e6))
Out[7]: 1.9073477254638671
In [8]: np.sum([1e10, -1e10, 1e-6] * int(1e6), dtype='float128')
Out[8]: 1.0002404448965787888
Sherpa uses Kahan summation:
$ASCDS_INSTALL/src/pkg/sherpa/sherpa/sherpa/include/sherpa/stats.hh
This is a quick benchmarking of the precision, speed of sum as a function of these parameters:
* Accumulator bit size: 32, 64 or 128
* Number of elements: 1e6, 1e9
* TODO: how to choose values in a meaningful way? Precision results will completely depend on this.
Should be chosen similar to typical / extreme fitting cases with CASH, CSTAT, CHI2 fits
For now we only check the speed.
* TODO: Check against C and Cython implementation
"""
from timeit import Timer
# Data / accumulator dtypes to benchmark.  Spelled-out names are used because
# in NumPy's shorthand the digit counts *bytes*, not bits: 'f4' is float32,
# and np.dtype('f32') is simply rejected -- so the original 'f32'/'f64'/'f128'
# strings made every timed snippet raise TypeError.
# NOTE(review): 'float128' only exists on platforms with an extended-precision
# long double (e.g. x86-64 Linux).
dtypes = ['float32', 'float64', 'float128']
sizes = [int(1e6), int(1e9)]
def setup(size, dtype):
return """
import numpy as np
data = np.zeros({size}, dtype='{dtype}')
"""[1:-1].format(**locals())
def statement(dtype):
    """Return the timeit statement that sums `data` into the given accumulator dtype."""
    return "data.sum(dtype='%s')" % dtype
# Benchmark every (data dtype, accumulator dtype, size) combination.
# repeat=3 / number=1 with min() follows the usual timeit advice: the
# fastest of several runs is the least noisy estimate.
for data_dtype in dtypes:
    for accumulator_dtype in dtypes:
        for size in sizes:
            timer = Timer(statement(accumulator_dtype), setup(size, data_dtype))
            time = min(timer.repeat(repeat=3, number=1))
            # Let's use the frequency in GHz of summed elements as our measure of speed
            speed = 1e-9 * (size / time)
            print('%10s %10s %10d %10.5f' %
                  (data_dtype, accumulator_dtype, size, speed))
"""
On my 2.6 GHz Intel Core I7 Macbook the speed doesn't depend on data or accumulator dtype at all.
This is weird, because it's a 64 bit machine, so 128 bit addition should be slower.
Also for such a simple computation as sum the limiting factor should be memory loading speed,
so 128 bit data should be slower to process than 64 bit data?
In [53]: run sum_benchmark.py
f32 f32 1000000 0.82793
f32 f32 1000000000 1.12276
f32 f64 1000000 1.12207
f32 f64 1000000000 1.10964
f32 f128 1000000 1.04155
f32 f128 1000000000 1.12900
f64 f32 1000000 1.10609
f64 f32 1000000000 1.12823
f64 f64 1000000 1.10493
f64 f64 1000000000 1.11920
f64 f128 1000000 1.15450
f64 f128 1000000000 1.11794
f128 f32 1000000 1.12087
f128 f32 1000000000 1.12223
f128 f64 1000000 1.09885
f128 f64 1000000000 1.11911
f128 f128 1000000 1.06943
f128 f128 1000000000 1.12578
"""
|
py | 7dfb185a25fdd450cad9befc29fa07f3cf24b676 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# my o(n)Solution
class Solution(object):
    """O(n) tilt: one post-order traversal computes each subtree sum and
    accumulates every node's tilt (|left sum - right sum|) along the way."""

    tilt = 0  # kept for interface compatibility; reset per call below

    def findTilt(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        # Reset the accumulator so repeated calls on the same instance do
        # not carry over the previous total (bug in the original, which
        # only ever added to the class-level counter).
        self.tilt = 0

        def postOrderTrav(node):
            # Return the sum of the subtree rooted at `node`, adding the
            # node's own tilt into the running total as a side effect.
            if not node:
                return 0
            left = postOrderTrav(node.left)
            right = postOrderTrav(node.right)
            self.tilt += abs(left - right)
            return node.val + left + right

        postOrderTrav(root)
        return self.tilt
# This one is o(n^2) and naive so I'll just do o(n)
class Solution(object):
    """Naive O(n^2) tilt: recompute both subtree sums from scratch for
    every node (kept deliberately simple; see the O(n) variant above)."""

    def findTilt(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        acc = [0]
        self.traverse(root, acc)
        return acc[0]

    def traverse(self, root, total):
        """Pre-order walk adding each node's tilt into total[0]."""
        if not root:
            return
        total[0] += self.getTilt(root)
        self.traverse(root.left, total)
        self.traverse(root.right, total)

    def getSum(self, root, total):
        """Accumulate the value sum of the subtree rooted at `root` into total[0]."""
        if not root:
            return
        total[0] += root.val
        self.getSum(root.left, total)
        self.getSum(root.right, total)

    def getTilt(self, root):
        """Return |sum(left subtree) - sum(right subtree)| for `root`."""
        left_total, right_total = [0], [0]
        self.getSum(root.left, left_total)
        self.getSum(root.right, right_total)
        return abs(left_total[0] - right_total[0])
|
py | 7dfb189e2bfe639f5408d9c12f5ee2a1338d430b | import random
import numpy as np
from ._common import (_all_indices, _tally_at_pointer, _inc_pointer,
_dec_pointer)
def _order_tiebreak(winners, n=1):
"""
Given an iterable of possibly tied `winners`, select the highest numbered.
(Since they are to be eliminated.)
"""
return sorted(winners)[-n:]
def _random_tiebreak(winners, n=1):
"""
Given an iterable of possibly tied `winners`, select one at random.
"""
if len(winners) == 1:
return winners
else:
return random.sample(winners, n)
def _no_tiebreak(winners, n=1):
"""
Given an iterable of `winners`, return None if there is a tie.
"""
if len(winners) <= n:
return winners
else:
return [None]
# Maps the public `tiebreaker` argument to its strategy function.
_tiebreak_map = {'order': _order_tiebreak,
                 'random': _random_tiebreak,
                 None: _no_tiebreak}


def _get_tiebreak(tiebreaker):
    """Resolve a tiebreaker name to its strategy, raising ValueError if unknown."""
    if tiebreaker in _tiebreak_map:
        return _tiebreak_map[tiebreaker]
    raise ValueError('Tiebreaker not understood')
def coombs(election, tiebreaker=None):
    """
    Find the winner of an election using Coombs' method.

    If any candidate gets a majority of first-preference votes, they win.
    Otherwise, the candidate(s) with the most number of last-preference votes
    is eliminated, votes for eliminated candidates are transferred according
    to the voters' preference rankings, and a series of runoff elections are
    held between the remainders until a candidate gets a majority.[1]_

    Parameters
    ----------
    election : array_like
        A collection of ranked ballots. See `borda` for election format.
        Currently, this must include full rankings for each voter.
    tiebreaker : {'random', 'order', None}, optional
        If there is a tie and `tiebreaker` is ``'random'``, a tied candidate
        is eliminated (or selected) at random.
        If ``'order'``, the lowest-ID tied candidate is preferred in each tie
        (so the highest-ID candidate is eliminated).
        By default, ``None`` is returned if there are any ties.

    Returns
    -------
    winner : int
        The ID number of the winner, or ``None`` for an unbroken tie.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Coombs%27_method

    Examples
    --------
    Label some candidates:

    >>> A, B, C = 0, 1, 2

    Specify the ballots for the 5 voters:

    >>> election = [[A, C, B],
                    [A, C, B],
                    [B, C, A],
                    [B, C, A],
                    [C, A, B],
                    ]

    In the first round, no candidate gets a majority, so Candidate B (1) is
    eliminated, for receiving 3 out of 5 last-place votes. Voter 2 and 3's
    support of B is transferred to Candidate C (2), causing Candidate C to win,
    with 3 out of 5 votes:

    >>> coombs(election)
    2
    """
    election = np.asarray(election)
    n_voters = election.shape[0]
    n_cands = election.shape[1]
    eliminated = set()
    tiebreak = _get_tiebreak(tiebreaker)
    # Per-voter cursors into each ballot: `first_pointer` tracks the voter's
    # current top choice among remaining candidates, `last_pointer` the
    # current bottom choice.  Tallies are reused buffers, one slot per
    # candidate.
    first_pointer = np.zeros(n_voters, dtype=np.uint8)
    first_tallies = np.empty(n_cands, dtype=np.uint)
    last_pointer = np.full(n_voters, n_cands - 1, dtype=np.uint8)
    last_tallies = np.empty(n_cands, dtype=np.uint)
    for round_ in range(n_cands):
        _tally_at_pointer(first_tallies, election, first_pointer)
        # tolist makes things 2-4x faster
        first_tallies_list = first_tallies.tolist()
        # Did anyone get a majority of first preferences?
        highest = max(first_tallies_list)
        if highest > n_voters / 2:
            return first_tallies_list.index(highest)
        # If not, eliminate candidate with highest number of last-preferences
        _tally_at_pointer(last_tallies, election, last_pointer)
        highest = max(last_tallies)
        highly_hated = _all_indices(last_tallies, highest)
        loser = tiebreak(highly_hated)[0]
        # Handle no tiebreaker case
        if loser is None:
            return None
        # Record this round's eliminated candidate
        eliminated.add(loser)
        # Advance both pointers past all eliminated candidates: first_pointer
        # moves down each ballot, last_pointer moves up (hence inc vs dec).
        _inc_pointer(election, first_pointer, eliminated)
        _dec_pointer(election, last_pointer, eliminated)
    # With n_cands rounds someone must have reached a majority; reaching
    # here means the pointer bookkeeping above is broken.
    raise RuntimeError("Bug in Coombs' calculation")
|
py | 7dfb18c5420d001fa0d11aef502970fd28ddac7c | # Faça um programa que leia algo pelo teclado e mostre na tela o seu tipo primitivo e todas as informações possíveis sobre ele.
# Exercise: read something from the keyboard and show its primitive type plus
# every classification the str API offers.  The user-facing strings are in
# Portuguese (original exercise language) and are left untranslated on purpose.
algo = input('Digite algo: ')
print('O tipo primitivo é: ', type(algo))
print(algo, 'é um número?', algo.isnumeric())
print(algo, 'é uma letra?', algo.isalpha())
print(algo, 'esta em maiusculo?', algo.isupper())
print(algo, 'esta em minusculo?', algo.islower())
print(algo, 'é apenas espacos?', algo.isspace())
print(algo, 'é alfanumérico?', algo.isalnum())
|
py | 7dfb18f1e29d9ececb25f728e38cd5afc871196f | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
from urllib.parse import urlencode, parse_qsl
from owslib.etree import etree
from owslib.util import Authentication, openURL
class ServiceException(Exception):
    """WCS ServiceException.

    Attributes:
        message -- short error message
        xml -- full xml error message from server
    """

    def __init__(self, message, xml):
        # Forward the short message to Exception so `args`, repr() and
        # pickling behave like a normal exception (the original left
        # args empty by never calling the base initializer).
        super(ServiceException, self).__init__(message)
        self.message = message
        self.xml = xml

    def __str__(self):
        return repr(self.message)
class WCSBase(object):
    """Base class to be subclassed by version dependent WCS classes. Provides 'high-level'
    version independent methods"""

    def __new__(self, url, xml, cookies, auth=None, headers=None):
        """ overridden __new__ method

        @type url: string
        @param url: url of WCS capabilities document
        @type xml: string
        @param xml: elementtree object
        @param auth: instance of owslib.util.Authentication
        @param headers: dict for geoserver's request's headers
        @return: inititalised WCSBase object
        """
        # NOTE(review): in __new__ the first parameter (named `self` here) is
        # really the *class*, so the assignments below store cookies/headers
        # and the DescribeCoverage cache on the class object, shared by all
        # instances -- confirm this sharing is intended before changing it.
        # The explicit obj.__init__ call dispatches to the version-specific
        # subclass initializer, which accepts the full (url, xml, ...) args.
        obj = object.__new__(self)
        obj.__init__(url, xml, cookies, auth=auth, headers=headers)
        self.cookies = cookies
        self.headers = headers
        self._describeCoverage = {}  # cache for DescribeCoverage responses
        return obj

    def __init__(self, auth=None, headers=None):
        # Fallback initializer; subclasses override it with the signature
        # that __new__ invokes explicitly above.
        self.auth = auth or Authentication()
        self.headers = headers

    def getDescribeCoverage(self, identifier):
        ''' returns a describe coverage document - checks the internal cache to see if it has been fetched before '''
        if identifier not in list(self._describeCoverage.keys()):
            reader = DescribeCoverageReader(
                self.version, identifier, self.cookies, self.auth, self.headers)
            self._describeCoverage[identifier] = reader.read(self.url)
        return self._describeCoverage[identifier]
class WCSCapabilitiesReader(object):
    """Fetch and parse a WCS GetCapabilities document into an lxml.etree infoset."""

    def __init__(self, version=None, cookies=None, auth=None, headers=None):
        """Initialize the reader.

        @type version: string
        @param version: WCS Version parameter e.g '1.0.0'
        """
        self.version = version
        self._infoset = None
        self.cookies = cookies
        self.headers = headers
        self.auth = auth or Authentication()

    def capabilities_url(self, service_url):
        """Return a GetCapabilities url built on top of `service_url`.

        Query parameters already present in `service_url` are preserved and
        never duplicated; service/request (and version, when known) are
        appended only if missing.

        @type service_url: string
        @param service_url: base url of WCS service
        @rtype: string
        """
        if '?' in service_url:
            params = parse_qsl(service_url.split('?')[1])
        else:
            params = []
        present = [name for name, _ in params]
        if 'service' not in present:
            params.append(('service', 'WCS'))
        if 'request' not in present:
            params.append(('request', 'GetCapabilities'))
        if 'version' not in present and self.version is not None:
            params.append(('version', self.version))
        return service_url.split('?')[0] + '?' + urlencode(params)

    def read(self, service_url, timeout=30):
        """Fetch and parse the capabilities document.

        @type service_url: string
        @param service_url: base url, to which service/version/request
        parameters are appended
        @rtype: elementtree tree
        """
        request = self.capabilities_url(service_url)
        response = openURL(request, timeout=timeout, cookies=self.cookies,
                           auth=self.auth, headers=self.headers)
        return etree.fromstring(response.read())

    def readString(self, st):
        """Parse a WCS capabilities document supplied as an XML string."""
        return etree.fromstring(st)
class DescribeCoverageReader(object):
    """Read and parses WCS DescribeCoverage document into a lxml.etree infoset
    """

    def __init__(self, version, identifier, cookies, auth=None, headers=None):
        """Initialize

        @type version: string
        @param version: WCS Version parameter e.g '1.0.0'
        @param identifier: id of the coverage to describe
        """
        self.version = version
        self._infoset = None
        self.identifier = identifier
        self.cookies = cookies
        self.headers = headers
        self.auth = auth or Authentication()

    def descCov_url(self, service_url):
        """Return a describe coverage url
        @type service_url: string
        @param service_url: base url of WCS service
        @rtype: string
        @return: DescribeCoverage URL
        """
        qs = []
        if service_url.find('?') != -1:
            qs = parse_qsl(service_url.split('?')[1])
        params = [x[0] for x in qs]
        if 'service' not in params:
            qs.append(('service', 'WCS'))
        if 'request' not in params:
            qs.append(('request', 'DescribeCoverage'))
        if 'version' not in params:
            qs.append(('version', self.version))
        # The name of the coverage-identifier parameter changed between
        # WCS protocol versions, hence the branching below.
        if self.version == '1.0.0':
            if 'coverage' not in params:
                qs.append(('coverage', self.identifier))
        elif self.version == '2.0.0':
            if 'CoverageID' not in params:
                qs.append(('CoverageID', self.identifier))
        elif self.version == '2.0.1':
            if 'CoverageID' not in params:
                qs.append(('CoverageID', self.identifier))
        elif self.version == '1.1.0' or self.version == '1.1.1':
            # NOTE: WCS 1.1.0 is ambigous about whether it should be identifier
            # or identifiers (see tables 9, 10 of specification)
            if 'identifiers' not in params:
                qs.append(('identifiers', self.identifier))
            if 'identifier' not in params:
                qs.append(('identifier', self.identifier))
                # NOTE(review): the format parameter is only appended when
                # 'identifier' was missing -- looks like an indentation slip
                # (it probably belongs one level out); confirm before moving.
                qs.append(('format', 'text/xml'))
        urlqs = urlencode(tuple(qs))
        return service_url.split('?')[0] + '?' + urlqs

    def read(self, service_url, timeout=30):
        """Get and parse a Describe Coverage document, returning an
        elementtree tree

        @type service_url: string
        @param service_url: The base url, to which is appended the service,
        version, and request parameters
        @rtype: elementtree tree
        @return: An elementtree tree representation of the capabilities document
        """
        request = self.descCov_url(service_url)
        u = openURL(request, cookies=self.cookies, timeout=timeout, auth=self.auth, headers=self.headers)
        return etree.fromstring(u.read())
|
py | 7dfb1970533210020e5993c71787411eaf5adf44 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for bigquery_to_variant module."""
import unittest
from apache_beam import transforms
from apache_beam.testing import test_pipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from gcp_variant_transforms.libs.bigquery_util import ColumnKeyConstants
from gcp_variant_transforms.testing.testdata_util import hash_name
from gcp_variant_transforms.transforms import bigquery_to_variant
from gcp_variant_transforms.beam_io import vcfio
class BigQueryToVariantTest(unittest.TestCase):
    """Test cases for `BigQueryToVariant` transform."""

    def _get_bigquery_row_and_variant(self):
        # Build one BigQuery row dict and the `Variant` it must decode to.
        # The two fixtures are kept side by side so the field-by-field
        # correspondence (calls, alternate bases, info keys) is easy to check.
        row = {str(ColumnKeyConstants.REFERENCE_NAME): str('chr19'),
               str(ColumnKeyConstants.START_POSITION): 11,
               str(ColumnKeyConstants.END_POSITION): 12,
               str(ColumnKeyConstants.REFERENCE_BASES): 'C',
               str(ColumnKeyConstants.NAMES): ['rs1', 'rs2'],
               str(ColumnKeyConstants.QUALITY): 2,
               str(ColumnKeyConstants.FILTER): ['PASS'],
               str(ColumnKeyConstants.CALLS): [
                   {str(ColumnKeyConstants.CALLS_SAMPLE_ID): (
                       hash_name('Sample1')),
                    str(ColumnKeyConstants.CALLS_GENOTYPE): [0, 1],
                    str(ColumnKeyConstants.CALLS_PHASESET): str('*'),
                    str('GQ'): 20, str('FIR'): [10, 20]},
                   {str(ColumnKeyConstants.CALLS_SAMPLE_ID): (
                       hash_name('Sample2')),
                    str(ColumnKeyConstants.CALLS_GENOTYPE): [1, 0],
                    str(ColumnKeyConstants.CALLS_PHASESET): None,
                    str('GQ'): 10, str('FB'): True}
               ],
               str(ColumnKeyConstants.ALTERNATE_BASES): [
                   {str(ColumnKeyConstants.ALTERNATE_BASES_ALT): str('A'),
                    str('IFR'): None,
                    str('IFR2'): 0.2},
                   {str(ColumnKeyConstants.ALTERNATE_BASES_ALT): str('TT'),
                    str('IFR'): 0.2,
                    str('IFR2'): 0.3}
               ],
               str('IS'): str('some data'),
               str('ISR'): [str('data1'), str('data2')]}
        # Note: the row's per-alt IFR values [None, 0.2] collapse to the
        # single non-null entry [0.2] in the expected variant's info field.
        variant = vcfio.Variant(
            reference_name='chr19', start=11, end=12, reference_bases='C',
            alternate_bases=['A', 'TT'], names=['rs1', 'rs2'], quality=2,
            filters=['PASS'],
            info={'IFR': [0.2], 'IFR2': [0.2, 0.3],
                  'IS': 'some data', 'ISR': ['data1', 'data2']},
            calls=[
                vcfio.VariantCall(
                    sample_id=hash_name('Sample1'), genotype=[0, 1], phaseset='*',
                    info={'GQ': 20, 'FIR': [10, 20]}),
                vcfio.VariantCall(
                    sample_id=hash_name('Sample2'), genotype=[1, 0],
                    info={'GQ': 10, 'FB': True})
            ]
        )
        return row, variant

    def test_pipeline(self):
        # End-to-end check: one BigQuery row through the transform must
        # yield exactly the expected Variant.
        row, expected_variant = self._get_bigquery_row_and_variant()
        pipeline = test_pipeline.TestPipeline()
        variants = (
            pipeline
            | transforms.Create([row])
            | bigquery_to_variant.BigQueryToVariant()
        )
        assert_that(variants, equal_to([expected_variant]))
        pipeline.run()
|
py | 7dfb1a205ffb36a193bb7a6810884f1b253315e1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for testing unit functions of model
"""
import os
import pytest
from pykg2vec.common import KGEArgParser, Importer
from pykg2vec.utils.trainer import Trainer
from pykg2vec.data.kgcontroller import KnowledgeGraph
@pytest.mark.skip(reason="This is a functional method.")
def testing_function_with_args(name, l1_flag, display=False):
    """Train the named KGE model for one tiny epoch, then exercise inference.

    Invoked indirectly by test_inference; skipped as a standalone test.
    """
    # getting the customized configurations from the command-line arguments.
    args = KGEArgParser().get_args([])
    # Preparing data and cache the data for later usage
    knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
    knowledge_graph.prepare_data()
    # Extracting the corresponding model config and definition from Importer().
    config_def, model_def = Importer().import_model_config(name)
    config = config_def(args)
    # Shrink the run (one epoch, 10 test triples) so the functional check
    # stays fast; debug=True further reduces the workload.
    config.epochs = 1
    config.test_step = 1
    config.test_num = 10
    config.disp_result = display
    config.save_model = True
    config.l1_flag = l1_flag
    config.debug = True
    model = model_def(**config.__dict__)
    # Create, Compile and Train the model. While training, several evaluation will be performed.
    trainer = Trainer(model, config)
    trainer.build_model()
    trainer.train_model()
    # can perform all the inference here after training the model
    trainer.enter_interactive_mode()
    # takes head, relation
    tails = trainer.infer_tails(1, 10, topk=5)
    assert len(tails) == 5
    # takes relation, tail
    heads = trainer.infer_heads(10, 20, topk=5)
    assert len(heads) == 5
    # takes head, tail -- some models cannot score relations, skip those
    if not name in ["conve", "proje_pointwise", "tucker"]:
        relations = trainer.infer_rels(1, 20, topk=5)
        assert len(relations) == 5
    trainer.exit_interactive_mode()
# One functional inference run per supported model; each case trains for a
# single epoch via testing_function_with_args above.
@pytest.mark.parametrize("model_name", [
    'analogy',
    'complex',
    'complexn3',
    'conve',
    'convkb',
    'cp',
    'distmult',
    'hole',
    'kg2e',
    'ntn',
    'proje_pointwise',
    'rotate',
    'rescal',
    'simple',
    'simple_ignr',
    'slm',
    'sme',
    'transd',
    'transe',
    'transh',
    'transm',
    'transr',
    'tucker',
])
def test_inference(model_name):
    """Function to test Algorithms with arguments."""
    testing_function_with_args(model_name, True)
def test_inference_on_pretrained_model():
    # Load TransE weights shipped in the test resources and check that all
    # three inference directions return the requested top-k results.
    args = KGEArgParser().get_args([])
    config_def, model_def = Importer().import_model_config("transe")
    config = config_def(args)
    config.load_from_data = os.path.join(os.path.dirname(__file__), "resource", "pretrained", "TransE", Trainer.TRAINED_MODEL_FILE_NAME)
    model = model_def(**config.__dict__)
    # Create the model and load the trained weights.
    trainer = Trainer(model, config)
    trainer.build_model()
    # takes head, relation
    tails = trainer.infer_tails(1, 10, topk=5)
    assert len(tails) == 5
    # takes relation, tail
    heads = trainer.infer_heads(10, 20, topk=5)
    assert len(heads) == 5
    # takes head, tail
    relations = trainer.infer_rels(1, 20, topk=5)
    assert len(relations) == 5
def test_error_on_building_pretrained_model():
    """Building a model from a non-existent pretrained path must raise ValueError."""
    with pytest.raises(ValueError) as e:
        args = KGEArgParser().get_args([])
        config_def, model_def = Importer().import_model_config("transe")
        config = config_def(args)
        config.load_from_data = "pretrained-model-does-not-exist"
        model = model_def(**config.__dict__)
        trainer = Trainer(model, config)
        trainer.build_model()
    # Check the message on the captured exception object.  The assertion is
    # placed after the `with` block (statements following the raising call
    # inside the block would never execute), and uses e.value -- the actual
    # exception -- rather than str(e), which stringifies the ExceptionInfo
    # wrapper and is not guaranteed to contain the message.
    assert "Cannot load model from %s" % config.load_from_data in str(e.value)
|
py | 7dfb1b0de3b1d5c34bbd9ce4b76abbd147cd89e6 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
    """A dict subclass whose items() can report duplicate keys.

    Built from a list of (key, value) pairs.  The raw pair list is kept
    and handed back verbatim from items().  Because json.dumps() consults
    items() on dict subclasses, dumping one of these yields (deliberately
    invalid) JSON with repeated keys, e.g.:

        >>> json.dumps(multidict([('1', 2), ('1', 2)]))
        '{"1": 2, "1": 2}'

    Used to exercise RPC methods with repeated keys in a JSON object."""

    def __init__(self, x):
        super().__init__(x)
        self.x = x

    def items(self):
        return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
    """Functional test of the raw-transaction RPC surface.

    Covers createrawtransaction argument validation, signing with and
    without previous-tx metadata, multisig (2of2, 2of3, combining partial
    signatures), decoderawtransaction, getrawtransaction variants, sequence
    numbers, and transaction version bounds.  Steps are order-dependent:
    later sections reuse variables (rawTx, rawTxSigned, ...) set earlier.
    """

    def set_test_params(self):
        """Run three legacy-address nodes on a fresh chain."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]

    def skip_test_if_missing_module(self):
        """Wallet support is required for signing and balance checks."""
        self.skip_if_no_wallet()

    def setup_network(self, split=False):
        # Add the extra 0<->2 connection on top of the default topology so
        # all three nodes relay to each other.
        super().setup_network()
        connect_nodes_bi(self.nodes, 0, 2)

    def run_test(self):
        """Exercise the raw-transaction RPCs end to end (order-dependent)."""
        self.log.info('prepare some coins for multiple *rawtransaction commands')
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(101)
        self.sync_all()
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
        self.sync_all()
        self.nodes[0].generate(5)
        self.sync_all()
        self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
        block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
        assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
        self.log.info('Check parameter types and required parameters of createrawtransaction')
        # Test `createrawtransaction` required parameters
        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
        # Test `createrawtransaction` invalid extra parameters
        assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
        # Test `createrawtransaction` invalid `inputs`
        txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
        assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
        assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
        assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
        assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
        assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
        # Test `createrawtransaction` invalid `outputs`
        address = self.nodes[0].getnewaddress()
        address2 = self.nodes[0].getnewaddress()
        assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
        self.nodes[0].createrawtransaction(inputs=[], outputs={})  # Should not throw for backwards compatibility
        self.nodes[0].createrawtransaction(inputs=[], outputs=[])
        assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
        assert_raises_rpc_error(-5, "Invalid Monacoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
        assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
        assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
        assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
        assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
        assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
        assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
        # Test `createrawtransaction` invalid `locktime`
        assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
        assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
        assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
        # Test `createrawtransaction` invalid `replaceable`
        assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
        self.log.info('Check that createrawtransaction accepts an array and object as outputs')
        tx = CTransaction()
        # One output
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
        assert_equal(len(tx.vout), 1)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
        )
        # Two outputs
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
        assert_equal(len(tx.vout), 2)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
        )
        # Two data outputs
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([('data', '99'), ('data', '99')])))))
        assert_equal(len(tx.vout), 2)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{'data': '99'}, {'data': '99'}]),
        )
        # Multiple mixed outputs
        tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), ('data', '99'), ('data', '99')])))))
        assert_equal(len(tx.vout), 3)
        assert_equal(
            bytes_to_hex_str(tx.serialize()),
            self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {'data': '99'}, {'data': '99'}]),
        )
        # NOTE(review): the loop variable shadows the builtin type(); harmless
        # here but worth renaming if this section is ever touched.
        for type in ["bech32", "p2sh-segwit", "legacy"]:
            addr = self.nodes[0].getnewaddress("", type)
            addrinfo = self.nodes[0].getaddressinfo(addr)
            pubkey = addrinfo["scriptPubKey"]
            self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
            # Test `signrawtransactionwithwallet` invalid `prevtxs`
            inputs  = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
            outputs = { self.nodes[0].getnewaddress() : 1 }
            rawtx   = self.nodes[0].createrawtransaction(inputs, outputs)
            prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
            succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
            assert succ["complete"]
            if type == "legacy":
                # Legacy (non-segwit) signing does not need the input amount.
                del prevtx["amount"]
                succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
                assert succ["complete"]
            if type != "legacy":
                assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                    {
                        "txid": txid,
                        "scriptPubKey": pubkey,
                        "vout": 3,
                    }
                ])
            assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "txid": txid,
                    "scriptPubKey": pubkey,
                    "amount": 1,
                }
            ])
            assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "scriptPubKey": pubkey,
                    "vout": 3,
                    "amount": 1,
                }
            ])
            assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "txid": txid,
                    "vout": 3,
                    "amount": 1
                }
            ])
        #########################################
        # sendrawtransaction with missing input #
        #########################################
        self.log.info('sendrawtransaction with missing input')
        inputs  = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
        outputs = { self.nodes[0].getnewaddress() : 4.998 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        rawtx   = self.nodes[2].signrawtransactionwithwallet(rawtx)
        # This will raise an exception since there are missing inputs
        assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
        #####################################
        # getrawtransaction with block hash #
        #####################################
        # make a tx by sending then generate 2 blocks; block1 has the tx in it
        tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        block1, block2 = self.nodes[2].generate(2)
        self.sync_all()
        # We should be able to get the raw transaction by providing the correct block
        gottx = self.nodes[0].getrawtransaction(tx, True, block1)
        assert_equal(gottx['txid'], tx)
        assert_equal(gottx['in_active_chain'], True)
        # We should not have the 'in_active_chain' flag when we don't provide a block
        gottx = self.nodes[0].getrawtransaction(tx, True)
        assert_equal(gottx['txid'], tx)
        assert 'in_active_chain' not in gottx
        # We should not get the tx if we provide an unrelated block
        assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
        # An invalid block hash should raise the correct errors
        assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
        assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
        assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
        # Undo the blocks and check in_active_chain
        self.nodes[0].invalidateblock(block1)
        gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
        assert_equal(gottx['in_active_chain'], False)
        self.nodes[0].reconsiderblock(block1)
        assert_equal(self.nodes[0].getbestblockhash(), block2)
        #########################
        # RAW TX MULTISIG TESTS #
        #########################
        # 2of2 test
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[2].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)
        # Tests for createmultisig and addmultisigaddress
        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
        self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])  # createmultisig can only take public keys
        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])  # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
        #use balance deltas instead of absolute values
        bal = self.nodes[2].getbalance()
        # send 1.2 BTC to msig adr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
        # 2of3 test from different nodes
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr3 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[1].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)
        addr3Obj = self.nodes[2].getaddressinfo(addr3)
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
        decTx = self.nodes[0].gettransaction(txId)
        rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        #THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
        assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
        # Locate the 2.2-coin output we just funded.
        vout = False
        for outpoint in rawTx['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break
        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
        assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
        rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
        assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
        self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
        rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
        # 2of2 test for combining transactions
        bal = self.nodes[2].getbalance()
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[1].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)
        self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
        txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
        decTx = self.nodes[0].gettransaction(txId)
        rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
        txDetails = self.nodes[0].gettransaction(txId, True)
        rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
        vout = False
        for outpoint in rawTx2['vout']:
            if outpoint['value'] == Decimal('2.20000000'):
                vout = outpoint
                break
        bal = self.nodes[0].getbalance()
        inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
        outputs = { self.nodes[0].getnewaddress() : 2.19 }
        rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
        rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
        self.log.debug(rawTxPartialSigned1)
        assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
        rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
        self.log.debug(rawTxPartialSigned2)
        assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
        rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
        self.log.debug(rawTxComb)
        self.nodes[2].sendrawtransaction(rawTxComb)
        rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
        # decoderawtransaction tests
        # witness transaction
        encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
        decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
        assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
        assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
        # non-witness transaction
        encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
        decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
        assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
        # getrawtransaction tests
        # 1. valid parameters - only supply txid
        txHash = rawTx["hash"]
        assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
        # 2. valid parameters - supply txid and 0 for non-verbose
        assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
        # 3. valid parameters - supply txid and False for non-verbose
        assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
        # 4. valid parameters - supply txid and 1 for verbose.
        # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
        assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
        # 5. valid parameters - supply txid and True for non-verbose
        assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
        # 6. invalid parameters - supply txid and string "Flase"
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
        # 7. invalid parameters - supply txid and empty array
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
        # 8. invalid parameters - supply txid and empty dict
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
        inputs  = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx   = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 1000)
        # 9. invalid parameters - sequence number out of range
        inputs  = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
        # 10. invalid parameters - sequence number out of range
        inputs  = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
        inputs  = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
        outputs = { self.nodes[0].getnewaddress() : 1 }
        rawtx   = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx= self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
        ####################################
        # TRANSACTION VERSION NUMBER TESTS #
        ####################################
        # Test the minimum transaction version number that fits in a signed 32-bit integer.
        tx = CTransaction()
        tx.nVersion = -0x80000000
        rawtx = ToHex(tx)
        decrawtx = self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['version'], -0x80000000)
        # Test the maximum transaction version number that fits in a signed 32-bit integer.
        tx = CTransaction()
        tx.nVersion = 0x7fffffff
        rawtx = ToHex(tx)
        decrawtx = self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['version'], 0x7fffffff)
# Standard functional-test entry point: instantiate and run the framework.
if __name__ == '__main__':
    RawTransactionsTest().main()
|
py | 7dfb1cbbe3253147c29900c551a26527cc639866 | import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class Function2Conan(ConanFile):
    """Conan recipe for the header-only C++ `function2` library."""
    name = "function2"
    description = "Improved and configurable drop-in replacement to std::function that supports move only types, multiple overloads and more"
    topics = ("function", "functional", "function-wrapper", "type-erasure", "header-only")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/Naios/function2"
    license = "BSL-1.0"
    settings = "compiler"
    no_copy_source = True  # header-only: sources can be packaged in place

    @property
    def _source_subfolder(self):
        # Conventional staging folder for the unpacked upstream sources.
        return "source_subfolder"

    def configure(self):
        """Reject compilers that cannot provide C++14."""
        minimal_cpp_standard = "14"
        # NOTE(review): accessing settings.compiler.cppstd directly can raise
        # when the setting is undefined on some profiles; the defensive form
        # is settings.compiler.get_safe("cppstd") -- confirm against the
        # Conan version this recipe targets.
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, minimal_cpp_standard)
        minimal_version = {
            "gcc": "5",
            "clang": "3.4",
            "apple-clang": "10",
            "Visual Studio": "14"
        }
        compiler = str(self.settings.compiler)
        if compiler not in minimal_version:
            # Unknown compiler: warn but do not block the build.
            self.output.warn(
                "%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler))
            self.output.warn(
                "%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
            return
        version = tools.Version(self.settings.compiler.version)
        if version < minimal_version[compiler]:
            raise ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))

    def source(self):
        """Download and unpack the pinned upstream release."""
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = "function2-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

    def package(self):
        """Copy the license and the headers into the package."""
        self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder)
        self.copy(pattern="*.hpp", dst=os.path.join("include", "function2"), src=os.path.join(self._source_subfolder, "include", "function2"))

    def package_id(self):
        # Header-only: one package id regardless of settings.
        self.info.header_only()
|
py | 7dfb1d2e68d153adce4557fbb2b5e27fbca14a36 | import operator
from functools import reduce
class Bayes:
    """
    An implementation of a naive bayesian filter (ported to Python 3:
    the original used print statements, dict.iteritems and the Python 2
    builtin reduce, none of which exist in Python 3).

    ``datastore`` must provide:
      * ``get(token)`` -> ``(good_count, bad_count, last_modified)``
      * ``get_message_counts()`` -> ``(bad_count, good_count, last_modified)``
    """

    def __init__(self, datastore):
        self.datastore = datastore

    def get_token_counts(self, tokens):
        """
        return a list of tuples of the form:

            (token, good_count, good_frequency, bad_count, bad_frequency)

        (the original docstring claimed a 3-tuple; the code has always
        returned five fields per token.)
        """
        (bmc, gmc, lm) = self.datastore.get_message_counts()
        # Guard against division by zero before any messages are recorded.
        gmc = float(max(1, gmc))
        bmc = float(max(1, bmc))
        rv = []
        for t in tokens:
            (gc, bc, lm) = self.datastore.get(t)
            rv.append((t, gc, gc / gmc, bc, bc / bmc))
        return rv

    def get_token_probabilities(self, tokens):
        """
        given a list of tokens, return a sorted list of tuples of the
        form:

            (probability, token)

        the list contains the 15 tokens whose probability is the farthest
        away from 0.5.
        """
        probs = []
        (good_count, bad_count, lm) = self.datastore.get_message_counts()
        good_count = float(max(1, good_count))
        bad_count = float(max(1, bad_count))
        for token in tokens:
            (gc, bc, lm) = self.datastore.get(token)
            # Double the good count to bias against false positives
            # (per Graham's "A Plan for Spam" heuristic).
            gc *= 2
            if gc + bc >= 5:
                bp = min(1.0, bc / bad_count)
                gp = min(1.0, gc / good_count)
                mn = 0.0001 if gp > 0.01 else 0.01
                mx = 0.9999 if bp > 0.01 else 0.99
                p = max(mn, min(mx, bp / (bp + gp)))
            else:
                # this is a token that we "don't know"
                p = 0.4
            probs.append((abs(p - 0.5), p, token))
        probs.sort()
        probs = probs[-15:]          # keep the 15 most "interesting" tokens
        probs = [(y, z) for (x, y, z) in probs]
        probs.sort()
        return probs

    def filter(self, tokens):
        """
        compute the probabilities of each of the tokens and then compute
        the aggregate probability of the top 15 tokens.

        return a tuple of the form:

            (aggregate_probability, [(probability, token), ...])
        """
        probs = self.get_token_probabilities(tokens)
        prod = reduce(operator.mul, [x for (x, y) in probs], 1)
        inv_prod = reduce(operator.mul, [1 - x for (x, y) in probs], 1)
        try:
            return (prod / (prod + inv_prod), probs)
        except ZeroDivisionError:
            # Both products underflowed to zero; dump diagnostics and
            # report "unknown" (0) rather than crash.
            print(probs)
            print(prod, inv_prod)
            return (0, probs)

    def dump(self):
        """Print per-token counts, sorted ascending, for debugging."""
        def display_map_sorted(prefix, counts):
            # One "<prefix>: <count> <token>" line per entry, ascending.
            pairs = [(int(v), k) for k, v in counts.items()]
            pairs.sort()
            for v, k in pairs:
                print("%s: %5d %s" % (prefix, v, k))
        # NOTE(review): self.good / self.bad are never assigned anywhere in
        # this class -- dump() appears to predate the datastore refactor.
        # Verify before relying on it.
        display_map_sorted("spam", self.bad)
        display_map_sorted("ham", self.good)
|
py | 7dfb1e6bb548a29d2b5c49401199a96a0d4ba757 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
from workflow.hetero_ftl_workflow.hetero_arbiter_workflow import FTLArbiterWorkFlow
from arch.api import eggroll
from arch.api import federation
from arch.api.utils import log_utils
LOGGER = log_utils.getLogger()
config_path = "./conf/arbiter_runtime_conf.json"
class TestFTLArbiter(FTLArbiterWorkFlow):
    """Arbiter-side workflow for the hetero-FTL test: loads the local
    runtime config and initializes eggroll/federation before running."""

    def __init__(self):
        super(TestFTLArbiter, self).__init__()

    def _init_argument(self):
        """Load arbiter runtime config and bring up eggroll + federation."""
        with open(config_path) as conf_f:
            runtime_json = json.load(conf_f)
        self._initialize(runtime_json)
        # NOTE(review): job_id is the module-level global assigned in the
        # __main__ block below -- this method only works when the module is
        # run as a script.
        LOGGER.debug("The Arbiter job id is {}".format(job_id))
        LOGGER.debug("The Arbiter work mode id is {}".format(self.workflow_param.work_mode))
        eggroll.init(job_id, self.workflow_param.work_mode)
        federation.init(job_id, runtime_json)
        LOGGER.debug("Finish eggroll and federation init")
# Usage: python <script> <job_id>; the job id is consumed by _init_argument.
if __name__ == '__main__':
    job_id = sys.argv[1]
    workflow = TestFTLArbiter()
    workflow.run()
|
py | 7dfb1f48002ac978365567663b29361fe430e314 | # coding=utf-8
# Copyright (c) DIRECT Contributors
"""Tests for the direct.common.subsample module."""
# Code and comments can be shared with code of FastMRI under the same MIT license:
# https://github.com/facebookresearch/fastMRI/
# The code has been adjusted to our needs.
import numpy as np
import pytest
import torch
from direct.common.subsample import FastMRIRandomMaskFunc, RadialMaskFunc, SpiralMaskFunc
@pytest.mark.parametrize(
    "center_fracs, accelerations, batch_size, dim",
    [
        ([0.2], [4], 4, 320),
        ([0.2, 0.4], [4, 8], 2, 368),
    ],
)
def test_fastmri_random_mask_reuse(center_fracs, accelerations, batch_size, dim):
    """The same seed must reproduce the identical mask on every call."""
    mask_func = FastMRIRandomMaskFunc(center_fracs, accelerations)
    shape = (batch_size, dim, dim, 2)
    mask1 = mask_func(shape, seed=123)
    mask2 = mask_func(shape, seed=123)
    mask3 = mask_func(shape, seed=123)
    assert torch.all(mask1 == mask2)
    assert torch.all(mask2 == mask3)
@pytest.mark.parametrize(
    "center_fracs, accelerations, batch_size, dim",
    [
        ([0.2], [4], 4, 320),
        ([0.2, 0.4], [4, 8], 2, 368),
    ],
)
def test_fastmri_random_mask_low_freqs(center_fracs, accelerations, batch_size, dim):
    """Check the mask's broadcast shape and that a center band is fully sampled."""
    mask_func = FastMRIRandomMaskFunc(center_fracs, accelerations)
    shape = (batch_size, dim, dim, 2)
    mask = mask_func(shape, seed=123)
    # Masks broadcast over the non-spatial dims: every entry is 1 except the
    # two spatial axes.
    mask_shape = [1] * (len(shape) + 1)
    mask_shape[-2] = dim
    mask_shape[-3] = dim
    assert list(mask.shape) == mask_shape
    num_low_freqs_matched = False
    for center_frac in center_fracs:
        num_low_freqs = int(round(dim * center_frac))
        pad = (dim - num_low_freqs + 1) // 2
        # NOTE(review): mask dim 0 has size 1, so for pad > 0 this slice is
        # empty and np.all(...) on an empty array is vacuously True -- the
        # check passes without testing anything.  The intent is presumably to
        # slice a spatial axis (e.g. mask[..., pad:pad+num_low_freqs, :]);
        # confirm against the mask layout and fix.
        if np.all(mask[pad : pad + num_low_freqs].numpy() == 1):
            num_low_freqs_matched = True
    assert num_low_freqs_matched
@pytest.mark.parametrize(
    "shape, center_fractions, accelerations",
    [
        ([4, 32, 32, 2], [0.08], [4]),
        ([2, 64, 64, 2], [0.04, 0.08], [8, 4]),
    ],
)
def test_apply_mask_fastmri(shape, center_fractions, accelerations):
    """FastMRI masks are binary, broadcastable, and contain their ACS region."""
    mask_func = FastMRIRandomMaskFunc(
        center_fractions=center_fractions,
        accelerations=accelerations,
        uniform_range=False,
    )
    mask = mask_func(shape[1:], seed=123)
    acs_mask = mask_func(shape[1:], seed=123, return_acs=True)
    expected_mask_shape = (1, shape[1], shape[2], 1)
    assert mask.max() == 1
    assert mask.min() == 0
    assert mask.shape == expected_mask_shape
    # Every ACS (autocalibration) line must also be in the sampling mask.
    assert np.allclose(mask & acs_mask, acs_mask)
@pytest.mark.parametrize(
    "shape, center_fractions, accelerations",
    [
        ([4, 32, 32, 2], [0.08], [4]),
        ([2, 64, 64, 2], [0.04, 0.08], [8, 4]),
    ],
)
def test_same_across_volumes_mask_fastmri(shape, center_fractions, accelerations):
    """With a fixed seed, every slice of a volume receives the same mask."""
    mask_func = FastMRIRandomMaskFunc(
        center_fractions=center_fractions,
        accelerations=accelerations,
        uniform_range=False,
    )
    num_slices = shape[0]
    masks = [mask_func(shape[1:], seed=123) for _ in range(num_slices)]
    assert all(np.allclose(masks[_], masks[_ + 1]) for _ in range(num_slices - 1))
@pytest.mark.parametrize(
    "shape, accelerations",
    [
        ([4, 32, 32, 2], [4]),
        ([2, 64, 64, 2], [8, 4]),
    ],
)
def test_apply_mask_radial(shape, accelerations):
    """Radial masks are binary, broadcastable, and contain their ACS region."""
    mask_func = RadialMaskFunc(
        accelerations=accelerations,
    )
    mask = mask_func(shape[1:], seed=123)
    acs_mask = mask_func(shape[1:], seed=123, return_acs=True)
    expected_mask_shape = (1, shape[1], shape[2], 1)
    assert mask.max() == 1
    assert mask.min() == 0
    assert mask.shape == expected_mask_shape
    assert np.allclose(mask & acs_mask, acs_mask)
@pytest.mark.parametrize(
    "shape, accelerations",
    [
        ([4, 32, 32, 2], [4]),
        ([2, 64, 64, 2], [8, 4]),
    ],
)
def test_same_across_volumes_mask_radial(shape, accelerations):
    """With a fixed seed, every slice of a volume receives the same radial mask."""
    mask_func = RadialMaskFunc(
        accelerations=accelerations,
    )
    num_slices = shape[0]
    masks = [mask_func(shape[1:], seed=123) for _ in range(num_slices)]
    assert all(np.allclose(masks[_], masks[_ + 1]) for _ in range(num_slices - 1))
@pytest.mark.parametrize(
    "shape, accelerations",
    [
        ([4, 32, 32, 2], [4]),
        ([2, 64, 64, 2], [8, 4]),
    ],
)
def test_apply_mask_spiral(shape, accelerations):
    """Spiral masks are binary, broadcastable, and contain their ACS region."""
    mask_func = SpiralMaskFunc(
        accelerations=accelerations,
    )
    mask = mask_func(shape[1:], seed=123)
    acs_mask = mask_func(shape[1:], seed=123, return_acs=True)
    expected_mask_shape = (1, shape[1], shape[2], 1)
    assert mask.max() == 1
    assert mask.min() == 0
    assert mask.shape == expected_mask_shape
    assert np.allclose(mask & acs_mask, acs_mask)
@pytest.mark.parametrize(
    "shape, accelerations",
    [
        ([4, 32, 32, 2], [4]),
        ([2, 64, 64, 2], [8, 4]),
    ],
)
def test_same_across_volumes_mask_spiral(shape, accelerations):
    """With a fixed seed, every slice of a volume receives the same spiral mask."""
    mask_func = SpiralMaskFunc(
        accelerations=accelerations,
    )
    num_slices = shape[0]
    masks = [mask_func(shape[1:], seed=123) for _ in range(num_slices)]
    assert all(np.allclose(masks[_], masks[_ + 1]) for _ in range(num_slices - 1))
|
py | 7dfb230c40a2580c88f502974ab402da9cb68904 | ##############################################################################
# read_all_phm08.py
# https://github.com/DigiLog-N/DigiLog-N
# Copyright 2020 Canvass Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from DataSource import DataSource
from DataSourceRegistry import DataSourceRegistry
from PlasmaReader import PlasmaReader
if __name__ == '__main__':
    # Look up the PHM08 challenge data-source in the local Mongo-backed registry.
    registry = DataSourceRegistry('127.0.0.1', 27017, 'digilog_n', 'data_sources')
    data_source = registry.get_data_source('PHM08 Prognostics Data Challenge Dataset')
    if data_source:
        # PlasmaReader rips every column from the Plasma store and keeps each one
        # as a list in an internal dict.  Calling read_all_columns() again later
        # only extracts buffers that appeared since the previous call and appends
        # their contents, so callers can poll it on an interval.  Readers
        # specialised for a particular data-source could be sub-classed from
        # PlasmaReader; a DataFrame-based variant exists but looks heavyweight
        # for this use-case.
        reader = PlasmaReader(data_source.get_path_to_plasma_file())
        results = reader.read_all_columns()
        print("There should be 619 results in op3 right now.")
        print(len(results['op3']))
        # Dump every declared field: its name, its metadata, then its values.
        for field, meta in data_source.get_fields().items():
            print(field)
            print(meta)
            print(results[field])
            print("")
|
py | 7dfb2500e4a66540c7d5d9175981fe9501e103a2 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.lines import Line2D
import utils
def displayPoints(ax, points):
    """Draw every point (objects with .x/.y attributes) as a red dot on *ax*."""
    xs = [point.x for point in points]
    ys = [point.y for point in points]
    ax.plot(xs, ys, 'ro')
def displayTesselationLines(ax, plt, d):
    """Overlay dashed grey tesselation grid lines spaced by *d* on *ax*.

    plt.xlim()/plt.ylim() are re-queried for every line on purpose: adding a
    line can influence the current axis limits, and this mirrors the original
    evaluation order exactly.
    """
    for x in utils.tesselationLinesBetween(plt.xlim()[0], plt.xlim()[1], d):
        ax.add_line(Line2D([x, x], [plt.ylim()[0], plt.ylim()[1]], color="grey", linestyle="--"))
    for y in utils.tesselationLinesBetween(plt.ylim()[0], plt.ylim()[1], d):
        ax.add_line(Line2D([plt.xlim()[0], plt.xlim()[1]], [y, y], color="grey", linestyle="--"))
class Path:
    """An ordered collection of drawable segments plus their stage borders."""

    def __init__(self, ID):
        self.pathID = ID          # external identifier of this path
        self.segments = []        # drawable segment objects, in order
        self.numberOfSegments = 0 # kept in sync by append()
        self.stageBorders = []    # accumulated by calculateStageBorders()

    def append(self, segment):
        """Add *segment* to the end of the path."""
        self.segments.append(segment)
        self.numberOfSegments += 1

    def display(self, locator=1):
        """Plot all segments, stage borders and tesselation lines with matplotlib."""
        fig, ax = plt.subplots()
        ax.set_aspect('equal', 'box')
        ax.grid(True)
        # Tick spacing follows the tesselation distance.
        ax.xaxis.set_major_locator(ticker.MultipleLocator(locator))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(locator))
        for segment in self.segments:
            segment.draw(ax)
        displayPoints(ax, self.stageBorders)
        ax.autoscale_view()
        displayTesselationLines(ax, plt, locator)
        plt.show()

    def calculateStageBorders(self, d):
        """Collect the stage borders of every segment for tesselation distance *d*."""
        for segment in self.segments:
            self.stageBorders.extend(segment.calculateStageBorders(d))
|
py | 7dfb26530111ffad979b798b7a77ab3cadc0691f | import os, datetime
import requests, json
from core.bssidFinder import bssidFinder
from core.employee_lookup import employee_lookup
from core.google import google
from core.hashDecrypt import hashdecrypt
from core.ipFinder import ipFinder
from core.mailToIP import mailToIP
from core.profilerFunc import profilerFunc
from core.searchAdresse import searchAdresse
from core.searchTwitter import searchTwitter
from core.searchPersonne import searchPersonne
from core.searchInstagram import searchInstagram
from core.searchUserName import searchUserName
from core.searchNumber import searchNumber
from core.searchEmail import SearchEmail
from core.Profiler import Profiler
from core.facebookStalk import facebookStalk
import time
def init(progress, frame1):
    """One-time startup: set globals, resolve the public IP and geo-data,
    and create the 'Watched' database directory.

    Args:
        progress: presumably a tkinter ttk.Progressbar (written via
            progress['value']) — TODO confirm against the caller.
        frame1: the tk container that hosts the bar; update_idletasks() is
            called so the bar repaints between steps.
    """
    global version
    global monip, monpays, country, countrycode, Region, Regionname
    # NOTE(review): 'zip' shadows the builtin at module level once assigned.
    global pathDatabase, city, zip, timezone, isp, org, query
    # NOTE(review): the names below are imported at module scope and are never
    # assigned in this function, so these 'global' declarations are inert.
    global bssidFinder, employee_lookup, google, hashdecrypt, ipFinder, mailToIP, profilerFunc
    global searchPersonne, SearchEmail, searchInstagram, searchTwitter, searchNumber, searchAdresse, searchUserName, facebookStalk
    global Profiler
    version = 'alpha'
    # Nudge the progress bar and let the UI repaint; the short sleep makes the
    # progression visible to the user.
    progress['value'] = 10
    frame1.update_idletasks()
    time.sleep(0.1)
    # Build <this file's directory>\Watched using backslashes (Windows-only).
    pathDatabase = os.path.abspath(__file__).split("\\")[:-1]
    pathDatabase = "\\".join(pathDatabase)+"\\Watched"
    # Resolve our public IP, then ask ip-api.com for its geolocation record.
    monip = requests.get("https://api.ipify.org/").text
    monpays = requests.get("http://ip-api.com/json/"+monip).text
    value = json.loads(monpays)
    progress['value'] = 15
    frame1.update_idletasks()
    time.sleep(0.1)
    # Publish the geolocation fields as module globals for the rest of the app.
    country = value['country']
    countrycode = value['countryCode']
    Region = value['region']
    Regionname = value['regionName']
    city = value['city']
    zip = value['zip']
    progress['value'] = 20
    frame1.update_idletasks()
    time.sleep(0.1)
    # timezone = value['timezone']
    # isp = value['isp']
    # org = value['org']
    # query = value['query']
    progress['value'] = 30
    frame1.update_idletasks()
    time.sleep(0.1)
    # Ensure the watch database directory exists.
    if not os.path.exists(pathDatabase):
        os.mkdir(pathDatabase)
|
py | 7dfb26a56383eb0895a9e0b48b99792bbd81f571 | import gitlab
import os
import boto3
import base64
from botocore.exceptions import ClientError
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def get_secret():
    """Return the GitLab private token stored in AWS Secrets Manager.

    The secret name and region come from the ``SecretName`` and ``Region``
    environment variables.  The stored secret is expected to look like
    ``{"<key>": "<token>"}``; only the part after the last ':' is returned,
    with the surrounding quotes/braces stripped.

    Returns:
        The token string, or None if the response carried neither a
        SecretString nor a SecretBinary payload.

    Raises:
        ClientError: if Secrets Manager rejects the request.
    """
    secret_name = os.environ['SecretName']
    region_name = os.environ['Region']

    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name
    )

    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        # Log and re-raise every service error (decryption failure, throttling,
        # bad parameter, missing secret, ...).  The previous version re-raised
        # only a fixed list of error codes and fell into dead code (referencing
        # an unbound response variable) for any other code.
        logging.error(e)
        raise e

    # BUG FIX: this success path used to sit inside the except-branch's
    # if/elif chain, so a successful call always fell through and returned
    # None.  Process the response after the try instead.
    if 'SecretString' in get_secret_value_response:
        secret = get_secret_value_response['SecretString']
        return secret.split(':')[-1].strip('" "}\n')
    if 'SecretBinary' in get_secret_value_response:
        # BUG FIX: b64decode() returns bytes; decode to str before using
        # str.split/strip (the old code mixed bytes with str arguments).
        decoded_binary_secret = base64.b64decode(
            get_secret_value_response['SecretBinary']).decode('utf-8')
        return decoded_binary_secret.split(':')[-1].strip('"}')
    return None
def lambda_handler(event, context):
    """Trigger the 'main' pipeline of the model-deploy project in GitLab.

    Looks up the project named ``<DeployProjectName>-<SageMakerProjectId>`` on
    the configured GitLab server, creates a temporary pipeline trigger, fires
    it for the ``main`` branch and removes the trigger again.

    Args:
        event: Lambda invocation payload (unused).
        context: Lambda runtime context (unused).

    Returns:
        dict with a human-readable ``message`` describing the outcome.

    Raises:
        Exception: if the GitLab token cannot be read from Secrets Manager.
    """
    gitlab_project_name = os.environ['DeployProjectName']
    gitlab_server_uri = os.environ['GitLabServer']
    gitlab_private_token = get_secret()
    project_id = os.environ['SageMakerProjectId']

    if gitlab_private_token is None:
        raise Exception("Failed to retrieve secret from Secrets Manager")

    # Configure the GitLab SDK client.
    gl = gitlab.Gitlab(gitlab_server_uri, private_token=gitlab_private_token)
    # Consistency fix: use the module logger instead of a stray debug print()
    # and ad-hoc root-logging calls.
    logger.info(gitlab_server_uri)

    # Find the GitLab project for model deployment.
    try:
        project = gl.projects.list(search=gitlab_project_name + '-' + project_id)[0]
        logger.info("Project")
        logger.info(project)
    except Exception as e:
        logger.error("Unable to find the project for model deploy in GitLab..")
        logger.error(e)
        return {
            'message' : "Failed to find GitLab project.."
        }

    try:
        trigger = project.triggers.create(
            {'description' : gitlab_project_name + '-lambda-generated-token'})
        try:
            # Fire the pipeline on 'main' with the one-shot trigger token.
            project.trigger_pipeline('main', trigger.token)
        finally:
            # FIX: always remove the temporary trigger, even when triggering
            # fails (the old code leaked the trigger on error).
            trigger.delete()
    except Exception as e:
        logger.error("Failed to trigger pipeline..")
        logger.error(e)
        return {
            'message' : "Failed to trigger pipeline.."
        }
    return {
        'message' : "Success!"
    }
|
py | 7dfb270ea89df6be0cceae54cbfcd73cba436ad2 | import random
import requests
import globals
def select_color_codes():
    """Pick a random RGB color not already present in globals.color_codes.

    The chosen color is appended to the shared list as an RGBA 4-tuple with
    alpha=150, and the list is capped at the 5 most recent entries.

    Returns:
        (r, g, b) tuple of ints in [0, 255].
    """
    while True:  # iterate instead of recursing: no RecursionError risk
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        # BUG FIX: entries are stored as 4-tuples (r, g, b, alpha), so the old
        # 3-tuple membership test `(r, g, b) not in globals.color_codes` could
        # never match and duplicates were silently accepted.  Compare against
        # the RGB prefix of each stored entry instead.
        if (r, g, b) not in [c[:3] for c in globals.color_codes]:
            globals.color_codes.append((r, g, b, 150))  # transparency (alpha=150)
            if len(globals.color_codes) > 5:
                globals.color_codes = globals.color_codes[1:]
            return r, g, b
def ping_address(web_address):
    """Probe *web_address* and report whether it answers with HTTP 200.

    Args:
        web_address: URL to request (timeout taken from globals.timeout).

    Returns:
        1 if the address responds with status 200, otherwise -1.
    """
    try:
        page = requests.get(web_address, timeout=globals.timeout)
    except Exception:
        # Deliberately best-effort: timeouts, DNS failures and malformed URLs
        # all simply mean "not reachable".  (The old code bound the exception
        # to an unused variable and routed the result through a redundant
        # try/finally return.)
        return -1
    return 1 if page.status_code == 200 else -1
|
py | 7dfb271a3720941d287a337103fdea2d987d1fc6 | import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import torch
from skimage.metrics import peak_signal_noise_ratio
from matplotlib.pyplot import imread, imsave
from skimage.transform import resize
import time
import sys
import glob
sys.path.append('../')
from admm_utils import *
from torch import optim
from models import *
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def run(f_name, specific_result_dir, noise_sigma, num_iter, GD_lr):
    """Deep-Image-Prior denoising of one image corrupted with uniform noise.

    Loads the image at ``f_name``, resizes it to 128x128, adds uniform noise
    of amplitude ``noise_sigma``, then fits the untrained generator ``skip``
    to the noisy observation with Adam for ``num_iter`` steps, periodically
    saving reconstructions and metrics into ``specific_result_dir``.

    Args:
        f_name: path of the input image (uint8 or float32; RGB or grayscale).
        specific_result_dir: output directory; trailing slash expected.
        noise_sigma: half-width of the uniform noise added to the image.
        num_iter: number of optimizer steps.
        GD_lr: Adam learning rate.
    """
    img = imread(f_name)
    if img.dtype == 'uint8':
        img = img.astype('float32') / 255  # scale to [0, 1]
    elif img.dtype == 'float32':
        img = img.astype('float32')
    else:
        raise TypeError()
    img = np.clip(resize(img, (128, 128)), 0, 1)
    imsave(specific_result_dir + 'true.png', img)
    # Grayscale images get an explicit channel axis so both cases are CHW.
    if len(img.shape) == 2:
        img = img[:,:,np.newaxis]
        num_channels = 1
    else:
        num_channels = 3
    img = img.transpose((2, 0, 1))
    x_true = torch.from_numpy(img).unsqueeze(0).type(dtype)
    # b is the flattened noisy observation: uniform noise in
    # [-noise_sigma, +noise_sigma] added elementwise.
    b = x_true.reshape(-1,)
    b = b + noise_sigma * (2 * torch.rand(b.shape) - 1).type(dtype)
    b_clipped = torch.clamp(b, 0, 1)
    if num_channels == 3:
        imsave(specific_result_dir+'corrupted.png', b_clipped.reshape(1, num_channels, 128, 128)[0].permute((1,2,0)).cpu().numpy())
    else:
        imsave(specific_result_dir + 'corrupted.png', b_clipped.reshape(1, num_channels, 128, 128)[0, 0].cpu().numpy(), cmap='gray')
    # Data-fidelity term: squared L2 distance to the (unclipped) noisy image.
    def fn(x): return torch.norm(x.reshape(-1) - b) ** 2 / 2
    # G = skip(3, 3,
    #        num_channels_down = [16, 32, 64, 128, 128, 128],
    #        num_channels_up   = [16, 32, 64, 128, 128, 128],
    #        num_channels_skip = [4, 4, 4, 4, 4, 4],
    #        filter_size_up = [7, 7, 5, 5, 3, 3],filter_size_down = [7, 7, 5, 5, 3, 3],  filter_skip_size=1,
    #        upsample_mode='bilinear', # downsample_mode='avg',
    #        need1x1_up=False,
    #        need_sigmoid=True, need_bias=True, pad='reflection', act_fun='LeakyReLU').type(dtype)
    # Untrained encoder-decoder generator (Deep Image Prior architecture).
    G = skip(3, 3,
             num_channels_down=[16, 32, 64, 128, 128],
             num_channels_up=[16, 32, 64, 128, 128],#[16, 32, 64, 128, 128],
             num_channels_skip=[0, 0, 0, 0, 0],
             filter_size_up=3, filter_size_down=3, filter_skip_size=1,
             upsample_mode='nearest',  # downsample_mode='avg',
             need1x1_up=False,
             need_sigmoid=True, need_bias=True, pad='reflection', act_fun='LeakyReLU').type(dtype)
    # Fixed random latent input; only the network weights are optimized.
    z = torch.zeros_like(x_true).type(dtype).normal_()
    z.requires_grad = False
    opt = optim.Adam(G.parameters(), lr=GD_lr)
    record = {"psnr_gt": [],
              "mse_gt": [],
              "total_loss": [],
              "prior_loss": [],
              "fidelity_loss": [],
              "cpu_time": [],
              }
    results = None
    for t in range(num_iter):
        x = G(z)
        fidelity_loss = fn(x)
        # prior_loss = (torch.sum(torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:])) + torch.sum(torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :])))
        total_loss = fidelity_loss #+ 0.01 * prior_loss
        opt.zero_grad()
        total_loss.backward()
        opt.step()
        # Exponential moving average of the reconstruction (smooths iterates).
        if results is None:
            results = x.detach().cpu().numpy()
        else:
            results = results * 0.99 + x.detach().cpu().numpy() * 0.01
        psnr_gt = peak_signal_noise_ratio(x_true.cpu().numpy(), results)
        mse_gt = np.mean((x_true.cpu().numpy() - results) ** 2)
        # NOTE(review): .cuda() here assumes a GPU is present even though the
        # script-level dtype falls back to CPU — confirm on a CPU-only box.
        fidelity_loss = fn(torch.tensor(results).cuda()).detach()
        if (t + 1) % 1000 == 0:
            if num_channels == 3:
                imsave(specific_result_dir + 'iter%d_PSNR_%.2f.png'%(t, psnr_gt), results[0].transpose((1,2,0)))
            else:
                imsave(specific_result_dir + 'iter%d_PSNR_%.2f.png'%(t, psnr_gt), results[0, 0], cmap='gray')
        record["psnr_gt"].append(psnr_gt)
        record["mse_gt"].append(mse_gt)
        record["fidelity_loss"].append(fidelity_loss.item())
        record["cpu_time"].append(time.time())
        if (t + 1) % 10 == 0:
            # NOTE(review): 'f_num' is a module-level loop variable from the
            # driver script, not a parameter — fails if run() is called alone.
            print('Img %d Iteration %5d PSRN_gt: %.2f MSE_gt: %e' % (f_num, t + 1, psnr_gt, mse_gt))
        # NOTE(review): original indentation is ambiguous in this copy; the
        # record is (re)saved every iteration here, which is safe but slow.
        np.savez(specific_result_dir+'record', **record)
# torch.manual_seed(500)
# Select tensor type once; run() and the model creation both rely on this
# module-level 'dtype'.
if torch.cuda.is_available():
    dtype = torch.cuda.FloatTensor
else:
    dtype = torch.FloatTensor
# NOTE(review): dataset_dir is never used below (glob hard-codes the path).
dataset_dir = '../../data/'
results_dir = '../../data/results/DIP_uniform/'
# NOTE(review): makedirs without exist_ok raises if the directory already
# exists — presumably intentional, to avoid clobbering a previous run.
os.makedirs(results_dir)
f_name_list = glob.glob('../../data/*.jpg')
# Denoise every JPEG in the data directory; each image gets its own
# numbered results sub-directory.
for f_num, f_name in enumerate(f_name_list):
    specific_result_dir = results_dir+str(f_num)+'/'
    os.makedirs(specific_result_dir)
    run(f_name = f_name,
        specific_result_dir = specific_result_dir,
        noise_sigma = 25 / 255,  # classic 25/255 uniform-noise level
        num_iter = 50000,
        GD_lr=0.001)
|
py | 7dfb2775d7c2873df1b0af529881f1013db21fec | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer mapping definitions. This file is generated by itself. Everytime
you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
LEXERS = {
'ABAPLexer': ('pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'APLLexer': ('pygments.lexers.apl', 'APL', ('apl',), ('*.apl',), ()),
'AbnfLexer': ('pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pygments.lexers.actionscript', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pygments.lexers.actionscript', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pygments.lexers.pascal', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pygments.lexers.ambient', 'AmbientTalk', ('at', 'ambienttalk', 'ambienttalk/2'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.graphics', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.automation', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BSTLexer': ('pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BaseMakefileLexer': ('pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'batch', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BibTeXLexer': ('pygments.lexers.bibtex', 'BibTeX', ('bib', 'bibtex'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BnfLexer': ('pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.dsls', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pygments.lexers.scripting', 'ChaiScript', ('chai', 'chaiscript'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.javascript', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DebianControlLexer': ('pygments.lexers.installers', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DockerLexer': ('pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ElmLexer': ('pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'EmacsLispLexer': ('pygments.lexers.lisp', 'EmacsLisp', ('emacs', 'elisp', 'emacs-lisp'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'EzhilLexer': ('pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FishShellLexer': ('pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pygments.lexers.fortran', 'Fortran', ('fortran',), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'GAPLexer': ('pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GLShaderLexer': ('pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.testing', 'Gherkin', ('cucumber', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.go', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HamlLexer': ('pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HaskellLexer': ('pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.haxe', 'Haxe', ('hx', 'haxe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JagsLexer': ('pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JasminLexer': ('pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.javascript', 'JavaScript', ('js', 'javascript'), ('*.js', '*.jsm'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JclLexer': ('pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'JsonBareObjectLexer': ('pygments.lexers.data', 'JSONBareObject', ('json-object',), (), ('application/json-object',)),
'JsonLdLexer': ('pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pygments.lexers.data', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.julia', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pygments.lexers.javascript', 'Juttle', ('juttle', 'juttle'), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KalLexer': ('pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LSLLexer': ('pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LeanLexer': ('pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
'LessCssLexer': ('pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pygments.lexers.configs', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LimboLexer': ('pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pygments.lexers.haskell', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pygments.lexers.haskell', 'Literate Cryptol', ('lcry', 'literate-cryptol', 'lcryptol'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pygments.lexers.haskell', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pygments.lexers.haskell', 'Literate Idris', ('lidr', 'literate-idris', 'lidris'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pygments.lexers.javascript', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MIMELexer': ('pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MOOCodeLexer': ('pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'MakefileLexer': ('pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pygments.lexers.markup', 'markdown', ('md',), ('*.md',), ('text/x-markdown',)),
'MaskLexer': ('pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pygments.lexers.scripting', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MozPreprocCssLexer': ('pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.nimrod', 'Nimrod', ('nim', 'nimrod'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NuSMVLexer': ('pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OocLexer': ('pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'PacmanConfLexer': ('pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'Perl6Lexer': ('pygments.lexers.perl', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PonyLexer': ('pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PostScriptLexer': ('pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pygments.lexers.shell', 'PowerShell Session', ('ps1con',), (), ()),
'PraatLexer': ('pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'PrologLexer': ('pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PugLexer': ('pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'QBasicLexer': ('pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QVToLexer': ('pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QmlLexer': ('pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pygments.lexers.rnc', 'Relax-NG Compact', ('rnc', 'rng-compact'), ('*.rnc',), ()),
'RPMSpecLexer': ('pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'ResourceLexer': ('pygments.lexers.resource', 'ResourceBundle', ('resource', 'resourcebundle'), (), ()),
'RexxLexer': ('pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RoboconfGraphLexer': ('pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot',), ('text/x-robotframework',)),
'RqlLexer': ('pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pygments.lexers.markup', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pygments.lexers.trafficscript', 'TrafficScript', ('rts', 'trafficscript'), ('*.rts',), ()),
'RubyConsoleLexer': ('pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.ruby', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust',)),
'SASLexer': ('pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SarlLexer': ('pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShExCLexer': ('pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SilverLexer': ('pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SlashLexer': ('pygments.lexers.slash', 'Slash', ('slash',), ('*.sl',), ()),
'SlimLexer': ('pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SourcePawnLexer': ('pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.installers', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SparqlLexer': ('pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pygments.lexers.supercollider', 'SuperCollider', ('sc', 'supercollider'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TAPLexer': ('pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TOMLLexer': ('pygments.lexers.configs', 'TOML', ('toml',), ('*.toml',), ()),
'Tads3Lexer': ('pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TasmLexer': ('pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TeraTermLexer': ('pygments.lexers.teraterm', 'Tera Term macro', ('ttl', 'teraterm', 'teratermmacro'), ('*.ttl',), ('text/x-teratermmacro',)),
'TermcapLexer': ('pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
'TerminfoLexer': ('pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
'TerraformLexer': ('pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
'TexLexer': ('pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'ThriftLexer': ('pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
'TodotxtLexer': ('pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
'TransactSqlLexer': ('pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TurtleLexer': ('pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
'TwigHtmlLexer': ('pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
'TwigLexer': ('pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
'TypeScriptLexer': ('pygments.lexers.javascript', 'TypeScript', ('ts', 'typescript'), ('*.ts', '*.tsx'), ('text/x-typescript',)),
'TypoScriptCssDataLexer': ('pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
'TypoScriptHtmlDataLexer': ('pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
'TypoScriptLexer': ('pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
'UcodeLexer': ('pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
'UniconLexer': ('pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
'UrbiscriptLexer': ('pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VBScriptLexer': ('pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
'VCLLexer': ('pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
'VCLSnippetLexer': ('pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
'VCTreeStatusLexer': ('pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
'VGLLexer': ('pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'WDiffLexer': ('pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
'WhileyLexer': ('pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
'X10Lexer': ('pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
'XQueryLexer': ('pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XorgLexer': ('pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
'XsltLexer': ('pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'XtlangLexer': ('pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
'YamlJinjaLexer': ('pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls',), ('text/x-yaml+jinja', 'text/x-sls')),
'YamlLexer': ('pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
'ZephirLexer': ('pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
'ZigLexer': ('pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
}
if __name__ == '__main__':  # pragma: no cover
    import sys
    import os
    import importlib

    # Regenerate the LEXERS table above: walk every lexer module in this
    # package, collect each lexer's metadata, and rewrite this file in place.

    # lookup lexers
    found_lexers = []
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for root, _, files in os.walk('.'):
        for filename in files:
            if filename.endswith('.py') and not filename.startswith('_'):
                # use os.sep so the path-to-dotted-module conversion also
                # works on Windows (where the separator is '\\', not '/')
                module_name = 'pygments.lexers%s.%s' % (
                    root[1:].replace(os.sep, '.'), filename[:-3])
                print(module_name)
                # importlib.import_module returns the leaf module directly,
                # replacing the legacy __import__(name, None, None, ['']) hack
                module = importlib.import_module(module_name)
                for lexer_name in module.__all__:
                    lexer = getattr(module, lexer_name)
                    found_lexers.append(
                        '%r: %r' % (lexer_name,
                                    (module_name,
                                     lexer.name,
                                     tuple(lexer.aliases),
                                     tuple(lexer.filenames),
                                     tuple(lexer.mimetypes))))
    # sort them to make the diff minimal
    found_lexers.sort()
    # extract useful sourcecode from this file
    with open(__file__, encoding='utf-8') as fp:
        content = fp.read()
    # replace crnl to nl for Windows.
    #
    # Note that, originally, contributors should keep nl of master
    # repository, for example by using some kind of automatic
    # management EOL, like `EolExtension
    # <https://www.mercurial-scm.org/wiki/EolExtension>`.
    content = content.replace("\r\n", "\n")
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    # newline='\n' keeps the text layer from re-translating '\n' back to
    # '\r\n' on Windows, which would undo the normalization above
    with open(__file__, 'w', encoding='utf-8', newline='\n') as fp:
        fp.write(header)
        fp.write('LEXERS = {\n    %s,\n}\n\n' % ',\n    '.join(found_lexers))
        fp.write(footer)
    print('=== %d lexers processed.' % len(found_lexers))
|
py | 7dfb27e07831533311c94c6028cfc7fdc8254349 | from tkinter import *
from tkinter import messagebox
import tkinter as tk
import cv2
import re
import database
import fontawesome as fa
import base64
from PIL import Image , ImageTk
class ViewUsers():
    """Tkinter window that lists every enrolled user: id, name, face image."""

    def __init__(self):
        # All user documents from the backing store; each is expected to
        # carry 'id', 'name' and 'image' keys (see disp()).
        self.d = database.Database().getAll()

    def disp(self, root):
        """Open a Toplevel child of *root* and render one grid row per user.

        NOTE(review): doc['image'] is passed straight to PhotoImage(data=...),
        so it is presumably base64-encoded GIF/PNG data -- confirm against the
        database writer.
        """
        i = 0  # grid row counter
        # ar = {}
        self.roo = Toplevel(root)
        self.roo.grid_columnconfigure(0, weight=1)
        self.roo.minsize(300, 500)
        self.roo.title("User Faces")
        for doc in self.d:
            Label(self.roo, text=doc['id']).grid(row=i, column=0, padx=15, pady=25)
            Label(self.roo, text=doc['name']).grid(row=i, column=1, padx=15, pady=25)
            im = PhotoImage(data=doc['image'])
            # Shrink the photo to a quarter of its size in each dimension.
            im = im.subsample(4,4)
            # ar[i] = im
            # Label(self.roo, image=im).grid(row=i, column=2)
            lab = Label(self.roo, image=im)
            # Keep a reference on the widget: Tkinter images are garbage
            # collected (and vanish from screen) if no Python object holds them.
            lab.image = im
            lab.grid(row=i, column=2)
            i = i+1
        self.roo.mainloop()
# vu=ViewUsers().disp()
py | 7dfb27ed129e5f4c81b4af13fc18f9f43870ee89 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import datastore_v1
from google.cloud.datastore_v1 import enums
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Every invocation is logged on the owning channel stub; the reply is the
    next canned response from the channel's queue -- raised when it is an
    exception, returned otherwise.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Record *request*, then replay (or raise) the next queued response."""
        chan = self.channel_stub
        chan.requests.append((self.method, request))

        if not chan.responses:
            return None

        reply = chan.responses.pop()
        if isinstance(reply, Exception):
            raise reply
        return reply if reply else None
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Collects every request issued through its callables in ``requests`` and
    serves canned ``responses`` back (last element first).
    """

    def __init__(self, responses=None):
        # Bug fix: the previous signature used ``responses=[]``.  Default
        # argument values are created ONCE at function-definition time, so
        # every stub built with the default shared a single list and
        # responses/requests leaked between test cases.
        self.responses = [] if responses is None else responses
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        """Return a recording stub callable bound to *method*."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Sentinel error injected through ChannelStub to exercise error paths."""

    pass
class TestDatastoreClient(object):
    """Generated conformance tests for DatastoreClient.

    Each RPC is tested twice: the success path asserts that the stubbed
    response is returned unmodified and that exactly one correctly-built
    request protobuf was sent; the exception path asserts that transport
    errors propagate to the caller.
    """

    def test_lookup(self):
        """lookup() returns the stubbed response and sends a LookupRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.LookupResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"
        keys = []

        response = client.lookup(project_id, keys)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.LookupRequest(project_id=project_id, keys=keys)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_lookup_exception(self):
        """lookup() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"
        keys = []

        with pytest.raises(CustomException):
            client.lookup(project_id, keys)

    def test_run_query(self):
        """run_query() returns the stubbed response and sends a RunQueryRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.RunQueryResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"
        partition_id = {}

        response = client.run_query(project_id, partition_id)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.RunQueryRequest(
            project_id=project_id, partition_id=partition_id
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_run_query_exception(self):
        """run_query() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"
        partition_id = {}

        with pytest.raises(CustomException):
            client.run_query(project_id, partition_id)

    def test_begin_transaction(self):
        """begin_transaction() returns the stubbed transaction handle."""
        # Setup Expected Response
        transaction = b"-34"
        expected_response = {"transaction": transaction}
        expected_response = datastore_pb2.BeginTransactionResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"

        response = client.begin_transaction(project_id)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.BeginTransactionRequest(project_id=project_id)
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_begin_transaction_exception(self):
        """begin_transaction() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"

        with pytest.raises(CustomException):
            client.begin_transaction(project_id)

    def test_commit(self):
        """commit() returns the stubbed response and sends a CommitRequest."""
        # Setup Expected Response
        index_updates = 1425228195
        expected_response = {"index_updates": index_updates}
        expected_response = datastore_pb2.CommitResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"
        mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
        mutations = []

        response = client.commit(project_id, mode, mutations)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.CommitRequest(
            project_id=project_id, mode=mode, mutations=mutations
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_commit_exception(self):
        """commit() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"
        mode = enums.CommitRequest.Mode.MODE_UNSPECIFIED
        mutations = []

        with pytest.raises(CustomException):
            client.commit(project_id, mode, mutations)

    def test_rollback(self):
        """rollback() returns the stubbed response and sends a RollbackRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.RollbackResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"
        transaction = b"-34"

        response = client.rollback(project_id, transaction)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.RollbackRequest(
            project_id=project_id, transaction=transaction
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_rollback_exception(self):
        """rollback() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"
        transaction = b"-34"

        with pytest.raises(CustomException):
            client.rollback(project_id, transaction)

    def test_allocate_ids(self):
        """allocate_ids() returns the stubbed response and sends an AllocateIdsRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.AllocateIdsResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"
        keys = []

        response = client.allocate_ids(project_id, keys)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.AllocateIdsRequest(
            project_id=project_id, keys=keys
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_allocate_ids_exception(self):
        """allocate_ids() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"
        keys = []

        with pytest.raises(CustomException):
            client.allocate_ids(project_id, keys)

    def test_reserve_ids(self):
        """reserve_ids() returns the stubbed response and sends a ReserveIdsRequest."""
        # Setup Expected Response
        expected_response = {}
        expected_response = datastore_pb2.ReserveIdsResponse(**expected_response)

        # Mock the API response
        channel = ChannelStub(responses=[expected_response])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup Request
        project_id = "projectId-1969970175"
        keys = []

        response = client.reserve_ids(project_id, keys)
        assert expected_response == response

        assert len(channel.requests) == 1
        expected_request = datastore_pb2.ReserveIdsRequest(
            project_id=project_id, keys=keys
        )
        actual_request = channel.requests[0][1]
        assert expected_request == actual_request

    def test_reserve_ids_exception(self):
        """reserve_ids() propagates errors raised by the transport."""
        # Mock the API response
        channel = ChannelStub(responses=[CustomException()])
        client = datastore_v1.DatastoreClient(channel=channel)

        # Setup request
        project_id = "projectId-1969970175"
        keys = []

        with pytest.raises(CustomException):
            client.reserve_ids(project_id, keys)
|
py | 7dfb2866847d1e3f557f2a1eb75fc97f10616537 | import yaml
import yaml.parser
class BaseError(Exception):
    """Root of this module's exception hierarchy."""

    pass
class ParserError(BaseError):
    """Raised when the YAML input cannot be parsed."""

    pass
# Prefer the libyaml-backed CLoader when PyYAML was compiled with it; it is
# much faster than the pure-Python Loader but not always available.
if hasattr(yaml, 'CLoader'):
    _Loader = yaml.CLoader  # type: ignore
else:
    _Loader = yaml.Loader  # type: ignore
def load_file(path, encoding='utf-8'):
    """Parse the YAML file at *path* and return the resulting object."""
    with open(path, 'r', encoding=encoding) as stream:
        return load(stream)
def load(string_or_stream):
    """Parse YAML from a string or stream; raise ParserError on bad input."""
    try:
        document = yaml.load(string_or_stream, Loader=_Loader)
    except yaml.parser.ParserError as exc:
        raise ParserError(str(exc)) from exc
    return document
|
py | 7dfb28ae8aca93e6260cb351164065768a49cda6 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "InnoLinksWeb.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | 7dfb293b773a3214958c58aed8e7b13a96fd1285 | {% extends 'docs/source/conf.py.jj2'%}
{%block SPHINX_EXTENSIONS%}
'sphinx.ext.autosummary',
'sphinxcontrib.spelling'
{%endblock%}
{%block custom_doc_theme%}
html_theme = 'default'
def setup(app):
app.add_stylesheet('theme_overrides.css')
{%endblock%}
|
py | 7dfb2ad9dedb28487e5d187427f7f5ba10b2d9a2 | """
Copyright (c) 2014 Thoughtworks.
Copyright (c) 2016 Platform9 Systems Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from nova.virt.ec2 import ec2driver

# Re-export so Nova can load this driver by the short dotted path
# "ec2.EC2Driver" in its compute_driver configuration.
EC2Driver = ec2driver.EC2Driver
|
py | 7dfb2b807af418f43da260e7817dd581897d97c4 | # -*- coding: utf-8 -*-
"""The Apple System Log (ASL) event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class AslFormatter(interface.ConditionalEventFormatter):
  """Formatter for an Apple System Log (ASL) log event."""

  DATA_TYPE = u'mac:asl:event'

  # Conditional pieces: each piece is emitted only when every attribute it
  # references is present in the event values.
  FORMAT_STRING_PIECES = [
      u'MessageID: {message_id}',
      u'Level: {level}',
      u'User ID: {user_sid}',
      u'Group ID: {group_id}',
      u'Read User: {read_uid}',
      u'Read Group: {read_gid}',
      u'Host: {computer_name}',
      u'Sender: {sender}',
      u'Facility: {facility}',
      u'Message: {message}',
      u'{extra_information}']

  FORMAT_STRING_SHORT_PIECES = [
      u'Host: {host}',
      u'Sender: {sender}',
      u'Facility: {facility}']

  SOURCE_LONG = u'ASL entry'
  SOURCE_SHORT = u'LOG'

  # Priority levels (criticality)
  _PRIORITY_LEVELS = {
      0 : u'EMERGENCY',
      1 : u'ALERT',
      2 : u'CRITICAL',
      3 : u'ERROR',
      4 : u'WARNING',
      5 : u'NOTICE',
      6 : u'INFO',
      7 : u'DEBUG'}

  def GetMessages(self, unused_formatter_mediator, event_object):
    """Determines the formatted message strings for an event object.

    Args:
      unused_formatter_mediator: the formatter mediator object (instance of
                                 FormatterMediator); not used by this
                                 formatter.
      event_object: the event object (instance of EventObject).

    Returns:
      A tuple containing the formatted message string and short message string.

    Raises:
      WrongFormatter: if the event object cannot be formatted by the formatter.
    """
    if self.DATA_TYPE != event_object.data_type:
      raise errors.WrongFormatter(u'Unsupported data type: {0:s}.'.format(
          event_object.data_type))

    event_values = event_object.GetValues()

    # Map the numeric syslog priority to its name, e.g. 4 -> u'WARNING (4)'.
    # NOTE(review): the ``long`` check implies this module targets Python 2.
    priority_level = event_values.get(u'level', None)
    if isinstance(priority_level, (int, long)):
      event_values[u'level'] = u'{0:s} ({1:d})'.format(
          self._PRIORITY_LEVELS.get(priority_level, u'UNKNOWN'), priority_level)

    # If no rights are assigned the value is 0xffffffff (-1).
    read_uid = event_values.get(u'read_uid', None)
    if read_uid == 0xffffffff:
      event_values[u'read_uid'] = u'ALL'

    # If no rights are assigned the value is 0xffffffff (-1).
    read_gid = event_values.get(u'read_gid', None)
    if read_gid == 0xffffffff:
      event_values[u'read_gid'] = u'ALL'

    return self._ConditionalFormatMessages(event_values)


# Make the formatter discoverable by data type at import time.
manager.FormattersManager.RegisterFormatter(AslFormatter)
py | 7dfb2c305757e4ce979903a0da732aeafdb585a0 | import setuptools
from os import path
# Absolute path of the directory containing this setup.py.
here = path.abspath(path.dirname(__file__))

# Reuse the README as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name='pycrostates',
    version='0.0.1a',
    author="Victor Férat",
    author_email="[email protected]",
    description="A simple open source Python package for EEGmicrostate segmentation",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): url=None publishes no homepage link -- fill in before release.
    url=None,
    license="BSD-3-Clause",
    python_requires='>=3.6',
    install_requires=["mne", "numpy", "scipy", "joblib"],
    packages=setuptools.find_packages(exclude=['docs', 'tests']),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ]
)
py | 7dfb2cd63c90b01bc641476db3bf64ae2e850b62 | import collections
class TrieNode:
    """One trie node: per-character children plus an end-of-word flag."""

    def __init__(self):
        # defaultdict transparently creates missing children during insert().
        self._children = collections.defaultdict(TrieNode)
        self._is_word = False


class Trie:
    """Prefix tree over strings: insertion, exact lookup, prefix listing."""

    def __init__(self):
        self.root = TrieNode()

    def insert(self, word):
        """Add *word* to the trie."""
        node = self.root
        for ch in word:
            node = node._children[ch]
        node._is_word = True

    def search(self, word):
        """Return True when *word* itself was inserted (not merely a prefix)."""
        node = self._starts_with(word)
        return node is not None and node._is_word

    def _starts_with(self, prefix):
        """Return the node reached by walking *prefix*, or None if absent."""
        node = self.root
        for ch in prefix:
            node = node._children.get(ch)
            if node is None:
                return None
        return node

    def starts_with(self, prefix, root=None, words=None, max_found=100):
        """Depth-first collect up to *max_found* stored words starting with *prefix*."""
        node = self._starts_with(prefix) if root is None else root
        found = [] if words is None else words
        if node is not None and len(found) < max_found:
            if node._is_word:
                found.append(prefix)
            for ch, child in node._children.items():
                self.starts_with(prefix + ch, root=child, words=found,
                                 max_found=max_found)
        return found
py | 7dfb2e1e70b65ccf6f466542610f622a4cf6a0ab | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import torch
import torch.nn.functional as Fu
import torchvision
import numpy as np
from tools.utils import auto_init_args, weight_init, choice_exclude,\
get_visdom_connection, save_gif, gradient_penalty
from torch import nn
from torch.nn.utils.spectral_norm import spectral_norm
from torch.nn.parameter import Parameter
from models.relate_helpers import AdaIngen_obj, AdaIngen_bg, NPE
class RELATEVideo(torch.nn.Module):
def __init__(self,
x_max=5.,
y_max=5.,
n_objects=2,
backgd_dim=30,
obj_dim=30,
seq_len=5,
fixed_objs=False,
obj_size=6,
past=3,
len_ev=None,
custom_param_groups=True,
loss_weights={
'l_gen': 1.,
'l_gen_eval': 1.,
'l_disc': 1.,
'l_xy': 0.,
'l_style_disc': 0.,
'l_style_gen': 0.,
'l_style_gen_eval': 0.,
'l_gradient': 0.,
},
log_vars=[
'objective',
'l_gen',
'l_gen_eval',
'l_disc',
'l_style_disc',
'l_style_gen',
'l_style_gen_eval',
'l_xy',
'l_gradient',
],
**kwargs):
super(RELATEVideo, self).__init__()
# autoassign constructor params to self
auto_init_args(self)
# Use gpu if available
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.latent_dims=[self.backgd_dim, self.obj_dim]
self.grad_pen=(loss_weights.get('l_gradient',0)>0.)
self.zbg = Parameter(0.02*torch.randn([1, 4*64, 16, 16], device=self.device))
self.zfg = Parameter(0.02*torch.randn([1+self.fixed_objs*(self.n_objects-1), 8*64,
self.obj_size, self.obj_size], device=self.device))
# Init generator
self.generator = GAN_gen(latent_dims=[self.latent_dims[0],self.latent_dims[1]+2])
# Init discriminator
self.discriminator = GAN_disc(pos_dim=2, first=self.seq_len)
# Init Gamma
self.Gamma = NPE(self.latent_dims[1]+(self.past-1)*self.latent_dims[1]*(1-int(self.fixed_objs))+2*(self.past-1),
self.latent_dims[0], out_dim=self.latent_dims[1]*(1-int(self.fixed_objs)))
# Init misc MLPs
self.mlp_speed = nn.Sequential(nn.Linear(self.latent_dims[1] + 2 + self.latent_dims[0], 128), nn.LeakyReLU(negative_slope=0.2),
nn.Linear(128, 128), nn.LeakyReLU(negative_slope=0.2), nn.Linear(128, 2*past), nn.Tanh())
self.mlp_xy = NPE(self.latent_dims[1], self.latent_dims[0])
# BCELoss init
self.loss_bce = torch.nn.BCEWithLogitsLoss()
# Init weights
self.apply(weight_init)
def forward(self, gt_images, labels=None, it=0,
exp_dir='', gen=False, **kwargs):
self.preds = {}
if not hasattr(self, 'batch_size'):
self.batch_size = gt_images.size(0)
# Run main network
loss, gen_im = self.run_model(gt_images, gen,
validation=(kwargs['trainmode'] == 'val'))
# fill in the dict of predictions
self.preds['gen_images'] = gen_im
if gen:
self.preds['l_gen_eval'] = loss['l_gen_eval']
self.preds['l_style_gen_eval'] = loss['l_style_gen_eval']
if not gen or kwargs['trainmode'] == 'val':
self.preds['l_disc'] = loss['l_disc']
self.preds['l_gen'] = loss['l_gen']
self.preds['l_style_disc'] = loss['l_style_disc']
self.preds['l_style_gen'] = loss['l_style_gen']
if 'xy' in loss.keys():
self.preds['l_xy'] = loss['xy']
# finally get the optimization objective using self.loss_weights
self.preds['objective'] = self.get_objective(self.preds)
# Save images: one batch during training and all at eval time
if kwargs['trainmode'] == 'val':
os.makedirs(exp_dir+'/videos_val', exist_ok=True)
for i in range(gen_im.size(0)):
save_gif(exp_dir+'/videos_val/%06u.gif' %
(it*self.batch_size+i),
torch.clamp(gen_im[i] + 0.5, 0., 1.))
return self.preds.copy(), self.preds['objective']
def get_objective(self, preds):
losses_weighted = [preds[k] * float(w) for k, w in
self.loss_weights.items()
if k in preds and w != 0.]
if not hasattr(self, '_loss_weights_printed') or\
not self._loss_weights_printed:
print('-------\nloss_weights:')
for k, w in self.loss_weights.items():
print('%20s: %1.2e' % (k, w))
print('-------')
self._loss_weights_printed = True
loss = torch.stack(losses_weighted).sum()
return loss
def _get_param_groups(self):
return {'disc': [{'params': self.discriminator.parameters()}],
'gen': [{'params': self.generator.parameters()},
{'params': self.zbg},
{'params': self.zfg},
{'params': self.Gamma.parameters()},
{'params': self.mlp_xy.parameters()},
{'params': self.mlp_speed.parameters()}]}
def visualize(self, visdom_env_imgs, trainmode,
preds, stats, clear_env=False,
exp_dir=None, show_gt=True):
viz = get_visdom_connection(server=stats.visdom_server,
port=stats.visdom_port)
if not viz.check_connection():
print("no visdom server! -> skipping batch vis")
return
if clear_env: # clear visualisations
print(" ... clearing visdom environment")
viz.close(env=visdom_env_imgs, win=None)
idx_image = 0
title = "e%d_it%d_im%d" % (stats.epoch, stats.it[trainmode], idx_image)
if show_gt:
types = ('gen_images', 'gt_images')
else:
types = ('gen_images',)
for image_type in types:
for i in range(2):
image = torch.clamp(preds[image_type][:, - i], -.5, .5)
image = (image + .5)
image = torchvision.utils.make_grid(image, nrow=8).data.cpu().numpy()
viz.image(image, env=visdom_env_imgs,
opts={'title': title+"_%s_%d" % (image_type, - i)})
def run_model(self, x, gen=False, validation=False):
batch_size = x.size(0)
loss = {}
# Sample different variables
if not validation:
self.background_vec, self.appearance_vec = 2*torch.rand([batch_size, self.latent_dims[0]], device=self.device)-1,\
2*torch.rand([1, batch_size, self.n_objects*self.latent_dims[1]], device=self.device)-1
else:
self.background_vec, self.appearance_vec = 1.5*torch.rand([batch_size, self.latent_dims[0]], device=self.device)-0.75,\
2*torch.rand([1, batch_size, self.n_objects*self.latent_dims[1]], device=self.device)-1
self.tx = (2*self.x_max*torch.rand([self.n_objects*2, batch_size, 1], device=self.device)-self.x_max) / 8.
self.ty = (2*self.y_max*torch.rand([self.n_objects*2, batch_size, 1], device=self.device)-self.y_max) / 8.
if not self.fixed_objs:
# Sample n_objects - numbers of objects
n_obj = np.random.randint(0, self.n_objects, batch_size)
choice = [np.random.choice(self.n_objects, n_obj[i], replace=False)
for i in range(batch_size)]
select_object = torch.from_numpy(np.array([choice_exclude(self.n_objects, exc) for exc in choice])).long()
for i in range(batch_size):
for j in choice[i]:
self.tx[j, i, 0] = -2.
self.ty[j, i, 0] = -2.
else:
n_obj = self.n_objects
choice = [[] for i in range(batch_size)]
select_object = torch.from_numpy(np.array([choice_exclude(self.n_objects, exc) for exc in choice])).long()
self.appearance_vec = torch.ones_like(self.appearance_vec)
# Compute adjustment to position
xy = self.mlp_xy(torch.cat((self.appearance_vec.view(batch_size, self.n_objects, self.latent_dims[1]).permute(1,0,2),\
self.tx[:self.n_objects], self.ty[:self.n_objects], self.tx[:self.n_objects]*0, self.ty[:self.n_objects]*0),2), self.background_vec)
tx, ty = self.tx, self.ty
self.tx = 0.5*xy[:, :, :1] + tx[:self.n_objects]
self.ty = 0.5*xy[:, :, 1:] + ty[:self.n_objects]
self.tx = torch.clamp(self.tx.reshape(self.n_objects,batch_size,1).contiguous()[:,:,:],-.8,.8) - 1.*(self.tx.reshape(self.n_objects,batch_size,1).contiguous()[:,:,:] < -1.5).float()
self.ty = torch.clamp(self.ty.reshape(self.n_objects,batch_size,1).contiguous()[:,:,:],-.8,.8) - 1.*(self.tx.reshape(self.n_objects,batch_size,1).contiguous()[:,:,:] < -1.5).float()
self.appearance_vec = self.appearance_vec.view(batch_size, self.n_objects, self.latent_dims[1]).permute(1,0,2)
# Adjust velocity
for i in range(self.n_objects):
v_n = self.mlp_speed(torch.cat((self.appearance_vec[i], self.tx[i].detach(), self.ty[i].detach(), self.background_vec),1)) * (self.tx[i] > -1.5).float()
if i == 0:
self.v = v_n.unsqueeze(0)
else:
self.v = torch.cat((self.v, v_n[None]),0)
self._vx, self._vy = [self.v[:,:,i:i+1] for i in range(self.past)], [self.v[:,:,self.past+i:self.past+i+1] for i in range(self.past)]
tx_tmp = self.tx
ty_tmp = self.ty
self.appearance_vec = [self.appearance_vec for _ in range(self.past)]
len_s = self.len_ev if validation else 20
for i in range(len_s):
new_sv = self.Gamma(torch.cat((torch.cat(self.appearance_vec[-self.past+(self.past-1)*int(self.fixed_objs):],2), tx_tmp, ty_tmp, torch.cat(self._vx[-self.past:],2), torch.cat(self._vy[-self.past:],2)),2), self.background_vec)
self._vx.append(new_sv[:, :, :1])
self._vy.append(new_sv[:, :, 1:2])
tx_tmp = tx_tmp + self._vx[-1]
ty_tmp = ty_tmp + self._vy[-1]
if not self.fixed_objs:
self.appearance_vec.append(0*new_sv[:,:,2:]+self.appearance_vec[-1])
else:
self.appearance_vec.append(self.appearance_vec[-1])
self._vx = torch.stack(self._vx[self.past-1:],0)
self._vy = torch.stack(self._vy[self.past-1:],0)
self.appearance_vec = torch.stack(self.appearance_vec[self.past-1:],0)
self._vx[0], self._vy[0] = 0*self._vx[0], 0*self._vy[0]
self._vx = self._vx.cumsum(0)
self._vy = self._vy.cumsum(0)
# Randomly select starting point
select_start = torch.from_numpy(np.random.randint(0,20-self.seq_len-1, batch_size)).long().view(1,1,batch_size,1).repeat(1,self.n_objects,1,1)
if torch.cuda.is_available():
select_start = select_start.cuda()
if validation:
select_start = (select_start*0 + 1.).long()
range_p = self.seq_len if self.len_ev is None else self.len_ev
self._vx = torch.cat([torch.gather(self._vx, 0, select_start+i) for i in range(range_p)], 0)
self._vy = torch.cat([torch.gather(self._vy, 0, select_start+i) for i in range(range_p)], 0)
self.appearance_vec = torch.cat([torch.gather(self.appearance_vec, 0, select_start.repeat(1,1,1,self.latent_dims[1])+i) for i in range(range_p)],0)
# Run encoder first for each positions
gen_images, gen_vec = [], []
for i in range(range_p):
gen_images_t, gen_vec_t = self.generator(batch_size, [self.zbg, self.zfg], [self.background_vec, torch.cat([self.appearance_vec[i],0*(self.tx+self._vx[:i+1].sum(0)).detach(),0*(self.ty+self._vy[:i+1].sum(0)).detach()],2)],\
self.tx+self._vx[i], self.ty+self._vy[i], select=[select_object] if not i else None)
gen_images.append(gen_images_t)
gen_vec.append(gen_vec_t)
# In case we generate a super resolution
# if gen_images.shape[2]!=x.shape[2] or gen_images.shape[3]!=x.shape[3]:
# gen_images = Fu.interpolate(gen_images, size=(x.shape[2], x.shape[3]))
# Position predictor
# self.discriminator.dh4.hidden = self.discriminator.dh4.init_hidden(batch_size)
_, _, gen_xy, _ = self.discriminator(torch.cat((gen_vec[0],gen_vec[0].repeat(1,self.seq_len-1,1,1).detach()),1))
pos_tensor = torch.cat((self.tx.detach()[select_object,torch.arange(batch_size)]+self._vx[0][select_object,torch.arange(batch_size)],\
self.ty.detach()[select_object,torch.arange(batch_size)]+self._vy[0][select_object,torch.arange(batch_size)]),1).contiguous().view(-1,2)
loss['xy'] = torch.mean((gen_xy-pos_tensor)**2)
# Run discriminator on real/fake images
_, gen_logits, _, gen_style = self.discriminator(torch.cat((gen_images[:self.seq_len]),1))
if not gen or validation:
_, x_logits, _, disc_style = self.discriminator(torch.cat(([x[:,i] for i in range(self.seq_len)]), 1))
loss['l_disc'] = self.loss_bce(x_logits, torch.ones_like(x_logits))
loss['l_gen'] = self.loss_bce(gen_logits, torch.zeros_like(gen_logits))
loss['l_style_disc'] = sum([self.loss_bce(z, torch.ones_like(z)) for z in disc_style])
loss['l_style_gen'] = sum([self.loss_bce(z, torch.zeros_like(z)) for z in gen_style])
if gen or validation:
loss['l_gen_eval'] = self.loss_bce(gen_logits, torch.ones_like(gen_logits))
loss['l_style_gen_eval'] = sum([self.loss_bce(z, torch.ones_like(z)) for z in gen_style])
return loss, torch.stack(gen_images,0).permute(1,0,2,3,4)
class GAN_gen(nn.Module):
    """Scene generator: builds AdaIN-conditioned background and per-object
    feature maps at 16x16, composes them, and upsamples to a 64x64 RGB image.

    latent_dims = [background latent size, per-object latent size].
    """

    def __init__(self, latent_dims=[90,30], gf_dim=64, c_dim=3):
        super(GAN_gen, self).__init__()
        auto_init_args(self)

        # NOTE(review): s_h*/s_w* appear unused; presumably documenting the
        # intermediate spatial resolutions (64 -> 32 -> 16) -- confirm before
        # removing.
        s_h, s_w = 64, 64
        s_h2, s_w2 = 32, 32
        s_h4, s_w4 = 16, 16

        self.bg_generator = AdaIngen_bg(latent_dims[0], gf_dim,
                                        f_dim=gf_dim*2, lrelu=True)
        self.obj_generator = AdaIngen_obj(latent_dims[1], latent_dims[0], gf_dim,
                                          f_dim=gf_dim*2, lrelu=True)
        # 16x16 -> 32x32 -> 64x64 upsampling tower ending in c_dim channels.
        self.deconv1 = nn.Sequential(nn.ConvTranspose2d(4*self.gf_dim, 2*self.gf_dim, 4, stride=2, padding=1), nn.LeakyReLU(negative_slope=0.2))
        self.deconv2 = nn.Sequential(nn.ConvTranspose2d(2*self.gf_dim, self.gf_dim, 4, stride=2, padding=1), nn.LeakyReLU(negative_slope=0.2))
        self.deconv3 = nn.Sequential(nn.ConvTranspose2d(self.gf_dim, self.gf_dim, 3, stride=1, padding=1), nn.LeakyReLU(negative_slope=0.2))
        self.deconv4 = nn.Sequential(nn.ConvTranspose2d(self.gf_dim, self.gf_dim, 4, stride=2, padding=1), nn.LeakyReLU(negative_slope=0.2))
        self.deconv5 = nn.Sequential(nn.ConvTranspose2d(self.gf_dim, self.c_dim, 3, stride=1, padding=1))
        self.upsample_net = nn.Sequential(*[getattr(self,'deconv%u'%i) for i in range(1,6)])

    def forward(self, batch_size, const_tensor, z, t_x, t_y, select=[]):
        # select=[] is only read here, so the shared mutable default is
        # harmless; it flags the optional "render one chosen object + bg" pass.
        n_objects = z[1].size(0)
        # Composition: sum the feature maps when per-object constant tensors
        # are provided, otherwise take an element-wise max over objects + bg.
        if const_tensor[1].size(0) > 1:
            comp_func = lambda x : torch.sum(x,dim=0)
        else:
            comp_func = lambda x : torch.max(x,dim=0)[0]

        all_objects = []
        for i in range(n_objects):
            if const_tensor[1].size(0) > 1 :
                all_objects.append(self.obj_generator(const_tensor[1][i:i+1], z[1][i], t_x[i], t_y[i]))
            else:
                all_objects.append(self.obj_generator(const_tensor[1], z[1][i], t_x[i], t_y[i], z0=z[0]))

        h_BG = self.bg_generator(const_tensor[0], z[0])
        all_objects.append(h_BG)
        h2 = comp_func(torch.stack(all_objects, 0))

        output = self.upsample_net(h2)

        gen_vec = None
        if select:
            # Re-render using only the selected object (per batch element)
            # composited with the background.
            select_objs = torch.stack(all_objects[:-1], 0)[select[0], torch.arange(batch_size)]
            h2 = comp_func(torch.stack([select_objs, all_objects[-1]], 0))
            out = self.upsample_net(h2)
            gen_vec = Fu.tanh(out)

        return Fu.tanh(output), gen_vec
class GAN_disc(nn.Module):
    """Spectral-norm CNN discriminator over a stack of `first` RGB frames.

    forward() returns (sigmoid(logit), raw logit, position estimate of size
    pos_dim, list of per-scale "style" logits).
    """

    def __init__(self, df_dim=64, pos_dim=90, first=5):
        super(GAN_disc, self).__init__()
        auto_init_args(self)

        def spec_conv(in_channels, out_channels, k_size=5):
            # Stride-2 conv wrapped in spectral normalization (halves H and W).
            return spectral_norm(nn.Conv2d(in_channels, out_channels,
                                           k_size, stride=2, padding=k_size//2))

        # Input has 3*first channels: the frames are concatenated channel-wise.
        self.disc_conv1 = nn.Conv2d(3*first,int(df_dim),5,stride=2,padding=5//2)
        self.lrelu1 = nn.LeakyReLU(negative_slope=0.2)
        self.disc_conv2 = spec_conv(int(df_dim), int(df_dim*2))
        self.inorm1 = nn.Sequential(spectral_norm(nn.InstanceNorm2d(int(df_dim*2), affine=True)), nn.LeakyReLU(negative_slope=0.2))
        self.disc_conv3 = spec_conv(int(df_dim*2), int(df_dim*4))
        self.inorm2 = nn.Sequential(spectral_norm(nn.InstanceNorm2d(int(df_dim*4), affine=True)), nn.LeakyReLU(negative_slope=0.2))
        self.disc_conv4 = spec_conv(int(df_dim*4), int(df_dim*8))
        self.inorm3 = nn.Sequential(spectral_norm(nn.InstanceNorm2d(int(df_dim*8), affine=True)), nn.LeakyReLU(negative_slope=0.2))
        self.disc_conv5 = spec_conv(int(df_dim*8), int(df_dim*16))
        self.inorm4 = nn.Sequential(spectral_norm(nn.InstanceNorm2d(int(df_dim*16), affine=True)), nn.LeakyReLU(negative_slope=0.2))

        # Get all linear regressors
        # classN heads consume [channel means, channel variances] of each
        # intermediate activation, hence the doubled input widths.
        self.class1 = nn.Linear(int(df_dim*4),1)
        self.class2 = nn.Linear(int(df_dim*8),1)
        self.class3 = nn.Linear(int(df_dim*16),1)
        self.class4 = nn.Linear(int(df_dim*32),1)
        self.dh4 = spectral_norm(nn.Linear(int(df_dim*16*16), 1))
        self.enc = spectral_norm(nn.Linear(int(df_dim*16*16), self.pos_dim))

    def forward(self, x):
        out = self.lrelu1(self.disc_conv1(x))
        out = self.disc_conv2(out)
        # Per-scale "style" logits from mean/variance statistics of the maps.
        l1 = self.class1(torch.cat([torch.mean(out,[2,3]),\
            torch.var(out.view(out.size(0), out.size(1), -1) ,2)],1))
        out = self.disc_conv3(self.inorm1(out))
        l2 = self.class2(torch.cat([torch.mean(out,[2,3]), \
            torch.var(out.view(out.size(0), out.size(1), -1) ,2)],1))
        out = self.disc_conv4(self.inorm2(out))
        l3 = self.class3(torch.cat([torch.mean(out,[2,3]), \
            torch.var(out.view(out.size(0), out.size(1), -1) ,2)],1))
        out = self.inorm3(out)
        style_list = [l1, l2, l3]
        out = self.disc_conv5(out)
        l4 = self.class4(torch.cat([torch.mean(out,[2,3]), \
            torch.var(out.view(out.size(0), out.size(1), -1) ,2)],1))
        out = self.inorm4(out)
        style_list.append(l4)
        out = out.view(out.size(0), -1)
        #Returning logits to determine whether the images are real or fake
        h4 = self.dh4(out)
        cont_vars = self.enc(out)
        return Fu.sigmoid(h4), h4, Fu.tanh(cont_vars), style_list
py | 7dfb2e58defa66549510155fba2dc791d2dfdfa6 | from MegaFace_Evaluation.tools.plot_megaface_result import plot_megaface_result
if __name__ == '__main__':
    # Directories holding the raw MegaFace evaluation output to visualize.
    result_dirs = ['/data/face_datasets/test_datasets/face_recognition/MegaFace/results/']
    # One legend label per result directory.
    margin_labels = ['ArcFace']
    probe_set = 'facescrub'
    comparison_dir = None          # no third-party method results to overlay
    export_other_metrics = False   # skip TPR/rank-1 dumps for other methods
    fpr_target = 1e-6              # operating point for the reported TPR
    output_dir = './visualization_results'
    plot_megaface_result(
        result_dirs,
        margin_labels,
        probe_set,
        output_dir,
        comparison_dir,
        export_other_metrics,
        target_fpr=fpr_target,
    )
|
py | 7dfb302c6422acbae119bdd0cc18daa91291ae84 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import pytest
from asv import util
from . import tools
def test_check(capsys, basic_conf):
    """`asv check` on the example suite must fail, naming exactly one
    wrong-argument-count benchmark."""
    # basic_conf fixture supplies an isolated project dir, repo, asv config
    # and machine file.
    tmpdir, local, conf, machine_file = basic_conf
    # Test check runs (with full benchmark suite)
    with pytest.raises(util.UserError, match="Benchmark suite check failed"):
        tools.run_asv_with_conf(conf, 'check', "--python=same")
    text, err = capsys.readouterr()
    # The offending benchmark is reported by name with the expected arity...
    assert re.search(r"params_examples\.track_wrong_number_of_args: call: "
                     r"wrong number of arguments.*: expected 1, has 2", text)
    # ...and it is the only argument-count problem in the output.
    assert text.count("wrong number of arguments") == 1
|
py | 7dfb3194cd23bd7b39e2a6da7348bab64466a397 | import nmap
# Build a scanner backed by the system's nmap binary.
port_scanner = nmap.PortScanner()
# Sweep ports 1-100 on hosts 11.11.3.200 through .220 (same targets as before).
port_scanner.scan('11.11.3.200-220', '1-100')
# Report every host that answered the sweep.
print(port_scanner.all_hosts())
|
py | 7dfb3345c2962558d27cf92a485b45dfdba3846d |
import json
import boto3
import time
import uuid
# Module-level Chime API client shared by every handler below (created once,
# reused across invocations).
chime = boto3.client('chime')
def authorizeEIP (voiceConnectorId, elasticIP):
    """Point the Voice Connector's SIP origination at `elasticIP` (UDP 5060)
    and allow termination (outbound calling) only from that /32, US region,
    capped at 1 call per second."""
    response = chime.put_voice_connector_origination(
        VoiceConnectorId=voiceConnectorId,
        Origination={
            'Routes': [
                {
                    'Host': elasticIP,
                    'Port': 5060,
                    'Protocol': 'UDP',
                    'Priority': 1,
                    'Weight': 1
                },
            ],
            'Disabled': False
        }
    )
    print(response)
    response = chime.put_voice_connector_termination(
        VoiceConnectorId=voiceConnectorId,
        Termination={
            'CpsLimit': 1,
            'CallingRegions': [
                'US',
            ],
            'CidrAllowedList': [
                elasticIP + '/32',
            ],
            'Disabled': False
        }
    )
    print(response)
def getPhoneNumber ():
    """Search for, order, and poll for one US (IL) phone number via Chime.

    Returns the E.164 number string on success, or the sentinel string
    'Could not get phone number' if the order has not reached status
    'Successful' after 5 polls (~25 seconds).
    """
    search_response = chime.search_available_phone_numbers(
        State='IL',
        MaxResults=1
    )
    phoneNumberToOrder = search_response['E164PhoneNumbers'][0]
    print('Phone Number: ' + phoneNumberToOrder)
    phone_order = chime.create_phone_number_order(
        ProductType='VoiceConnector',
        E164PhoneNumbers=[
            phoneNumberToOrder,
        ]
    )
    print('Phone Order: ' + str(phone_order))
    order_id = phone_order['PhoneNumberOrder']['PhoneNumberOrderId']
    check_phone_order = chime.get_phone_number_order(PhoneNumberOrderId=order_id)
    order_status = check_phone_order['PhoneNumberOrder']['Status']
    attempts = 0
    while order_status != 'Successful':
        # Bug fix: decide on the retry budget *before* sleeping/polling again.
        # The original checked `timeout == 5` after the 5th re-poll and
        # returned the error string even when that final poll had succeeded.
        if attempts >= 5:
            return 'Could not get phone number'
        attempts += 1
        print('Checking status: ' + str(order_status))
        time.sleep(5)
        check_phone_order = chime.get_phone_number_order(PhoneNumberOrderId=order_id)
        order_status = check_phone_order['PhoneNumberOrder']['Status']
    return phoneNumberToOrder
def createVoiceConnector (region, phoneNumber):
    """Create a Chime Voice Connector and associate `phoneNumber` with it.

    Returns a dict with 'voiceConnectorId', 'outboundHostName' and
    'phoneNumber'.

    NOTE(review): the `region` parameter is printed but never used --
    AwsRegion is hard-coded to 'us-east-1' below. Confirm whether the
    connector should actually be created in `region`.
    """
    print(str(uuid.uuid1()))
    print(region)
    response = chime.create_voice_connector(
        # uuid1 suffix keeps connector names unique per invocation.
        Name='Trunk' + str(uuid.uuid1()),
        AwsRegion='us-east-1',
        RequireEncryption=False
    )
    voiceConnectorId = response['VoiceConnector']['VoiceConnectorId']
    outboundHostName = response['VoiceConnector']['OutboundHostName']
    response = chime.associate_phone_numbers_with_voice_connector(
        VoiceConnectorId=voiceConnectorId,
        E164PhoneNumbers=[
            phoneNumber,
        ],
        # Steal the number even if it is already associated elsewhere.
        ForceAssociate=True
    )
    voiceConnector = { 'voiceConnectorId': voiceConnectorId, 'outboundHostName': outboundHostName, 'phoneNumber': phoneNumber}
    return voiceConnector
def on_event(event, context):
    """Custom-resource entry point: dispatch on the CloudFormation RequestType."""
    print(event)
    handlers = {
        'Create': on_create,
        'Update': on_update,
        'Delete': on_delete,
    }
    request_type = event['RequestType']
    handler = handlers.get(request_type)
    if handler is None:
        raise Exception("Invalid request type: %s" % request_type)
    return handler(event)
def on_create(event):
    """Create flow: order a number, build a Voice Connector, authorize the EIP.

    Returns the connector details under 'Data' so CloudFormation exposes
    them as resource attributes.
    """
    physical_id = 'VoiceConnectorResources'
    region = event['ResourceProperties']['region']
    elasticIP = event['ResourceProperties']['eip']
    # NOTE(review): getPhoneNumber() may return the sentinel string
    # 'Could not get phone number'; it is passed on unchecked here.
    newPhoneNumber = getPhoneNumber()
    voiceConnector = createVoiceConnector(region, newPhoneNumber)
    authorizeEIP(voiceConnector['voiceConnectorId'], elasticIP)
    return { 'PhysicalResourceId': physical_id, 'Data': voiceConnector }
def on_update(event):
    """Update flow: nothing to change server-side; log and echo the id."""
    resource_id = event["PhysicalResourceId"]
    new_props = event["ResourceProperties"]
    print("update resource %s with props %s" % (resource_id, new_props))
    return { 'PhysicalResourceId': resource_id }
def on_delete(event):
    """Delete flow: no teardown is performed here; log and echo the id."""
    resource_id = event["PhysicalResourceId"]
    print("delete resource %s" % resource_id)
    return { 'PhysicalResourceId': resource_id }
|
py | 7dfb33677d6c0cb2a195112031d39f008c2a445b | from tabulate import tabulate
class Profile:
    """Snapshot of a RuneScape player profile.

    Carries identity/summary fields, quest counters, recent activities and
    one dict per skill exactly as delivered in the source payload's
    ``skillvalues`` entries (each dict gains a ``name`` key in ``from_json``).
    """
    def __init__(
        self,
        name: str,
        combatlevel: int,
        logged_in: bool,
        rank: int,
        melee_total: int,
        magic_total: int,
        ranged_total: int,
        totalskill: int,
        totalxp: int,
        questsstarted: int,
        questscomplete: int,
        questsnotstarted: int,
        activities: list,
        attack: dict,
        defence: dict,
        strength: dict,
        constitution: dict,
        ranged: dict,
        prayer: dict,
        magic: dict,
        cooking: dict,
        woodcutting: dict,
        fletching: dict,
        fishing: dict,
        firemaking: dict,
        crafting: dict,
        smithing: dict,
        mining: dict,
        herblore: dict,
        agility: dict,
        thieving: dict,
        slayer: dict,
        farming: dict,
        runecrafting: dict,
        hunter: dict,
        construction: dict,
        summoning: dict,
        dungeoneering: dict,
        divination: dict,
        invention: dict,
        archaeology: dict,
    ):
        super().__init__()
        # Direct field-for-field storage of every constructor argument.
        self.name = name
        self.combatlevel = combatlevel
        self.logged_in = logged_in
        self.rank = rank
        self.melee_total = melee_total
        self.magic_total = magic_total
        self.ranged_total = ranged_total
        self.totalskill = totalskill
        self.totalxp = totalxp
        self.questsstarted = questsstarted
        self.questscomplete = questscomplete
        self.questsnotstarted = questsnotstarted
        self.activities = activities
        self.attack = attack
        self.defence = defence
        self.strength = strength
        self.constitution = constitution
        self.ranged = ranged
        self.prayer = prayer
        self.magic = magic
        self.cooking = cooking
        self.woodcutting = woodcutting
        self.fletching = fletching
        self.fishing = fishing
        self.firemaking = firemaking
        self.crafting = crafting
        self.smithing = smithing
        self.mining = mining
        self.herblore = herblore
        self.agility = agility
        self.thieving = thieving
        self.slayer = slayer
        self.farming = farming
        self.runecrafting = runecrafting
        self.hunter = hunter
        self.construction = construction
        self.summoning = summoning
        self.dungeoneering = dungeoneering
        self.divination = divination
        self.invention = invention
        self.archaeology = archaeology
    def to_json(self) -> dict:
        """Serialize every profile field into a plain dict."""
        return {
            "name": self.name,
            "combatlevel": self.combatlevel,
            "logged_in": self.logged_in,
            "rank": self.rank,
            "melee_total": self.melee_total,
            "magic_total": self.magic_total,
            "ranged_total": self.ranged_total,
            "totalskill": self.totalskill,
            "totalxp": self.totalxp,
            "questsstarted": self.questsstarted,
            "questscomplete": self.questscomplete,
            "questsnotstarted": self.questsnotstarted,
            "activities": self.activities,
            "attack": self.attack,
            "defence": self.defence,
            "strength": self.strength,
            "constitution": self.constitution,
            "ranged": self.ranged,
            "prayer": self.prayer,
            "magic": self.magic,
            "cooking": self.cooking,
            "woodcutting": self.woodcutting,
            "fletching": self.fletching,
            "fishing": self.fishing,
            "firemaking": self.firemaking,
            "crafting": self.crafting,
            "smithing": self.smithing,
            "mining": self.mining,
            "herblore": self.herblore,
            "agility": self.agility,
            "thieving": self.thieving,
            "slayer": self.slayer,
            "farming": self.farming,
            "runecrafting": self.runecrafting,
            "hunter": self.hunter,
            "construction": self.construction,
            "summoning": self.summoning,
            "dungeoneering": self.dungeoneering,
            "divination": self.divination,
            "invention": self.invention,
            "archaeology": self.archaeology,
        }
    @classmethod
    async def from_json(cls, data: dict):
        """Build a Profile from a raw profile payload dict.

        NOTE(review): declared ``async`` but contains no ``await`` --
        confirm callers depend on it being a coroutine.
        """
        # Numeric skill id -> display name for the payload's ``skillvalues``
        # entries (RS3 skill set, ids 0-27).
        skill_id_conversion = {
            "0": "Attack",
            "1": "Defence",
            "2": "Strength",
            "3": "Constitution",
            "4": "Ranged",
            "5": "Prayer",
            "6": "Magic",
            "7": "Cooking",
            "8": "Woodcutting",
            "9": "Fletching",
            "10": "Fishing",
            "11": "Firemaking",
            "12": "Crafting",
            "13": "Smithing",
            "14": "Mining",
            "15": "Herblore",
            "16": "Agility",
            "17": "Thieving",
            "18": "Slayer",
            "19": "Farming",
            "20": "Runecrafting",
            "21": "Hunter",
            "22": "Construction",
            "23": "Summoning",
            "24": "Dungeoneering",
            "25": "Divination",
            "26": "Invention",
            "27": "Archaeology"
        }
        def get_skill(skill_id):
            # Find the matching entry, attach its display name, return it.
            # Implicitly returns None when the id is absent from the payload.
            for skill in data["skillvalues"]:
                if skill["id"] == skill_id:
                    skill["name"] = skill_id_conversion[str(skill_id)]
                    return skill
        # Payload encodes the flag as the *string* "true"/"false".
        logged_in = True if data["loggedIn"] == "true" else False
        return cls(
            data["name"],
            data["combatlevel"],
            logged_in,
            data["rank"],
            data["melee"],
            data["magic"],
            data["ranged"],
            data["totalskill"],
            data["totalxp"],
            data["questsstarted"],
            data["questscomplete"],
            data["questsnotstarted"],
            data["activities"],
            get_skill(0),
            get_skill(1),
            get_skill(2),
            get_skill(3),
            get_skill(4),
            get_skill(5),
            get_skill(6),
            get_skill(7),
            get_skill(8),
            get_skill(9),
            get_skill(10),
            get_skill(11),
            get_skill(12),
            get_skill(13),
            get_skill(14),
            get_skill(15),
            get_skill(16),
            get_skill(17),
            get_skill(18),
            get_skill(19),
            get_skill(20),
            get_skill(21),
            get_skill(22),
            get_skill(23),
            get_skill(24),
            get_skill(25),
            get_skill(26),
            get_skill(27),
        )
    @classmethod
    async def from_text(cls, data: str):
        """Render a hiscores-style CSV payload as an org-mode table string.

        ``data`` is raw bytes despite the annotation (it is ``decode()``-d
        below).

        NOTE(review): ``order`` below is the OSRS hiscores row ordering
        (Hitpoints, Bounty Hunter, clue scrolls, ...), which differs from
        the RS3 skill set used in ``from_json`` -- confirm both data
        sources are intended.
        """
        order = [
            "Overall",
            "Attack",
            "Defence",
            "Strength",
            "Hitpoints",
            "Ranged",
            "Prayer",
            "Magic",
            "Cooking",
            "Woodcutting",
            "Fletching",
            "Fishing",
            "Firemaking",
            "Crafting",
            "Smithing",
            "Mining",
            "Herblore",
            "Agility",
            "Thieving",
            "Slayer",
            "Farming",
            "Runecrafting",
            "Hunter",
            "Construction",
            "Bounty Hunter - Hunter",
            "Bounty Hunter - Rogues",
            "Clue Scrolls All",
            "Clue Scrolls Easy",
            "Clue Scrolls Medium",
            "Clue Scrolls Hard",
            "Clue Scrolls Elite",
            "Clue Scrolls Master",
            "LMS Rank"
        ]
        skills_list = []
        # Each line is "rank,level,xp"; malformed lines and lines beyond
        # the `order` table are skipped silently.
        for line in enumerate(data.decode().split("\n")):
            try:
                xp = line[1].split(",")[2]
                rank = line[1].split(",")[0]
                level = line[1].split(",")[1]
                skills_list.append([order[line[0]], level, xp, rank])
            except Exception:
                pass
        return tabulate(
            skills_list, headers=["Skill", "Level", "Experience", "Rank"], tablefmt="orgtbl"
        )
|
py | 7dfb336dcf7ac051ec86f0314bb714bc9cc914d1 | import ipywidgets as w
from ipyfilechooser import FileChooser
from IPython.display import display
from eln2nwb import labfolder as eln
from eln2nwb import eln2widget
from eln2nwb import convert2nwb
from nwbwidgets import nwb2widget
from pynwb import NWBHDF5IO
import os
# Project choices shown in the launcher dropdown; each maps to a converter UI.
INITIAL_PARAMS = {'project_options': ['AG Tovote - States', 'AG Ip - Deep brain stimulation']}
class GUI:
    """Root widget: a project selector that swaps in the matching converter UI."""
    def __init__(self):
        self.launch_converter = Launch_converter(INITIAL_PARAMS['project_options'])
        self.out = w.Output()
        self.widget = w.VBox([self.launch_converter.widget,
                              self.out])
        self.launch_converter.button.on_click(self.on_launch_converter_button_clicked)
    def on_launch_converter_button_clicked(self, b):
        """Replace the launcher with the converter for the selected project."""
        if self.launch_converter.dropdown.value == 'AG Tovote - States':
            with self.out:
                self.converter = Convert_states(self.out, self.widget)
            self.widget.children = [self.converter.widget,
                                    self.out]
        elif self.launch_converter.dropdown.value == 'AG Ip - Deep brain stimulation':
            # This project's converter is not implemented yet.
            with self.out:
                print('Coming soon!')
class Launch_converter:
    """Dropdown + button row used to pick a project and launch its converter."""
    def __init__(self, options):
        self.dropdown = w.Dropdown(description='In order to launch the correct NWB converter, please select your project:',
                                   options=options,
                                   value=options[0],
                                   layout={'width': '80%'},
                                   style={'description_width': 'initial'})
        self.button = w.Button(description='Launch converter', icon='rocket')
        self.widget = w.HBox([self.dropdown, self.button])
class Convert_states:
    """Converter UI for the 'AG Tovote - States' project.

    Workflow: enter two ELN entry ids (injection + implantation), pull their
    metadata from the ELN, describe one or more recording sessions, then run
    the NWB conversion and hand off to the Inspect view.
    """
    def __init__(self, parent_out, parent_widget):
        # Conversion parameters accumulated across the workflow steps.
        self.params = {}
        self.hspace = w.Label(value='', layout={'width': '10px'})
        self.vspace = w.Label(value='', layout={'height': '3px'})
        self.intro = w.Label(value='First, please provide the IDs of the ELN entries where you documented the respective surgeries:')
        self.set_injection_eln_entry_id = w.Text(description='ID of injection ELN entry:',
                                                 placeholder='1234567',
                                                 layout={'width': '40%', 'height': '50px'},
                                                 style={'description_width': 'initial'})
        self.set_implantation_eln_entry_id = w.Text(description='ID of implantation ELN entry',
                                                    placeholder='1234567',
                                                    layout={'width': '40%', 'height': '50px'},
                                                    style={'description_width': 'initial'})
        self.button_retrieve_eln_data = w.Button(description='Confirm', icon='check')
        self.out_injection = w.Output(layout={'width': '40%'})
        self.out_implantation = w.Output(layout={'width': '40%'})
        # Hidden until ELN data has been retrieved successfully.
        self.sessions_accordion = w.Accordion(children=[],
                                              layout={'width': '90%',
                                                      'visibility': 'hidden'})
        self.sessions_accordion.children = [States_session(self.sessions_accordion, 0).widget]
        self.sessions_accordion.set_title(0, 'session 1')
        # NOTE(review): overwrites the 3px vspace assigned above -- the
        # first assignment is dead.
        self.vspace = w.Label(value='', layout={'width': '90%', 'height': '20px'})
        self.button_initialize_conversion = w.Button(description='Initialize conversion', icon='rocket',
                                                     style={'description_width': 'initial',
                                                            'button_color': 'orange',
                                                            'fontcolor': 'black'},
                                                     layout={'width': '90%',
                                                             'visibility': 'hidden'})
        self.parent_out = parent_out
        self.widget = w.VBox([self.intro,
                              self.vspace,
                              w.HBox([self.set_injection_eln_entry_id,
                                      self.hspace,
                                      self.set_implantation_eln_entry_id,
                                      self.button_retrieve_eln_data],
                                     layout={'width': '90%'}),
                              w.HBox([self.out_injection, self.hspace, self.out_implantation], layout={'width': '90%'}),
                              self.vspace,
                              self.sessions_accordion,
                              self.vspace,
                              self.button_initialize_conversion])
        self.button_initialize_conversion.on_click(self.on_button_initialize_conversion_clicked)
        self.button_retrieve_eln_data.on_click(self.on_button_retrieve_eln_data_clicked)
    def on_button_retrieve_eln_data_clicked(self, b):
        """Fetch injection/implantation metadata from the ELN and display it."""
        self.get_login_credentials()
        self.params['injection'] = {'eln_entry_id': self.set_injection_eln_entry_id.value}
        self.params['implantation'] = {'eln_entry_id': self.set_implantation_eln_entry_id.value}
        self.params = eln2widget.States(self.params).get_metadata_injection()
        self.params = eln2widget.States(self.params).get_metadata_implantation()
        # Call functions from labfolder bindings to retrieve the information
        with self.out_injection:
            self.out_injection.clear_output()
            print('LabFolder entry found:')
            print('--> Procedure: ', self.params['injection']['procedure'])
            print('--> Animal ID: ', self.params['injection']['mouse_id'])
            print('--> Viral construct: ', self.params['injection']['viral_construct'])
            print('--> Date: ', self.params['injection']['date'])
            print('--> Experimenter: ', self.params['injection']['experimenter'])
        with self.out_implantation:
            self.out_implantation.clear_output()
            print('LabFolder entry found:')
            print('--> Procedure: ', self.params['implantation']['procedure'])
            print('--> Animal ID: ', self.params['implantation']['mouse_id'])
            print('--> Target region: ', self.params['implantation']['target_region'])
            print('--> Date: ', self.params['implantation']['date'])
            print('--> Experimenter: ', self.params['implantation']['experimenter'])
        # Reveal the next workflow steps.
        self.sessions_accordion.layout.visibility = 'visible'
        self.button_initialize_conversion.layout.visibility = 'visible'
    def on_button_initialize_conversion_clicked(self, b):
        """Run the NWB conversion for the first session, then show Inspect.

        NOTE(review): only sessions_accordion.children[0] is converted even
        when several sessions were added -- confirm whether that is intended.
        """
        self.params['file_dir'] = self.sessions_accordion.children[0].children[2].value
        self.params['session_description'] = self.sessions_accordion.children[0].children[0].children[0].value
        # Map the human-readable session description to a short session id.
        if self.params['session_description'] == 'open field':
            self.params['session_id'] = 'OF'
        elif self.params['session_description'] == 'elevated plus maze':
            self.params['session_id'] = 'EPM'
        elif self.params['session_description'] == 'conditioning day 1':
            self.params['session_id'] = 'CD1'
        elif self.params['session_description'] == 'conditioning day 2':
            self.params['session_id'] = 'CD2'
        with self.parent_out:
            print('Conversion initialized! This might take some moments... ')
        self.params['nwbfile'] = convert2nwb.convert_states(self.params)
        with self.parent_out:
            self.parent_out.clear_output()
        self.inspect = Inspect(self.params)
        self.widget.children = [self.inspect.widget]
    def get_login_credentials(self):
        """Read 'username ...' / 'password ...' lines from ELN_login.txt into params.

        NOTE(review): lines.index(line) returns the *first* occurrence, so
        duplicate lines would confuse the last-line detection -- confirm the
        file format guarantees unique lines.
        """
        with open('ELN_login.txt', 'r') as f:
            lines = f.readlines()
            for line in lines:
                # Slice off the trailing newline, except on the last line
                # which has none.
                if lines.index(line) == len(lines)-1: #if its the last line
                    correct_for_newline = len(line)
                else:
                    correct_for_newline = -1
                if line.startswith('username'):
                    self.params['username'] = line[line.index(' ')+1:correct_for_newline]
                if line.startswith('password'):
                    self.params['password'] = line[line.index(' ')+1:correct_for_newline]
class States_session:
    """One accordion entry describing a recording session (type + data dir),
    with buttons to append or remove session entries in the parent accordion."""
    def __init__(self, parent, session_id):
        self.parent = parent
        # Index of this session within the parent accordion at creation time.
        # NOTE(review): indices of later sessions become stale after a
        # deletion, so subsequent deletes can remove the wrong entry --
        # confirm intended behaviour.
        self.session_id = session_id
        self.dropdown = w.Dropdown(options=['open field', 'elevated plus maze', 'conditioning day 1', 'conditioning day 2'],
                                   description='Please specify the session type:',
                                   layout={'width': '75%'},
                                   style={'description_width': 'initial'})
        self.checkbox = w.Checkbox(description='Create ELN entry', value=False)
        self.describe_selection = w.Label(value='Please select the directory in which the recorded data can be found:')
        self.select_directory = FileChooser('/home/ds/')
        self.select_directory.show_only_dirs = True
        self.button_add_more = w.Button(description='Add another session', icon='plus',
                                        style={'description_width': 'initial'},
                                        layout={'width': 'initial'})
        self.button_delete_session = w.Button(description='Delete this session', icon='warning',
                                              style={'description_width': 'initial'},
                                              layout={'width': 'initial'})
        # NOTE(review): layout key 'hight' is a typo ('height'), so this
        # spacer gets no height applied.
        self.vspace = w.Label(value='', layout={'hight': '30px'})
        self.hspace = w.Label(value='', layout={'width': '10px'})
        self.out = w.Output()
        self.widget = w.VBox([w.HBox([self.dropdown, self.checkbox]),
                              self.describe_selection,
                              self.select_directory,
                              self.vspace,
                              w.HBox([self.button_add_more, self.hspace, self.button_delete_session, self.out])])
        self.button_add_more.on_click(self.on_button_add_more_clicked)
        self.button_delete_session.on_click(self.on_button_delete_session_clicked)
    def on_button_add_more_clicked(self, b):
        """Append a fresh session entry to the parent accordion."""
        with self.out:
            self.out.clear_output()
        self.parent.children = self.parent.children + (States_session(self.parent, len(self.parent.children)).widget, )
        self.parent.set_title(len(self.parent.children)-1, 'session {}'.format(str(len(self.parent.children))))
    def on_button_delete_session_clicked(self, b):
        """Remove this session from the accordion, refusing to delete the last one."""
        l_children = list(self.parent.children)
        if len(l_children) > 1:
            del l_children[self.session_id]
            self.parent.children = tuple(l_children)
        else:
            with self.out:
                print('This is the last session. Please add another session first!')
class Inspect:
    """Final workflow step: browse the freshly converted NWB file and save it."""
    def __init__(self, params):
        # Expects params to carry 'nwbfile', 'session_description',
        # 'session_id' and injection metadata (for the output filename).
        self.params = params
        self.intro = w.Label(value='NWB conversion was successfull!! Please use this last step to insepct the created files carefully! Once you´re done, don´t forget to save them :-)',
                             layout={'width': '90%'})
        self.select_nwb_file = w.Dropdown(options=[self.params['session_description']],
                                          value=self.params['session_description'],
                                          description='Please select for which session you would like to inspect the NWB file:',
                                          style={'description_width': 'initial'},
                                          layout={'width': '75%'})
        self.button_inspect_nwb_file = w.Button(description='Inspect', icon='search')
        self.button_save_nwb_file = w.Button(description='Save', icon='save')
        self.vspace = w.Label(value=' ', layout={'heigth': '20px'})
        self.widget = w.VBox([self.intro,
                              self.vspace,
                              w.HBox([self.select_nwb_file, self.button_inspect_nwb_file, self.button_save_nwb_file], layout={'width': '90%'})])
        self.button_inspect_nwb_file.on_click(self.button_inspect_nwb_file_clicked)
        self.button_save_nwb_file.on_click(self.button_save_nwb_file_clicked)
    def button_inspect_nwb_file_clicked(self, b):
        """Embed an interactive nwb2widget view of the in-memory NWB file."""
        self.widget.children = [self.intro,
                                self.vspace,
                                w.HBox([self.select_nwb_file, self.button_inspect_nwb_file, self.button_save_nwb_file], layout={'width': '90%'}),
                                self.vspace,
                                nwb2widget(self.params['nwbfile'])]
    def button_save_nwb_file_clicked(self, b):
        """Write the NWB file to <cwd>/<mouse_id>_<session_id>.nwb and confirm."""
        filepath = '{}/{}_{}.nwb'.format(os.getcwd(), self.params['injection']['mouse_id'], self.params['session_id'])
        with NWBHDF5IO(filepath, 'w') as io:
            io.write(self.params['nwbfile'])
        self.widget.children = [self.intro,
                                self.vspace,
                                w.HBox([self.select_nwb_file, self.button_inspect_nwb_file, self.button_save_nwb_file], layout={'width': '90%'}),
                                self.vspace,
                                w.Label(value='Your NWB file was successfully saved!')]
def launch():
    """Render the converter GUI in the current Jupyter output area."""
    display(GUI().widget)
py | 7dfb3394208f8788168b00694229c70efd516640 | # process local states
# Process local states (0..6).
local = range(7)
# Named sets of local states.
L = {"x0": [0], "x1": [1], "x2": [2],
     "cr0": [3], "cr1": [4], "cr2": [5],
     "v0": [0, 3], "v1": [1, 4], "v2": [2, 5]}
# Receive variables.
rcv_vars = ["nr0", "nr1", "nr2"]
# Initial local states.
initial = [0, 1, 2, 3, 4, 5]
# Transition rules as (source, target, SMT guard); the rule index is its
# position in the list.
_rule_specs = [
    (0, 0, "true"),
    (1, 0, "(>= nr0 1)"),
    (1, 1, "(< nr0 1)"),
    (2, 0, "(>= nr0 1)"),
    (2, 1, "(and (>= nr1 1) (< nr0 1))"),
    (2, 2, "(< (+ nr0 nr1) 1)"),
    (0, 3, "true"),
    (1, 3, "(>= nr0 1)"),
    (1, 4, "(< nr0 1)"),
    (2, 3, "(>= nr0 1)"),
    (2, 4, "(and (>= (+ nr1 cr1) 1) (< nr0 1))"),
    (2, 5, "(< (+ nr0 nr1) 1)"),
    (3, 6, "true"),
    (4, 6, "true"),
    (5, 6, "true"),
    (6, 6, "true"),
]
rules = [{'idx': i, 'from': src, 'to': dst, 'guard': guard}
         for i, (src, dst, guard) in enumerate(_rule_specs)]
# Parameters and resilience condition.
params = ["n", "t", "f"]
active = "n"
rc = ["(> n 0)", "(>= t 0)", "(>= t f)", "(> n t)"]
# Fault model: at most f crash faults.
faults = "crash"
faulty = [3, 4, 5, 6]
crashed = [3, 4, 5]
max_faulty = "f"
phase = 1
# Configuration/transition constraints.
constraints = [
    {'type': 'configuration', 'sum': 'eq', 'object': local, 'result': active},
    {'type': 'configuration', 'sum': 'le', 'object': faulty, 'result': max_faulty},
    {'type': 'transition', 'sum': 'eq', 'object': range(len(rules)), 'result': active},
    {'type': 'round_config', 'sum': 'le', 'object': crashed, 'result': 1},
]
# Receive-environment constraints.
environment = [
    '(>= nr0 x0)',
    '(<= nr0 (+ x0 cr0))',
    '(>= nr1 x1)',
    '(<= nr1 (+ x1 cr1))',
    '(>= nr2 x2)',
    '(<= nr2 (+ x2 cr2))',
]
# Properties to verify.
properties = [
    {'name': 'validity0', 'spec': 'safety', 'initial': '(= v0 0)', 'qf': 'last', 'reachable': '(> x0 0)'},
    {'name': 'validity1', 'spec': 'safety', 'initial': '(= v1 0)', 'qf': 'last', 'reachable': '(> x1 0)'},
    {'name': 'validity2', 'spec': 'safety', 'initial': '(= v2 0)', 'qf': 'last', 'reachable': '(> x2 0)'},
    {'name': 'agreement', 'spec': 'safety', 'initial': 'true', 'qf': 'last', 'reachable': '(and (> x0 0) (> x1 0) (> x2 0))'},
]
|
py | 7dfb34f48cc913b2f9341d88c7514a06cdb83149 | import csv
import json
import os
def main():
    """Split the Quora duplicate-question TSV into train/test/val JSON files.

    A counter of paraphrase pairs (is_duplicate == '1' whose second question
    ends with '?') drives the split: rows seen while the counter is in
    (1, 100002) go to train, (100001, 130002) to test, everything else to val.

    NOTE(review): every row is bucketed regardless of its is_duplicate flag
    (only the counter's *position* decides the split), exactly as in the
    original script -- confirm non-paraphrase rows are meant to be included.
    """
    out = []
    outtest = []
    outval = []

    def make_pair(row):
        # Build one question-pair record (key order preserved for the JSON
        # output): question/question1 texts, their ids, and the pair id.
        return {
            'question': row[3],
            'question1': row[4],
            'ques_id': row[1],
            'ques_id1': row[2],
            'id': row[0],
        }

    # Bug fix: the csv module on Python 3 requires a text-mode file
    # (the original opened it with 'rb').
    with open('../data/quora_duplicate_questions.tsv', 'r', newline='') as tsvfile:
        tsvin = csv.reader(tsvfile, delimiter='\t')
        count1 = 1
        for row in tsvin:
            # Advance the paraphrase counter only for valid duplicate pairs.
            if row[5] == '1' and row[4][-1:] == '?':
                count1 = count1 + 1
            if count1 > 1 and count1 < 100002:
                # First ~100k paraphrase pairs -> train. (Use 50002 for a
                # 50k train split instead.)
                out.append(make_pair(row))
            elif count1 > 100001 and count1 < 130002:
                # Next 30k -> test, per https://arxiv.org/pdf/1711.00279.pdf
                outtest.append(make_pair(row))
            else:
                # Remainder -> val.
                outval.append(make_pair(row))

    # Write the three splits, closing each file properly (the original
    # leaked the handles from json.dump(..., open(...))).
    print(len(out))
    with open('../data/quora_raw_train.json', 'w') as f:
        json.dump(out, f)
    print(len(outtest))
    with open('../data/quora_raw_test.json', 'w') as f:
        json.dump(outtest, f)
    print(len(outval))
    with open('../data/quora_raw_val.json', 'w') as f:
        json.dump(outval, f)
if __name__ == "__main__":
    main()
|
py | 7dfb351052eaea47943bfc3f5024f7d77a2c39d4 | import os
class Config(object):
    """Base Flask configuration; environment-specific subclasses override below."""
    # Absolute path of the directory containing this settings module.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DEBUG = False
    TESTING = False
    # Security fix: allow the real secret to be injected via the environment.
    # The fallback preserves the previous (insecure) default for dev setups
    # and must never be used in production.
    SECRET_KEY = os.getenv('SECRET_KEY', 'This is an INSECURE secret!! DO NOT use this in production!!')
    SESSION_COOKIE_SECURE = True
    BOOTSTRAP_BOOTSWATCH_THEME = 'Morph'
    # SQLite database file lives under <repo>/<DB_DIR>/db2.sqlite.
    DB_DIR = os.getenv('DB_DIR', 'database')
    SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(BASE_DIR, '..', DB_DIR, "db2.sqlite")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER', os.path.join(BASE_DIR, '..', 'uploads'))
    GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY', 'NOKEY')
    LOG_DIR = os.path.join(BASE_DIR, '../logs')
class ProductionConfig(Config):
    """Production settings: inherits the (secure-by-default) base unchanged."""
    pass
class DevelopmentConfig(Config):
    """Local development: debug on, allow cookies over plain HTTP."""
    DEBUG = True
    SESSION_COOKIE_SECURE = False
class TestingConfig(Config):
    """Test runs: in-memory SQLite, testing/debug flags enabled."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:"
    SESSION_COOKIE_SECURE = False
    DEBUG = True
|
py | 7dfb35d1fec5fa4cd74d664ded6fe591bf2e1724 | a,b=input().split()
# Minimum Hamming distance between pattern `a` and every length-len(a)
# window of `b`.  Bug fix: seed with len(a) -- the worst possible distance --
# instead of the original magic constant 50, which silently produced wrong
# answers for patterns longer than 50 characters.
r = len(a)
for i in range(len(b) - len(a) + 1):
    # Count mismatches for the window of b starting at offset i.
    t = 0
    for j in range(len(a)):
        if a[j] != b[i + j]:
            t += 1
    r = min(r, t)
print(r)
py | 7dfb361fcbe3b35ae9cdfbe95066ac8430ff3921 | import sys
from math import floor
from pathlib import Path
import numpy as np
# Resolve the compiled ncnn wrapper module: relative to the package when
# imported as part of one, otherwise as a top-level module on sys.path.
if __package__:
    import importlib
    raw = importlib.import_module(f"{__package__}.realsr_ncnn_vulkan_wrapper")
else:
    import realsr_ncnn_vulkan_wrapper as raw
class SR:
    """Image super-resolution driver around the RealSR ncnn Vulkan wrapper."""
    def __init__(
        self,
        gpuid=0,
        model="models-DF2K",
        tta_mode=False,
        scale: float = 2,
        tilesize=0,
        param_path="test.param",
        bin_path="test.bin",
    ):
        """
        RealSR class which can do image super resolution.
        :param gpuid: the id of the gpu device to use.
        :param model: the name or the path to the model
        :param tta_mode: whether to enable tta mode or not
        :param scale: scale ratio. value: float. default: 2
        :param tilesize: tile size. 0 for automatically setting the size. default: 0
        :param param_path: path to the ncnn .param file
        :param bin_path: path to the ncnn .bin file
        """
        self._raw_realsr = raw.RealSRWrapped(gpuid, tta_mode)
        self.model = model
        self.gpuid = gpuid
        self.scale = scale  # the requested overall scale ratio
        self.set_params(scale, tilesize)
        self.load(param_path, bin_path)
    def set_params(self, scale=4., tilesize=0):
        """
        set parameters for realsr object
        :param scale: per-call upscale factor for the raw processor. default: 4
        :param tilesize: tile size; <= 0 selects one automatically. default: 0
        :return: None
        """
        self._raw_realsr.scale = scale  # controls the raw per-call upscale factor
        self._raw_realsr.tilesize = self.get_tilesize() if tilesize <= 0 else tilesize
        self._raw_realsr.prepadding = self.get_prepadding()
    def load(self, parampath: str = "", modelpath: str = "") -> None:
        """
        Load models from given paths. Falls back to the bundled model named by
        self.model when one or both paths are missing.

        NOTE(review): the fallback only resolves x4.param/x4.bin when the raw
        scale is exactly 4; for other scales the explicit paths must exist --
        confirm that is intended.

        :param parampath: the path to model params. usually ended with ".param"
        :param modelpath: the path to model bin. usually ended with ".bin"
        :return: None
        :raises FileNotFoundError: if either resolved path does not exist
        """
        if not parampath or not modelpath:
            model_dir = Path(self.model)
            if not model_dir.is_absolute():
                if (
                    not model_dir.is_dir()
                ):  # fall back to the models bundled next to this module
                    dir_path = Path(__file__).parent
                    model_dir = dir_path.joinpath("models", self.model)
            if self._raw_realsr.scale == 4:
                parampath = model_dir.joinpath("x4.param")
                modelpath = model_dir.joinpath("x4.bin")
        if Path(parampath).exists() and Path(modelpath).exists():
            parampath_str, modelpath_str = raw.StringType(), raw.StringType()
            if sys.platform in ("win32", "cygwin"):
                # Windows builds of the wrapper expect wide strings.
                parampath_str.wstr = raw.new_wstr_p()
                raw.wstr_p_assign(parampath_str.wstr, str(parampath))
                modelpath_str.wstr = raw.new_wstr_p()
                raw.wstr_p_assign(modelpath_str.wstr, str(modelpath))
            else:
                parampath_str.str = raw.new_str_p()
                raw.str_p_assign(parampath_str.str, str(parampath))
                modelpath_str.str = raw.new_str_p()
                raw.str_p_assign(modelpath_str.str, str(modelpath))
            self._raw_realsr.load(parampath_str, modelpath_str)
        else:
            raise FileNotFoundError(f"{parampath} or {modelpath} not found")
    def process(self, im):
        """Upscale `im` (H x W x C uint8 array) and return the result.

        The image is returned unchanged when self.scale <= 1.
        """
        if self.scale > 1:
            # Record the input geometry for _process. (The dead `cur_scale`
            # local from the original has been removed.)
            self.w = im.shape[1]
            self.h = im.shape[0]
            im = self._process(im)
        return im
    def _process(self, im):
        """
        Call RealSR.process() once for the given image
        """
        in_bytes = bytearray(np.array(im).tobytes(order='C'))
        channels = int(len(in_bytes) / (self.w * self.h))
        out_bytes = bytearray((self._raw_realsr.scale ** 2) * len(in_bytes))
        raw_in_image = raw.Image(in_bytes, self.w, self.h, channels)
        raw_out_image = raw.Image(
            out_bytes,
            self._raw_realsr.scale * self.w,
            self._raw_realsr.scale * self.h,
            channels,
        )
        self._raw_realsr.process(raw_in_image, raw_out_image)
        out_numpy = np.frombuffer(bytes(out_bytes), dtype=np.uint8)
        # NOTE(review): the output is reshaped to 3 channels regardless of
        # the computed `channels` above -- confirm non-RGB input is
        # unsupported.
        out_numpy = np.reshape(
            out_numpy, (self._raw_realsr.scale * self.h, self._raw_realsr.scale * self.w, 3))
        return out_numpy
    def get_prepadding(self) -> int:
        """Return the pre-padding required by the selected model family.

        Bug fix: the original tested `self.model.find(...)` for truthiness,
        but str.find returns 0 (falsy) for a match at offset 0 and -1
        (truthy) for a miss -- so model "models-DF2K_JPEG" wrongly raised
        NotImplementedError. Substring membership is the intended check, and
        "models-DF2K" is a prefix of "models-DF2K_JPEG", so one test covers
        both supported families.
        """
        if "models-DF2K" in self.model:
            return 10
        raise NotImplementedError(f'model "{self.model}" is not supported')
    def get_tilesize(self):
        """Choose a tile size from the GPU heap budget (same `.find` fix as
        get_prepadding)."""
        if "models-DF2K" not in self.model:
            raise NotImplementedError(f'model "{self.model}" is not supported')
        heap_budget = raw.get_heap_budget(self.gpuid)
        if heap_budget > 1900:
            return 200
        if heap_budget > 550:
            return 100
        if heap_budget > 190:
            return 64
        return 32
|
py | 7dfb361feb58ae25a2b0128713c2d79d7ccb2652 | from terra_sdk.core.feegrant import BasicAllowance, PeriodicAllowance, AllowedMsgAllowance
from .base import create_demux, create_demux_proto
# Allowance classes the feegrant demux routers can dispatch on.
msgs = [BasicAllowance, PeriodicAllowance, AllowedMsgAllowance]
# Routers that parse a serialized allowance into the matching class above
# (dict/Amino form and protobuf form respectively; see create_demux).
parse_feegrant = create_demux(msgs)
parse_feegrant_proto = create_demux_proto(msgs)
|
py | 7dfb3771a8bbccbca1e4f2119103aca3831fc67f | from .. import db
class MonitoringPhoto(db.Model):
    """ Monitoring Photo Model for storing feedback photo related details """
    __tablename__ = "monitoring_photo"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Owning building; photos are removed when the building row is deleted.
    buildings_id = db.Column(db.Integer, db.ForeignKey('buildings.id', ondelete='CASCADE'), nullable=False)
    buildings = db.relationship('Buildings', backref=db.backref('buildings_photo', lazy='dynamic'))
    # When the photo was submitted, and where the file is stored.
    timestamp = db.Column(db.DateTime, nullable=False)
    photo_file = db.Column(db.String(100), nullable=False)
    contributor = db.Column(db.String(50), nullable=False)
    # Verification metadata, filled in once a validator reviews the photo.
    verified_on = db.Column(db.DateTime, nullable=True)
    validator = db.Column(db.String(50), nullable=True)
    reason = db.Column(db.String(50), nullable=True)
    # Review status; cascades with the referenced status row.
    status_id = db.Column(db.Integer, db.ForeignKey('status.id', ondelete='CASCADE'), nullable=False)
    status = db.relationship('Status', backref=db.backref('status_photo', lazy='dynamic'))
    def as_dict(self):
        """Return the row as a plain dict of column name -> value."""
        # NOTE(review): this model declares no 'user_id' column, so the
        # filter currently excludes nothing -- looks copied from another
        # model; confirm before removing.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns if c.name != 'user_id'}
py | 7dfb37726a7cc089dce181aa16742b0bc5d8cbcb | from sql_alchemy import banco
class HotelModel(banco.Model):
    """SQLAlchemy model for a hotel row in the 'hoteis' table."""
    __tablename__ = 'hoteis'
    hotel_id = banco.Column(banco.String, primary_key=True)
    nome = banco.Column(banco.String(80))
    # Star rating and daily rate stored as floats.
    estrelas = banco.Column(banco.Float(precision=1))
    diaria = banco.Column(banco.Float(precision=2))
    cidade = banco.Column(banco.String(40))
    def __init__(self, hotel_id, nome, estrelas, diaria, cidade):
        self.hotel_id = hotel_id
        self.nome = nome
        self.estrelas = estrelas
        self.diaria = diaria
        self.cidade = cidade
    def json(self):
        """Return a JSON-serializable dict representation of the hotel."""
        return {
            'hotel_id': self.hotel_id,
            'nome': self.nome,
            'estrelas': self.estrelas,
            'diaria': self.diaria,
            'cidade': self.cidade
        }
    @classmethod
    def find_hotel(cls, hotel_id):
        """Return the hotel with the given id, or None if absent."""
        hotel = cls.query.filter_by(hotel_id=hotel_id).first()  # SELECT * FROM hoteis where hotel_id = $hotel_id
        if hotel:
            return hotel
        return None
    def save_hotel(self):
        """Insert (or re-attach) this hotel and commit the session."""
        banco.session.add(self)
        banco.session.commit()
    def update_hotel(self, nome, estrelas, diaria, cidade):
        """Overwrite the mutable fields; caller must commit afterwards."""
        self.nome = nome
        self.estrelas = estrelas
        self.diaria = diaria
        self.cidade = cidade
    def delete_hotel(self):
        """Delete this hotel and commit the session."""
        banco.session.delete(self)
        banco.session.commit()
py | 7dfb379001e7935f0b56fb2ea1d3558ebd63ee31 | from django.shortcuts import render
def test_view(request):
    """Render the static "test.html" page."""
    template_name = "test.html"
    return render(request, template_name)
|
py | 7dfb3808776445a346343e29d632406cc94dedd0 | """ Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """Two successive (3x3 conv -> BatchNorm -> LeakyReLU) stages.

    No padding is applied, so each stage shrinks the spatial size by 2.
    ``mid_channels`` controls the width of the intermediate stage and
    defaults to ``out_channels``.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid = mid_channels if mid_channels else out_channels
        stages = [
            nn.Conv2d(in_channels, mid, kernel_size=3, padding=0),
            nn.BatchNorm2d(mid),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(mid, out_channels, kernel_size=3, padding=0),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both convolution stages to *x*."""
        return self.double_conv(x)
class Down(nn.Module):
    """Halve the spatial resolution with max-pooling, then DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = [nn.MaxPool2d(2), DoubleConv(in_channels, out_channels)]
        self.maxpool_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Downscale *x* and return the convolved result."""
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Upscaling then double conv.

    ``x1`` is upsampled 2x with a transposed convolution, the skip
    connection ``x2`` is center-cropped to the same spatial size, and
    the two are concatenated along the channel axis before the double
    convolution.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # input is NCHW: dim 2 is height, dim 3 is width
        diffH = x2.size()[2] - x1.size()[2]
        diffW = x2.size()[3] - x1.size()[3]
        # BUG FIX: the original cropped diff//2 from *both* sides, which
        # leaves the tensors one pixel apart whenever the difference is
        # odd and makes torch.cat below fail.  Take diff - diff//2 off
        # the far side so the crop matches x1 exactly.
        x2 = x2[:, :,
                diffH // 2: x2.size()[2] - (diffH - diffH // 2),
                diffW // 2: x2.size()[3] - (diffW - diffW // 2)]
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
class OutConv(nn.Module):
    """Final 1x1 convolution mapping features to the output channels."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Project *x* channel-wise; the spatial size is unchanged."""
        return self.conv(x)
py | 7dfb3939ade82d3cbf677b6427c3f696c502f331 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
    """User profile manager"""
    def create_user(self,email,name,password=None):
        """Create a new user profile"""
        if not email:
            raise ValueError('User must have email address')
        # Lowercases the domain part so lookups are case-insensitive.
        email = self.normalize_email(email)
        user = self.model(email=email,name=name)
        user.set_password(password) #Hashing password
        user.save(using=self._db)
        return user
    def create_superuser(self,email,name,password):
        """Create a new superuser with staff and superuser flags set."""
        user = self.create_user(email,name,password)
        # is_superuser comes from PermissionsMixin; is_staff is declared
        # on the UserProfile model.
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
    '''DB models for users'''
    # Email doubles as the login identifier (see USERNAME_FIELD below).
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # Custom manager providing create_user / create_superuser.
    objects = UserProfileManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['name']
    def get_full_name(self):
        """Retrieve full name"""
        return self.name
    def get_short_name(self):
        """Retrieve short name"""
        return self.name
    def __str__(self):
        """String representation used by the admin and logs."""
        return self.email
|
py | 7dfb3a9f913c80718bcd07def9eca6cddec9d0b2 | """"""
TYPE_CPP2PY = {
"int": "int",
"char": "char",
"double": "double",
"short": "int",
}
class DataTypeGenerator:
    """Generate Python constant/typedef modules from a C++ DataType header.

    ``run()`` reads the header at ``filename`` and writes two modules,
    ``{prefix}_constant.py`` (from ``#define`` lines) and
    ``{prefix}_typedef.py`` (from ``typedef`` lines).
    """
    count = 0
    count_ = 0

    def __init__(self, filename: str, prefix: str):
        """Constructor.

        filename -- path to the C++ header to translate
        prefix -- prefix of the generated output modules
        """
        self.filename = filename
        self.prefix = prefix

    def run(self):
        """Translate the header, writing the two generated modules."""
        # Context managers guarantee the files are closed even if a
        # malformed header line raises inside process_line().
        with open(self.filename, "r") as f_cpp, \
                open(f"{self.prefix}_constant.py", "w") as f_define, \
                open(f"{self.prefix}_typedef.py", "w") as f_typedef:
            # Keep the handles on the instance so the per-line handlers
            # can write to them.
            self.f_cpp = f_cpp
            self.f_define = f_define
            self.f_typedef = f_typedef
            for line in f_cpp:
                self.process_line(line)
        print(f"{self.prefix} DataType生成完毕")

    def process_line(self, line: str):
        """Dispatch one header line to the matching handler."""
        line = line.replace("\n", "")
        line = line.replace(";", "")
        if line.startswith("#define"):
            self.process_define(line)
        elif line.startswith("typedef"):
            self.process_typedef(line)

    def process_define(self, line: str):
        """Emit a Python constant assignment for a ``#define`` line."""
        line = line.replace("\t", "")
        line = line.replace("  ", " ")
        line = line.split("//")[0]  # strip trailing // comment
        words = [word for word in line.split(" ") if word]
        if len(words) < 3:
            # Name and value are fused into one or two tokens.
            if "UFT" in words[0]:
                # "#defineUFT_NAME VALUE" with no space after #define.
                name = words[0].split("#define")[-1]
                value = words[-1]
                self.f_define.write(f"{name} = {value}\n")
            else:
                content = words[1]
                if "'" in content:
                    # e.g. NAME'X' -> NAME = 'X'
                    name = content.split("'")[0]
                    value = content.split("'")[1]
                    self.f_define.write(f"{name} = '{value}'\n")
                elif "-" in content:
                    # e.g. NAME-1 -> NAME = -1
                    name = content.split("-")[0]
                    value = content.split("-")[1]
                    self.f_define.write(f"{name} = -{value}\n")
        else:
            # Regular "#define NAME VALUE" line.
            name = words[1]
            value = words[2]
            self.f_define.write(f"{name} = {value}\n")

    def process_typedef(self, line: str):
        """Emit a Python type-name alias for a ``typedef`` line."""
        line = line.replace("\t", "")
        words = [word for word in line.split(" ") if word != ""]
        if len(words) > 3:
            # Multi-word base type, e.g. "typedef unsigned int TName".
            type_ = f"{words[1]} {words[2]}"
            name = words[-1]
        else:
            type_ = words[1]
            name = words[2]
            if "[" in name:
                # Char arrays become Python strings.
                type_ = "string"
                name = name[:name.index("[")]
        self.f_typedef.write(f"{name} = \"{type_}\"\n")
if __name__ == "__main__":
generator = DataTypeGenerator(
"../include/nst/uft4_UserApiDataType.h",
"nst"
)
generator.run()
|
py | 7dfb3af7c42daddac8b3e1fe258f006113de9436 | fakeFileNames = [
'Data.bin',
'run.exe',
'geolocation.json',
'index',
'users.sql',
'main.py',
'scipy.py',
'streamer.exe'
]
|
py | 7dfb3b0c331a1a064ac457d8ce41a4e10b22ef5c | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from collections import defaultdict
from atom.api import Atom, Event, Typed
from .plugin import Plugin
class Workbench(Atom):
    """ A base class for creating plugin-style applications.

    This class is used for managing the lifecycle of plugins. It does
    not provide any plugins of its own. The UIWorkbench subclass adds
    the 'core' and 'ui' workbench plugins by default.

    """
    #: An event fired when a plugin is added to the workbench. The
    #: payload will be the plugin id.
    plugin_added = Event(str)

    #: An event fired when a plugin is removed from the workbench. The
    #: payload will be the plugin id.
    plugin_removed = Event(str)

    #: An event fired when an extension point is added to the
    #: workbench. The payload will be the fully qualified id of the
    #: extension point.
    extension_point_added = Event(str)

    #: An event fired when an extension point is removed from the
    #: workbench. The payload will be the fully qualified id of the
    #: extension point.
    extension_point_removed = Event(str)

    def register(self, manifest):
        """ Register a plugin with the workbench.

        Parameters
        ----------
        manifest : PluginManifest
            The plugin manifest to register with the workbench.

        """
        plugin_id = manifest.id
        if plugin_id in self._manifests:
            msg = "plugin '%s' is already registered"
            raise ValueError(msg % plugin_id)
        self._manifests[plugin_id] = manifest
        manifest.workbench = self
        if not manifest.is_initialized:
            manifest.initialize()
        # Contributions are wired up before observers are notified.
        self._add_extensions(manifest.extensions)
        self._add_extension_points(manifest.extension_points)
        self.plugin_added(plugin_id)

    def unregister(self, plugin_id):
        """ Remove a plugin from the workbench.

        This will remove the extension points and extensions from the
        workbench, and stop the plugin if it was activated.

        Parameters
        ----------
        plugin_id : unicode
            The identifier of the plugin of interest.

        """
        manifest = self._manifests.get(plugin_id)
        if manifest is None:
            msg = "plugin '%s' is not registered"
            raise ValueError(msg % plugin_id)
        # Stop the plugin first (if it was ever created) so it can
        # clean up while its contributions are still in place.
        plugin = self._plugins.pop(plugin_id, None)
        if plugin is not None:
            plugin.stop()
            plugin.manifest = None
        self._remove_extensions(manifest.extensions)
        self._remove_extension_points(manifest.extension_points)
        del self._manifests[plugin_id]
        manifest.workbench = None
        if manifest.is_initialized:
            manifest.destroy()
        self.plugin_removed(plugin_id)

    def get_manifest(self, plugin_id):
        """ Get the plugin manifest for a given plugin id.

        Parameters
        ----------
        plugin_id : unicode
            The identifier of the plugin of interest.

        Returns
        -------
        result : PluginManifest or None
            The manifest for the plugin of interest, or None if it
            does not exist.

        """
        return self._manifests.get(plugin_id)

    def get_plugin(self, plugin_id, force_create=True):
        """ Get the plugin object for a given plugin id.

        Parameters
        ----------
        plugin_id : unicode
            The identifier of the plugin of interest.

        force_create : bool, optional
            Whether to automatically import and start the plugin object
            if it is not already active. The default is True.

        Returns
        -------
        result : Plugin or None
            The plugin of interest, or None if it does not exist and/or
            could not be created.

        """
        if plugin_id in self._plugins:
            return self._plugins[plugin_id]
        manifest = self._manifests.get(plugin_id)
        if manifest is None:
            msg = "plugin '%s' is not registered"
            raise ValueError(msg % plugin_id)
        if not force_create:
            return None
        # Lazily instantiate and start the plugin on first access.
        plugin = manifest.factory()
        if not isinstance(plugin, Plugin):
            msg = "plugin '%s' factory created non-Plugin type '%s'"
            raise TypeError(msg % (plugin_id, type(plugin).__name__))
        self._plugins[plugin_id] = plugin
        plugin.manifest = manifest
        plugin.start()
        return plugin

    def get_extension_point(self, extension_point_id):
        """ Get the extension point associated with an id.

        Parameters
        ----------
        extension_point_id : unicode
            The fully qualified id of the extension point of interest.

        Returns
        -------
        result : ExtensionPoint or None
            The desired ExtensionPoint or None if it does not exist.

        """
        return self._extension_points.get(extension_point_id)

    def get_extension_points(self):
        """ Get all of the extension points in the workbench.

        Returns
        -------
        result : list
            A list of all of the extension points in the workbench.

        """
        return list(self._extension_points.values())

    #--------------------------------------------------------------------------
    # Private API
    #--------------------------------------------------------------------------
    #: A mapping of plugin id to PluginManifest.
    _manifests = Typed(dict, ())

    #: A mapping of plugin id to Plugin instance.
    _plugins = Typed(dict, ())

    #: A mapping of extension point id to ExtensionPoint.
    _extension_points = Typed(dict, ())

    #: A mapping of extension id to Extension.
    _extensions = Typed(dict, ())

    #: A mapping of extension point id to set of Extensions.
    _contributions = Typed(defaultdict, (set,))

    def _add_extension_points(self, extension_points):
        """ Add extension points to the workbench.

        Parameters
        ----------
        extension_points : list
            The list of ExtensionPoints to add to the workbench.

        """
        for point in extension_points:
            self._add_extension_point(point)

    def _add_extension_point(self, point):
        """ Add an extension point to the workbench.

        Parameters
        ----------
        point : ExtensionPoint
            The ExtensionPoint to add to the workbench.

        """
        point_id = point.qualified_id
        if point_id in self._extension_points:
            msg = "extension point '%s' is already registered"
            raise ValueError(msg % point_id)
        self._extension_points[point_id] = point
        # Attach any contributions registered before this point existed.
        if point_id in self._contributions:
            to_add = self._contributions[point_id]
            self._update_extension_point(point, [], to_add)
        self.extension_point_added(point_id)

    def _remove_extension_points(self, extension_points):
        """ Remove extension points from the workbench.

        Parameters
        ----------
        extension_points : list
            The list of ExtensionPoints to remove from the workbench.

        """
        for point in extension_points:
            self._remove_extension_point(point)

    def _remove_extension_point(self, point):
        """ Remove an extension point from the workbench.

        Parameters
        ----------
        point : ExtensionPoint
            The ExtensionPoint to remove from the workbench.

        """
        point_id = point.qualified_id
        if point_id not in self._extension_points:
            msg = "extension point '%s' is not registered"
            raise ValueError(msg % point_id)
        del self._extension_points[point_id]
        if point_id in self._contributions:
            to_remove = self._contributions.pop(point_id)
            self._update_extension_point(point, to_remove, [])
        self.extension_point_removed(point_id)

    def _add_extensions(self, extensions):
        """ Add extensions to the workbench.

        Parameters
        ----------
        extensions : list
            The list of Extensions to add to the workbench.

        """
        # Group by target point so each point is updated only once.
        grouped = defaultdict(set)
        for extension in extensions:
            ext_id = extension.qualified_id
            if ext_id in self._extensions:
                msg = "extension '%s' is already registered"
                raise ValueError(msg % ext_id)
            self._extensions[ext_id] = extension
            grouped[extension.point].add(extension)
        for point_id, exts in grouped.items():
            self._contributions[point_id].update(exts)
            if point_id in self._extension_points:
                point = self._extension_points[point_id]
                self._update_extension_point(point, (), exts)

    def _remove_extensions(self, extensions):
        """ Remove extensions from a workbench.

        Parameters
        ----------
        extensions : list
            The list of Extensions to remove from the workbench.

        """
        grouped = defaultdict(set)
        for extension in extensions:
            ext_id = extension.qualified_id
            if ext_id not in self._extensions:
                msg = "extension '%s' is not registered"
                raise ValueError(msg % ext_id)
            del self._extensions[ext_id]
            grouped[extension.point].add(extension)
        for point_id, exts in grouped.items():
            self._contributions[point_id].difference_update(exts)
            if point_id in self._extension_points:
                point = self._extension_points[point_id]
                self._update_extension_point(point, exts, ())

    def _update_extension_point(self, point, to_remove, to_add):
        """ Update an extension point with delta extension objects.

        Parameters
        ----------
        point : ExtensionPoint
            The extension point of interest.

        to_remove : iterable
            The Extension objects to remove from the point.

        to_add : iterable
            The Extension objects to add to the point.

        """
        if to_remove or to_add:
            extensions = set(point.extensions)
            extensions.difference_update(to_remove)
            extensions.update(to_add)
            # Extensions are kept ordered by their declared rank.
            key = lambda ext: ext.rank
            point.extensions = tuple(sorted(extensions, key=key))
|
py | 7dfb3c2660ef005ed7c2c748106f45c9cb4f9c84 | import numpy as np # Fundamental package for scientific computing
import matplotlib.pyplot as plt # 2D plotting library producing publication quality figures
import pyrealsense2.pyrealsense2 as rs # Intel RealSense cross-platform open-source API
import open3d as o3d
import imageio
import cv2
import pyransac3d as pyrsc # open3d librairie to use RANSAC for different shapes
from datetime import datetime as date # Library use to get the actual date and time
import subprocess
from subprocess import Popen, PIPE
import os
print("Environment Ready")
# Configure depth and color streams
# Change resolution here
pipe = rs.pipeline()
cfg = rs.config()
cfg.enable_stream(rs.stream.depth,848, 480)
#Start streaming
pipe.start(cfg)
# Filter generates color images based on input depth frame
# NOTE(review): 'colorizer' is created but never used below -- confirm.
colorizer = rs.colorizer()
# Skip 5 first frames to give the Auto-Exposure time to adjust
for x in range(5):pipe.wait_for_frames()
# Get intrinsic camera parameters
profile = pipe.get_active_profile()
# Change the type of stereo vision
device = profile.get_device()
depth_sensor = device.query_sensors()[0]
depth_sensor.set_option(rs.option.emitter_enabled, 1.0)
# Store frameset
frameset = pipe.wait_for_frames()
depth_frame = frameset.get_depth_frame()
# Cleanup
pipe.stop()
print("Frames Captured")
# Convert images to numpy arrays
depth_image = np.asanyarray(depth_frame.get_data())
# Collect the actual date and time
timestamp = date.now().strftime("%Y-%m-%d-%H-%M")
# save both images (the name is changed each time using the timestamp in order to save all the images)
imageio.imwrite("Output/DepthImage/depth"+timestamp+".png", depth_image)
# Get back the images
depth_raw = o3d.io.read_image("Output/DepthImage/depth"+timestamp+".png")
# Get the default intrinsic parameters of the camera
p = o3d.camera.PinholeCameraIntrinsic(o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
# Change the intrinsic parameters of the camera to match the chosen resolution
p.intrinsic_matrix=[[421.139, 0.0, 426.176], [ 0.0, 421.139, 237.017], [ 0.0, 0.0, 1.0]] # 848*480 resolution
# Create the point cloud from the rgbd image
pcd = o3d.geometry.PointCloud.create_from_depth_image(
    depth_raw,p)
# Flip itthe point cloud, otherwise it will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
# Save the point cloud
o3d.io.write_point_cloud("Output/PointCloud/cloud"+timestamp+".ply", pcd)
# Get back the point cloud
# NOTE(review): 'pcd_load' is read back but never used -- confirm intent.
pcd_load = o3d.io.read_point_cloud("Output/PointCloud/cloud"+timestamp+".ply")
# NOTE(review): passing an argv list together with shell=True ignores the
# extra list items on POSIX; use shell=False (or a single command string)
# -- confirm the target platform before changing.
p = subprocess.Popen(['main', '-f', 'Output/PointCloud/cloud'+timestamp+'.ply'], stdout=PIPE, stdin=PIPE, shell=True)
result = p.stdout.readline().strip()
print(result)
py | 7dfb3c4ab20dbb3222170cc2888a2efbf82e2153 | """
Effects/StiffScrollEffect
=========================
An Effect to be used with ScrollView to prevent scrolling beyond
the bounds, but politely.
A ScrollView constructed with StiffScrollEffect,
eg. ScrollView(effect_cls=StiffScrollEffect), will get harder to
scroll as you get nearer to its edges. You can scroll all the way to
the edge if you want to, but it will take more finger-movement than
usual.
Unlike DampedScrollEffect, it is impossible to overscroll with
StiffScrollEffect. That means you cannot push the contents of the
ScrollView far enough to see what's beneath them. This is appropriate
if the ScrollView contains, eg., a background image, like a desktop
wallpaper. Overscrolling may give the impression that there is some
reason to overscroll, even if just to take a peek beneath, and that
impression may be misleading.
StiffScrollEffect was written by Zachary Spector. His other stuff is at:
https://github.com/LogicalDash/
He can be reached, and possibly hired, at:
[email protected]
"""
from time import time
from kivy.animation import AnimationTransition
from kivy.effects.kinetic import KineticEffect
from kivy.properties import NumericProperty, ObjectProperty
from kivy.uix.widget import Widget
class StiffScrollEffect(KineticEffect):
    """A scroll effect that stiffens near the edges instead of
    overscrolling.  Friction ramps up from ``base_friction`` toward
    ``max_friction`` as the value approaches either boundary.
    """

    drag_threshold = NumericProperty("20sp")
    """Minimum distance to travel before the movement is considered as a
    drag.

    :attr:`drag_threshold` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `'20sp'`.
    """

    min = NumericProperty(0)
    """Minimum boundary to stop the scrolling at.

    :attr:`min` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `0`.
    """

    max = NumericProperty(0)
    """Maximum boundary to stop the scrolling at.

    :attr:`max` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `0`.
    """

    max_friction = NumericProperty(1)
    """How hard should it be to scroll, at the worst?

    :attr:`max_friction` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `1`.
    """

    body = NumericProperty(0.7)
    """Proportion of the range in which you can scroll unimpeded.

    :attr:`body` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `0.7`.
    """

    scroll = NumericProperty(0.0)
    """Computed value for scrolling

    :attr:`scroll` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `0.0`.
    """

    transition_min = ObjectProperty(AnimationTransition.in_cubic)
    """The AnimationTransition function to use when adjusting the friction
    near the minimum end of the effect.

    :attr:`transition_min` is an :class:`~kivy.properties.ObjectProperty`
    and defaults to :class:`kivy.animation.AnimationTransition`.
    """

    transition_max = ObjectProperty(AnimationTransition.in_cubic)
    """The AnimationTransition function to use when adjusting the friction
    near the maximum end of the effect.

    :attr:`transition_max` is an :class:`~kivy.properties.ObjectProperty`
    and defaults to :class:`kivy.animation.AnimationTransition`.
    """

    target_widget = ObjectProperty(None, allownone=True, baseclass=Widget)
    """The widget to apply the effect to.

    :attr:`target_widget` is an :class:`~kivy.properties.ObjectProperty`
    and defaults to ``None``.
    """

    displacement = NumericProperty(0)
    """The absolute distance moved in either direction.

    :attr:`displacement` is an :class:`~kivy.properties.NumericProperty`
    and defaults to `0`.
    """

    def __init__(self, **kwargs):
        """Set ``self.base_friction`` to the value of ``self.friction`` just
        after instantiation, so that I can reset to that value later.
        """
        super().__init__(**kwargs)
        self.base_friction = self.friction

    def update_velocity(self, dt):
        """Before actually updating my velocity, meddle with ``self.friction``
        to make it appropriate to where I'm at, currently.
        """
        hard_min = self.min
        hard_max = self.max
        if hard_min > hard_max:
            hard_min, hard_max = hard_max, hard_min
        margin = (1.0 - self.body) * (hard_max - hard_min)
        soft_min = hard_min + margin
        soft_max = hard_max - margin
        if self.value < soft_min:
            try:
                prop = (soft_min - self.value) / (soft_min - hard_min)
                self.friction = self.base_friction + abs(
                    self.max_friction - self.base_friction
                ) * self.transition_min(prop)
            except ZeroDivisionError:
                pass
        elif self.value > soft_max:
            try:
                # normalize how far past soft_max I've gone as a
                # proportion of the distance between soft_max and hard_max
                prop = (self.value - soft_max) / (hard_max - soft_max)
                # BUG FIX: this branch used transition_min, leaving the
                # documented transition_max property unused; the maximum
                # end must use its own transition.
                self.friction = self.base_friction + abs(
                    self.max_friction - self.base_friction
                ) * self.transition_max(prop)
            except ZeroDivisionError:
                pass
        else:
            self.friction = self.base_friction
        return super().update_velocity(dt)

    def on_value(self, *args):
        """Prevent moving beyond my bounds, and update ``self.scroll``"""
        if self.value > self.min:
            self.velocity = 0
            self.scroll = self.min
        elif self.value < self.max:
            self.velocity = 0
            self.scroll = self.max
        else:
            self.scroll = self.value

    def start(self, val, t=None):
        """Start movement with ``self.friction`` = ``self.base_friction``"""
        self.is_manual = True
        t = t or time()
        self.velocity = self.displacement = 0
        self.friction = self.base_friction
        self.history = [(t, val)]

    def update(self, val, t=None):
        """Reduce the impact of whatever change has been made to me, in
        proportion with my current friction.
        """
        t = t or time()
        hard_min = self.min
        hard_max = self.max
        if hard_min > hard_max:
            hard_min, hard_max = hard_max, hard_min
        gamut = hard_max - hard_min
        margin = (1.0 - self.body) * gamut
        soft_min = hard_min + margin
        soft_max = hard_max - margin
        distance = val - self.history[-1][1]
        reach = distance + self.value
        # Only soften movement that pushes into the stiff margins.
        if (distance < 0 and reach < soft_min) or (
            distance > 0 and soft_max < reach
        ):
            distance -= distance * self.friction
        self.apply_distance(distance)
        self.history.append((t, val))
        if len(self.history) > self.max_history:
            self.history.pop(0)
        self.displacement += abs(distance)
        self.trigger_velocity_update()

    def stop(self, val, t=None):
        """Work out whether I've been flung."""
        self.is_manual = False
        self.displacement += abs(val - self.history[-1][1])
        # Below the drag threshold the gesture is a tap, not a fling.
        if self.displacement <= self.drag_threshold:
            self.velocity = 0
        return super().stop(val, t)
|
py | 7dfb3d8b0603560e6605afe86d72eba0d0b376e5 | from typing import List
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Response, status
from app.api.dependencies import get_api_key, get_db
from app.db.database import MSSQLConnection
from app.schemas.requirements import (
RequirementResponse,
RequirementsCreateRequest,
RequirementsUpdateRequest,
)
from app.services.exceptions import (
InternalDatabaseError,
InvalidAuthenticationKeyForRequest,
)
from app.services.requirements import RequirementService
router = APIRouter()
@router.get("", response_model=List[RequirementResponse])
async def list_requirements(
db: MSSQLConnection = Depends(get_db),
) -> List[RequirementResponse]:
"""
**Retrieves the list of requirements.**
"""
return await RequirementService(db).get_multi()
@router.get(
    "/{requirement_id}",
    response_model=RequirementResponse,
    responses={
        status.HTTP_404_NOT_FOUND: {
            "description": "The requirement with the specified id could not "
            "be found."
        }
    },
)
async def retrieve_requirement_by_id(
    requirement_id: int, db: MSSQLConnection = Depends(get_db)
) -> RequirementResponse:
    """
    **Retrieves a requirement with the id from the `requirement_id` path
    parameter.**
    """
    requirement = await RequirementService(db).get(requirement_id)
    # Translate a missing row into an HTTP 404.
    if requirement is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    return requirement
@router.post(
    "",
    response_model=RequirementResponse,
    responses={
        status.HTTP_401_UNAUTHORIZED: {"description": "Invalid credentials."},
        status.HTTP_403_FORBIDDEN: {
            "description": "Invalid permissions or credentials."
        },
    },
)
async def create_requirement(
    body: RequirementsCreateRequest,
    db: MSSQLConnection = Depends(get_db),
    api_key: UUID = Depends(get_api_key),
) -> RequirementResponse:
    """
    **Creates a new requirement with the entity enclosed in the request
    body.** On success, the new requirement is returned in the body of the
    response.
    """
    try:
        requirement = await RequirementService(db).create(body, api_key)
    # 403 when the key lacks permission, 500 on database failure.
    except InvalidAuthenticationKeyForRequest as e:
        raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)
    except InternalDatabaseError:
        raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)
    return requirement
@router.put(
    "/{requirement_id}",
    response_model=RequirementResponse,
    responses={
        status.HTTP_401_UNAUTHORIZED: {"description": "Invalid credentials."},
        status.HTTP_403_FORBIDDEN: {
            "description": "Invalid permissions or credentials."
        },
        status.HTTP_404_NOT_FOUND: {
            # FIX: said "location" -- copy-paste from another router.
            "description": "The requirement with the specified id could not "
            "be found."
        },
    },
)
async def update_requirement(
    requirement_id: int,
    body: RequirementsUpdateRequest,
    db: MSSQLConnection = Depends(get_db),
    api_key: UUID = Depends(get_api_key),
) -> RequirementResponse:
    """
    **Updates a requirement with the id from the `requirement_id` path
    parameter with the entity enclosed in the request body.** On success,
    the updated requirement is returned in the body of the response.
    """
    # Reuse one service instance for both the existence check and the
    # update instead of constructing it twice.
    service = RequirementService(db)
    # Check if requirement with the id exists
    requirement = await service.get(requirement_id)
    if not requirement:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    # Perform update
    try:
        requirement = await service.update(requirement_id, body, api_key)
    except InvalidAuthenticationKeyForRequest as e:
        raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)
    except InternalDatabaseError:
        raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)
    return requirement
@router.delete(
    "/{requirement_id}",
    status_code=status.HTTP_204_NO_CONTENT,
    responses={
        status.HTTP_204_NO_CONTENT: {
            "description": "The requirement with the specified id has been "
            "successfully deleted."
        },
        status.HTTP_401_UNAUTHORIZED: {"description": "Invalid credentials."},
        status.HTTP_403_FORBIDDEN: {
            "description": "Invalid permissions or credentials."
        },
        status.HTTP_404_NOT_FOUND: {
            "description": "The requirement with the specified id could not "
            "be found."
        },
    },
)
async def delete_requirement_by_id(
    requirement_id: int,
    db: MSSQLConnection = Depends(get_db),
    api_key: UUID = Depends(get_api_key),
) -> Response:
    """
    **Deletes a requirement with the id from the `requirement_id` path
    parameter.**
    """
    # Reuse one service instance for both the existence check and the
    # deletion instead of constructing it twice.
    service = RequirementService(db)
    # Check if requirement with the id exists
    requirement = await service.get(requirement_id)
    if not requirement:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    # Perform deletion
    try:
        await service.delete(requirement_id, api_key)
    except InvalidAuthenticationKeyForRequest as e:
        raise HTTPException(status.HTTP_403_FORBIDDEN, e.message)
    except InternalDatabaseError:
        raise HTTPException(status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response(status_code=status.HTTP_204_NO_CONTENT)
|
py | 7dfb3dac94e01aacf2f0e4a2bd52949ccde53eca | # Jay Williams 2017
# This module is included as part of https://github.com/codingJWilliams/jw_utils
# Liscenced according to ../../LISCENCE
# Contact codingJWilliams on github with any queries
import jw_utils
class Menu:
    """A simple numbered console menu.

    Options are registered with ``addOption`` and rendered as a table;
    ``doMenu`` loops reading the user's choice until ``endOptionLoop``
    is called from one of the option callbacks.
    """

    def __init__(self, name):
        self.name = name      # title printed above the option table
        self.options = []     # option dicts: name / cb / number / showExit
        self.endLoop = False  # set True to leave the doMenu loop

    def endOptionLoop(self):
        """Request that ``doMenu`` exit after the current callback."""
        self.endLoop = True

    def addOption(self, optionName, callback, shouldShowExit=True):
        """Append a menu entry; its key is the next sequential number.

        Returns True (kept for backward compatibility with callers that
        check the result).
        """
        self.options.append({
            "name": optionName,
            "cb": callback,
            "number": str(len(self.options) + 1),
            "showExit": shouldShowExit,
        })
        return True

    def doMenu(self):
        """Run the interactive menu loop until endOptionLoop() is called."""
        self.endLoop = False
        table_rows = [["Key", "Action"]]
        table_rows += [[option["number"], option["name"]]
                       for option in self.options]
        table_text = self.name + "\n\n"
        table_text += jw_utils.graphics.tables.get_table(table_rows)
        while not self.endLoop:
            print("\n" * 150)  # crude terminal "clear"
            print(table_text)
            choice = input("> ")
            opt = self.getOption(choice)
            if opt is False:
                print("----- Invalid Choice. Please choose again -----")
            else:
                opt["cb"]()
                if opt["showExit"]:
                    input("Press enter to go back to menu.")

    def getOption(self, number):
        """Return the option dict whose key matches *number*, or False."""
        return next(
            (opt for opt in self.options if opt["number"] == number),
            False,
        )
|
py | 7dfb3e8e8133e3a30ad90145e8bb91ad403e2bcc | """
Event module
"""
# Import modules
import mysql.connector
import numpy as np
import scipy
import matplotlib.pyplot as plt
from inpoly import inpoly2
import pickle
import datetime
import math
import time
from src import publish_mqtt
__author__ = "Vaclav Kuna"
__copyright__ = ""
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Vaclav Kuna"
__email__ = "[email protected]"
__status__ = ""
class Event:
    """Associates seismic detections into events and estimates their
    location, origin time, and magnitude via Bayesian grid updates.

    Fix vs. previous revision: ``prior_mag`` no longer overwrites the
    Gutenberg-Richter prior with a uniform array before exponentiation
    (a leftover debug line made the "gutenberg" prior effectively
    constant). Bare ``except:`` clauses were narrowed to
    ``except Exception:`` so KeyboardInterrupt/SystemExit propagate.
    """

    def __init__(self, devices, detections, events, travel_times, params) -> None:
        super().__init__()
        self.devices = devices          # device metadata table wrapper (.data DataFrame)
        self.detections = detections    # detections table wrapper (.data DataFrame)
        self.params = params            # configuration dict
        self.events = events            # events table wrapper (.data DataFrame, .update())
        self.travel_times = travel_times  # precomputed travel-time grids per device
        self.active_events = {}         # event_id -> {"loc_prob": 2-D grid of log-probabilities}

    def find_and_locate(self):
        """One pass of the associate/locate/update pipeline."""
        # 1. Get new detections
        new_detections = self.get_detections()

        # 2. Associate new detections with events
        # for each new detection
        for new_index, new_detection in new_detections.iterrows():

            # initially, the detection is not associated
            det_assoc = False

            for event_id in self.active_events.keys():
                # while not associated, continue trying
                det_assoc = self.associate(event_id, new_index, new_detection)
                if det_assoc == True:
                    break

            if det_assoc == False:
                # if it could not be associated with an existing event, create a new one
                self.set_new_event(new_index, new_detection)

            print("⭐ New detection at the device " + new_detection["device_id"] + ".")
            print(
                "   Associated with event id: "
                + str(self.detections.data["event_id"].iloc[-1])
            )

        # 3. Update location and magnitude of each event
        # (iterate over a snapshot of keys: stale events may be deleted)
        for event_id in list(self.active_events.keys()):

            # time since the last detection
            tsl = self.time_since_last(event_id)

            # Delete event if it is too old
            if tsl > self.params["tsl_max"]:
                del self.active_events[event_id]

            # Or update location, magnitude, and origin time
            else:
                self.update_events(event_id)
                json_data = (
                    self.events.data[self.events.data["event_id"] == event_id]
                    .iloc[-1]
                    .to_json()
                )
                publish_mqtt.run("event", json_data)

    def get_detections(self):
        """Return detections not yet assigned to any event (event_id is null)."""
        new_detections = self.detections.data[self.detections.data["event_id"].isnull()]
        return new_detections

    def set_new_event(self, new_index, new_detection):
        """Create a new active event seeded from a single detection."""
        # Get event ID: one greater than any id seen so far (table or active),
        # falling back to 1 when there are no events yet.
        try:
            event_id = self.events.data["event_id"].to_list()
            event_id.append(max(self.active_events.keys()))
            event_id = max(event_id) + 1
        except Exception:
            event_id = 1
        self.active_events[event_id] = {}

        # Get location prior based on the first detection (Voronoi cell of
        # the first-triggered device, i.e. "not-yet-arrived" reasoning)
        self.get_loc_not_yet_arrived(event_id, new_detection)

        # Associate detection with event
        self.detections.data.loc[new_index, "event_id"] = event_id

    def prior_loc(self):
        """
        This function sets the prior probability distribution for earthquake location
        The function is rather a placeholder for a more sophisticated initial distribution
        given by historical seismicity etc.
        """
        loc_prob = np.zeros_like(self.travel_times.grid_lat)
        return loc_prob

    def prior_mag(self):
        """
        This function sets the prior probability distribution for magnitude.

        For ``prior_type == "gutenberg"`` the prior (in log10 space) rises
        linearly from Mc-2 to the magnitude of completeness Mc, peaks at Mc,
        and decays to 0 at magnitude 10 with slope ``b_value``; it is then
        exponentiated (10**x) and normalized. ``"constant"`` yields a flat
        prior.

        Returns:
            (mag_prob, mag_bins): normalized probability array and the
            matching magnitude bin centers (0 to 10, step 0.01).
        """
        prior_type = self.params["prior_type"]
        mc = self.params["mc"]
        b_value = self.params["b_value"]

        # set limits on magnitude and the discretization step
        mag_step = 0.01
        mag_min = 0
        mag_max = 10
        mag_bins = np.arange(mag_min, mag_max, mag_step)

        if prior_type == "gutenberg":
            # create an array with zero probability everywhere
            mag_prob = np.zeros(len(mag_bins))
            mag_step = mag_bins[1] - mag_bins[0]

            # the index of Mc
            peak_index = int(mc * 1 / mag_step)

            # the linear decrease with the b_value (log10 space)
            max_value = (10 - mc) * b_value
            num_of_steps = (10 - mc) * (1 / mag_step)
            mag_prob[peak_index:] = max_value - np.arange(
                0, max_value, max_value / num_of_steps
            )

            # the linear increase to the Mc
            num_of_steps = int(2 * (1 / mag_step))
            mag_prob[peak_index - num_of_steps : peak_index] = np.arange(
                0, max_value, max_value / num_of_steps
            )

            # BUGFIX: a stray `mag_prob = np.ones(len(mag_bins))` here used to
            # discard the piecewise-linear prior computed above, which made
            # the "gutenberg" prior uniform after exponentiation.

            # transform from linear to exponential
            mag_prob = 10 ** mag_prob

        elif prior_type == "constant":
            mag_prob = np.ones(len(mag_bins))

        # normalize probability density function
        mag_prob = mag_prob / max(np.cumsum(mag_prob))

        # return the probability function
        return mag_prob, mag_bins

    def get_loc_not_yet_arrived(self, event_id, new_detection):
        """
        Initialize the event's location probability grid: the epicenter is
        assumed to lie inside the Voronoi cell of the first-triggering
        device (closer to it than to any device that has not triggered).
        """
        # get list of active stations
        active_devices = self.get_active_devices()

        # get the station with the first detection
        first_device = new_detection["device_id"]

        # get all the not-yet arrived devices
        nya_devices = list(set(active_devices) ^ set([first_device]))

        # device loc
        device_loc = self.devices.data

        # get location of all the not-yet arrived devices
        loc_nya = [
            (
                device_loc[device_loc["device_id"] == n]["longitude"].to_list()[0],
                device_loc[device_loc["device_id"] == n]["latitude"].to_list()[0],
            )
            for n in nya_devices
        ]

        # get location of the detected device
        loc_det = [
            (
                device_loc[device_loc["device_id"] == first_device][
                    "longitude"
                ].to_list()[0],
                device_loc[device_loc["device_id"] == first_device][
                    "latitude"
                ].to_list()[0],
            )
        ]

        # append the loc_det at the beginning so its Voronoi region is index 0
        loc_all = loc_det + loc_nya

        # compute the Voronoi cells
        vor = scipy.spatial.Voronoi(loc_all)
        regions, vertices = self.voronoi_finite_polygons_2d(vor)

        # get the lat and lon grid
        lat_grid = self.travel_times.grid_lat
        lon_grid = self.travel_times.grid_lon

        # get the polygon around the device with detection
        polygon = vertices[regions[0]]

        # get the points in the polygon
        points = np.concatenate(
            (
                np.reshape(lon_grid, (lon_grid.size, 1)),
                np.reshape(lat_grid, (lat_grid.size, 1)),
            ),
            axis=1,
        )
        inside, _ = inpoly2(points, polygon)

        # change the points in the polygons to 1 and out of the polygon to 0
        inside = inside.reshape(lon_grid.shape)
        inside[inside == True] = 1
        inside[inside == False] = 0

        # combine with the (placeholder) location prior
        loc_prior = self.prior_loc()
        best_prob = loc_prior + inside

        # and store the grid as this event's running probability
        self.active_events[event_id] = {"loc_prob": best_prob}

    def get_active_devices(self):
        """Grabs all the devices that are sending data
        This functions as a placeholder for more sophisticated function that
        would grab active devices from some device SOH info
        """
        try:
            device_id = self.devices.data["device_id"]
        except Exception:
            device_id = None
        return device_id

    def globe_distance(self, lat1, lon1, lat2, lon2):
        """Great-circle distance (km) between two lat/lon points (haversine)."""
        # approximate radius of earth in km
        R = 6373.0

        lat1 = math.radians(lat1)
        lon1 = math.radians(lon1)
        lat2 = math.radians(lat2)
        lon2 = math.radians(lon2)

        dlon = lon2 - lon1
        dlat = lat2 - lat1

        a = (
            math.sin(dlat / 2) ** 2
            + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
        )
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))

        distance = R * c
        return distance

    def get_sta_delta(self, event_id, sta, **kwargs):
        """Epicentral distance (km) from device *sta* to the event.

        The event coordinates can be overridden via ``eq_lat``/``eq_lon``
        kwargs; otherwise the latest row for *event_id* in the events
        table is used.
        """
        sta_lat = self.devices.data[self.devices.data["device_id"] == sta]["latitude"]
        sta_lon = self.devices.data[self.devices.data["device_id"] == sta]["longitude"]

        if "eq_lat" in kwargs.keys():
            eq_lat = kwargs["eq_lat"]
        else:
            eq_lat = self.events.data[self.events.data["event_id"] == event_id][
                "lat"
            ].iloc[-1]

        if "eq_lon" in kwargs.keys():
            eq_lon = kwargs["eq_lon"]
        else:
            eq_lon = self.events.data[self.events.data["event_id"] == event_id][
                "lon"
            ].iloc[-1]

        epic_dist = self.globe_distance(sta_lat, sta_lon, eq_lat, eq_lon)

        return epic_dist

    def time_since_last(self, event_id):
        """
        Get time elapsed (s) since the last detection associated with the event.
        """
        # get timestamp for the received trace (current UTC epoch seconds)
        dt = datetime.datetime.now(datetime.timezone.utc)
        utc_time = dt.replace(tzinfo=datetime.timezone.utc)
        cloud_t = utc_time.timestamp()

        last_detection = self.detections.data[
            self.detections.data["event_id"] == event_id
        ]["cloud_t"].iloc[-1]

        last_det_time = cloud_t - last_detection

        return last_det_time

    def num_of_dets(self):
        """
        Get the number of associated detections.

        NOTE(review): this iterates ``self.detections.values()`` as if
        detections were a dict of records, but everywhere else
        ``self.detections.data`` is a DataFrame — this method looks stale
        or dead; verify before relying on it.
        """
        number_of_detections = len(
            [n["assoc"] for n in self.detections.values() if n["assoc"] == True]
        )
        return number_of_detections

    def voronoi_finite_polygons_2d(self, vor, radius=None):
        """
        Reconstruct infinite voronoi regions in a 2D diagram to finite
        regions.
        Parameters
        ----------
        vor : Voronoi
            Input diagram
        radius : float, optional
            Distance to 'points at infinity'.
        Returns
        -------
        regions : list of tuples
            Indices of vertices in each revised Voronoi regions.
        vertices : list of tuples
            Coordinates for revised Voronoi vertices. Same as coordinates
            of input vertices, with 'points at infinity' appended to the
            end.
        Credit: Pauli Virtanen, github: pv
        """
        if vor.points.shape[1] != 2:
            raise ValueError("Requires 2D input")

        new_regions = []
        new_vertices = vor.vertices.tolist()

        center = vor.points.mean(axis=0)
        if radius is None:
            radius = vor.points.ptp().max() * 2

        # Construct a map containing all ridges for a given point
        all_ridges = {}
        for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
            all_ridges.setdefault(p1, []).append((p2, v1, v2))
            all_ridges.setdefault(p2, []).append((p1, v1, v2))

        # Reconstruct infinite regions
        for p1, region in enumerate(vor.point_region):
            vertices = vor.regions[region]

            if all(v >= 0 for v in vertices):
                # finite region
                new_regions.append(vertices)
                continue

            # reconstruct a non-finite region
            ridges = all_ridges[p1]
            new_region = [v for v in vertices if v >= 0]

            for p2, v1, v2 in ridges:
                if v2 < 0:
                    v1, v2 = v2, v1
                if v1 >= 0:
                    # finite ridge: already in the region
                    continue

                # Compute the missing endpoint of an infinite ridge
                t = vor.points[p2] - vor.points[p1]  # tangent
                t /= np.linalg.norm(t)
                n = np.array([-t[1], t[0]])  # normal

                midpoint = vor.points[[p1, p2]].mean(axis=0)
                direction = np.sign(np.dot(midpoint - center, n)) * n
                far_point = vor.vertices[v2] + direction * radius

                new_region.append(len(new_vertices))
                new_vertices.append(far_point.tolist())

            # sort region counterclockwise
            vs = np.asarray([new_vertices[v] for v in new_region])
            c = vs.mean(axis=0)
            angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
            new_region = np.array(new_region)[np.argsort(angles)]

            # finish
            new_regions.append(new_region.tolist())

        return new_regions, np.asarray(new_vertices)

    def get_best_location(self, event_id, add_prob=0):
        """Return (lat, lon, depth, origin_time) maximizing the event's
        location probability grid, optionally with *add_prob* added first.
        Depth is fixed by params; origin time is derived from the first
        detection minus the travel time to the best grid node."""
        lat = self.travel_times.grid_lat
        lon = self.travel_times.grid_lon

        # initial probability is equal to the prior
        loc_prob = self.active_events[event_id]["loc_prob"]

        # add additional probability (for calling the function by the associator)
        loc_prob = loc_prob + add_prob

        # get best location
        best_lat = lat[loc_prob == loc_prob.max()][0]
        best_lon = lon[loc_prob == loc_prob.max()][0]
        best_depth = self.params["eq_depth"]  # depth is fixed for all

        # get first detection
        first_det = self.detections.data[
            self.detections.data["event_id"] == event_id
        ].iloc[0]

        # get origin time based on the location and the first detection
        first_sta = first_det["device_id"]
        first_time = first_det["cloud_t"]
        sta_travel_time = self.travel_times.travel_times[first_sta][
            loc_prob == loc_prob.max()
        ]
        best_orig_time = first_time - sta_travel_time[0]

        return best_lat, best_lon, best_depth, best_orig_time

    def get_magnitude(self, event_id, best_lat, best_lon):
        """
        This function uses the station magnitude estimation and calculates
        the probability distribution for the magnitude.
        It also updates the most likely magnitude and the 68 and 96 percent
        probability intervals
        """
        # get magnitude bins and prior
        mag_prob, mag_bins = self.prior_mag()

        # get all detections
        detections = self.detections.data[self.detections.data["event_id"] == event_id]

        for _, det in detections.iterrows():

            det_sta = det["device_id"]
            pd_all = det[
                ["mag1", "mag2", "mag3", "mag4", "mag5", "mag6", "mag7", "mag8", "mag9"]
            ]
            pd = [n for n in pd_all if n is not None]

            # Skip stations whose displacement proxies are missing or whose
            # regression params are not configured (broad guard kept on
            # purpose: one bad station must not abort the whole update).
            try:
                pd_type = "mag" + str(len(pd))
                pd = pd[-1]

                a = self.params[pd_type][0]
                b = self.params[pd_type][1]
                c = self.params[pd_type][2]
                std = self.params[pd_type][3]

                # Normalize the displacement for the epicentral distance of 1 km
                dist = self.get_sta_delta(
                    event_id, sta=det_sta, eq_lat=best_lat, eq_lon=best_lon
                )
                pd = np.log10(pd) + c * np.log10(dist + 1)

                # Calculate station magnitude from pd given the linear function with a, b, c
                sta_mag_mu = a * pd + b

                # generate the probability distribution for the station magnitude
                p_m_pd = scipy.stats.norm(sta_mag_mu, std).pdf(mag_bins)

                # multiply the prior and the current measurement (the Bayes happens in here)
                mag_prob = np.multiply(mag_prob, p_m_pd)
            except Exception:
                pass

        # normalize the mag_prob
        mag_prob = mag_prob / max(np.cumsum(mag_prob))

        # get magnitude and confidence bounds from the cumulative distribution
        magnitude = mag_bins[np.argmax(mag_prob)]
        cum_prob = np.cumsum(mag_prob)
        conf2 = mag_bins[np.argmin(abs(cum_prob - 0.02))]
        conf16 = mag_bins[np.argmin(abs(cum_prob - 0.16))]
        conf84 = mag_bins[np.argmin(abs(cum_prob - 0.84))]
        conf98 = mag_bins[np.argmin(abs(cum_prob - 0.98))]

        return magnitude, conf2, conf16, conf84, conf98

    def update_events(self, event_id):
        """Recompute location and magnitude for an event and append a new
        row to the events table."""
        # Update location
        best_lat, best_lon, best_depth, best_orig_time = self.get_best_location(
            event_id
        )

        # Update magnitude
        magnitude, conf2, conf16, conf84, conf98 = self.get_magnitude(
            event_id, best_lat, best_lon
        )

        # Number of associated phases
        num_assoc = len(
            self.detections.data[self.detections.data["event_id"] == event_id]
        )

        # Add line in events
        new_event = {
            "event_id": event_id,
            "cloud_t": 0,
            "orig_time": best_orig_time,
            "lat": best_lat,
            "lon": best_lon,
            "dep": best_depth,
            "mag": magnitude,
            "num_assoc": num_assoc,
        }
        self.events.update(new_event)

        print("🔥 Event id " + str(event_id) + " in progress:")
        print(
            "   Magnitude: "
            + str(magnitude)
            + ", Lat: "
            + str(best_lat)
            + ", Lon: "
            + str(best_lon)
            + ", Associated detections: "
            + str(num_assoc)
            + "."
        )

    def associate(self, event_id, new_index, new_detection):
        """
        Try to associate *new_detection* with *event_id*.

        Builds a differential-travel-time probability grid against every
        detection already in the event, then accepts the association when
        the RMS pick misfit at the resulting best location is below
        ``params["assoc_win"]``. Returns True on success, False otherwise.
        """
        # get all detections of the event
        all_detections = self.detections.data[
            self.detections.data["event_id"] == event_id
        ]

        # get all detected devices
        detected_devices = all_detections["device_id"]

        # get the new device id and detection time
        new_detection_id = new_detection["detection_id"]
        new_device = new_detection["device_id"]
        new_time = new_detection["cloud_t"]

        # set a new list of new probabilities
        new_prob = np.zeros_like(self.travel_times.grid_lat)

        # NOTE(review): `in` on a pandas Series tests the *index*, not the
        # values — if value membership was intended this should be
        # `detected_devices.values`; verify against the data layout.
        if new_device not in detected_devices:

            # loop over all associated detections
            for _, detection in all_detections.iterrows():

                # get device ID and detection time
                det_device = detection["device_id"]
                det_time = detection["cloud_t"]

                # get sigma
                sigma = self.get_sigma(event_id, new_device, det_device)

                # calculate probability curve (Gaussian in differential
                # travel time between the two devices)
                tt_prob = np.exp(
                    -(
                        (
                            self.travel_times.travel_times[det_device]
                            - self.travel_times.travel_times[new_device]
                            - det_time
                            + new_time
                        )
                        ** 2
                    )
                    / (2 * sigma ** 2)
                )

                # and add the probability the rest
                new_prob = new_prob + tt_prob

            # ASSOCIATE THE NEW DETECTION
            # get updated potential location of the eq epicenter
            best_lat, best_lon, _, _ = self.get_best_location(
                event_id, add_prob=new_prob
            )

            # test the RMS of mispicks
            tt_precalc = self.travel_times.tt_vector
            misfit = []

            # get the new location
            for _, detection in all_detections.iterrows():

                det_device_old = detection["device_id"]
                det_time_old = detection["cloud_t"]

                epic_dist_old = self.get_sta_delta(
                    event_id, det_device_old, eq_lat=best_lat, eq_lon=best_lon
                )
                epic_dist_new = self.get_sta_delta(
                    event_id, new_device, eq_lat=best_lat, eq_lon=best_lon
                )

                # find the closest time from the tt_precalc and place it in the grid
                # (distances are converted from km to degrees via 111.3 km/deg)
                tt_old = tt_precalc["travel_time"][
                    np.argmin(np.abs(tt_precalc["dist"] - epic_dist_old / 111.3))
                ]
                tt_new = tt_precalc["travel_time"][
                    np.argmin(np.abs(tt_precalc["dist"] - epic_dist_new / 111.3))
                ]

                misfit.append(((tt_old - tt_new) - (det_time_old - new_time)) ** 2)

            misfit_mean = np.sqrt(np.sum(np.array(misfit)) / len(misfit))

            assoc_win = self.params["assoc_win"]
            if misfit_mean < assoc_win:

                # if associated, append the probabilities
                self.active_events[event_id]["loc_prob"] = (
                    self.active_events[event_id]["loc_prob"] + new_prob
                )

                # add new detection to detections
                self.detections.data.loc[new_index, "event_id"] = event_id

                return True

            else:
                return False

    def get_sigma(self, event_id, new_device, det_device):
        """
        Get sigma from distances between the detections and earthquakes
        """
        # if constant sigma is chosen
        if self.params["sigma_type"] == "const":
            sigma = self.params["sigma_const"]

        # if sigma is computed from the sigmoid function
        elif self.params["sigma_type"] == "linear":
            try:
                dist1 = self.get_sta_delta(event_id, new_device)
                dist2 = self.get_sta_delta(event_id, det_device)
                dist_ave = (dist1 + dist2) / 2
                sigma = dist_ave * 0.05 + 1
                if sigma > 8:
                    sigma = 8
            except Exception:
                # fall back to the constant when the event has no location yet
                sigma = self.params["sigma_const"]

        return sigma

    def run(self):
        """Run the associate/locate loop indefinitely, sleeping between passes."""
        while True:
            self.find_and_locate()
            time.sleep(self.params["sleep_time"])
|
py | 7dfb3eeddbdcca4a24d69d2cae8136536d34d30f | import warnings
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.views import LogoutView
from django.contrib.messages.views import SuccessMessageMixin
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import redirect, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import gettext as _
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from guardian.decorators import permission_required_or_403
from userena import settings as userena_settings
from userena import signals as userena_signals
from userena.decorators import secure_required
from userena.forms import (
SignupForm,
SignupFormOnlyEmail,
AuthenticationForm,
ChangeEmailForm,
EditProfileForm,
)
from userena.models import UserenaSignup
from userena.utils import signin_redirect, get_profile_model, get_user_profile
class ExtraContextTemplateView(TemplateView):
    """TemplateView that merges an ``extra_context`` dict into the template
    context, and accepts POST (rendered like GET) so form views can re-display
    on validation errors."""

    extra_context = None

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        if self.extra_context:
            context.update(self.extra_context)
        return context

    # this view is used in POST requests, e.g. signup when the form is not valid
    post = TemplateView.get
class ProfileListView(ListView):
    """Paginated list of all user profiles visible to the requesting user."""

    context_object_name = "profile_list"
    page = 1
    paginate_by = 50
    template_name = userena_settings.USERENA_PROFILE_LIST_TEMPLATE
    extra_context = None

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super().get_context_data(**kwargs)

        # Fall back to the class default when ?page= is absent or malformed.
        try:
            requested_page = int(self.request.GET.get("page", None))
        except (TypeError, ValueError):
            requested_page = self.page

        # The listing can be globally disabled for non-staff users.
        if (
            userena_settings.USERENA_DISABLE_PROFILE_LIST
            and not self.request.user.is_staff
        ):
            raise Http404

        if not self.extra_context:
            self.extra_context = dict()

        context["page"] = requested_page
        context["paginate_by"] = self.paginate_by
        context["extra_context"] = self.extra_context
        return context

    def get_queryset(self):
        # Only profiles the current user is allowed to see.
        return (
            get_profile_model()
            .objects.get_visible_profiles(self.request.user)
            .select_related()
        )
@secure_required
def signup(
    request,
    signup_form=SignupForm,
    template_name="userena/signup_form.html",
    success_url=None,
    extra_context=None,
):
    """
    Signup of an account.
    Signup requiring a username, email and password. After signup a user gets
    an email with an activation link used to activate their account. After
    successful signup redirects to ``success_url``.
    :param signup_form:
        Form that will be used to sign a user. Defaults to userena's
        :class:`SignupForm`.
    :param template_name:
        String containing the template name that will be used to display the
        signup form. Defaults to ``userena/signup_form.html``.
    :param success_url:
        String containing the URI which should be redirected to after a
        successful signup. If not supplied will redirect to
        ``userena_signup_complete`` view.
    :param extra_context:
        Dictionary containing variables which are added to the template
        context. Defaults to a dictionary with a ``form`` key containing the
        ``signup_form``.
    **Context**
    ``form``
        Form supplied by ``signup_form``.
    """
    # If signup is disabled, return 403
    if userena_settings.USERENA_DISABLE_SIGNUP:
        raise PermissionDenied

    # If no usernames are wanted and the default form is used, fallback to the
    # default form that doesn't display to enter the username.
    if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
        signup_form = SignupFormOnlyEmail

    form = signup_form()

    if request.method == "POST":
        form = signup_form(request.POST, request.FILES)
        if form.is_valid():
            user = form.save()

            # Send the signup complete signal
            userena_signals.signup_complete.send(sender=None, user=user)

            if success_url:
                redirect_to = success_url
            else:
                redirect_to = reverse(
                    "userena_signup_complete", kwargs={"username": user.username}
                )

            # A new signed user should logout the old one.
            # (Must happen before the optional auto-signin below.)
            authenticated = request.user.is_authenticated
            if authenticated:
                logout(request)

            # Optionally sign the new user straight in — only when no e-mail
            # activation step is required. check_password=False: the account
            # was just created from validated form data, so re-verifying the
            # password is skipped.
            if (
                userena_settings.USERENA_SIGNIN_AFTER_SIGNUP
                and not userena_settings.USERENA_ACTIVATION_REQUIRED
            ):
                user = authenticate(identification=user.email, check_password=False)
                login(request, user)

            return redirect(redirect_to)

    # GET, or invalid POST: re-render the form (bound with errors if POSTed).
    if not extra_context:
        extra_context = dict()
    extra_context["form"] = form
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
@secure_required
def activate(
    request,
    activation_key,
    template_name="userena/activate_fail.html",
    retry_template_name="userena/activate_retry.html",
    success_url=None,
    extra_context=None,
):
    """
    Activate a user with an activation key.
    The key is a SHA1 string. When the SHA1 is found with an
    :class:`UserenaSignup`, the :class:`User` of that account will be
    activated. After a successful activation the view will redirect to
    ``success_url``. If the SHA1 is not found, the user will be shown the
    ``template_name`` template displaying a fail message.
    If the SHA1 is found but expired, ``retry_template_name`` is used instead,
    so the user can proceed to :func:`activate_retry` to get a new activation key.
    :param activation_key:
        String of a SHA1 string of 40 characters long. A SHA1 is always 160bit
        long, with 4 bits per character this makes it --160/4-- 40 characters
        long.
    :param template_name:
        String containing the template name that is used when the
        ``activation_key`` is invalid and the activation fails. Defaults to
        ``userena/activate_fail.html``.
    :param retry_template_name:
        String containing the template name that is used when the
        ``activation_key`` is expired. Defaults to
        ``userena/activate_retry.html``.
    :param success_url:
        String containing the URL where the user should be redirected to after
        a successful activation. Will replace ``%(username)s`` with string
        formatting if supplied. If ``success_url`` is left empty, will direct
        to ``userena_profile_detail`` view.
    :param extra_context:
        Dictionary containing variables which could be added to the template
        context. Default to an empty dictionary.
    """
    try:
        # Attempt activation unless the key is expired AND retry is enabled;
        # in that case fall through to the retry page (else-branch below).
        if (
            not UserenaSignup.objects.check_expired_activation(activation_key)
            or not userena_settings.USERENA_ACTIVATION_RETRY
        ):
            user = UserenaSignup.objects.activate_user(activation_key)
            if user:
                # Sign the user in.
                # check_password=False: possession of the activation key is
                # the credential here; no password re-check.
                auth_user = authenticate(
                    identification=user.email, check_password=False
                )
                login(request, auth_user)

                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(
                        request,
                        _(
                            "Your account has been activated and you have been signed in."
                        ),
                        fail_silently=True,
                    )

                if success_url:
                    redirect_to = success_url % {"username": user.username}
                else:
                    redirect_to = reverse(
                        "userena_profile_detail", kwargs={"username": user.username}
                    )
                return redirect(redirect_to)
            else:
                # Key exists but activation failed: show the failure page.
                if not extra_context:
                    extra_context = dict()
                return ExtraContextTemplateView.as_view(
                    template_name=template_name, extra_context=extra_context
                )(request)
        else:
            # Expired key with retry enabled: offer to reissue the key.
            if not extra_context:
                extra_context = dict()
            extra_context["activation_key"] = activation_key
            return ExtraContextTemplateView.as_view(
                template_name=retry_template_name, extra_context=extra_context
            )(request)
    except UserenaSignup.DoesNotExist:
        # Unknown key: show the failure page.
        if not extra_context:
            extra_context = dict()
        return ExtraContextTemplateView.as_view(
            template_name=template_name, extra_context=extra_context
        )(request)
@secure_required
def activate_retry(
    request,
    activation_key,
    template_name="userena/activate_retry_success.html",
    extra_context=None,
):
    """
    Reissue a new ``activation_key`` for the user with the expired
    ``activation_key``.

    If the key does not exist, ``USERENA_ACTIVATION_RETRY`` is disabled, the
    key has not actually expired, or reissuing fails, the user is redirected
    to :func:`activate` so the regular failure page is shown.

    :param activation_key:
        40-character SHA1 activation key.
    :param template_name:
        Template rendered after a new key has been issued. Defaults to
        ``userena/activate_retry_success.html``.
    :param extra_context:
        Extra template context variables; defaults to an empty dict.
    """
    if not userena_settings.USERENA_ACTIVATION_RETRY:
        return redirect(reverse("userena_activate", args=(activation_key,)))

    try:
        # Only an *expired* key qualifies for a reissue; a still-valid or
        # unknown key falls through to the redirect below.
        if UserenaSignup.objects.check_expired_activation(activation_key):
            new_key = UserenaSignup.objects.reissue_activation(activation_key)
            if new_key:
                if not extra_context:
                    extra_context = dict()
                return ExtraContextTemplateView.as_view(
                    template_name=template_name, extra_context=extra_context
                )(request)
    except UserenaSignup.DoesNotExist:
        pass

    # Every non-success path ends at the standard activation view.
    return redirect(reverse("userena_activate", args=(activation_key,)))
@secure_required
def email_confirm(
    request,
    confirmation_key,
    template_name="userena/email_confirm_fail.html",
    success_url=None,
    extra_context=None,
):
    """
    Confirms an email address with a confirmation key.

    Runs :func:`UserenaSignup.objects.confirm_email`; when it returns a
    :class:`User` the new address has been applied and the user is redirected
    to ``success_url`` (or ``userena_email_confirm_complete``). Otherwise the
    failure template is rendered.

    :param confirmation_key:
        SHA1 confirmation key for the pending email change.
    :param template_name:
        Template rendered when confirmation fails. Defaults to
        ``userena/email_confirm_fail.html``.
    :param success_url:
        URL redirected to after a successful confirmation; must be a value
        acceptable to ``redirect``.
    :param extra_context:
        Extra template context variables for the failure template.
    """
    user = UserenaSignup.objects.confirm_email(confirmation_key)

    if not user:
        # Unknown or stale key: render the failure page.
        if not extra_context:
            extra_context = dict()
        return ExtraContextTemplateView.as_view(
            template_name=template_name, extra_context=extra_context
        )(request)

    if userena_settings.USERENA_USE_MESSAGES:
        messages.success(
            request, _("Your email address has been changed."), fail_silently=True
        )

    if success_url:
        redirect_to = success_url
    else:
        redirect_to = reverse(
            "userena_email_confirm_complete", kwargs={"username": user.username}
        )
    return redirect(redirect_to)
def direct_to_user_template(request, username, template_name, extra_context=None):
    """
    Render ``template_name`` for the user identified by ``username``.

    Thin wrapper around the template view for pages tied to a specific
    account (signup complete, activation complete, e-mail confirmation).
    The looked-up user is exposed to the template as ``viewed_user`` and the
    matching profile as ``profile``.

    :param username:
        Username (case-insensitive) of the user the page is about; 404 when
        no such user exists.
    :param template_name:
        Name of the template to render.
    :param extra_context:
        Optional dict of extra template variables; ``viewed_user`` and
        ``profile`` are always set on it.
    """
    viewed_user = get_object_or_404(get_user_model(), username__iexact=username)
    # Reuse the caller's dict when given so its other keys survive.
    extra_context = extra_context or dict()
    extra_context["viewed_user"] = viewed_user
    extra_context["profile"] = get_user_profile(user=viewed_user)
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
def disabled_account(request, username, template_name, extra_context=None):
    """
    Render the "account disabled" page for ``username``.

    Raises 404 when the user does not exist *or* is actually active, so the
    page cannot be used to probe active accounts. The user is exposed to the
    template as ``viewed_user`` and their profile as ``profile``.

    :param username:
        Username (case-insensitive) of the disabled account.
    :param template_name:
        Name of the template to render.
    :param extra_context:
        Optional dict of extra template variables; ``viewed_user`` and
        ``profile`` are always set on it.
    """
    user = get_object_or_404(get_user_model(), username__iexact=username)

    # An active account has no business on this page.
    if user.is_active:
        raise Http404

    extra_context = extra_context or dict()
    extra_context["viewed_user"] = user
    extra_context["profile"] = get_user_profile(user=user)
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
@secure_required
def signin(
    request,
    auth_form=AuthenticationForm,
    template_name="userena/signin_form.html",
    redirect_field_name=REDIRECT_FIELD_NAME,
    redirect_signin_function=signin_redirect,
    extra_context=None,
):
    """
    Signin using email or username with password.
    Signs a user in by combining email/username with password. If the
    combination is correct and the user :func:`is_active` the
    :func:`redirect_signin_function` is called with the arguments
    ``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is is
    trying the login. The returned value of the function will be the URL that
    is redirected to.
    A user can also select to be remembered for ``USERENA_REMEMBER_DAYS``.
    :param auth_form:
        Form to use for signing the user in. Defaults to the
        :class:`AuthenticationForm` supplied by userena.
    :param template_name:
        String defining the name of the template to use. Defaults to
        ``userena/signin_form.html``.
    :param redirect_field_name:
        Form field name which contains the value for a redirect to the
        succeeding page. Defaults to ``next`` and is set in
        ``REDIRECT_FIELD_NAME`` setting.
    :param redirect_signin_function:
        Function which handles the redirect. This functions gets the value of
        ``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
        must return a string which specifies the URI to redirect to.
    :param extra_context:
        A dictionary containing extra variables that should be passed to the
        rendered template. The ``form`` key is always the ``auth_form``.
    **Context**
    ``form``
        Form used for authentication supplied by ``auth_form``.
    """
    form = auth_form()

    if request.method == "POST":
        form = auth_form(request.POST, request.FILES)
        if form.is_valid():
            identification, password, remember_me = (
                form.cleaned_data["identification"],
                form.cleaned_data["password"],
                form.cleaned_data["remember_me"],
            )
            # NOTE(review): `user` is used unguarded below; this assumes
            # authenticate() cannot return None once form.is_valid() passed
            # (i.e. the form itself verifies the credentials) — confirm
            # against the auth_form implementation.
            user = authenticate(identification=identification, password=password)
            if user.is_active:
                login(request, user)
                if remember_me:
                    # USERENA_REMEMBER_ME_DAYS is (label, days); convert the
                    # day count to seconds for the session expiry.
                    request.session.set_expiry(
                        userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400
                    )
                else:
                    # Expire the session when the browser closes.
                    request.session.set_expiry(0)

                if userena_settings.USERENA_USE_MESSAGES:
                    messages.success(
                        request, _("You have been signed in."), fail_silently=True
                    )

                # send a signal that a user has signed in
                userena_signals.account_signin.send(sender=None, user=user)
                # Whereto now?
                # GET takes precedence over POST for the "next" value.
                redirect_to = redirect_signin_function(
                    request.GET.get(
                        redirect_field_name, request.POST.get(redirect_field_name)
                    ),
                    user,
                )
                return HttpResponseRedirect(redirect_to)
            else:
                # Valid credentials but deactivated account.
                return redirect(
                    reverse("userena_disabled", kwargs={"username": user.username})
                )

    # GET, or invalid POST: re-render the form, preserving the "next" value.
    if not extra_context:
        extra_context = dict()
    extra_context.update(
        {
            "form": form,
            "next": request.GET.get(
                redirect_field_name, request.POST.get(redirect_field_name)
            ),
        }
    )
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
class SignoutView(LogoutView, SuccessMessageMixin):
    """
    Sign out the current user.

    Wraps Django's ``LogoutView``: adds a success message ``You have been
    signed out.`` (when ``USERENA_USE_MESSAGES`` is enabled), forces HTTPS via
    the ``secure_required`` decorator, and emits the ``account_signout``
    signal after the sign-out has been processed.

    :param next_page:
        A string which specifies the URI to redirect to.
    :param template_name:
        String defining the name of the template to use. Defaults to
        ``userena/signout.html``.
    """

    # NOTE(review): mixins are conventionally listed *before* the view class
    # (``SuccessMessageMixin, LogoutView``) so their hooks run first in the
    # MRO — confirm the success message actually renders with this ordering.
    template_name = "userena/signout.html"
    next_page = userena_settings.USERENA_REDIRECT_ON_SIGNOUT

    def get_success_message(self, cleaned_data):
        """Return the sign-out message, or "" when messaging is disabled
        or the user was not authenticated to begin with."""
        authenticated = self.request.user.is_authenticated
        if authenticated and userena_settings.USERENA_USE_MESSAGES:  # pragma: no cover
            return _("You have been signed out.")
        else:
            return ""

    @method_decorator(secure_required)
    def dispatch(self, request, *args, **kwargs):
        """Run the normal logout flow, then broadcast ``account_signout``.

        The signal is sent *after* ``super().dispatch`` so listeners observe
        the request in its post-logout state.
        """
        response = super(SignoutView, self).dispatch(request, *args, **kwargs)
        userena_signals.account_signout.send(sender=None, user=request.user)
        return response
@secure_required
@permission_required_or_403("change_user", (get_user_model(), "username", "username"))
def email_change(
    request,
    username,
    email_form=ChangeEmailForm,
    template_name="userena/email_form.html",
    success_url=None,
    extra_context=None,
):
    """
    Change email address.

    :param username:
        String of the username which specifies the current account.
    :param email_form:
        Form that will be used to change the email address. Defaults to
        :class:`ChangeEmailForm` supplied by userena.
    :param template_name:
        String containing the template to be used to display the email form.
        Defaults to ``userena/email_form.html``.
    :param success_url:
        Named URL where the user will get redirected to when successfully
        changing their email address. When not supplied will redirect to
        ``userena_email_complete`` URL.
    :param extra_context:
        Dictionary containing extra variables that can be used to render the
        template. The ``form`` key is always the form supplied by the keyword
        argument ``form`` and the ``user`` key by the user whose email address
        is being changed.

    **Context**

    ``form``
        Form that is used to change the email address supplied by ``form``.

    ``account``
        Instance of the ``Account`` whose email address is about to be changed.

    **Todo**

    Need to have per-object permissions, which enables users with the correct
    permissions to alter the email address of others.
    """
    user = get_object_or_404(get_user_model(), username__iexact=username)
    prev_email = user.email
    form = email_form(user)

    if request.method == "POST":
        form = email_form(user, request.POST, request.FILES)

        if form.is_valid():
            form.save()

            # Fix: emit the signal on every successful change. It was
            # previously only sent when a custom ``success_url`` was supplied,
            # so the default redirect path never notified listeners. This now
            # matches the unconditional signal in ``password_change``.
            userena_signals.email_change.send(
                sender=None, user=user, prev_email=prev_email, new_email=user.email
            )

            if success_url:
                redirect_to = success_url
            else:
                redirect_to = reverse(
                    "userena_email_change_complete", kwargs={"username": user.username}
                )
            return redirect(redirect_to)

    if not extra_context:
        extra_context = dict()
    extra_context["form"] = form
    extra_context["profile"] = get_user_profile(user=user)
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
@secure_required
@permission_required_or_403("change_user", (get_user_model(), "username", "username"))
def password_change(
    request,
    username,
    template_name="userena/password_form.html",
    pass_form=PasswordChangeForm,
    success_url=None,
    extra_context=None,
):
    """Change the password of a user.

    Mirrors :func:`contrib.auth.views.password_change`, but additionally keys
    on the username so URLs stay logical (and REST) across the application,
    and so administrators can later change other users' passwords through the
    web application itself.

    :param username:
        String supplying the username of the user whose password is about to
        be changed.
    :param template_name:
        String of the name of the template that is used to display the
        password change form. Defaults to ``userena/password_form.html``.
    :param pass_form:
        Form used to change password. Default is the form supplied by Django
        itself named ``PasswordChangeForm``.
    :param success_url:
        Named URL that is passed onto a :func:`reverse` function with
        ``username`` of the active user. Defaults to the
        ``userena_password_complete`` URL.
    :param extra_context:
        Dictionary of extra variables that are passed on to the template. The
        ``form`` key is always used by the form supplied by ``pass_form``.

    **Context**

    ``form``
        Form used to change the password.
    """
    target_user = get_object_or_404(get_user_model(), username__iexact=username)

    if request.method == "POST":
        form = pass_form(user=target_user, data=request.POST)
        if form.is_valid():
            form.save()

            # Let listeners know the password was successfully changed.
            userena_signals.password_complete.send(sender=None, user=target_user)

            redirect_to = success_url or reverse(
                "userena_password_change_complete",
                kwargs={"username": target_user.username},
            )
            return redirect(redirect_to)
    else:
        form = pass_form(user=target_user)

    if not extra_context:
        extra_context = dict()
    extra_context["form"] = form
    extra_context["profile"] = get_user_profile(user=target_user)
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
@secure_required
@permission_required_or_403(
    "change_profile", (get_profile_model(), "user__username", "username")
)
def profile_edit(
    request,
    username,
    edit_profile_form=EditProfileForm,
    template_name="userena/profile_form.html",
    success_url=None,
    extra_context=None,
    **kwargs
):
    """
    Edit profile.

    Edits a profile selected by the supplied username. First checks
    permissions if the user is allowed to edit this profile, if denied will
    show a 404. When the profile is successfully edited will redirect to
    ``success_url``.

    :param username:
        Username of the user which profile should be edited.
    :param edit_profile_form:
        Form that is used to edit the profile. The :func:`EditProfileForm.save`
        method of this form will be called when the form
        :func:`EditProfileForm.is_valid`. Defaults to :class:`EditProfileForm`
        from userena.
    :param template_name:
        String of the template that is used to render this view. Defaults to
        ``userena/edit_profile_form.html``.
    :param success_url:
        Named URL which will be passed on to a django ``reverse`` function after
        the form is successfully saved. Defaults to the ``userena_detail`` url.
    :param extra_context:
        Dictionary containing variables that are passed on to the
        ``template_name`` template. ``form`` key will always be the form used
        to edit the profile, and the ``profile`` key is always the edited
        profile.

    **Context**

    ``form``
        Form that is used to alter the profile.

    ``profile``
        Instance of the ``Profile`` that is edited.
    """
    user = get_object_or_404(get_user_model(), username__iexact=username)

    profile = get_user_profile(user=user)

    user_initial = {"first_name": user.first_name, "last_name": user.last_name}

    form = edit_profile_form(instance=profile, initial=user_initial)

    if request.method == "POST":
        form = edit_profile_form(
            request.POST, request.FILES, instance=profile, initial=user_initial
        )

        if form.is_valid():
            profile = form.save()

            if userena_settings.USERENA_USE_MESSAGES:
                messages.success(
                    request, _("Your profile has been updated."), fail_silently=True
                )

            # Fix: emit the signal on every successful edit. It was previously
            # only sent when a custom ``success_url`` was supplied, so the
            # default redirect path never notified listeners. This now matches
            # the unconditional signal in ``password_change``.
            userena_signals.profile_change.send(sender=None, user=user)

            if success_url:
                redirect_to = success_url
            else:
                redirect_to = reverse(
                    "userena_profile_detail", kwargs={"username": username}
                )
            return redirect(redirect_to)

    if not extra_context:
        extra_context = dict()
    extra_context["form"] = form
    extra_context["profile"] = profile
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
def profile_detail(
    request,
    username,
    template_name=userena_settings.USERENA_PROFILE_DETAIL_TEMPLATE,
    extra_context=None,
    **kwargs
):
    """
    Detailed view of a user's profile.

    Raises :class:`PermissionDenied` when the viewed profile is not visible
    to the requesting user.

    :param username:
        String of the username of which the profile should be viewed.
    :param template_name:
        String representing the template name that should be used to display
        the profile.
    :param extra_context:
        Dictionary of variables which should be supplied to the template. The
        ``profile`` key is always the current profile.

    **Context**

    ``profile``
        Instance of the currently viewed ``Profile``.
    """
    viewed_user = get_object_or_404(get_user_model(), username__iexact=username)
    profile = get_user_profile(user=viewed_user)

    # Guard clause: visibility is decided by the profile itself.
    if not profile.can_view_profile(request.user):
        raise PermissionDenied

    if not extra_context:
        extra_context = dict()
    extra_context.update(
        {
            "profile": profile,
            "hide_email": userena_settings.USERENA_HIDE_EMAIL,
        }
    )
    return ExtraContextTemplateView.as_view(
        template_name=template_name, extra_context=extra_context
    )(request)
def profile_list(
    request,
    page=1,
    template_name="userena/profile_list.html",
    paginate_by=50,
    extra_context=None,
    **kwargs
):  # pragma: no cover
    """
    Deprecated function-based list of all public profiles; use
    :class:`ProfileListView` instead.

    Listing can be disabled entirely by setting
    ``USERENA_DISABLE_PROFILE_LIST`` to ``True`` (staff users still see it).

    :param page:
        Integer of the active page used for pagination. Defaults to the first
        page.
    :param template_name:
        String defining the name of the template that is used to render the
        list of all users. Defaults to ``userena/list.html``.
    :param paginate_by:
        Integer defining the amount of displayed profiles per page. Defaults to
        50 profiles per page.
    :param extra_context:
        Dictionary of variables that are passed on to the ``template_name``
        template.

    **Context**

    ``profile_list``
        A list of profiles.

    ``is_paginated``
        A boolean representing whether the results are paginated.

    If the result is paginated, the context also contains ``paginator``
    (an instance of ``django.core.paginator.Paginator``) and ``page_obj``
    (an instance of ``django.core.paginator.Page``).
    """
    warnings.warn(
        "views.profile_list is deprecated. Use ProfileListView instead",
        DeprecationWarning,
        stacklevel=2,
    )

    # Prefer an explicit ``?page=`` query parameter; keep the positional
    # ``page`` argument when it is absent or not an integer.
    try:
        page = int(request.GET.get("page", None))
    except (TypeError, ValueError):
        pass

    if userena_settings.USERENA_DISABLE_PROFILE_LIST and not request.user.is_staff:
        raise Http404

    queryset = get_profile_model().objects.get_visible_profiles(request.user)

    if not extra_context:
        extra_context = dict()

    return ProfileListView.as_view(
        queryset=queryset,
        paginate_by=paginate_by,
        page=page,
        template_name=template_name,
        extra_context=extra_context,
        **kwargs
    )(request)
|
py | 7dfb40e94d28a9128fef7ba889b91d37f0eb475e | # Generated by Django 2.2.10 on 2021-09-23 16:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for this app: creates a chain of one-to-one models
    # Point -> Location -> Estimate. Location and Estimate use their
    # OneToOneField as the primary key, so each row is keyed by its parent.

    initial = True

    dependencies = [
    ]

    operations = [
        # A geographic point identified only by a free-form string.
        migrations.CreateModel(
            name='Point',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('point_string', models.CharField(max_length=50)),
            ],
        ),
        # Address/geocoding details for a Point, plus QR-code bookkeeping.
        migrations.CreateModel(
            name='Location',
            fields=[
                ('point', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='location', serialize=False, to='app.Point')),
                ('place_id', models.CharField(max_length=50)),
                ('full_address', models.CharField(max_length=150)),
                ('street_address', models.CharField(max_length=150)),
                ('street_number', models.CharField(max_length=8)),
                ('street_name', models.CharField(max_length=50)),
                ('suburb', models.CharField(max_length=50)),
                ('state', models.CharField(max_length=50)),
                ('postcode', models.CharField(max_length=4)),
                ('council', models.CharField(max_length=50)),
                ('lat', models.CharField(max_length=20)),
                ('lon', models.CharField(max_length=20)),
                ('link_code', models.CharField(max_length=10)),
                ('qr_filepath', models.CharField(max_length=100)),
                ('qr_path', models.CharField(max_length=100)),
                ('qrcode', models.ImageField(default='profile_pics/default.jpg', upload_to='profile_pics')),
            ],
        ),
        # Flat mirror of an OpenSolar ("os_") project record attached to a
        # Location; every field is stored as an optional 200-char string.
        # NOTE(review): 'os_assgined_role' looks like a typo of
        # 'os_assigned_role' — it cannot be renamed here without a follow-up
        # migration, since this migration may already be applied.
        migrations.CreateModel(
            name='Estimate',
            fields=[
                ('location', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='estimate', serialize=False, to='app.Location')),
                ('os_identifier', models.CharField(blank=True, max_length=200)),
                ('os_is_residential', models.CharField(blank=True, max_length=200)),
                ('os_lead_source', models.CharField(blank=True, max_length=200)),
                ('os_notes', models.CharField(blank=True, max_length=200)),
                ('os_lat', models.CharField(blank=True, max_length=200)),
                ('os_lon', models.CharField(blank=True, max_length=200)),
                ('os_address', models.CharField(blank=True, max_length=200)),
                ('os_locality', models.CharField(blank=True, max_length=200)),
                ('os_state', models.CharField(blank=True, max_length=200)),
                ('os_country_iso2', models.CharField(blank=True, max_length=200)),
                ('os_zip', models.CharField(blank=True, max_length=200)),
                ('os_number_of_phases', models.CharField(blank=True, max_length=200)),
                ('os_roof_type_id', models.CharField(blank=True, max_length=200)),
                ('os_assgined_role', models.CharField(blank=True, max_length=200)),
                ('os_access', models.CharField(blank=True, max_length=200)),
                ('os_number_of_storeys', models.CharField(blank=True, max_length=200)),
                ('os_events', models.CharField(blank=True, max_length=200)),
                ('os_org_id', models.CharField(blank=True, max_length=200)),
                ('os_actions', models.CharField(blank=True, max_length=200)),
                ('os_org', models.CharField(blank=True, max_length=200)),
                ('os_parcel_identifier', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role_data', models.CharField(blank=True, max_length=200)),
                ('os_payment_option_sold', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role_email', models.CharField(blank=True, max_length=200)),
                ('os_priority', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role_id', models.CharField(blank=True, max_length=200)),
                ('os_private_files_data', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role_name', models.CharField(blank=True, max_length=200)),
                ('os_private_files', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role_phone', models.CharField(blank=True, max_length=200)),
                ('os_proposal_message', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role', models.CharField(blank=True, max_length=200)),
                ('os_proposal_content', models.CharField(blank=True, max_length=200)),
                ('os_assigned_role_accreditation', models.CharField(blank=True, max_length=200)),
                ('os_contract_terms', models.CharField(blank=True, max_length=200)),
                ('os_assigned_installer_role_data', models.CharField(blank=True, max_length=200)),
                ('os_proposal_template', models.CharField(blank=True, max_length=200)),
                ('os_assigned_installer_role', models.CharField(blank=True, max_length=200)),
                ('os_roof_type', models.CharField(blank=True, max_length=200)),
                ('os_assigned_site_inspector_role_data', models.CharField(blank=True, max_length=200)),
                ('os_serial_numbers_panels', models.CharField(blank=True, max_length=200)),
                ('os_assigned_site_inspector_role', models.CharField(blank=True, max_length=200)),
                ('os_serial_numbers_inverters', models.CharField(blank=True, max_length=200)),
                ('os_available_customer_actions', models.CharField(blank=True, max_length=200)),
                ('os_serial_numbers_batteries', models.CharField(blank=True, max_length=200)),
                ('os_business_identifier', models.CharField(blank=True, max_length=200)),
                ('os_simulate_first_year_only', models.CharField(blank=True, max_length=200)),
                ('os_business_name', models.CharField(blank=True, max_length=200)),
                ('os_site_notes', models.CharField(blank=True, max_length=200)),
                ('os_configuration_override', models.CharField(blank=True, max_length=200)),
                ('os_sold_date', models.CharField(blank=True, max_length=200)),
                ('os_configuration', models.CharField(blank=True, max_length=200)),
                ('os_stage', models.CharField(blank=True, max_length=200)),
                ('os_costing_override', models.CharField(blank=True, max_length=200)),
                ('os_stars', models.CharField(blank=True, max_length=200)),
                ('os_costing', models.CharField(blank=True, max_length=200)),
                ('os_contacts_data', models.CharField(blank=True, max_length=200)),
                ('os_systems', models.CharField(blank=True, max_length=200)),
                ('os_contacts', models.CharField(blank=True, max_length=200)),
                ('os_system_sold', models.CharField(blank=True, max_length=200)),
                ('os_contract_date', models.CharField(blank=True, max_length=200)),
                ('os_system_installed', models.CharField(blank=True, max_length=200)),
                ('os_contract', models.CharField(blank=True, max_length=200)),
                ('os_tags_data', models.CharField(blank=True, max_length=200)),
                ('os_tags', models.CharField(blank=True, max_length=200)),
                ('os_country_name', models.CharField(blank=True, max_length=200)),
                ('os_testimonials_data', models.CharField(blank=True, max_length=200)),
                ('os_country', models.CharField(blank=True, max_length=200)),
                ('os_testimonials', models.CharField(blank=True, max_length=200)),
                ('os_county', models.CharField(blank=True, max_length=200)),
                ('os_timezone_offset', models.CharField(blank=True, max_length=200)),
                ('os_created_date', models.CharField(blank=True, max_length=200)),
                ('os_title', models.CharField(blank=True, max_length=200)),
                ('os_events_data', models.CharField(blank=True, max_length=200)),
                ('os_transactions_data', models.CharField(blank=True, max_length=200)),
                ('os_greenlancer_project_id', models.CharField(blank=True, max_length=200)),
                ('os_url', models.CharField(blank=True, max_length=200)),
                ('os_id', models.CharField(blank=True, max_length=200)),
                ('os_usage_annual_or_guess', models.CharField(blank=True, max_length=200)),
                ('os_usage', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_current_custom', models.CharField(blank=True, max_length=200)),
                ('os_is_pricing_locked', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_current_data', models.CharField(blank=True, max_length=200)),
                ('os_installation_date', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_current', models.CharField(blank=True, max_length=200)),
                ('os_language', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_or_guess', models.CharField(blank=True, max_length=200)),
                ('os_language_override', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_proposed_custom', models.CharField(blank=True, max_length=200)),
                ('os_last_calculation_error', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_proposed_data', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_proposed', models.CharField(blank=True, max_length=200)),
                ('os_utility_tariff_proposed_or_guess', models.CharField(blank=True, max_length=200)),
                ('os_valid_until_date', models.CharField(blank=True, max_length=200)),
                ('os_wind_region', models.CharField(blank=True, max_length=200)),
                ('os_meter_identifier', models.CharField(blank=True, max_length=200)),
                ('os_has_cellular_coverage', models.CharField(blank=True, max_length=200)),
                ('os_modified_date', models.CharField(blank=True, max_length=200)),
                ('os_power_factor', models.CharField(blank=True, max_length=200)),
                ('os_natron', models.CharField(blank=True, max_length=200)),
                ('os_years_to_simulate', models.CharField(blank=True, max_length=200)),
                ('os_customer_proposal_data', models.CharField(blank=True, max_length=200)),
                ('os_number_of_wires', models.CharField(blank=True, max_length=200)),
            ],
        ),
    ]
|
py | 7dfb4163a15494bd9049524939e5bbaab155b4d3 | # # Unity ML-Agents Toolkit
from typing import Dict, List
from collections import defaultdict
import abc
import time
from mlagents.trainers.optimizer.tf_optimizer import TFOptimizer
from mlagents.trainers.buffer import AgentBuffer
from mlagents.trainers.trainer import Trainer
from mlagents.trainers.components.reward_signals import RewardSignalResult
from mlagents_envs.timers import hierarchical_timer
from mlagents.trainers.agent_processor import AgentManagerQueue
from mlagents.trainers.trajectory import Trajectory
from mlagents.trainers.stats import StatsPropertyType
RewardSignalResults = Dict[str, RewardSignalResult]
class RLTrainer(Trainer):  # pylint: disable=abstract-method
    """
    Base class for trainers that use Reward Signals.

    Subclasses must implement :meth:`_is_ready_update`, :meth:`_update_policy`
    and :meth:`_process_trajectory`.
    """

    def __init__(self, *args, **kwargs):
        super(RLTrainer, self).__init__(*args, **kwargs)
        # collected_rewards is a dictionary from name of reward signal to a dictionary of agent_id to cumulative reward
        # used for reporting only. We always want to report the environment reward to Tensorboard, regardless
        # of what reward signals are actually present.
        self.cumulative_returns_since_policy_update: List[float] = []
        self.collected_rewards: Dict[str, Dict[str, int]] = {
            "environment": defaultdict(lambda: 0)
        }
        # Experience buffer that accumulates processed trajectories between
        # policy updates.
        self.update_buffer: AgentBuffer = AgentBuffer()
        self._stats_reporter.add_property(
            StatsPropertyType.HYPERPARAMETERS, self.trainer_settings.as_dict()
        )

    def end_episode(self) -> None:
        """
        A signal that the Episode has ended. The buffer must be reset.
        Only called when the academy resets.
        """
        # Zero every per-agent cumulative reward for every reward signal.
        for rewards in self.collected_rewards.values():
            for agent_id in rewards:
                rewards[agent_id] = 0

    def _update_end_episode_stats(self, agent_id: str, optimizer: TFOptimizer) -> None:
        """Report the agent's accumulated rewards at episode end and reset them."""
        for name, rewards in self.collected_rewards.items():
            if name == "environment":
                # The raw environment reward is always reported, independent of
                # the configured reward signals.
                self.stats_reporter.add_stat(
                    "Environment/Cumulative Reward", rewards.get(agent_id, 0)
                )
                self.cumulative_returns_since_policy_update.append(
                    rewards.get(agent_id, 0)
                )
                self.reward_buffer.appendleft(rewards.get(agent_id, 0))
                rewards[agent_id] = 0
            else:
                # Named reward signals report under their own stat name.
                self.stats_reporter.add_stat(
                    optimizer.reward_signals[name].stat_name, rewards.get(agent_id, 0)
                )
                rewards[agent_id] = 0

    def _clear_update_buffer(self) -> None:
        """
        Clear the buffers that have been built up during inference.
        """
        self.update_buffer.reset_agent()

    @abc.abstractmethod
    def _is_ready_update(self):
        """
        Returns whether or not the trainer has enough elements to run update model
        :return: A boolean corresponding to whether or not update_model() can be run
        """
        return False

    @abc.abstractmethod
    def _update_policy(self) -> bool:
        """
        Uses demonstration_buffer to update model.
        :return: Whether or not the policy was updated.
        """
        pass

    def _increment_step(self, n_steps: int, name_behavior_id: str) -> None:
        """
        Increment the step count of the trainer
        :param n_steps: number of steps to increment the step count by
        :param name_behavior_id: behavior whose policy step count is also advanced
        """
        self.step += n_steps
        self.next_summary_step = self._get_next_summary_step()
        p = self.get_policy(name_behavior_id)
        if p:
            p.increment_step(n_steps)

    def _get_next_summary_step(self) -> int:
        """
        Get the next step count that should result in a summary write.
        """
        # Round the current step up to the next multiple of summary_freq.
        return self.step + (self.summary_freq - self.step % self.summary_freq)

    def _write_summary(self, step: int) -> None:
        """
        Saves training statistics to Tensorboard.
        :param step: step count the statistics are recorded at
        """
        self.stats_reporter.add_stat("Is Training", float(self.should_still_train))
        self.stats_reporter.write_stats(int(step))

    @abc.abstractmethod
    def _process_trajectory(self, trajectory: Trajectory) -> None:
        """
        Takes a trajectory and processes it, putting it into the update buffer.
        Subclass overrides should call this base implementation to keep
        summary writing and step counting consistent.
        :param trajectory: The Trajectory tuple containing the steps to be processed.
        """
        self._maybe_write_summary(self.get_step + len(trajectory.steps))
        self._increment_step(len(trajectory.steps), trajectory.behavior_id)

    def _maybe_write_summary(self, step_after_process: int) -> None:
        """
        If processing the trajectory will make the step exceed the next summary write,
        write the summary. This logic ensures summaries are written on the update step and not in between.
        :param step_after_process: the step count after processing the next trajectory.
        """
        if step_after_process >= self.next_summary_step and self.get_step != 0:
            self._write_summary(self.next_summary_step)

    def advance(self) -> None:
        """
        Steps the trainer, taking in trajectories and updates if ready.
        Will block and wait briefly if there are no trajectories.
        """
        with hierarchical_timer("process_trajectory"):
            for traj_queue in self.trajectory_queues:
                # We grab at most the maximum length of the queue.
                # This ensures that even if the queue is being filled faster than it is
                # being emptied, the trajectories in the queue are on-policy.
                _queried = False
                for _ in range(traj_queue.qsize()):
                    _queried = True
                    try:
                        t = traj_queue.get_nowait()
                        self._process_trajectory(t)
                    except AgentManagerQueue.Empty:
                        break
                if self.threaded and not _queried:
                    # Yield thread to avoid busy-waiting
                    time.sleep(0.0001)
        if self.should_still_train:
            if self._is_ready_update():
                with hierarchical_timer("_update_policy"):
                    if self._update_policy():
                        for q in self.policy_queues:
                            # Get policies that correspond to the policy queue in question
                            q.put(self.get_policy(q.behavior_id))
        else:
            # Training has stopped: discard inference-time experience.
            self._clear_update_buffer()
|
py | 7dfb429b8376c158f1fd98f5449c80f0218e5708 | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
import random
import string
# Import Salt Testing libs
from salttesting.unit import skipIf, TestCase
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import salt.config
import salt.loader
import salt.utils.boto
# pylint: disable=import-error,unused-import
from unit.modules.boto_vpc_test import BotoVpcTestCaseMixin
# Import 3rd-party libs
try:
import boto
import boto3
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
from moto import mock_ec2
HAS_MOTO = True
except ImportError:
HAS_MOTO = False
    def mock_ec2(self):
        '''
        Fallback used when moto's ``mock_ec2`` could not be imported: it
        replaces the decorated test function with a no-op ``stub_function``,
        so the ``@mock_ec2`` decorator can still be applied without raising
        "NameError: name 'mock_ec2' is not defined". The tests themselves are
        skipped via the ``HAS_MOTO`` skipIf guards.

        Note: the parameter is named ``self`` but actually receives the
        decorated function, since this is used as a decorator.
        '''
        def stub_function(self):
            pass
        return stub_function
# pylint: enable=import-error,unused-import
# the boto_vpc module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
# Minimum supported boto version: connect_to_region() was added in 2.8.0.
required_boto_version = '2.8.0'
# Fake AWS credentials / parameters shared by all tests below.
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
cidr_block = '10.0.0.0/24'
subnet_id = 'subnet-123456'
dhcp_options_parameters = {'domain_name': 'example.com', 'domain_name_servers': ['1.2.3.4'], 'ntp_servers': ['5.6.7.8'],
                           'netbios_name_servers': ['10.0.0.1'], 'netbios_node_type': 2}
network_acl_entry_parameters = ('fake', 100, -1, 'allow', cidr_block)
dhcp_options_parameters.update(conn_parameters)

# Build real salt loaders so the boto_vpc execution and state modules under
# test run against the mocked (moto) AWS backend.
opts = salt.config.DEFAULT_MINION_OPTS
ctx = {}
utils = salt.loader.utils(opts, context=ctx, whitelist=['boto', 'boto3'])
serializers = salt.loader.serializers(opts)
funcs = salt.loader.minion_mods(opts, context=ctx, utils=utils, whitelist=['boto_vpc'])
salt_states = salt.loader.states(opts=opts, functions=funcs, utils=utils, whitelist=['boto_vpc'], serializers=serializers)

salt.utils.boto.__salt__ = {}
def _has_required_boto():
    '''
    Return a boolean indicating whether boto is importable and at least the
    minimum version (``required_boto_version``) this module supports.
    '''
    if not HAS_BOTO:
        return False
    return LooseVersion(boto.__version__) >= LooseVersion(required_boto_version)
class BotoVpcStateTestCaseBase(TestCase):
    """Shared per-test setup for all boto_vpc state test cases."""

    def setUp(self):
        # Drop any loader context left over from a previous test.
        ctx.clear()
        # connections keep getting cached from prior tests, can't find the
        # correct context object to clear it. So randomize the cache key, to prevent any
        # cache hits
        conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
        ' or equal to version {0}'
        .format(required_boto_version))
class BotoVpcTestCase(BotoVpcStateTestCaseBase, BotoVpcTestCaseMixin):
    '''
    TestCase for salt.states.boto_vpc state.module
    '''

    @mock_ec2
    def test_present_when_vpc_does_not_exist(self):
        '''
        Tests present on a VPC that does not exist.
        '''
        with patch.dict('salt.utils.boto.__salt__', funcs):
            vpc_present_result = salt_states['boto_vpc.present']('test', cidr_block)
        self.assertTrue(vpc_present_result['result'])
        self.assertEqual(vpc_present_result['changes']['new']['vpc']['state'], 'available')

    @mock_ec2
    def test_present_when_vpc_exists(self):
        '''
        Tests that present on an existing VPC is a no-op (no changes reported).
        '''
        # NOTE(review): unlike the other tests, this one does not wrap the
        # state call in ``patch.dict('salt.utils.boto.__salt__', funcs)`` —
        # confirm whether that is intentional.
        vpc = self._create_vpc(name='test')
        vpc_present_result = salt_states['boto_vpc.present']('test', cidr_block)
        self.assertTrue(vpc_present_result['result'])
        self.assertEqual(vpc_present_result['changes'], {})

    @mock_ec2
    @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
    def test_present_with_failure(self):
        '''
        Tests that a backend error surfaces as result=False with the error
        message in the comment.
        '''
        with patch('moto.ec2.models.VPCBackend.create_vpc', side_effect=BotoServerError(400, 'Mocked error')):
            vpc_present_result = salt_states['boto_vpc.present']('test', cidr_block)
            self.assertFalse(vpc_present_result['result'])
            self.assertTrue('Mocked error' in vpc_present_result['comment'])

    @mock_ec2
    def test_absent_when_vpc_does_not_exist(self):
        '''
        Tests absent on a VPC that does not exist.
        '''
        with patch.dict('salt.utils.boto.__salt__', funcs):
            vpc_absent_result = salt_states['boto_vpc.absent']('test')
        self.assertTrue(vpc_absent_result['result'])
        self.assertEqual(vpc_absent_result['changes'], {})

    @mock_ec2
    def test_absent_when_vpc_exists(self):
        '''
        Tests that absent deletes an existing VPC and reports the removal.
        '''
        vpc = self._create_vpc(name='test')
        with patch.dict('salt.utils.boto.__salt__', funcs):
            vpc_absent_result = salt_states['boto_vpc.absent']('test')
        self.assertTrue(vpc_absent_result['result'])
        self.assertEqual(vpc_absent_result['changes']['new']['vpc'], None)

    @mock_ec2
    @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
    def test_absent_with_failure(self):
        '''
        Tests that a backend error during deletion surfaces as result=False.
        '''
        vpc = self._create_vpc(name='test')
        with patch('moto.ec2.models.VPCBackend.delete_vpc', side_effect=BotoServerError(400, 'Mocked error')):
            vpc_absent_result = salt_states['boto_vpc.absent']('test')
            self.assertFalse(vpc_absent_result['result'])
            self.assertTrue('Mocked error' in vpc_absent_result['comment'])
class BotoVpcResourceTestCaseMixin(BotoVpcTestCaseMixin):
    '''
    Generic present/absent tests for a single VPC sub-resource type.
    Concrete subclasses set the class attributes below; test names stay the
    same so each subclass exercises the corresponding boto_vpc states.
    '''

    # Resource name as used by boto_vpc (e.g. 'subnet'); selects the
    # ``boto_vpc.<type>_present`` / ``_absent`` states and the
    # ``_create_<type>`` mixin helper.
    resource_type = None
    # Dotted moto backend methods patched to simulate create/delete failures.
    backend_create = None
    backend_delete = None
    # Extra keyword arguments required to create this resource type.
    extra_kwargs = {}

    def _create_resource(self, vpc_id=None, name=None):
        # Dispatch to the mixin helper for this resource type.
        _create = getattr(self, '_create_' + self.resource_type)
        _create(vpc_id=vpc_id, name=name, **self.extra_kwargs)

    @mock_ec2
    def test_present_when_resource_does_not_exist(self):
        '''
        Tests present on a resource that does not exist.
        '''
        vpc = self._create_vpc(name='test')
        with patch.dict('salt.utils.boto.__salt__', funcs):
            resource_present_result = salt_states['boto_vpc.{0}_present'.format(self.resource_type)](
                name='test', vpc_name='test', **self.extra_kwargs)
        self.assertTrue(resource_present_result['result'])

        exists = funcs['boto_vpc.resource_exists'](self.resource_type, 'test').get('exists')
        self.assertTrue(exists)

    @mock_ec2
    def test_present_when_resource_exists(self):
        '''
        Tests that present on an existing resource reports no changes.
        '''
        vpc = self._create_vpc(name='test')
        resource = self._create_resource(vpc_id=vpc.id, name='test')
        with patch.dict('salt.utils.boto.__salt__', funcs):
            resource_present_result = salt_states['boto_vpc.{0}_present'.format(self.resource_type)](
                name='test', vpc_name='test', **self.extra_kwargs)
        self.assertTrue(resource_present_result['result'])
        self.assertEqual(resource_present_result['changes'], {})

    @mock_ec2
    @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
    def test_present_with_failure(self):
        '''
        Tests that a backend create failure surfaces as result=False.
        '''
        vpc = self._create_vpc(name='test')
        with patch('moto.ec2.models.{0}'.format(self.backend_create), side_effect=BotoServerError(400, 'Mocked error')):
            resource_present_result = salt_states['boto_vpc.{0}_present'.format(self.resource_type)](
                name='test', vpc_name='test', **self.extra_kwargs)
            self.assertFalse(resource_present_result['result'])
            self.assertTrue('Mocked error' in resource_present_result['comment'])

    @mock_ec2
    def test_absent_when_resource_does_not_exist(self):
        '''
        Tests absent on a resource that does not exist.
        '''
        with patch.dict('salt.utils.boto.__salt__', funcs):
            resource_absent_result = salt_states['boto_vpc.{0}_absent'.format(self.resource_type)]('test')
        self.assertTrue(resource_absent_result['result'])
        self.assertEqual(resource_absent_result['changes'], {})

    @mock_ec2
    def test_absent_when_resource_exists(self):
        '''
        Tests that absent deletes an existing resource and reports it gone.
        '''
        vpc = self._create_vpc(name='test')
        self._create_resource(vpc_id=vpc.id, name='test')
        with patch.dict('salt.utils.boto.__salt__', funcs):
            resource_absent_result = salt_states['boto_vpc.{0}_absent'.format(self.resource_type)]('test')
        self.assertTrue(resource_absent_result['result'])
        self.assertEqual(resource_absent_result['changes']['new'][self.resource_type], None)

        exists = funcs['boto_vpc.resource_exists'](self.resource_type, 'test').get('exists')
        self.assertFalse(exists)

    @mock_ec2
    @skipIf(True, 'Disabled pending https://github.com/spulec/moto/issues/493')
    def test_absent_with_failure(self):
        '''
        Tests that a backend delete failure surfaces as result=False.
        '''
        vpc = self._create_vpc(name='test')
        self._create_resource(vpc_id=vpc.id, name='test')
        with patch('moto.ec2.models.{0}'.format(self.backend_delete), side_effect=BotoServerError(400, 'Mocked error')):
            resource_absent_result = salt_states['boto_vpc.{0}_absent'.format(self.resource_type)]('test')
            self.assertFalse(resource_absent_result['result'])
            self.assertTrue('Mocked error' in resource_absent_result['comment'])
# Concrete test case: runs the generic resource present/absent tests
# (from BotoVpcResourceTestCaseMixin) against subnets.
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
                                       ' or equal to version {0}'
        .format(required_boto_version))
class BotoVpcSubnetsTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCaseMixin):
    resource_type = 'subnet'
    backend_create = 'SubnetBackend.create_subnet'
    backend_delete = 'SubnetBackend.delete_subnet'
    # Subnets additionally require a CIDR block on creation.
    extra_kwargs = {'cidr_block': cidr_block}
# Concrete test case: runs the generic resource present/absent tests
# (from BotoVpcResourceTestCaseMixin) against internet gateways.
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
                                       ' or equal to version {0}'
        .format(required_boto_version))
class BotoVpcInternetGatewayTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCaseMixin):
    resource_type = 'internet_gateway'
    backend_create = 'InternetGatewayBackend.create_internet_gateway'
    backend_delete = 'InternetGatewayBackend.delete_internet_gateway'
# Concrete test case: runs the generic resource tests against route tables,
# plus route-table-specific tests for subnet associations and routes.
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto module must be greater than'
                                       ' or equal to version {0}'
        .format(required_boto_version))
class BotoVpcRouteTableTestCase(BotoVpcStateTestCaseBase, BotoVpcResourceTestCaseMixin):
    resource_type = 'route_table'
    backend_create = 'RouteTableBackend.create_route_table'
    backend_delete = 'RouteTableBackend.delete_route_table'

    @mock_ec2
    def test_present_with_subnets(self):
        # Subnets may be given either by name or by id; both should end up
        # associated with the route table.
        vpc = self._create_vpc(name='test')
        subnet1 = self._create_subnet(vpc_id=vpc.id, name='test1')
        subnet2 = self._create_subnet(vpc_id=vpc.id, name='test2')
        route_table_present_result = salt_states['boto_vpc.route_table_present'](
            name='test', vpc_name='test', subnet_names=['test1'], subnet_ids=[subnet2.id])
        associations = route_table_present_result['changes']['new']['subnets_associations']
        assoc_subnets = [x['subnet_id'] for x in associations]
        self.assertEqual(set(assoc_subnets), set([subnet1.id, subnet2.id]))
        # Re-running with only one subnet should disassociate the other and
        # report both old and new association sets in the changes dict.
        route_table_present_result = salt_states['boto_vpc.route_table_present'](
            name='test', vpc_name='test', subnet_ids=[subnet2.id])
        changes = route_table_present_result['changes']
        old_subnets = [x['subnet_id'] for x in changes['old']['subnets_associations']]
        self.assertEqual(set(assoc_subnets), set(old_subnets))
        new_subnets = changes['new']['subnets_associations']
        self.assertEqual(new_subnets[0]['subnet_id'], subnet2.id)

    @mock_ec2
    def test_present_with_routes(self):
        # Declared routes (default via the IGW plus the local route) should
        # appear in the new route table.
        vpc = self._create_vpc(name='test')
        igw = self._create_internet_gateway(name='test', vpc_id=vpc.id)
        route_table_present_result = salt_states['boto_vpc.route_table_present'](
            name='test', vpc_name='test', routes=[{'destination_cidr_block': '0.0.0.0/0',
                                                   'gateway_id': igw.id},
                                                  {'destination_cidr_block': '10.0.0.0/24',
                                                   'gateway_id': 'local'}])
        routes = [x['gateway_id'] for x in route_table_present_result['changes']['new']['routes']]
        self.assertEqual(set(routes), set(['local', igw.id]))
        # Dropping the IGW route on a second run must be reflected in changes:
        # old shows both routes, new shows only the local route.
        route_table_present_result = salt_states['boto_vpc.route_table_present'](
            name='test', vpc_name='test', routes=[{'destination_cidr_block': '10.0.0.0/24',
                                                   'gateway_id': 'local'}])
        changes = route_table_present_result['changes']
        old_routes = [x['gateway_id'] for x in changes['old']['routes']]
        self.assertEqual(set(routes), set(old_routes))
        self.assertEqual(changes['new']['routes'][0]['gateway_id'], 'local')
|
py | 7dfb42ac1ba4ac510c82ef525a729d029c881693 | from typing import List, Tuple
from drkns.stepexecutionstatus.StepExecutionStatus import StepExecutionStatus
from drkns.stepexecutionstatus.get_status_message_and_output_from_status import\
get_status_message_and_output_from_status
def get_successful_flag_and_combined_output(
        execution_history: List[StepExecutionStatus],
        summary: bool = False,
        limit_output: bool = False
) -> Tuple[bool, List[str]]:
    """Summarize a step execution history.

    Returns a tuple ``(successful, lines)`` where ``successful`` is True only
    when no step was ignored or failed.  With ``summary`` set, ``lines``
    contains just the per-step status messages; otherwise the collected step
    outputs come first, separated from the messages by a ``'\\n'`` entry.
    With ``limit_output`` set, outputs of ignored or restored steps are
    omitted.
    """
    step_messages: List[str] = []
    step_outputs: List[str] = []
    all_successful = True
    for step_status in execution_history:
        message, output = \
            get_status_message_and_output_from_status(step_status)
        step_messages.append(message)
        # A step that failed — or was skipped entirely — taints the run.
        if step_status.ignored or not step_status.successful:
            all_successful = False
        if output is not None:
            # "Active" means the step actually ran this time (neither
            # skipped nor served from a restored/cached result).
            is_active = not (step_status.ignored or step_status.restored)
            if is_active or not limit_output:
                step_outputs.append(output)
    if summary:
        return all_successful, step_messages
    if step_outputs:
        return all_successful, step_outputs + ['\n'] + step_messages
    return all_successful, list(step_messages)
|
py | 7dfb43e0901fa75b6e7d430da1aaf4cd822cdd37 | """
WSGI config for django_logger_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module unless the environment
# already provides one (e.g. a production deployment overriding it).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_logger_project.settings')

# Module-level WSGI callable that application servers import and invoke.
application = get_wsgi_application()
|
py | 7dfb45b17a0988bb806c2ab75a77d7c5c84f59c7 | from .interaction_dataset_reader import BarkStateFromMotionState
from .interaction_dataset_reader import TrajectoryFromTrack
from .interaction_dataset_reader import ShapeFromTrack
from .interaction_dataset_reader import InitStateFromTrack
from .interaction_dataset_reader import GoalDefinitionFromTrack
from .interaction_dataset_reader import BehaviorFromTrack
from .interaction_dataset_reader import InteractionDatasetReader
from .dataset_decomposer import DatasetDecomposer
__all__ = ["DatasetDecomposer",
"InteractionDatasetReader",
"BarkStateFromMotionState",
"TrajectoryFromTrack",
"ShapeFromTrack",
"InitStateFromTrack",
"GoalDefinitionFromTrack",
"BehaviorFromTrack"]
|
py | 7dfb45bd0dd43f4419307a9955aa671fb26d70c1 | #
# PySNMP MIB module CISCO-TRUSTSEC-INTERFACE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-TRUSTSEC-INTERFACE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:57:59 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CtsSecurityGroupTag, = mibBuilder.importSymbols("CISCO-TRUSTSEC-TC-MIB", "CtsSecurityGroupTag")
ifIndex, ifName = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifName")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
TimeTicks, Unsigned32, Gauge32, ObjectIdentity, NotificationType, Bits, Counter64, IpAddress, Integer32, iso, MibIdentifier, ModuleIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "Gauge32", "ObjectIdentity", "NotificationType", "Bits", "Counter64", "IpAddress", "Integer32", "iso", "MibIdentifier", "ModuleIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DateAndTime, StorageType, RowStatus, TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DateAndTime", "StorageType", "RowStatus", "TruthValue", "DisplayString", "TextualConvention")
# Root module identity of CISCO-TRUSTSEC-INTERFACE-MIB (OID 1.3.6.1.4.1.9.9.740).
# Generated by pysmi; the loadTexts guards skip descriptive text unless enabled.
ciscoTrustSecIfMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 740))
ciscoTrustSecIfMIB.setRevisions(('2014-01-28 00:00', '2012-04-06 00:00', '2010-05-28 00:00',))
if mibBuilder.loadTexts: ciscoTrustSecIfMIB.setLastUpdated('201401280000Z')
if mibBuilder.loadTexts: ciscoTrustSecIfMIB.setOrganization('Cisco Systems, Inc.')
class CtsiCasheDataSource(TextualConvention, Integer32):
    # Textual convention: enumeration of where cached TrustSec data came from.
    # NOTE(review): "Cashe" [sic] comes from the generated MIB source; the name
    # is referenced elsewhere in this module and must not be changed.
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("unknown", 1), ("acs", 2), ("dram", 3), ("nvram", 4), ("all", 5))
class CtsSapNegMode(TextualConvention, Integer32):
    # Textual convention: single SAP (Security Association Protocol)
    # negotiation mode value.
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("encapNoAuthenNoEncrypt", 1), ("gcmAuthenNoEncrypt", 2), ("gcmAuthenGcmEncrypt", 3), ("noEncap", 4))
class CtsSapNegModeList(TextualConvention, OctetString):
    # Textual convention: octet string encoding an ordered list of
    # CtsSapNegMode values.
    status = 'current'
class CtsiInterfaceControllerState(TextualConvention, Integer32):
    # Textual convention: state of the TrustSec interface controller
    # state machine.
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
    namedValues = NamedValues(("unknown", 1), ("initialize", 2), ("authenticating", 3), ("authorizing", 4), ("sapNegotiating", 5), ("open", 6), ("held", 7), ("disconnecting", 8), ("invalid", 9), ("licenseError", 10))
# --- MIB subtree anchors: notifications (.0), objects (.1), conformance (.2),
# --- followed by one identifier per object group under .1.
ciscoTrustSecIfMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 0))
ciscoTrustSecIfMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1))
ciscoTrustSecIfMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 2))
ctsiIfConfigObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1))
ctsiIfDot1xObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2))
ctsiIfManualObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3))
ctsiIfL3ForwardObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 4))
ctsiIfStatusObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5))
ctsiIfStatsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6))
ctsiAuthorizationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7))
ctsiIfcStatsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 8))
ctsiEventsStatsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9))
ctsiIfModeStatsObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 10))
ctsiIfNotifsControlObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 11))
ctsiIfNotifsOnlyInfoObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 12))
# --- ctsiIfConfigTable: per-interface TrustSec configuration, indexed by ifIndex.
ctsiIfConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1, 1), )
if mibBuilder.loadTexts: ctsiIfConfigTable.setStatus('current')
ctsiIfConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctsiIfConfigEntry.setStatus('current')
ctsiIfModeCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1, 1, 1, 1), Bits().clone(namedValues=NamedValues(("dot1x", 0), ("manual", 1), ("l3Forward", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfModeCapability.setStatus('current')
ctsiIfConfiguredMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("none", 2), ("dot1x", 3), ("manual", 4), ("l3Forward", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfConfiguredMode.setStatus('current')
ctsiIfCacheClear = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1, 1, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiIfCacheClear.setStatus('current')
ctsiIfRekey = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 1, 1, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiIfRekey.setStatus('current')
# --- ctsiIfDot1xTable: per-interface dot1x-mode settings (SGT propagation,
# --- reauthentication intervals, SAP mode list), indexed by ifIndex.
ctsiIfDot1xTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1), )
if mibBuilder.loadTexts: ctsiIfDot1xTable.setStatus('current')
ctsiIfDot1xEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctsiIfDot1xEntry.setStatus('current')
ctsiIfDot1xSgtPropagateEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfDot1xSgtPropagateEnabled.setStatus('current')
ctsiIfDot1xReauthInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 2), Integer32().clone(86400)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfDot1xReauthInterval.setStatus('current')
ctsiIfDot1xSapModeList = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 3), CtsSapNegModeList().clone(hexValue="04000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfDot1xSapModeList.setStatus('current')
ctsiIfDot1xDownloadReauthInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfDot1xDownloadReauthInterval.setStatus('current')
ctsiIfDot1xOperReauthInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfDot1xOperReauthInterval.setStatus('current')
ctsiIfDot1xReauthTimeLeft = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfDot1xReauthTimeLeft.setStatus('current')
ctsiIfDot1xStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 7), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfDot1xStorageType.setStatus('current')
ctsiIfDot1xRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 2, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfDot1xRowStatus.setStatus('current')
# --- ctsiIfManualTable: per-interface manual-mode settings (static SGT,
# --- SAP PMK and mode list), indexed by ifIndex.
ctsiIfManualTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1), )
if mibBuilder.loadTexts: ctsiIfManualTable.setStatus('current')
ctsiIfManualEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctsiIfManualEntry.setStatus('current')
ctsiIfManualDynamicPeerId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 1), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualDynamicPeerId.setStatus('current')
ctsiIfManualStaticSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 2), CtsSecurityGroupTag()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualStaticSgt.setStatus('current')
ctsiIfManualStaticSgtTrusted = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualStaticSgtTrusted.setStatus('current')
ctsiIfManualSgtPropagateEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualSgtPropagateEnabled.setStatus('current')
ctsiIfManualSapPmk = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 5), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(32, 32), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualSapPmk.setStatus('current')
ctsiIfManualSapModeList = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 6), CtsSapNegModeList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualSapModeList.setStatus('current')
ctsiIfManualStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 7), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualStorageType.setStatus('current')
ctsiIfManualRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 3, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfManualRowStatus.setStatus('current')
# --- ctsiIfL3ForwardTable: per-interface layer-3 forwarding mode, indexed by ifIndex.
ctsiIfL3ForwardTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 4, 1), )
if mibBuilder.loadTexts: ctsiIfL3ForwardTable.setStatus('current')
ctsiIfL3ForwardEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctsiIfL3ForwardEntry.setStatus('current')
ctsiIfL3ForwardMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("l3Ipv4Forward", 1), ("l3Ipv6Forward", 2), ("l3IpForward", 3))).clone('l3Ipv4Forward')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfL3ForwardMode.setStatus('current')
ctsiIfL3ForwardStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 4, 1, 1, 2), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfL3ForwardStorageType.setStatus('current')
ctsiIfL3ForwardRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 4, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ctsiIfL3ForwardRowStatus.setStatus('current')
# --- ctsiIfStatusTable: read-only per-interface runtime status (controller
# --- state, authn/authz results, peer info, SAP negotiation, cache info),
# --- indexed by ifIndex.
ctsiIfStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1), )
if mibBuilder.loadTexts: ctsiIfStatusTable.setStatus('current')
ctsiIfStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctsiIfStatusEntry.setStatus('current')
ctsiIfControllerState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 1), CtsiInterfaceControllerState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfControllerState.setStatus('current')
ctsiIfAuthenticationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("unknown", 1), ("succeeded", 2), ("rejected", 3), ("logOff", 4), ("noRespond", 5), ("notApplicable", 6), ("incomplete", 7), ("failed", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthenticationStatus.setStatus('current')
ctsiIfPeerId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 3), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfPeerId.setStatus('current')
ctsiIfPeerAdvCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 4), Bits().clone(namedValues=NamedValues(("sap", 0)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfPeerAdvCapability.setStatus('current')
ctsiIfAuthorizationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("unknown", 1), ("inProgress", 2), ("succeeded", 3), ("failed", 4), ("fallBackPolicy", 5), ("incomplete", 6), ("peerSucceeded", 7), ("rbaclSucceeded", 8), ("policySucceeded", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthorizationStatus.setStatus('current')
ctsiIfPeerSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 6), CtsSecurityGroupTag()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfPeerSgt.setStatus('current')
ctsiIfPeerSgtTrusted = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfPeerSgtTrusted.setStatus('current')
ctsiIfSapNegotiationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("notApplicable", 1), ("unknown", 2), ("inProgress", 3), ("succeeded", 4), ("failed", 5), ("licenseError", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfSapNegotiationStatus.setStatus('current')
ctsiIfSapNegModeList = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 9), CtsSapNegModeList()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfSapNegModeList.setStatus('current')
ctsiIfCacheExpirationTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 10), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfCacheExpirationTime.setStatus('current')
ctsiIfCacheDataSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 11), CtsiCasheDataSource()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfCacheDataSource.setStatus('current')
ctsiIfCriticalAuthStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 5, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disable", 1), ("cache", 2), ("default", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfCriticalAuthStatus.setStatus('current')
# --- ctsiIfStatsTable: per-interface event counters (authentication,
# --- authorization and SAP negotiation outcomes), indexed by ifIndex.
ctsiIfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1), )
if mibBuilder.loadTexts: ctsiIfStatsTable.setStatus('current')
ctsiIfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ctsiIfStatsEntry.setStatus('current')
ctsiIfAuthenticationSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthenticationSuccess.setStatus('current')
ctsiIfAuthenticationReject = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthenticationReject.setStatus('current')
ctsiIfAuthenticationFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthenticationFailure.setStatus('current')
ctsiIfAuthenticationNoResponse = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthenticationNoResponse.setStatus('current')
ctsiIfAuthenticationLogoff = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthenticationLogoff.setStatus('current')
ctsiIfAuthorizationSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthorizationSuccess.setStatus('current')
ctsiIfAuthorizationPolicyFail = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthorizationPolicyFail.setStatus('current')
ctsiIfAuthorizationFail = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfAuthorizationFail.setStatus('current')
ctsiIfSapSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfSapSuccess.setStatus('current')
ctsiIfSapFail = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 6, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfSapFail.setStatus('current')
# --- ctsiAuthorizationTable: per-peer authorization status, indexed by peer id.
ctsiAuthorizationTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1), )
if mibBuilder.loadTexts: ctsiAuthorizationTable.setStatus('current')
ctsiAuthorizationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1), ).setIndexNames((1, "CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationPeerId"))
if mibBuilder.loadTexts: ctsiAuthorizationEntry.setStatus('current')
ctsiAuthorizationPeerId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 64)))
if mibBuilder.loadTexts: ctsiAuthorizationPeerId.setStatus('current')
ctsiAuthorizationPeerSgt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 2), CtsSecurityGroupTag()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationPeerSgt.setStatus('current')
ctsiAuthorizationState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("start", 2), ("waitingRespond", 3), ("assessing", 4), ("complete", 5), ("failure", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationState.setStatus('current')
ctsiAuthorizationLastRefresh = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationLastRefresh.setStatus('current')
ctsiAuthorizationTimeLeft = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationTimeLeft.setStatus('current')
ctsiAuthorizationTimeToRefresh = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647))).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationTimeToRefresh.setStatus('current')
ctsiAuthorizationCacheDataSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 7), CtsiCasheDataSource()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationCacheDataSource.setStatus('current')
ctsiAuthorizationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 7, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("inProgress", 2), ("succeeded", 3), ("failed", 4), ("fallbackPolicy", 5), ("incomplete", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationStatus.setStatus('current')
# --- ctsiIfcStatsTable: interface count per controller state, indexed by state.
ctsiIfcStatsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 8, 1), )
if mibBuilder.loadTexts: ctsiIfcStatsTable.setStatus('current')
ctsiIfcStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 8, 1, 1), ).setIndexNames((0, "CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfcState"))
if mibBuilder.loadTexts: ctsiIfcStatsEntry.setStatus('current')
ctsiIfcState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 8, 1, 1, 1), CtsiInterfaceControllerState())
if mibBuilder.loadTexts: ctsiIfcState.setStatus('current')
ctsiIfcStatsIfCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 8, 1, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiIfcStatsIfCount.setStatus('current')
# --- Device-wide event counters (scalars under ctsiEventsStatsObjects).
ctsiAuthenticationSuccess = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthenticationSuccess.setStatus('current')
ctsiAuthenticationReject = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthenticationReject.setStatus('current')
ctsiAuthenticationFailure = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthenticationFailure.setStatus('current')
ctsiAuthenticationLogoff = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthenticationLogoff.setStatus('current')
ctsiAuthenticationNoRespond = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthenticationNoRespond.setStatus('current')
ctsiAuthorizationSuccess = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationSuccess.setStatus('current')
ctsiAuthorizationFailure = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationFailure.setStatus('current')
ctsiAuthorizationPolicyFailure = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiAuthorizationPolicyFailure.setStatus('current')
ctsiSapNegotiationSuccess = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiSapNegotiationSuccess.setStatus('current')
ctsiSapNegotiationFailure = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 9, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiSapNegotiationFailure.setStatus('current')
# --- Interface counts per configured TrustSec mode (scalars).
ctsiInDot1xModeIfCount = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 10, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiInDot1xModeIfCount.setStatus('current')
ctsiInManualModeIfCount = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 10, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiInManualModeIfCount.setStatus('current')
ctsiInL3ForwardModeIfCount = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 10, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ctsiInL3ForwardModeIfCount.setStatus('current')
# --- Read-write switches enabling/disabling each notification type.
ctsiAuthorizationFailNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 11, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiAuthorizationFailNotifEnable.setStatus('current')
ctsiIfAddSupplicantFailNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 11, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiIfAddSupplicantFailNotifEnable.setStatus('current')
ctsiIfAuthenticationFailNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 11, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiIfAuthenticationFailNotifEnable.setStatus('current')
ctsiIfSapNegotiationFailNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 11, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiIfSapNegotiationFailNotifEnable.setStatus('current')
ctsiIfUnauthorizedNotifEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 11, 5), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ctsiIfUnauthorizedNotifEnable.setStatus('current')
# --- Objects that only ever appear inside notification payloads.
ctsiIfNotifMessage = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 12, 1), SnmpAdminString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ctsiIfNotifMessage.setStatus('current')
ctsiIfDot1xPaeRole = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 740, 1, 12, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("notApplicable", 1), ("authenticator", 2), ("supplicant", 3)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ctsiIfDot1xPaeRole.setStatus('current')
# Notification (trap) definitions under 1.3.6.1.4.1.9.9.740.0.x. Each
# setObjects(...) call lists the varbinds that accompany the notification;
# several pull in ifName from the external IF-MIB module.
ctsiAuthorizationFailNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 740, 0, 1)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationPeerSgt"))
if mibBuilder.loadTexts: ctsiAuthorizationFailNotif.setStatus('current')
ctsiIfAddSupplicantFailNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 740, 0, 2)).setObjects(("IF-MIB", "ifName"))
if mibBuilder.loadTexts: ctsiIfAddSupplicantFailNotif.setStatus('current')
# Carries the interface name, peer identity, local PAE role and the
# authentication status varbinds.
ctsiIfAuthenticationFailNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 740, 0, 3)).setObjects(("IF-MIB", "ifName"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfPeerId"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xPaeRole"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationStatus"))
if mibBuilder.loadTexts: ctsiIfAuthenticationFailNotif.setStatus('current')
# Carries the interface name plus the free-form ctsiIfNotifMessage string.
ctsiIfSapNegotiationFailNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 740, 0, 4)).setObjects(("IF-MIB", "ifName"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfNotifMessage"))
if mibBuilder.loadTexts: ctsiIfSapNegotiationFailNotif.setStatus('current')
ctsiIfUnauthorizedNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 740, 0, 5)).setObjects(("IF-MIB", "ifName"))
if mibBuilder.loadTexts: ctsiIfUnauthorizedNotif.setStatus('current')
# Conformance section (SMIv2, RFC 2580): subtree anchors for compliance
# statements (...740.2.1) and object/notification groups (...740.2.2).
ciscoTrustSecIfMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 1))
ciscoTrustSecIfMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2))
# Three successive ModuleCompliance revisions; each later one is a superset of
# the previous (v2 adds the notification groups, v3 adds the critical-auth
# status group). Revisions 1 and 2 are marked deprecated, revision 3 current.
# The "mibBuilder.version > (4, 4, 0)" guard only calls setStatus on pysnmp
# builds new enough to support it on compliance objects.
ciscoTrustSecIfMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 1, 1)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfConfigGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBDot1xGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBManualGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBL3ForwardGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBStatusGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBAuthorizationGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfcStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBEventStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfModeStatisticGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBCompliance = ciscoTrustSecIfMIBCompliance.setStatus('deprecated')
ciscoTrustSecIfMIBCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 1, 2)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfConfigGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBDot1xGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBManualGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBL3ForwardGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBStatusGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBAuthorizationGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfcStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBEventStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfModeStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBNotifsCtrlGrp"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBNotifsOnlyInfoGrp"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBNotifsGrp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBCompliance2 = ciscoTrustSecIfMIBCompliance2.setStatus('deprecated')
ciscoTrustSecIfMIBCompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 1, 3)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfConfigGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBDot1xGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBManualGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBL3ForwardGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBStatusGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBAuthorizationGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfcStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBEventStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBIfModeStatisticGroup"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBNotifsCtrlGrp"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBNotifsOnlyInfoGrp"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBNotifsGrp"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ciscoTrustSecIfMIBCriticalAuthStatusGrp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBCompliance3 = ciscoTrustSecIfMIBCompliance3.setStatus('current')
# Object groups under ...740.2.2.x: each ObjectGroup enumerates the MIB
# objects that a compliant implementation provides as a unit. The same
# "mibBuilder.version" guard pattern applies to each setStatus call.
# 2.2.1: interface configuration objects (mode capability, configured mode,
# cache clear, rekey).
ciscoTrustSecIfMIBIfConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 1)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfModeCapability"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfConfiguredMode"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfCacheClear"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfRekey"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBIfConfigGroup = ciscoTrustSecIfMIBIfConfigGroup.setStatus('current')
# 2.2.2: 802.1X-mode configuration objects.
ciscoTrustSecIfMIBDot1xGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 2)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xSgtPropagateEnabled"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xReauthInterval"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xSapModeList"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xDownloadReauthInterval"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xOperReauthInterval"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xReauthTimeLeft"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xStorageType"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBDot1xGroup = ciscoTrustSecIfMIBDot1xGroup.setStatus('current')
# 2.2.3: manual-mode configuration objects.
ciscoTrustSecIfMIBManualGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 3)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualDynamicPeerId"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualStaticSgt"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualStaticSgtTrusted"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualSgtPropagateEnabled"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualSapPmk"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualSapModeList"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualStorageType"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfManualRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBManualGroup = ciscoTrustSecIfMIBManualGroup.setStatus('current')
# 2.2.4: Layer-3 forwarding configuration objects.
ciscoTrustSecIfMIBL3ForwardGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 4)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfL3ForwardMode"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfL3ForwardStorageType"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfL3ForwardRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBL3ForwardGroup = ciscoTrustSecIfMIBL3ForwardGroup.setStatus('current')
# 2.2.5: per-interface runtime status objects (controller state, auth status,
# peer identity/SGT, cache and SAP negotiation state).
ciscoTrustSecIfMIBStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 5)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfControllerState"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationStatus"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfPeerId"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfPeerAdvCapability"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthorizationStatus"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfPeerSgt"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfPeerSgtTrusted"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfCacheExpirationTime"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfCacheDataSource"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfSapNegotiationStatus"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfSapNegModeList"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBStatusGroup = ciscoTrustSecIfMIBStatusGroup.setStatus('current')
# 2.2.6: per-interface counters for authentication/authorization/SAP outcomes.
ciscoTrustSecIfMIBStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 6)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationSuccess"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationReject"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationFailure"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationNoResponse"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationLogoff"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthorizationSuccess"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthorizationPolicyFail"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthorizationFail"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfSapSuccess"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfSapFail"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBStatisticGroup = ciscoTrustSecIfMIBStatisticGroup.setStatus('current')
# 2.2.7: authorization-session objects (peer SGT, state, refresh timers,
# cache source, status).
ciscoTrustSecIfMIBAuthorizationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 7)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationPeerSgt"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationState"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationLastRefresh"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationTimeLeft"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationTimeToRefresh"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationCacheDataSource"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBAuthorizationGroup = ciscoTrustSecIfMIBAuthorizationGroup.setStatus('current')
# 2.2.8: single-object group for the interface-controller-state count.
ciscoTrustSecIfMIBIfcStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 8)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfcStatsIfCount"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBIfcStatisticGroup = ciscoTrustSecIfMIBIfcStatisticGroup.setStatus('current')
# 2.2.9: device-wide event counters (aggregate, not per-interface).
ciscoTrustSecIfMIBEventStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 9)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthenticationSuccess"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthenticationReject"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthenticationFailure"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthenticationLogoff"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthenticationNoRespond"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationSuccess"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationFailure"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationPolicyFailure"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiSapNegotiationSuccess"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiSapNegotiationFailure"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBEventStatisticGroup = ciscoTrustSecIfMIBEventStatisticGroup.setStatus('current')
# 2.2.10: the per-mode interface counters documented near the top of this chunk.
ciscoTrustSecIfMIBIfModeStatisticGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 10)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiInDot1xModeIfCount"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiInManualModeIfCount"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiInL3ForwardModeIfCount"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBIfModeStatisticGroup = ciscoTrustSecIfMIBIfModeStatisticGroup.setStatus('current')
# Notification-related conformance groups.
# 2.2.11: the five read-write *NotifEnable control switches.
ciscoTrustSecIfMIBNotifsCtrlGrp = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 11)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationFailNotifEnable"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAddSupplicantFailNotifEnable"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationFailNotifEnable"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfSapNegotiationFailNotifEnable"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfUnauthorizedNotifEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBNotifsCtrlGrp = ciscoTrustSecIfMIBNotifsCtrlGrp.setStatus('current')
# 2.2.12: the two accessible-for-notify payload objects.
ciscoTrustSecIfMIBNotifsOnlyInfoGrp = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 12)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfNotifMessage"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfDot1xPaeRole"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBNotifsOnlyInfoGrp = ciscoTrustSecIfMIBNotifsOnlyInfoGrp.setStatus('current')
# 2.2.13: NotificationGroup bundling the five NotificationTypes defined above.
ciscoTrustSecIfMIBNotifsGrp = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 13)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiAuthorizationFailNotif"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAddSupplicantFailNotif"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfAuthenticationFailNotif"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfSapNegotiationFailNotif"), ("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfUnauthorizedNotif"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBNotifsGrp = ciscoTrustSecIfMIBNotifsGrp.setStatus('current')
# 2.2.14: single-object group added by compliance revision 3
# (critical-authentication status).
ciscoTrustSecIfMIBCriticalAuthStatusGrp = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 740, 2, 2, 14)).setObjects(("CISCO-TRUSTSEC-INTERFACE-MIB", "ctsiIfCriticalAuthStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoTrustSecIfMIBCriticalAuthStatusGrp = ciscoTrustSecIfMIBCriticalAuthStatusGrp.setStatus('current')
# Register every symbol defined by this module with the MIB builder so that
# other compiled MIB modules (and pysnmp itself) can resolve them by name.
# PYSNMP_MODULE_ID identifies the module's ModuleIdentity object. NOTE(review):
# "CtsiCasheDataSource" (sic) preserves the spelling used by the source MIB —
# do not "fix" it, importers reference it by this exact name.
mibBuilder.exportSymbols("CISCO-TRUSTSEC-INTERFACE-MIB", ctsiIfDot1xStorageType=ctsiIfDot1xStorageType, ctsiIfModeCapability=ctsiIfModeCapability, ctsiIfManualSgtPropagateEnabled=ctsiIfManualSgtPropagateEnabled, ciscoTrustSecIfMIBDot1xGroup=ciscoTrustSecIfMIBDot1xGroup, CtsiCasheDataSource=CtsiCasheDataSource, ctsiSapNegotiationSuccess=ctsiSapNegotiationSuccess, ctsiIfAuthorizationFail=ctsiIfAuthorizationFail, ctsiIfModeStatsObjects=ctsiIfModeStatsObjects, ctsiInDot1xModeIfCount=ctsiInDot1xModeIfCount, ctsiAuthorizationCacheDataSource=ctsiAuthorizationCacheDataSource, ctsiIfManualRowStatus=ctsiIfManualRowStatus, ctsiIfPeerSgtTrusted=ctsiIfPeerSgtTrusted, ciscoTrustSecIfMIBManualGroup=ciscoTrustSecIfMIBManualGroup, ctsiAuthenticationReject=ctsiAuthenticationReject, ctsiIfSapNegotiationStatus=ctsiIfSapNegotiationStatus, ctsiIfConfigTable=ctsiIfConfigTable, ctsiIfAuthorizationStatus=ctsiIfAuthorizationStatus, ctsiIfAddSupplicantFailNotif=ctsiIfAddSupplicantFailNotif, ctsiIfManualObjects=ctsiIfManualObjects, ctsiIfL3ForwardMode=ctsiIfL3ForwardMode, ctsiIfAuthorizationPolicyFail=ctsiIfAuthorizationPolicyFail, ctsiAuthorizationObjects=ctsiAuthorizationObjects, ctsiIfUnauthorizedNotif=ctsiIfUnauthorizedNotif, ctsiIfcStatsTable=ctsiIfcStatsTable, ctsiIfManualSapPmk=ctsiIfManualSapPmk, ctsiAuthorizationTimeToRefresh=ctsiAuthorizationTimeToRefresh, ctsiIfAuthenticationStatus=ctsiIfAuthenticationStatus, ctsiIfConfigEntry=ctsiIfConfigEntry, ctsiIfStatusObjects=ctsiIfStatusObjects, ctsiIfCacheDataSource=ctsiIfCacheDataSource, ctsiIfConfiguredMode=ctsiIfConfiguredMode, ctsiIfDot1xSapModeList=ctsiIfDot1xSapModeList, ctsiIfNotifsControlObjects=ctsiIfNotifsControlObjects, ctsiAuthorizationState=ctsiAuthorizationState, ctsiIfPeerSgt=ctsiIfPeerSgt, ctsiIfDot1xSgtPropagateEnabled=ctsiIfDot1xSgtPropagateEnabled, CtsiInterfaceControllerState=CtsiInterfaceControllerState, ctsiIfRekey=ctsiIfRekey, ctsiIfManualStaticSgtTrusted=ctsiIfManualStaticSgtTrusted, ctsiIfPeerId=ctsiIfPeerId, 
ctsiIfSapFail=ctsiIfSapFail, ctsiAuthenticationNoRespond=ctsiAuthenticationNoRespond, ctsiIfAuthenticationFailNotif=ctsiIfAuthenticationFailNotif, ctsiIfcStatsEntry=ctsiIfcStatsEntry, PYSNMP_MODULE_ID=ciscoTrustSecIfMIB, ctsiInManualModeIfCount=ctsiInManualModeIfCount, ctsiAuthenticationLogoff=ctsiAuthenticationLogoff, ctsiIfL3ForwardRowStatus=ctsiIfL3ForwardRowStatus, ctsiIfAuthenticationFailure=ctsiIfAuthenticationFailure, ctsiIfDot1xPaeRole=ctsiIfDot1xPaeRole, ctsiAuthorizationPolicyFailure=ctsiAuthorizationPolicyFailure, ctsiAuthenticationSuccess=ctsiAuthenticationSuccess, ctsiIfDot1xObjects=ctsiIfDot1xObjects, ciscoTrustSecIfMIBNotifsCtrlGrp=ciscoTrustSecIfMIBNotifsCtrlGrp, ctsiIfAuthorizationSuccess=ctsiIfAuthorizationSuccess, ctsiIfStatusEntry=ctsiIfStatusEntry, ctsiIfL3ForwardEntry=ctsiIfL3ForwardEntry, ctsiInL3ForwardModeIfCount=ctsiInL3ForwardModeIfCount, ctsiIfNotifsOnlyInfoObjects=ctsiIfNotifsOnlyInfoObjects, ctsiIfAddSupplicantFailNotifEnable=ctsiIfAddSupplicantFailNotifEnable, ctsiIfAuthenticationLogoff=ctsiIfAuthenticationLogoff, ciscoTrustSecIfMIBCompliances=ciscoTrustSecIfMIBCompliances, ctsiIfL3ForwardStorageType=ctsiIfL3ForwardStorageType, ctsiIfManualEntry=ctsiIfManualEntry, ciscoTrustSecIfMIBIfConfigGroup=ciscoTrustSecIfMIBIfConfigGroup, ciscoTrustSecIfMIBEventStatisticGroup=ciscoTrustSecIfMIBEventStatisticGroup, ctsiAuthorizationEntry=ctsiAuthorizationEntry, ctsiAuthorizationLastRefresh=ctsiAuthorizationLastRefresh, ctsiIfManualStaticSgt=ctsiIfManualStaticSgt, ctsiIfSapNegModeList=ctsiIfSapNegModeList, ciscoTrustSecIfMIBStatusGroup=ciscoTrustSecIfMIBStatusGroup, ctsiIfSapSuccess=ctsiIfSapSuccess, ctsiIfNotifMessage=ctsiIfNotifMessage, ctsiAuthenticationFailure=ctsiAuthenticationFailure, ctsiIfDot1xReauthTimeLeft=ctsiIfDot1xReauthTimeLeft, ctsiIfDot1xReauthInterval=ctsiIfDot1xReauthInterval, ciscoTrustSecIfMIBIfModeStatisticGroup=ciscoTrustSecIfMIBIfModeStatisticGroup, ciscoTrustSecIfMIBL3ForwardGroup=ciscoTrustSecIfMIBL3ForwardGroup, 
ciscoTrustSecIfMIBCompliance3=ciscoTrustSecIfMIBCompliance3, ctsiIfL3ForwardObjects=ctsiIfL3ForwardObjects, ctsiIfStatsObjects=ctsiIfStatsObjects, ctsiAuthorizationTable=ctsiAuthorizationTable, ciscoTrustSecIfMIBObjects=ciscoTrustSecIfMIBObjects, ctsiAuthorizationPeerId=ctsiAuthorizationPeerId, ctsiIfStatusTable=ctsiIfStatusTable, ciscoTrustSecIfMIBAuthorizationGroup=ciscoTrustSecIfMIBAuthorizationGroup, ctsiIfDot1xOperReauthInterval=ctsiIfDot1xOperReauthInterval, ctsiIfDot1xRowStatus=ctsiIfDot1xRowStatus, ctsiIfControllerState=ctsiIfControllerState, ciscoTrustSecIfMIBStatisticGroup=ciscoTrustSecIfMIBStatisticGroup, ciscoTrustSecIfMIB=ciscoTrustSecIfMIB, ciscoTrustSecIfMIBNotifs=ciscoTrustSecIfMIBNotifs, ctsiAuthorizationFailure=ctsiAuthorizationFailure, ctsiIfSapNegotiationFailNotif=ctsiIfSapNegotiationFailNotif, CtsSapNegMode=CtsSapNegMode, ctsiIfManualStorageType=ctsiIfManualStorageType, ctsiIfDot1xTable=ctsiIfDot1xTable, ctsiIfManualTable=ctsiIfManualTable, ctsiEventsStatsObjects=ctsiEventsStatsObjects, ciscoTrustSecIfMIBGroups=ciscoTrustSecIfMIBGroups, CtsSapNegModeList=CtsSapNegModeList, ctsiAuthorizationStatus=ctsiAuthorizationStatus, ciscoTrustSecIfMIBCompliance2=ciscoTrustSecIfMIBCompliance2, ctsiIfManualSapModeList=ctsiIfManualSapModeList, ciscoTrustSecIfMIBIfcStatisticGroup=ciscoTrustSecIfMIBIfcStatisticGroup, ciscoTrustSecIfMIBCompliance=ciscoTrustSecIfMIBCompliance, ciscoTrustSecIfMIBCriticalAuthStatusGrp=ciscoTrustSecIfMIBCriticalAuthStatusGrp, ctsiIfSapNegotiationFailNotifEnable=ctsiIfSapNegotiationFailNotifEnable, ctsiSapNegotiationFailure=ctsiSapNegotiationFailure, ctsiIfL3ForwardTable=ctsiIfL3ForwardTable, ctsiAuthorizationFailNotifEnable=ctsiAuthorizationFailNotifEnable, ctsiIfAuthenticationNoResponse=ctsiIfAuthenticationNoResponse, ctsiIfAuthenticationReject=ctsiIfAuthenticationReject, ciscoTrustSecIfMIBConform=ciscoTrustSecIfMIBConform, ctsiIfStatsTable=ctsiIfStatsTable, ctsiIfUnauthorizedNotifEnable=ctsiIfUnauthorizedNotifEnable, 
ctsiIfAuthenticationSuccess=ctsiIfAuthenticationSuccess, ctsiIfManualDynamicPeerId=ctsiIfManualDynamicPeerId, ctsiAuthorizationTimeLeft=ctsiAuthorizationTimeLeft, ctsiIfConfigObjects=ctsiIfConfigObjects, ctsiIfAuthenticationFailNotifEnable=ctsiIfAuthenticationFailNotifEnable, ciscoTrustSecIfMIBNotifsOnlyInfoGrp=ciscoTrustSecIfMIBNotifsOnlyInfoGrp, ctsiIfDot1xEntry=ctsiIfDot1xEntry, ctsiIfPeerAdvCapability=ctsiIfPeerAdvCapability, ctsiIfcState=ctsiIfcState, ctsiIfCriticalAuthStatus=ctsiIfCriticalAuthStatus, ctsiAuthorizationPeerSgt=ctsiAuthorizationPeerSgt, ctsiIfStatsEntry=ctsiIfStatsEntry, ctsiIfCacheExpirationTime=ctsiIfCacheExpirationTime, ciscoTrustSecIfMIBNotifsGrp=ciscoTrustSecIfMIBNotifsGrp, ctsiAuthorizationSuccess=ctsiAuthorizationSuccess, ctsiIfDot1xDownloadReauthInterval=ctsiIfDot1xDownloadReauthInterval, ctsiIfcStatsObjects=ctsiIfcStatsObjects, ctsiIfcStatsIfCount=ctsiIfcStatsIfCount, ctsiAuthorizationFailNotif=ctsiAuthorizationFailNotif, ctsiIfCacheClear=ctsiIfCacheClear)
# NOTE(review): removed trailing non-Python text ("Subsets and Splits" /
# "No community queries yet" ...) — web-page scrape residue that was appended
# after the generated module and would have been a syntax error.