repo_name: string (5–100 chars) | ref: string (12–67 chars) | path: string (4–244 chars) | copies: string (1–8 chars) | content: string (0–1.05M chars, nullable ⌀)
---|---|---|---|---|
tuxfux-hlp-notes/python-batches | refs/heads/master | archieves/batch-64/09-modules/myenv/lib/python2.7/site-packages/django/core/management/color.py | 151 | """
Sets up the terminal color scheme.
"""
import os
import sys
from django.utils import lru_cache, termcolors
def supports_color():
"""
Returns True if the running system's terminal supports color,
and False otherwise.
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True
def make_style(config_string=''):
"""
Create a Style object from the given config_string.
If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.
"""
class Style(object):
pass
style = Style()
color_settings = termcolors.parse_color_setting(config_string)
# The nocolor palette has all available roles.
# Use that palette as the basis for populating
# the palette as defined in the environment.
for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
if color_settings:
format = color_settings.get(role, {})
style_func = termcolors.make_style(**format)
else:
style_func = lambda x: x
setattr(style, role, style_func)
# For backwards compatibility,
# set style for ERROR_OUTPUT == ERROR
style.ERROR_OUTPUT = style.ERROR
return style
@lru_cache.lru_cache(maxsize=None)
def no_style():
"""
Returns a Style object with no color scheme.
"""
return make_style('nocolor')
def color_style():
"""
Returns a Style object from the Django color scheme.
"""
if not supports_color():
return no_style()
return make_style(os.environ.get('DJANGO_COLORS', ''))
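# Usage sketch (not part of the module itself): palette roles such as ERROR
# become callables that wrap text in ANSI codes on supported terminals.
#
#     from django.core.management.color import color_style
#     style = color_style()
#     print(style.ERROR('something broke'))  # red text when color is supported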
|
gcblue/gcblue | refs/heads/master | scripts/UnitCommands.py | 1 | import os, sys
from os.path import dirname, abspath, join, normpath
sys.path.append(abspath(join(dirname(__file__), 'Amram_Script_Data')))
from MissionTemplates import *
from Amram_Utilities import *
import math
deg_to_rad = 0.01745329252
def SetFormationLeader(UI, id):
UI.SetFormationLeader(id)
UI.UpdateMissionEditGraphics() # so formation graphics appear without rehook
#UI.DisplayMessage('Form leader: %d' % id)
def EnableFormationEdit(UI):
UI.SetFormationEdit(1)
def DisableFormationEdit(UI):
UI.SetFormationEdit(0)
def SetFormationMode(UI, mode_id):
UI.SetFormationMode(mode_id)
def ActivateAllSensors(UI):
UI.SetAllSensorState(1)
def ActivatePassiveSensors(UI):
nSensors = UI.GetSensorCount()
for n in range(0, nSensors):
sensor_info = UI.GetSensorInfo(n)
if (sensor_info.IsPassive()):
UI.SetSensorState(n, 1)
def GetConnControl(BB):
return BB.Write('ConnLock', '')
def ReleaseConnControl(BB):
if (BB.ReadAuthor('ConnLock') == BB.author):
BB.Erase('ConnLock')
def GetSensorControl(BB):
return BB.Write('SensorLock', '')
def ReleaseSensorControl(BB):
BB.Erase('SensorLock')
def TakeControl(UI):
UI.TakeControl()
def ReleaseControl(UI):
UI.ReleaseControl()
def AddEngageAllOrder(UI):
UI.AddTask('EngageAll', 2.0, 0)
def ToggleEngageAll(UI):
if (UI.TaskExists('EngageAllAir')):
UI.DeleteTask('EngageAllAir')
UI.AddTask('EngageAll', 2.0, 0)
elif (UI.TaskExists('EngageAll')):
UI.DeleteTask('EngageAll')
else:
UI.AddTask('EngageAllAir', 2.0, 0)
def AddTaskByName(UI, taskName):
UI.AddTask(taskName, 3.0, 0)
def AddBombTargetTask(UI):
UI.AddTask('BombTarget', 3.0, 0)
def AddBombDatumTask(UI, lon, lat):
BB = UI.GetBlackboardInterface()
BB.Write('DatumLongitude', '%f' % lon)
BB.Write('DatumLatitude', '%f' % lat)
UI.AddTask('BombDatum', 3.0, 0)
def AddBombDatumTaskDeg(UI, lon, lat):
BB = UI.GetBlackboardInterface()
deg_to_rad = 0.01745329252
BB.Write('DatumLongitude', '%f' % (deg_to_rad*lon))
BB.Write('DatumLatitude', '%f' % (deg_to_rad*lat))
UI.AddTask('BombDatum', 3.0, 0)
# version that uses target_id to automatically get the coordinates
def AddBombDatumTaskTargetID(UI, target_id):
target_track = UI.GetTrackById(target_id)
if (not target_track.IsValid()):
return
lon = target_track.Lon
lat = target_track.Lat
BB = UI.GetBlackboardInterface()
BB.Write('DatumLongitude', '%f' % lon)
BB.Write('DatumLatitude', '%f' % lat)
UI.AddTask('BombDatum', 3.0, 0)
def AddInterceptTask(UI):
UI.AddTask('IntC', 2.0, 0)
# @param bingo_fuel return to base when fuel is less than this fuel fraction
# @param rtb_time return to base when indicated sim time has elapsed
def AddRTBtask(UI, home_name, bingo_fuel, rtb_time):
BB = UI.GetBlackboardInterface()
current_time = UI.GetTime()
if (not BB.KeyExists('Home')):
BB.Write('Home', home_name)
BB.Write('Bingo', '%f' % bingo_fuel)
BB.Write('RTBtime', '%f' % (current_time + rtb_time))
UI.AddTask('RTB', 2.0, 0)
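# Usage sketch (illustrative values, not from the source): send a hooked
# aircraft home to a base named 'Kitty Hawk' when fuel falls below 20%
# or after 3600 s of sim time, whichever comes first.
#
#     AddRTBtask(UI, 'Kitty Hawk', 0.20, 3600.0)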
def AddAirFormationTaskTarget(UI):
target = UI.GetTarget()
AddAirFormationTaskId(UI, target)
def AddAirFormationTaskId(UI, id):
track = UI.GetTrackById(id)
if (not track.IsAir() or not UI.IsAir()):
return
BB = UI.GetBlackboardInterface()
BB.Write('FormationLeader', '%d' % id)
UI.AddTask('AirFormation', 2.0, 0)
def BreakAirFormation(UI):
UI.SetFormationLeader(-1)
def AddRefuelTask(UI, target):
UI.StartRefuelWith(target) # direct call for now, need task
# for new first waypoint for helo, automatically increase speed if helo was slow
# do the same for ground vehicles
def ApplyHeloWaypointLogic(UI):
if ((UI.IsHelo() or UI.IsGroundVehicle()) and not UI.TaskExists('Nav')):
speed_kts = UI.GetSpeed()
maxSpeed_kts = UI.GetMaxSpeed()
if (speed_kts < 0.25*maxSpeed_kts):
UI.SetSpeed(0.75*maxSpeed_kts)
def AddWaypointOrder(UI, lon, lat):
if (not UI.HasFlightPort()):
ApplyHeloWaypointLogic(UI)
UI.AddNavWaypoint(lon, lat)
else:
FP = UI.GetFlightPortInfo()
editMission = FP.GetEditMissionId()
if (editMission == 0):
UI.AddNavWaypoint(lon, lat)
else:
FP.AddMissionWaypoint(editMission, lon, lat)
def AddMissionWaypointAdvanced(UI, mission_id, lon, lat, alt_m, speed_kts):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.AddMissionWaypointAdvanced(mission_id, lon, lat, alt_m, speed_kts)
def AddWaypointOrderDeg(UI, lon_deg, lat_deg):
deg_to_rad = 0.01745329252
ApplyHeloWaypointLogic(UI)
UI.AddNavWaypoint(deg_to_rad*lon_deg, deg_to_rad*lat_deg)
# version that sets altitude and speed too
def AddWaypointOrderDeg2(UI, lon_deg, lat_deg, alt_m, speed_kts):
deg_to_rad = 0.01745329252
UI.AddNavWaypointAdvanced(deg_to_rad*lon_deg, deg_to_rad*lat_deg, alt_m, speed_kts)
def EditWaypoint(UI, idx, lon, lat):
try:
#GetPlatformId ONLY works on UnitInfo, so if we throw an error, we got called by GroupInfo instead
test = UI.GetPlatformId()
group = False
except:
group = True
if group:
GI = UI
for n in xrange(GI.GetUnitCount()):
UI = GI.GetPlatformInterface(n)
UI.EditNavWaypoint(idx, lon, lat)
else:
UI.EditNavWaypoint(idx, lon, lat)
def EditWaypointAdvanced(UI, idx, lon, lat, alt_m, speed_kts):
UI.EditNavWaypointAdvanced(idx, lon, lat, alt_m, speed_kts)
def EditWaypointReferencePlatform(UI, referencePlatformId, waypoint_idx, referenceMode):
UI.EditNavWaypointReference(waypoint_idx, referenceMode, referencePlatformId)
# insert nav waypoint before waypoint at idx
def InsertWaypoint(UI, idx, lon, lat):
UI.InsertNavWaypoint(idx, lon, lat)
# insert mission waypoint before waypoint at idx
def InsertMissionWaypoint(UI, mission_id, idx, lon, lat):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.InsertMissionWaypoint(mission_id, idx, lon, lat)
# delete nav waypoint
def DeleteWaypoint(UI, idx):
UI.DeleteNavWaypoint(idx)
# delete mission waypoint
def DeleteMissionWaypoint(UI, mission_id, idx):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.DeleteMissionWaypoint(mission_id, idx)
# version for flightport mission
def EditMissionWaypoint(UI, mission_id, idx, lon, lat):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.EditMissionWaypoint(mission_id, idx, lon, lat)
# version for flightport mission
def EditMissionWaypointAdvanced(UI, mission_id, idx, lon, lat, alt_m, speed_kts):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.EditMissionWaypointAdvanced(mission_id, idx, lon, lat, alt_m, speed_kts)
def EditMissionWaypointReferencePlatform(UI, referencePlatformId, mission_id, waypoint_idx, referenceMode):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.EditMissionWaypointReference(mission_id, waypoint_idx, referenceMode, referencePlatformId)
# version with idx starting from current waypoint
def EditActiveWaypoint(UI, idx, lon, lat):
EditWaypoint(UI, idx + UI.GetCurrentWaypoint(), lon, lat)
def EditActiveWaypointAdvanced(UI, idx, lon, lat, alt_m, speed_kts):
EditWaypointAdvanced(UI, idx + UI.GetCurrentWaypoint(), lon, lat, alt_m, speed_kts)
def SetNavWaypointTasks(UI, idx, task_list):
UI.SetNavWaypointTasks(idx, task_list)
def SetMissionWaypointTasks(UI, mission_id, idx, task_list):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionWaypointTasks(mission_id, idx, task_list)
def SetMissionDatum(UI, lon_rad, lat_rad, mission_id):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionDatum(mission_id, lon_rad, lat_rad)
UI.UpdateMissionEditGraphics()
def SetTargetDatum(UI, lon_rad, lat_rad):
BB = UI.GetBlackboardInterface()
BB.Write('DatumLongitude', '%.7f' % lon_rad)
BB.Write('DatumLatitude', '%.7f' % lat_rad)
UI.UpdateMissionEditGraphics()
def ClearTargetDatum(UI):
BB = UI.GetBlackboardInterface()
BB.Erase('DatumLongitude')
BB.Erase('DatumLatitude')
UI.UpdateMissionEditGraphics()
def SetMissionLandingTarget(UI, target_id, mission_id):
if ((not UI.HasFlightPort()) or (target_id == -1)):
return
FP = UI.GetFlightPortInfo()
base_name = UI.LookupFriendlyName(target_id)
FP.SetMissionLandingTarget(mission_id, base_name)
def SetMissionLaunchTime(UI, mission_id, time_string):
if ((not UI.HasFlightPort()) or (mission_id == -1)):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionLaunchTime(mission_id, time_string)
def SetMissionType(UI, mission_id, type_string):
if ((not UI.HasFlightPort()) or (mission_id == -1)):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionType(mission_id, type_string)
def SetNavLoopState(UI, state):
UI.SetNavLoopState(state) # 1 to loop waypoints, 0 no loop
def AddPatrolOrder(UI):
if (UI.IsSub()):
UI.AddTask('SubPatrol', 1.0, 0)
else:
UI.AddTask('Patrol', 1.0, 0)
def AddPatrolStation(UI, lon, lat):
UI.AddTask('PatrolCircle', 1.0, 0)
BB = UI.GetBlackboardInterface()
BB.Write('StationLongitude', '%f' % lon)
BB.Write('StationLatitude', '%f' % lat)
#BB.Write('StationAltitude', '%f' % UI.GetAlt())
def AddPatrolStationDeg(UI, lon, lat):
UI.AddTask('PatrolCircle', 1.0, 0)
BB = UI.GetBlackboardInterface()
BB.Write('StationLongitude', '%f' % (deg_to_rad*lon))
BB.Write('StationLatitude', '%f' % (deg_to_rad*lat))
BB.Write('StationAltitude', '%f' % UI.GetAlt())
def AddLandingOrder(UI, dest_id):
if (not UI.IsAir()):
return
UI.ClearTasks()
if (UI.IsHelo()):
UI.AddTask('LandHelo', 3.0, 0)
else:
UI.AddTask('Land', 3.0, 0)
dest_name = UI.LookupFriendlyName(dest_id)
BB = UI.GetBlackboardInterface()
BB.Write('LandTarget', dest_name)
def AddMissileWarnTask(UI):
UI.AddTask('MissileWarning', 0.0, 0)
def AddNamedTask(UI, task_name):
UI.AddTask(task_name, 3.0, 0)
def ClearTasks(UI):
UI.ClearTasks()
def ClearWaypoints(interface):
try:
#GetPlatformId ONLY works on UnitInfo, so if we throw an error, we got called by GroupInfo instead
test = interface.GetPlatformId()
UI = interface
UI.DeleteTask('Nav')
except:
GI = interface
for n in xrange(GI.GetUnitCount()):
UI = GI.GetPlatformInterface(n)
UI.DeleteTask('Nav')
# gets info on closest (known) enemy or unknown platform within search range
def ClosestOfType(UI, class_mask, search_range_km):
track_list = UI.GetTrackList(class_mask, search_range_km, 100)
current_time = UI.GetTime()
nTracks = track_list.Size()
closest_range = 1e6
closest_id = -1
for n in range(0, nTracks):
track_info = track_list.GetTrack(n)
track_id = track_info.ID
staleness = current_time - track_info.Time
range_km = UI.GetRangeToTrack(track_info)
if ((staleness <= 20.0) and (range_km < closest_range)):
closest_range = range_km
closest_id = track_id
return (closest_id, closest_range)
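# Usage sketch (the class_mask value is illustrative; its bit layout is
# engine-defined): find the nearest fresh track within 50 km and attack it.
#
#     closest_id, closest_range = ClosestOfType(UI, 0x0007, 50.0)
#     if closest_id != -1:
#         SelectAttackTarget(UI, closest_id)  # defined later in this module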
# gets info on closest (known) enemy or unknown platform within search range
def ClosestOfTypeUnengaged(UI, class_mask, search_range_km):
track_list = UI.GetTrackList(class_mask, search_range_km, 100)
current_time = UI.GetTime()
nTracks = track_list.Size()
closest_range = 1e6
closest_id = -1
closest_bearing = 0
for n in range(0, nTracks):
track_info = track_list.GetTrack(n)
track_id = track_info.ID
nEngaged = track_info.GetEngagedCount()
staleness = current_time - track_info.Time
range_km = UI.GetRangeToTrack(track_info)
if ((nEngaged == 0) and (staleness <= 20.0) and (range_km < closest_range)):
closest_range = range_km
closest_id = track_id
closest_bearing = UI.GetHeadingToDatum(track_info.Lon, track_info.Lat)
return (closest_id, closest_range, closest_bearing)
# clears all orders, moves to location and starts patrol
def MoveToAndPatrol(UI, lon, lat):
ClearTasks(UI)
UI.AddTask('Patrol', 1.0, 0)
UI.AddTask('Nav', 2.0, 0)
UI.AddTask('EngageAll', 3.0, 0)
AddWaypointOrder(UI, lon, lat)
def SetPatrolArea(UI, lon1, lat1, lon2, lat2):
BB = UI.GetBlackboardInterface()
hasPatrol = BB.KeyExists('PatrolArea')
if (not hasPatrol):
UI.DeleteTask('Nav')
if (not UI.IsHelo()):
if (not HasSonobuoys(UI)):
UI.AddTask('AirPatrolArea', 1.0, 0)
else:
UI.AddTask('ASWPatrolAreaFW', 1.0, 0)
else:
UI.AddTask('ASWPatrolArea', 1.0, 0)
#area_string = '%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,' % (lon-0.001, lat+0.001, lon+0.001, lat+0.001, lon+0.001, lat-0.001, lon-0.001, lat-0.001)
area_string = '%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,' % (lon1, lat1, lon2, lat1, lon2, lat2, lon1, lat2)
SetPatrolAreaString(UI, area_string)
def SetMissionPatrolArea(UI, lon, lat, mission_id):
BB = UI.GetBlackboardInterface()
area_string = '%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,' % (lon-0.001, lat+0.001, lon+0.001, lat+0.001, lon+0.001, lat-0.001, lon-0.001, lat-0.001)
SetMissionPatrolAreaString(UI, mission_id, area_string)
def SetPatrolAreaString(UI, area_string):
BB = UI.GetBlackboardInterface()
BB.Write('PatrolArea', area_string)
UI.UpdateMissionEditGraphics() # so area graphics appear without rehook
def SetMissionPatrolAreaString(UI, mission_id, area_string):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionPatrolArea(mission_id, area_string)
UI.UpdateMissionEditGraphics()
def SetPatrolAnchor(UI, anchor_unit, anchor_mode):
# look up position of anchor_unit (must be friendly unit)
id = UI.LookupFriendlyId(anchor_unit)
track = UI.GetTrackById(id)
if (not track.IsValid()):
return
#anchor_string = '%s,%.7f,%.7f,%d' % (anchor_unit, track.Lon, track.Lat, anchor_mode)
anchor_string = '%s,%d' % (anchor_unit, anchor_mode)
BB = UI.GetBlackboardInterface()
BB.Write('PatrolAnchor', anchor_string)
UI.UpdateMissionEditGraphics() # so area graphics appear without rehook
def SetMissionPatrolAnchor(UI, mission_id, anchor_unit, anchor_mode):
if (not UI.HasFlightPort()):
return
    # look up position of anchor_unit (must be a friendly unit); normally this is the flightport
id = UI.LookupFriendlyId(anchor_unit)
track = UI.GetTrackById(id)
if ((not track.IsValid()) and (anchor_mode != 0)):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionPatrolAnchor(mission_id, anchor_unit, anchor_mode)
UI.UpdateMissionEditGraphics()
def ClearPatrolArea(UI):
BB = UI.GetBlackboardInterface()
BB.Erase('PatrolArea')
UI.UpdateMissionEditGraphics()
def ClearMissionPatrolArea(UI, mission_id):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.SetMissionPatrolArea(mission_id, '')
UI.UpdateMissionEditGraphics()
def LaunchDatum(UI, lon, lat, launcher_num):
datum_alt = UI.GetMapTerrainElevation(lon, lat)
if (datum_alt < 5.0):
datum_alt = 0.0
UI.SendDatumToLauncher(lon,lat,datum_alt + 0.0,launcher_num)
UI.Launch(launcher_num, 1)
# this version uses altitude provided instead of doing lookup of ground height
def LaunchDatum3D(UI, lon, lat, alt, launcher_num):
UI.SendDatumToLauncher(lon,lat,alt,launcher_num)
UI.Launch(launcher_num, 1)
def SetDatum(UI, lon, lat, launcher_num):
datum_alt = UI.GetMapTerrainElevation(lon, lat)
if (datum_alt < 5.0):
datum_alt = 0.0
UI.SendDatumToLauncher(lon,lat,datum_alt + 5.0,launcher_num)
def TargetAndEngageWith(UI, target_id, launcher):
UI.SetTarget(target_id)
if (UI.IsLauncherEffective(launcher)):
LaunchTarget(UI, launcher)
else:
UI.SetTarget(-1) # launcher is not effective vs. target, clear target
UI.DisplayMessage('Canceling launch, not effective vs. target.')
def LaunchTarget(UI, launcher):
target_id = UI.GetTarget()
if (target_id == -1):
return # return if no target selected
if UI.IsDatumLaunch(launcher):
track_info = UI.GetTargetTrackInfo()
tti_s = UI.GetLauncherInterceptTime(launcher, track_info)
predicted_track = track_info.PredictAhead(tti_s)
lat = predicted_track.Lat
lon = predicted_track.Lon
if ((track_info.IsAir()) and ((track_info.Flags & 0x01) != 0)):
alt = track_info.Alt
UI.HandoffTargetToLauncher(launcher) # so that intended target is set
LaunchDatum3D(UI, lon, lat, alt, launcher)
return
#UI.DisplayMessage('tti: %.1f' % tti_s)
UI.HandoffTargetToLauncher(launcher) # so that intended target is set
LaunchDatum(UI, lon, lat, launcher)
else: # handoff to active seeker
target_accepted = UI.HandoffTargetToLauncher(launcher)
if (target_accepted):
UI.Launch(launcher,1)
else:
UI.DisplayMessage('Handoff failed, may not be in seeker coverage.')
def LaunchChaffAndFlare(UI):
if ((not UI.IsAir()) and (not UI.IsSurface())):
return
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
info = UI.GetLauncherInfo(n)
# LaunchMode == 10 is chaff or flare
if (info.LaunchMode == 10) and (info.Quantity > 0):
UI.Launch(n, 1)
# launch any CM with minimum lifetime, returns number of CM launched
def LaunchShipCM(UI, minLifeTime_s):
if (not UI.IsSurface()):
return 0
launch_count = 0
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
info = UI.GetLauncherInfo(n)
# LaunchMode == 10 is chaff or flare
if (info.LaunchMode == 10) and (info.Quantity > 0) and (info.LifeTime_s >= minLifeTime_s):
UI.Launch(n, 1)
launch_count = launch_count + 1
return launch_count
def GetCMLifeSpan(UI):
    if ((not UI.IsAir()) and (not UI.IsSurface())):
        return 0
    max_lifetime_s = 0
    nLaunchers = UI.GetLauncherCount()
    for n in range(0, nLaunchers):
        info = UI.GetLauncherInfo(n)
        # LaunchMode == 10 is chaff or flare
        if (info.LaunchMode == 10) and (info.Quantity > 0):
            max_lifetime_s = max(max_lifetime_s, info.LifeTime_s)
    return max_lifetime_s
def PrintTargetInterceptParams(UI):
# Get target id (-1 is no target)
tgt_id = UI.GetTarget()
if tgt_id == -1:
UI.DisplayMessage('No target')
return
# Get track data on target
track = UI.GetTrackById(tgt_id)
track_heading_rad = track.Heading_rad
track_speed_kts = track.Speed
track_bearing_deg = UI.GetHeadingToDatum(track.Lon, track.Lat) # returns in units of deg
my_heading_deg = UI.GetHeading() # returns in units of deg
my_speed_kts = UI.GetSpeed()
# Print info to console
rad_to_deg = 57.29578
display_string = 'Tgt hdg: %.0f deg, speed: %.0f kts, brg: %.0f deg, ' \
'own hdg: %.0f deg, speed: %.0f kts' % \
(rad_to_deg*track_heading_rad, track_speed_kts, track_bearing_deg, \
my_heading_deg, my_speed_kts)
UI.DisplayMessage(display_string)
# select new target and attack
def SelectAttackTarget(UI, target_id):
UI.SetTarget(target_id)
UI.AddTask('InterceptTarget', 2.0, 0)
# attack current target
def AttackTarget(UI):
UI.AddTask('InterceptTarget', 2.0, 0)
def SelectTarget(UI, target_id):
UI.SetTarget(target_id)
# trackinfo = UI.GetTargetTrackInfo()
def ClearTarget(UI):
UI.SetTarget(-1)
def SpeedAB(UI):
# 2.0 is max afterburner, 1.5 would be half afterburner
SetFractionalSpeed(UI, 2.0)
def Speed100(UI):
SetFractionalSpeed(UI, 1.0)
def Speed80(UI):
SetFractionalSpeed(UI, 0.8)
def Speed50(UI):
SetFractionalSpeed(UI, 0.5)
def Speed30(UI):
SetFractionalSpeed(UI, 0.3)
def SpeedCruise(UI):
alt_m = UI.GetAltitude()
cruise_kts = UI.GetCruiseSpeedForAltitude(alt_m)
if (cruise_kts > 0):
UI.SetSpeed(cruise_kts)
def AltitudeHigh(UI):
max_altitude = UI.GetMaxAlt()
if (max_altitude >= 10000):
SetAlt(UI, 10000)
else:
SetAlt(UI, max_altitude)
def AltitudeMedium(UI):
SetAlt(UI, 4000)
def AltitudeLow(UI):
SetAlt(UI, 150)
def AltitudeVeryLow(UI):
SetAlt(UI, 50)
def DepthSurface(UI):
if (not UI.IsSub()):
return
SetAlt(UI, 0)
def DepthPeriscope(UI):
if (not UI.IsSub()):
return
SetAlt(UI, 0)
SubInterface = UI.GetSubInterface()
SubInterface.GoToPeriscopeDepth()
def DepthMedium(UI):
if (not UI.IsSub()):
return
SetAlt(UI, -100)
def DepthDeep(UI):
if (not UI.IsSub()):
return
SubInterface = UI.GetSubInterface()
max_depth = SubInterface.GetMaxDepth()
SetAlt(UI, -max_depth + 50)
def SetPeriscope(UI, state):
if (not UI.IsSub()):
return
SubInterface = UI.GetSubInterface()
if (state == 0):
SubInterface.LowerPeriscope()
else:
SubInterface.RaisePeriscope()
def SetRadarMast(UI, state):
if (not UI.IsSub()):
return
SubInterface = UI.GetSubInterface()
if (state == 0):
SubInterface.LowerRadarMast()
else:
SubInterface.RaiseRadarMast()
def SetSnorkel(UI, state):
if (not UI.IsSub()):
return
SubInterface = UI.GetSubInterface()
if (state == 0):
SubInterface.SetSnorkelState(0)
else:
SubInterface.SetSnorkelState(1)
# reworked by Amram: s is 'idx~weapon' and permits ANY launcher number,
# ex: '1337~Mk-46 Mod5' -> idx 1337, item 'Mk-46 Mod5'
# (the old form only supported single-digit launcher indices 0-9)
def ReloadLauncher(UI, s):
#assume s is 'id~weapon'
#use split to retrieve.
idx, item = s.split('~')
UI.LoadLauncher(int(idx), item)
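# Usage sketch, using the 'idx~weapon' format described above:
#
#     ReloadLauncher(UI, '1337~Mk-46 Mod5')  # load launcher 1337 with a Mk-46 Mod5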
def Reload0(UI, weap_name):
UI.LoadLauncher(0, weap_name)
def Reload1(UI, weap_name):
UI.LoadLauncher(1, weap_name)
def Reload2(UI, weap_name):
UI.LoadLauncher(2, weap_name)
def Reload3(UI, weap_name):
UI.LoadLauncher(3, weap_name)
def Reload4(UI, weap_name):
UI.LoadLauncher(4, weap_name)
def Reload5(UI, weap_name):
UI.LoadLauncher(5, weap_name)
def Reload6(UI, weap_name):
UI.LoadLauncher(6, weap_name)
def Reload7(UI, weap_name):
UI.LoadLauncher(7, weap_name)
def Unload(UI, launcher):
UI.UnloadLauncher(launcher)
def SetFractionalSpeed(UI, k, dispMessage=0):
# use throttle setting for units with throttle, throttle of > 1 is
# afterburners
if (UI.HasThrottle()):
UI.SetThrottle(k)
if (dispMessage):
if (k <= 1):
UI.DisplayMessage('Set throttle to %2.0f%% military' % (100.0*k))
else:
UI.DisplayMessage('Set throttle to afterburn')
else:
if (k > 1):
k = 1.0
if UI.IsSub():
max_speed = float(UI.QueryDatabase('sub', UI.GetPlatformClass(), 'MaxSpeed_kts').GetRow(0).GetString(0))
else:
max_speed = UI.GetMaxSpeed()
UI.SetSpeed(k*max_speed)
def SetHeading(interface,h):
try:
UI = interface
UI.DeleteTask('AutoLoiter')
UI.SetHeadingRad(h)
except:
GI = interface
unit_count = GI.GetUnitCount()
for n in range(0, unit_count):
UnitInfo = GI.GetPlatformInterface(n)
UnitInfo.DeleteTask('AutoLoiter')
SetHeading(UnitInfo, h)
def Altitude(UI):
SetAlt(UI, 2345)
def ShowFlightPanel(UI):
UI.SendCommand('ShowFlightPanel')
def ShowStoresPanel(UI):
UI.SendCommand('ShowStoresPanel')
def ShowPlatformPanel(UI):
UI.SendCommand('ShowPlatformPanel')
def ToggleRadars(UI):
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if (sens_info.type == 1):
if (sens_info.isActive):
UI.SetSensorState(n, 0)
else:
UI.SetSensorState(n, 1)
UI.PlaySound('Ping')
# 28DEC2010, added to check if a helo has a dipping sonar
def HasSonar(UI):
n_sensors = UI.GetSensorCount()
for n in range(0, n_sensors):
sens_info = UI.GetSensorInfo(n)
# 1 radar, 2 ESM, 4 passive sonar, 8 active sonar, 16 optical, 0 error
if ((sens_info.type == 4) or (sens_info.type == 8)):
return 1
return 0
# return 1 if unit has gravity bombs, 0 otherwise
def HasGravityBombs(UI):
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
info = UI.GetLauncherInfo(n)
# LaunchMode == 3 is gravity bomb
if (info.LaunchMode == 3) and (info.Quantity > 0):
return 1
return 0
# return 1 if unit has sonobuoys, 0 otherwise
def HasSonobuoys(UI):
nLaunchers = UI.GetLauncherCount()
for n in range(0, nLaunchers):
info = UI.GetLauncherInfo(n)
# LaunchMode == 11 is sonobuoy
if (info.LaunchMode == 11) and (info.Quantity > 0):
return 1
return 0
# TLL - added. 'v' in hotkey menu
def ReverseOurCourse(UI):
current_heading = UI.GetHeading() + 180
if (current_heading >= 360):
current_heading = current_heading - 360
UI.SetHeading(current_heading)
def EraseLastOrder(UI):
task_list = UI.GetTaskList()
if (task_list.Size() > 0):
UI.DeleteTask(task_list.GetString(task_list.Size()-1))
def AddCAPMission(UI, lon, lat):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.AddCAPMission(lon, lat)
def AddASWMission(UI, lon, lat):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.AddASWMission(lon, lat)
def AddAEWMission(UI, lon, lat):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.AddAEWMission(lon, lat)
def AddAttackMission(UI, targetId):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.AddAttackMission(targetId)
def SetEditMissionId(UI, mission_id):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.SetEditMissionId(mission_id)
def AddMissionWaypoint(UI, lon_rad, lat_rad, mission_id):
if (not UI.HasFlightPort()):
return
FP = UI.GetFlightPortInfo()
FP.AddMissionWaypoint(mission_id, lon_rad, lat_rad)
def ReportBestLauncherForTarget(UI):
launcher_info = UI.GetBestLauncher()
launcher_idx = launcher_info.Launcher
if (launcher_idx != -1):
target_range = UI.GetRangeToTarget()
launch_range = launcher_info.Range_km # reference max range, use for launch decision
UI.DisplayMessage('Best launcher %d, launch range %f' % (launcher_idx, launch_range))
else:
UI.DisplayMessage('No valid launcher')
def UpdateWeaponTargetDatum(WI, lon_rad, lat_rad):
WI.UpdateTargetPosition(lon_rad, lat_rad)
def SetPitch(UI, theta, rate = 9, uncontrolled = True):
BB = UI.GetBlackboardInterface()
if uncontrolled:
if not BB.KeyExists('uncontrolled_dive'):
BB.Write('uncontrolled_dive', '1')
Fast = True
if abs(UI.GetClimbDeg() - theta) <= (rate*2):
UI.SetClimbDeg(theta)
Fast = False
elif theta - UI.GetClimbDeg() > rate:
UI.SetClimbDeg(UI.GetClimbDeg() + rate)
elif UI.GetClimbDeg() - theta > rate:
UI.SetClimbDeg(UI.GetClimbDeg() - rate)
else:
UI.SetClimbDeg(theta)
Fast = False
return Fast
def cruiseclimb(TI):
UI = TI.GetPlatformInterface()
BB = UI.GetBlackboardInterface()
update = 0.2
TI.SetUpdateInterval(update)
#assumes full throttle climb
    #achieves cruise speed, then pitches up, and manipulates pitch to hold cruise speed and climb rate to within 5%
#later development should also attempt to obey maxclimbrate.
#later development should also accept desired altitude inputs so we can level off when we get there.
#do everything in absolute values as relative differences, then invert to negative for descent if necessary.
try:
alt = float(BB.ReadMessage('ClimbInitAlt'))
except:
TI.EndTask()
return
if alt == -1:
alt = UI.GetCruiseAlt()
if alt < UI.GetTerrainElevation():
alt = UI.GetTerrainElevation() + 50
do_pitch = False
if alt < UI.GetAlt() - 5:
UI.SetAlt(alt)
TI.EndTask()
return
elif alt > UI.GetAlt() + 5:
do_pitch = True
else:
throttle = float(BB.ReadMessage('ClimbInitThrottle'))
if throttle == -1:
SpeedCruise(UI)
else:
UI.SetThrottle(throttle)
alt = float(BB.ReadMessage('ClimbInitAlt'))
if alt == -1:
alt = UI.GetCruiseAlt()
BB.Erase('Last_Speed')
BB.Erase('ClimbInitThrottle')
BB.Erase('ClimbInitAlt')
TI.EndTask()
UI.SetClimbDeg(0.0)
UI.SetAlt(alt)
return
if do_pitch:
if BB.KeyExists('Last_Speed'):
last_speed = float(BB.ReadMessage('Last_Speed'))
else:
last_speed = UI.GetSpeed()
pitch = UI.GetClimbDeg()
rate_mod = 3
want_pitch = math.degrees(math.atan((alt - UI.GetAlt()) / 1500))
if want_pitch < -67:
want_pitch = -67
cruise_spd = UI.GetCruiseSpeedForAltitude(UI.GetAlt()) + 10
gravity_losses = math.sin(math.radians(pitch)) * 9.80665
accel_mod = ((1-(cruise_spd / UI.GetSpeed())) / 0.4) / update
accel_mod2 = ((1-((cruise_spd+20) / UI.GetSpeed())) / 0.4) / update
acceleration = (UI.GetSpeed() - last_speed) / update
if alt > UI.GetAlt():
if accel_mod2 > 0:
UI.SetAlt(alt)
return
else:
                new_gravity_losses = gravity_losses + accel_mod + acceleration
                if new_gravity_losses < 0:
                    new_gravity_losses = 0
                rate_mod = max(abs(acceleration) / 10 + 0.5, 0.5)
                if new_gravity_losses > 9.80665:
                    new_gravity_losses = 9.80665
        elif alt < UI.GetAlt():
            new_gravity_losses = -9.02707  # descending: fixed pitch-down gravity assist
new_pitch = math.degrees(math.asin(new_gravity_losses/9.80665))
if want_pitch < 0:
new_pitch = want_pitch
else:
if new_pitch > want_pitch:
new_pitch = want_pitch
SetPitch(UI, new_pitch, rate=rate_mod, uncontrolled=False)
write_speed = str(UI.GetSpeed())
BB.Write('Last_Speed',write_speed)
def SetAlt(UI, alt):
UI.SetAlt(alt)
if UI.HasThrottle():
#then we can use my advanced climb until 1.26 resolved this.
if UI.GetSpeed() == UI.GetCruiseSpeedForAltitude(UI.GetAlt()):
throttle = -1
else:
throttle = UI.GetThrottle()
if throttle < 1.0 and alt > UI.GetAlt():
UI.SetThrottle(1.0)
OptionHandler(UI,'ClimbInitThrottle|Set|%s;ClimbInitAlt|Set|%s;cruiseclimb|Task|Start~0.2~-1' % (throttle, alt))
def Check_Status(UI, launcher, mode):
# 0 = LAUNCHER_READY
# 1 = BAD_LAUNCHER ///< launcher index does not exist
# 2 = LAUNCHER_EMPTY
# 3 = LAUNCHER_BUSY ///< launcher auto reloading
# 4 = NO_DATUM
# 5 = NO_TARGET
# 6 = NOT_DETECTED_FC ///< fire control sensor can't detect target (could be off)
# 7 = NOT_DETECTED_SEEKER ///< seeker can't detect target
# 8 = FC_BUSY ///< fire control sensor has no free tracks
# 9 = LAUNCHER_ERROR
#10 = LAUNCHER_INACTIVE
#11 = NO_FIRECONTROL
#12 = TOO_DEEP ///< too deep for sub launch
#13 = TOO_LOW ///< too low for aircraft missile launch
#14 = TOO_HIGH ///< too high for aircraft missile launch
#15 = DAMAGED ///< cannot operate due to damage
#16 = INVALID_TARGET ///< not effective vs. target type
#17 = OUT_OF_RANGE ///< target is beyond max range
#18 = INVALID_FUEL_OPERATION ///< invalid op for external fuel tank
#19 = LAUNCHER_LOADING ///< added for multiplayer, loading from magazine
#20 = LAUNCHER_UNLOADING ///< added for multiplayer, unloading to magazine
#21 = OUT_OF_FOV ///< target is outside of field of view of this launcher
#22 = TOO_CLOSE ///< target is inside minimum range
#23 = LAUNCHER_EMPTY_AUTORELOAD ///< empty, try autoreload when ready, workaround to delay auto-reload until after launch
#24 = ROE_HOLD ///< ready, but launch violates ROE
status_num = launcher.Status
if status_num:
status_strings = [
'Launcher ready to fire...',
'Launcher does not exist',
'Launcher empty',
'Launcher busy',
'no datum',
'no target',
'FC does not detect target',
'seeker does not detect target',
'all FC channels busy',
'Unknown launcher error',
'Launcher inactive',
'No Firecontrol',
'Too deep',
'Too low for launch',
'Too high for launch',
'Launcher is damaged',
'Invalid target',
'Target out of range',
'Invalid Fuel Operation',
'Launcher is Loading',
'Launcher is unLoading',
'target/datum outside FoV',
'Too close',
'Empty, Autoreload expected',
'ROE restrictions, holding fire']
excuse = status_strings[status_num]
return False, excuse
else:
excuse = 'firing as ordered'
return True, excuse
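# Usage sketch (launcher index 0 is hypothetical): gate a launch on launcher
# status and report the engine's excuse when holding fire.
#
#     info = UI.GetLauncherInfo(0)
#     ok, excuse = Check_Status(UI, info, 0)
#     if not ok:
#         UI.DisplayMessage(excuse)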
def Use_Launcher_On_Target_Amram(UI, launcher, launch_type, *target):
#dependency on current set target removed. Must now be handed target criteria.
launcher = UI.GetLauncherInfo(launcher)
if launcher.Status != 0:
return False
launch_mode = launcher.LaunchMode
isdatum = False
if len(target) == 2:
datum = target
isdatum = True
elif len(target) == 1:
target_info = UI.GetTrackById(target[0])
else:
#use the assigned target
if UI.GetTarget() != -1:
target_info = UI.GetTargetTrackInfo()
launch_qty = 1 # only launch one at a time for spacing
if launch_type == -2:
#we need to determine our launch_type now.
weapon_name = UI.GetLauncherWeaponName(launcher.Launcher)
if UI.QueryDatabase('torpedo',weapon_name,'ClassificationId').GetRow(0).GetString(0) == '130':
#its a torpedo
launch_type = 2
elif UI.QueryDatabase('missile',weapon_name,'ClassificationId').GetRow(0).GetString(0) == '64':
#its a missile
launch_type = 0
elif UI.QueryDatabase('ballistic',weapon_name,'BallisticType').GetRow(0).GetString(0) == '5': #rocket
#its a rocket
launch_type = 3
else:
#must be a ballistic if we get here
launch_type = 1
if not UI.IsAir():
#then we know they aren't bombs.
launch_qty = 5
if launch_mode == 0: # datum launch
if launch_type == 2: #torpedo
if isdatum:
lon, lat = datum
alt = UI.GetMapTerrainElevation(datum[0], datum[1])
if alt < 0:
alt = 0
else:
#is target, determine lead
range_km = UI.GetRangeToTrack(target_info)
speed1 = float(UI.QueryDatabase('torpedo',UI.GetLauncherWeaponName(launcher.Launcher),'preEnableSpeed_kts').GetRow(0).GetString(0))
speed2 = float(UI.QueryDatabase('torpedo',UI.GetLauncherWeaponName(launcher.Launcher),'maxSpeed_kts').GetRow(0).GetString(0))
accel = float(UI.QueryDatabase('torpedo',UI.GetLauncherWeaponName(launcher.Launcher),'acceleration_ktps').GetRow(0).GetString(0))
if UI.QueryDatabase('torpedo',UI.GetLauncherWeaponName(launcher.Launcher),'SensorClass').GetRow(0).GetString(0) != '':
seeker=True
                    UI.DisplayMessage('seeker = %s' % UI.QueryDatabase('torpedo',UI.GetLauncherWeaponName(launcher.Launcher),'SensorClass').GetRow(0).GetString(0))
else:
seeker=False
                    UI.DisplayMessage('seeker = %s' % UI.QueryDatabase('torpedo',UI.GetLauncherWeaponName(launcher.Launcher),'SensorClass').GetRow(0).GetString(0))
range_m = range_km * 1000
launch_speed = UI.GetSpeed()
accel_time1 = abs(speed1 - launch_speed) / accel
accel_dist1 = 0.5 * (speed1 + launch_speed) * accel_time1
travel_time1 = (range_m - accel_dist1) / speed1 + accel_time1
accel_time2 = abs(speed1 - speed2) / accel
                accel_dist2 = 0.5 * (speed1 + speed2) * accel_time2
travel_time2 = (range_m - accel_dist2) / speed2 + accel_time2
if seeker:
                    travel_time2 *= 1.07 #add 7% for snaking
travel_time_s = travel_time1 + travel_time2
                UI.DisplayMessage('travel_time = %s' % travel_time_s)
target_info = target_info.PredictAhead(travel_time_s)
lat = target_info.Lat
lon = target_info.Lon
alt = target_info.Alt
elif launch_type == 1 or launch_type == 3: #gun, rocket
# if the Speed_mps is non-zero in launcher info, then adjust datum based on expected travel time and target speed and heading
if isdatum:
lon, lat = datum
alt = UI.GetMapTerrainElevation(datum[0], datum[1])
if alt < 0: alt = 0
else:
range_km = UI.GetRangeToTrack(target_info)
if (launcher.Speed_mps > 0):
travel_time_s = 1000.0 * range_km / launcher.Speed_mps
travel_time_s = travel_time_s + 10.0 # add a little time for launch and altitude adjustment
target_info = target_info.PredictAhead(travel_time_s)
lat = target_info.Lat
lon = target_info.Lon
alt = target_info.Alt
if alt < 0: alt = 0
else: #launch_type = 0, missile
#confirm the missile is the type its supposed to be.....
if isdatum:
lon, lat = datum
alt = UI.GetMapTerrainElevation(datum[0], datum[1])
if alt < 0: alt = 0
else:
                flag_string = UI.QueryDatabase('missile',UI.GetLauncherWeaponName(launcher.Launcher),'targetFlags').GetRow(0).GetString(0)
                if flag_string != 'Error':
                    flag = int(flag_string)
                    if target_info.IsSurface() and not has_target_flag(flag, 1):
                        return False
                    elif target_info.IsGround() and not has_target_flag(flag, 4):
                        return False
range_km = UI.GetRangeToTrack(target_info)
lat = target_info.Lat
lon = target_info.Lon
alt = target_info.Alt
if not isdatum:
UI.HandoffTargetToLauncher(launcher.Launcher) # to store intended target
UI.SendDatumToLauncher(lon, lat, alt, launcher.Launcher)
UI.Launch(launcher.Launcher, launch_qty)
return True
elif (launch_mode == 1) or (launch_mode == 2) or (launch_mode == 4): # handoff to active seeker
if launch_type == 3:
if (launcher.Speed_mps > 0):
range_km = UI.GetRangeToTrack(target_info)
travel_time_s = 1000.0 * range_km / launcher.Speed_mps
travel_time_s = travel_time_s + 10.0 # add a little time for launch and altitude adjustment
target_info = target_info.PredictAhead(travel_time_s)
lat = target_info.Lat
lon = target_info.Lon
alt = target_info.Alt
if alt < 0: alt = 0
UI.SendDatumToLauncher(lon, lat, alt, launcher.Launcher)
UI.Launch(launcher.Launcher, launch_qty)
return True
        elif launch_type == 0 or launch_type == 1:
UI.HandoffTargetToLauncher(launcher.Launcher)
UI.Launch(launcher.Launcher, launch_qty)
return True
else:
return False
else:
UI.DisplayMessage('Unrecognized launch mode: %d' % launch_mode)
return False
|
vdemeester/compose | refs/heads/master | tests/unit/cli/errors_test.py | 3 | import pytest
from docker.errors import APIError
from requests.exceptions import ConnectionError
from compose.cli import errors
from compose.cli.errors import handle_connection_errors
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
@pytest.yield_fixture
def mock_logging():
with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
yield mock_log
def patch_find_executable(side_effect):
return mock.patch(
'compose.cli.errors.find_executable',
autospec=True,
side_effect=side_effect)
class TestHandleConnectionErrors:
def test_generic_connection_error(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with patch_find_executable(['/bin/docker', None]):
with handle_connection_errors(mock.Mock()):
raise ConnectionError()
_, args, _ = mock_logging.error.mock_calls[0]
assert "Couldn't connect to Docker daemon" in args[0]
def test_api_error_version_mismatch(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.38')):
raise APIError(None, None, b"client is newer than server")
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 18.06.0 or greater" in args[0]
def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.38')):
raise APIError(None, None, "client is newer than server")
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 18.06.0 or greater" in args[0]
def test_api_error_version_other(self, mock_logging):
msg = b"Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
mock_logging.error.assert_called_once_with(msg.decode('utf-8'))
def test_api_error_version_other_unicode_explanation(self, mock_logging):
msg = "Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
mock_logging.error.assert_called_once_with(msg)
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_no_data(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')
_, args, _ = mock_logging.error.mock_calls[0]
assert "The current Compose file version is not compatible with your engine version." in args[0]
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_misc(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')
_, args, _ = mock_logging.error.mock_calls[0]
assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_encoding_issue(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(9999, 'WriteFile', 'I use weird characters \xe9')
_, args, _ = mock_logging.error.mock_calls[0]
assert 'Windows named pipe error: I use weird characters \xe9 (code: 9999)' == args[0]
|
How2Compute/SmartHome | refs/heads/master | cli/py3env/lib/python3.4/site-packages/pip/_vendor/distlib/_backport/misc.py | 1428 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Backports for individual classes and functions."""
import os
import sys
__all__ = ['cache_from_source', 'callable', 'fsencode']
try:
from imp import cache_from_source
except ImportError:
def cache_from_source(py_file, debug=__debug__):
ext = debug and 'c' or 'o'
return py_file + ext
try:
callable = callable
except NameError:
from collections import Callable
def callable(obj):
return isinstance(obj, Callable)
try:
fsencode = os.fsencode
except AttributeError:
def fsencode(filename):
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
raise TypeError("expect bytes or str, not %s" %
type(filename).__name__)
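# Usage sketch: fsencode mirrors os.fsencode where available; the fallback
# above encodes str via the filesystem encoding and passes bytes through.
#
#     fsencode('café')    # -> b'caf\xc3\xa9' on a UTF-8 filesystem
#     fsencode(b'data')   # -> b'data', unchanged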
|
antirais/estnin | refs/heads/master | setup.py | 1 | #!/usr/bin/env python3
# coding: UTF-8
import os
from setuptools import setup, Command
root = os.path.abspath('.')
current_dir = os.path.dirname(__file__)
def read_file(file_name):
with open(os.path.join(current_dir, file_name), 'r') as file:
return file.read()
README = read_file('README.rst')
VERSION = read_file('VERSION')
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info ./eggs')
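# Usage sketch: once registered via cmdclass in setup() below, the command
# runs as:
#
#     python3 setup.py clean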
test_deps = [
'coverage',
'pytest',
'pytest-cov'
]
extras = {
'test': test_deps,
}
setup(
name = 'estnin',
version = VERSION,
url = 'https://github.com/antirais/estnin',
py_modules = ['estnin'],
license = 'MIT',
include_package_data = True,
author = 'Anti Räis',
author_email = '[email protected]',
description = 'library for handling Estonian national identity numbers',
long_description = README,
test_suite = 'tests',
setup_requires = ['pytest-runner'],
tests_require = test_deps,
extras_require = {'test': test_deps},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries',
],
cmdclass = {
'clean': CleanCommand,
},
)
|
iamthearm/bazel | refs/heads/master | tools/android/build_split_manifest.py | 20 | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stubifies an AndroidManifest.xml.
Does the following things:
- Replaces the Application class in an Android manifest with a stub one
- Resolve string and integer resources to their default values
usage: %s [input manifest] [output manifest] [file for old application class]
Writes the old application class into the file designated by the third argument.
"""
import sys
from xml.etree import ElementTree
from third_party.py import gflags
gflags.DEFINE_string("main_manifest", None, "The main manifest of the app")
gflags.DEFINE_string("split_manifest", None, "The output manifest")
gflags.DEFINE_string("override_package", None,
"The Android package. Override the one specified in the "
"input manifest")
gflags.DEFINE_string("split", None, "The name of the split")
gflags.DEFINE_boolean("hascode", False, "Whether this split .apk has dexes")
FLAGS = gflags.FLAGS
MANIFEST_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
android:versionCode="%(version_code)s"
android:versionName="%(version_name)s"
package="%(package)s"
split="%(split)s">
<application android:hasCode="%(hascode)s">
</application>
</manifest>
"""
def BuildSplitManifest(main_manifest, override_package, split, hascode):
"""Builds a split manifest based on the manifest of the main APK.
Args:
main_manifest: the XML manifest of the main APK as a string
override_package: if not None, override the package in the main manifest
split: the name of the split as a string
hascode: if this split APK will contain .dex files
Returns:
The XML split manifest as a string
Raises:
Exception if something goes wrong.
"""
manifest = ElementTree.fromstring(main_manifest)
android_namespace_prefix = "{http://schemas.android.com/apk/res/android}"
if override_package:
package = override_package
else:
package = manifest.get("package")
version_code = manifest.get(android_namespace_prefix + "versionCode")
version_name = manifest.get(android_namespace_prefix + "versionName")
return MANIFEST_TEMPLATE % {
"version_code": version_code,
"version_name": version_name,
"package": package,
"split": split,
"hascode": str(hascode).lower()
}
def main():
split_manifest = BuildSplitManifest(
file(FLAGS.main_manifest).read(),
FLAGS.override_package,
FLAGS.split,
FLAGS.hascode)
with file(FLAGS.split_manifest, "w") as output_xml:
output_xml.write(split_manifest)
if __name__ == "__main__":
FLAGS(sys.argv)
main()
|
olasitarska/django | refs/heads/master | tests/serializers/models.py | 5 | # -*- coding: utf-8 -*-
"""
42. Serialization
``django.core.serializers`` provides interfaces to converting Django
``QuerySet`` objects to and from "flat" data (i.e. strings).
"""
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
author = models.ForeignKey(Author)
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
categories = models.ManyToManyField(Category)
class Meta:
ordering = ('pub_date',)
def __str__(self):
return self.headline
@python_2_unicode_compatible
class AuthorProfile(models.Model):
author = models.OneToOneField(Author, primary_key=True)
date_of_birth = models.DateField()
def __str__(self):
return "Profile of %s" % self.author
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=20, primary_key=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Movie(models.Model):
actor = models.ForeignKey(Actor)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal('0.00'))
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
class Score(models.Model):
score = models.FloatField()
@python_2_unicode_compatible
class Team(object):
def __init__(self, title):
self.title = title
def __str__(self):
raise NotImplementedError("Not so simple")
def to_string(self):
return "%s" % self.title
class TeamField(models.CharField):
def __init__(self):
super(TeamField, self).__init__(max_length=100)
def get_db_prep_save(self, value, connection):
return six.text_type(value.title)
def to_python(self, value):
if isinstance(value, Team):
return value
return Team(value)
def from_db_value(self, value, connection):
return Team(value)
def value_to_string(self, obj):
return self._get_val_from_obj(obj).to_string()
def deconstruct(self):
name, path, args, kwargs = super(TeamField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
@python_2_unicode_compatible
class Player(models.Model):
name = models.CharField(max_length=50)
rank = models.IntegerField()
team = TeamField()
def __str__(self):
return '%s (%d) playing for %s' % (self.name, self.rank, self.team.to_string())
|
geohot/topmodel | refs/heads/master | topmodel/hmetrics.py | 1 | import numpy as np
def recalls(hist):
# true positive rate
# TP / (TP + FN)
ret = []
trues = sum(hist['trues'])
all_trues = trues
for i in range(len(hist['probs'])):
ret.append(trues * 1.0 / all_trues if all_trues != 0 else None)
trues -= hist['trues'][i]
return ret
def fprs(hist):
# FP / (FP + TN)
# probs is being used as the threshold
# ones selected that aren't true / all selected
ret = []
falses = sum(hist['totals']) - sum(hist['trues'])
all_falses = falses
for i in range(len(hist['probs'])):
ret.append(falses * 1.0 / all_falses if all_falses != 0 else None)
falses -= (hist['totals'][i] - hist['trues'][i])
return ret
def precisions(hist):
ret = []
selected = sum(hist['totals'])
trues = sum(hist['trues'])
for i in range(len(hist['probs'])):
ret.append(trues * 1.0 / selected if selected != 0 else None)
trues -= hist['trues'][i]
selected -= hist['totals'][i]
return ret
def marginal_precisions(hist):
return map(lambda x: x[0] * 1.0 / x[1] if x[1] != 0 else None, zip(hist['trues'], hist['totals']))
def brier(hist):
N = sum(hist['totals'])
base_rate = sum(hist['trues']) * 1.0 / N
unc = base_rate * (1.0 - base_rate)
rel = 0.0
res = 0.0
for i in range(len(hist['probs'])):
if hist['totals'][i] > 0:
ok = (hist['trues'][i] * 1.0 / hist['totals'][i])
rel += hist['totals'][i] * (hist['probs'][i] - ok) ** 2
res += hist['totals'][i] * (ok - base_rate) ** 2
rel /= N
res /= N
# whole brier score
return {"rel": rel, "res": res, "unc": unc, "brier": rel - res + unc}
def logloss(hist):
loss = 0.0
N = sum(hist['totals'])
for i in range(len(hist['probs'])):
t = hist['trues'][i]
f = hist['totals'][i] - t
loss += t * \
np.log(hist['probs'][i]) + f * np.log(1.0 - hist['probs'][i])
return -loss / N
def auc(fprs, tprs):
xs = np.concatenate(([0], fprs[::-1], [1]))
ys = np.concatenate(([0], tprs[::-1], [1]))
return np.trapz(ys, xs)
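# Usage sketch (toy histogram, not real data): hist maps probability bins to
# counts; ROC AUC pairs the false positive and true positive rates per bin.
#
#     hist = {'probs': [0.25, 0.75], 'trues': [2, 6], 'totals': [10, 8]}
#     roc_auc = auc(fprs(hist), recalls(hist))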
|
ericholscher/merchant | refs/heads/master | billing/models/authorize_models.py | 3 | from django.db import models
# Response Codes
# APPROVED, DECLINED, ERROR, FRAUD_REVIEW = 1, 2, 3, 4
class AuthorizeAIMResponse(models.Model):
RESPONSE_CODES = [
(1, 'Approved'),
(2, 'Declined'),
(3, 'Error'),
(4, 'Held for Review'),
]
ADDRESS_VERIFICATION_RESPONSE = [
('A', 'Address(Street) matches,ZIP does not'),
('B', 'Address information not provided for AVS check'),
('E', 'AVS error'),
('G', 'Non-U.S. Card Issuing Bank'),
('N', 'No match on Address(Street) or ZIP'),
('P', 'AVS not applicable for this transactions'),
('R', 'Retry-System unavailable or timed out'),
('S', 'Service not supported by issuer'),
('U', 'Address information is unavailable'),
('W', 'Nine digit Zip matches, Address(Street) does not'),
('X', 'Address(Street) and nine digit ZIP match'),
('Y', 'Address(Street) and five digit ZIP match'),
('Z', 'Five digit Zip matches, Address(Street) does not'),
]
CARD_CODE_RESPONSES = [
('', ''),
('M', 'Match'),
('N', 'No Match'),
('P', 'Not Processed'),
('S', 'Should have been present'),
('U', 'Issuer unable to process request'),
]
response_code = models.IntegerField(choices=RESPONSE_CODES)
response_reason_code = models.IntegerField(blank=True)
response_reason_text = models.TextField(blank=True)
authorization_code = models.CharField(max_length=8)
    address_verification_response = models.CharField(max_length=8, choices=ADDRESS_VERIFICATION_RESPONSE)
transaction_id = models.CharField(max_length=64)
invoice_number = models.CharField(max_length=64, blank=True)
description = models.CharField(max_length=255, blank=True)
amount = models.DecimalField(max_digits=16, decimal_places=2)
method = models.CharField(max_length=255, blank=True)
transaction_type = models.CharField(max_length=255, blank=True)
customer_id = models.CharField(max_length=64, blank=True)
first_name = models.CharField(max_length=64, blank=True)
last_name = models.CharField(max_length=64, blank=True)
company = models.CharField(max_length=64, blank=True)
address = models.CharField(max_length=64, blank=True)
city = models.CharField(max_length=64, blank=True)
state = models.CharField(max_length=64, blank=True)
zip_code = models.CharField(max_length=64, blank=True)
country = models.CharField(max_length=64, blank=True)
phone = models.CharField(max_length=64, blank=True)
fax = models.CharField(max_length=64, blank=True)
email = models.EmailField()
shipping_first_name = models.CharField(max_length=64, blank=True)
shipping_last_name = models.CharField(max_length=64, blank=True)
shipping_company = models.CharField(max_length=64, blank=True)
shipping_address = models.CharField(max_length=64, blank=True)
shipping_city = models.CharField(max_length=64, blank=True)
shipping_state = models.CharField(max_length=64, blank=True)
shipping_zip_code = models.CharField(max_length=64, blank=True)
shipping_country = models.CharField(max_length=64, blank=True)
    card_code_response = models.CharField(max_length=8, choices=CARD_CODE_RESPONSES, help_text=u'Card Code Verification response')
class Meta:
app_label = __name__.split(".")[0]
|
beangoben/qwxwhat | refs/heads/master | qwxwhat.py | 1 | __version__ = "0.5.6"
from pythonwhat.check_syntax import state_dec, Ex
from pythonwhat.test_funcs import test_student_typed
from pythonwhat.check_funcs import test_not
import numpy as np
# ERROR messages
TYPE_MSG = "__JINJA__: **type** **mismatch**: expected `{{sol_eval}}`, got `{{stu_eval}}`."
TYPE_EXPR = 'type({:s})'
SHAPE_MSG = "__JINJA__: **shape** **mismatch**: expected `{{sol_eval}}`, got `{{stu_eval}}`."
SHAPE_EXPR = '{:s}.shape'
CONTENTS_MSG = "__JINJA__: **solution** **contents** are different according to numpy.allclose. We expected `{{sol_eval}}`, but got `{{stu_eval}}`."
#CONTENTS_EXPR = 'np.round({:s}, decimals = 4)'
FUNC_CALL_MSG = "Did you forget to use `{}`?"
FLOAT_MSG = "__JINJA__: **solution** **contents** for {:s} are different according to numpy.allclose."
FUNC_TEST_MSG = "FMT:Calling it with arguments `({:s})` should result in `{{str_sol}}`, instead got `{{str_stu}}`."
CHEAT_MSG = "You cannot use the provided test function `{:s}` in your solution!!"
@state_dec
def check_numpy_array(name, state=None):
"""Test a numpy array object. Return that object.
Args:
name: name of the object to be tested.
"""
# check if is defined
obj = Ex(state).check_object(name)
# check if it has same type of variable
obj.has_equal_value(expr_code=TYPE_EXPR.format(name),
incorrect_msg=TYPE_MSG)
# check if it has the same shape
obj.has_equal_value(expr_code=SHAPE_EXPR.format(name),
incorrect_msg=SHAPE_MSG)
# check if it has the same value
obj.has_equal_value(func=lambda x, y: np.allclose(
x, y, rtol=1e-04, atol=1e-05),
incorrect_msg=CONTENTS_MSG)
# return object state for chaining
return obj
@state_dec
def check_float_value(name, state=None):
"""Test a float.
Args:
name: name of the object to be tested.
"""
# check if is defined
obj = Ex(state).check_object(name)
# check if it has same type of variable
obj.has_equal_value(expr_code=TYPE_EXPR.format(name),
incorrect_msg=TYPE_MSG)
# check if it has the same value
obj.has_equal_value(func=lambda x, y: np.allclose(x, y, rtol=1e-04, atol=1e-05),
incorrect_msg=FLOAT_MSG.format(name))
return obj
@state_dec
def check_function_call(name, state=None):
obj = Ex(state).test_student_typed(
"{}\s*\(".format(name), not_typed_msg=FUNC_CALL_MSG.format(name))
return obj
@state_dec
def check_testfunction_call(name, state=None):
name_regex = "{:s}\s*\(".format(name)
obj = Ex(state).test_not(test_student_typed(
name_regex), msg=CHEAT_MSG.format(name))
return obj
@state_dec
def check_testfunction_call2(state=None):
obj = Ex(state).test_not(test_student_typed('SOMETHING'), msg = "FAILED")
return obj
@state_dec
def check_testfunction_call3(name, state=None):
obj = Ex(state).test_not(test_student_typed('SOMETHING'), msg = "FAILED")
return obj
@state_dec
def check_testfunction_call4(name, state=None):
obj = Ex(state).test_not(test_student_typed(name), msg = "FAILED")
return obj
@state_dec
def check_function_definition(name, v, state=None):
Ex(state).test_function_definition(name)
obj = Ex(state).check_function_def(name)
for vi in v:
arg_string = str(vi).replace("[", "").replace("]", "")
obj.call(vi,
func=lambda x, y: np.allclose(x, y, rtol=1e-04, atol=1e-05),
incorrect_msg=FUNC_TEST_MSG.format(arg_string))
return obj
def test_exercise(func_defs={}, arrays=[], floats=[], func_calls=[],
state=None):
# check function definitions
for i, v in func_defs.items():
Ex(state) >> check_function_definition(i, v)
# numpy arrays
for name in arrays:
Ex(state) >> check_numpy_array(name)
# floating values
for name in floats:
Ex(state) >> check_float_value(name)
# function calls via re
for func in func_calls:
Ex(state) >> check_function_call(func)
# check against test functions
#for i, v in func_defs.items():
# name = 'test_{:s}'.format(i)
# Ex(state) >> check_testfunction_call(name)
return
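# Illustrative call (all names below are hypothetical): check that the
# student defined square() correctly for the argument lists [2] and [3],
# created array "A" and float "x", and called np.dot somewhere:
#
#   test_exercise(func_defs={'square': [[2], [3]]},
#                 arrays=['A'], floats=['x'], func_calls=['np.dot'])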
|
apache/arrow | refs/heads/master | dev/merge_arrow_pr.py | 4 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to
# Apache.
# usage: ./merge_arrow_pr.py (see config env vars below)
#
# This utility assumes you already have a local Arrow git clone and that you
# have added remotes corresponding to both (i) the GitHub Apache Arrow mirror
# and (ii) the apache git repo.
#
# There are several pieces of authorization possibly needed via environment
# variables
#
# APACHE_JIRA_USERNAME: your Apache JIRA id
# APACHE_JIRA_PASSWORD: your Apache JIRA password
# ARROW_GITHUB_API_TOKEN: a GitHub API token to use for API requests (to avoid
# rate limiting)
import configparser
import os
import pprint
import re
import subprocess
import sys
import requests
import getpass
from six.moves import input
import six
try:
import jira.client
import jira.exceptions
except ImportError:
print("Could not find jira library. "
"Run 'sudo pip install jira' to install.")
print("Exiting without trying to close the associated JIRA.")
sys.exit(1)
# Remote name which points to the GitHub site
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "apache")
# For testing to avoid accidentally pushing to apache
DEBUG = bool(int(os.environ.get("DEBUG", 0)))
if DEBUG:
print("**************** DEBUGGING ****************")
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
JIRA_API_BASE = "https://issues.apache.org/jira"
def get_json(url, headers=None):
req = requests.get(url, headers=headers)
return req.json()
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % cmd)
print('With output:')
print('--------------')
print(e.output)
print('--------------')
raise e
if isinstance(output, six.binary_type):
output = output.decode('utf-8')
return output
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in [x for x in branches
if x.startswith(BRANCH_PREFIX)]:
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
_REGEX_CI_DIRECTIVE = re.compile(r'\[[^\]]*\]')
def strip_ci_directives(commit_message):
# Remove things like '[force ci]', '[skip appveyor]' from the assembled
# commit message
return _REGEX_CI_DIRECTIVE.sub('', commit_message)
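# For example (sketch, values chosen for illustration), the regex strips
# any bracketed directive from the assembled title:
#
#   strip_ci_directives("ARROW-123: [skip ci] Fix bug")
#   # -> "ARROW-123:  Fix bug" (the surrounding whitespace is kept)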
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released
# versions
if branch == "master":
return versions[-1]
else:
branch_ver = branch.replace("branch-", "")
return [x for x in versions if x.name.startswith(branch_ver)][-1]
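# Sketch of the mapping above, assuming unreleased versions named
# ["1.0.0", "0.15.1", "0.15.0"] sorted newest->oldest:
#   branch "master"      -> "0.15.0" (the oldest unreleased version)
#   branch "branch-0.15" -> "0.15.0" (last entry starting with "0.15")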
# We can merge both ARROW and PARQUET patches
SUPPORTED_PROJECTS = ['ARROW', 'PARQUET']
PR_TITLE_REGEXEN = [(project, re.compile(r'^(' + project + r'-[0-9]+)\b.*$'))
for project in SUPPORTED_PROJECTS]
class JiraIssue(object):
def __init__(self, jira_con, jira_id, project, cmd):
self.jira_con = jira_con
self.jira_id = jira_id
self.project = project
self.cmd = cmd
try:
self.issue = jira_con.issue(jira_id)
except Exception as e:
self.cmd.fail("ASF JIRA could not find %s\n%s" % (jira_id, e))
@property
def current_fix_versions(self):
return self.issue.fields.fixVersions
def get_candidate_fix_versions(self, merge_branches=('master',)):
# Only suggest versions starting with a number, like 0.x but not JS-0.x
all_versions = self.jira_con.project_versions(self.project)
unreleased_versions = [x for x in all_versions
if not x.raw['released']]
unreleased_versions = sorted(unreleased_versions,
key=lambda x: x.name, reverse=True)
mainline_versions = self._filter_mainline_versions(unreleased_versions)
mainline_non_patch_versions = []
for v in mainline_versions:
(major, minor, patch) = v.name.split(".")
if patch == "0":
mainline_non_patch_versions.append(v)
if len(mainline_versions) > len(mainline_non_patch_versions):
# If there is a non-patch release, suggest that instead
mainline_versions = mainline_non_patch_versions
default_fix_versions = [
fix_version_from_branch(x, mainline_versions).name
for x in merge_branches]
return all_versions, default_fix_versions
def _filter_mainline_versions(self, versions):
if self.project == 'PARQUET':
mainline_regex = re.compile(r'cpp-\d.*')
else:
mainline_regex = re.compile(r'\d.*')
return [x for x in versions if mainline_regex.match(x.name)]
def resolve(self, fix_versions, comment):
fields = self.issue.fields
cur_status = fields.status.name
if cur_status == "Resolved" or cur_status == "Closed":
self.cmd.fail("JIRA issue %s already has status '%s'"
% (self.jira_id, cur_status))
if DEBUG:
print("JIRA issue %s untouched" % (self.jira_id))
return
resolve = [x for x in self.jira_con.transitions(self.jira_id)
if x['name'] == "Resolve Issue"][0]
# ARROW-6915: do not overwrite existing fix versions corresponding to
# point releases
fix_versions = list(fix_versions)
fix_version_names = set(x['name'] for x in fix_versions)
for version in self.current_fix_versions:
major, minor, patch = version.name.split('.')
if patch != '0' and version.name not in fix_version_names:
fix_versions.append(version.raw)
self.jira_con.transition_issue(self.jira_id, resolve["id"],
comment=comment,
fixVersions=fix_versions)
print("Successfully resolved %s!" % (self.jira_id))
self.issue = self.jira_con.issue(self.jira_id)
self.show()
def show(self):
fields = self.issue.fields
print(format_jira_output(self.jira_id, fields.status.name,
fields.summary, fields.assignee,
fields.components))
def format_jira_output(jira_id, status, summary, assignee, components):
if assignee is None:
assignee = "NOT ASSIGNED!!!"
else:
assignee = assignee.displayName
if len(components) == 0:
components = 'NO COMPONENTS!!!'
else:
components = ', '.join((x.name for x in components))
return """=== JIRA {} ===
Summary\t\t{}
Assignee\t{}
Components\t{}
Status\t\t{}
URL\t\t{}/{}""".format(jira_id, summary, assignee, components, status,
'/'.join((JIRA_API_BASE, 'browse')), jira_id)
class GitHubAPI(object):
def __init__(self, project_name):
self.github_api = ("https://api.github.com/repos/apache/{0}"
.format(project_name))
token = os.environ.get('ARROW_GITHUB_API_TOKEN', None)
if token:
self.headers = {'Authorization': 'token {0}'.format(token)}
else:
self.headers = None
def get_pr_data(self, number):
return get_json("%s/pulls/%s" % (self.github_api, number),
headers=self.headers)
class CommandInput(object):
"""
Interface to input(...) to enable unit test mocks to be created
"""
def fail(self, msg):
clean_up()
raise Exception(msg)
def prompt(self, prompt):
return input(prompt)
def getpass(self, prompt):
return getpass.getpass(prompt)
def continue_maybe(self, prompt):
while True:
result = input("\n%s (y/n): " % prompt)
if result.lower() == "y":
return
elif result.lower() == "n":
self.fail("Okay, exiting")
else:
prompt = "Please input 'y' or 'n'"
class PullRequest(object):
def __init__(self, cmd, github_api, git_remote, jira_con, number):
self.cmd = cmd
self.git_remote = git_remote
self.con = jira_con
self.number = number
self._pr_data = github_api.get_pr_data(number)
try:
self.url = self._pr_data["url"]
self.title = self._pr_data["title"]
self.body = self._pr_data["body"]
self.target_ref = self._pr_data["base"]["ref"]
self.user_login = self._pr_data["user"]["login"]
self.base_ref = self._pr_data["head"]["ref"]
except KeyError:
pprint.pprint(self._pr_data)
raise
self.description = "%s/%s" % (self.user_login, self.base_ref)
self.jira_issue = self._get_jira()
def show(self):
print("\n=== Pull Request #%s ===" % self.number)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s"
% (self.title, self.description, self.target_ref, self.url))
if self.jira_issue is not None:
self.jira_issue.show()
else:
print("Minor PR. Please ensure it meets guidelines for minor.\n")
@property
def is_merged(self):
return bool(self._pr_data["merged"])
@property
def is_mergeable(self):
return bool(self._pr_data["mergeable"])
def _get_jira(self):
if self.title.startswith("MINOR:"):
return None
jira_id = None
for project, regex in PR_TITLE_REGEXEN:
m = regex.search(self.title)
if m:
jira_id = m.group(1)
break
if jira_id is None:
options = ' or '.join('{0}-XXX'.format(project)
for project in SUPPORTED_PROJECTS)
self.cmd.fail("PR title should be prefixed by a jira id "
"{0}, but found {1}".format(options, self.title))
return JiraIssue(self.con, jira_id, project, self.cmd)
def merge(self):
"""
merge the requested PR and return the merge hash
"""
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, self.number)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX,
self.number,
self.target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (self.git_remote,
self.number,
pr_branch_name))
run_cmd("git fetch %s %s:%s" % (self.git_remote, self.target_ref,
target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--ff', '--squash'])
except Exception as e:
msg = ("Error merging: %s\nWould you like to "
"manually fix-up this merge?" % e)
self.cmd.continue_maybe(msg)
msg = ("Okay, please fix any conflicts and 'git add' "
"conflicting files... Finished?")
self.cmd.continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x),
reverse=True)
for i, author in enumerate(distinct_authors):
print("Author {}: {}".format(i + 1, author))
if len(distinct_authors) > 1:
primary_author, distinct_authors = get_primary_author(
self.cmd, distinct_authors)
else:
# If there is only one author, do not prompt for a lead author
primary_author = distinct_authors[0]
merge_message_flags = []
merge_message_flags += ["-m", self.title]
if self.body is not None:
merge_message_flags += ["-m", self.body]
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
authors = ("Authored-by:" if len(distinct_authors) == 1
else "Lead-authored-by:")
authors += " %s" % (distinct_authors.pop(0))
if len(distinct_authors) > 0:
authors += "\n" + "\n".join(["Co-authored-by: %s" % a
for a in distinct_authors])
authors += "\n" + "Signed-off-by: %s <%s>" % (committer_name,
committer_email)
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = ("This patch had conflicts when merged, "
"resolved by\nCommitter: %s <%s>" %
(committer_name, committer_email))
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly
# close the PR
merge_message_flags += [
"-m",
"Closes #%s from %s"
% (self.number, self.description)]
merge_message_flags += ["-m", authors]
if DEBUG:
print("\n".join(merge_message_flags))
run_cmd(['git', 'commit',
'--no-verify', # do not run commit hooks
'--author="%s"' % primary_author] +
merge_message_flags)
self.cmd.continue_maybe("Merge complete (local ref %s). Push to %s?"
% (target_branch_name, self.git_remote))
try:
push_cmd = ('git push %s %s:%s' % (self.git_remote,
target_branch_name,
self.target_ref))
if DEBUG:
print(push_cmd)
else:
run_cmd(push_cmd)
except Exception as e:
clean_up()
self.cmd.fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % self.number)
print("Merge hash: %s" % merge_hash)
return merge_hash
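# Sketch of the squash-commit message assembled above (values hypothetical):
#
#   ARROW-123: Fix the widget
#
#   <PR body, if any>
#
#   Closes #456 from someuser/fix-widget
#
#   Authored-by: Some User <some@user>
#   Signed-off-by: Committer Name <committer@email>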
def get_primary_author(cmd, distinct_authors):
author_pat = re.compile(r'(.*) <(.*)>')
while True:
primary_author = cmd.prompt(
"Enter primary author in the format of "
"\"name <email>\" [%s]: " % distinct_authors[0])
if primary_author == "":
return distinct_authors[0], distinct_authors
if author_pat.match(primary_author):
break
print('Bad author "{}", please try again'.format(primary_author))
# When primary author is specified manually, de-dup it from
# author list and put it at the head of author list.
distinct_authors = [x for x in distinct_authors
if x != primary_author]
distinct_authors = [primary_author] + distinct_authors
return primary_author, distinct_authors
def prompt_for_fix_version(cmd, jira_issue):
(all_versions,
default_fix_versions) = jira_issue.get_candidate_fix_versions()
default_fix_versions = ",".join(default_fix_versions)
issue_fix_versions = cmd.prompt("Enter comma-separated "
"fix version(s) [%s]: "
% default_fix_versions)
if issue_fix_versions == "":
issue_fix_versions = default_fix_versions
issue_fix_versions = issue_fix_versions.replace(" ", "").split(",")
def get_version_json(version_str):
return [x for x in all_versions if x.name == version_str][0].raw
return [get_version_json(v) for v in issue_fix_versions]
CONFIG_FILE = "~/.config/arrow/merge.conf"
def load_configuration():
config = configparser.ConfigParser()
config.read(os.path.expanduser(CONFIG_FILE))
return config
def get_credentials(cmd):
username, password = None, None
config = load_configuration()
if "jira" in config.sections():
username = config["jira"].get("username")
password = config["jira"].get("password")
# Fallback to environment variables
if not username:
username = os.environ.get("APACHE_JIRA_USERNAME")
if not password:
password = os.environ.get("APACHE_JIRA_PASSWORD")
# Fallback to user tty prompt
if not username:
username = cmd.prompt("Env APACHE_JIRA_USERNAME not set, "
"please enter your JIRA username:")
if not password:
password = cmd.getpass("Env APACHE_JIRA_PASSWORD not set, "
"please enter your JIRA password:")
return (username, password)
def connect_jira(cmd):
try:
return jira.client.JIRA(options={'server': JIRA_API_BASE},
basic_auth=get_credentials(cmd))
except jira.exceptions.JIRAError as e:
if "CAPTCHA_CHALLENGE" in e.text:
print("")
print("It looks like you need to answer a captcha challenge for "
"this account (probably due to a login attempt with an "
"incorrect password). Please log in at "
"https://issues.apache.org/jira and complete the captcha "
"before running this tool again.")
print("Exiting.")
sys.exit(1)
raise e
def get_pr_num():
if len(sys.argv) == 2:
return sys.argv[1]
return input("Which pull request would you like to merge? (e.g. 34): ")
def cli():
# Location of your Arrow git clone
ARROW_HOME = os.path.abspath(os.path.dirname(__file__))
PROJECT_NAME = os.environ.get('ARROW_PROJECT_NAME') or 'arrow'
print("ARROW_HOME = " + ARROW_HOME)
print("PROJECT_NAME = " + PROJECT_NAME)
cmd = CommandInput()
pr_num = get_pr_num()
os.chdir(ARROW_HOME)
github_api = GitHubAPI(PROJECT_NAME)
jira_con = connect_jira(cmd)
pr = PullRequest(cmd, github_api, PR_REMOTE_NAME, jira_con, pr_num)
if pr.is_merged:
print("Pull request %s has already been merged")
sys.exit(0)
if not pr.is_mergeable:
msg = ("Pull request %s is not mergeable in its current form.\n"
% pr_num + "Continue? (experts only!)")
cmd.continue_maybe(msg)
pr.show()
cmd.continue_maybe("Proceed with merging pull request #%s?" % pr_num)
# merged hash not used
pr.merge()
if pr.jira_issue is None:
print("Minor PR. No JIRA issue to update.\n")
return
cmd.continue_maybe("Would you like to update the associated JIRA?")
jira_comment = (
"Issue resolved by pull request %s\n[%s/%s]"
% (pr_num,
"https://github.com/apache/" + PROJECT_NAME + "/pull",
pr_num))
fix_versions_json = prompt_for_fix_version(cmd, pr.jira_issue)
pr.jira_issue.resolve(fix_versions_json, jira_comment)
if __name__ == '__main__':
try:
cli()
except Exception:
raise
|
fangxingli/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/formtools/tests/wizard/namedwizardtests/forms.py | 318 | import os
import tempfile
from django import forms
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import NamedUrlWizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(NamedUrlWizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data()
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
|
kenshay/ImageScripter | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GLES2/ARM/mali_program_binary.py | 8 | '''OpenGL extension ARM.mali_program_binary
This module customises the behaviour of the
OpenGL.raw.GLES2.ARM.mali_program_binary to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARM/mali_program_binary.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.ARM.mali_program_binary import *
from OpenGL.raw.GLES2.ARM.mali_program_binary import _EXTENSION_NAME
def glInitMaliProgramBinaryARM():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
isrohutamahopetechnik/MissionPlanner | refs/heads/master | Lib/site-packages/scipy/lib/lapack/__init__.py | 62 | #
# LAPACK wrappers
#
from info import __doc__
__all__ = ['get_lapack_funcs','calc_lwork','flapack','clapack']
import calc_lwork
# The following ensures that possibly missing flavor (C or Fortran) is
# replaced with the available one. If none is available, exception
# is raised at the first attempt to use the resources.
import flapack
import clapack
_use_force_clapack = 1
if hasattr(clapack,'empty_module'):
clapack = flapack
_use_force_clapack = 0
elif hasattr(flapack,'empty_module'):
flapack = clapack
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} # 'd' is the default for other dtype chars such as 'i'
_inv_type_conv = {'s':'f','d':'d','c':'F','z':'D'}
def get_lapack_funcs(names,arrays=(),debug=0,force_clapack=1):
"""Return available LAPACK function objects with names.
arrays are used to determine the optimal prefix of
LAPACK routines.
    If force_clapack is True then the available ATLAS routine
    is returned for column-major stored arrays, with the
    rowmajor argument set to False.
"""
force_clapack=0 #XXX: Don't set it true! The feature is unreliable
# and may cause incorrect results.
# See test_basic.test_solve.check_20Feb04_bug.
ordering = []
for i in range(len(arrays)):
t = arrays[i].dtype.char
if t not in _type_conv:
t = 'd'
ordering.append((t,i))
if ordering:
ordering.sort()
required_prefix = _type_conv[ordering[0][0]]
else:
required_prefix = 'd'
dtypechar = _inv_type_conv[required_prefix]
# Default lookup:
if ordering and arrays[ordering[0][1]].flags['FORTRAN']:
# prefer Fortran code for leading array with column major order
m1,m2 = flapack,clapack
else:
# in all other cases, C code is preferred
m1,m2 = clapack,flapack
if not _use_force_clapack:
force_clapack = 0
funcs = []
m1_name = m1.__name__.split('.')[-1]
m2_name = m2.__name__.split('.')[-1]
for name in names:
func_name = required_prefix + name
func = getattr(m1,func_name,None)
if func is None:
func = getattr(m2,func_name)
func.module_name = m2_name
else:
func.module_name = m1_name
if force_clapack and m1 is flapack:
func2 = getattr(m2,func_name,None)
if func2 is not None:
import new
exec _colmajor_func_template % {'func_name':func_name}
func = new.function(func_code,{'clapack_func':func2},func_name)
func.module_name = m2_name
func.__doc__ = func2.__doc__
func.prefix = required_prefix
func.dtypechar = dtypechar
funcs.append(func)
return tuple(funcs)
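# Hedged usage sketch (the routine name 'gesv' is illustrative and assumed
# to be wrapped by flapack/clapack): the prefix comes from the leading
# array's dtype, so a float64 array selects "dgesv" and a complex128 array
# selects "zgesv":
#
#   gesv, = get_lapack_funcs(('gesv',), (a,))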
_colmajor_func_template = '''\
def %(func_name)s(*args,**kws):
if "rowmajor" not in kws:
kws["rowmajor"] = 0
return clapack_func(*args,**kws)
func_code = %(func_name)s.func_code
'''
from numpy.testing import Tester
test = Tester().test
|
boretom/pyload-apkg | refs/heads/master | source/py-mods-prebuilt-i386/site-packages/PIL/JpegImagePlugin.py | 40 | #
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import array, struct
import string
import Image, ImageFile
def i16(c,o=0):
return ord(c[o+1]) + (ord(c[o])<<8)
def i32(c,o=0):
return ord(c[o+3]) + (ord(c[o+2])<<8) + (ord(c[o+1])<<16) + (ord(c[o])<<24)
#
# Parser
def Skip(self, marker):
n = i16(self.fp.read(2))-2
ImageFile._safe_read(self.fp, n)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker&15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == "JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = ord(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == "Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == "FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == "ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
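# For example (sketch), a profile split across two APP2 markers
# arrives as:
#   "ICC_PROFILE\0" + chr(1) + chr(2) + <first chunk>
#   "ICC_PROFILE\0" + chr(2) + chr(2) + <second chunk>
# and is reassembled in SOF() once all fragments are present.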
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == "Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = ord(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
def COM(self, marker):
#
# Comment marker. Store these in the APP dictionary.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit smarter, by
# looking for JFIF and Adobe APP markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.size = i16(s[3:]), i16(s[1:])
self.bits = ord(s[0])
if self.bits != 8:
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
self.layers = ord(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError("cannot handle %d-layer images" % self.layers)
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progressive"] = self.info["progression"] = 1
if self.icclist:
# fixup icc profile
self.icclist.sort() # sort by sequence number
if ord(self.icclist[0][13]) == len(self.icclist):
profile = []
for p in self.icclist:
profile.append(p[14:])
icc_profile = string.join(profile, "")
else:
icc_profile = None # wrong number of fragments
self.info["icc_profile"] = icc_profile
self.icclist = None
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append((t[0], ord(t[1])/16, ord(t[1])&15, ord(t[2])))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
while len(s):
if len(s) < 65:
raise SyntaxError("bad quantization table marker")
v = ord(s[0])
if v/16 == 0:
self.quantization[v&15] = array.array("b", s[1:65])
s = s[65:]
else:
return # FIXME: add code to read 16-bit tables!
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", COM)
}
def _accept(prefix):
return prefix[0] == "\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if ord(s[0]) != 255:
raise SyntaxError("not a JPEG file")
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {} # compatibility
self.applist = []
self.icclist = []
while 1:
s = s + self.fp.read(1)
i = i16(s)
if MARKER.has_key(i):
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler is not None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK":
rawmode = "CMYK;I" # assume adobe conventions
self.tile = [("jpeg", (0,0) + self.size, 0, (rawmode, ""))]
# self.__offset = self.fp.tell()
break
s = self.fp.read(1)
elif i == 0 or i == 65535:
# padded marker or junk; move on
s = "\xff"
else:
raise SyntaxError("no marker found")
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
self.mode = mode
a = mode, ""
if size:
scale = max(self.size[0] / size[0], self.size[1] / size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)/s+e[0], (e[3]-e[1]+s-1)/s+e[1]
self.size = ((self.size[0]+s-1)/s, (self.size[1]+s-1)/s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 1)
return self
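# Illustrative draft call (sketch; sizes are hypothetical): for an
# 800x600 image, draft("L", (100, 100)) picks scale 8, so the decoder
# is configured with (8, 1) and self.size becomes (100, 75).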
def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import tempfile, os
file = tempfile.mktemp()
os.system("djpeg %s >%s" % (self.filename, file))
try:
self.im = Image.core.open_ppm(file)
finally:
try: os.unlink(file)
except: pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
import TiffImagePlugin, StringIO
def fixup(value):
if len(value) == 1:
return value[0]
return value
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = StringIO.StringIO(data[6:])
head = file.read(8)
exif = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = fixup(value)
# get exif extension
try:
file.seek(exif[0x8769])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = fixup(value)
# get gpsinfo extension
try:
file.seek(exif[0x8825])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
exif[0x8825] = gps = {}
for key, value in info.items():
gps[key] = fixup(value)
return exif
# --------------------------------------------------------------------
# stuff to save JPEG files
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"RGBX": "RGB",
"CMYK": "CMYK;I", # assume adobe conventions
"YCbCr": "YCbCr",
}
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as JPEG" % im.mode)
info = im.encoderinfo
dpi = info.get("dpi", (0, 0))
subsampling = info.get("subsampling", -1)
if subsampling == "4:4:4":
subsampling = 0
elif subsampling == "4:2:2":
subsampling = 1
elif subsampling == "4:1:1":
subsampling = 2
extra = ""
icc_profile = info.get("icc_profile")
if icc_profile:
ICC_OVERHEAD_LEN = 14
MAX_BYTES_IN_MARKER = 65533
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
markers = []
while icc_profile:
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
i = 1
for marker in markers:
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
extra = extra + ("\xFF\xE2" + size + "ICC_PROFILE\0" + chr(i) + chr(len(markers)) + marker)
i = i + 1
# get keyword arguments
im.encoderconfig = (
info.get("quality", 0),
# "progressive" is the official name, but older documentation
# says "progression"
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
info.has_key("progressive") or info.has_key("progression"),
info.get("smooth", 0),
info.has_key("optimize"),
info.get("streamtype", 0),
dpi[0], dpi[1],
subsampling,
extra,
)
ImageFile._save(im, fp, [("jpeg", (0,0)+im.size, 0, rawmode)])
def _save_cjpeg(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
file = im._dump()
os.system("cjpeg %s >%s" % (file, filename))
try: os.unlink(file)
except: pass
# --------------------------------------------------------------------
# Registry stuff
Image.register_open("JPEG", JpegImageFile, _accept)
Image.register_save("JPEG", _save)
Image.register_extension("JPEG", ".jfif")
Image.register_extension("JPEG", ".jpe")
Image.register_extension("JPEG", ".jpg")
Image.register_extension("JPEG", ".jpeg")
Image.register_mime("JPEG", "image/jpeg")
|
foreni-packages/golismero | refs/heads/master | tools/sqlmap/waf/varnish.py | 7 | #!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "Varnish FireWall (OWASP) "
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = headers.get("X-Varnish") is not None
retval |= re.search(r"varnish\Z", headers.get(HTTP_HEADER.VIA, ""), re.I) is not None
if retval:
break
return retval
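# For example (sketch), either of these response headers triggers detection:
#   X-Varnish: 1234567890
#   Via: 1.1 varnish
# the second one via the case-insensitive r"varnish\Z" search on Via.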
|
s0lst1ce/twisted-intro | refs/heads/master | basic-twisted/hello.py | 11 |
def hello():
print 'Hello from the reactor loop!'
print 'Lately I feel like I\'m stuck in a rut.'
from twisted.internet import reactor
reactor.callWhenRunning(hello)
print 'Starting the reactor.'
reactor.run()
|
sankalpg/Essentia_tonicDebug_TEMP | refs/heads/master | test/src/unittest/highlevel/test_singlebeatloudness.py | 10 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from numpy import *
from essentia_test import *
class TestSingleBeatLoudness(TestCase):
def testZero(self):
self.assertEqualVector(SingleBeatLoudness(frequencyBands = [20,150])(zeros(8192)), zeros(2))
def testEmpty(self):
self.assertComputeFails(SingleBeatLoudness(), [])
def testSingle(self):
self.assertComputeFails(SingleBeatLoudness(), [1])
def testInvalidInput(self):
loudness = SingleBeatLoudness(sampleRate=44100,
beatWindowDuration=0.1,
beatDuration=0.05)
# should fail because input.size < (beatDuration + beatWindowDuration) * sampleRate
self.assertComputeFails(loudness, [i for i in range(1000)])
def testInvalidParam(self):
self.assertConfigureFails(SingleBeatLoudness(), {'beatWindowDuration':0.05,
'beatDuration':0.1})
self.assertConfigureFails(SingleBeatLoudness(), {'beatWindowDuration':0.1,
'beatDuration':-0.05})
self.assertConfigureFails(SingleBeatLoudness(), {'beatWindowDuration':-0.1,
'beatDuration':0.05})
self.assertConfigureFails(SingleBeatLoudness(), {'sampleRate':-441000})
def testRegression(self):
# test that it yields valid output (which doesn't mean correct output ;)
loudness = SingleBeatLoudness(frequencyBands = [20,150])(array(random.rand(8192)))
self.assert_(not any(numpy.isnan(loudness)))
self.assert_(not any(numpy.isinf(loudness)))
self.assert_(all(array(loudness) >= 0.0))
suite = allTests(TestSingleBeatLoudness)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
mickele77/FreeCAD | refs/heads/master | src/Mod/Path/PathScripts/PathDrilling.py | 7 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2014 Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,Path
from PySide import QtCore,QtGui
from PathScripts import PathUtils,PathSelection,PathProject
FreeCADGui = None
if FreeCAD.GuiUp:
import FreeCADGui
"""Path Drilling object and FreeCAD command"""
# Qt tanslation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
class ObjectDrilling:
def __init__(self,obj):
#obj.addProperty("App::PropertyVector","StartPoint","Path",translate("PathProfile","The start position of the drilling"))
obj.addProperty("App::PropertyLinkSub","Base","Path",translate("Parent Object","The base geometry of this toolpath"))
obj.addProperty("App::PropertyVectorList","locations","Path","The drilling locations")
obj.addProperty("App::PropertyLength", "PeckDepth", "Drilling", translate("PeckDepth","Incremental Drill depth before retracting to clear chips"))
#obj.PeckDepth = (0,0,1000,1)
obj.addProperty("App::PropertyDistance", "ClearanceHeight", "Drilling", translate("Clearance Height","The height needed to clear clamps and obstructions"))
obj.addProperty("App::PropertyDistance", "FinalDepth", "Drilling", translate("Final Depth","Final Depth of Tool- lowest value in Z"))
obj.addProperty("App::PropertyDistance", "RetractHeight", "Drilling", translate("Retract Height","The height where feed starts and height during retract tool when path is finished"))
obj.addProperty("App::PropertyLength", "VertFeed", "Feed",translate("Vert Feed","Feed rate for vertical moves in Z"))
#obj.addProperty("App::PropertySpeed", "HorizFeed", "Feed",translate("Horiz Feed","Feed rate for horizontal moves")) #not needed for drilling
obj.addProperty("App::PropertyString","Comment","Path",translate("PathProject","An optional comment for this profile"))
obj.addProperty("App::PropertyBool","Active","Path",translate("Active","Make False, to prevent operation from generating code"))
obj.addProperty("App::PropertyIntegerConstraint","ToolNumber","Tool",translate("PathProfile","The tool number in use"))
obj.ToolNumber = (0,0,1000,1)
obj.setEditorMode('ToolNumber',1) #make this read only
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def execute(self,obj):
output = "G90 G98\n"
# rapid to first hole location, with spindle still retracted:
p0 = obj.locations[0]
output += "G0 X"+str(p0.x) + " Y" + str(p0.y)+ "\n"
# move tool to clearance plane
output += "G0 Z" + str(obj.ClearanceHeight.Value) + "\n"
if obj.PeckDepth.Value > 0:
cmd = "G83"
qword = " Q"+ str(obj.PeckDepth.Value)
else:
cmd = "G81"
qword = ""
for p in obj.locations:
output += cmd + " X" + str(p.x) + " Y" + str(p.y) + " Z" + str(obj.FinalDepth.Value) + qword + " R" + str(obj.RetractHeight.Value) + " F" + str(obj.VertFeed.Value) + "\n"
output += "G80\n"
print output
path = Path.Path(output)
obj.Path = path
# tie the toolnumber to the PathLoadTool object ToolNumber
if len(obj.InList)>0: #check to see if obj is in the Project group yet
project = obj.InList[0]
tl = int(PathUtils.changeTool(obj,project))
obj.ToolNumber= tl
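# Illustrative G-code emitted by execute() for a single hole at X10 Y5 with
# PeckDepth 0 (values are hypothetical; a nonzero PeckDepth swaps G81 for
# G83 with a Q word):
#
#   G90 G98
#   G0 X10.0 Y5.0
#   G0 Z<ClearanceHeight>
#   G81 X10.0 Y5.0 Z<FinalDepth> R<RetractHeight> F<VertFeed>
#   G80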
class _ViewProviderDrill:
def __init__(self,obj): #mandatory
# obj.addProperty("App::PropertyFloat","SomePropertyName","PropertyGroup","Description of this property")
obj.Proxy = self
def __getstate__(self): #mandatory
return None
def __setstate__(self,state): #mandatory
return None
def getIcon(self): #optional
return ":/icons/Path-Drilling.svg"
# def attach(self): #optional
# # this is executed on object creation and object load from file
# pass
def onChanged(self,obj,prop): #optional
# this is executed when a property of the VIEW PROVIDER changes
pass
def updateData(self,obj,prop): #optional
# this is executed when a property of the APP OBJECT changes
pass
def setEdit(self,vobj,mode): #optional
# this is executed when the object is double-clicked in the tree
pass
def unsetEdit(self,vobj,mode): #optional
# this is executed when the user cancels or terminates edit mode
pass
class CommandPathDrilling:
def GetResources(self):
return {'Pixmap' : 'Path-Drilling',
'MenuText': QtCore.QT_TRANSLATE_NOOP("PathDrilling","Drilling"),
'Accel': "P, D",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("PathDrilling","Creates a Path Drilling object")}
def IsActive(self):
return not FreeCAD.ActiveDocument is None
def Activated(self):
import Path
import Part
from PathScripts import PathUtils,PathDrilling,PathProject
prjexists = False
selection = FreeCADGui.Selection.getSelectionEx()
if not selection:
return
# if everything is ok, execute and register the transaction in the undo/redo stack
FreeCAD.ActiveDocument.openTransaction(translate("PathDrilling","Create Drilling"))
FreeCADGui.addModule("PathScripts.PathDrilling")
obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","Drilling")
PathDrilling.ObjectDrilling(obj)
myList = obj.locations
for sub in selection:
for point in sub.SubObjects:
if isinstance(point,Part.Vertex):
myList.append(FreeCAD.Vector(point.X, point.Y, point.Z))
if isinstance(point,Part.Edge):
if isinstance(point.Curve,Part.Circle):
center = point.Curve.Center
myList.append(FreeCAD.Vector(center.x,center.y,center.z))
obj.locations = myList
PathDrilling._ViewProviderDrill(obj.ViewObject)
# obj.ViewObject.Proxy = 0
obj.Active = True
project = PathUtils.addToProject(obj)
tl = PathUtils.changeTool(obj,project)
if tl:
obj.ToolNumber = tl
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_Drilling',CommandPathDrilling())
FreeCAD.Console.PrintLog("Loading PathDrilling... done\n")
|
kushalbhola/MyStuff | refs/heads/master | Practice/PythonApplication/env/Lib/site-packages/pandas/plotting/_matplotlib/core.py | 2 | import re
from typing import Optional # noqa
import warnings
import numpy as np
from pandas._config import get_option
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_number,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib import converter
from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0
from pandas.plotting._matplotlib.style import _get_standard_colors
from pandas.plotting._matplotlib.tools import (
_flatten,
_get_all_lines,
_get_xlim,
_handle_shared_axes,
_subplots,
format_date_labels,
table,
)
if get_option("plotting.matplotlib.register_converters"):
converter.register(explicit=False)
class MPLPlot:
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = "vertical"
_default_rot = 0
orientation = None # type: Optional[str]
_pop_attributes = [
"label",
"style",
"logy",
"logx",
"loglog",
"mark_right",
"stacked",
]
_attr_defaults = {
"logy": False,
"logx": False,
"loglog": False,
"mark_right": True,
"stacked": False,
}
def __init__(
self,
data,
kind=None,
by=None,
subplots=False,
sharex=None,
sharey=False,
use_index=True,
figsize=None,
grid=None,
legend=True,
rot=None,
ax=None,
fig=None,
title=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
sort_columns=False,
fontsize=None,
secondary_y=False,
colormap=None,
table=False,
layout=None,
**kwds
):
import matplotlib.pyplot as plt
converter._WARN = False # no warning for pandas plots
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else plt.rcParams["axes.grid"]
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop("xerr", None)
yerr = kwds.pop("yerr", None)
self.errors = {
kw: self._parse_errorbars(kw, err)
for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
}
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if "cmap" in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif "cmap" in kwds:
self.colormap = kwds.pop("cmap")
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if "color" not in self.kwds and "colors" in self.kwds:
warnings.warn(
(
"'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"
)
)
colors = self.kwds.pop("colors")
self.kwds["color"] = colors
if (
"color" in self.kwds
and self.nseries == 1
and not is_list_like(self.kwds["color"])
):
# support series.plot(color='green')
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds
and isinstance(self.kwds["color"], tuple)
and self.nseries == 1
and len(self.kwds["color"]) in (3, 4)
):
# support RGB and RGBA tuples in series plot
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds or "colors" in self.kwds
) and self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used " "simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match("^[a-z]+?", s) is not None:
raise ValueError(
"Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol"
)
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
# TODO: unused?
# if self.sort_columns:
# columns = com.try_sort(data.columns)
# else:
# columns = data.columns
for col, values in data.items():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, "right_ax"):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, "left_ax"):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
if self.logy is True or self.loglog is True:
new_ax.set_yscale("log")
elif self.logy == "sym" or self.loglog == "sym":
new_ax.set_yscale("symlog")
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(
naxes=self.nseries,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
ax=self.ax,
layout=self.layout,
layout_type=self._layout_type,
)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
invalid_log = next(iter((input_log - valid_log)))
raise ValueError(
"Boolean, None and 'sym' are valid options,"
" '{}' is given.".format(invalid_log)
)
if self.logx is True or self.loglog is True:
[a.set_xscale("log") for a in axes]
elif self.logx == "sym" or self.loglog == "sym":
[a.set_xscale("symlog") for a in axes]
if self.logy is True or self.loglog is True:
[a.set_yscale("log") for a in axes]
elif self.logy == "sym" or self.loglog == "sym":
[a.set_yscale("symlog") for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (
is_list_like(self.secondary_y) and len(self.secondary_y) == self.nseries
)
if sec_true or all_sec:
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = "None"
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
numeric_data = data.select_dtypes(
include=[np.number, "datetime", "datetimetz", "timedelta"]
)
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no non-numeric frames or series allowed
if is_empty:
raise TypeError("no numeric data to plot")
# GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
# np.ndarray before plot.
numeric_data = numeric_data.copy()
for col in numeric_data:
numeric_data[col] = np.asarray(numeric_data[col])
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
if self.orientation == "vertical" or self.orientation is None:
self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
elif self.orientation == "horizontal":
self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(
axarr=all_axes,
nplots=len(all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = (
"The length of `title` must equal the number "
"of columns if using `title` of type `list` "
"and `subplots=True`.\n"
"length of title = {}\n"
"number of columns = {}"
).format(len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = (
"Using `title` of type `list` is not supported "
"unless `subplots=True` is passed"
)
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, ABCMultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = map(pprint_thing, self.data.columns.names)
return ",".join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + " (right)"
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ""
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == "reverse":
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc="best", title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc="best")
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None)
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and isinstance(index, ABCPeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = list(range(len(index)))
else:
x = list(range(len(index)))
return x
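    # Illustrative sketch (not part of the original source): with an index
    # that is neither numeric nor date-like, positional ticks are used and
    # `_need_to_set_index` triggers relabeling in `_post_plot_logic` below.
    #
    #     import pandas as pd
    #     s = pd.Series([1, 3, 2], index=["low", "mid", "high"])
    #     s.plot()  # x positions are 0, 1, 2 with string tick labels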
@classmethod
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = isna(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if isinstance(x, ABCIndexClass):
x = x._mpl_repr()
if is_errorbar:
if "xerr" in kwds:
kwds["xerr"] = np.array(kwds.get("xerr"))
if "yerr" in kwds:
kwds["yerr"] = np.array(kwds.get("yerr"))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
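    # Illustrative sketch (not part of the original source): missing values
    # are masked rather than dropped, so lines break at NaN instead of
    # connecting across the gap.
    #
    #     import numpy as np
    #     import pandas as pd
    #     pd.Series([1.0, np.nan, 3.0]).plot()  # the line has a gap at index 1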
def _get_index_name(self):
if isinstance(self.data.index, ABCMultiIndex):
name = self.data.index.names
if com._any_not_none(*name):
name = ",".join(pprint_thing(x) for x in name)
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, "left_ax", ax)
else:
return getattr(ax, "right_ax", ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
@classmethod
def get_default_ax(cls, ax):
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
            ax = cls._get_ax_layer(ax)
        return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndexClass)):
return self.data.columns[i] in self.secondary_y
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = "color" in kwds or self.colormap is not None
nocolor_style = style is None or re.match("[a-z]+", style) is None
if (has_color or self.subplots) and nocolor_style:
kwds["color"] = colors[col_num % len(colors)]
return style, kwds
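    # Illustrative sketch (not part of the original source): `style` may be a
    # single string, a list matched by column position, or a dict matched by
    # column label; the column names below are hypothetical.
    #
    #     import pandas as pd
    #     df = pd.DataFrame({"a": [1, 2], "b": [2, 1]})
    #     df.plot(style=["r--", "g:"])            # matched by position
    #     df.plot(style={"a": "r--", "b": "g:"})  # matched by label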
def _get_colors(self, num_colors=None, color_kwds="color"):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(
num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds),
)
def _parse_errorbars(self, label, err):
"""
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
"""
if err is None:
return None
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, ABCDataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, ABCSeries):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, str):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (
(err_shape[0] != self.nseries)
or (err_shape[1] != 2)
or (err_shape[2] != len(self.data))
):
msg = (
"Asymmetrical error bars should be provided "
+ "with the shape (%u, 2, %u)" % (self.nseries, len(self.data))
)
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid {label} detected".format(label=label)
raise ValueError(msg)
return err
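    # Illustrative sketch (not part of the original source): the error-bar
    # forms accepted above, through the public API; values are hypothetical.
    #
    #     import numpy as np
    #     import pandas as pd
    #     df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
    #     df.plot(yerr=np.array([0.1, 0.2, 0.1]))  # ndarray of the same length
    #     df.plot(yerr={"a": [0.1, 0.2, 0.1]})     # dict keyed by column label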
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
errors = {}
for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (ABCDataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_subplots(self):
from matplotlib.axes import Subplot
return [
ax for ax in self.axes[0].get_figure().get_axes() if isinstance(ax, Subplot)
]
def _get_axes_layout(self):
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
"""
Abstract class for plotting on plane, currently scatter and hexbin.
"""
_layout_type = "single"
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + " requires an x and y column")
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if len(self.data[x]._get_numeric_data()) == 0:
raise ValueError(self._kind + " requires x column to be numeric")
if len(self.data[y]._get_numeric_data()) == 0:
raise ValueError(self._kind + " requires y column to be numeric")
self.x = x
self.y = y
@property
def nseries(self):
return 1
def _post_plot_logic(self, ax, data):
x, y = self.x, self.y
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
def _plot_colorbar(self, ax, **kwds):
# Addresses issues #10611 and #10678:
# When plotting scatterplots and hexbinplots in IPython
# inline backend the colorbar axis height tends not to
# exactly match the parent axis height.
# The difference is due to small fractional differences
# in floating points with similar representation.
# To deal with this, this method forces the colorbar
# height to take the height of the parent axes.
# For a more detailed description of the issue
# see the following link:
# https://github.com/ipython/ipython/issues/11215
img = ax.collections[0]
cbar = self.fig.colorbar(img, ax=ax, **kwds)
if _mpl_ge_3_0_0():
# The workaround below is no longer necessary.
return
points = ax.get_position().get_points()
cbar_points = cbar.ax.get_position().get_points()
cbar.ax.set_position(
[
cbar_points[0, 0],
points[0, 1],
cbar_points[1, 0] - cbar_points[0, 0],
points[1, 1] - points[0, 1],
]
)
# To see the discrepancy in axis heights uncomment
# the following two lines:
# print(points[1, 1] - points[0, 1])
# print(cbar_points[1, 1] - cbar_points[0, 1])
class ScatterPlot(PlanePlot):
_kind = "scatter"
def __init__(self, data, x, y, s=None, c=None, **kwargs):
if s is None:
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
super().__init__(data, x, y, s=s, **kwargs)
if is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.c = c
def _make_plot(self):
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop("colorbar", self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or "Greys"
cmap = self.plt.cm.get_cmap(cmap)
color = self.kwds.pop("color", None)
if c is not None and color is not None:
raise TypeError("Specify exactly one of `c` and `color`")
elif c is None and color is None:
c_values = self.plt.rcParams["patch.facecolor"]
elif color is not None:
c_values = color
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, "label"):
label = self.label
else:
label = None
scatter = ax.scatter(
data[x].values,
data[y].values,
c=c_values,
label=label,
cmap=cmap,
**self.kwds
)
if cb:
cbar_label = c if c_is_column else ""
self._plot_colorbar(ax, label=cbar_label)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds["ecolor"] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds)
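# Illustrative sketch (not part of the original source): a scatter plot with a
# color column, which takes the colorbar path above; column names are
# hypothetical.
#
#     import pandas as pd
#     df = pd.DataFrame({"x": [1, 2, 3], "y": [3, 1, 2], "z": [0.1, 0.5, 0.9]})
#     df.plot.scatter(x="x", y="y", c="z")  # draws a colorbar labeled "z"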
class HexBinPlot(PlanePlot):
_kind = "hexbin"
def __init__(self, data, x, y, C=None, **kwargs):
super().__init__(data, x, y, **kwargs)
if is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.C = C
def _make_plot(self):
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or "BuGn"
cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop("colorbar", True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds)
if cb:
self._plot_colorbar(ax)
def _make_legend(self):
pass
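# Illustrative sketch (not part of the original source): hexbin aggregates
# point density (or a C column) into hexagonal bins, and the no-op
# _make_legend above suppresses the legend.
#
#     import numpy as np
#     import pandas as pd
#     df = pd.DataFrame(np.random.randn(1000, 2), columns=["x", "y"])
#     df.plot.hexbin(x="x", y="y", gridsize=20)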
class LinePlot(MPLPlot):
_kind = "line"
_default_rot = 0
orientation = "vertical"
def __init__(self, data, **kwargs):
from pandas.plotting import plot_params
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params["x_compat"]
if "x_compat" in self.kwds:
self.x_compat = bool(self.kwds.pop("x_compat"))
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _use_dynamic_x(self):
from pandas.plotting._matplotlib.timeseries import _use_dynamic_x
return _use_dynamic_x(self._get_ax(0), self.data)
def _make_plot(self):
if self._is_ts_plot():
from pandas.plotting._matplotlib.timeseries import _maybe_convert_index
data = _maybe_convert_index(self._get_ax(0), self.data)
x = data.index # dummy, not used
plotf = self._ts_plot
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._plot
it = self._iter_data()
stacking_id = self._get_stacking_id()
is_errorbar = com._any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
kwds = self.kwds.copy()
style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label) # .encode('utf-8')
kwds["label"] = label
newlines = plotf(
ax,
x,
y,
style=style,
column_num=i,
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds
)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds):
        # column_num is used to get the target column from plotf in line and
# area plots
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])
lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
cls._update_stacker(ax, stacking_id, y)
return lines
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._matplotlib.timeseries import (
_maybe_resample,
_decorate_axes,
format_dateaxis,
)
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
# column_num must be in kwds for stacking purpose
freq, data = _maybe_resample(data, ax, kwds)
# Set ax with freq info
_decorate_axes(ax, freq, kwds)
# digging deeper
if hasattr(ax, "left_ax"):
_decorate_axes(ax.left_ax, freq, kwds)
if hasattr(ax, "right_ax"):
_decorate_axes(ax.right_ax, freq, kwds)
ax._plot_data.append((data, cls._kind, kwds))
lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, data.index)
return lines
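    # Illustrative sketch (not part of the original source): a DatetimeIndex
    # with a regular frequency takes the dynamic ts path above
    # (frequency-aware axes and date formatting).
    #
    #     import pandas as pd
    #     idx = pd.date_range("2000-01-01", periods=10, freq="D")
    #     pd.Series(range(10), index=idx).plot()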
def _get_stacking_id(self):
if self.stacked:
return id(self.data)
else:
return None
@classmethod
def _initialize_stacker(cls, ax, stacking_id, n):
if stacking_id is None:
return
if not hasattr(ax, "_stacker_pos_prior"):
ax._stacker_pos_prior = {}
if not hasattr(ax, "_stacker_neg_prior"):
ax._stacker_neg_prior = {}
ax._stacker_pos_prior[stacking_id] = np.zeros(n)
ax._stacker_neg_prior[stacking_id] = np.zeros(n)
@classmethod
def _get_stacked_values(cls, ax, stacking_id, values, label):
if stacking_id is None:
return values
if not hasattr(ax, "_stacker_pos_prior"):
# stacker may not be initialized for subplots
cls._initialize_stacker(ax, stacking_id, len(values))
if (values >= 0).all():
return ax._stacker_pos_prior[stacking_id] + values
elif (values <= 0).all():
return ax._stacker_neg_prior[stacking_id] + values
raise ValueError(
"When stacked is True, each column must be either "
"all positive or negative."
"{0} contains both positive and negative values".format(label)
)
@classmethod
def _update_stacker(cls, ax, stacking_id, values):
if stacking_id is None:
return
if (values >= 0).all():
ax._stacker_pos_prior[stacking_id] += values
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
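    # Illustrative sketch (not part of the original source): the stacking
    # bookkeeping above keeps separate positive and negative baselines per
    # axes, and a mixed-sign column raises ValueError.
    #
    #     import pandas as pd
    #     df = pd.DataFrame({"up": [1, 2], "down": [-1, -2]})
    #     df.plot(stacked=True)  # "up" stacks above 0, "down" below 0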
def _post_plot_logic(self, ax, data):
from matplotlib.ticker import FixedLocator
def get_label(i):
try:
return pprint_thing(data.index[i])
except Exception:
return ""
if self._need_to_set_index:
xticks = ax.get_xticks()
xticklabels = [get_label(x) for x in xticks]
ax.set_xticklabels(xticklabels)
ax.xaxis.set_major_locator(FixedLocator(xticks))
condition = (
not self._use_dynamic_x()
and data.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex)
)
index_name = self._get_index_name()
if condition:
            # irregular TS rotated 30 deg. by default; there is
            # probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
_kind = "area"
def __init__(self, data, **kwargs):
kwargs.setdefault("stacked", True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault("alpha", 0.5)
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
@classmethod
def _plot(
cls,
ax,
x,
y,
style=None,
column_num=None,
stacking_id=None,
is_errorbar=False,
**kwds
):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])
# need to remove label, because subplots uses mpl legend as it is
line_kwds = kwds.copy()
line_kwds.pop("label")
lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
# unable to use ``_get_stacked_values`` here to get starting point
if stacking_id is None:
start = np.zeros(len(y))
elif (y >= 0).all():
start = ax._stacker_pos_prior[stacking_id]
elif (y <= 0).all():
start = ax._stacker_neg_prior[stacking_id]
else:
start = np.zeros(len(y))
if "color" not in kwds:
kwds["color"] = lines[0].get_color()
rect = ax.fill_between(xdata, start, y_values, **kwds)
cls._update_stacker(ax, stacking_id, y)
# LinePlot expects list of artists
res = [rect]
return res
def _post_plot_logic(self, ax, data):
LinePlot._post_plot_logic(self, ax, data)
if self.ylim is None:
if (data >= 0).all().all():
ax.set_ylim(0, None)
elif (data <= 0).all().all():
ax.set_ylim(None, 0)
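# Illustrative sketch (not part of the original source): area plots stack by
# default and reject log-y; the unstacked variant lowers alpha so overlapping
# regions stay visible.
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1, 2, 3], "b": [3, 2, 1]})
#     df.plot.area()               # stacked=True by default
#     df.plot.area(stacked=False)  # alpha defaults to 0.5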
class BarPlot(MPLPlot):
_kind = "bar"
_default_rot = 90
orientation = "vertical"
def __init__(self, data, **kwargs):
# we have to treat a series differently than a
# 1-column DataFrame w.r.t. color handling
self._is_series = isinstance(data, ABCSeries)
self.bar_width = kwargs.pop("width", 0.5)
pos = kwargs.pop("position", 0.5)
kwargs.setdefault("align", "center")
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop("bottom", 0)
self.left = kwargs.pop("left", 0)
self.log = kwargs.pop("log", False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs["align"] == "edge":
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs["align"] == "edge":
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if is_list_like(self.left):
self.left = np.array(self.left)
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
@property
def _start_base(self):
return self.bottom
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
if self._is_series:
kwds["color"] = colors
else:
kwds["color"] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label)
if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None):
kwds["ecolor"] = mpl.rcParams["xtick.color"]
start = 0
if self.log and (y >= 1).all():
start = 1
start = start + self._start_base
if self.subplots:
w = self.bar_width / 2
rect = self._plot(
ax,
self.ax_pos + w,
y,
self.bar_width,
start=start,
label=label,
log=self.log,
**kwds
)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior) + self._start_base
w = self.bar_width / 2
rect = self._plot(
ax,
self.ax_pos + w,
y,
self.bar_width,
start=start,
label=label,
log=self.log,
**kwds
)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = self._plot(
ax,
self.ax_pos + (i + 0.5) * w,
y,
w,
start=start,
label=label,
log=self.log,
**kwds
)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self, ax, data):
if self.use_index:
str_index = [pprint_thing(key) for key in data.index]
else:
str_index = [pprint_thing(key) for key in range(data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_xlabel(name)
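# Illustrative sketch (not part of the original source): grouped vs stacked
# bars, exercising the width/offset arithmetic in _make_plot above.
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1, 2], "b": [2, 1]})
#     df.plot.bar()              # grouped: bar_width is split across nseries
#     df.plot.bar(stacked=True)  # stacked: full width on a shared baseline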
class BarhPlot(BarPlot):
_kind = "barh"
_default_rot = 0
orientation = "horizontal"
@property
def _start_base(self):
return self.left
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.barh(x, y, w, left=start, log=log, **kwds)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
# horizontal bars
ax.set_ylim((start_edge, end_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_ylabel(name)
class PiePlot(MPLPlot):
_kind = "pie"
_layout_type = "horizontal"
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
colors = self._get_colors(num_colors=len(self.data), color_kwds="colors")
self.kwds.setdefault("colors", colors)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ""
else:
return label
idx = [pprint_thing(v) for v in self.data.index]
labels = kwds.pop("labels", idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(l, value) for l, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get("autopct", None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
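# Illustrative sketch (not part of the original source): pie plots take a
# single series (or one subplot per column), and zero-valued wedges get blank
# labels via blank_labeler above.
#
#     import pandas as pd
#     s = pd.Series([3, 0, 2], index=["a", "b", "c"], name="share")
#     s.plot.pie(autopct="%.0f%%")  # the "b" wedge renders with an empty label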
|
bspink/django | refs/heads/master | tests/utils_tests/test_lorem_ipsum.py | 246 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.utils.lorem_ipsum import paragraphs, words
class WebdesignTest(unittest.TestCase):
def test_words(self):
self.assertEqual(words(7), 'lorem ipsum dolor sit amet consectetur adipisicing')
def test_paragraphs(self):
self.assertEqual(paragraphs(1),
['Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'])
|
msebire/intellij-community | refs/heads/master | python/testData/inspections/PyProtocolInspection/protocolBases.py | 19 | from typing import Protocol
class MyProto1(Protocol):
pass
class MyProto2(Protocol):
pass
class A:
pass
class B(A, MyProto1):
pass
class C(MyProto1, MyProto2):
pass
class <warning descr="All bases of a protocol must be protocols">D</warning>(A, MyProto1, Protocol):
pass
class E(MyProto1, MyProto2, Protocol):
pass
|
CRImier/yowsup | refs/heads/master | yowsup/layers/protocol_profiles/protocolentities/iq_picture_set.py | 65 | from .iq_picture import PictureIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
import time
class SetPictureIqProtocolEntity(PictureIqProtocolEntity):
'''
<iq type="set" id="{{id}}" xmlns="w:profile:picture", to={{jid}}">
<picture type="image" id="{{another_id}}">
{{Binary bytes of the picture when type is set.}}
</picture>
</iq>
'''
def __init__(self, jid, previewData, pictureData, pictureId = None, _id = None):
super(SetPictureIqProtocolEntity, self).__init__(jid, _id, "set")
self.setSetPictureProps(previewData, pictureData, pictureId)
def setSetPictureProps(self, previewData, pictureData, pictureId = None):
self.setPictureData(pictureData)
self.setPictureId(pictureId or str(int(time.time())))
self.setPreviewData(previewData)
def setPictureData(self, pictureData):
self.pictureData = pictureData
def getPictureData(self):
return self.pictureData
def setPreviewData(self, previewData):
self.previewData = previewData
def getPreviewData(self):
return self.previewData
def setPictureId(self, pictureId):
self.pictureId = pictureId
def getPictureId(self):
return self.pictureId
def toProtocolTreeNode(self):
node = super(PictureIqProtocolEntity, self).toProtocolTreeNode()
attribs = {"type": "image", "id": self.pictureId}
pictureNode = ProtocolTreeNode("picture", attribs, None, self.getPictureData())
previewNode = ProtocolTreeNode("picture", {"type": "preview"}, None, self.getPreviewData())
node.addChild(pictureNode)
node.addChild(previewNode)
return node
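    # Illustrative sketch (not part of the original source): building and
    # serializing the entity; the jid and image bytes below are hypothetical.
    #
    #     entity = SetPictureIqProtocolEntity("123456789@s.whatsapp.net",
    #                                         preview_bytes, picture_bytes)
    #     node = entity.toProtocolTreeNode()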
@staticmethod
def fromProtocolTreeNode(node):
entity = PictureIqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = SetPictureIqProtocolEntity
pictureNode = None
previewNode = None
for child in node.getAllChildren("picture"):
nodeType = child.getAttributeValue("type")
if nodeType == "image":
pictureNode = child
elif nodeType == "preview":
previewNode = child
entity.setSetPictureProps(previewNode.getData(), pictureNode.getData(), pictureNode.getAttributeValue("id"))
        return entity
|
supriyantomaftuh/django | refs/heads/master | tests/forms_tests/tests/test_fields.py | 134 | # -*- coding: utf-8 -*-
"""
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
"""
from __future__ import unicode_literals
import datetime
import os
import pickle
import re
import uuid
from decimal import Decimal
from unittest import skipIf
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import (
BooleanField, CharField, ChoiceField, ComboField, DateField, DateTimeField,
DecimalField, DurationField, EmailField, Field, FileField, FilePathField,
FloatField, Form, GenericIPAddressField, HiddenInput, ImageField,
IntegerField, MultipleChoiceField, NullBooleanField, NumberInput,
PasswordInput, RadioSelect, RegexField, SlugField, SplitDateTimeField,
Textarea, TextInput, TimeField, TypedChoiceField, TypedMultipleChoiceField,
URLField, UUIDField, ValidationError, Widget, forms,
)
from django.test import SimpleTestCase, ignore_warnings
from django.utils import formats, six, translation
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.duration import duration_string
try:
from PIL import Image
except ImportError:
Image = None
def fix_os_paths(x):
if isinstance(x, six.string_types):
return x.replace('\\', '/')
elif isinstance(x, tuple):
return tuple(fix_os_paths(list(x)))
elif isinstance(x, list):
return [fix_os_paths(y) for y in x]
else:
return x
class FieldsTests(SimpleTestCase):
def assertWidgetRendersTo(self, field, to):
class _Form(Form):
f = field
self.assertHTMLEqual(str(_Form()['f']), to)
def test_field_sets_widget_is_required(self):
self.assertTrue(Field(required=True).widget.is_required)
self.assertFalse(Field(required=False).widget.is_required)
def test_cooperative_multiple_inheritance(self):
class A(object):
def __init__(self):
self.class_a_var = True
super(A, self).__init__()
class ComplexField(Field, A):
def __init__(self):
super(ComplexField, self).__init__()
f = ComplexField()
self.assertTrue(f.class_a_var)
# CharField ###################################################################
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, None)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '1234567890a')
self.assertEqual(f.max_length, 10)
self.assertEqual(f.min_length, None)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 5).'", f.clean, '12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertEqual(f.max_length, None)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Ensure that setting min_length or max_length to something that is not a
        number raises an exception.
"""
self.assertRaises(ValueError, CharField, min_length='a')
self.assertRaises(ValueError, CharField, max_length='a')
self.assertRaises(ValueError, CharField, 'a')
def test_charfield_widget_attrs(self):
"""
Ensure that CharField.widget_attrs() always returns a dictionary.
Refs #15912
"""
# Return an empty dictionary if max_length is None
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Otherwise, return a maxlength attribute equal to max_length
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
def test_charfield_strip(self):
"""
Ensure that values have whitespace stripped and that strip=False works.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
def test_charfield_disabled(self):
f = CharField(disabled=True)
self.assertWidgetRendersTo(f, '<input type="text" name="f" id="id_f" disabled />')
# IntegerField ################################################################
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(42, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, 'a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, 11)
self.assertEqual(10, f.clean('10'))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'", f.clean, '11')
self.assertEqual(f.max_value, 10)
self.assertEqual(f.min_value, None)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'", f.clean, 1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'", f.clean, 21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
Make sure localized IntegerField's widget renders to a text input with
no number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '1.5')
self.assertRaisesMessage(ValidationError, "'Enter a whole number.'", f.clean, '…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Test that class-defined widget is not overwritten by __init__ (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
# FloatField ##################################################################
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(1.0, f.clean('1'))
self.assertIsInstance(f.clean('1'), float)
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Infinity')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(f, '<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" />')
def test_floatfield_localized(self):
"""
Make sure localized FloatField's widget renders to a text input with
no number input specific attributes.
"""
f = FloatField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_floatfield_changed(self):
f = FloatField()
n = 4.35
self.assertFalse(f.has_changed(n, '4.3500'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = FloatField(localize=True)
localized_n = formats.localize_input(n) # -> '4,35' in French
self.assertFalse(f.has_changed(n, localized_n))
# DecimalField ################################################################
def test_decimalfield_1(self):
f = DecimalField(max_digits=4, decimal_places=2)
self.assertWidgetRendersTo(f, '<input id="id_f" step="0.01" type="number" name="f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertIsInstance(f.clean('1'), Decimal)
self.assertEqual(f.clean('23'), Decimal("23"))
self.assertEqual(f.clean('3.14'), Decimal("3.14"))
self.assertEqual(f.clean(3.14), Decimal("3.14"))
self.assertEqual(f.clean(Decimal('3.14')), Decimal("3.14"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'NaN')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '-Inf')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'a')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, 'łąść')
self.assertEqual(f.clean('1.0 '), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0'), Decimal("1.0"))
self.assertEqual(f.clean(' 1.0 '), Decimal("1.0"))
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '1.0a')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '123.45')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '1.234')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 digits before the decimal point.'", f.clean, '123.4')
self.assertEqual(f.clean('-12.34'), Decimal("-12.34"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-123.45')
self.assertEqual(f.clean('-.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-00.12'), Decimal("-0.12"))
self.assertEqual(f.clean('-000.12'), Decimal("-0.12"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '-000.123')
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 4 digits in total.'", f.clean, '-000.12345')
self.assertRaisesMessage(ValidationError, "'Enter a number.'", f.clean, '--0.12')
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_2(self):
f = DecimalField(max_digits=4, decimal_places=2, required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(f.clean('1'), Decimal("1"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, None)
self.assertEqual(f.min_value, None)
def test_decimalfield_3(self):
f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" min="0.5" max="1.5" type="number" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'", f.clean, '1.6')
self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'", f.clean, '0.4')
self.assertEqual(f.clean('1.5'), Decimal("1.5"))
self.assertEqual(f.clean('0.5'), Decimal("0.5"))
self.assertEqual(f.clean('.5'), Decimal("0.5"))
self.assertEqual(f.clean('00.50'), Decimal("0.50"))
self.assertEqual(f.max_digits, 4)
self.assertEqual(f.decimal_places, 2)
self.assertEqual(f.max_value, Decimal('1.5'))
self.assertEqual(f.min_value, Decimal('0.5'))
def test_decimalfield_4(self):
f = DecimalField(decimal_places=2)
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 2 decimal places.'", f.clean, '0.00000001')
def test_decimalfield_5(self):
f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('0000000.10'), Decimal("0.1"))
# But a leading 0 before the . doesn't count towards max_digits
self.assertEqual(f.clean('0000000.100'), Decimal("0.100"))
# Only leading whole zeros "collapse" to one digit.
self.assertEqual(f.clean('000000.02'), Decimal('0.02'))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 3 digits in total.'", f.clean, '000000.0002')
self.assertEqual(f.clean('.002'), Decimal("0.002"))
def test_decimalfield_6(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('.01'), Decimal(".01"))
self.assertRaisesMessage(ValidationError, "'Ensure that there are no more than 0 digits before the decimal point.'", f.clean, '1.1')
def test_decimalfield_scientific(self):
f = DecimalField(max_digits=2, decimal_places=2)
self.assertEqual(f.clean('1E+2'), Decimal('1E+2'))
self.assertEqual(f.clean('1e+2'), Decimal('1E+2'))
with self.assertRaisesMessage(ValidationError, "Ensure that there are no more"):
f.clean('0.546e+2')
def test_decimalfield_widget_attrs(self):
f = DecimalField(max_digits=6, decimal_places=2)
self.assertEqual(f.widget_attrs(Widget()), {})
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '0.01'})
f = DecimalField(max_digits=10, decimal_places=0)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1'})
f = DecimalField(max_digits=19, decimal_places=19)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': '1e-19'})
f = DecimalField(max_digits=20)
self.assertEqual(f.widget_attrs(NumberInput()), {'step': 'any'})
f = DecimalField(max_digits=6, widget=NumberInput(attrs={'step': '0.01'}))
self.assertWidgetRendersTo(f, '<input step="0.01" name="f" type="number" id="id_f" />')
def test_decimalfield_localized(self):
"""
Make sure localized DecimalField's widget renders to a text input with
no number input specific attributes.
"""
f = DecimalField(localize=True)
self.assertWidgetRendersTo(f, '<input id="id_f" name="f" type="text" />')
def test_decimalfield_changed(self):
f = DecimalField(max_digits=2, decimal_places=2)
d = Decimal("0.1")
self.assertFalse(f.has_changed(d, '0.10'))
self.assertTrue(f.has_changed(d, '0.101'))
with translation.override('fr'), self.settings(USE_L10N=True):
f = DecimalField(max_digits=2, decimal_places=2, localize=True)
localized_d = formats.localize_input(d) # -> '0,1' in French
self.assertFalse(f.has_changed(d, localized_d))
# DateField ###################################################################
def test_datefield_1(self):
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006-10-25'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('10/25/06'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('Oct 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('October 25, 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October 2006'))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('25 October, 2006'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-4-31')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '200a-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '25/10/06')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_datefield_2(self):
f = DateField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datefield_3(self):
f = DateField(input_formats=['%Y %m %d'])
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.date(2006, 10, 25), f.clean('2006 10 25'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '2006-10-25')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/2006')
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, '10/25/06')
def test_datefield_4(self):
# Test whitespace stripping behavior (#5714)
f = DateField()
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 10/25/06 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' Oct 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' October 25, 2006 '))
self.assertEqual(datetime.date(2006, 10, 25), f.clean(' 25 October 2006 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ' ')
def test_datefield_5(self):
# Test null bytes (#18982)
f = DateField()
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, 'a\x00b')
@ignore_warnings(category=RemovedInDjango110Warning) # for _has_changed
def test_datefield_changed(self):
format = '%d/%m/%Y'
f = DateField(input_formats=[format])
d = datetime.date(2007, 9, 17)
self.assertFalse(f.has_changed(d, '17/09/2007'))
# Test for deprecated behavior _has_changed
self.assertFalse(f._has_changed(d, '17/09/2007'))
def test_datefield_strptime(self):
"""Test that field.strptime doesn't raise an UnicodeEncodeError (#16123)"""
f = DateField()
try:
f.strptime('31 мая 2011', '%d-%b-%y')
except Exception as e:
# assertIsInstance or assertRaises cannot be used because UnicodeEncodeError
# is a subclass of ValueError
self.assertEqual(e.__class__, ValueError)
# TimeField ###################################################################
def test_timefield_1(self):
f = TimeField()
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(14, 25), f.clean('14:25'))
self.assertEqual(datetime.time(14, 25, 59), f.clean('14:25:59'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '1:24 p.m.')
def test_timefield_2(self):
f = TimeField(input_formats=['%I:%M %p'])
self.assertEqual(datetime.time(14, 25), f.clean(datetime.time(14, 25)))
self.assertEqual(datetime.time(14, 25, 59), f.clean(datetime.time(14, 25, 59)))
self.assertEqual(datetime.time(4, 25), f.clean('4:25 AM'))
self.assertEqual(datetime.time(16, 25), f.clean('4:25 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, '14:30:45')
def test_timefield_3(self):
f = TimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.time(14, 25), f.clean(' 14:25 '))
self.assertEqual(datetime.time(14, 25, 59), f.clean(' 14:25:59 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ' ')
def test_timefield_changed(self):
t1 = datetime.time(12, 51, 34, 482548)
t2 = datetime.time(12, 51)
f = TimeField(input_formats=['%H:%M', '%H:%M %p'])
self.assertTrue(f.has_changed(t1, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51'))
self.assertFalse(f.has_changed(t2, '12:51 PM'))
# DateTimeField ###############################################################
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 59, 200), f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, '2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'", f.clean, ' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
# DurationField ###########################################################
def test_durationfield_1(self):
f = DurationField()
self.assertEqual(datetime.timedelta(seconds=30), f.clean('30'))
self.assertEqual(
datetime.timedelta(minutes=15, seconds=30),
f.clean('15:30')
)
self.assertEqual(
datetime.timedelta(hours=1, minutes=15, seconds=30),
f.clean('1:15:30')
)
self.assertEqual(
datetime.timedelta(
days=1, hours=1, minutes=15, seconds=30, milliseconds=300),
f.clean('1 1:15:30.3')
)
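        # Note (illustrative): the strings above follow Django's duration
        # format "[DD] [[HH:]MM:]ss[.uuuuuu]", so '1 1:15:30.3' parses as one
        # day, one hour, fifteen minutes, and 30.3 seconds.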
def test_durationfield_2(self):
class DurationForm(Form):
duration = DurationField(initial=datetime.timedelta(hours=1))
f = DurationForm()
self.assertHTMLEqual(
'<input id="id_duration" type="text" name="duration" value="01:00:00">',
str(f['duration'])
)
def test_durationfield_prepare_value(self):
field = DurationField()
td = datetime.timedelta(minutes=15, seconds=30)
self.assertEqual(field.prepare_value(td), duration_string(td))
self.assertEqual(field.prepare_value('arbitrary'), 'arbitrary')
self.assertIsNone(field.prepare_value(None))
# RegexField ##################################################################
def test_regexfield_1(self):
f = RegexField('^[0-9][A-F][0-9]$')
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_regexfield_2(self):
f = RegexField('^[0-9][A-F][0-9]$', required=False)
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertEqual('', f.clean(''))
def test_regexfield_3(self):
f = RegexField(re.compile('^[0-9][A-F][0-9]$'))
self.assertEqual('2A2', f.clean('2A2'))
self.assertEqual('3F3', f.clean('3F3'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '3G3')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, ' 2A2')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '2A2 ')
@ignore_warnings(category=RemovedInDjango110Warning) # error_message deprecation
def test_regexfield_4(self):
f = RegexField('^[0-9][0-9][0-9][0-9]$', error_message='Enter a four-digit number.')
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, '123')
self.assertRaisesMessage(ValidationError, "'Enter a four-digit number.'", f.clean, 'abcd')
def test_regexfield_5(self):
f = RegexField('^[0-9]+$', min_length=5, max_length=10)
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 5 characters (it has 3).'", f.clean, '123')
six.assertRaisesRegex(self, ValidationError, "'Ensure this value has at least 5 characters \(it has 3\)\.', u?'Enter a valid value\.'", f.clean, 'abc')
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 10 characters (it has 11).'", f.clean, '12345678901')
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, '12345a')
def test_regexfield_6(self):
"""
        Ensure that RegexField works with unicode characters.
Refs #.
"""
f = RegexField('^\w+$')
self.assertEqual('éèøçÎÎ你好', f.clean('éèøçÎÎ你好'))
def test_change_regex_after_init(self):
f = RegexField('^[a-z]+$')
f.regex = '^[0-9]+$'
self.assertEqual('1234', f.clean('1234'))
self.assertRaisesMessage(ValidationError, "'Enter a valid value.'", f.clean, 'abcd')
# EmailField ##################################################################
# See also validators tests for validate_email specific tests
def test_emailfield_1(self):
f = EmailField()
self.assertWidgetRendersTo(f, '<input type="email" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
self.assertEqual('[email protected]\xe4\xf6\xfc\xdfabc.part.com',
f.clean('[email protected]äöüßabc.part.com'))
def test_email_regexp_for_performance(self):
f = EmailField()
        # Check for the runaway-regex (catastrophic backtracking) security
        # problem. This will take an extremely long time if the fix isn't in place.
addr = '[email protected]'
self.assertEqual(addr, f.clean(addr))
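    # Illustrative background (not part of the original suite): catastrophic
    # backtracking comes from regexes with nested quantifiers, e.g.:
    #     import re
    #     re.match(r'^([a-z]+)+$', 'a' * 30 + '!')  # explores an exponential
    #                                               # number of partial matches
    # The email validator's regex avoids such patterns, which is what the
    # timing-sensitive test above guards against.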
def test_emailfield_not_required(self):
f = EmailField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertEqual('[email protected]', f.clean(' [email protected] \t \t '))
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'foo')
def test_emailfield_min_max_length(self):
f = EmailField(min_length=10, max_length=15)
self.assertWidgetRendersTo(f, '<input id="id_f" type="email" name="f" maxlength="15" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 10 characters (it has 9).'", f.clean, '[email protected]')
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 15 characters (it has 20).'", f.clean, '[email protected]')
# FileField ##################################################################
def test_filefield_1(self):
f = FileField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '', '')
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None, '')
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, SimpleUploadedFile('', b''), '')
self.assertEqual('files/test3.pdf', f.clean(None, 'files/test3.pdf'))
self.assertRaisesMessage(ValidationError, "'No file was submitted. Check the encoding type on the form.'", f.clean, 'some content that is not a file')
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', None))
self.assertRaisesMessage(ValidationError, "'The submitted file is empty.'", f.clean, SimpleUploadedFile('name', b''))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8')))))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'), 'files/test4.pdf')))
def test_filefield_2(self):
f = FileField(max_length=5)
self.assertRaisesMessage(ValidationError, "'Ensure this filename has at most 5 characters (it has 18).'", f.clean, SimpleUploadedFile('test_maxlength.txt', b'hello world'))
self.assertEqual('files/test1.pdf', f.clean('', 'files/test1.pdf'))
self.assertEqual('files/test2.pdf', f.clean(None, 'files/test2.pdf'))
self.assertEqual(SimpleUploadedFile, type(f.clean(SimpleUploadedFile('name', b'Some File Content'))))
def test_filefield_3(self):
f = FileField(allow_empty_file=True)
self.assertEqual(SimpleUploadedFile,
type(f.clean(SimpleUploadedFile('name', b''))))
def test_filefield_changed(self):
        '''
        Test the behavior of has_changed for FileField. The value of data will
        most likely come from request.FILES. The initial value will likely be a
        filename stored in the database. Since the initial value is of no use to
        a FileField, it is ignored.
        '''
f = FileField()
# No file was uploaded and no initial data.
self.assertFalse(f.has_changed('', None))
# A file was uploaded and no initial data.
self.assertTrue(f.has_changed('', {'filename': 'resume.txt', 'content': 'My resume'}))
# A file was not uploaded, but there is initial data
self.assertFalse(f.has_changed('resume.txt', None))
# A file was uploaded and there is initial data (file identity is not dealt
# with here)
self.assertTrue(f.has_changed('resume.txt', {'filename': 'resume.txt', 'content': 'My resume'}))
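    # Rough sketch (illustrative, not Django's exact source) of the behavior
    # exercised above -- only a freshly uploaded file counts as a change:
    #     def has_changed(self, initial, data):
    #         return data is not None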
# ImageField ##################################################################
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_image_after_clean(self):
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.png'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.png', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('PNG', uploaded_file.image.format)
self.assertEqual('image/png', uploaded_file.content_type)
@skipIf(Image is None, "Pillow is required to test ImageField")
def test_imagefield_annotate_with_bitmap_image_after_clean(self):
"""
This also tests the situation when Pillow doesn't detect the MIME type
of the image (#24948).
"""
from PIL.BmpImagePlugin import BmpImageFile
try:
Image.register_mime(BmpImageFile.format, None)
f = ImageField()
img_path = os.path.dirname(upath(__file__)) + '/filepath_test_files/1x1.bmp'
with open(img_path, 'rb') as img_file:
img_data = img_file.read()
img_file = SimpleUploadedFile('1x1.bmp', img_data)
img_file.content_type = 'text/plain'
uploaded_file = f.clean(img_file)
self.assertEqual('BMP', uploaded_file.image.format)
self.assertIsNone(uploaded_file.content_type)
finally:
Image.register_mime(BmpImageFile.format, 'image/bmp')
# URLField ##################################################################
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" />')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('http://localhost', f.clean('http://localhost'))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com.', f.clean('http://example.com.'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertEqual('http://www.example.com:8000/test', f.clean('http://www.example.com:8000/test'))
self.assertEqual('http://valid-with-hyphens.com', f.clean('valid-with-hyphens.com'))
self.assertEqual('http://subdomain.domain.com', f.clean('subdomain.domain.com'))
self.assertEqual('http://200.8.9.10', f.clean('http://200.8.9.10'))
self.assertEqual('http://200.8.9.10:8000/test', f.clean('http://200.8.9.10:8000/test'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'com.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://invalid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://-invalid.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.alid-.com')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://inv-.-alid.com')
self.assertEqual('http://valid-----hyphens.com', f.clean('http://valid-----hyphens.com'))
self.assertEqual('http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah', f.clean('http://some.idn.xyzäöüßabc.domain.com:123/blah'))
self.assertEqual('http://www.example.com/s/http://code.djangoproject.com/ticket/13804', f.clean('www.example.com/s/http://code.djangoproject.com/ticket/13804'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, '[a')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://[a')
def test_url_regex_ticket11198(self):
f = URLField()
        # hangs "forever" if the catastrophic backtracking from ticket #11198 is not fixed
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://%s' % ("X" * 60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://www.example.com', f.clean('http://www.example.com'))
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://example.')
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 'http://.com')
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(f, '<input id="id_f" type="url" name="f" maxlength="20" />')
self.assertRaisesMessage(ValidationError, "'Ensure this value has at least 15 characters (it has 12).'", f.clean, 'http://f.com')
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 37).'", f.clean, 'http://abcdefghijklmnopqrstuvwxyz.com')
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual('http://example.com', f.clean('example.com'))
self.assertEqual('', f.clean(''))
self.assertEqual('https://example.com', f.clean('https://example.com'))
def test_urlfield_7(self):
f = URLField()
self.assertEqual('http://example.com', f.clean('http://example.com'))
self.assertEqual('http://example.com/test', f.clean('http://example.com/test'))
self.assertEqual('http://example.com?some_param=some_value',
f.clean('http://example.com?some_param=some_value'))
def test_urlfield_9(self):
f = URLField()
urls = (
'http://עברית.idn.icann.org/',
'http://sãopaulo.com/',
'http://sãopaulo.com.br/',
'http://пример.испытание/',
'http://مثال.إختبار/',
'http://例子.测试/',
'http://例子.測試/',
'http://उदाहरण.परीक्षा/',
'http://例え.テスト/',
'http://مثال.آزمایشی/',
'http://실례.테스트/',
'http://العربية.idn.icann.org/',
)
for url in urls:
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""Test URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
'http://[12:34::3a53]/',
'http://[a34:9238::]:8080/',
)
for url in urls:
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'", f.clean, 23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean('http://example.com/ '), 'http://example.com/')
# BooleanField ################################################################
def test_booleanfield_1(self):
f = BooleanField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(True, f.clean(True))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, False)
self.assertEqual(True, f.clean(1))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 0)
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(True, f.clean('True'))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, 'False')
def test_booleanfield_2(self):
f = BooleanField(required=False)
self.assertEqual(False, f.clean(''))
self.assertEqual(False, f.clean(None))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertEqual(True, f.clean(1))
self.assertEqual(False, f.clean(0))
self.assertEqual(True, f.clean('1'))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('Django rocks'))
self.assertEqual(False, f.clean('False'))
self.assertEqual(False, f.clean('false'))
self.assertEqual(False, f.clean('FaLsE'))
def test_boolean_picklable(self):
self.assertIsInstance(pickle.loads(pickle.dumps(BooleanField())), BooleanField)
def test_booleanfield_changed(self):
f = BooleanField()
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed('', None))
self.assertFalse(f.has_changed('', ''))
self.assertTrue(f.has_changed(False, 'on'))
self.assertFalse(f.has_changed(True, 'on'))
self.assertTrue(f.has_changed(True, ''))
# Initial value may have mutated to a string due to show_hidden_initial (#19537)
self.assertTrue(f.has_changed('False', 'on'))
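        # Why the string initial works (illustrative): BooleanField.to_python
        # normalizes the strings 'false', 'False', and '0' to False before
        # comparing, so an initial mutated to a string still compares correctly.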
# ChoiceField #################################################################
def test_choicefield_1(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_2(self):
f = ChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, '3')
def test_choicefield_3(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')])
self.assertEqual('J', f.clean('J'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. John is not one of the available choices.'", f.clean, 'John')
def test_choicefield_4(self):
f = ChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual('1', f.clean(1))
self.assertEqual('1', f.clean('1'))
self.assertEqual('3', f.clean(3))
self.assertEqual('3', f.clean('3'))
self.assertEqual('5', f.clean(5))
self.assertEqual('5', f.clean('5'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, '6')
def test_choicefield_callable(self):
choices = lambda: [('J', 'John'), ('P', 'Paul')]
f = ChoiceField(choices=choices)
self.assertEqual('J', f.clean('J'))
def test_choicefield_callable_may_evaluate_to_different_values(self):
choices = []
def choices_as_callable():
return choices
class ChoiceFieldForm(Form):
choicefield = ChoiceField(choices=choices_as_callable)
choices = [('J', 'John')]
form = ChoiceFieldForm()
self.assertEqual([('J', 'John')], list(form.fields['choicefield'].choices))
choices = [('P', 'Paul')]
form = ChoiceFieldForm()
self.assertEqual([('P', 'Paul')], list(form.fields['choicefield'].choices))
def test_choicefield_disabled(self):
f = ChoiceField(choices=[('J', 'John'), ('P', 'Paul')], disabled=True)
self.assertWidgetRendersTo(f,
'<select id="id_f" name="f" disabled><option value="J">John</option>'
'<option value="P">Paul</option></select>')
# TypedChoiceField ############################################################
# TypedChoiceField is just like ChoiceField, except that coerced types will
# be returned:
def test_typedchoicefield_1(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual(1, f.clean('1'))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, '2')
def test_typedchoicefield_2(self):
# Different coercion, same validation.
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual(1.0, f.clean('1'))
def test_typedchoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual(True, f.clean('-1'))
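    # Hypothetical alternative (not part of the original suite): to get the
    # intuitive truthiness, coerce explicitly instead of relying on bool():
    #     f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")],
    #                          coerce=lambda val: int(val) > 0)
    #     f.clean('-1')  # False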
def test_typedchoicefield_4(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, 'B')
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
def test_typedchoicefield_5(self):
# Non-required fields aren't required
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual('', f.clean(''))
# If you want cleaning an empty value to return a different type, tell the field
def test_typedchoicefield_6(self):
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean(''))
def test_typedchoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
self.assertFalse(f.has_changed(1, '1'))
self.assertFalse(f.has_changed('1', '1'))
def test_typedchoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedChoiceField(choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual(Decimal('1.2'), f.clean('2'))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, '3')
# NullBooleanField ############################################################
def test_nullbooleanfield_1(self):
f = NullBooleanField()
self.assertIsNone(f.clean(''))
self.assertEqual(True, f.clean(True))
self.assertEqual(False, f.clean(False))
self.assertIsNone(f.clean(None))
self.assertEqual(False, f.clean('0'))
self.assertEqual(True, f.clean('1'))
self.assertIsNone(f.clean('2'))
self.assertIsNone(f.clean('3'))
self.assertIsNone(f.clean('hello'))
self.assertEqual(True, f.clean('true'))
self.assertEqual(False, f.clean('false'))
def test_nullbooleanfield_2(self):
# Make sure that the internal value is preserved if using HiddenInput (#7753)
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm()
self.assertHTMLEqual('<input type="hidden" name="hidden_nullbool1" value="True" id="id_hidden_nullbool1" /><input type="hidden" name="hidden_nullbool2" value="False" id="id_hidden_nullbool2" />', str(f))
def test_nullbooleanfield_3(self):
class HiddenNullBooleanForm(Form):
hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True)
hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False)
f = HiddenNullBooleanForm({'hidden_nullbool1': 'True', 'hidden_nullbool2': 'False'})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['hidden_nullbool1'])
self.assertEqual(False, f.cleaned_data['hidden_nullbool2'])
def test_nullbooleanfield_4(self):
# Make sure we're compatible with MySQL, which uses 0 and 1 for its boolean
# values. (#9609)
NULLBOOL_CHOICES = (('1', 'Yes'), ('0', 'No'), ('', 'Unknown'))
class MySQLNullBooleanForm(Form):
nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES))
f = MySQLNullBooleanForm({'nullbool0': '1', 'nullbool1': '0', 'nullbool2': ''})
self.assertIsNone(f.full_clean())
self.assertEqual(True, f.cleaned_data['nullbool0'])
self.assertEqual(False, f.cleaned_data['nullbool1'])
self.assertIsNone(f.cleaned_data['nullbool2'])
def test_nullbooleanfield_changed(self):
f = NullBooleanField()
self.assertTrue(f.has_changed(False, None))
self.assertTrue(f.has_changed(None, False))
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed(False, False))
self.assertTrue(f.has_changed(True, False))
self.assertTrue(f.has_changed(True, None))
self.assertTrue(f.has_changed(True, False))
# MultipleChoiceField #########################################################
def test_multiplechoicefield_1(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ())
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_2(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two')], required=False)
self.assertEqual([], f.clean(''))
self.assertEqual([], f.clean(None))
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '2'], f.clean(['1', '2']))
self.assertEqual(['1', '2'], f.clean([1, '2']))
self.assertEqual(['1', '2'], f.clean((1, '2')))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
self.assertEqual([], f.clean([]))
self.assertEqual([], f.clean(()))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 3 is not one of the available choices.'", f.clean, ['3'])
def test_multiplechoicefield_3(self):
f = MultipleChoiceField(choices=[('Numbers', (('1', 'One'), ('2', 'Two'))), ('Letters', (('3', 'A'), ('4', 'B'))), ('5', 'Other')])
self.assertEqual(['1'], f.clean([1]))
self.assertEqual(['1'], f.clean(['1']))
self.assertEqual(['1', '5'], f.clean([1, 5]))
self.assertEqual(['1', '5'], f.clean([1, '5']))
self.assertEqual(['1', '5'], f.clean(['1', 5]))
self.assertEqual(['1', '5'], f.clean(['1', '5']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['6'])
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 6 is not one of the available choices.'", f.clean, ['1', '6'])
def test_multiplechoicefield_changed(self):
f = MultipleChoiceField(choices=[('1', 'One'), ('2', 'Two'), ('3', 'Three')])
self.assertFalse(f.has_changed(None, None))
self.assertFalse(f.has_changed([], None))
self.assertTrue(f.has_changed(None, ['1']))
self.assertFalse(f.has_changed([1, 2], ['1', '2']))
self.assertFalse(f.has_changed([2, 1], ['1', '2']))
self.assertTrue(f.has_changed([1, 2], ['1']))
self.assertTrue(f.has_changed([1, 2], ['1', '3']))
# TypedMultipleChoiceField ############################################################
# TypedMultipleChoiceField is just like MultipleChoiceField, except that coerced types
# will be returned:
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. 2 is not one of the available choices.'", f.clean, ['1', '2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
self.assertRaisesMessage(ValidationError, "'Select a valid choice. B is not one of the available choices.'", f.clean, ['B'])
# Required fields require values
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
Test a coerce function which results in a value not present in choices.
Refs #21397.
"""
def coerce_func(val):
return Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([Decimal('1.2')], f.clean(['2']))
self.assertRaisesMessage(ValidationError,
"'This field is required.'", f.clean, [])
self.assertRaisesMessage(ValidationError,
"'Select a valid choice. 3 is not one of the available choices.'",
f.clean, ['3'])
# ComboField ##################################################################
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, '[email protected]')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('[email protected]', f.clean('[email protected]'))
self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'", f.clean, '[email protected]')
self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'", f.clean, 'not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
# FilePathField ###############################################################
def test_filepathfield_1(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
self.assertTrue(fix_os_paths(path).endswith('/django/forms/'))
def test_filepathfield_2(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path)
f.choices = [p for p in f.choices if p[0].endswith('.py')]
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
self.assertRaisesMessage(ValidationError, "'Select a valid choice. fields.py is not one of the available choices.'", f.clean, 'fields.py')
        self.assertTrue(fix_os_paths(f.clean(path + 'fields.py')).endswith('/django/forms/fields.py'))
def test_filepathfield_3(self):
path = upath(forms.__file__)
path = os.path.dirname(os.path.abspath(path)) + '/'
f = FilePathField(path=path, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_4(self):
path = os.path.abspath(upath(forms.__file__))
path = os.path.dirname(path) + '/'
f = FilePathField(path=path, recursive=True, match='^.*?\.py$')
f.choices.sort()
expected = [
('/django/forms/__init__.py', '__init__.py'),
('/django/forms/extras/__init__.py', 'extras/__init__.py'),
('/django/forms/extras/widgets.py', 'extras/widgets.py'),
('/django/forms/fields.py', 'fields.py'),
('/django/forms/forms.py', 'forms.py'),
('/django/forms/formsets.py', 'formsets.py'),
('/django/forms/models.py', 'models.py'),
('/django/forms/utils.py', 'utils.py'),
('/django/forms/widgets.py', 'widgets.py')
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
def test_filepathfield_folders(self):
path = os.path.dirname(upath(__file__)) + '/filepath_test_files/'
f = FilePathField(path=path, allow_folders=True, allow_files=False)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
]
for exp, got in zip(expected, fix_os_paths(f.choices)):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
f = FilePathField(path=path, allow_folders=True, allow_files=True)
f.choices.sort()
expected = [
('/tests/forms_tests/tests/filepath_test_files/.dot-file', '.dot-file'),
('/tests/forms_tests/tests/filepath_test_files/1x1.bmp', '1x1.bmp'),
('/tests/forms_tests/tests/filepath_test_files/1x1.png', '1x1.png'),
('/tests/forms_tests/tests/filepath_test_files/directory', 'directory'),
('/tests/forms_tests/tests/filepath_test_files/fake-image.jpg', 'fake-image.jpg'),
('/tests/forms_tests/tests/filepath_test_files/real-text-file.txt', 'real-text-file.txt'),
]
actual = fix_os_paths(f.choices)
self.assertEqual(len(expected), len(actual))
for exp, got in zip(expected, actual):
self.assertEqual(exp[1], got[1])
self.assertTrue(got[0].endswith(exp[0]))
# SplitDateTimeField ##########################################################
def test_splitdatetimefield_1(self):
from django.forms.widgets import SplitDateTimeWidget
f = SplitDateTimeField()
self.assertIsInstance(f.widget, SplitDateTimeWidget)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
def test_splitdatetimefield_2(self):
f = SplitDateTimeField(required=False)
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean([datetime.date(2006, 1, 10), datetime.time(7, 30)]))
self.assertEqual(datetime.datetime(2006, 1, 10, 7, 30), f.clean(['2006-01-10', '07:30']))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(['']))
self.assertIsNone(f.clean(['', '']))
self.assertRaisesMessage(ValidationError, "'Enter a list of values.'", f.clean, 'hello')
six.assertRaisesRegex(self, ValidationError, "'Enter a valid date\.', u?'Enter a valid time\.'", f.clean, ['hello', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', 'there'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['hello', '07:30'])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10', ''])
self.assertRaisesMessage(ValidationError, "'Enter a valid time.'", f.clean, ['2006-01-10'])
self.assertRaisesMessage(ValidationError, "'Enter a valid date.'", f.clean, ['', '07:30'])
def test_splitdatetimefield_changed(self):
f = SplitDateTimeField(input_date_formats=['%d/%m/%Y'])
self.assertFalse(f.has_changed(['11/01/2012', '09:18:15'], ['11/01/2012', '09:18:15']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['2008-05-06', '12:40:00']))
self.assertFalse(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:40']))
self.assertTrue(f.has_changed(datetime.datetime(2008, 5, 6, 12, 40, 00), ['06/05/2008', '12:41']))
# GenericIPAddressField #######################################################
def test_generic_ipaddress_invalid_arguments(self):
self.assertRaises(ValueError, GenericIPAddressField, protocol="hamster")
self.assertRaises(ValueError, GenericIPAddressField, protocol="ipv4", unpack_ipv4=True)
def test_generic_ipaddress_as_generic(self):
        # The edge cases of the IPv6 validation code are not deeply tested
        # here; they are covered in the tests for django.utils.ipv6.
f = GenericIPAddressField()
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_ipv4_only(self):
f = GenericIPAddressField(protocol="IPv4")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertEqual(f.clean(' 127.0.0.1 '), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '256.125.1.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, 'fe80::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 address.'", f.clean, '2a02::223:6cff:fe8a:2e8a')
def test_generic_ipaddress_as_ipv6_only(self):
f = GenericIPAddressField(protocol="IPv6")
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_as_generic_not_required(self):
f = GenericIPAddressField(required=False)
self.assertEqual(f.clean(''), '')
self.assertEqual(f.clean(None), '')
self.assertEqual(f.clean('127.0.0.1'), '127.0.0.1')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, 'foo')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '127.0.0.')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '1.2.3.4.5')
self.assertRaisesMessage(ValidationError, "'Enter a valid IPv4 or IPv6 address.'", f.clean, '256.125.1.5')
self.assertEqual(f.clean(' fe80::223:6cff:fe8a:2e8a '), 'fe80::223:6cff:fe8a:2e8a')
self.assertEqual(f.clean(' 2a02::223:6cff:fe8a:2e8a '), '2a02::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '12345:2:3:4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3::4')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, 'foo::223:6cff:fe8a:2e8a')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1::2:3:4:5:6:7:8')
self.assertRaisesMessage(ValidationError, "'This is not a valid IPv6 address.'", f.clean, '1:2')
def test_generic_ipaddress_normalization(self):
# Test the normalizing code
f = GenericIPAddressField()
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' ::ffff:10.10.10.10 '), '::ffff:10.10.10.10')
self.assertEqual(f.clean(' 2001:000:a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
self.assertEqual(f.clean(' 2001::a:0000:0:fe:fe:beef '), '2001:0:a::fe:fe:beef')
f = GenericIPAddressField(unpack_ipv4=True)
self.assertEqual(f.clean(' ::ffff:0a0a:0a0a'), '10.10.10.10')
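        # For reference (illustrative, stdlib rather than Django): the same
        # unpacking is available via the ipaddress module:
        #     import ipaddress
        #     ipaddress.IPv6Address('::ffff:0a0a:0a0a').ipv4_mapped
        #     # -> IPv4Address('10.10.10.10')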
# SlugField ###################################################################
def test_slugfield_normalization(self):
f = SlugField()
self.assertEqual(f.clean(' aa-bb-cc '), 'aa-bb-cc')
def test_slugfield_unicode_normalization(self):
f = SlugField(allow_unicode=True)
self.assertEqual(f.clean('a'), 'a')
self.assertEqual(f.clean('1'), '1')
self.assertEqual(f.clean('a1'), 'a1')
self.assertEqual(f.clean('你好'), '你好')
self.assertEqual(f.clean(' 你-好 '), '你-好')
self.assertEqual(f.clean('ıçğüş'), 'ıçğüş')
self.assertEqual(f.clean('foo-ıç-bar'), 'foo-ıç-bar')
# UUIDField ###################################################################
def test_uuidfield_1(self):
field = UUIDField()
value = field.clean('550e8400e29b41d4a716446655440000')
self.assertEqual(value, uuid.UUID('550e8400e29b41d4a716446655440000'))
def test_uuidfield_2(self):
field = UUIDField(required=False)
value = field.clean('')
self.assertEqual(value, None)
def test_uuidfield_3(self):
field = UUIDField()
with self.assertRaises(ValidationError) as cm:
field.clean('550e8400')
self.assertEqual(cm.exception.messages[0], 'Enter a valid UUID.')
def test_uuidfield_4(self):
field = UUIDField()
value = field.prepare_value(uuid.UUID('550e8400e29b41d4a716446655440000'))
self.assertEqual(value, '550e8400e29b41d4a716446655440000')
|
MSOpenTech/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/courseware_index.py | 6 | """ Code to allow module store to interface with courseware index """
from __future__ import absolute_import
import logging
from django.utils.translation import ugettext as _
from opaque_keys.edx.locator import CourseLocator
from search.search_engine_base import SearchEngine
from eventtracking import tracker
from . import ModuleStoreEnum
from .exceptions import ItemNotFoundError
# Use default index and document names for now
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
log = logging.getLogger('edx.modulestore')
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
class CoursewareSearchIndexer(object):
"""
Class to perform indexing for courseware search from different modulestores
"""
@staticmethod
def add_to_search_index(modulestore, location, delete=False, raise_on_error=False):
"""
        Add the given location and its children to the courseware search index
"""
error_list = []
indexed_count = 0
# TODO - inline for now, need to move this out to a celery task
searcher = SearchEngine.get_search_engine(INDEX_NAME)
if not searcher:
return
if isinstance(location, CourseLocator):
course_key = location
else:
course_key = location.course_key
location_info = {
"course": unicode(course_key),
}
def _fetch_item(item_location):
""" Fetch the item from the modulestore location, log if not found, but continue """
try:
if isinstance(item_location, CourseLocator):
item = modulestore.get_course(item_location)
else:
item = modulestore.get_item(item_location, revision=ModuleStoreEnum.RevisionOption.published_only)
except ItemNotFoundError:
log.warning('Cannot find: %s', item_location)
return None
return item
def index_item_location(item_location, current_start_date):
""" add this item to the search index """
item = _fetch_item(item_location)
if not item:
return
is_indexable = hasattr(item, "index_dictionary")
# if it's not indexable and it does not have children, then ignore
if not is_indexable and not item.has_children:
return
            # if it has a defined start date, apply it to its children as well
if item.start and (not current_start_date or item.start > current_start_date):
current_start_date = item.start
if item.has_children:
for child_loc in item.children:
index_item_location(child_loc, current_start_date)
item_index = {}
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it has something to add to the index, then add it
if item_index_dictionary:
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = unicode(item.scope_ids.usage_id)
if current_start_date:
item_index['start_date'] = current_start_date
searcher.index(DOCUMENT_TYPE, item_index)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning('Could not index item: %s - %s', item_location, unicode(err))
error_list.append(_('Could not index item: {}').format(item_location))
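        # Note on start dates (an illustrative summary of the recursion above):
        # a child inherits the latest start date seen on the path from the
        # course root, so a unit inside a section that opens later is indexed
        # with the later date.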
def remove_index_item_location(item_location):
""" remove this item from the search index """
item = _fetch_item(item_location)
if item:
if item.has_children:
for child_loc in item.children:
remove_index_item_location(child_loc)
searcher.remove(DOCUMENT_TYPE, unicode(item.scope_ids.usage_id))
try:
if delete:
remove_index_item_location(location)
else:
index_item_location(location, None)
indexed_count += 1
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
"Indexing error encountered, courseware index may be out of date %s - %s",
course_key,
unicode(err)
)
error_list.append(_('General indexing error occurred'))
if raise_on_error and error_list:
raise SearchIndexingError(_('Error(s) present during indexing'), error_list)
return indexed_count
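    # Hypothetical usage sketch (the store and location names are assumptions,
    # not defined in this module):
    #     from xmodule.modulestore.django import modulestore
    #     CoursewareSearchIndexer.add_to_search_index(
    #         modulestore(), course.location, raise_on_error=True)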
@classmethod
def do_publish_index(cls, modulestore, location, delete=False, raise_on_error=False):
"""
        Add the published section and its children to the courseware search index
"""
indexed_count = cls.add_to_search_index(modulestore, location, delete, raise_on_error)
cls._track_index_request('edx.course.index.published', indexed_count, str(location))
return indexed_count
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course
"""
indexed_count = cls.add_to_search_index(modulestore, course_key, delete=False, raise_on_error=True)
cls._track_index_request('edx.course.index.reindexed', indexed_count)
return indexed_count
@staticmethod
def _track_index_request(event_name, indexed_count, location=None):
"""Track content index requests.
        Arguments:
            event_name (str): Name of the event to be logged.
            indexed_count (int): Number of items that were indexed.
            location (str): The ID of the content that was indexed.
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': 'courseware_index',
}
if location:
data['location_id'] = location
tracker.emit(
event_name,
data
)
|
datapythonista/pandas | refs/heads/master | pandas/__init__.py | 3 | # flake8: noqa
__docformat__ = "restructuredtext"
# Let users know if they're missing any of our hard dependencies
hard_dependencies = ("numpy", "pytz", "dateutil")
missing_dependencies = []
for dependency in hard_dependencies:
try:
__import__(dependency)
except ImportError as e:
missing_dependencies.append(f"{dependency}: {e}")
if missing_dependencies:
raise ImportError(
"Unable to import required dependencies:\n" + "\n".join(missing_dependencies)
)
del hard_dependencies, dependency, missing_dependencies
# numpy compat
from pandas.compat import (
np_version_under1p18 as _np_version_under1p18,
is_numpy_dev as _is_numpy_dev,
)
try:
from pandas._libs import hashtable as _hashtable, lib as _lib, tslib as _tslib
except ImportError as e: # pragma: no cover
    # crude string parsing here; using re would be overkill
module = str(e).replace("cannot import name ", "")
raise ImportError(
f"C extension: {module} not built. If you want to import "
"pandas from the source directory, you may need to run "
"'python setup.py build_ext --force' to build the C extensions first."
) from e
from pandas._config import (
get_option,
set_option,
reset_option,
describe_option,
option_context,
options,
)
# let init-time option registration happen
import pandas.core.config_init
from pandas.core.api import (
# dtype
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
Float32Dtype,
Float64Dtype,
CategoricalDtype,
PeriodDtype,
IntervalDtype,
DatetimeTZDtype,
StringDtype,
BooleanDtype,
# missing
NA,
isna,
isnull,
notna,
notnull,
# indexes
Index,
CategoricalIndex,
Int64Index,
UInt64Index,
RangeIndex,
Float64Index,
MultiIndex,
IntervalIndex,
TimedeltaIndex,
DatetimeIndex,
PeriodIndex,
IndexSlice,
# tseries
NaT,
Period,
period_range,
Timedelta,
timedelta_range,
Timestamp,
date_range,
bdate_range,
Interval,
interval_range,
DateOffset,
# conversion
to_numeric,
to_datetime,
to_timedelta,
# misc
Flags,
Grouper,
factorize,
unique,
value_counts,
NamedAgg,
array,
Categorical,
set_eng_float_format,
Series,
DataFrame,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.tseries.api import infer_freq
from pandas.tseries import offsets
from pandas.core.computation.api import eval
from pandas.core.reshape.api import (
concat,
lreshape,
melt,
wide_to_long,
merge,
merge_asof,
merge_ordered,
crosstab,
pivot,
pivot_table,
get_dummies,
cut,
qcut,
)
import pandas.api
from pandas.util._print_versions import show_versions
from pandas.io.api import (
# excel
ExcelFile,
ExcelWriter,
read_excel,
# parsers
read_csv,
read_fwf,
read_table,
# pickle
read_pickle,
to_pickle,
# pytables
HDFStore,
read_hdf,
# sql
read_sql,
read_sql_query,
read_sql_table,
# misc
read_clipboard,
read_parquet,
read_orc,
read_feather,
read_gbq,
read_html,
read_xml,
read_json,
read_stata,
read_sas,
read_spss,
)
from pandas.io.json import _json_normalize as json_normalize
from pandas.util._tester import test
import pandas.testing
import pandas.arrays
# use the closest tagged version if possible
from pandas._version import get_versions
v = get_versions()
__version__ = v.get("closest-tag", v["version"])
__git_version__ = v.get("full-revisionid")
del get_versions, v
# GH 27101
def __getattr__(name):
import warnings
if name == "datetime":
warnings.warn(
"The pandas.datetime class is deprecated "
"and will be removed from pandas in a future version. "
"Import from datetime module instead.",
FutureWarning,
stacklevel=2,
)
from datetime import datetime as dt
return dt
elif name == "np":
warnings.warn(
"The pandas.np module is deprecated "
"and will be removed from pandas in a future version. "
"Import numpy directly instead",
FutureWarning,
stacklevel=2,
)
import numpy as np
return np
elif name in {"SparseSeries", "SparseDataFrame"}:
warnings.warn(
f"The {name} class is removed from pandas. Accessing it from "
"the top-level namespace will also be removed in the next version",
FutureWarning,
stacklevel=2,
)
return type(name, (), {})
elif name == "SparseArray":
warnings.warn(
"The pandas.SparseArray class is deprecated "
"and will be removed from pandas in a future version. "
"Use pandas.arrays.SparseArray instead.",
FutureWarning,
stacklevel=2,
)
from pandas.core.arrays.sparse import SparseArray as _SparseArray
return _SparseArray
raise AttributeError(f"module 'pandas' has no attribute '{name}'")
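# Illustrative note (not part of pandas): this module-level __getattr__ relies
# on PEP 562 (Python 3.7+), which lets a package intercept attribute access to
# emit deprecation warnings lazily, e.g.:
#     import warnings
#     import pandas as pd
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         pd.np  # resolves via __getattr__ above and records a FutureWarning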
# module level doc-string
__doc__ = """
pandas - a powerful data analysis and manipulation library for Python
=====================================================================
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with "relational" or "labeled" data both
easy and intuitive. It aims to be the fundamental high-level building block for
doing practical, **real world** data analysis in Python. Additionally, it has
the broader goal of becoming **the most powerful and flexible open source data
analysis / manipulation tool available in any language**. It is already well on
its way toward this goal.
Main Features
-------------
Here are just a few of the things that pandas does well:
- Easy handling of missing data in floating point as well as non-floating
point data.
- Size mutability: columns can be inserted and deleted from DataFrame and
    higher dimensional objects.
- Automatic and explicit data alignment: objects can be explicitly aligned
to a set of labels, or the user can simply ignore the labels and let
`Series`, `DataFrame`, etc. automatically align the data for you in
computations.
- Powerful, flexible group by functionality to perform split-apply-combine
operations on data sets, for both aggregating and transforming data.
- Make it easy to convert ragged, differently-indexed data in other Python
and NumPy data structures into DataFrame objects.
- Intelligent label-based slicing, fancy indexing, and subsetting of large
data sets.
- Intuitive merging and joining data sets.
- Flexible reshaping and pivoting of data sets.
- Hierarchical labeling of axes (possible to have multiple labels per tick).
- Robust IO tools for loading data from flat files (CSV and delimited),
Excel files, databases, and saving/loading data from the ultrafast HDF5
format.
- Time series-specific functionality: date range generation and frequency
conversion, moving window statistics, date shifting and lagging.
"""
|
deshipu/micropython | refs/heads/master | tools/gen-cpydiff.py | 1 | # This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rami Ali
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" gen-cpydiff generates documentation which outlines operations that differ between MicroPython
and CPython. This script is called by the docs Makefile for html and Latex and may be run
manually using the command make gen-cpydiff. """
import os
import errno
import subprocess
import time
import re
from collections import namedtuple
# MicroPython supports the syntax of CPython 3.4 with some features from 3.5,
# so that version should be used to test for differences. If your default
# python3 executable is an older version, you can point the MICROPY_CPYTHON3
# environment variable at the correct executable.
if os.name == 'nt':
CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3.exe')
MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../windows/micropython.exe')
else:
CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../unix/micropython')
TESTPATH = '../tests/cpydiff/'
DOCPATH = '../docs/genrst/'
INDEXTEMPLATE = '../docs/differences/index_template.txt'
INDEX = 'index.rst'
HEADER = '.. This document was generated by tools/gen-cpydiff.py\n\n'
UIMPORTLIST = {'struct', 'collections', 'json'}
CLASSMAP = {'Core': 'Core Language', 'Types': 'Builtin Types'}
INDEXPRIORITY = ['syntax', 'core_language', 'builtin_types', 'modules']
RSTCHARS = ['=', '-', '~', '`', ':']
SPLIT = '"""\n|categories: |description: |cause: |workaround: '
TAB = ' '
Output = namedtuple('output', ['name', 'class_', 'desc', 'cause', 'workaround', 'code',
'output_cpy', 'output_upy', 'status'])
def readfiles():
""" Reads test files """
tests = list(filter(lambda x: x.endswith('.py'), os.listdir(TESTPATH)))
tests.sort()
files = []
for test in tests:
text = open(TESTPATH + test, 'r').read()
try:
class_, desc, cause, workaround, code = [x.rstrip() for x in \
list(filter(None, re.split(SPLIT, text)))]
output = Output(test, class_, desc, cause, workaround, code, '', '', '')
files.append(output)
        except ValueError:
            # the unpacking above raises ValueError when re.split does not
            # yield exactly five sections
print('Incorrect format in file ' + TESTPATH + test)
return files
def uimports(code):
""" converts CPython module names into MicroPython equivalents """
for uimport in UIMPORTLIST:
uimport = bytes(uimport, 'utf8')
code = code.replace(uimport, b'u' + uimport)
return code
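# Worked example (illustrative, not part of the original tool): only the
# modules in UIMPORTLIST are rewritten, and the replacement operates on bytes
# because the test sources are read in binary mode:
#
#     >>> uimports(b'import json\nimport sys')
#     b'import ujson\nimport sys'
#
# Note this is a plain substring replacement, so it would also rewrite the
# module name inside longer identifiers; the cpydiff test sources are written
# such that this does not occur.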
def run_tests(tests):
""" executes all tests """
results = []
for test in tests:
with open(TESTPATH + test.name, 'rb') as f:
input_cpy = f.read()
input_upy = uimports(input_cpy)
process = subprocess.Popen(CPYTHON3, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
output_cpy = [com.decode('utf8') for com in process.communicate(input_cpy)]
process = subprocess.Popen(MICROPYTHON, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
output_upy = [com.decode('utf8') for com in process.communicate(input_upy)]
if output_cpy[0] == output_upy[0] and output_cpy[1] == output_upy[1]:
status = 'Supported'
print('Supported operation!\nFile: ' + TESTPATH + test.name)
else:
status = 'Unsupported'
output = Output(test.name, test.class_, test.desc, test.cause,
test.workaround, test.code, output_cpy, output_upy, status)
results.append(output)
results.sort(key=lambda x: x.class_)
return results
def indent(block, spaces):
""" indents paragraphs of text for rst formatting """
new_block = ''
for line in block.split('\n'):
new_block += spaces + line + '\n'
return new_block
def gen_table(contents):
""" creates a table given any set of columns """
xlengths = []
ylengths = []
for column in contents:
col_len = 0
for entry in column:
lines = entry.split('\n')
for line in lines:
col_len = max(len(line) + 2, col_len)
xlengths.append(col_len)
for i in range(len(contents[0])):
ymax = 0
for j in range(len(contents)):
ymax = max(ymax, len(contents[j][i].split('\n')))
ylengths.append(ymax)
table_divider = '+' + ''.join(['-' * i + '+' for i in xlengths]) + '\n'
table = table_divider
for i in range(len(ylengths)):
row = [column[i] for column in contents]
row = [entry + '\n' * (ylengths[i]-len(entry.split('\n'))) for entry in row]
row = [entry.split('\n') for entry in row]
for j in range(ylengths[i]):
k = 0
for entry in row:
width = xlengths[k]
table += ''.join(['| {:{}}'.format(entry[j], width - 1)])
k += 1
table += '|\n'
table += table_divider
return table + '\n'
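# Worked example (illustrative): gen_table pads each cell to the widest line
# in its column and sizes each row to its tallest cell, so
#
#     gen_table([['CPy output:', 'a'], ['uPy output:', 'b']])
#
# returns the reST grid table
#
#     +-------------+-------------+
#     | CPy output: | uPy output: |
#     +-------------+-------------+
#     | a           | b           |
#     +-------------+-------------+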
def gen_rst(results):
""" creates restructured text documents to display tests """
# make sure the destination directory exists
try:
os.mkdir(DOCPATH)
except OSError as e:
if e.args[0] != errno.EEXIST and e.args[0] != errno.EISDIR:
raise
toctree = []
class_ = []
for output in results:
section = output.class_.split(',')
for i in range(len(section)):
section[i] = section[i].rstrip()
if section[i] in CLASSMAP:
section[i] = CLASSMAP[section[i]]
if i >= len(class_) or section[i] != class_[i]:
if i == 0:
filename = section[i].replace(' ', '_').lower()
rst = open(DOCPATH + filename + '.rst', 'w')
rst.write(HEADER)
rst.write(section[i] + '\n')
rst.write(RSTCHARS[0] * len(section[i]))
rst.write(time.strftime("\nGenerated %a %d %b %Y %X UTC\n\n", time.gmtime()))
toctree.append(filename)
else:
rst.write(section[i] + '\n')
rst.write(RSTCHARS[min(i, len(RSTCHARS)-1)] * len(section[i]))
rst.write('\n\n')
class_ = section
rst.write('.. _cpydiff_%s:\n\n' % output.name.rsplit('.', 1)[0])
rst.write(output.desc + '\n')
rst.write('~' * len(output.desc) + '\n\n')
if output.cause != 'Unknown':
rst.write('**Cause:** ' + output.cause + '\n\n')
if output.workaround != 'Unknown':
rst.write('**Workaround:** ' + output.workaround + '\n\n')
rst.write('Sample code::\n\n' + indent(output.code, TAB) + '\n')
output_cpy = indent(''.join(output.output_cpy[0:2]), TAB).rstrip()
output_cpy = ('::\n\n' if output_cpy != '' else '') + output_cpy
output_upy = indent(''.join(output.output_upy[0:2]), TAB).rstrip()
output_upy = ('::\n\n' if output_upy != '' else '') + output_upy
table = gen_table([['CPy output:', output_cpy], ['uPy output:', output_upy]])
rst.write(table)
template = open(INDEXTEMPLATE, 'r')
index = open(DOCPATH + INDEX, 'w')
index.write(HEADER)
index.write(template.read())
for section in INDEXPRIORITY:
if section in toctree:
index.write(indent(section + '.rst', TAB))
toctree.remove(section)
for section in toctree:
index.write(indent(section + '.rst', TAB))
def main():
""" Main function """
# set search path so that test scripts find the test modules (and no other ones)
os.environ['PYTHONPATH'] = TESTPATH
os.environ['MICROPYPATH'] = TESTPATH
files = readfiles()
results = run_tests(files)
gen_rst(results)
main()
|
clayg/swift | refs/heads/master | test/unit/common/test_storage_policy.py | 4 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.storage_policies """
import contextlib
import six
import logging
import unittest
import os
import mock
from functools import partial
from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE
import swift.common.storage_policy
from swift.common.storage_policy import (
StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies,
reload_storage_policies, get_policy_string, split_policy_string,
BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY,
VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache)
from swift.common.ring import RingData
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver
class CapturingHandler(logging.Handler):
def __init__(self):
super(CapturingHandler, self).__init__()
self._records = []
def emit(self, record):
self._records.append(record)
@contextlib.contextmanager
def capture_logging(log_name):
captured = CapturingHandler()
logger = logging.getLogger(log_name)
logger.addHandler(captured)
try:
yield captured._records
finally:
logger.removeHandler(captured)
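# A minimal usage sketch (illustrative, not part of the original tests): the
# context manager yields the list of captured LogRecords so a test can assert
# on what a module logged:
#
#     with capture_logging('swift.common.storage_policy') as records:
#         logging.getLogger('swift.common.storage_policy').warning('boom')
#     assert records[-1].levelname == 'WARNING'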
@BaseStoragePolicy.register('fake')
class FakeStoragePolicy(BaseStoragePolicy):
"""
Test StoragePolicy class - the only user at the moment is
test_validate_policies_type_invalid()
"""
def __init__(self, idx, name='', is_default=False, is_deprecated=False,
object_ring=None):
super(FakeStoragePolicy, self).__init__(
idx, name, is_default, is_deprecated, object_ring)
class TestStoragePolicies(unittest.TestCase):
def _conf(self, conf_str):
conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
conf = ConfigParser()
conf.readfp(six.StringIO(conf_str))
return conf
def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
try:
f(*args, **kwargs)
except exc_class as err:
err_msg = str(err)
self.assertTrue(message in err_msg, 'Error message %r did not '
'have expected substring %r' % (err_msg, message))
else:
self.fail('%r did not raise %s' % (message, exc_class.__name__))
def test_policy_baseclass_instantiate(self):
self.assertRaisesWithMessage(TypeError,
"Can't instantiate BaseStoragePolicy",
BaseStoragePolicy, 1, 'one')
@patch_policies([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
])
def test_swift_info(self):
# the deprecated 'three' should not exist in expect
expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
{'aliases': 'two', 'name': 'two'},
{'aliases': 'one', 'name': 'one'},
{'aliases': 'ten', 'name': 'ten'}]
swift_info = POLICIES.get_policy_info()
self.assertEqual(sorted(expect, key=lambda k: k['name']),
sorted(swift_info, key=lambda k: k['name']))
@patch_policies
def test_get_policy_string(self):
self.assertEqual(get_policy_string('something', 0), 'something')
self.assertEqual(get_policy_string('something', None), 'something')
self.assertEqual(get_policy_string('something', ''), 'something')
self.assertEqual(get_policy_string('something', 1),
'something' + '-1')
self.assertRaises(PolicyError, get_policy_string, 'something', 99)
@patch_policies
def test_split_policy_string(self):
expectations = {
'something': ('something', POLICIES[0]),
'something-1': ('something', POLICIES[1]),
'tmp': ('tmp', POLICIES[0]),
'objects': ('objects', POLICIES[0]),
'tmp-1': ('tmp', POLICIES[1]),
'objects-1': ('objects', POLICIES[1]),
'objects-': PolicyError,
'objects-0': PolicyError,
'objects--1': ('objects-', POLICIES[1]),
'objects-+1': PolicyError,
'objects--': PolicyError,
'objects-foo': PolicyError,
'objects--bar': PolicyError,
'objects-+bar': PolicyError,
# questionable, demonstrated as inverse of get_policy_string
'objects+0': ('objects+0', POLICIES[0]),
'': ('', POLICIES[0]),
'0': ('0', POLICIES[0]),
'-1': ('', POLICIES[1]),
}
for policy_string, expected in expectations.items():
if expected == PolicyError:
try:
invalid = split_policy_string(policy_string)
except PolicyError:
continue # good
else:
self.fail('The string %r returned %r '
'instead of raising a PolicyError' %
(policy_string, invalid))
self.assertEqual(expected, split_policy_string(policy_string))
# should be inverse of get_policy_string
self.assertEqual(policy_string, get_policy_string(*expected))
def test_defaults(self):
self.assertGreater(len(POLICIES), 0)
# test class functions
default_policy = POLICIES.default
self.assertTrue(default_policy.is_default)
zero_policy = POLICIES.get_by_index(0)
self.assertTrue(zero_policy.idx == 0)
zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
self.assertTrue(zero_policy_by_name.idx == 0)
def test_storage_policy_repr(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'eleven',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3,
ec_duplication_factor=2)]
policies = StoragePolicyCollection(test_policies)
for policy in policies:
policy_repr = repr(policy)
self.assertTrue(policy.__class__.__name__ in policy_repr)
self.assertTrue('is_default=%s' % policy.is_default in policy_repr)
self.assertTrue('is_deprecated=%s' % policy.is_deprecated in
policy_repr)
self.assertTrue(policy.name in policy_repr)
if policy.policy_type == EC_POLICY:
self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr)
self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr)
self.assertTrue('ec_nparity=%s' %
policy.ec_nparity in policy_repr)
self.assertTrue('ec_segment_size=%s' %
policy.ec_segment_size in policy_repr)
if policy.ec_duplication_factor > 1:
self.assertTrue('ec_duplication_factor=%s' %
policy.ec_duplication_factor in
policy_repr)
collection_repr = repr(policies)
collection_repr_lines = collection_repr.splitlines()
self.assertTrue(
policies.__class__.__name__ in collection_repr_lines[0])
self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
for policy, line in zip(policies, collection_repr_lines[1:-1]):
self.assertTrue(repr(policy) in line)
with patch_policies(policies):
self.assertEqual(repr(POLICIES), collection_repr)
def test_validate_policies_defaults(self):
# 0 explicit default
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
# non-zero explicit default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[2])
self.assertEqual(policies.default.name, 'two')
# multiple defaults
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', True),
StoragePolicy(2, 'two', True)]
self.assertRaisesWithMessage(
PolicyError, 'Duplicate default', StoragePolicyCollection,
test_policies)
# nothing specified
test_policies = []
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
self.assertEqual(policies.default.name, 'Policy-0')
# no default specified with only policy index 0
test_policies = [StoragePolicy(0, 'zero')]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, policies[0])
# no default specified with multiple policies
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_deprecate_policies(self):
# deprecation specified
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False, is_deprecated=True)]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.default, test_policies[0])
self.assertEqual(policies.default.name, 'zero')
self.assertEqual(len(policies), 3)
# multiple policies requires default
test_policies = [StoragePolicy(0, 'zero', False),
StoragePolicy(1, 'one', False, is_deprecated=True),
StoragePolicy(2, 'two', False)]
self.assertRaisesWithMessage(
PolicyError, 'Unable to find default policy',
StoragePolicyCollection, test_policies)
def test_validate_policies_indexes(self):
# duplicate indexes
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(1, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policy_params(self):
StoragePolicy(0, 'name') # sanity
# bogus indexes
self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name')
self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name')
# non-zero Policy-0
self.assertRaisesWithMessage(PolicyError, 'reserved',
FakeStoragePolicy, 1, 'policy-0')
# deprecate default
self.assertRaisesWithMessage(
PolicyError, 'Deprecated policy can not be default',
FakeStoragePolicy, 1, 'Policy-1', is_default=True,
is_deprecated=True)
# weird names
names = (
'',
'name_foo',
'name\nfoo',
'name foo',
u'name \u062a',
'name \xd8\xaa',
)
for name in names:
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
FakeStoragePolicy, 1, name)
def test_validate_policies_names(self):
# duplicate names
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'zero', False),
StoragePolicy(2, 'two', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
def test_validate_policies_type_default(self):
# no type specified - make sure the policy is initialized to
# DEFAULT_POLICY_TYPE
test_policy = FakeStoragePolicy(0, 'zero', True)
self.assertEqual(test_policy.policy_type, 'fake')
def test_validate_policies_type_invalid(self):
class BogusStoragePolicy(FakeStoragePolicy):
policy_type = 'bogus'
# unsupported policy type - initialization with FakeStoragePolicy
self.assertRaisesWithMessage(PolicyError, 'Invalid type',
BogusStoragePolicy, 1, 'one')
def test_policies_type_attribute(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one'),
StoragePolicy(2, 'two'),
StoragePolicy(3, 'three', is_deprecated=True),
ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
self.assertEqual(policies.get_by_index(0).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(1).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(2).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(3).policy_type,
REPL_POLICY)
self.assertEqual(policies.get_by_index(10).policy_type,
EC_POLICY)
def test_names_are_normalized(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'ZERO', False)]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
policies = StoragePolicyCollection([StoragePolicy(0, 'zEro', True),
StoragePolicy(1, 'One', False)])
pol0 = policies[0]
pol1 = policies[1]
for name in ('zero', 'ZERO', 'zErO', 'ZeRo'):
self.assertEqual(pol0, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'zEro')
for name in ('one', 'ONE', 'oNe', 'OnE'):
self.assertEqual(pol1, policies.get_by_name(name))
self.assertEqual(policies.get_by_name(name).name, 'One')
def test_multiple_names(self):
# checking duplicate on insert
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False, aliases='zero')]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies)
        # checking correct retrieval using other names
test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
StoragePolicy(1, 'one', False, aliases='uno, tahi'),
StoragePolicy(2, 'two', False, aliases='dos, rua')]
policies = StoragePolicyCollection(test_policies)
for name in ('zero', 'cero', 'kore'):
self.assertEqual(policies.get_by_name(name), test_policies[0])
for name in ('two', 'dos', 'rua'):
self.assertEqual(policies.get_by_name(name), test_policies[2])
# Testing parsing of conf files/text
good_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, tahi
default = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.get_by_name('one'),
policies[0])
self.assertEqual(policies.get_by_name('one'),
policies.get_by_name('tahi'))
name_repeat_conf = self._conf("""
[storage-policy:0]
name = one
aliases = one
default = yes
""")
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
policies = parse_storage_policies(name_repeat_conf)
extra_commas_conf = self._conf("""
[storage-policy:0]
name = one
aliases = ,,one, ,
default = yes
""")
# Extra blank entries should be silently dropped
policies = parse_storage_policies(extra_commas_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, uno
default = yes
""")
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_conf)
def test_multiple_names_EC(self):
# checking duplicate names on insert
test_policies_ec = [
ECStoragePolicy(
0, 'ec8-2',
aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=8),
is_default=True),
ECStoragePolicy(
1, 'ec10-4',
aliases='ec8-2',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=10))]
self.assertRaises(PolicyError, StoragePolicyCollection,
test_policies_ec)
        # checking correct retrieval using other names
good_test_policies_EC = [
ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
object_ring=FakeRing(replicas=10),
is_default=True),
ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4,
object_ring=FakeRing(replicas=14)),
ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
object_ring=FakeRing(replicas=6)),
ECStoragePolicy(3, 'ec4-2-dup', aliases='uzuki, rin',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2,
object_ring=FakeRing(replicas=12)),
]
ec_policies = StoragePolicyCollection(good_test_policies_EC)
for name in ('ec8-2', 'zeus', 'jupiter'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
for name in ('ec10-4', 'athena', 'minerva'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
for name in ('ec4-2', 'poseidon', 'neptune'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[2])
for name in ('ec4-2-dup', 'uzuki', 'rin'):
self.assertEqual(ec_policies.get_by_name(name), ec_policies[3])
# Testing parsing of conf files/text
good_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, jupiter
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
[storage-policy:1]
name = ec10-4
aliases = poseidon, neptune
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
[storage-policy:2]
name = ec4-2-dup
aliases = uzuki, rin
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 4
ec_num_parity_fragments = 2
ec_duplication_factor = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
ec_policies = parse_storage_policies(good_ec_conf)
self.assertEqual(ec_policies.get_by_name('ec8-2'),
ec_policies[0])
self.assertEqual(ec_policies.get_by_name('ec10-4'),
ec_policies.get_by_name('poseidon'))
self.assertEqual(ec_policies.get_by_name('ec4-2-dup'),
ec_policies.get_by_name('uzuki'))
name_repeat_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = ec8-2
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
# Test on line below should not generate errors. Repeat of main
# name under aliases is permitted during construction
# but only because automated testing requires it.
ec_policies = parse_storage_policies(name_repeat_ec_conf)
bad_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, zeus
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'is already assigned to this policy',
parse_storage_policies, bad_ec_conf)
def test_add_remove_names(self):
test_policies = [StoragePolicy(0, 'zero', True),
StoragePolicy(1, 'one', False),
StoragePolicy(2, 'two', False)]
policies = StoragePolicyCollection(test_policies)
# add names
policies.add_policy_alias(1, 'tahi')
self.assertEqual(policies.get_by_name('tahi'), test_policies[1])
policies.add_policy_alias(2, 'rua', 'dos')
self.assertEqual(policies.get_by_name('rua'), test_policies[2])
self.assertEqual(policies.get_by_name('dos'), test_policies[2])
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, 'double\n')
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.add_policy_alias, 2, '')
# try to add existing name
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 2, 'two')
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
policies.add_policy_alias, 1, 'two')
# remove name
policies.remove_policy_alias('tahi')
self.assertIsNone(policies.get_by_name('tahi'))
# remove only name
self.assertRaisesWithMessage(PolicyError,
'Policies must have at least one name.',
policies.remove_policy_alias, 'zero')
# remove non-existent name
self.assertRaisesWithMessage(PolicyError,
'No policy with name',
policies.remove_policy_alias, 'three')
# remove default name
policies.remove_policy_alias('two')
self.assertIsNone(policies.get_by_name('two'))
self.assertEqual(policies.get_by_index(2).name, 'rua')
# change default name to a new name
policies.change_policy_primary_name(2, 'two')
self.assertEqual(policies.get_by_name('two'), test_policies[2])
self.assertEqual(policies.get_by_index(2).name, 'two')
# change default name to an existing alias
policies.change_policy_primary_name(2, 'dos')
self.assertEqual(policies.get_by_index(2).name, 'dos')
# change default name to a bad new name
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
policies.change_policy_primary_name,
2, 'bad\nname')
# change default name to a name belonging to another policy
self.assertRaisesWithMessage(PolicyError,
'Other policy',
policies.change_policy_primary_name,
1, 'dos')
def test_deprecated_default(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
deprecated = yes
default = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Deprecated policy can not be default",
parse_storage_policies, bad_conf)
def test_multiple_policies_with_no_policy_index_zero(self):
bad_conf = self._conf("""
[storage-policy:1]
name = one
default = yes
""")
# Policy-0 will not be implicitly added if other policies are defined
self.assertRaisesWithMessage(
PolicyError, "must specify a storage policy section "
"for policy index 0", parse_storage_policies, bad_conf)
@mock.patch.object(swift.common.storage_policy, 'VALID_EC_TYPES',
['isa_l_rs_vand', 'isa_l_rs_cauchy'])
@mock.patch('swift.common.storage_policy.ECDriver')
def test_known_bad_ec_config(self, mock_driver):
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(good_conf)
mock_driver.assert_called_once()
mock_driver.reset_mock()
self.assertFalse([(r.levelname, r.msg) for r in records])
bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
with capture_logging('swift.common.storage_policy') as records, \
self.assertRaises(PolicyError) as exc_mgr:
parse_storage_policies(bad_conf)
self.assertEqual(exc_mgr.exception.message,
'Storage policy bad-policy uses an EC '
'configuration known to harm data durability. This '
'policy MUST be deprecated.')
mock_driver.assert_not_called()
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
slightly_less_bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
deprecated = true
[storage-policy:1]
name = good-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
default = true
""")
with capture_logging('swift.common.storage_policy') as records:
parse_storage_policies(slightly_less_bad_conf)
self.assertEqual(2, mock_driver.call_count)
mock_driver.reset_mock()
self.assertEqual([r.levelname for r in records],
['WARNING'])
for msg in ('known to harm data durability',
'Any data in this policy should be migrated',
'https://bugs.launchpad.net/swift/+bug/1639691'):
self.assertIn(msg, records[0].msg)
def test_no_default(self):
orig_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
default = yes
""")
policies = parse_storage_policies(orig_conf)
self.assertEqual(policies.default, policies[1])
        self.assertEqual(policies[0].name, 'zero')
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
deprecated = yes
""")
        # multiple policies and no explicit default
self.assertRaisesWithMessage(
PolicyError, "Unable to find default",
parse_storage_policies, bad_conf)
good_conf = self._conf("""
[storage-policy:0]
name = Policy-0
default = yes
[storage-policy:1]
name = one
deprecated = yes
""")
policies = parse_storage_policies(good_conf)
self.assertEqual(policies.default, policies[0])
        self.assertTrue(policies[1].is_deprecated)
def test_parse_storage_policies(self):
# ValueError when deprecating policy 0
bad_conf = self._conf("""
[storage-policy:0]
name = zero
deprecated = yes
[storage-policy:1]
name = one
deprecated = yes
""")
self.assertRaisesWithMessage(
PolicyError, "Unable to find policy that's not deprecated",
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x-1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:x:1]
name = zero
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid index',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = zero
boo = berries
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid option',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name =
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:3]
name = Policy-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:1]
name = policY-0
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = one
[storage-policy:1]
name = ONE
""")
self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
parse_storage_policies, bad_conf)
bad_conf = self._conf("""
[storage-policy:0]
name = good_stuff
""")
self.assertRaisesWithMessage(PolicyError, 'Invalid name',
parse_storage_policies, bad_conf)
# policy_type = erasure_coding
# missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# missing ec_type, but other options valid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
parse_storage_policies, bad_conf)
# ec_type specified, but invalid...
bad_conf = self._conf("""
[storage-policy:0]
name = zero
default = yes
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = garbage_alg
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
self.assertRaisesWithMessage(PolicyError,
'Wrong ec_type garbage_alg for policy '
'ec10-4, should be one of "%s"' %
(', '.join(VALID_EC_TYPES)),
parse_storage_policies, bad_conf)
# missing and invalid ec_num_parity_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
for num_parity in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = %(num_parity)s
""" % {'ec_type': DEFAULT_TEST_EC_TYPE,
'num_parity': num_parity})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_parity_fragments',
parse_storage_policies, bad_conf)
# missing and invalid ec_num_data_fragments
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_parity_fragments = 4
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
for num_data in ('-10', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = %(num_data)s
ec_num_parity_fragments = 4
""" % {'num_data': num_data, 'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_num_data_fragments',
parse_storage_policies, bad_conf)
# invalid ec_object_segment_size
for segment_size in ('-4', '0', 'x'):
bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_object_segment_size = %(segment_size)s
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""" % {'segment_size': segment_size,
'ec_type': DEFAULT_TEST_EC_TYPE})
self.assertRaisesWithMessage(PolicyError,
'Invalid ec_object_segment_size',
parse_storage_policies, bad_conf)
# Additional section added to ensure parser ignores other sections
conf = self._conf("""
[some-other-section]
foo = bar
[storage-policy:0]
name = zero
[storage-policy:5]
name = one
default = yes
[storage-policy:6]
name = duplicate-sections-are-ignored
[storage-policy:6]
name = apple
""")
policies = parse_storage_policies(conf)
self.assertEqual(True, policies.get_by_index(5).is_default)
self.assertEqual(False, policies.get_by_index(0).is_default)
self.assertEqual(False, policies.get_by_index(6).is_default)
self.assertEqual("object", policies.get_by_name("zero").ring_name)
self.assertEqual("object-5", policies.get_by_name("one").ring_name)
self.assertEqual("object-6", policies.get_by_name("apple").ring_name)
self.assertEqual(0, int(policies.get_by_name('zero')))
self.assertEqual(5, int(policies.get_by_name('one')))
self.assertEqual(6, int(policies.get_by_name('apple')))
self.assertEqual("zero", policies.get_by_index(0).name)
self.assertEqual("zero", policies.get_by_index("0").name)
self.assertEqual("one", policies.get_by_index(5).name)
self.assertEqual("apple", policies.get_by_index(6).name)
self.assertEqual("zero", policies.get_by_index(None).name)
self.assertEqual("zero", policies.get_by_index('').name)
self.assertEqual(policies.get_by_index(0), policies.legacy)
def test_reload_invalid_storage_policies(self):
conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:00]
name = double-zero
""")
with NamedTemporaryFile() as f:
conf.write(f)
f.flush()
with mock.patch('swift.common.utils.SWIFT_CONF_FILE',
new=f.name):
try:
reload_storage_policies()
except SystemExit as e:
err_msg = str(e)
else:
self.fail('SystemExit not raised')
parts = [
'Invalid Storage Policy Configuration',
'Duplicate index',
]
for expected in parts:
self.assertTrue(
expected in err_msg, '%s was not in %s' % (expected,
err_msg))
def test_storage_policy_ordering(self):
test_policies = StoragePolicyCollection([
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(503, 'error'),
StoragePolicy(204, 'empty'),
StoragePolicy(404, 'missing'),
])
self.assertEqual([0, 204, 404, 503], [int(p) for p in
sorted(list(test_policies))])
p503 = test_policies[503]
self.assertTrue(501 < p503 < 507)
def test_get_object_ring(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
policies = StoragePolicyCollection(test_policies)
class NamedFakeRing(FakeRing):
def __init__(self, swift_dir, ring_name=None):
self.ring_name = ring_name
super(NamedFakeRing, self).__init__()
with mock.patch('swift.common.storage_policy.Ring',
new=NamedFakeRing):
for policy in policies:
self.assertFalse(policy.object_ring)
ring = policies.get_object_ring(int(policy), '/path/not/used')
self.assertEqual(ring.ring_name, policy.ring_name)
self.assertTrue(policy.object_ring)
self.assertTrue(isinstance(policy.object_ring, NamedFakeRing))
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
with mock.patch('swift.common.storage_policy.Ring', new=blow_up):
for policy in policies:
policy.load_ring('/path/not/used')
expected = policies.get_object_ring(int(policy),
'/path/not/used')
self.assertEqual(policy.object_ring, expected)
# bad policy index
self.assertRaises(PolicyError, policies.get_object_ring, 99,
'/path/not/used')
def test_bind_ports_cache(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
my_ips = ['1.2.3.4', '2.3.4.5']
other_ips = ['3.4.5.6', '4.5.6.7']
bind_ip = my_ips[1]
devs_by_ring_name1 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6006},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6007},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6008},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6009}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6006}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6010},
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6011},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6012}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6010}, # on our IP and a not-us IP
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
'port': 6013},
None,
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6014},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6015}],
}
devs_by_ring_name2 = {
'object': [ # 'aay'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6016},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6019}],
'object-1': [ # 'bee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
'port': 6016}, # dupe
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6022}],
'object-2': [ # 'cee'
{'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
'port': 6020},
{'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
'port': 6025}],
}
ring_files = [ring_name + '.ring.gz'
for ring_name in sorted(devs_by_ring_name1)]
def _fake_load(gz_path, stub_objs, metadata_only=False):
return RingData(
devs=stub_objs[os.path.basename(gz_path)[:-8]],
replica2part2dev_id=[],
part_shift=24)
with mock.patch(
'swift.common.storage_policy.RingData.load'
) as mock_ld, \
patch_policies(test_policies), \
mock.patch('swift.common.storage_policy.whataremyips') \
as mock_whataremyips, \
temptree(ring_files) as tempdir:
mock_whataremyips.return_value = my_ips
cache = BindPortsCache(tempdir, bind_ip)
self.assertEqual([
mock.call(bind_ip),
], mock_whataremyips.mock_calls)
mock_whataremyips.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name1)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
mock_ld.side_effect = partial(_fake_load,
stub_objs=devs_by_ring_name2)
self.assertEqual(set([
6006, 6008, 6011, 6010, 6014,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# but when all the file mtimes are made different, it'll
# reload
for gz_file in [os.path.join(tempdir, n)
for n in ring_files]:
os.utime(gz_file, (88, 88))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([
mock.call(os.path.join(tempdir, ring_files[0]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[1]),
metadata_only=True),
mock.call(os.path.join(tempdir, ring_files[2]),
metadata_only=True),
], mock_ld.mock_calls)
mock_ld.reset_mock()
# Don't do something stupid like crash if a ring file is missing.
os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))
self.assertEqual(set([
6016, 6020,
]), cache.all_bind_ports_for_node())
self.assertEqual([], mock_ld.mock_calls)
# whataremyips() is only called in the constructor
self.assertEqual([], mock_whataremyips.mock_calls)
def test_singleton_passthrough(self):
test_policies = [StoragePolicy(0, 'aay', True),
StoragePolicy(1, 'bee', False),
StoragePolicy(2, 'cee', False)]
with patch_policies(test_policies):
for policy in POLICIES:
self.assertEqual(POLICIES[int(policy)], policy)
def test_quorum_size_replication(self):
expected_sizes = {1: 1,
2: 1,
3: 2,
4: 2,
5: 3}
for n, expected in expected_sizes.items():
policy = StoragePolicy(0, 'zero',
object_ring=FakeRing(replicas=n))
self.assertEqual(policy.quorum, expected)
def test_quorum_size_erasure_coding(self):
test_ec_policies = [
ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2),
ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
ec_ndata=10, ec_nparity=6),
ECStoragePolicy(12, 'ec4-2-dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2, ec_duplication_factor=2),
]
for ec_policy in test_ec_policies:
k = ec_policy.ec_ndata
expected_size = (
(k + ec_policy.pyeclib_driver.min_parity_fragments_needed())
* ec_policy.ec_duplication_factor
)
self.assertEqual(expected_size, ec_policy.quorum)
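    # Worked example for the expected-size formula above (illustrative; the
    # value of min_parity_fragments_needed() depends on the EC backend): for
    # a Reed-Solomon style ec8-2 policy it is typically 1, giving a quorum of
    # (8 + 1) * 1 = 9, while ec4-2 with ec_duplication_factor=2 would give
    # (4 + 1) * 2 = 10.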
def test_validate_ring(self):
test_policies = [
ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=8, ec_nparity=2,
is_default=True),
ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=4),
ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2),
ECStoragePolicy(3, 'ec4-2-2dup', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=4, ec_nparity=2,
ec_duplication_factor=2)
]
actual_load_ring_replicas = [8, 10, 7, 11]
policies = StoragePolicyCollection(test_policies)
class MockRingData(object):
def __init__(self, num_replica):
self._replica2part2dev_id = [0] * num_replica
for policy, ring_replicas in zip(policies, actual_load_ring_replicas):
with mock.patch('swift.common.ring.ring.RingData.load',
return_value=MockRingData(ring_replicas)):
necessary_replica_num = \
policy.ec_n_unique_fragments * policy.ec_duplication_factor
with mock.patch(
'swift.common.ring.ring.validate_configuration'):
msg = 'EC ring for policy %s needs to be configured with ' \
'exactly %d replicas.' % \
(policy.name, necessary_replica_num)
self.assertRaisesWithMessage(RingLoadError, msg,
policy.load_ring, 'mock')
def test_storage_policy_get_info(self):
test_policies = [
StoragePolicy(0, 'zero', is_default=True),
StoragePolicy(1, 'one', is_deprecated=True,
aliases='tahi, uno'),
ECStoragePolicy(10, 'ten',
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
ECStoragePolicy(11, 'done', is_deprecated=True,
ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=10, ec_nparity=3),
]
policies = StoragePolicyCollection(test_policies)
expected = {
# default replication
(0, True): {
'name': 'zero',
'aliases': 'zero',
'default': True,
'deprecated': False,
'policy_type': REPL_POLICY
},
(0, False): {
'name': 'zero',
'aliases': 'zero',
'default': True,
},
# deprecated replication
(1, True): {
'name': 'one',
'aliases': 'one, tahi, uno',
'default': False,
'deprecated': True,
'policy_type': REPL_POLICY
},
(1, False): {
'name': 'one',
'aliases': 'one, tahi, uno',
'deprecated': True,
},
# enabled ec
(10, True): {
'name': 'ten',
'aliases': 'ten',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(10, False): {
'name': 'ten',
'aliases': 'ten',
},
# deprecated ec
(11, True): {
'name': 'done',
'aliases': 'done',
'default': False,
'deprecated': True,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 1,
},
(11, False): {
'name': 'done',
'aliases': 'done',
'deprecated': True,
},
# enabled ec with ec_duplication
(12, True): {
'name': 'twelve',
'aliases': 'twelve',
'default': False,
'deprecated': False,
'policy_type': EC_POLICY,
'ec_type': DEFAULT_TEST_EC_TYPE,
'ec_num_data_fragments': 10,
'ec_num_parity_fragments': 3,
'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
'ec_duplication_factor': 2,
},
(12, False): {
'name': 'twelve',
'aliases': 'twelve',
},
}
self.maxDiff = None
for policy in policies:
expected_info = expected[(int(policy), True)]
self.assertEqual(policy.get_info(config=True), expected_info)
expected_info = expected[(int(policy), False)]
self.assertEqual(policy.get_info(config=False), expected_info)
def test_ec_fragment_size_cached(self):
policy = ECStoragePolicy(
0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE,
ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3),
ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True)
ec_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE,
k=2, m=1)
expected_fragment_size = ec_driver.get_segment_info(
DEFAULT_EC_OBJECT_SEGMENT_SIZE,
DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size']
with mock.patch.object(
policy.pyeclib_driver, 'get_segment_info') as fake:
fake.return_value = {
'fragment_size': expected_fragment_size}
for x in range(10):
self.assertEqual(expected_fragment_size,
policy.fragment_size)
# pyeclib_driver.get_segment_info is called only once
self.assertEqual(1, fake.call_count)
if __name__ == '__main__':
unittest.main()
|
dmcelectrico/Subtitle-Toolbox | refs/heads/master | sites/subtitulos_es.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import utils
import iso6392
import re
from classes.Entities import *
from bs4 import BeautifulSoup
URL = "http://www.subtitulos.es/"
def get_all_subtitles(series,season,episode):
series_url_name = __translate_series_name(series)
subtitles_url = URL+series_url_name+"/"+str(season)+"x"+str(episode)
soup = utils.get_soup_from_URL(subtitles_url,debug=False)
if soup==None:
return None;
releases = soup.find_all(id="version")
subtitle_releases = []
for release in releases:
version = __get_release(release)
subtitles = __get_subtitles_from_release(release)
subtitle_releases.append(subtitles)
return subtitle_releases
def translate_language_to_iso6392(language):
if language=="Español (España)" or language=="Español" :
return iso6392.get_iso6392("Spanish; Castilian")
elif language=="Català":
return iso6392.get_iso6392("Catalan; Valencian")
elif language=="Español (Latinoamérica)":
return iso6392.get_iso6392("Latin")
elif language=="English":
return iso6392.get_iso6392("English")
elif language=="English":
return iso6392.get_iso6392("English")
elif language=="Galego":
return iso6392.get_iso6392("Galician")
def get_episode_name(series,season,episode):
series_url_name = __translate_series_name(series)
subtitles_url = URL+series_url_name+"/"+str(season)+"x"+str(episode)
soup = utils.get_soup_from_URL(subtitles_url,debug=False)
if soup is None:
return None
cabecera_subtitulo = soup.find(id="cabecera-subtitulo")
nombre = re.search('(?<=x[0-9][0-9] - ).+',cabecera_subtitulo.string)
    if nombre is not None:
return nombre.group(0)
else:
return ""
#########################
# Internal methods #
#########################
def __get_release(release_tag):
for s in release_tag.p.strings:
if "versión" in s.lower():
tmp = s.split(" ")[1:-2]
" ".join(tmp)
return " ".join(tmp)
def __get_subtitles_from_release(release_tag):
#subtitles = {}
subtitles = SubtitleRelease(__get_release(release_tag))
for language in release_tag.find_all(class_="li-idioma"):
raw_href = language.parent.next_sibling.next_sibling
if(__get_class_as_string(raw_href['class'])!="descargar green"):
continue
subtitle = Subtitle(translate_language_to_iso6392(language.strong.string),__get_most_updated_subtitle(raw_href))
subtitles.add(subtitle)
#subtitles[language.strong.string]=__get_most_updated_subtitle(raw_href)
return subtitles
# Utils #
def __get_class_as_string(class_set):
    # BeautifulSoup returns multi-valued attributes such as class as a list;
    # join the values back into a single space-separated string.
    return " ".join(class_set)
def __get_most_updated_subtitle(raw_href):
    a = raw_href.find_all("a")
    updated = [s for s in a if "updated" in s['href']]
    # Prefer the subtitle marked as updated; if none is, fall back to the
    # last link available. (The original code discarded the comprehension
    # result and relied on the loop variable leaking, which returned the
    # last <a> regardless of the filter.)
    if updated:
        return updated[-1]['href']
    return a[-1]['href']
def __translate_series_name(name):
    name = name.replace(" ", "-")
    if name.lower() in show_name:
        name = show_name[name.lower()]
    return name
## SHOW NAME CORRECTIONS ##
show_name = {'house-of-cards-2013' : 'House-of-Cards-(2013)',
'transporter-the-series' : 'Transporter:-The-Series',
'faking-it-2014' : 'Faking-It'
} |
AICP/external_chromium_org | refs/heads/lp5.0 | chrome/common/extensions/docs/server2/cron_servlet.py | 7 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
def __init__(self, server_instance):
self._server_instance = server_instance
def CreateServerInstance(self):
return self._server_instance
class _CronLogger(object):
'''Wraps the logging.* methods to prefix them with 'cron' and flush
immediately. The flushing is important because often these cron runs time
out and we lose the logs.
'''
def info(self, msg, *args): self._log(logging.info, msg, args)
def warning(self, msg, *args): self._log(logging.warning, msg, args)
def error(self, msg, *args): self._log(logging.error, msg, args)
def _log(self, logfn, msg, args):
try:
logfn('cron: %s' % msg, *args)
finally:
logservice.flush()
_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
'''Runs a task |request_callback| named |title| for each item in |items|.
|request_callback| must take an item and return a servlet response.
Returns true if every item was successfully run, false if any return a
non-200 response or raise an exception.
'''
_cronlog.info('%s: starting', title)
success_count, failure_count = 0, 0
timer = Timer()
try:
for i, item in enumerate(items):
def error_message(detail):
return '%s: error rendering %s (%s of %s): %s' % (
title, item, i + 1, len(items), detail)
try:
response = request_callback(item)
if response.status == 200:
success_count += 1
else:
_cronlog.error(error_message('response status %s' % response.status))
failure_count += 1
except Exception as e:
_cronlog.error(error_message(traceback.format_exc()))
failure_count += 1
if IsDeadlineExceededError(e): raise
finally:
_cronlog.info('%s: rendered %s of %s with %s failures in %s',
title, success_count, len(items), failure_count,
timer.Stop().FormatElapsed())
return success_count == len(items)
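# A minimal usage sketch (illustrative, not part of the original servlet):
# the helper drives any per-item request callback and reports only overall
# success, e.g.
#
#     ok = _RequestEachItem('render public templates',
#                           ['/extensions/index.html'],
#                           render)
#
# where render is the path-rendering closure defined in _GetImpl below and
# the path shown is hypothetical.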
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or CronServlet.Delegate()
class Delegate(object):
'''CronServlet's runtime dependencies. Override for testing.
'''
def CreateBranchUtility(self, object_store_creator):
return BranchUtility.Create(object_store_creator)
def CreateHostFileSystemProvider(self,
object_store_creator,
max_trunk_revision=None):
return HostFileSystemProvider(object_store_creator,
max_trunk_revision=max_trunk_revision)
def CreateGithubFileSystemProvider(self, object_store_creator):
return GithubFileSystemProvider(object_store_creator)
def CreateGCSFileSystemProvider(self, object_store_creator):
return CloudStorageFileSystemProvider(object_store_creator)
def GetAppVersion(self):
return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
    # make it flush the log every time it's used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.api_models])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
    '''Returns a ServerInstance with a host file system at a safe revision,
    meaning the last revision at which the currently running version of the
    server existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
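  # Worked example of the arithmetic above (hypothetical numbers): if the
  # running app version is 2-0-9 and app.yaml first exceeded that version
  # at revision 1500, then the safe revision is 1500 - 1 = 1499, i.e. the
  # last revision on which this app version was still current.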
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider)
|
zmap/ztag | refs/heads/master | ztag/annotations/netgear.py | 1 | from ztag.annotation import *
class NetGearSmartSwitch(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
tests = {
"netgear_smart_switch":{
"global_metadata":{
"manufacturer":Manufacturer.NETGEAR,
"device_type":Type.SWITCH,
"product":"Smart Switch",
},
"tags":["embedded",]
}
}
def process(self, obj, meta):
if obj["title"] == "NETGEAR Web Smart Switch":
meta.global_metadata.manufacturer = Manufacturer.NETGEAR
meta.global_metadata.product = "Smart Switch"
meta.global_metadata.device_type = Type.SWITCH
meta.tags.add("embedded")
return meta
if obj["title"] == "Netgear Prosafe Plus Switch":
meta.global_metadata.manufacturer = Manufacturer.NETGEAR
meta.global_metadata.product = "Prosafe Plus Switch"
meta.global_metadata.device_type = Type.SWITCH
meta.tags.add("embedded")
return meta
class NetGearLabeledSwitches(Annotation):
protocol = protocols.HTTP
subprotocol = protocols.HTTP.GET
port = None
VALID_MODELS = set([
"GS724T",
"GS748Tv5",
"GS108T"
])
def process(self, obj, meta):
if obj["title"].lower().startswith("netgear") and " " in obj["title"]:
m = obj["title"].split(" ")[1].strip()
if m in self.VALID_MODELS:
meta.global_metadata.manufacturer = Manufacturer.NETGEAR
meta.global_metadata.product = m
meta.global_metadata.device_type = Type.SWITCH
meta.tags.add("embedded")
return meta
class NetGearProsafe(Annotation):
protocol = protocols.HTTPS
subprotocol = protocols.HTTPS.TLS
port = None
tests = {
"netgear_prosafe":{
"global_metadata":{
"manufacturer":Manufacturer.NETGEAR,
"device_type":Type.FIREWALL,
"product":"Prosafe VPN Firewall",
},
"tags":["embedded",]
}
}
def process(self, obj, meta):
cn = obj["certificate"]["parsed"]["subject"]["common_name"][0].strip()
if cn == "Netgear VPN Firewall":
meta.global_metadata.manufacturer = Manufacturer.NETGEAR
meta.global_metadata.product = "Prosafe VPN Firewall"
meta.global_metadata.device_type = Type.FIREWALL
meta.tags.add("embedded")
return meta
|
DukeOfHazard/crits | refs/heads/master | crits/comments/api.py | 21 | from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from crits.comments.comment import Comment
from crits.comments.handlers import comment_add
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class CommentResource(CRITsAPIResource):
"""
Comment API Resource Class.
"""
class Meta:
object_class = Comment
        allowed_methods = ('post',)
resource_name = "comments"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(CommentResource, self).get_object_list(request, Comment)
def obj_create(self, bundle, **kwargs):
"""
Handles creating Comments through the API.
:param bundle: Bundle containing the information to create the Comment.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
comment = bundle.data.get('comment', None)
obj_type = bundle.data.get('object_type', None)
obj_id = bundle.data.get('object_id', None)
content = {'return_code': 1,
'type': 'Comment',
'success': False}
if not obj_type:
content['message'] = 'Must provide an object type.'
self.crits_response(content)
if not obj_id:
content['message'] = 'Must provide an object id.'
self.crits_response(content)
if not comment:
content['message'] = 'Must provide a comment.'
self.crits_response(content)
data = {'comment': comment,
'object_type': obj_type,
'object_id': obj_id,
'url_key': obj_id}
retVal = comment_add(data, obj_type, obj_id, '', {}, analyst)
if "Comment added successfully!" in retVal.content:
content['success'] = True
content['return_code'] = 0
content['message'] = 'Comment added successfully!'
self.crits_response(content) |
FFMG/myoddweb.piger | refs/heads/master | monitor/api/python/Python-3.7.2/Lib/test/bisect.py | 3 | #!/usr/bin/env python3
"""
Command line tool to bisect failing CPython tests.
Find the test_os test method which alters the environment:
./python -m test.bisect --fail-env-changed test_os
Find a reference leak in "test_os", write the list of failing tests into the
"bisect" file:
./python -m test.bisect -o bisect -R 3:3 test_os
Load an existing list of tests from a file using -i option:
./python -m test --list-cases -m FileTests test_os > tests
./python -m test.bisect -i tests test_os
"""
import argparse
import datetime
import os.path
import math
import random
import subprocess
import sys
import tempfile
import time
def write_tests(filename, tests):
with open(filename, "w") as fp:
for name in tests:
print(name, file=fp)
fp.flush()
def write_output(filename, tests):
if not filename:
return
print("Writing %s tests into %s" % (len(tests), filename))
write_tests(filename, tests)
return filename
def format_shell_args(args):
return ' '.join(args)
def list_cases(args):
cmd = [sys.executable, '-m', 'test', '--list-cases']
cmd.extend(args.test_args)
proc = subprocess.run(cmd,
stdout=subprocess.PIPE,
universal_newlines=True)
exitcode = proc.returncode
if exitcode:
cmd = format_shell_args(cmd)
print("Failed to list tests: %s failed with exit code %s"
% (cmd, exitcode))
sys.exit(exitcode)
tests = proc.stdout.splitlines()
return tests
def run_tests(args, tests, huntrleaks=None):
tmp = tempfile.mktemp()
try:
write_tests(tmp, tests)
cmd = [sys.executable, '-m', 'test', '--matchfile', tmp]
cmd.extend(args.test_args)
print("+ %s" % format_shell_args(cmd))
proc = subprocess.run(cmd)
return proc.returncode
finally:
if os.path.exists(tmp):
os.unlink(tmp)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input',
                        help='Test names produced by --list-cases written '
                             'into a file. If not set, run --list-cases')
parser.add_argument('-o', '--output',
help='Result of the bisection')
parser.add_argument('-n', '--max-tests', type=int, default=1,
help='Maximum number of tests to stop the bisection '
'(default: 1)')
parser.add_argument('-N', '--max-iter', type=int, default=100,
help='Maximum number of bisection iterations '
'(default: 100)')
# FIXME: document that following arguments are test arguments
args, test_args = parser.parse_known_args()
args.test_args = test_args
return args
def main():
args = parse_args()
if args.input:
with open(args.input) as fp:
tests = [line.strip() for line in fp]
else:
tests = list_cases(args)
print("Start bisection with %s tests" % len(tests))
print("Test arguments: %s" % format_shell_args(args.test_args))
print("Bisection will stop when getting %s or less tests "
"(-n/--max-tests option), or after %s iterations "
"(-N/--max-iter option)"
% (args.max_tests, args.max_iter))
output = write_output(args.output, tests)
print()
start_time = time.monotonic()
iteration = 1
try:
while len(tests) > args.max_tests and iteration <= args.max_iter:
ntest = len(tests)
ntest = max(ntest // 2, 1)
subtests = random.sample(tests, ntest)
print("[+] Iteration %s: run %s tests/%s"
% (iteration, len(subtests), len(tests)))
print()
exitcode = run_tests(args, subtests)
print("ran %s tests/%s" % (ntest, len(tests)))
print("exit", exitcode)
if exitcode:
print("Tests failed: continuing with this subtest")
tests = subtests
output = write_output(args.output, tests)
else:
print("Tests succeeded: skipping this subtest, trying a new subset")
print()
iteration += 1
except KeyboardInterrupt:
print()
print("Bisection interrupted!")
print()
print("Tests (%s):" % len(tests))
for test in tests:
print("* %s" % test)
print()
if output:
print("Output written into %s" % output)
dt = math.ceil(time.monotonic() - start_time)
if len(tests) <= args.max_tests:
print("Bisection completed in %s iterations and %s"
% (iteration, datetime.timedelta(seconds=dt)))
sys.exit(1)
else:
print("Bisection failed after %s iterations and %s"
% (iteration, datetime.timedelta(seconds=dt)))
if __name__ == "__main__":
main()
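# A rough cost sketch (assumption, not from the original module): each
# *failing* run keeps the failing half, so the candidate set shrinks by
# roughly 2x per failure and isolating one culprit among N tests needs
# about log2(N) failing iterations (e.g. ~10 for 1000 tests), well under
# the default -N/--max-iter limit of 100. Successful runs retry a
# different random subset of the same size, so the actual iteration count
# is usually somewhat higher.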
|
elmerdpadilla/iv | refs/heads/8.0 | addons/mrp_operations/report/__init__.py | 443 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_wc_barcode
import mrp_code_barcode
import mrp_workorder_analysis
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
marc-sensenich/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_log_publisher.py | 14 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_log_publisher
short_description: Manages log publishers on a BIG-IP
description:
- Manages log publishers on a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the log publisher.
required: True
description:
description:
- Specifies a description for the log publisher.
destinations:
description:
- Specifies log destinations for this log publisher to use.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a log publisher for use in high speed logging
bigip_log_publisher:
name: publisher1
destinations:
- hsl1
- security-log-servers-logging
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the resource.
returned: changed
type: str
sample: "Security log publisher"
destinations:
description: The new list of destinations for the resource.
returned: changed
type: list
sample: ['/Common/destination1', '/Common/destination2']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.compare import cmp_simple_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.compare import cmp_simple_list
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
'destinations',
'description',
]
returnables = [
'destinations',
'description',
]
updatables = [
'destinations',
'description',
]
class ApiParameters(Parameters):
@property
def destinations(self):
if self._values['destinations'] is None:
return None
results = []
for destination in self._values['destinations']:
result = fq_name(destination['partition'], destination['name'])
results.append(result)
results.sort()
return results
class ModuleParameters(Parameters):
@property
def destinations(self):
if self._values['destinations'] is None:
return None
if len(self._values['destinations']) == 1 and self._values['destinations'][0] == '':
return ''
result = [fq_name(self.partition, x) for x in self._values['destinations']]
result = list(set(result))
result.sort()
return result
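# Illustrative note (assumes fq_name leaves already-qualified names
# untouched): with partition 'Common', destinations ['hsl1', '/Part1/d2']
# would normalize, de-duplicate and sort to ['/Common/hsl1', '/Part1/d2']
# before being compared against the API values.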
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def destinations(self):
result = cmp_simple_list(self.want.destinations, self.have.destinations)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
description=dict(),
destinations=dict(type='list'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
abhi252/GloVeGraphs | refs/heads/master | Initial_Test/glove.py | 1 | #!/usr/bin/env python
from argparse import ArgumentParser
import codecs
from collections import Counter
import itertools
from functools import partial
import logging
from math import log
import os.path
import cPickle as pickle
from random import shuffle
import msgpack
import numpy as np
from scipy import sparse
from util import listify
logger = logging.getLogger("glove")
def parse_args():
parser = ArgumentParser(
description=('Build a GloVe vector-space model from the '
'provided corpus'))
g_cooccur = parser.add_argument_group('Cooccurrence tracking options')
g_cooccur.add_argument('--cooccur-path',
help=('Path to cooccurrence matrix file. If '
'this path exists, the matrix will be '
'loaded from the file. If it does not '
'exist, the matrix will be written to '
'this file.'))
g_cooccur.add_argument('--size', type=int,
help=('Number of nodes in network'))
g_cooccur.add_argument('--min-count', type=int, default=0,
help=('Discard cooccurrence pairs where at '
'least one of the words occurs fewer '
'than this many times in the training '
'corpus'))
g_glove = parser.add_argument_group('GloVe options')
g_glove.add_argument('--vector-path',
help=('Path to which to save computed word '
'vectors'))
g_glove.add_argument('-s', '--vector-size', type=int, default=100,
help=('Dimensionality of output word vectors'))
g_glove.add_argument('--iterations', type=int, default=25,
help='Number of training iterations')
g_glove.add_argument('--learning-rate', type=float, default=0.05,
help='Initial learning rate')
g_glove.add_argument('--save-often', action='store_true', default=True,
help=('Save vectors after every training '
'iteration'))
return parser.parse_args()
def get_cooccurence(path):
with open(path, 'rb') as obj_f:
obj = msgpack.load(obj_f, use_list=False, encoding='utf-8')
return obj
def get_or_build(path, build_fn, *args, **kwargs):
"""
Load from serialized form or build an object, saving the built
object.
Remaining arguments are provided to `build_fn`.
"""
save = False
obj = None
if path is not None and os.path.isfile(path):
with open(path, 'rb') as obj_f:
obj = msgpack.load(obj_f, use_list=False, encoding='utf-8')
else:
save = True
if obj is None:
obj = build_fn(*args, **kwargs)
if save and path is not None:
with open(path, 'wb') as obj_f:
msgpack.dump(obj, obj_f)
return obj
# def build_vocab(corpus):
# """
# Build a vocabulary with word frequencies for an entire corpus.
# Returns a dictionary `w -> (i, f)`, mapping word strings to pairs of
# word ID and word corpus frequency.
# """
# logger.info("Building vocab from corpus")
# vocab = Counter()
# for line in corpus:
# tokens = line.strip().split()
# vocab.update(tokens)
# logger.info("Done building vocab from corpus.")
# return {word: (i, freq) for i, (word, freq) in enumerate(vocab.iteritems())}
@listify
def build_cooccur(vocab, corpus, window_size=10, min_count=None):
"""
Build a word co-occurrence list for the given corpus.
This function is a tuple generator, where each element (representing
a cooccurrence pair) is of the form
(i_main, i_context, cooccurrence)
where `i_main` is the ID of the main word in the cooccurrence and
`i_context` is the ID of the context word, and `cooccurrence` is the
`X_{ij}` cooccurrence value as described in Pennington et al.
(2014).
If `min_count` is not `None`, cooccurrence pairs where either word
occurs in the corpus fewer than `min_count` times are ignored.
"""
vocab_size = len(vocab)
id2word = dict((i, word) for word, (i, _) in vocab.iteritems())
# Collect cooccurrences internally as a sparse matrix for passable
# indexing speed; we'll convert into a list later
cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),
dtype=np.float64)
for i, line in enumerate(corpus):
if i % 1000 == 0:
logger.info("Building cooccurrence matrix: on line %i", i)
tokens = line.strip().split()
token_ids = [vocab[word][0] for word in tokens]
for center_i, center_id in enumerate(token_ids):
# Collect all word IDs in left window of center word
context_ids = token_ids[max(0, center_i - window_size) : center_i]
contexts_len = len(context_ids)
for left_i, left_id in enumerate(context_ids):
# Distance from center word
distance = contexts_len - left_i
# Weight by inverse of distance between words
increment = 1.0 / float(distance)
# Build co-occurrence matrix symmetrically (pretend we
# are calculating right contexts as well)
cooccurrences[center_id, left_id] += increment
cooccurrences[left_id, center_id] += increment
# Now yield our tuple sequence (dig into the LiL-matrix internals to
# quickly iterate through all nonzero cells)
for i, (row, data) in enumerate(itertools.izip(cooccurrences.rows,
cooccurrences.data)):
if min_count is not None and vocab[id2word[i]][1] < min_count:
continue
for data_idx, j in enumerate(row):
if min_count is not None and vocab[id2word[j]][1] < min_count:
continue
yield i, j, data[data_idx]
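# Worked example (toy input, assuming vocab {'a': (0, 1), 'b': (1, 1),
# 'c': (2, 1)} and a one-line corpus "a b c" with the default window):
# centering on 'b' adds 1.0 to the (b, a) cell; centering on 'c' adds
# 1.0 for 'b' (distance 1) and 1/2 = 0.5 for 'a' (distance 2). After
# symmetric filling the generator yields
#   (0, 1, 1.0), (0, 2, 0.5), (1, 0, 1.0), (1, 2, 1.0), (2, 0, 0.5), (2, 1, 1.0)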
def run_iter(data, learning_rate=0.05, x_max=20, alpha=0.75):
"""
Run a single iteration of GloVe training using the given
cooccurrence data and the previously computed weight vectors /
biases and accompanying gradient histories.
`data` is a pre-fetched data / weights list where each element is of
the form
(v_main, v_context,
b_main, b_context,
gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context,
cooccurrence)
as produced by the `train_glove` function. Each element in this
tuple is an `ndarray` view into the data structure which contains
it.
See the `train_glove` function for information on the shapes of `W`,
`biases`, `gradient_squared`, `gradient_squared_biases` and how they
should be initialized.
The parameters `x_max`, `alpha` define our weighting function when
computing the cost for two word pairs; see the GloVe paper for more
details.
Returns the cost associated with the given weight assignments and
updates the weights by online AdaGrad in place.
"""
global_cost = 0
# We want to iterate over data randomly so as not to unintentionally
# bias the word vector contents
shuffle(data)
#i = 0
for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,
gradsq_b_main, gradsq_b_context, cooccurrence) in data:
weight = (cooccurrence / x_max) ** alpha if cooccurrence < x_max else 1
# Compute inner component of cost function, which is used in
# both overall cost calculation and in gradient calculation
#
# $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$
#print i,b_main,b_context
#i+=1
cost_inner = (v_main.dot(v_context)
+ b_main[0] + b_context[0]
- log(cooccurrence))
# Compute cost
#
# $$ J = f(X_{ij}) (J')^2 $$
cost = weight * (cost_inner ** 2)
# Add weighted cost to the global cost tracker
global_cost += 0.5 * cost
# Compute gradients for word vector terms.
#
# NB: `main_word` is only a view into `W` (not a copy), so our
# modifications here will affect the global weight matrix;
# likewise for context_word, biases, etc.
grad_main = weight * cost_inner * v_context
grad_context = weight * cost_inner * v_main
# Compute gradients for bias terms
grad_bias_main = weight * cost_inner
grad_bias_context = weight * cost_inner
# Now perform adaptive updates
v_main -= (learning_rate * grad_main / np.sqrt(gradsq_W_main))
v_context -= (learning_rate * grad_context / np.sqrt(gradsq_W_context))
b_main -= (learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))
b_context -= (learning_rate * grad_bias_context / np.sqrt(
gradsq_b_context))
# Update squared gradient sums
gradsq_W_main += np.square(grad_main)
gradsq_W_context += np.square(grad_context)
gradsq_b_main += grad_bias_main ** 2
gradsq_b_context += grad_bias_context ** 2
return global_cost
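# Numeric sanity check of the weighting above (assumed values): with
# x_max=20 and alpha=0.75, a pair observed 5 times gets
#   f(5) = (5 / 20.0) ** 0.75 = 0.25 ** 0.75 ~= 0.354,
# while any cooccurrence >= 20 is capped at weight 1, keeping very
# frequent pairs from dominating the squared-error cost.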
def train_glove(nodes,cooccurrences, iter_callback=None, vector_size=100,
iterations=25, **kwargs):
"""
Train GloVe vectors on the given generator `cooccurrences`, where
each element is of the form
(word_i_id, word_j_id, x_ij)
where `x_ij` is a cooccurrence value $X_{ij}$ as presented in the
matrix defined by `build_cooccur` and the Pennington et al. (2014)
paper itself.
If `iter_callback` is not `None`, the provided function will be
called after each iteration with the learned `W` matrix so far.
Keyword arguments are passed on to the iteration step function
`run_iter`.
Returns the computed word vector matrix `W`.
"""
vocab_size = nodes
    # Word vector matrix. This matrix is (2V) * d, where V is the size
# of the corpus vocabulary and d is the dimensionality of the word
# vectors. All elements are initialized randomly in the range (-0.5,
# 0.5]. We build two word vectors for each word: one for the word as
# the main (center) word and one for the word as a context word.
#
# It is up to the client to decide what to do with the resulting two
# vectors. Pennington et al. (2014) suggest adding or averaging the
# two for each word, or discarding the context vectors.
W = (np.random.rand(vocab_size * 2, vector_size) - 0.5) / float(vector_size + 1)
# Bias terms, each associated with a single vector. An array of size
# $2V$, initialized randomly in the range (-0.5, 0.5].
biases = (np.random.rand(vocab_size * 2) - 0.5) / float(vector_size + 1)
# Training is done via adaptive gradient descent (AdaGrad). To make
# this work we need to store the sum of squares of all previous
# gradients.
#
# Like `W`, this matrix is (2V) * d.
#
# Initialize all squared gradient sums to 1 so that our initial
# adaptive learning rate is simply the global learning rate.
gradient_squared = np.ones((vocab_size * 2, vector_size),
dtype=np.float64)
# Sum of squared gradients for the bias terms.
gradient_squared_biases = np.ones(vocab_size * 2, dtype=np.float64)
# Build a reusable list from the given cooccurrence generator,
# pre-fetching all necessary data.
#
# NB: These are all views into the actual data matrices, so updates
# to them will pass on to the real data structures
#
# (We even extract the single-element biases as slices so that we
# can use them as views)
data = [(W[i_main], W[i_context + vocab_size],
biases[i_main : i_main + 1],
biases[i_context + vocab_size : i_context + vocab_size + 1],
gradient_squared[i_main], gradient_squared[i_context + vocab_size],
gradient_squared_biases[i_main : i_main + 1],
gradient_squared_biases[i_context + vocab_size
: i_context + vocab_size + 1],
cooccurrence)
for i_main, i_context, cooccurrence in cooccurrences]
for i in range(iterations):
logger.info("\tBeginning iteration %i..", i)
cost = run_iter(data, **kwargs)
logger.info("\t\tDone - cost : %f" ,cost)
if iter_callback is not None:
iter_callback(W)
return W
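# Minimal usage sketch (hypothetical two-node input):
#   pairs = [(0, 1, 1.0), (1, 0, 1.0)]
#   W = train_glove(2, pairs, vector_size=8, iterations=5)
#   # W.shape == (4, 8): rows 0-1 hold main vectors, rows 2-3 context
#   # vectors; per the docstring, callers may average the two halves.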
def save_model(W, path):
with open(path, 'wb') as vector_f:
pickle.dump(W, vector_f, protocol=2)
logger.info("Saved vectors to %s", path)
def main(arguments):
#corpus = arguments.corpus
# logger.info("Fetching vocab..")
# vocab = get_or_build(arguments.vocab_path, build_vocab, corpus)
# logger.info("Vocab has %i elements.\n", len(vocab))
logger.info("Fetching cooccurrence list..")
#corpus.seek(0)
cooccurrences = get_cooccurence(arguments.cooccur_path)
logger.info("Cooccurrence list fetch complete (%i pairs).\n",
len(cooccurrences))
if arguments.save_often:
iter_callback = partial(save_model, path=arguments.vector_path)
else:
iter_callback = None
logger.info("Beginning GloVe training..")
W = train_glove(arguments.size,cooccurrences,
iter_callback=iter_callback,
vector_size=arguments.vector_size,
iterations=arguments.iterations,
learning_rate=arguments.learning_rate)
# TODO shave off bias values, do something with context vectors
save_model(W, arguments.vector_path)
print W
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s\t%(message)s")
main(parse_args())
|
IndonesiaX/edx-platform | refs/heads/master | common/djangoapps/performance/views/__init__.py | 51 | import datetime
import json
import logging
from django.http import HttpResponse
from track.utils import DateTimeJSONEncoder
perflog = logging.getLogger("perflog")
def _get_request_header(request, header_name, default=''):
"""Helper method to get header values from a request's META dict, if present."""
if request is not None and hasattr(request, 'META') and header_name in request.META:
return request.META[header_name]
else:
return default
def _get_request_value(request, value_name, default=''):
"""Helper method to get header values from a request's GET or POST dicts, if present."""
if request is not None and hasattr(request, 'GET') and value_name in request.GET:
return request.GET[value_name]
elif request is not None and hasattr(request, 'POST') and value_name in request.POST:
return request.POST[value_name]
else:
return default
def performance_log(request):
"""
Log when POST call to "performance" URL is made by a user.
Request should provide "event" and "page" arguments.
"""
event = {
"ip": _get_request_header(request, 'REMOTE_ADDR'),
"referer": _get_request_header(request, 'HTTP_REFERER'),
"accept_language": _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE'),
"event_source": "browser",
"event": _get_request_value(request, 'event'),
"agent": _get_request_header(request, 'HTTP_USER_AGENT'),
"page": _get_request_value(request, 'page'),
"id": _get_request_value(request, 'id'),
"expgroup": _get_request_value(request, 'expgroup'),
"value": _get_request_value(request, 'value'),
"time": datetime.datetime.utcnow(),
"host": _get_request_header(request, 'SERVER_NAME'),
}
perflog.info(json.dumps(event, cls=DateTimeJSONEncoder))
return HttpResponse(status=204)
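# Illustrative sketch of one resulting log line (hypothetical values; the
# exact datetime serialization comes from DateTimeJSONEncoder):
#   {"ip": "203.0.113.7", "referer": "", "accept_language": "en-US",
#    "event_source": "browser", "event": "page_load", "agent": "Mozilla/5.0",
#    "page": "/courseware", "id": "42", "expgroup": "", "value": "1250",
#    "time": "2015-01-01T12:00:00.000000", "host": "lms.example.com"}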
|
gangadharkadam/v6_frappe | refs/heads/develop | frappe/patches/v6_2/ignore_user_permissions_if_missing.py | 42 | from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("System Settings")
system_settings = frappe.get_doc("System Settings")
system_settings.ignore_user_permissions_if_missing = 1
system_settings.flags.ignore_mandatory = 1
system_settings.save()
|
frappe/erpnext | refs/heads/develop | erpnext/education/doctype/fees/test_fees.py | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate
from frappe.utils.make_random import get_random
from erpnext.education.doctype.program.test_program import make_program_and_linked_courses
test_dependencies = ['Company']
class TestFees(unittest.TestCase):
def test_fees(self):
student = get_random("Student")
program = make_program_and_linked_courses("_Test Program 1", ["_Test Course 1", "_Test Course 2"])
fee = frappe.new_doc("Fees")
fee.posting_date = nowdate()
fee.due_date = nowdate()
fee.student = student
fee.receivable_account = "_Test Receivable - _TC"
fee.income_account = "Sales - _TC"
fee.cost_center = "_Test Cost Center - _TC"
fee.company = "_Test Company"
fee.program = program.name
fee.extend("components", [
{
"fees_category": "Tuition Fee",
"amount": 40000
},
{
"fees_category": "Transportation Fee",
"amount": 10000
}])
fee.save()
fee.submit()
gl_entries = frappe.db.sql("""
		select account, posting_date, party_type, party, cost_center, fiscal_year, voucher_type,
			voucher_no, against_voucher_type, against_voucher, company, credit, debit
			from `tabGL Entry` where voucher_type=%s and voucher_no=%s""", ("Fees", fee.name), as_dict=True)
if gl_entries[0].account == "_Test Receivable - _TC":
self.assertEqual(gl_entries[0].debit, 50000)
self.assertEqual(gl_entries[0].credit, 0)
self.assertEqual(gl_entries[1].debit, 0)
self.assertEqual(gl_entries[1].credit, 50000)
else:
self.assertEqual(gl_entries[0].credit, 50000)
self.assertEqual(gl_entries[0].debit, 0)
self.assertEqual(gl_entries[1].credit, 0)
self.assertEqual(gl_entries[1].debit, 50000)
|
indevgr/django | refs/heads/master | django/contrib/gis/admin/options.py | 52 | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.db import models
from django.contrib.gis.gdal import HAS_GDAL, OGRGeomType
from django.core.exceptions import ImproperlyConfigured
spherical_mercator_srid = 3857
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.13.1/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
3D editing).
"""
if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
if not HAS_GDAL and db_field.srid != self.map_srid:
raise ImproperlyConfigured(
"Map SRID is %s and SRID of `%s` is %s. GDAL must be "
"installed to perform the transformation."
% (self.map_srid, db_field, db_field.srid)
)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, request, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION':
collection_type = 'Any'
else:
collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {'default_lon': self.default_lon,
'default_lat': self.default_lat,
'default_zoom': self.default_zoom,
'display_wkt': self.debug or self.display_wkt,
'geom_type': OGRGeomType(db_field.geom_type),
'field_name': db_field.name,
'is_collection': is_collection,
'scrollable': self.scrollable,
'layerswitcher': self.layerswitcher,
'collection_type': collection_type,
'is_generic': db_field.geom_type == 'GEOMETRY',
'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom': self.num_zoom,
'max_zoom': self.max_zoom,
'min_zoom': self.min_zoom,
'units': self.units, # likely should get from object
'max_resolution': self.max_resolution,
'max_extent': self.max_extent,
'modifiable': self.modifiable,
'mouse_position': self.mouse_position,
'scale_text': self.scale_text,
'map_width': self.map_width,
'map_height': self.map_height,
'point_zoom': self.point_zoom,
'srid': self.map_srid,
'display_srid': self.display_srid,
'wms_url': self.wms_url,
'wms_layer': self.wms_layer,
'wms_name': self.wms_name,
'wms_options': wms_options,
'debug': self.debug,
}
return OLMap
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
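# Illustrative sketch (hypothetical model and values; not part of this
# module): per-admin overrides of the map defaults described above.
#
#   class CityAdmin(OSMGeoAdmin):
#       default_lon = 260000     # Web Mercator metres (map_srid 3857)
#       default_lat = 6250000
#       default_zoom = 6
#
#   admin.site.register(City, CityAdmin)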
|
DougBurke/astropy | refs/heads/master | astropy/constants/__init__.py | 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains astronomical and physical constants for use in Astropy or other
places.
A typical use case might be::
>>> from astropy.constants import c, m_e
>>> # ... define the mass of something you want the rest energy of as m ...
>>> m = m_e
>>> E = m * c**2
>>> E.to('MeV') # doctest: +FLOAT_CMP
<Quantity 0.510998927603161 MeV>
"""
import inspect
from contextlib import contextmanager
# Hack to make circular imports with units work
try:
from .. import units
del units
except ImportError:
pass
from .constant import Constant, EMConstant # noqa
from . import si # noqa
from . import cgs # noqa
from . import codata2014, iau2015 # noqa
from . import utils as _utils
# for updating the constants module docstring
_lines = [
'The following constants are available:\n',
'========== ============== ================ =========================',
' Name Value Unit Description',
'========== ============== ================ =========================',
]
# NOTE: Update this when default changes.
_utils._set_c(codata2014, iau2015, inspect.getmodule(inspect.currentframe()),
not_in_module_only=True, doclines=_lines, set_class=True)
_lines.append(_lines[1])
if __doc__ is not None:
__doc__ += '\n'.join(_lines)
# TODO: Re-implement in a way that is more consistent with astropy.units.
# See https://github.com/astropy/astropy/pull/7008 discussions.
@contextmanager
def set_enabled_constants(modname):
"""
Context manager to temporarily set values in the ``constants``
namespace to an older version.
See :ref:`astropy-constants-prior` for usage.
Parameters
----------
modname : {'astropyconst13'}
Name of the module containing an older version.
"""
# Re-import here because these were deleted from namespace on init.
import inspect
import warnings
from . import utils as _utils
# NOTE: Update this when default changes.
if modname == 'astropyconst13':
from .astropyconst13 import codata2010 as codata
from .astropyconst13 import iau2012 as iaudata
else:
raise ValueError(
'Context manager does not currently handle {}'.format(modname))
module = inspect.getmodule(inspect.currentframe())
# Ignore warnings about "Constant xxx already has a definition..."
with warnings.catch_warnings():
warnings.simplefilter('ignore')
_utils._set_c(codata, iaudata, module,
not_in_module_only=False, set_class=True)
try:
yield
finally:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# NOTE: Update this when default changes.
_utils._set_c(codata2014, iau2015, module,
not_in_module_only=False, set_class=True)
# Clean up namespace
del inspect
del contextmanager
del _utils
del _lines
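# Illustrative usage sketch (mirrors the context manager's docstring; shown
# as comments only, and assuming the prior-version values differ slightly):
#   from astropy import constants as const
#   with const.set_enabled_constants('astropyconst13'):
#       const.h  # CODATA 2010 Planck constant inside the block
#   const.h      # back to the CODATA 2014 default afterwards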
|
prutseltje/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_device_ntp.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_ntp
short_description: Manage NTP servers on a BIG-IP
description:
- Manage NTP servers on a BIG-IP.
version_added: 2.2
options:
ntp_servers:
description:
- A list of NTP servers to set on the device. At least one of C(ntp_servers)
or C(timezone) is required.
state:
description:
- The state of the NTP servers on the system. When C(present), guarantees
that the NTP servers are set on the system. When C(absent), removes the
specified NTP servers from the device configuration.
default: present
choices:
- absent
- present
timezone:
description:
- The timezone to set for NTP lookups. At least one of C(ntp_servers) or
C(timezone) is required.
default: UTC
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set NTP server
bigip_device_ntp:
ntp_servers:
- 192.0.2.23
password: secret
server: lb.mydomain.com
user: admin
validate_certs: no
delegate_to: localhost
- name: Set timezone
bigip_device_ntp:
password: secret
server: lb.mydomain.com
timezone: America/Los_Angeles
user: admin
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
ntp_servers:
description: The NTP servers that were set on the device
returned: changed
type: list
sample: ["192.0.2.23", "192.0.2.42"]
timezone:
description: The timezone that was set on the device
returned: changed
type: string
  sample: America/Los_Angeles
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'servers': 'ntp_servers'
}
api_attributes = [
'servers', 'timezone',
]
updatables = [
'ntp_servers', 'timezone'
]
returnables = [
'ntp_servers', 'timezone'
]
absentables = [
'ntp_servers'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(params=changed)
return True
return False
def _absent_changed_options(self):
changed = {}
for key in Parameters.absentables:
if getattr(self.want, key) is not None:
set_want = set(getattr(self.want, key))
set_have = set(getattr(self.have, key))
if set_want != set_have:
changed[key] = list(set_want)
if changed:
self.changes = Parameters(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.update()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def should_absent(self):
result = self._absent_changed_options()
if result:
return True
return False
def absent(self):
self.have = self.read_current_from_device()
if not self.should_absent():
return False
if self.module.check_mode:
return True
self.absent_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.sys.ntp.load()
resource.update(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.ntp.load()
result = resource.attrs
return Parameters(params=result)
def absent_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.sys.ntp.load()
resource.update(**params)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
ntp_servers=dict(
type='list',
),
timezone=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_one_of = [
['ntp_servers', 'timezone']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_one_of=spec.required_one_of
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
jashwanth9/Expert-recommendation-system | refs/heads/master | code/collab_content_based_tags.py | 1 | import pdb
import random
import evaluate
import warnings
import collections
import numpy as np
import cPickle as pickle
from scipy import sparse
from scipy.spatial import distance
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import cosine_similarity
def loadTrainTestData():
trainData = []
with open('../train_data/invited_info_train.txt', 'r') as f1:
for line in f1:
line = line.rstrip('\n')
sp = line.split()
trainData.append((sp[0], sp[1], int(sp[2])))
testData = []
with open('../train_data/test_nolabel.txt', 'r') as f1:
		line = f1.readline()  # skip the header line
for line in f1:
testData.append(line.rstrip('\r\n').split(','))
return trainData, testData
def loadData():
print "loading data"
question_keys = pickle.load(open('../features/question_info_keys.dat', 'rb'))
question_feats = {}
trainData = []
valData = []
# with open('../features/question_word_freq.txt', 'r') as f1:
# i = 0
# for line in f1:
# line = line.rstrip()
# wordfreq = map(int, line.split())
# question_feats[question_keys[i]] = wordfreq
# i += 1
#tf = pickle.load(open('../features/ques_charid_tfidf.dat', 'rb'))
#tfx = tf.toarray()
#print tfx.shape
topics = []
with open('../train_data/question_info.txt', 'r') as f1:
for line in f1:
topic = int(line.split()[1])
topics.append(topic)
# with open('../train_data/question_info.txt', 'r') as f1:
# i = 0
# for line in f1:
# line = line.rstrip('\n')
# sp = line.split()
# question_feats[question_keys[i]] = map(int, sp[4:7])
# i += 1
#for i in range(len(question_keys)):
#question_feats[question_keys[i]] = [1 if x == topics[i] else 0 for x in range(22)]
# question_feats[question_keys[i]] = [1, 1, 1]
with open('../features/question_char_freq.txt', 'r') as f1:
i = 0
for line in f1:
line = line.rstrip()
wordfreq = map(int, line.split())
question_feats[question_keys[i]] = wordfreq
i += 1
#tf2 = pickle.load(open('../features/ques_wordid_tfidf.dat', 'rb'))
#tfx2 = tf2.toarray()
#for i in range(len(tfx2)):
#question_feats[question_keys[i]].append(tfx2[])
# with open('../train_data/invited_info_train.txt', 'r') as f1:
# for line in f1:
# line = line.rstrip('\n')
# sp = line.split()
# trainData.append((sp[0], sp[1], int(sp[2])))
# # with open('../train_data/validate_nolabel.txt', 'r') as f1:
# # line = f1.readline()
# # for line in f1:
# # valData.append(line.rstrip('\r\n').split(','))
# #valData = [x[:2] for x in trainData]
# random.shuffle(trainData)
# valData = [x[:2] for x in trainData[:int(0.15*len(trainData))]]
# trainData = trainData[int(0.15*len(trainData)):]
# useritem_sparse = pickle.load(open('../features/useritemmatrix_normalized.dat', 'rb'))
ques_keys = pickle.load(open('../train_data/question_info_keys.dat', 'rb'))
user_keys = pickle.load(open('../train_data/user_info_keys.dat', 'rb'))
ques_keys_map = {}
user_keys_map = {}
for i in range(len(user_keys)):
user_keys_map[user_keys[i]] = i
for i in range(len(ques_keys)):
ques_keys_map[ques_keys[i]] = i
return question_feats, ques_keys_map, user_keys_map, user_keys
def getUserItemMatrix(trainData, ques_keys_map, user_keys_map):
print "getting useritem matrix"
useritem = np.zeros(shape=(len(user_keys_map), len(ques_keys_map)))
for qid, uid, val in trainData:
if val == '1' or val==1:
useritem[user_keys_map[uid]][ques_keys_map[qid]] = 1
#posc+=1
else:
useritem[user_keys_map[uid]][ques_keys_map[qid]] = -0.125
uisparse = sparse.csr_matrix(useritem)
return uisparse
def getModels(trainData, question_feats):
print "getting models"
userX = {}
userY = {}
for qid, uid, val in trainData:
if uid not in userX:
userX[uid] = []
userY[uid] = []
userX[uid].append(question_feats[qid])
userY[uid].append(val)
nbmodels = {}
for user in userX:
nbmodels[user] = MultinomialNB()
nbmodels[user].fit(userX[user], userY[user])
#print "dumping"
#pickle.dump(nbmodels, open('../features/usermodels_queschartfidf.dat', 'wb'))
#exit()
return nbmodels
def getPredictions(valData, nbmodels, question_feats, useritem, user_keys_map, user_keys, k):
print "getting predictions"
similarities = cosine_similarity(useritem)
print similarities.shape
predictions = []
i = 0
for qid, uid in valData:
# print i
# i += 1
if uid not in nbmodels:
predictions.append(0)
continue
score = 0
y = 0
for nbindex in similarities[user_keys_map[uid]].argsort()[(-k-1):]:
if user_keys[nbindex] not in nbmodels:
y+=1
sc = 0
continue
prob = nbmodels[user_keys[nbindex]].predict_proba([question_feats[qid]])
if nbmodels[user_keys[nbindex]].classes_[0] == 1:
sc = prob[0][0]
elif len(prob[0])>1:
sc = prob[0][1]
else:
y+=1
sc = 0
score += sc
		alt_score = score/(k-y) if k != y else 0  # guard: every neighbour may have lacked a model
score = score/k
# print("score:- ", score)
# print("altscore:-", alt_score)
prob = nbmodels[uid].predict_proba([question_feats[qid]])
# print prob
if nbmodels[uid].classes_[0] == 1:
predictions.append(prob[0][0]*0.75 + alt_score*0.43)
elif len(prob[0])>1:
predictions.append(prob[0][1]*0.75 + alt_score*0.5)
else:
predictions.append(alt_score*3)
#if predictions[-1] <= 0:
#predictions[-1] = 0.111
print max(predictions)
return predictions
def run(trainData, valData):
k = 180
question_feats, ques_keys_map, user_keys_map, user_keys = loadData()
useritem_sparse = getUserItemMatrix(trainData, ques_keys_map, user_keys_map)
nbmodels = getModels(trainData, question_feats)
predictions = getPredictions(valData, nbmodels, question_feats, useritem_sparse, user_keys_map, user_keys, k)
fname = '../validation/t_collab_alt_scorec.csv'
with open(fname , 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(predictions)):
f1.write(valData[i][0]+','+valData[i][1]+','+str(predictions[i])+'\n')
return
# return evaluate.ndcg(fname)
if __name__ == "__main__":
trainData, testData = loadTrainTestData()
run(trainData, testData) |
nicoboss/Floatmotion | refs/heads/master | OpenGL/GL/NV/register_combiners2.py | 9 | '''OpenGL extension NV.register_combiners2
This module customises the behaviour of the
OpenGL.raw.GL.NV.register_combiners2 to provide a more
Python-friendly API
Overview (from the spec)
The NV_register_combiners extension provides a powerful fragment
coloring mechanism. This specification extends the register combiners
functionality to support more color constant values that are unique
for each general combiner stage.
The base register combiners functionality supports only two color
constants. These two constants are available in every general
combiner stage and in the final combiner.
When many general combiner stages are supported, more than two
unique color constants are often required. The obvious way to extend
the register combiners is to add several more color constant
registers. But adding new unique color constant registers is
expensive for hardware implementation because every color constant
register must be available as an input to any stage.
In practice however, it is the total set of general combiner stages
that requires more color constants, not each and every individual
general combiner stage. Each individual general combiner stage
typically requires only one or two color constants.
By keeping two color constant registers but making these two registers
contain two unique color constant values for each general combiner
stage, the hardware expense of supporting multiple color constants
is minimized. Additionally, this scheme scales appropriately as
more general combiner stages are added.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/register_combiners2.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.register_combiners2 import *
from OpenGL.raw.GL.NV.register_combiners2 import _EXTENSION_NAME
def glInitRegisterCombiners2NV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glCombinerStageParameterfvNV.params size not checked against 'pname'
glCombinerStageParameterfvNV=wrapper.wrapper(glCombinerStageParameterfvNV).setInputArraySize(
'params', None
)
glGetCombinerStageParameterfvNV=wrapper.wrapper(glGetCombinerStageParameterfvNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
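# A minimal usage sketch of the per-stage constants this extension exposes.
# GL_COMBINER0_NV and GL_CONSTANT_COLOR0_NV come from the companion
# NV_register_combiners extension, and the RGBA value is purely illustrative:
#     if glInitRegisterCombiners2NV():
#         glCombinerStageParameterfvNV(GL_COMBINER0_NV,
#                                      GL_CONSTANT_COLOR0_NV,
#                                      (0.25, 0.5, 0.75, 1.0))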
### END AUTOGENERATED SECTION |
richbs/colourlens | refs/heads/master | colourlens/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2918 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
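# A minimal usage sketch (assuming `data` holds raw bytes to probe):
#     prober = EUCJPProber()
#     prober.feed(data)
#     if prober.get_state() == constants.eFoundIt:
#         print prober.get_charset_name(), prober.get_confidence()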
|
tripzero/snetcam | refs/heads/master | snetcam/test_color_filter.py | 1 | #!/usr/bin/env python
import cv2
try:
    from .avg_color import color_filter
except (ImportError, ValueError):  # allow running as a standalone script outside the package
    from avg_color import color_filter
import numpy as np
def nothing(x):
pass
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('file_to_process', help="file_to_process")
args = parser.parse_args()
img = cv2.imread(args.file_to_process, cv2.IMREAD_COLOR)
if img is None:
raise Exception("failed to read image")
# Starting with 100's to prevent error while masking
h,s,v = 100,100,100
cv2.namedWindow('result')
# Creating track bar
cv2.createTrackbar('h', 'result',0, 179, nothing)
cv2.createTrackbar('s', 'result',0, 255, nothing)
cv2.createTrackbar('v', 'result',0, 255, nothing)
cv2.imshow("original", img)
upper = False
upperhsv = (180, 255, 255)
lowerhsv = (0, 0, 0)
while(1):
h = cv2.getTrackbarPos('h','result')
s = cv2.getTrackbarPos('s','result')
v = cv2.getTrackbarPos('v','result')
if upper:
result = color_filter(img, lowerhsv, (h, s, v))
else:
result = color_filter(img, (h, s, v), upperhsv)
cv2.imshow("result", result)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
		elif k == 24:  # Ctrl+X: save the current HSV as one bound, then switch to the other
			if upper:
				upperhsv = (h, s, v)
				print("upper hsv: {}".format(upperhsv))
			else:
				lowerhsv = (h, s, v)
				print("lower hsv: {}".format(lowerhsv))
			upper = not upper  # toggle; otherwise `upper` stays False and upperhsv can never be set
cv2.destroyAllWindows()
# Creating a window for later use
|
throwable-one/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/tests/regressiontests/templates/templatetags/__init__.py | 12133432 | |
samatdav/zulip | refs/heads/master | tools/__init__.py | 12133432 | |
aleksandra-tarkowska/django | refs/heads/master | tests/conditional_processing/__init__.py | 12133432 | |
meskobalazs/django-bootstrap | refs/heads/master | main/__init__.py | 12133432 | |
wkschwartz/django | refs/heads/stable/3.2.x | django/contrib/gis/db/backends/mysql/operations.py | 16 | from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geos.geometry import GEOSGeometryBase
from django.contrib.gis.geos.prototypes.io import wkb_r
from django.contrib.gis.measure import Distance
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
name = 'mysql'
geom_func_prefix = 'ST_'
Adapter = WKTAdapter
@cached_property
def mariadb(self):
return self.connection.mysql_is_mariadb
@cached_property
def mysql(self):
return not self.connection.mysql_is_mariadb
@cached_property
def select(self):
return self.geom_func_prefix + 'AsBinary(%s)'
@cached_property
def from_text(self):
return self.geom_func_prefix + 'GeomFromText'
@cached_property
def gis_operators(self):
operators = {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # ...
'contained': SpatialOperator(func='MBRWithin'), # ...
'contains': SpatialOperator(func='ST_Contains'),
'crosses': SpatialOperator(func='ST_Crosses'),
'disjoint': SpatialOperator(func='ST_Disjoint'),
'equals': SpatialOperator(func='ST_Equals'),
'exact': SpatialOperator(func='ST_Equals'),
'intersects': SpatialOperator(func='ST_Intersects'),
'overlaps': SpatialOperator(func='ST_Overlaps'),
'same_as': SpatialOperator(func='ST_Equals'),
'touches': SpatialOperator(func='ST_Touches'),
'within': SpatialOperator(func='ST_Within'),
}
if self.connection.mysql_is_mariadb:
operators['relate'] = SpatialOperator(func='ST_Relate')
return operators
disallowed_aggregates = (
models.Collect, models.Extent, models.Extent3D, models.MakeLine,
models.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGML', 'AsKML', 'AsSVG', 'Azimuth', 'BoundingCircle',
'ForcePolygonCW', 'GeometryDistance', 'LineLocatePoint',
'MakeValid', 'MemSize', 'Perimeter', 'PointOnSurface', 'Reverse',
'Scale', 'SnapToGrid', 'Transform', 'Translate',
}
if self.connection.mysql_is_mariadb:
unsupported.remove('PointOnSurface')
unsupported.update({'GeoHash', 'IsValid'})
if self.connection.mysql_version < (10, 2, 4):
unsupported.add('AsGeoJSON')
elif self.connection.mysql_version < (5, 7, 5):
unsupported.update({'AsGeoJSON', 'GeoHash', 'IsValid'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_distance(self, f, value, lookup_type):
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
raise ValueError(
'Only numeric values of degree units are allowed on '
'geodetic distance queries.'
)
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geometry_converter(self, expression):
read = wkb_r().read
srid = expression.output_field.srid
if srid == -1:
srid = None
geom_class = expression.output_field.geom_class
def converter(value, expression, connection):
if value is not None:
geom = GEOSGeometryBase(read(memoryview(value)), geom_class)
if srid:
geom.srid = srid
return geom
return converter
|
hpcugent/graphite-web | refs/heads/HPC | webapp/graphite/jobs.py | 1 | """
Interacts with the job database/storage.
At the moment this uses hard-coded data, but it should eventually interact with the real database/storage.
"""
import time
from vsc.utils.dateandtime import timestamp_parser
from sqlalchemy import create_engine, MetaData, Table, select
from graphite.logger import log
engine = create_engine('postgresql://silox:sup@localhost/hpc')
metadata = MetaData(engine)
jobs = Table('job', metadata, autoload=True)
def get_jobs(user, limit=False, query=False, cluster=False, start=False, end=False):
"""
Returns all the jobs a user ever has submitted
	If the limit parameter is set, return only the most recent limit number of jobs
"""
# Build the select query
s = select([jobs.c.name, jobs.c.jobname])
if query:
s = s.where(jobs.c.name.ilike('%' + query + '%') | jobs.c.jobname.ilike('%' + query + '%'))
	# Users who may see all jobs still get capped at 300; everyone else only sees their own jobs
if user.has_perm('account.can_see_all'):
s = s.limit(300)
else:
s = s.where(jobs.c.userr == user.username)
# Extra limitation parameters
if cluster:
s = s.where(jobs.c.clustername.ilike('%' + cluster + '%'))
if start:
starttime = timestamp_parser(start)
s = s.where(jobs.c.start >= starttime)
if end:
endtime = timestamp_parser(end)
s = s.where(jobs.c.lasttime <= endtime)
# Order the jobs
s = s.order_by(jobs.c.lasttime.desc())
# Did we limit the jobs? Return only the last limit number of jobs!
if limit:
s = s.limit(limit)
# Fetch the results and return the ID's as a list
result = engine.execute(s).fetchall()
return [(
str(job[0].replace('.', '-')),
str(job[1]),
str(job[1] + " (" + job[0].split('.')[0] + " - " + job[0].split('.')[2] + ")")
) for job in result]
def has_job(user, job):
job = job.replace('-', '.')
	# `and` does not combine SQLAlchemy clauses; use `&` so both conditions reach the query
	s = select([jobs.c.name]).where((jobs.c.name == job) & (jobs.c.userr == user.username))
result = engine.execute(s).fetchall()
return len(result) > 0
def get_job_timerange(job):
"""
Returns specific job timerange in the tuple (startTime, endTime)
"""
s = select([jobs.c.start, jobs.c.lasttime]).where(jobs.c.name == job.replace('-', '.'))
result = engine.execute(s).first()
	if result is not None and len(result) > 1:
		return (timestamp(result[0]), timestamp(result[1]))
	else:
		# Log an exception: a job must have a start and a last time
		log.exception("No timerange found for job %s", job)
raise NoTimeRangeForJobException
def get_nodes(job):
"""
Returns all the nodes a job has run on
"""
s = select([jobs.c.exec_host]).where(jobs.c.name == job.replace('-', '.'))
result = engine.execute(s).first()
	if result is not None and len(result) > 0:
nodestring = result[0].split('.', 1)[0]
nodes = nodestring.split('+')
nodes = [str(node.split('/', 1)[0]) for node in nodes]
return nodes
else:
		# Log an exception: a job must have nodes
		log.exception("No nodes found for job %s", job)
raise NoNodesForJobException
class NoNodesForJobException(Exception):
	''' Error raised when no nodes are found for a job '''
pass
class NoTimeRangeForJobException(Exception):
	''' Error raised when no time range is found for a job '''
pass
def timestamp(datetime):
"Convert a datetime object into epoch time"
return time.mktime( datetime.timetuple() )
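# A minimal usage sketch (the user object and job id below are hypothetical):
#     jobs_list = get_jobs(request.user, limit=20, cluster='delcatty')
#     start, end = get_job_timerange('1234567.master.cluster')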
|
nju520/youtube-dl | refs/heads/master | youtube_dl/extractor/rai.py | 76 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
parse_duration,
unified_strdate,
)
class RaiIE(InfoExtractor):
_VALID_URL = r'(?P<url>(?P<host>http://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it))/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html)'
_TESTS = [
{
'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
'md5': 'c064c0b2d09c278fb293116ef5d0a32d',
'info_dict': {
'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
'ext': 'mp4',
'title': 'Report del 07/04/2014',
'description': 'md5:f27c544694cacb46a078db84ec35d2d9',
'upload_date': '20140407',
'duration': 6160,
}
},
{
'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
'md5': '8bb9c151924ce241b74dd52ef29ceafa',
'info_dict': {
'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
'ext': 'mp4',
'title': 'TG PRIMO TEMPO',
'description': '',
'upload_date': '20140612',
'duration': 1758,
},
'skip': 'Error 404',
},
{
'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html',
'md5': '35cf7c229f22eeef43e48b5cf923bef0',
'info_dict': {
'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13',
'ext': 'mp4',
'title': 'State of the Net, Antonella La Carpia: regole virali',
'description': 'md5:b0ba04a324126903e3da7763272ae63c',
'upload_date': '20140613',
},
'skip': 'Error 404',
},
{
'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html',
'md5': '35694f062977fe6619943f08ed935730',
'info_dict': {
'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132',
'ext': 'mp4',
'title': 'Alluvione in Sardegna e dissesto idrogeologico',
'description': 'Edizione delle ore 20:30 ',
}
},
{
'url': 'http://www.ilcandidato.rai.it/dl/ray/media/Il-Candidato---Primo-episodio-Le-Primarie-28e5525a-b495-45e8-a7c3-bc48ba45d2b6.html',
'md5': '02b64456f7cc09f96ff14e7dd489017e',
'info_dict': {
'id': '28e5525a-b495-45e8-a7c3-bc48ba45d2b6',
'ext': 'flv',
'title': 'Il Candidato - Primo episodio: "Le Primarie"',
'description': 'Primo appuntamento con "Il candidato" con Filippo Timi, alias Piero Zucca presidente!',
'uploader': 'RaiTre',
}
}
]
def _extract_relinker_url(self, webpage):
return self._proto_relative_url(self._search_regex(
[r'name="videourl" content="([^"]+)"', r'var\s+videoURL(?:_MP4)?\s*=\s*"([^"]+)"'],
webpage, 'relinker url', default=None))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
host = mobj.group('host')
webpage = self._download_webpage(url, video_id)
relinker_url = self._extract_relinker_url(webpage)
if not relinker_url:
iframe_path = self._search_regex(
r'<iframe[^>]+src="/?(dl/[^"]+\?iframe\b[^"]*)"',
webpage, 'iframe')
webpage = self._download_webpage(
'%s/%s' % (host, iframe_path), video_id)
relinker_url = self._extract_relinker_url(webpage)
relinker = self._download_json(
'%s&output=47' % relinker_url, video_id)
media_url = relinker['video'][0]
ct = relinker.get('ct')
if ct == 'f4m':
formats = self._extract_f4m_formats(
media_url + '&hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id)
else:
formats = [{
'url': media_url,
'format_id': ct,
}]
json_link = self._html_search_meta(
'jsonlink', webpage, 'JSON link', default=None)
if json_link:
media = self._download_json(
host + json_link, video_id, 'Downloading video JSON')
title = media.get('name')
description = media.get('desc')
thumbnail = media.get('image_300') or media.get('image_medium') or media.get('image')
duration = parse_duration(media.get('length'))
uploader = media.get('author')
upload_date = unified_strdate(media.get('date'))
else:
title = (self._search_regex(
r'var\s+videoTitolo\s*=\s*"(.+?)";',
webpage, 'title', default=None) or self._og_search_title(webpage)).replace('\\"', '"')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
duration = None
uploader = self._html_search_meta('Editore', webpage, 'uploader')
upload_date = unified_strdate(self._html_search_meta(
'item-date', webpage, 'upload date', default=None))
subtitles = self.extract_subtitles(video_id, webpage)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
def _get_subtitles(self, video_id, webpage):
subtitles = {}
m = re.search(r'<meta name="closedcaption" content="(?P<captions>[^"]+)"', webpage)
if m:
captions = m.group('captions')
STL_EXT = '.stl'
SRT_EXT = '.srt'
if captions.endswith(STL_EXT):
captions = captions[:-len(STL_EXT)] + SRT_EXT
subtitles['it'] = [{
'ext': 'srt',
'url': 'http://www.rai.tv%s' % compat_urllib_parse.quote(captions),
}]
return subtitles
|
UITools/saleor | refs/heads/master | saleor/order/migrations/0043_auto_20180322_0655.py | 1 | # Generated by Django 2.0.3 on 2018-03-22 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0042_auto_20180227_0436'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-pk',), 'permissions': (('view_order', 'Can view orders'), ('edit_order', 'Can edit orders'))},
),
migrations.AlterField(
model_name='order',
name='language_code',
field=models.CharField(default='en', max_length=35),
),
]
|
3dfxmadscientist/CBSS | refs/heads/master | addons/portal/__openerp__.py | 55 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Portal',
'version': '1.0',
'depends': [
'base',
'share',
'auth_signup',
],
'author': 'OpenERP SA',
'category': 'Portal',
'description': """
Customize access to your OpenERP database to external users by creating portals.
================================================================================
A portal defines a specific user menu and access rights for its members. This
menu can be seen by portal members, anonymous users and any other user that
has access to technical features (e.g. the administrator).
Also, each portal member is linked to a specific partner.
The module also associates user groups to the portal users (adding a group in
the portal automatically adds it to the portal users, etc). That feature is
very handy when used in combination with the module 'share'.
""",
'website': 'http://www.openerp.com',
'data': [
'portal_data.xml',
'portal_view.xml',
'wizard/portal_wizard_view.xml',
'wizard/share_wizard_view.xml',
'acquirer_view.xml',
'security/ir.model.access.csv',
'security/portal_security.xml',
],
'demo': ['portal_demo.xml'],
'css': ['static/src/css/portal.css'],
'auto_install': True,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nickpegg/ciscolib | refs/heads/master | ciscolib/helpers.py | 2 | import re
def shorten_int_name(interface_name):
"""
Returns the Cisco shortened interface name from a full one.
If the full interface name is invalid, this will return None
"""
short = None
    regex = r"(\w{2}).*?(\d+(?:/\d+)?(?:/\d+)?)"
match = re.match(regex, interface_name)
if match is not None:
short = ""
for group in match.groups():
short += group
return short
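# A few illustrative conversions (regex-derived, not an exhaustive mapping):
#     shorten_int_name("GigabitEthernet0/1")      -> "Gi0/1"
#     shorten_int_name("TenGigabitEthernet1/0/1") -> "Te1/0/1"
#     shorten_int_name("bogus")                   -> None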
|
Qgap/zulip | refs/heads/master | zerver/lib/test_helpers.py | 114 | from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient, get_user_profile_by_email,
)
from zerver.models import (
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
old_f = getattr(obj, name)
setattr(obj, name, f)
yield
setattr(obj, name, old_f)
@contextmanager
def simulated_queue_client(client):
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
cache_queries = []
def my_cache_get(key, cache_name=None):
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = []
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
queries.append({
'sql': self.mogrify(sql, params),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
TimeTrackingCursor.execute = cursor_execute
def cursor_executemany(self, sql, params=()):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
TimeTrackingCursor.executemany = cursor_executemany
yield queries
TimeTrackingCursor.execute = old_execute
TimeTrackingCursor.executemany = old_executemany
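# A minimal usage sketch inside a test (the query count is hypothetical):
#     with queries_captured() as queries:
#         self.client.post("/json/get_old_messages", {})
#     self.assert_length(queries, 20)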
def find_key_by_email(address):
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyObject:
pass
class DummyTornadoRequest:
def __init__(self):
self.connection = DummyObject()
self.connection.stream = DummyStream()
class DummyHandler(object):
def __init__(self, assert_callback):
self.assert_callback = assert_callback
self.request = DummyTornadoRequest()
# Mocks RequestHandler.async_callback, which wraps a callback to
# handle exceptions. We return the callback as-is.
def async_callback(self, cb):
return cb
def write(self, response):
        raise NotImplementedError  # raising the NotImplemented constant would itself be a TypeError
def zulip_finish(self, response, *ignore):
if self.assert_callback:
self.assert_callback(response)
class DummySession(object):
session_key = "0"
class DummyStream:
def closed(self):
return False
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile, assert_callback=None):
self.REQUEST = self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler(assert_callback)
self.session = DummySession()
self._log_data = {}
self.META = {'PATH_INFO': 'test'}
self._log_data = {}
class AuthedTestCase(TestCase):
# Helper because self.client.patch annoying requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.delete(url, info, **kwargs)
def login(self, email, password=None):
if password is None:
password = initial_password(email)
return self.client.post('/accounts/login/',
{'username':email, 'password':password})
def register(self, username, password, domain="zulip.com"):
self.client.post('/accounts/home/',
{'email': username + "@" + domain})
return self.submit_reg_form_for_user(username, password, domain=domain)
def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
"""
Stage two of the two-step registration process.
If things are working correctly the account should be fully
registered after this call.
"""
return self.client.post('/accounts/register/',
{'full_name': username, 'password': password,
'key': find_key_by_email(username + '@' + domain),
'terms': True})
def get_api_key(self, email):
if email not in API_KEYS:
API_KEYS[email] = get_user_profile_by_email(email).api_key
return API_KEYS[email]
def api_auth(self, email):
credentials = "%s:%s" % (email, self.get_api_key(email))
return {
'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
}
def get_streams(self, email):
"""
Helper function to get the stream names for a user
"""
user_profile = get_user_profile_by_email(email)
subs = Subscription.objects.filter(
user_profile = user_profile,
active = True,
recipient__type = Recipient.STREAM)
return [get_display_recipient(sub.recipient) for sub in subs]
def send_message(self, sender_name, recipient_list, message_type,
content="test content", subject="test", **kwargs):
sender = get_user_profile_by_email(sender_name)
if message_type == Recipient.PERSONAL:
message_type_name = "private"
else:
message_type_name = "stream"
if isinstance(recipient_list, basestring):
recipient_list = [recipient_list]
(sending_client, _) = Client.objects.get_or_create(name="test suite")
return check_send_message(
sender, sending_client, message_type_name, recipient_list, subject,
content, forged=False, forged_timestamp=None,
forwarder_user_profile=sender, realm=sender.realm, **kwargs)
def get_old_messages(self, anchor=1, num_before=100, num_after=100):
post_params = {"anchor": anchor, "num_before": num_before,
"num_after": num_after}
result = self.client.post("/json/get_old_messages", dict(post_params))
data = ujson.loads(result.content)
return data['messages']
def users_subscribed_to_stream(self, stream_name, realm_domain):
realm = Realm.objects.get(domain=realm_domain)
stream = Stream.objects.get(name=stream_name, realm=realm)
recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
return [subscription.user_profile for subscription in subscriptions]
def assert_json_success(self, result):
"""
Successful POSTs return a 200 and JSON of the form {"result": "success",
"msg": ""}.
"""
self.assertEqual(result.status_code, 200, result)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "success")
# We have a msg key for consistency with errors, but it typically has an
# empty value.
self.assertIn("msg", json)
return json
def get_json_error(self, result, status_code=400):
self.assertEqual(result.status_code, status_code)
json = ujson.loads(result.content)
self.assertEqual(json.get("result"), "error")
return json['msg']
def assert_json_error(self, result, msg, status_code=400):
"""
Invalid POSTs return an error status code and JSON of the form
{"result": "error", "msg": "reason"}.
"""
self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
def assert_length(self, queries, count, exact=False):
actual_count = len(queries)
if exact:
return self.assertTrue(actual_count == count,
"len(%s) == %s, != %s" % (queries, actual_count, count))
return self.assertTrue(actual_count <= count,
"len(%s) == %s, > %s" % (queries, actual_count, count))
def assert_json_error_contains(self, result, msg_substring):
self.assertIn(msg_substring, self.get_json_error(result))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action,file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = Realm.objects.get(domain=resolve_email_to_domain(email))
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
if stream_name != None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
|
pamfilos/invenio | refs/heads/master-sql-fixes | modules/bibformat/lib/elements/bfe_date.py | 25 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints imprint publication date
"""
__revision__ = "$Id$"
import time
def format_element(bfo, date_format='%d %B %Y'):
"""
    Prints the imprint publication date.
    The <code>date_format</code> parameter allows specifying the string representation of the output.
The format string has the same behaviour as the strftime() function::
<pre>Eg: 1982-09-24 07:32:00
"%d %B %Y" -> 24 September 1982
"%I:%M" -> 07:32
</pre>
@see: pagination.py, publisher.py, reprints.py, imprint.py, place.py
@param date_format date format
"""
date = bfo.field('260__c')
if date_format != '':
try:
date_time = time.strptime(date, "%Y-%m-%d")
return time.strftime(date_format, date_time)
except ValueError:
return date
else:
return date
|
NeovaHealth/odoo | refs/heads/8.0 | addons/website_membership/controllers/__init__.py | 7372 | import main
|
pra85/calibre | refs/heads/master | src/html5lib/treewalkers/simpletree.py | 27 | import gettext
_ = gettext.gettext
import _base
class TreeWalker(_base.NonRecursiveTreeWalker):
"""Given that simpletree has no performant way of getting a node's
next sibling, this implementation returns "nodes" as tuples with the
following content:
1. The parent Node (Element, Document or DocumentFragment)
2. The child index of the current node in its parent's children list
3. A list used as a stack of all ancestors. It is a pair tuple whose
first item is a parent Node and second item is a child index.
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Node
parent, idx, parents = node
node = parent.childNodes[idx]
# testing node.type allows us not to import treebuilders.simpletree
if node.type in (1, 2): # Document or DocumentFragment
return (_base.DOCUMENT,)
elif node.type == 3: # DocumentType
return _base.DOCTYPE, node.name, node.publicId, node.systemId
elif node.type == 4: # TextNode
return _base.TEXT, node.value
elif node.type == 5: # Element
return (_base.ELEMENT, node.namespace, node.name,
node.attributes.items(), node.hasContent())
elif node.type == 6: # CommentNode
return _base.COMMENT, node.data
else:
            return _base.UNKNOWN, node.type  # `_node` was a typo; the constant lives in _base
def getFirstChild(self, node):
if isinstance(node, tuple): # It might be the root Node
parent, idx, parents = node
parents.append((parent, idx))
node = parent.childNodes[idx]
else:
parents = []
assert node.hasContent(), "Node has no children"
return (node, 0, parents)
def getNextSibling(self, node):
assert isinstance(node, tuple), "Node is not a tuple: " + str(node)
parent, idx, parents = node
idx += 1
if len(parent.childNodes) > idx:
return (parent, idx, parents)
else:
return None
def getParentNode(self, node):
assert isinstance(node, tuple)
parent, idx, parents = node
if parents:
parent, idx = parents.pop()
return parent, idx, parents
else:
# HACK: We could return ``parent`` but None will stop the algorithm the same way
return None
|
sisl/Chimp | refs/heads/master | chimp/simulators/gym/__init__.py | 1 | """
Import OpenAI Gym Wrapper
"""
|
vechorko/django-filer | refs/heads/develop | filer/models/tools.py | 46 | #-*- coding: utf-8 -*-
from filer.models import Clipboard
def discard_clipboard(clipboard):
clipboard.files.clear()
def delete_clipboard(clipboard):
for file_obj in clipboard.files.all():
file_obj.delete()
def get_user_clipboard(user):
if user.is_authenticated():
clipboard = Clipboard.objects.get_or_create(user=user)[0]
return clipboard
def move_file_to_clipboard(files, clipboard):
count = 0
for file_obj in files:
if clipboard.append_file(file_obj):
file_obj.folder = None
file_obj.save()
count += 1
return count
def move_files_from_clipboard_to_folder(clipboard, folder):
return move_files_to_folder(clipboard.files.all(), folder)
def move_files_to_folder(files, folder):
for file_obj in files:
file_obj.folder = folder
file_obj.save()
return True
|
ziir/lumbergh | refs/heads/master | vendor-local/lib/python/south/tests/circular_b/migrations/0001_first.py | 174 | from south.db import db
from django.db import models
class Migration:
depends_on = [('circular_a', '0001_first')]
def forwards(self):
pass
def backwards(self):
pass
|
Horrowind/mockup | refs/heads/master | tools/wasm-sourcemap.py | 1 | #!/usr/bin/env python
# Copyright 2018 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Utility tools that extracts DWARF information encoded in a wasm output
produced by the LLVM tools, and encodes it as a wasm source map. Additionally,
it can collect original sources, change files prefixes, and strip debug
sections from a wasm file.
"""
import argparse
from collections import OrderedDict, namedtuple
import json
import logging
from math import floor, log
import os
import re
from subprocess import Popen, PIPE
import sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
logger = logging.getLogger('wasm-sourcemap')
def asstr(s):
if str is bytes:
if isinstance(s, unicode):
return s.encode('utf-8')
elif isinstance(s, bytes):
return s.decode('utf-8')
return s
def parse_args():
parser = argparse.ArgumentParser(prog='wasm-sourcemap.py', description=__doc__)
parser.add_argument('wasm', help='wasm file')
parser.add_argument('-o', '--output', help='output source map')
parser.add_argument('-p', '--prefix', nargs='*', help='replace source debug filename prefix for source map', default=[])
parser.add_argument('-s', '--sources', action='store_true', help='read and embed source files from file system into source map')
parser.add_argument('-l', '--load-prefix', nargs='*', help='replace source debug filename prefix for reading sources from file system (see also --sources)', default=[])
parser.add_argument('-w', nargs='?', help='set output wasm file')
parser.add_argument('-x', '--strip', action='store_true', help='removes debug and linking sections')
parser.add_argument('-u', '--source-map-url', nargs='?', help='specifies sourceMappingURL section contest')
parser.add_argument('--dwarfdump', help="path to llvm-dwarfdump executable")
parser.add_argument('--dwarfdump-output', nargs='?', help=argparse.SUPPRESS)
return parser.parse_args()
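# An illustrative invocation (paths and URL are placeholders):
#     ./wasm-sourcemap.py module.wasm -o module.wasm.map \
#         --dwarfdump=llvm-dwarfdump -u module.wasm.map -w module.wasm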
class Prefixes:
def __init__(self, args):
prefixes = []
for p in args:
if '=' in p:
prefix, replacement = p.split('=')
prefixes.append({'prefix': prefix, 'replacement': replacement})
else:
prefixes.append({'prefix': p, 'replacement': None})
self.prefixes = prefixes
self.cache = {}
def resolve(self, name):
if name in self.cache:
return self.cache[name]
result = name
for p in self.prefixes:
if name.startswith(p['prefix']):
if p['replacement'] is None:
result = name[len(p['prefix'])::]
else:
result = p['replacement'] + name[len(p['prefix'])::]
break
self.cache[name] = result
return result
# SourceMapPrefixes contains resolver for file names that are:
# - "sources" is for names that output to source maps JSON
# - "load" is for paths that used to load source text
SourceMapPrefixes = namedtuple('SourceMapPrefixes', 'sources, load')
def encode_vlq(n):
VLQ_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
x = (n << 1) if n >= 0 else ((-n << 1) + 1)
result = ""
while x > 31:
result = result + VLQ_CHARS[32 + (x & 31)]
x = x >> 5
return result + VLQ_CHARS[x]
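# Worked examples of the base64 VLQ scheme used in source map "mappings"
# (the sign bit is the LSB, the payload shifts left by one):
#     encode_vlq(0)  -> "A"    encode_vlq(1)  -> "C"
#     encode_vlq(-1) -> "D"    encode_vlq(16) -> "gB"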
def read_var_uint(wasm, pos):
n = 0
shift = 0
b = ord(wasm[pos:pos + 1])
pos = pos + 1
while b >= 128:
n = n | ((b - 128) << shift)
b = ord(wasm[pos:pos + 1])
pos = pos + 1
shift += 7
return n + (b << shift), pos
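# Worked LEB128 example: the bytes e5 8e 26 decode to
# 0x65 + (0x0e << 7) + (0x26 << 14) = 624485, with `pos` advanced past the three bytes.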
def strip_debug_sections(wasm):
logger.debug('Strip debug sections')
pos = 8
stripped = wasm[:pos]
while pos < len(wasm):
section_start = pos
section_id, pos_ = read_var_uint(wasm, pos)
section_size, section_body = read_var_uint(wasm, pos_)
pos = section_body + section_size
if section_id == 0:
name_len, name_pos = read_var_uint(wasm, section_body)
name_end = name_pos + name_len
name = wasm[name_pos:name_end]
if name == "linking" or name == "sourceMappingURL" or name.startswith("reloc..debug_") or name.startswith(".debug_"):
continue # skip debug related sections
stripped = stripped + wasm[section_start:pos]
return stripped
def encode_uint_var(n):
result = bytearray()
while n > 127:
result.append(128 | (n & 127))
n = n >> 7
result.append(n)
return bytes(result)
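# Worked example (the inverse of read_var_uint above):
#     encode_uint_var(624485) -> b'\xe5\x8e\x26'
#     encode_uint_var(8)      -> b'\x08'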
def append_source_mapping(wasm, url):
logger.debug('Append sourceMappingURL section')
section_name = "sourceMappingURL"
section_content = encode_uint_var(len(section_name)) + section_name + encode_uint_var(len(url)) + url
return wasm + encode_uint_var(0) + encode_uint_var(len(section_content)) + section_content
def get_code_section_offset(wasm):
logger.debug('Read sections index')
pos = 8
while pos < len(wasm):
section_id, pos_ = read_var_uint(wasm, pos)
section_size, pos = read_var_uint(wasm, pos_)
if section_id == 10:
return pos
pos = pos + section_size
def remove_dead_entries(entries):
# Remove entries for dead functions. It is a heuristics to ignore data if the
# function starting address near to 0 (is equal to its size field length).
block_start = 0
cur_entry = 0
while cur_entry < len(entries):
if not entries[cur_entry]['eos']:
cur_entry += 1
continue
fn_start = entries[block_start]['address']
# Calculate the LEB encoded function size (including size field)
fn_size_length = floor(log(entries[cur_entry]['address'] - fn_start + 1, 128)) + 1
min_live_offset = 1 + fn_size_length # 1 byte is for code section entries
if fn_start < min_live_offset:
# Remove dead code debug info block.
del entries[block_start:cur_entry + 1]
cur_entry = block_start
continue
cur_entry += 1
block_start = cur_entry
def read_dwarf_entries(wasm, options):
if options.dwarfdump_output:
output = open(options.dwarfdump_output, 'r').read()
elif options.dwarfdump:
logger.debug('Reading DWARF information from %s' % wasm)
if not os.path.exists(options.dwarfdump):
logger.error('llvm-dwarfdump not found: ' + options.dwarfdump)
sys.exit(1)
process = Popen([options.dwarfdump, "-debug-info", "-debug-line", wasm], stdout=PIPE)
output, err = process.communicate()
exit_code = process.wait()
if exit_code != 0:
logger.error('Error during llvm-dwarfdump execution (%s)' % exit_code)
sys.exit(1)
else:
logger.error('Please specify either --dwarfdump or --dwarfdump-output')
sys.exit(1)
entries = []
debug_line_chunks = re.split(r"debug_line\[(0x[0-9a-f]*)\]", asstr(output))
maybe_debug_info_content = debug_line_chunks[0]
for i in range(1, len(debug_line_chunks), 2):
stmt_list = debug_line_chunks[i]
comp_dir_match = re.search(r"DW_AT_stmt_list\s+\(" + stmt_list + r"\)\s+" +
r"DW_AT_comp_dir\s+\(\"([^\"]+)", maybe_debug_info_content)
comp_dir = comp_dir_match.group(1) if comp_dir_match is not None else ""
line_chunk = debug_line_chunks[i + 1]
# include_directories[ 1] = "/Users/yury/Work/junk/sqlite-playground/src"
# file_names[ 1]:
# name: "playground.c"
# dir_index: 1
# mod_time: 0x00000000
# length: 0x00000000
#
# Address Line Column File ISA Discriminator Flags
# ------------------ ------ ------ ------ --- ------------- -------------
# 0x0000000000000006 22 0 1 0 0 is_stmt
# 0x0000000000000007 23 10 1 0 0 is_stmt prologue_end
# 0x000000000000000f 23 3 1 0 0
# 0x0000000000000010 23 3 1 0 0 end_sequence
# 0x0000000000000011 28 0 1 0 0 is_stmt
include_directories = {'0': comp_dir}
for dir in re.finditer(r"include_directories\[\s*(\d+)\] = \"([^\"]*)", line_chunk):
include_directories[dir.group(1)] = dir.group(2)
files = {}
for file in re.finditer(r"file_names\[\s*(\d+)\]:\s+name: \"([^\"]*)\"\s+dir_index: (\d+)", line_chunk):
dir = include_directories[file.group(3)]
file_path = (dir + '/' if file.group(2)[0] != '/' else '') + file.group(2)
files[file.group(1)] = file_path
for line in re.finditer(r"\n0x([0-9a-f]+)\s+(\d+)\s+(\d+)\s+(\d+)(.*?end_sequence)?", line_chunk):
entry = {'address': int(line.group(1), 16), 'line': int(line.group(2)), 'column': int(line.group(3)), 'file': files[line.group(4)], 'eos': line.group(5) is not None}
if not entry['eos']:
entries.append(entry)
else:
# move end of function to the last END operator
entry['address'] -= 1
if entries[-1]['address'] == entry['address']:
# last entry has the same address, reusing
entries[-1]['eos'] = True
else:
entries.append(entry)
remove_dead_entries(entries)
# return entries sorted by the address field
return sorted(entries, key=lambda entry: entry['address'])
def build_sourcemap(entries, code_section_offset, prefixes, collect_sources):
sources = []
sources_content = [] if collect_sources else None
mappings = []
sources_map = {}
last_address = 0
last_source_id = 0
last_line = 1
last_column = 1
for entry in entries:
line = entry['line']
column = entry['column']
# ignore entries with line 0
if line == 0:
continue
# start at least at column 1
if column == 0:
column = 1
address = entry['address'] + code_section_offset
file_name = entry['file']
source_name = prefixes.sources.resolve(file_name)
if source_name not in sources_map:
source_id = len(sources)
sources_map[source_name] = source_id
sources.append(source_name)
if collect_sources:
load_name = prefixes.load.resolve(file_name)
try:
with open(load_name, 'r') as infile:
source_content = infile.read()
sources_content.append(source_content)
        except (IOError, OSError):  # source file missing or unreadable
print('Failed to read source: %s' % load_name)
sources_content.append(None)
else:
source_id = sources_map[source_name]
address_delta = address - last_address
source_id_delta = source_id - last_source_id
line_delta = line - last_line
column_delta = column - last_column
mappings.append(encode_vlq(address_delta) + encode_vlq(source_id_delta) + encode_vlq(line_delta) + encode_vlq(column_delta))
last_address = address
last_source_id = source_id
last_line = line
last_column = column
return OrderedDict([('version', 3),
('names', []),
('sources', sources),
('sourcesContent', sources_content),
('mappings', ','.join(mappings))])
def main():
options = parse_args()
wasm_input = options.wasm
with open(wasm_input, 'rb') as infile:
wasm = infile.read()
entries = read_dwarf_entries(wasm_input, options)
code_section_offset = get_code_section_offset(wasm)
prefixes = SourceMapPrefixes(sources=Prefixes(options.prefix), load=Prefixes(options.load_prefix))
logger.debug('Saving to %s' % options.output)
map = build_sourcemap(entries, code_section_offset, prefixes, options.sources)
with open(options.output, 'w') as outfile:
json.dump(map, outfile, separators=(',', ':'))
if options.strip:
wasm = strip_debug_sections(wasm)
if options.source_map_url:
wasm = append_source_mapping(wasm, options.source_map_url)
if options.w:
logger.debug('Saving wasm to %s' % options.w)
with open(options.w, 'wb') as outfile:
outfile.write(wasm)
logger.debug('Done')
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG if os.environ.get('EMCC_DEBUG') else logging.INFO)
sys.exit(main())
|
Amber-Creative/amber-frappe | refs/heads/master | frappe/utils/logger.py | 6 | from __future__ import unicode_literals
import frappe
import logging
default_log_level = logging.DEBUG
def get_logger(module, with_more_info=True):
if module in frappe.loggers:
return frappe.loggers[module]
formatter = logging.Formatter('%(asctime)s - %(pathname)s [%(levelname)s]:\n%(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
if with_more_info:
handler.addFilter(SiteContextFilter())
logger = logging.getLogger(module)
logger.setLevel(frappe.log_level or default_log_level)
logger.addHandler(handler)
logger.propagate = False
frappe.loggers[module] = logger
return logger
class SiteContextFilter(logging.Filter):
"""This is a filter which injects request information (if available) into the log."""
def filter(self, record):
record.msg = get_more_info_for_log() + record.msg
return True
def get_more_info_for_log():
'''Adds Site, Form Dict into log entry'''
more_info = []
site = getattr(frappe.local, 'site', None)
if site:
more_info.append('Site: {0}'.format(site))
form_dict = getattr(frappe.local, 'form_dict', None)
if form_dict:
more_info.append('Form Dict: {0}'.format(frappe.as_json(form_dict)))
if more_info:
# to append a \n
more_info = more_info + ['']
return '\n'.join(more_info)
def set_log_level(level):
'''Use this method to set log level to something other than the default DEBUG'''
frappe.log_level = getattr(logging, (level or '').upper(), None) or default_log_level
frappe.loggers = {}
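# A minimal usage sketch (the module name and message are illustrative):
#     logger = get_logger(__name__)
#     logger.debug('processing {0}'.format(doc.name))
#     set_log_level('ERROR')  # affects loggers created after this call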
|
pedrobaeza/odoo | refs/heads/master | addons/hr_timesheet/report/__init__.py | 395 | import hr_timesheet_report
|
mapbased/vitess | refs/heads/master | test/initial_sharding_bytes.py | 5 | #!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Re-runs initial_sharding.py with a varbinary keyspace_id."""
from vtdb import keyrange_constants
import base_sharding
import initial_sharding
import utils
# this test is just re-running an entire initial_sharding.py with a
# varbinary keyspace_id
if __name__ == '__main__':
base_sharding.keyspace_id_type = keyrange_constants.KIT_BYTES
utils.main(initial_sharding)
|
drewokane/xray | refs/heads/master | xarray/test/test_combine.py | 1 | from copy import deepcopy
import numpy as np
import pandas as pd
from xarray import Dataset, DataArray, auto_combine, concat, Variable
from xarray.core.pycompat import iteritems, OrderedDict
from . import TestCase, InaccessibleArray, requires_dask
from .test_dataset import create_test_data
class TestConcatDataset(TestCase):
def test_concat(self):
# TODO: simplify and split this test case
# drop the third dimension to keep things relatively understandable
data = create_test_data().drop('dim3')
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
self.assertDatasetIdentical(data, concat(split_data, 'dim1'))
def rectify_dim_order(dataset):
            # return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(dict((k, v.transpose(*data[k].dims))
for k, v in iteritems(dataset.data_vars)),
dataset.coords, attrs=dataset.attrs)
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
self.assertDatasetIdentical(data, concat(datasets, dim))
self.assertDatasetIdentical(
data, concat(datasets, data[dim]))
self.assertDatasetIdentical(
data, concat(datasets, data[dim], coords='minimal'))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in iteritems(data.coords)
if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
self.assertDatasetIdentical(data, rectify_dim_order(actual))
actual = concat(datasets, data[dim], coords='different')
self.assertDatasetIdentical(data, rectify_dim_order(actual))
# make sure the coords argument behaves as expected
data.coords['extra'] = ('dim4', np.arange(3))
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords='all')
expected = np.array([data['extra'].values
for _ in range(data.dims[dim])])
self.assertArrayEqual(actual['extra'].values, expected)
actual = concat(datasets, data[dim], coords='different')
self.assertDataArrayEqual(data['extra'], actual['extra'])
actual = concat(datasets, data[dim], coords='minimal')
self.assertDataArrayEqual(data['extra'], actual['extra'])
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data['dim1']).rename('dim1')
datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
expected = data.copy()
expected['dim1'] = dim
self.assertDatasetIdentical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ['minimal', 'different', 'all', [], ['foo']]:
actual = concat(objs, dim='x', data_vars=data_vars)
self.assertDatasetIdentical(data, actual)
def test_concat_coords(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
objs = [data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1)]
for coords in ['different', 'all', ['c']]:
actual = concat(objs, dim='x', coords=coords)
self.assertDatasetIdentical(expected, actual)
for coords in ['minimal', []]:
with self.assertRaisesRegexp(ValueError, 'not equal across'):
concat(objs, dim='x', coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({'foo': 1.5}, {'y': 1})
ds2 = Dataset({'foo': 2.5}, {'y': 1})
expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
for mode in ['different', 'all', ['foo']]:
actual = concat([ds1, ds2], 'y', data_vars=mode)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
concat([ds1, ds2], 'y', data_vars='minimal')
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, 'dim1')
self.assertDatasetIdentical(data, actual)
actual = concat(split_data[::-1], 'dim1')
self.assertDatasetIdentical(data, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
with self.assertRaisesRegexp(ValueError, 'must supply at least one'):
concat([], 'dim1')
with self.assertRaisesRegexp(ValueError, 'are not coordinates'):
concat([data, data], 'new_dim', coords=['not_found'])
with self.assertRaisesRegexp(ValueError, 'global attributes not'):
data0, data1 = deepcopy(split_data)
data1.attrs['foo'] = 'bar'
concat([data0, data1], 'dim1', compat='identical')
self.assertDatasetIdentical(
data, concat([data0, data1], 'dim1', compat='equals'))
with self.assertRaisesRegexp(ValueError, 'encountered unexpected'):
data0, data1 = deepcopy(split_data)
data1['foo'] = ('bar', np.random.randn(10))
concat([data0, data1], 'dim1')
with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
data0, data1 = deepcopy(split_data)
data1['dim2'] = 2 * data1['dim2']
concat([data0, data1], 'dim1', coords='minimal')
with self.assertRaisesRegexp(ValueError, 'it is not 1-dimensional'):
concat([data0, data1], 'dim1')
with self.assertRaisesRegexp(ValueError, 'compat.* invalid'):
concat(split_data, 'dim1', compat='foobar')
with self.assertRaisesRegexp(ValueError, 'unexpected value for'):
concat([data, data], 'new_dim', coords='foobar')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', mode='different')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', concat_over='different')
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(actual, expected)
objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})]
actual = concat(objs, 'x')
self.assertDatasetIdentical(actual, expected)
# mixed dims between variables
objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})]
actual = concat(objs, 'x')
expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])})
self.assertDatasetIdentical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1]}, {'y': ('x', [-2])})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])})
self.assertDatasetIdentical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1, 2]}, {'y': -2})]
actual = concat(objs, 'x')
expected = Dataset({}, {'y': ('x', [-1, -2, -2])})
self.assertDatasetIdentical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}),
Dataset({'z': ('y', [1])}, {'x': [1], 'y': [0]})]
actual = concat(objs, 'x')
expected = Dataset({'z': (('x', 'y'), [[-1], [1]])})
self.assertDatasetIdentical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [Dataset({'y': ('t', [1])}, {'x': 1}),
Dataset({'y': ('t', [2])}, {'x': 1})]
expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
actual = concat(objs, 't')
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'y': ('t', [1])}, {'x': 1}),
Dataset({'y': ('t', [2])}, {'x': 2})]
with self.assertRaises(ValueError):
concat(objs, 't', coords='minimal')
def test_concat_dim_is_variable(self):
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
coord = Variable('y', [3, 4])
expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
actual = concat(objs, coord)
self.assertDatasetIdentical(actual, expected)
@requires_dask # only for toolz
def test_auto_combine(self):
objs = [Dataset({'x': [0]}), Dataset({'x': [1]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(expected, actual)
actual = auto_combine([actual])
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0, 1]}), Dataset({'x': [2]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1, 2]})
self.assertDatasetIdentical(expected, actual)
# ensure auto_combine handles non-sorted dimensions
objs = [Dataset(OrderedDict([('x', ('a', [0])), ('y', ('a', [0]))])),
Dataset(OrderedDict([('y', ('a', [1])), ('x', ('a', [1]))]))]
actual = auto_combine(objs)
expected = Dataset({'x': ('a', [0, 1]), 'y': ('a', [0, 1]), 'a': [0, 0]})
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'y': [1], 'x': [1]})]
with self.assertRaisesRegexp(ValueError, 'too many .* dimensions'):
auto_combine(objs)
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
with self.assertRaisesRegexp(ValueError, 'cannot infer dimension'):
auto_combine(objs)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'x': [0]})]
with self.assertRaises(KeyError):
auto_combine(objs)
class TestConcatDataArray(TestCase):
def test_concat(self):
ds = Dataset({'foo': (['x', 'y'], np.random.random((10, 20))),
'bar': (['x', 'y'], np.random.random((10, 20)))})
foo = ds['foo']
bar = ds['bar']
# from dataset array:
expected = DataArray(np.array([foo.values, bar.values]),
dims=['w', 'x', 'y'])
actual = concat([foo, bar], 'w')
self.assertDataArrayEqual(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby('x')]
stacked = concat(grouped, ds['x'])
self.assertDataArrayIdentical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes['x'])
self.assertDataArrayIdentical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not identical'):
concat([foo, bar], dim='w', compat='identical')
with self.assertRaisesRegexp(ValueError, 'not a valid argument'):
concat([foo, bar], dim='w', data_vars='minimal')
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
dims=['x', 'y']) for _ in range(2)]
# should not raise
combined = concat(arrays, dim='z')
self.assertEqual(combined.shape, (2, 3, 3))
self.assertEqual(combined.dims, ('z', 'x', 'y'))
|
Reddcraft/reddcraftd | refs/heads/master | lib/dividend.py | 1 | #! /usr/bin/python3
"""Pay out dividends."""
import struct
import decimal
D = decimal.Decimal
from . import (util, config, exceptions, reddcoin, util)
FORMAT_1 = '>QQ'
LENGTH_1 = 8 + 8
FORMAT_2 = '>QQQ'
LENGTH_2 = 8 + 8 + 8
ID = 50
def validate (db, source, quantity_per_unit, asset, dividend_asset, block_index):
cursor = db.cursor()
problems = []
if asset == config.RDD:
problems.append('cannot pay dividends to holders of {}'.format(config.RDD))
if asset == config.XRT:
if (not block_index >= 317500) or block_index >= 320000 or config.TESTNET: # Protocol change.
problems.append('cannot pay dividends to holders of {}'.format(config.XRT))
if quantity_per_unit <= 0: problems.append('non‐positive quantity per unit')
# Examine asset.
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY tx_index ASC''', ('valid', asset)))
if not issuances:
problems.append('no such asset, {}.'.format(asset))
return None, None, problems, 0
divisible = issuances[0]['divisible']
# Only issuer can pay dividends.
if block_index >= 320000 or config.TESTNET: # Protocol change.
if issuances[-1]['issuer'] != source:
problems.append('only issuer can pay dividends')
# Examine dividend asset.
if dividend_asset in (config.RDD, config.XRT):
dividend_divisible = True
else:
issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?)''', ('valid', dividend_asset)))
if not issuances:
problems.append('no such dividend asset, {}.'.format(dividend_asset))
return None, None, problems, 0
dividend_divisible = issuances[0]['divisible']
# Calculate dividend quantities.
holders = util.holders(db, asset)
outputs = []
addresses = []
dividend_total = 0
for holder in holders:
if block_index < 294500 and not config.TESTNET: # Protocol change.
if holder['escrow']: continue
address = holder['address']
address_quantity = holder['address_quantity']
if block_index >= 296000 or config.TESTNET: # Protocol change.
if address == source: continue
dividend_quantity = address_quantity * quantity_per_unit
if divisible: dividend_quantity /= config.UNIT
if not dividend_divisible: dividend_quantity /= config.UNIT
if dividend_asset == config.RDD and dividend_quantity < config.DEFAULT_MULTISIG_DUST_SIZE: continue # A bit hackish.
dividend_quantity = int(dividend_quantity)
outputs.append({'address': address, 'address_quantity': address_quantity, 'dividend_quantity': dividend_quantity})
addresses.append(address)
dividend_total += dividend_quantity
if not dividend_total: problems.append('zero dividend')
if dividend_asset != config.RDD:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, dividend_asset)))
if not balances or balances[0]['quantity'] < dividend_total:
problems.append('insufficient funds ({})'.format(dividend_asset))
fee = 0
if not problems and dividend_asset != config.RDD:
holder_count = len(set(addresses))
if block_index >= 330000 or config.TESTNET: # Protocol change.
fee = int(0.0002 * config.UNIT * holder_count)
if fee:
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, config.XRT)))
if not balances or balances[0]['quantity'] < fee:
problems.append('insufficient funds ({})'.format(config.XRT))
cursor.close()
return dividend_total, outputs, problems, fee
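# Worked example of the per-holder arithmetic in validate() above (a sketch
# with made-up numbers; assumes config.UNIT is the 10**8 base-unit multiplier
# and that both the asset and the dividend asset are divisible):
#
#     address_quantity = 5 * config.UNIT            # holder owns 5.0 units
#     quantity_per_unit = int(0.2 * config.UNIT)    # dividend of 0.2 per unit
#     dividend_quantity = address_quantity * quantity_per_unit
#     dividend_quantity /= config.UNIT              # asset is divisible
#     # dividend asset is divisible too, so no second division by config.UNIT
#     int(dividend_quantity)                        # == 1.0 * config.UNIT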
def compose (db, source, quantity_per_unit, asset, dividend_asset):
dividend_total, outputs, problems, fee = validate(db, source, quantity_per_unit, asset, dividend_asset, util.last_block(db)['block_index'])
if problems: raise exceptions.DividendError(problems)
print('Total quantity to be distributed in dividends:', util.devise(db, dividend_total, dividend_asset, 'output'), dividend_asset)
if dividend_asset == config.RDD:
return (source, [(output['address'], output['dividend_quantity']) for output in outputs], None)
asset_id = util.asset_id(asset)
dividend_asset_id = util.asset_id(dividend_asset)
data = struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT_2, quantity_per_unit, asset_id, dividend_asset_id)
return (source, [], data)
def parse (db, tx, message):
dividend_parse_cursor = db.cursor()
# Unpack message.
try:
if (tx['block_index'] > 288150 or config.TESTNET) and len(message) == LENGTH_2:
quantity_per_unit, asset_id, dividend_asset_id = struct.unpack(FORMAT_2, message)
asset = util.asset_name(asset_id)
dividend_asset = util.asset_name(dividend_asset_id)
status = 'valid'
elif len(message) == LENGTH_1:
quantity_per_unit, asset_id = struct.unpack(FORMAT_1, message)
asset = util.asset_name(asset_id)
dividend_asset = config.XRT
status = 'valid'
else:
raise exceptions.UnpackError
except (exceptions.UnpackError, exceptions.AssetNameError, struct.error) as e:
dividend_asset, quantity_per_unit, asset = None, None, None
status = 'invalid: could not unpack'
if dividend_asset == config.RDD:
status = 'invalid: cannot pay {} dividends within protocol'.format(config.RDD)
if status == 'valid':
# For SQLite3
quantity_per_unit = min(quantity_per_unit, config.MAX_INT)
dividend_total, outputs, problems, fee = validate(db, tx['source'], quantity_per_unit, asset, dividend_asset, block_index=tx['block_index'])
if problems: status = 'invalid: ' + '; '.join(problems)
if status == 'valid':
# Debit.
util.debit(db, tx['block_index'], tx['source'], dividend_asset, dividend_total, action='dividend', event=tx['tx_hash'])
if tx['block_index'] >= 330000 or config.TESTNET: # Protocol change.
util.debit(db, tx['block_index'], tx['source'], config.XRT, fee, action='dividend fee', event=tx['tx_hash'])
# Credit.
for output in outputs:
util.credit(db, tx['block_index'], output['address'], dividend_asset, output['dividend_quantity'], action='dividend', event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'asset': asset,
'dividend_asset': dividend_asset,
'quantity_per_unit': quantity_per_unit,
'fee_paid': fee,
'status': status,
}
sql='insert into dividends values(:tx_index, :tx_hash, :block_index, :source, :asset, :dividend_asset, :quantity_per_unit, :fee_paid, :status)'
dividend_parse_cursor.execute(sql, bindings)
dividend_parse_cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
ncbray/pystream | refs/heads/master | bin/analysis/ipa/memory/__init__.py | 39 | # Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
SoftwareMaven/django | refs/heads/master | tests/migrations/test_migrations_squashed_erroneous/2_auto.py | 770 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("migrations", "1_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
Samael500/helena | refs/heads/master | helena/content/models.py | 1 | from django.db import models
from helpers.service import image_path
class ImgWithDescr(models.Model):
""" class with genres model """
directory = None
    def get_image_path(instance, filename):
        return image_path(instance, filename, directory=instance.directory)
title = models.CharField(verbose_name='Заголовок', max_length=200)
description = models.TextField(verbose_name='Описание', blank=True, null=True)
image = models.ImageField(verbose_name='Изображение', upload_to=get_image_path, blank=True, null=True)
def __str__(self):
return self.title
class Meta:
abstract = True
class Genres(ImgWithDescr):
""" class with genres model """
directory = 'genres'
class Meta:
verbose_name = 'жанр'
verbose_name_plural = 'жанры'
class Galleries(ImgWithDescr):
""" class with gallery model """
directory = 'gallery'
external_img = models.URLField(verbose_name='Изображение во внешнем источнике', blank=True, null=True)
@property
def img_src(self):
""" return external img url or self file img """
return self.external_img or self.image
@property
def img_url(self):
""" return external img url or self file img """
return self.external_img or self.image.url
class Meta:
verbose_name = 'изображение в галлерее'
verbose_name_plural = 'изображения в галлерее'
|
GunoH/intellij-community | refs/heads/master | python/testData/resolve/TypingListInheritor.py | 20 | from typing import List
class A(List[int]):
pass
A().append()
# <ref> |
tao12345666333/app-turbo | refs/heads/master | turbo/fake/project_template/models/settings.py | 1 | # -*- coding:utf-8 -*-
from db.conn import (
test as _test,
user as _user,
test_files as _test_files,
user_files as _user_files,
)
MONGO_DB_MAPPING = {
'db': {
'test': _test,
'user': _user,
},
'db_file': {
'test': _test_files,
'user': _user_files,
}
}
|
puzan/ansible | refs/heads/devel | lib/ansible/modules/packaging/language/bundler.py | 25 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Tim Hoiberg <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION='''
---
module: bundler
short_description: Manage Ruby Gem dependencies with Bundler
description:
- Manage installation and Gem version dependencies for Ruby using the Bundler gem
version_added: "2.0.0"
options:
executable:
description:
- The path to the bundler executable
required: false
default: null
state:
description:
- The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version
required: false
choices: [present, latest]
default: present
chdir:
description:
      - The directory to execute the bundler commands from. This directory
        needs to contain a valid Gemfile or .bundle/ directory
required: false
default: temporary working directory
exclude_groups:
description:
- A list of Gemfile groups to exclude during operations. This only
applies when state is C(present). Bundler considers this
a 'remembered' property for the Gemfile and will automatically exclude
groups in future operations even if C(exclude_groups) is not set
required: false
default: null
clean:
description:
      - Only applies if state is C(present). If set, removes any gems on the
target host that are not in the gemfile
required: false
choices: [yes, no]
default: "no"
gemfile:
description:
- Only applies if state is C(present). The path to the gemfile to use to install gems.
required: false
default: Gemfile in current directory
local:
description:
      - If set, only installs gems from the cache on the target host
required: false
choices: [yes, no]
default: "no"
deployment_mode:
description:
      - Only applies if state is C(present). If set, it will only install gems
that are in the default or production groups. Requires a Gemfile.lock
file to have been created prior
required: false
choices: [yes, no]
default: "no"
user_install:
description:
- Only applies if state is C(present). Installs gems in the local user's cache or for all users
required: false
choices: [yes, no]
default: "yes"
gem_path:
description:
- Only applies if state is C(present). Specifies the directory to
install the gems into. If C(chdir) is set then this path is relative to
C(chdir)
required: false
default: RubyGems gem paths
binstub_directory:
description:
- Only applies if state is C(present). Specifies the directory to
install any gem bins files to. When executed the bin files will run
within the context of the Gemfile and fail if any required gem
dependencies are not installed. If C(chdir) is set then this path is
relative to C(chdir)
required: false
default: null
extra_args:
description:
- A space separated string of additional commands that can be applied to
the Bundler command. Refer to the Bundler documentation for more
information
required: false
default: null
author: "Tim Hoiberg (@thoiberg)"
'''
EXAMPLES='''
# Installs gems from a Gemfile in the current directory
- bundler:
state: present
executable: ~/.rvm/gems/2.1.5/bin/bundle
# Excludes the production group from installing
- bundler:
state: present
exclude_groups: production
# Only install gems from the default and production groups
- bundler:
state: present
deployment_mode: yes
# Installs gems using a Gemfile in another directory
- bundler:
state: present
gemfile: ../rails_project/Gemfile
# Updates Gemfile in another directory
- bundler:
state: latest
chdir: ~/rails_project
'''
def get_bundler_executable(module):
if module.params.get('executable'):
return module.params.get('executable').split(' ')
else:
return [ module.get_bin_path('bundle', True) ]
def main():
module = AnsibleModule(
argument_spec=dict(
executable=dict(default=None, required=False),
state=dict(default='present', required=False, choices=['present', 'latest']),
chdir=dict(default=None, required=False, type='path'),
exclude_groups=dict(default=None, required=False, type='list'),
clean=dict(default=False, required=False, type='bool'),
gemfile=dict(default=None, required=False, type='path'),
local=dict(default=False, required=False, type='bool'),
deployment_mode=dict(default=False, required=False, type='bool'),
user_install=dict(default=True, required=False, type='bool'),
gem_path=dict(default=None, required=False, type='path'),
binstub_directory=dict(default=None, required=False, type='path'),
extra_args=dict(default=None, required=False),
),
supports_check_mode=True
)
executable = module.params.get('executable')
state = module.params.get('state')
chdir = module.params.get('chdir')
exclude_groups = module.params.get('exclude_groups')
clean = module.params.get('clean')
gemfile = module.params.get('gemfile')
local = module.params.get('local')
deployment_mode = module.params.get('deployment_mode')
user_install = module.params.get('user_install')
gem_path = module.params.get('gem_path')
binstub_directory = module.params.get('binstub_directory')
extra_args = module.params.get('extra_args')
cmd = get_bundler_executable(module)
if module.check_mode:
cmd.append('check')
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
if state == 'present':
cmd.append('install')
if exclude_groups:
cmd.extend(['--without', ':'.join(exclude_groups)])
if clean:
cmd.append('--clean')
if gemfile:
cmd.extend(['--gemfile', gemfile])
if local:
cmd.append('--local')
if deployment_mode:
cmd.append('--deployment')
if not user_install:
cmd.append('--system')
if gem_path:
cmd.extend(['--path', gem_path])
if binstub_directory:
cmd.extend(['--binstubs', binstub_directory])
else:
cmd.append('update')
if local:
cmd.append('--local')
if extra_args:
cmd.extend(extra_args.split(' '))
rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
prudnikov/python-oauth2 | refs/heads/master | oauth2/_version.py | 2 | # This is the version of this source code.
manual_verstr = "1.5"
auto_build_num = "212"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
|
AidanGG/tensorflow_tmva | refs/heads/master | tensorflow_tmva/classification/__init__.py | 12133432 | |
puzan/ansible | refs/heads/devel | lib/ansible/modules/utilities/__init__.py | 12133432 | |
indictranstech/ebuy-now-frappe | refs/heads/develop | frappe/integrations/doctype/__init__.py | 12133432 | |
infoclock/OlympicTracker | refs/heads/master | allauthdemo/demo/migrations/__init__.py | 12133432 | |
sanghinitin/golismero | refs/heads/master | thirdparty_libs/django/conf/locale/es_MX/__init__.py | 12133432 | |
erikr/django | refs/heads/master | tests/migrations/__init__.py | 12133432 | |
sublime1809/django | refs/heads/master | django/contrib/gis/tests/layermap/__init__.py | 12133432 | |
henry-ajere/rad2py | refs/heads/master | assignments/program6A.py | 16 | #!/usr/bin/env python
# coding:utf-8
"PSP Program 6A - Linear Regression Prediction Interval"
__author__ = "Mariano Reingart ([email protected])"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"
from math import sqrt, pi
# reuse previous programs
from program1A import mean
from program5A import simpson_rule_integrate, gamma
def double_sided_student_t_probability(t, n):
"Calculate the p-value using a double sided student t distribution"
# create the function for n degrees of freedom:
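    # The double-sided p-value integrates the Student t pdf with n degrees of
    # freedom, f(u) = k * (1 + u**2/n) ** (-(n + 1) / 2), where
    # k = gamma((n+1)/2) / (sqrt(n*pi) * gamma(n/2)); this assumes gamma(a, 2)
    # from program5A evaluates the gamma function at a/2.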
k = gamma(n + 1, 2) / (sqrt(n * pi) * gamma(n, 2))
f_t_dist = lambda u: k * (1 + (u ** 2) / float(n)) ** (- (n + 1) / 2.0)
# integrate a finite area from the origin to t
p_aux = simpson_rule_integrate(f_t_dist, 0, t)
# return the area of the two tails of the distribution (symmetrical)
return (0.5 - p_aux) * 2
def double_sided_student_t_value(p, n):
"Calculate the t-value using a double sided student t distribution"
# replaces table lookup, thanks to http://statpages.org/pdfs.html
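    # Bisection on v in (0, 1): t = 1/v - 1 maps v monotonically onto
    # t in (0, inf); dv is halved each step until the two-sided p-value
    # of t is within tolerance of the requested p.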
v = dv = 0.5
t = 0
while dv > 0.000001:
t = 1 / v - 1
dv = dv / 2
if double_sided_student_t_probability(t, n) > p:
v = v - dv
else:
v = v + dv
return t
def variance(x_values, y_values, b0, b1):
"Calculate the mean square deviation of the linear regeression line"
# take the variance from the regression line instead of the data average
sum_aux = sum([(y - b0 - b1 * x) ** 2 for x, y in zip(x_values, y_values)])
n = float(len(x_values))
return (1 / (n - 2.0)) * sum_aux
def prediction_interval(x_values, y_values, x_k, alpha):
"""Calculate the linear regression parameters for a set of n values
then calculate the upper and lower prediction interval
"""
# calculate aux variables
x_avg = mean(x_values)
y_avg = mean(y_values)
n = len(x_values)
sum_xy = sum([(x_values[i] * y_values[i]) for i in range(n)])
sum_x2 = sum([(x_values[i] ** 2) for i in range(n)])
# calculate regression coefficients
b1 = (sum_xy - (n * x_avg * y_avg)) / (sum_x2 - n * (x_avg ** 2))
b0 = y_avg - b1 * x_avg
# calculate the t-value for the given alpha p-value
t = double_sided_student_t_value(1 - alpha, n - 2)
# calculate the standard deviation
sigma = sqrt(variance(x_values, y_values, b0, b1))
# calculate the range
sum_xi_xavg = sum([(x - x_avg) ** 2 for x in x_values], 0.0)
aux = 1 + (1 / float(n)) + ((x_k - x_avg) ** 2) / sum_xi_xavg
p_range = t * sigma * sqrt(aux)
# combine the range with the x_k projection:
return b0, b1, p_range, x_k + p_range, x_k - p_range, t
def test_student_t_integration():
# test student t values
assert round(double_sided_student_t_probability(t=1.8595, n=8), 4) == 0.1
assert round(double_sided_student_t_value(p=0.1, n=8), 4) == 1.8595
if __name__ == "__main__":
test_student_t_integration()
# Table D8 "Size Estimating regression data"
est_loc = [130, 650, 99, 150, 128, 302, 95, 945, 368, 961]
act_new_chg_loc = [186, 699, 132, 272, 291, 331, 199, 1890, 788, 1601]
projection = 644.429
# 70 percent
b0, b1, p_range, upi, lpi, t = prediction_interval(
est_loc, act_new_chg_loc, projection, alpha=0.7)
print "70% Prediction interval: ", b0, b1, p_range, upi, lpi, t
assert round(t, 3) == 1.108
assert round(b0, 2) == -22.55
assert round(b1, 4) == 1.7279
assert round(p_range, 3) == 236.563
assert round(upi, 2) == 880.99
assert round(lpi, 2) == 407.87
# 90 percent
b0, b1, p_range, upi, lpi, t = prediction_interval(
est_loc, act_new_chg_loc, projection, alpha=0.9)
print "90% Prediction interval: ", b0, b1, p_range, upi, lpi, t
assert round(t, 2) == 1.86
assert round(p_range, 2) == 396.97
assert round(upi, 2) == 1041.4
assert round(lpi, 2) == 247.46
|
ergodicbreak/evennia | refs/heads/master | evennia/objects/objects.py | 1 | """
This module defines the basic `DefaultObject` and its children
`DefaultCharacter`, `DefaultPlayer`, `DefaultRoom` and `DefaultExit`.
These are the (default) starting points for all in-game visible
entities.
"""
import time
from builtins import object
from future.utils import listvalues, with_metaclass
from django.conf import settings
from evennia.typeclasses.models import TypeclassBase
from evennia.typeclasses.attributes import NickHandler
from evennia.objects.manager import ObjectManager
from evennia.objects.models import ObjectDB
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands import cmdset, command
from evennia.commands.cmdsethandler import CmdSetHandler
from evennia.commands import cmdhandler
from evennia.utils import logger
from evennia.utils.utils import (variable_from_module, lazy_property,
make_iter, to_unicode)
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_ScriptDB = None
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
# the sessid_max is based on the length of the db_sessid csv field (excluding commas)
_SESSID_MAX = 16 if _MULTISESSION_MODE in (1, 3) else 1
from django.utils.translation import ugettext as _
class ObjectSessionHandler(object):
"""
Handles the get/setting of the sessid
comma-separated integer field
"""
def __init__(self, obj):
"""
Initializes the handler.
Args:
obj (Object): The object on which the handler is defined.
"""
self.obj = obj
self._sessid_cache = set()
self._recache()
def _recache(self):
self._sessid_cache = list(set(int(val) for val in (self.obj.db_sessid or "").split(",") if val))
def get(self, sessid=None):
"""
Get the sessions linked to this Object.
Args:
sessid (int, optional): A specific session id.
Returns:
sessions (list): The sessions connected to this object. If `sessid` is given,
this is a list of one (or zero) elements.
Notes:
Aliased to `self.all()`.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
if sessid:
return [_SESSIONS[sessid]] if sessid in self._sessid_cache and sessid in _SESSIONS else []
else:
return [_SESSIONS[sessid] for sessid in self._sessid_cache if sessid in _SESSIONS]
def all(self):
"""
Alias to get(), returning all sessions.
Returns:
sessions (list): All sessions.
"""
return self.get()
def add(self, session):
"""
Add session to handler.
Args:
session (Session or int): Session or session id to add.
Notes:
We will only add a session/sessid if this actually also exists
            in the core sessionhandler.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
try:
sessid = session.sessid
except AttributeError:
sessid = session
sessid_cache = self._sessid_cache
if sessid in _SESSIONS and sessid not in sessid_cache:
if len(sessid_cache) >= _SESSID_MAX:
return
sessid_cache.append(sessid)
self.obj.db_sessid = ",".join(str(val) for val in sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def remove(self, session):
"""
Remove session from handler.
Args:
            session (Session or int): Session or session id to remove.
"""
try:
sessid = session.sessid
except AttributeError:
sessid = session
sessid_cache = self._sessid_cache
if sessid in sessid_cache:
sessid_cache.remove(sessid)
self.obj.db_sessid = ",".join(str(val) for val in sessid_cache)
self.obj.save(update_fields=["db_sessid"])
def clear(self):
"""
Clear all handled sessids.
"""
self._sessid_cache = []
self.obj.db_sessid = None
self.obj.save(update_fields=["db_sessid"])
def count(self):
"""
        Get the number of connected sessions.
Returns:
sesslen (int): Number of sessions handled.
"""
return len(self._sessid_cache)
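    # Hedged usage sketch (obj stands for any object with this handler):
    #
    #     obj.sessions.all()     # -> list of connected Sessions
    #     obj.sessions.count()   # -> number of connected sessions
    #     obj.sessions.add(session)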
#
# Base class to inherit from.
#
class DefaultObject(with_metaclass(TypeclassBase, ObjectDB)):
"""
This is the root typeclass object, representing all entities that
have an actual presence in-game. DefaultObjects generally have a
location. They can also be manipulated and looked at. Game
entities you define should inherit from DefaultObject at some distance.
It is recommended to create children of this class using the
`evennia.create_object()` function rather than to initialize the class
directly - this will both set things up and efficiently save the object
without `obj.save()` having to be called explicitly.
"""
objects = ObjectManager()
# on-object properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
@lazy_property
def sessions(self):
return ObjectSessionHandler(self)
@property
def has_player(self):
"""
Convenience property for checking if an active player is
currently connected to this object.
"""
return self.sessions.count()
@property
def is_superuser(self):
"""
Check if user has a player, and if so, if it is a superuser.
"""
return self.db_player and self.db_player.is_superuser \
and not self.db_player.attributes.get("_quell")
def contents_get(self, exclude=None):
"""
Returns the contents of this object, i.e. all
objects that has this object set as its location.
        This should be publicly available.
Args:
exclude (Object): Object to exclude from returned
contents list
Returns:
contents (list): List of contents of this Object.
Notes:
Also available as the `contents` property.
"""
return self.contents_cache.get(exclude=exclude)
contents = property(contents_get)
@property
def exits(self):
"""
Returns all exits from this object, i.e. all objects at this
location having the property destination != `None`.
"""
return [exi for exi in self.contents if exi.destination]
# main methods
def get_display_name(self, looker, **kwargs):
"""
Displays the name of the object in a viewer-aware manner.
Args:
looker (TypedObject): The object or player that is looking
            at/getting information for this object.
Returns:
name (str): A string containing the name of the object,
including the DBREF if this user is privileged to control
said object.
Notes:
This function could be extended to change how object names
appear to users in character, but be wary. This function
does not change an object's keys or aliases when
searching, and is expected to produce something useful for
builders.
"""
if self.locks.check_lockstring(looker, "perm(Builders)"):
return "{}(#{})".format(self.name, self.id)
return self.name
def search(self, searchdata,
global_search=False,
use_nicks=True, # should this default to off?
typeclass=None,
location=None,
attribute_name=None,
quiet=False,
exact=False,
candidates=None,
nofound_string=None,
multimatch_string=None):
"""
Returns the typeclass of an `Object` matching a search
string/condition
Perform a standard object search in the database, handling
multiple results and lack thereof gracefully. By default, only
objects in the current `location` of `self` or its inventory are searched for.
Args:
searchdata (str or obj): Primary search criterion. Will be matched
against `object.key` (with `object.aliases` second) unless
the keyword attribute_name specifies otherwise.
**Special strings:**
- `#<num>`: search by unique dbref. This is always
a global search.
- `me,self`: self-reference to this object
- `<num>-<string>` - can be used to differentiate
between multiple same-named matches
global_search (bool): Search all objects globally. This is overruled
by `location` keyword.
use_nicks (bool): Use nickname-replace (nicktype "object") on `searchdata`.
typeclass (str or Typeclass, or list of either): Limit search only
to `Objects` with this typeclass. May be a list of typeclasses
for a broader search.
location (Object): Specify a location to search, if different from the
self's given `location` plus its contents. This can also
be a list of locations.
attribute_name (str): Define which property to search. If set, no
key+alias search will be performed. This can be used
to search database fields (db_ will be automatically
appended), and if that fails, it will try to return
objects having Attributes with this name and value
equal to searchdata. A special use is to search for
"key" here if you want to do a key-search without
including aliases.
quiet (bool): don't display default error messages - this tells the
search method that the user wants to handle all errors
themselves. It also changes the return value type, see
below.
exact (bool): if unset (default) - prefers to match to beginning of
string rather than not matching at all. If set, requires
                exact matching of entire string.
candidates (list of objects): this is an optional custom list of objects
to search (filter) between. It is ignored if `global_search`
is given. If not set, this list will automatically be defined
to include the location, the contents of location and the
caller's contents (inventory).
nofound_string (str): optional custom string for not-found error message.
multimatch_string (str): optional custom string for multimatch error header.
Returns:
match (Object, None or list): will return an Object/None if `quiet=False`,
otherwise it will return a list of 0, 1 or more matches.
Notes:
To find Players, use eg. `evennia.player_search`. If
`quiet=False`, error messages will be handled by
`settings.SEARCH_AT_RESULT` and echoed automatically (on
error, return will be `None`). If `quiet=True`, the error
messaging is assumed to be handled by the caller.
"""
is_string = isinstance(searchdata, basestring)
if is_string:
# searchdata is a string; wrap some common self-references
if searchdata.lower() in ("here", ):
return [self.location] if quiet else self.location
if searchdata.lower() in ("me", "self",):
return [self] if quiet else self
if use_nicks:
# do nick-replacement on search
searchdata = self.nicks.nickreplace(searchdata, categories=("object", "player"), include_player=True)
if(global_search or (is_string and searchdata.startswith("#") and
len(searchdata) > 1 and searchdata[1:].isdigit())):
# only allow exact matching if searching the entire database
# or unique #dbrefs
exact = True
elif not candidates:
# no custom candidates given - get them automatically
if location:
# location(s) were given
candidates = []
for obj in make_iter(location):
candidates.extend(obj.contents)
else:
# local search. Candidates are taken from
# self.contents, self.location and
# self.location.contents
location = self.location
candidates = self.contents
if location:
candidates = candidates + [location] + location.contents
else:
# normally we don't need this since we are
# included in location.contents
candidates.append(self)
results = ObjectDB.objects.object_search(searchdata,
attribute_name=attribute_name,
typeclass=typeclass,
candidates=candidates,
exact=exact)
if quiet:
return results
return _AT_SEARCH_RESULT(results, self, query=searchdata,
nofound_string=nofound_string, multimatch_string=multimatch_string)
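    # Hedged usage sketch (the object names are hypothetical):
    #
    #     sword = caller.search("sword")          # local key/alias search
    #     room = caller.search("#2")              # '#dbref' is always global
    #     hits = caller.search("sw", quiet=True)  # list of 0..n matches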
def search_player(self, searchdata, quiet=False):
"""
Simple shortcut wrapper to search for players, not characters.
Args:
searchdata (str): Search criterion - the key or dbref of the player
to search for. If this is "here" or "me", search
for the player connected to this object.
quiet (bool): Returns the results as a list rather than
echo eventual standard error messages. Default `False`.
Returns:
result (Player, None or list): Just what is returned depends on
the `quiet` setting:
            - `quiet=False`: No match or multimatch auto-echoes errors
              to self.msg, then returns `None`. The results are passed
through `settings.SEARCH_AT_RESULT` and
`settings.SEARCH_AT_MULTIMATCH_INPUT`. If there is a
unique match, this will be returned.
- `quiet=True`: No automatic error messaging is done, and
what is returned is always a list with 0, 1 or more
matching Players.
"""
if isinstance(searchdata, basestring):
# searchdata is a string; wrap some common self-references
if searchdata.lower() in ("me", "self",):
return [self.player] if quiet else self.player
results = self.player.__class__.objects.player_search(searchdata)
if quiet:
return results
return _AT_SEARCH_RESULT(results, self, query=searchdata)
def execute_cmd(self, raw_string, session=None, **kwargs):
"""
Do something as this object. This is never called normally,
it's only used when wanting specifically to let an object be
the caller of a command. It makes use of nicks of eventual
connected players as well.
Args:
raw_string (string): Raw command input
session (Session, optional): Session to
return results to
Kwargs:
Other keyword arguments will be added to the found command
            object instance as variables before it executes. This is
            unused by default Evennia but may be used to set flags and
            change operating parameters for commands at run-time.
Returns:
defer (Deferred): This is an asynchronous Twisted object that
will not fire until the command has actually finished
executing. To overload this one needs to attach
callback functions to it, with addCallback(function).
This function will be called with an eventual return
value from the command execution. This return is not
used at all by Evennia by default, but might be useful
for coders intending to implement some sort of nested
command structure.
"""
# nick replacement - we require full-word matching.
# do text encoding conversion
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string,
categories=("inputline", "channel"), include_player=True)
return cmdhandler.cmdhandler(self, raw_string, callertype="object", session=session, **kwargs)
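    # Hedged sketch of consuming the returned Deferred:
    #
    #     deferred = obj.execute_cmd("look")
    #     deferred.addCallback(lambda ret: obj.msg("look finished"))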
def msg(self, text=None, from_obj=None, session=None, options=None, **kwargs):
"""
Emits something to a session attached to the object.
Args:
text (str or tuple, optional): The message to send. This
is treated internally like any send-command, so its
value can be a tuple if sending multiple arguments to
the `text` oob command.
from_obj (obj, optional): object that is sending. If
given, at_msg_send will be called. This value will be
passed on to the protocol.
session (Session or list, optional): Session or list of
Sessions to relay data to, if any. If set, will force send
to these sessions. If unset, who receives the message
depends on the MULTISESSION_MODE.
options (dict, optional): Message-specific option-value
pairs. These will be applied at the protocol level.
Kwargs:
any (string or tuples): All kwarg keys not listed above
will be treated as send-command names and their arguments
(which can be a string or a tuple).
Notes:
`at_msg_receive` will be called on this Object.
All extra kwargs will be passed on to the protocol.
"""
# try send hooks
if from_obj:
try:
from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
logger.log_trace()
try:
if not self.at_msg_receive(text=text, **kwargs):
# if at_msg_receive returns false, we abort message to this object
return
except Exception:
logger.log_trace()
text = None if text is None else str(text)
kwargs["options"] = options
# relay to session(s)
sessions = make_iter(session) if session else self.sessions.all()
for session in sessions:
session.data_out(text=text, **kwargs)
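    # Hedged sketch (the 'prompt' send-command and 'raw' option are
    # assumptions about the active protocol):
    #
    #     obj.msg("Hello!", options={"raw": True})
    #     obj.msg(prompt="HP: 10/10")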
def for_contents(self, func, exclude=None, **kwargs):
"""
Runs a function on every object contained within this one.
Args:
func (callable): Function to call. This must have the
formal call sign func(obj, **kwargs), where obj is the
object currently being processed and `**kwargs` are
passed on from the call to `for_contents`.
exclude (list, optional): A list of object not to call the
function on.
Kwargs:
Keyword arguments will be passed to the function for all objects.
"""
contents = self.contents
if exclude:
exclude = make_iter(exclude)
contents = [obj for obj in contents if obj not in exclude]
for obj in contents:
func(obj, **kwargs)
def msg_contents(self, message, exclude=None, from_obj=None, **kwargs):
"""
Emits a message to all objects inside this object.
Args:
message (str): Message to send.
exclude (list, optional): A list of objects not to send to.
from_obj (Object, optional): An object designated as the
"sender" of the message. See `DefaultObject.msg()` for
more info.
Kwargs:
Keyword arguments will be passed on to `obj.msg()` for all
messaged objects.
"""
def msg(obj, message, from_obj, **kwargs):
obj.msg(message, from_obj=from_obj, **kwargs)
self.for_contents(msg, exclude=exclude, from_obj=from_obj, message=message, **kwargs)
def move_to(self, destination, quiet=False,
emit_to_obj=None, use_destination=True, to_none=False, move_hooks=True):
"""
Moves this object to a new location.
Args:
destination (Object): Reference to the object to move to. This
can also be an exit object, in which case the
destination property is used as destination.
quiet (bool): If true, turn off the calling of the emit hooks
(announce_move_to/from etc)
emit_to_obj (Object): object to receive error messages
use_destination (bool): Default is for objects to use the "destination"
property of destinations as the target to move to. Turning off this
keyword allows objects to move "inside" exit objects.
to_none (bool): Allow destination to be None. Note that no hooks are run when
moving to a None location. If you want to run hooks, run them manually
(and make sure they can manage None locations).
move_hooks (bool): If False, turn off the calling of move-related hooks
(at_before/after_move etc) with quiet=True, this is as quiet a move
as can be done.
Returns:
result (bool): True/False depending on if there were problems with the move.
This method may also return various error messages to the
`emit_to_obj`.
Notes:
No access checks are done in this method, these should be handled before
calling `move_to`.
The `DefaultObject` hooks called (if `move_hooks=True`) are, in order:
1. `self.at_before_move(destination)` (if this returns False, move is aborted)
2. `source_location.at_object_leave(self, destination)`
3. `self.announce_move_from(destination)`
4. (move happens here)
5. `self.announce_move_to(source_location)`
6. `destination.at_object_receive(self, source_location)`
7. `self.at_after_move(source_location)`
"""
def logerr(string="", err=None):
"Simple log helper method"
logger.log_trace()
self.msg("%s%s" % (string, "" if err is None else " (%s)" % err))
errtxt = _("Couldn't perform move ('%s'). Contact an admin.")
if not emit_to_obj:
emit_to_obj = self
if not destination:
if to_none:
# immediately move to None. There can be no hooks called since
# there is no destination to call them with.
self.location = None
return True
emit_to_obj.msg(_("The destination doesn't exist."))
return
if destination.destination and use_destination:
# traverse exits
destination = destination.destination
# Before the move, call eventual pre-commands.
if move_hooks:
try:
if not self.at_before_move(destination):
return
except Exception as err:
logerr(errtxt % "at_before_move()", err)
return False
# Save the old location
source_location = self.location
if not source_location:
# there was some error in placing this room.
# we have to set one or we won't be able to continue
if self.home:
source_location = self.home
else:
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
source_location = default_home
# Call hook on source location
if move_hooks:
try:
source_location.at_object_leave(self, destination)
except Exception as err:
logerr(errtxt % "at_object_leave()", err)
return False
if not quiet:
            # tell the old room we are leaving
try:
self.announce_move_from(destination)
except Exception as err:
logerr(errtxt % "at_announce_move()", err)
return False
# Perform move
try:
self.location = destination
except Exception as err:
logerr(errtxt % "location change", err)
return False
if not quiet:
# Tell the new room we are there.
try:
self.announce_move_to(source_location)
except Exception as err:
logerr(errtxt % "announce_move_to()", err)
return False
if move_hooks:
# Perform eventual extra commands on the receiving location
# (the object has already arrived at this point)
try:
destination.at_object_receive(self, source_location)
except Exception as err:
logerr(errtxt % "at_object_receive()", err)
return False
# Execute eventual extra commands on this object after moving it
# (usually calling 'look')
if move_hooks:
try:
self.at_after_move(source_location)
except Exception as err:
logerr(errtxt % "at_after_move", err)
return False
return True
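    # Hedged usage sketch (tavern/limbo are hypothetical room objects):
    #
    #     if char.move_to(tavern):          # full hook chain, with messages
    #         char.msg("You arrive.")
    #     char.move_to(limbo, quiet=True, move_hooks=False)  # silent teleport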
def clear_exits(self):
"""
Destroys all of the exits and any exits pointing to this
object as a destination.
"""
for out_exit in [exi for exi in ObjectDB.objects.get_contents(self) if exi.db_destination]:
out_exit.delete()
for in_exit in ObjectDB.objects.filter(db_destination=self):
in_exit.delete()
def clear_contents(self):
"""
Moves all objects (players/things) to their home location or
to default home.
"""
# Gather up everything that thinks this is its location.
default_home_id = int(settings.DEFAULT_HOME.lstrip("#"))
try:
default_home = ObjectDB.objects.get(id=default_home_id)
if default_home.dbid == self.dbid:
# we are deleting default home!
default_home = None
except Exception:
string = _("Could not find default home '(#%d)'.")
logger.log_err(string % default_home_id)
default_home = None
for obj in self.contents:
home = obj.home
# Obviously, we can't send it back to here.
if not home or (home and home.dbid == self.dbid):
obj.home = default_home
home = default_home
# If for some reason it's still None...
if not home:
string = "Missing default home, '%s(#%d)' "
string += "now has a null location."
obj.location = None
obj.msg(_("Something went wrong! You are dumped into nowhere. Contact an admin."))
logger.log_err(string % (obj.name, obj.dbid))
return
if obj.has_player:
if home:
string = "Your current location has ceased to exist,"
string += " moving you to %s(#%d)."
obj.msg(_(string) % (home.name, home.dbid))
else:
# Famous last words: The player should never see this.
string = "This place should not exist ... contact an admin."
obj.msg(_(string))
obj.move_to(home)
def copy(self, new_key=None):
"""
Makes an identical copy of this object, identical except for a
new dbref in the database. If you want to customize the copy
by changing some settings, use ObjectDB.object.copy_object()
directly.
Args:
new_key (string): New key/name of copied object. If new_key is not
specified, the copy will be named <old_key>_copy by default.
Returns:
copy (Object): A copy of this object.
"""
def find_clone_key():
"""
Append 01, 02 etc to obj.key. Checks next higher number in the
            same location, then adds the next number available.
            Returns the new clone name on the form keyXX.
"""
key = self.key
num = 1
            for obj in (obj for obj in self.location.contents
                        if obj.key.startswith(key) and
                        obj.key[len(key):].isdigit()):
num += 1
return "%s%03i" % (key, num)
new_key = new_key or find_clone_key()
return ObjectDB.objects.copy_object(self, new_key=new_key)
delete_iter = 0
def delete(self):
"""
Deletes this object. Before deletion, this method makes sure
to move all contained objects to their respective home
locations, as well as clean up all exits to/from the object.
Returns:
noerror (bool): Returns whether or not the delete completed
successfully or not.
"""
global _ScriptDB
if not _ScriptDB:
from evennia.scripts.models import ScriptDB as _ScriptDB
if self.delete_iter > 0:
# make sure to only call delete once on this object
# (avoid recursive loops)
return False
if not self.at_object_delete():
# this is an extra pre-check
# run before deletion field-related properties
# is kicked into gear.
self.delete_iter = 0
return False
self.delete_iter += 1
# See if we need to kick the player off.
for session in self.sessions.all():
session.msg(_("Your character %s has been destroyed.") % self.key)
# no need to disconnect, Player just jumps to OOC mode.
# sever the connection (important!)
if self.player:
for session in self.sessions.all():
self.player.unpuppet_object(session)
self.player = None
for script in _ScriptDB.objects.get_all_scripts_on_obj(self):
script.stop()
# Destroy any exits to and from this room, if any
self.clear_exits()
# Clear out any non-exit objects located within the object
self.clear_contents()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
self.location = None # this updates contents_cache for our location
# Perform the deletion of the object
super(ObjectDB, self).delete()
return True
def access(self, accessing_obj, access_type='read', default=False, no_superuser_bypass=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one.
access_type (str, optional): Type of access sought.
default (bool, optional): What to return if no lock of access_type was found.
no_superuser_bypass (bool, optional): If `True`, don't skip
lock check for superuser (be careful with this one).
Kwargs:
Passed on to the at_access hook along with the result of the access check.
"""
result = super(DefaultObject, self).access(accessing_obj, access_type=access_type,
default=default, no_superuser_bypass=no_superuser_bypass)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
def __eq__(self, other):
"""
Checks for equality against an id string or another object or
user.
Args:
other (Object): object to compare to.
"""
try:
return self.dbid == other.dbid
except AttributeError:
# compare players instead
try:
return self.player.uid == other.player.uid
except AttributeError:
return False
#
# Hook methods
#
def at_first_save(self):
"""
This is called by the typeclass system whenever an instance of
this class is saved for the first time. It is a generic hook
for calling the startup hooks for the various game entities.
When overloading you generally don't overload this but
overload the hooks called by this method.
"""
self.basetype_setup()
self.at_object_creation()
if hasattr(self, "_createdict"):
# this will only be set if the utils.create function
# was used to create the object. We want the create
# call's kwargs to override the values set by hooks.
cdict = self._createdict
updates = []
if not cdict.get("key"):
if not self.db_key:
self.db_key = "#%i" % self.dbid
updates.append("db_key")
elif self.key != cdict.get("key"):
updates.append("db_key")
self.db_key = cdict["key"]
if cdict.get("location") and self.location != cdict["location"]:
self.db_location = cdict["location"]
updates.append("db_location")
if cdict.get("home") and self.home != cdict["home"]:
self.home = cdict["home"]
updates.append("db_home")
if cdict.get("destination") and self.destination != cdict["destination"]:
self.destination = cdict["destination"]
updates.append("db_destination")
if updates:
self.save(update_fields=updates)
if cdict.get("permissions"):
self.permissions.add(cdict["permissions"])
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("aliases"):
self.aliases.add(cdict["aliases"])
if cdict.get("location"):
cdict["location"].at_object_receive(self, None)
self.at_after_move(None)
if cdict.get("tags"):
# this should be a list of tags
self.tags.add(cdict["tags"])
if cdict.get("attributes"):
# this should be a dict of attrname:value
keys, values = list(cdict["attributes"]), listvalues(cdict["attributes"])
self.attributes.batch_add(keys, values)
if cdict.get("nattributes"):
# this should be a dict of nattrname:value
for key, value in cdict["nattributes"].items():
self.nattributes.add(key, value)
del self._createdict
self.basetype_posthook_setup()
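    # A hedged illustration of where `_createdict` comes from: the
    # `evennia.utils.create.create_object` helper sets it before this first
    # save, so a call like the one below (names are made up) would exercise
    # the key/location/lock handling above.
    #
    #     from evennia.utils.create import create_object
    #     rock = create_object("typeclasses.objects.Object", key="rock",
    #                          location=caller.location, locks="get:false()")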
## hooks called by the game engine
def basetype_setup(self):
"""
This sets up the default properties of an Object, just before
the more general at_object_creation.
You normally don't need to change this unless you change some
fundamental things like names of permission groups.
"""
# the default security setup fallback for a generic
# object. Overload in child for a custom setup. Also creation
# commands may set this (create an item and you should be its
# controller, for example)
self.locks.add(";".join([
"control:perm(Immortals)", # edit locks/permissions, delete
"examine:perm(Builders)", # examine properties
"view:all()", # look at object (visibility)
"edit:perm(Wizards)", # edit properties/attributes
"delete:perm(Wizards)", # delete object
"get:all()", # pick up object
"call:true()", # allow to call commands on this object
"tell:perm(Wizards)", # allow emits to this object
"puppet:pperm(Immortals)"])) # lock down puppeting only to staff by default
def basetype_posthook_setup(self):
"""
Called once, after basetype_setup and at_object_creation. This
should generally not be overloaded unless you are redefining
how a room/exit/object works. It allows for basetype-like
setup after the object is created. An example of this is
EXITs, who need to know keys, aliases, locks etc to set up
their exit-cmdsets.
"""
pass
def at_object_creation(self):
"""
Called once, when this object is first created. This is the
normal hook to overload for most object types.
"""
pass
def at_object_delete(self):
"""
Called just before the database object is permanently
delete()d from the database. If this method returns False,
deletion is aborted.
"""
return True
def at_init(self):
"""
This is always called whenever this object is initiated --
        that is, whenever its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this object are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the object currently
        has no cmdsets.
Kwargs:
Usually not set but could be used e.g. to force rebuilding
of a dynamically created cmdset or similar.
"""
pass
def at_pre_puppet(self, player, session=None):
"""
Called just before a Player connects to this object to puppet
it.
Args:
player (Player): This is the connecting player.
session (Session): Session controlling the connection.
"""
pass
def at_post_puppet(self):
"""
Called just after puppeting has been completed and all
Player<->Object links have been established.
"""
self.player.db._last_puppet = self
def at_pre_unpuppet(self):
"""
Called just before beginning to un-connect a puppeting from
this Player.
"""
pass
def at_post_unpuppet(self, player, session=None):
"""
Called just after the Player successfully disconnected from
this object, severing all connections.
Args:
player (Player): The player object that just disconnected
from this object.
session (Session): Session id controlling the connection that
just disconnected.
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
Args:
result (bool): The outcome of the access call.
            accessing_obj (Object or Player): The entity trying to
                gain access.
            access_type (str): The type of access that was requested.
Kwargs:
Not used by default, added for possible expandability in a
game.
"""
pass
# hooks called when moving the object
def at_before_move(self, destination):
"""
Called just before starting to move this object to
destination.
Args:
destination (Object): The object we are moving to
Returns:
shouldmove (bool): If we should move or not.
Notes:
If this method returns False/None, the move is cancelled
before it is even started.
"""
#return has_perm(self, destination, "can_move")
return True
def announce_move_from(self, destination):
"""
Called if the move is to be announced. This is
called while we are still standing in the old
location.
Args:
destination (Object): The place we are going to.
"""
if not self.location:
return
name = self.name
        loc_name = self.location.name
dest_name = destination.name
string = "%s is leaving %s, heading for %s."
self.location.msg_contents(string % (name, loc_name, dest_name), exclude=self)
def announce_move_to(self, source_location):
"""
Called after the move if the move was not quiet. At this point
we are standing in the new location.
Args:
source_location (Object): The place we came from
"""
name = self.name
if not source_location and self.location.has_player:
# This was created from nowhere and added to a player's
# inventory; it's probably the result of a create command.
string = "You now have %s in your possession." % name
self.location.msg(string)
return
src_name = "nowhere"
loc_name = self.location.name
if source_location:
src_name = source_location.name
string = "%s arrives to %s from %s."
self.location.msg_contents(string % (name, loc_name, src_name), exclude=self)
def at_after_move(self, source_location):
"""
Called after move has completed, regardless of quiet mode or
not. Allows changes to the object due to the location it is
now in.
Args:
            source_location (Object): Where we came from. This may be `None`.
"""
pass
def at_object_leave(self, moved_obj, target_location):
"""
Called just before an object leaves from inside this object
Args:
moved_obj (Object): The object leaving
target_location (Object): Where `moved_obj` is going.
"""
pass
def at_object_receive(self, moved_obj, source_location):
"""
Called after an object has been moved into this object.
Args:
moved_obj (Object): The object moved into this one
source_location (Object): Where `moved_object` came from.
"""
pass
def at_traverse(self, traversing_object, target_location):
"""
This hook is responsible for handling the actual traversal,
normally by calling
`traversing_object.move_to(target_location)`. It is normally
only implemented by Exit objects. If it returns False (usually
because `move_to` returned False), `at_after_traverse` below
should not be called and instead `at_failed_traverse` should be
called.
Args:
traversing_object (Object): Object traversing us.
target_location (Object): Where target is going.
"""
pass
def at_after_traverse(self, traversing_object, source_location):
"""
Called just after an object successfully used this object to
traverse to another object (i.e. this object is a type of
Exit)
Args:
traversing_object (Object): The object traversing us.
source_location (Object): Where `traversing_object` came from.
Notes:
The target location should normally be available as `self.destination`.
"""
pass
def at_failed_traverse(self, traversing_object):
"""
This is called if an object fails to traverse this object for
some reason.
Args:
traversing_object (Object): The object that failed traversing us.
Notes:
Using the default exits, this hook will not be called if an
Attribute `err_traverse` is defined - this will in that case be
read for an error string instead.
"""
pass
def at_msg_receive(self, text=None, **kwargs):
"""
This hook is called whenever someone sends a message to this
object using the `msg` method.
Note that from_obj may be None if the sender did not include
itself as an argument to the obj.msg() call - so you have to
        check for this.
Consider this a pre-processing method before msg is passed on
        to the user session. If this method returns False, the msg
will not be passed on.
Args:
text (str, optional): The message received.
Kwargs:
This includes any keywords sent to the `msg` method.
Returns:
receive (bool): If this message should be received.
Notes:
If this method returns False, the `msg` operation
will abort without sending the message.
"""
return True
def at_msg_send(self, text=None, to_obj=None, **kwargs):
"""
This is a hook that is called when *this* object sends a
message to another object with `obj.msg(text, to_obj=obj)`.
Args:
text (str): Text to send.
to_obj (Object): The object to send to.
Kwargs:
Keywords passed from msg()
Notes:
            Since this method is executed by `from_obj`, if no `from_obj`
was passed to `DefaultCharacter.msg` this hook will never
get called.
"""
pass
# hooks called by the default cmdset.
def return_appearance(self, looker):
"""
This formats a description. It is the hook a 'look' command
should call.
Args:
looker (Object): Object doing the looking.
"""
if not looker:
return
# get and identify all objects
visible = (con for con in self.contents if con != looker and
con.access(looker, "view"))
exits, users, things = [], [], []
for con in visible:
key = con.get_display_name(looker)
if con.destination:
exits.append(key)
elif con.has_player:
users.append("{c%s{n" % key)
else:
things.append(key)
# get description, build string
string = "{c%s{n\n" % self.get_display_name(looker)
desc = self.db.desc
if desc:
string += "%s" % desc
if exits:
string += "\n{wExits:{n " + ", ".join(exits)
if users or things:
string += "\n{wYou see:{n " + ", ".join(users + things)
return string
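    # An illustrative override of the hook above in a child typeclass;
    # `db.condition` is a hypothetical Attribute, not part of this API:
    #
    #     def return_appearance(self, looker):
    #         string = super(MyObject, self).return_appearance(looker)
    #         if self.db.condition:
    #             string += "\nCondition: %s" % self.db.condition
    #         return string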
def at_look(self, target):
"""
Called when this object performs a look. It allows to
customize just what this means. It will not itself
send any data.
Args:
target (Object): The target being looked at. This is
commonly an object or the current location. It will
be checked for the "view" type access.
Returns:
lookstring (str): A ready-processed look string
potentially ready to return to the looker.
"""
if not target.access(self, "view"):
try:
return "Could not view '%s'." % target.get_display_name(self)
except AttributeError:
return "Could not view '%s'." % target.key
# the target's at_desc() method.
target.at_desc(looker=self)
return target.return_appearance(self)
def at_desc(self, looker=None):
"""
This is called whenever someone looks at this object.
looker (Object): The object requesting the description.
"""
pass
def at_get(self, getter):
"""
Called by the default `get` command when this object has been
picked up.
Args:
getter (Object): The object getting this object.
Notes:
This hook cannot stop the pickup from happening. Use
permissions for that.
"""
pass
def at_drop(self, dropper):
"""
Called by the default `drop` command when this object has been
dropped.
Args:
dropper (Object): The object which just dropped this object.
Notes:
            This hook cannot stop the drop from happening. Use
            permissions for that.
"""
pass
def at_say(self, speaker, message):
"""
Called on this object if an object inside this object speaks.
The string returned from this method is the final form of the
speech.
Args:
speaker (Object): The object speaking.
message (str): The words spoken.
Notes:
You should not need to add things like 'you say: ' or
similar here, that should be handled by the say command before
this.
"""
return message
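    # Illustrative only: a listening object could decorate speech by
    # overriding the hook above, using the same {-style color markup seen
    # elsewhere in this file:
    #
    #     def at_say(self, speaker, message):
    #         return "{y%s{n" % message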
#
# Base Character object
#
class DefaultCharacter(DefaultObject):
"""
This implements an Object puppeted by a Session - that is,
a character avatar controlled by a player.
"""
def basetype_setup(self):
"""
Setup character-specific security.
You should normally not need to overload this, but if you do,
make sure to reproduce at least the two last commands in this
method (unless you want to fundamentally change how a
Character object works).
"""
super(DefaultCharacter, self).basetype_setup()
self.locks.add(";".join(["get:false()", # noone can pick up the character
"call:false()"])) # no commands can be called on character from outside
# add the default cmdset
self.cmdset.add_default(settings.CMDSET_CHARACTER, permanent=True)
def at_after_move(self, source_location):
"""
We make sure to look around after a move.
"""
if self.location.access(self, "view"):
self.msg(self.at_look(self.location))
def at_pre_puppet(self, player, session=None):
"""
        This implementation recovers the character again after having been "stowed
away" to the `None` location in `at_post_unpuppet`.
Args:
player (Player): This is the connecting player.
session (Session): Session controlling the connection.
"""
if self.db.prelogout_location:
# try to recover
self.location = self.db.prelogout_location
if self.location is None:
# make sure location is never None (home should always exist)
self.location = self.home
if self.location:
# save location again to be sure
self.db.prelogout_location = self.location
self.location.at_object_receive(self, self.location)
else:
player.msg("{r%s has no location and no home is set.{n" % self, session=session)
def at_post_puppet(self):
"""
Called just after puppeting has been completed and all
Player<->Object links have been established.
"""
self.msg("\nYou become {c%s{n.\n" % self.name)
self.msg(self.at_look(self.location))
def message(obj, from_obj):
obj.msg("%s has entered the game." % self.get_display_name(obj), from_obj=from_obj)
self.location.for_contents(message, exclude=[self], from_obj=self)
def at_post_unpuppet(self, player, session=None):
"""
        We stow away the character when the player goes ooc/logs off,
otherwise the character object will remain in the room also
after the player logged off ("headless", so to say).
Args:
player (Player): The player object that just disconnected
from this object.
session (Session): Session controlling the connection that
just disconnected.
"""
if not self.sessions.count():
# only remove this char from grid if no sessions control it anymore.
if self.location:
def message(obj, from_obj):
obj.msg("%s has left the game." % self.get_display_name(obj), from_obj=from_obj)
self.location.for_contents(message, exclude=[self], from_obj=self)
self.db.prelogout_location = self.location
self.location = None
@property
def idle_time(self):
"""
Returns the idle time of the least idle session in seconds. If
no sessions are connected it returns nothing.
"""
idle = [session.cmd_last_visible for session in self.sessions.all()]
if idle:
return time.time() - float(max(idle))
@property
def connection_time(self):
"""
Returns the maximum connection time of all connected sessions
in seconds. Returns nothing if there are no sessions.
"""
conn = [session.conn_time for session in self.sessions.all()]
if conn:
return time.time() - float(min(conn))
#
# Base Room object
#
class DefaultRoom(DefaultObject):
"""
This is the base room object. It's just like any Object except its
location is always `None`.
"""
def basetype_setup(self):
"""
Simple room setup setting locks to make sure the room
cannot be picked up.
"""
super(DefaultRoom, self).basetype_setup()
self.locks.add(";".join(["get:false()",
"puppet:false()"])) # would be weird to puppet a room ...
self.location = None
#
# Default Exit command, used by the base exit object
#
class ExitCommand(command.Command):
"""
    This is a command that simply causes the caller to traverse
the object it is attached to.
"""
obj = None
def func(self):
"""
Default exit traverse if no syscommand is defined.
"""
if self.obj.access(self.caller, 'traverse'):
# we may traverse the exit.
self.obj.at_traverse(self.caller, self.obj.destination)
else:
# exit is locked
if self.obj.db.err_traverse:
# if exit has a better error message, let's use it.
self.caller.msg(self.obj.db.err_traverse)
else:
# No shorthand error message. Call hook.
self.obj.at_failed_traverse(self.caller)
def get_extra_info(self, caller, **kwargs):
"""
Shows a bit of information on where the exit leads.
Args:
caller (Object): The object (usually a character) that entered an ambiguous command.
Returns:
A string with identifying information to disambiguate the command, conventionally with a preceding space.
"""
if self.obj.destination:
return " (exit to %s)" % self.obj.destination.get_display_name(caller)
else:
return " (%s)" % self.obj.get_display_name(caller)
#
# Base Exit object
#
class DefaultExit(DefaultObject):
"""
This is the base exit object - it connects a location to another.
This is done by the exit assigning a "command" on itself with the
same name as the exit object (to do this we need to remember to
re-create the command when the object is cached since it must be
created dynamically depending on what the exit is called). This
command (which has a high priority) will thus allow us to traverse
exits simply by giving the exit-object's name on its own.
"""
exit_command = ExitCommand
priority = 101
# Helper classes and methods to implement the Exit. These need not
    # be overloaded unless one wants to change the foundation for how
# Exits work. See the end of the class for hook methods to overload.
def create_exit_cmdset(self, exidbobj):
"""
Helper function for creating an exit command set + command.
The command of this cmdset has the same name as the Exit
        object and allows the exit to react when the player enters the
exit's name, triggering the movement between rooms.
Args:
exidbobj (Object): The DefaultExit object to base the command on.
"""
# create an exit command. We give the properties here,
# to always trigger metaclass preparations
cmd = self.exit_command(key=exidbobj.db_key.strip().lower(),
aliases=exidbobj.aliases.all(),
locks=str(exidbobj.locks),
auto_help=False,
destination=exidbobj.db_destination,
arg_regex=r"^$",
is_exit=True,
obj=exidbobj)
# create a cmdset
exit_cmdset = cmdset.CmdSet(None)
exit_cmdset.key = '_exitset'
exit_cmdset.priority = self.priority
exit_cmdset.duplicates = True
# add command to cmdset
exit_cmdset.add(cmd)
return exit_cmdset
# Command hooks
def basetype_setup(self):
"""
Setup exit-security
You should normally not need to overload this - if you do make
sure you include all the functionality in this method.
"""
super(DefaultExit, self).basetype_setup()
        # setting default locks (overload these in at_object_creation())
self.locks.add(";".join(["puppet:false()", # would be weird to puppet an exit ...
"traverse:all()", # who can pass through exit by default
"get:false()"])) # noone can pick up the exit
# an exit should have a destination (this is replaced at creation time)
if self.location:
self.destination = self.location
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this object are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the object currently
has no cmdsets.
Kwargs:
force_init (bool): If `True`, force a re-build of the cmdset
(for example to update aliases).
"""
if "force_init" in kwargs or not self.cmdset.has_cmdset("_exitset", must_be_default=True):
# we are resetting, or no exit-cmdset was set. Create one dynamically.
self.cmdset.add_default(self.create_exit_cmdset(self), permanent=False)
def at_init(self):
"""
        This is called when this object is re-loaded from cache. When
that happens, we make sure to remove any old _exitset cmdset
(this most commonly occurs when renaming an existing exit)
"""
self.cmdset.remove_default()
def at_traverse(self, traversing_object, target_location):
"""
This implements the actual traversal. The traverse lock has
already been checked (in the Exit command) at this point.
Args:
traversing_object (Object): Object traversing us.
target_location (Object): Where target is going.
"""
source_location = traversing_object.location
if traversing_object.move_to(target_location):
self.at_after_traverse(traversing_object, source_location)
else:
if self.db.err_traverse:
# if exit has a better error message, let's use it.
                traversing_object.msg(self.db.err_traverse)
else:
# No shorthand error message. Call hook.
self.at_failed_traverse(traversing_object)
def at_failed_traverse(self, traversing_object):
"""
Overloads the default hook to implement a simple default error message.
Args:
traversing_object (Object): The object that failed traversing us.
Notes:
Using the default exits, this hook will not be called if an
Attribute `err_traverse` is defined - this will in that case be
read for an error string instead.
"""
traversing_object.msg("You cannot go there.")
|
DuCorey/bokeh | refs/heads/master | tests/integration/integration_tests_plugin.py | 8 | import os
import pytest
from bokeh.io import output_file
from .screenshot import Screenshot, ScreenshotMismatchError
def pytest_addoption(parser):
parser.addoption(
"--set-new-base-screenshot", dest="set_new_base_screenshot", action="store_true", default=False,
help="Use to set a new screenshot for imagediff testing. Be sure to only set for the tests you want by usign the -k pytest option to select your test.")
@pytest.fixture
def selenium(selenium):
# Give items a chance to load
selenium.implicitly_wait(10)
selenium.set_window_size(width=1200, height=600)
return selenium
@pytest.fixture
def output_file_url(request, file_server):
filename = request.function.__name__ + '.html'
file_obj = request.fspath.dirpath().join(filename)
file_path = file_obj.strpath
url = file_path.replace('\\', '/') # Windows-proof
output_file(file_path, mode='inline')
def tearDown():
if file_obj.isfile():
file_obj.remove()
request.addfinalizer(tearDown)
return file_server.where_is(url)
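# A hedged sketch of how a test module would consume the two fixtures above
# (the plot and assertion details are illustrative, not taken from this repo):
#
#     from bokeh.plotting import figure, save
#
#     def test_scatter_renders(output_file_url, selenium):
#         p = figure()
#         p.circle([1, 2, 3], [1, 2, 3])
#         save(p)  # writes to the html file registered by output_file()
#         selenium.get(output_file_url)
#         assert selenium.find_element_by_class_name('bk-root')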
def pytest_generate_tests(metafunc):
# hasattr(metafunc.function, "foo") is like doing item.get_marker("foo")
# This is ugly, but unfortunately there's not currently a better interface
# https://github.com/pytest-dev/pytest/issues/1425
if hasattr(metafunc.function, "cross_browser"):
if metafunc.config.option.driver == "SauceLabs":
cross_browser_list = [
{
"browserName": "firefox",
"platform": "Linux",
"version": None
},
{
"browserName": "chrome",
"platform": "Linux",
"version": None
},
]
metafunc.fixturenames.append('test_browser')
metafunc.parametrize('test_browser', cross_browser_list, ids=["firefox", "chrome"])
@pytest.fixture()
def test_browser():
# If version is None, latest will be used
# Latest is Firefox 45 as of Dec 13, 2016
return {"browserName": "firefox", "platform": "Linux", "version": None}
@pytest.fixture()
def capabilities(capabilities, test_browser):
capabilities["browserName"] = test_browser["browserName"]
capabilities["platform"] = test_browser["platform"]
if test_browser["version"]:
capabilities["version"] = test_browser["version"]
return capabilities
@pytest.fixture(scope="session")
def session_capabilities(session_capabilities):
session_capabilities["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER")
return session_capabilities
@pytest.fixture
def screenshot(request):
# Screenshot tests can only be run under the following circumstances:
# - driver: SauceLabs
# - capabilities: browserName: firefox
# - capabilities: platform: linux
# This helps ensure that screenshots are comparable.
if request.config.option.driver != 'SauceLabs':
pytest.skip('Screenshot tests can only be run with --driver=SauceLabs')
capabilities = request.getfixturevalue('capabilities')
if capabilities['browserName'] != 'firefox':
        pytest.skip('Screenshot tests can only be run with browserName firefox. Capabilities are: %s' % capabilities)
if capabilities['platform'] != 'Linux':
pytest.skip('Screenshot tests can only be run with platform linux. Capabilities are: %s' % capabilities)
if request.config.option.set_new_base_screenshot:
screenshot = Screenshot(request=request, set_new_base=True)
else:
screenshot = Screenshot(request=request, set_new_base=False)
return screenshot
#
# Hook into the pytest report to add the screenshot diff
#
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
# When executing the test failed for some reason
if report.when == "call" and report.failed:
if 'screenshot' in item.fixturenames and isinstance(call.excinfo.value, ScreenshotMismatchError):
screenshot = Screenshot(item=item)
pytest_html = item.config.pluginmanager.getplugin('html')
diff = pytest_html.extras.image(screenshot.get_diff_as_base64(), '')
base = pytest_html.extras.image(screenshot.get_base_as_base64(), '')
test = pytest_html.extras.image(screenshot.get_current_as_base64(), '')
# Override existing extra screenshot attr to add image reports
report.extra = [test, diff, base]
|
Alofoxx/club-websystem | refs/heads/master | src/events/migrations/0004_created_recurring_event_model.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('events', '0003_changed_event_model'),
]
operations = [
migrations.CreateModel(
name='RecurringEvent',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('starts_on', models.DateField(verbose_name=b'Starts on')),
('ends_on', models.DateField(verbose_name=b'Ends on')),
('rule_type', models.IntegerField(default=200, verbose_name=b'Recurring rule', choices=[(100, b'Every day'), (200, b'Specified days of the week'), (300, b'Specified days of the month')])),
('repeat_each', models.IntegerField(default=1, help_text=b'Repeat every X days/weeks/months.', verbose_name=b'Repeat each')),
('criteria', models.CharField(max_length=200, verbose_name=b'Criteria')),
],
options={
'verbose_name': 'Recurring Event',
'verbose_name_plural': 'Recurring Events',
},
),
migrations.AddField(
model_name='event',
name='recurring',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name=b'Belongs to recurring group', blank=True, to='events.RecurringEvent', null=True),
),
]
|
mattesno1/Sick-Beard | refs/heads/master | sickbeard/notifiers/nma.py | 10 | import sickbeard
from sickbeard import logger, common
from lib.pynma import pynma
class NMA_Notifier:
def test_notify(self, nma_api, nma_priority):
return self._sendNMA(nma_api, nma_priority, event="Test", message="Testing NMA settings from Sick Beard", force=True)
def notify_snatch(self, ep_name):
if sickbeard.NMA_NOTIFY_ONSNATCH:
self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SNATCH], message=ep_name)
def notify_download(self, ep_name):
if sickbeard.NMA_NOTIFY_ONDOWNLOAD:
self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD], message=ep_name)
def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):
title = 'Sick-Beard'
if not sickbeard.USE_NMA and not force:
return False
        if nma_api is None:
            nma_api = sickbeard.NMA_API
        if nma_priority is None:
            nma_priority = sickbeard.NMA_PRIORITY
logger.log(u"NMA title: " + title, logger.DEBUG)
logger.log(u"NMA event: " + event, logger.DEBUG)
logger.log(u"NMA message: " + message, logger.DEBUG)
batch = False
p = pynma.PyNMA()
keys = nma_api.split(',')
p.addkey(keys)
if len(keys) > 1: batch = True
response = p.push(title, event, message, priority=nma_priority, batch_mode=batch)
if not response[nma_api][u'code'] == u'200':
logger.log(u'Could not send notification to NotifyMyAndroid', logger.ERROR)
return False
else:
return True
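# Illustrative usage, assuming a valid NotifyMyAndroid API key string; the
# force flag inside test_notify bypasses the USE_NMA setting:
#
#     ok = notifier.test_notify('0123456789abcdef', nma_priority=0)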
notifier = NMA_Notifier |
ramineni/myironic | refs/heads/master | ironic/drivers/modules/ipminative.py | 1 | # coding=utf-8
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic Native IPMI power manager.
"""
import os
import tempfile
from oslo.utils import excutils
from oslo.utils import importutils
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules import console_utils
from ironic.openstack.common import log as logging
pyghmi = importutils.try_import('pyghmi')
if pyghmi:
from pyghmi import exceptions as pyghmi_exception
from pyghmi.ipmi import command as ipmi_command
opts = [
cfg.IntOpt('retry_timeout',
default=60,
help='Maximum time in seconds to retry IPMI operations.'),
cfg.IntOpt('min_command_interval',
default=5,
help='Minimum time, in seconds, between IPMI operations '
'sent to a server. There is a risk with some hardware '
'that setting this too low may cause the BMC to crash. '
'Recommended setting is 5 seconds.'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='ipmi')
LOG = logging.getLogger(__name__)
REQUIRED_PROPERTIES = {'ipmi_address': _("IP of the node's BMC. Required."),
'ipmi_password': _("IPMI password. Required."),
'ipmi_username': _("IPMI username. Required.")}
COMMON_PROPERTIES = REQUIRED_PROPERTIES
CONSOLE_PROPERTIES = {
'ipmi_terminal_port': _("node's UDP port to connect to. Only required for "
"console access.")
}
_BOOT_DEVICES_MAP = {
boot_devices.DISK: 'hd',
boot_devices.PXE: 'network',
boot_devices.CDROM: 'cdrom',
boot_devices.BIOS: 'setup',
}
def _parse_driver_info(node):
"""Gets the bmc access info for the given node.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: InvalidParameterValue when the IPMI terminal port is not an
integer.
"""
info = node.driver_info or {}
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"Missing the following IPMI credentials in node's"
" driver_info: %s.") % missing_info)
bmc_info = {}
bmc_info['address'] = info.get('ipmi_address')
bmc_info['username'] = info.get('ipmi_username')
bmc_info['password'] = info.get('ipmi_password')
# get additional info
bmc_info['uuid'] = node.uuid
# terminal port must be an integer
port = info.get('ipmi_terminal_port')
if port is not None:
try:
port = int(port)
except ValueError:
raise exception.InvalidParameterValue(_(
"IPMI terminal port is not an integer."))
bmc_info['port'] = port
return bmc_info
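# For reference, a minimal node.driver_info mapping that satisfies the parser
# above could look like this (all values are placeholders):
#
#     {
#         'ipmi_address': '10.0.0.5',
#         'ipmi_username': 'admin',
#         'ipmi_password': 'secret',
#         'ipmi_terminal_port': 9000,  # optional; console access only
#     }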
def _console_pwfile_path(uuid):
"""Return the file path for storing the ipmi password."""
file_name = "%(uuid)s.pw" % {'uuid': uuid}
return os.path.join(tempfile.gettempdir(), file_name)
def _power_on(driver_info):
"""Turn the power on for this node.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_ON, one of :class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
:raises: PowerStateFailure when invalid power state is returned
from ipmi.
"""
msg = _LW("IPMI power on failed for node %(node_id)s with the "
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('on', wait)
except pyghmi_exception.IpmiException as e:
LOG.warning(msg, {'node_id': driver_info['uuid'], 'error': str(e)})
raise exception.IPMIFailure(cmd=str(e))
state = ret.get('powerstate')
if state == 'on':
return states.POWER_ON
else:
LOG.warning(msg, {'node_id': driver_info['uuid'], 'error': ret})
raise exception.PowerStateFailure(pstate=state)
def _power_off(driver_info):
"""Turn the power off for this node.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_OFF, one of :class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
:raises: PowerStateFailure when invalid power state is returned
from ipmi.
"""
msg = _LW("IPMI power off failed for node %(node_id)s with the "
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('off', wait)
except pyghmi_exception.IpmiException as e:
LOG.warning(msg, {'node_id': driver_info['uuid'], 'error': str(e)})
raise exception.IPMIFailure(cmd=str(e))
state = ret.get('powerstate')
if state == 'off':
return states.POWER_OFF
else:
        LOG.warning(msg, {'node_id': driver_info['uuid'], 'error': ret})
raise exception.PowerStateFailure(pstate=state)
def _reboot(driver_info):
"""Reboot this node.
If the power is off, turn it on. If the power is on, reset it.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_ON, one of :class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
:raises: PowerStateFailure when invalid power state is returned
from ipmi.
"""
msg = _LW("IPMI power reboot failed for node %(node_id)s with the "
"following error: %(error)s")
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
wait = CONF.ipmi.retry_timeout
ret = ipmicmd.set_power('boot', wait)
except pyghmi_exception.IpmiException as e:
        LOG.warning(msg, {'node_id': driver_info['uuid'], 'error': str(e)})
raise exception.IPMIFailure(cmd=str(e))
state = ret.get('powerstate')
if state == 'on':
return states.POWER_ON
else:
        LOG.warning(msg, {'node_id': driver_info['uuid'], 'error': ret})
raise exception.PowerStateFailure(pstate=state)
def _power_status(driver_info):
"""Get the power status for this node.
:param driver_info: the bmc access info for a node.
:returns: power state POWER_ON, POWER_OFF or ERROR defined in
:class:`ironic.common.states`.
:raises: IPMIFailure when the native ipmi call fails.
"""
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_power()
except pyghmi_exception.IpmiException as e:
LOG.warning(_LW("IPMI get power state failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': str(e)})
raise exception.IPMIFailure(cmd=str(e))
state = ret.get('powerstate')
if state == 'on':
return states.POWER_ON
elif state == 'off':
return states.POWER_OFF
else:
# NOTE(linggao): Do not throw an exception here because it might
# return other valid values. It is up to the caller to decide
# what to do.
LOG.warning(_LW("IPMI get power state for node %(node_id)s returns the"
" following details: %(detail)s"),
{'node_id': driver_info['uuid'], 'detail': ret})
return states.ERROR
def _get_sensors_data(driver_info):
"""Get sensors data.
:param driver_info: node's driver info
:raises: FailedToGetSensorData when getting the sensor data fails.
:returns: returns a dict of sensor data group by sensor type.
"""
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_sensor_data()
except Exception as e:
LOG.error(_LE("IPMI get sensor data failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.FailedToGetSensorData(
node=driver_info['uuid'], error=e)
if not ret:
return {}
sensors_data = {}
for reading in ret:
# ignore the sensor data which has no sensor reading value
if not reading.value:
continue
sensors_data.setdefault(reading.type,
{})[reading.name] = {
'Sensor Reading': '%s %s' % (reading.value, reading.units),
'Sensor ID': reading.name,
'States': str(reading.states),
'Units': reading.units,
'Health': str(reading.health)}
return sensors_data
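# Sketch of the structure returned above, grouped by sensor type; the sensor
# names and readings are invented for illustration:
#
#     {'Temperature': {'CPU Temp': {'Sensor Reading': '42 C',
#                                   'Sensor ID': 'CPU Temp',
#                                   'States': '[]',
#                                   'Units': 'C',
#                                   'Health': '0'}}}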
class NativeIPMIPower(base.PowerInterface):
"""The power driver using native python-ipmi library."""
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that node['driver_info'] contains IPMI credentials.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required ipmi credentials
are missing.
"""
_parse_driver_info(task.node)
def get_power_state(self, task):
"""Get the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: power state POWER_ON, POWER_OFF or ERROR defined in
:class:`ironic.common.states`.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: IPMIFailure when the native ipmi call fails.
"""
driver_info = _parse_driver_info(task.node)
return _power_status(driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
:param task: a TaskManager instance containing the node to act on.
:param pstate: a power state that will be set on the task's node.
:raises: IPMIFailure when the native ipmi call fails.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: InvalidParameterValue when an invalid power state
is specified
:raises: PowerStateFailure when invalid power state is returned
from ipmi.
"""
driver_info = _parse_driver_info(task.node)
if pstate == states.POWER_ON:
_power_on(driver_info)
elif pstate == states.POWER_OFF:
_power_off(driver_info)
else:
raise exception.InvalidParameterValue(_(
"set_power_state called with an invalid power state: %s."
) % pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: IPMIFailure when the native ipmi call fails.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: PowerStateFailure when invalid power state is returned
from ipmi.
"""
driver_info = _parse_driver_info(task.node)
_reboot(driver_info)
class NativeIPMIManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that 'driver_info' contains IPMI credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: MissingParameterValue when required ipmi credentials
are missing.
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self):
"""Get a list of the supported boot devices.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(_BOOT_DEVICES_MAP.keys())
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: InvalidParameterValue if an invalid boot device is specified
or required ipmi credentials are missing.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: IPMIFailure on an error from pyghmi.
"""
if device not in self.get_supported_boot_devices():
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
driver_info = _parse_driver_info(task.node)
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
bootdev = _BOOT_DEVICES_MAP[device]
ipmicmd.set_bootdev(bootdev, persist=persistent)
except pyghmi_exception.IpmiException as e:
LOG.error(_LE("IPMI set boot device failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=e)
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: MissingParameterValue if required IPMI parameters
are missing.
:raises: IPMIFailure on an error from pyghmi.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
driver_info = _parse_driver_info(task.node)
response = {'boot_device': None}
try:
ipmicmd = ipmi_command.Command(bmc=driver_info['address'],
userid=driver_info['username'],
password=driver_info['password'])
ret = ipmicmd.get_bootdev()
# FIXME(lucasagomes): pyghmi doesn't seem to handle errors
# consistently, for some errors it raises an exception
# others it just returns a dictionary with the error.
if 'error' in ret:
raise pyghmi_exception.IpmiException(ret['error'])
except pyghmi_exception.IpmiException as e:
LOG.error(_LE("IPMI get boot device failed for node %(node_id)s "
"with the following error: %(error)s"),
{'node_id': driver_info['uuid'], 'error': e})
raise exception.IPMIFailure(cmd=e)
response['persistent'] = ret.get('persistent')
bootdev = ret.get('bootdev')
if bootdev:
response['boot_device'] = next((dev for dev, hdev in
_BOOT_DEVICES_MAP.items()
if hdev == bootdev), None)
return response
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: MissingParameterValue if required ipmi parameters are missing
:returns: returns a dict of sensor data group by sensor type.
"""
driver_info = _parse_driver_info(task.node)
return _get_sensors_data(driver_info)
class NativeIPMIShellinaboxConsole(base.ConsoleInterface):
"""A ConsoleInterface that uses pyghmi and shellinabox."""
def get_properties(self):
d = COMMON_PROPERTIES.copy()
d.update(CONSOLE_PROPERTIES)
return d
def validate(self, task):
"""Validate the Node console info.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required IPMI credentials or
the IPMI terminal port are missing
:raises: InvalidParameterValue when the IPMI terminal port is not
an integer.
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
raise exception.MissingParameterValue(_(
"Missing 'ipmi_terminal_port' parameter in node's"
" driver_info."))
def start_console(self, task):
"""Start a remote console for the node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required ipmi credentials
are missing.
:raises: InvalidParameterValue when the IPMI terminal port is not an
integer.
:raises: ConsoleError if unable to start the console process.
"""
driver_info = _parse_driver_info(task.node)
path = _console_pwfile_path(driver_info['uuid'])
pw_file = console_utils.make_persistent_password_file(
path, driver_info['password'])
console_cmd = ("/:%(uid)s:%(gid)s:HOME:pyghmicons %(bmc)s"
" %(user)s"
" %(passwd_file)s"
% {'uid': os.getuid(),
'gid': os.getgid(),
'bmc': driver_info['address'],
'user': driver_info['username'],
'passwd_file': pw_file})
try:
console_utils.start_shellinabox_console(driver_info['uuid'],
driver_info['port'],
console_cmd)
except exception.ConsoleError:
with excutils.save_and_reraise_exception():
utils.unlink_without_raise(path)
def stop_console(self, task):
"""Stop the remote console session for the node.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required IPMI credentials or
the IPMI terminal port are missing
:raises: InvalidParameterValue when the IPMI terminal port is not
an integer.
:raises: ConsoleError if unable to stop the console process.
"""
driver_info = _parse_driver_info(task.node)
try:
console_utils.stop_shellinabox_console(driver_info['uuid'])
finally:
password_file = _console_pwfile_path(driver_info['uuid'])
utils.unlink_without_raise(password_file)
def get_console(self, task):
"""Get the type and connection information about the console.
:param task: a TaskManager instance containing the node to act on.
:raises: MissingParameterValue when required IPMI credentials or
the IPMI terminal port are missing
:raises: InvalidParameterValue when the IPMI terminal port is not
an integer.
"""
driver_info = _parse_driver_info(task.node)
url = console_utils.get_shellinabox_console_url(driver_info['port'])
return {'type': 'shellinabox', 'url': url}
|
gxk/libnl | refs/heads/master | python/netlink/route/address.py | 3 | #
# Copyright (c) 2011 Thomas Graf <[email protected]>
#
"""Module providing access to network addresses
"""
__version__ = "1.0"
__all__ = [
'AddressCache',
'Address']
import datetime
import netlink.core as netlink
import netlink.capi as core_capi
import netlink.route.capi as capi
import netlink.route.link as Link
import netlink.util as util
###########################################################################
# Address Cache
class AddressCache(netlink.Cache):
"""Cache containing network addresses"""
def __init__(self, cache=None):
if not cache:
cache = self._alloc_cache_name("route/addr")
self._protocol = netlink.NETLINK_ROUTE
self._nl_cache = cache
def __getitem__(self, key):
# Using ifindex=0 here implies that the local address itself
# is unique, otherwise the first occurence is returned.
return self.lookup(0, key)
def lookup(self, ifindex, local):
if type(local) is str:
local = netlink.AbstractAddress(local)
addr = capi.rtnl_addr_get(self._nl_cache, ifindex,
local._nl_addr)
if addr is None:
raise KeyError()
return Address._from_capi(addr)
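    # A short usage sketch (the address literal is made up, and `refill()`
    # is assumed to be the usual netlink.Cache helper that fills the cache
    # from the kernel):
    #
    #     cache = AddressCache()
    #     cache.refill()
    #     addr = cache.lookup(0, '127.0.0.1/8')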
def _new_object(self, obj):
return Address(obj)
def _new_cache(self, cache):
return AddressCache(cache=cache)
###########################################################################
# Address Object
class Address(netlink.Object):
"""Network address"""
def __init__(self, obj=None):
netlink.Object.__init__(self, "route/addr", "address", obj)
self._rtnl_addr = self._obj2type(self._nl_object)
@classmethod
def _from_capi(cls, obj):
return cls(capi.addr2obj(obj))
def _obj2type(self, obj):
return capi.obj2addr(obj)
def __cmp__(self, other):
# sort by:
# 1. network link
# 2. address family
# 3. local address (including prefixlen)
diff = self.ifindex - other.ifindex
if diff == 0:
diff = self.family - other.family
if diff == 0:
diff = capi.nl_addr_cmp(self.local, other.local)
return diff
def _new_instance(self, obj):
return Address(obj)
#####################################################################
# ifindex
@netlink.nlattr('address.ifindex', type=int, immutable=True,
fmt=util.num)
@property
def ifindex(self):
"""interface index"""
return capi.rtnl_addr_get_ifindex(self._rtnl_addr)
@ifindex.setter
def ifindex(self, value):
link = Link.resolve(value)
if not link:
raise ValueError()
self.link = link
#####################################################################
# link
@netlink.nlattr('address.link', type=str, fmt=util.string)
@property
def link(self):
link = capi.rtnl_addr_get_link(self._rtnl_addr)
if not link:
return None
return Link.Link.from_capi(link)
@link.setter
def link(self, value):
if type(value) is str:
try:
value = Link.resolve(value)
except KeyError:
raise ValueError()
capi.rtnl_addr_set_link(self._rtnl_addr, value._rtnl_link)
# ifindex is immutable but we assume that if _orig does not
# have an ifindex specified, it was meant to be given here
if capi.rtnl_addr_get_ifindex(self._orig) == 0:
capi.rtnl_addr_set_ifindex(self._orig, value.ifindex)
#####################################################################
# label
@netlink.nlattr('address.label', type=str, fmt=util.string)
@property
def label(self):
"""address label"""
return capi.rtnl_addr_get_label(self._rtnl_addr)
@label.setter
def label(self, value):
capi.rtnl_addr_set_label(self._rtnl_addr, value)
#####################################################################
# flags
@netlink.nlattr('address.flags', type=str, fmt=util.string)
@property
def flags(self):
"""Flags"""
flags = capi.rtnl_addr_get_flags(self._rtnl_addr)
return capi.rtnl_addr_flags2str(flags, 256)[0].split(',')
def _set_flag(self, flag):
if flag[0] == '-':
i = capi.rtnl_addr_str2flags(flag[1:])
capi.rtnl_addr_unset_flags(self._rtnl_addr, i)
else:
            i = capi.rtnl_addr_str2flags(flag)
capi.rtnl_addr_set_flags(self._rtnl_addr, i)
@flags.setter
def flags(self, value):
if type(value) is list:
for flag in value:
self._set_flag(flag)
else:
self._set_flag(value)
#####################################################################
# family
@netlink.nlattr('address.family', type=int, immutable=True,
fmt=util.num)
@property
def family(self):
"""Address family"""
fam = capi.rtnl_addr_get_family(self._rtnl_addr)
return netlink.AddressFamily(fam)
@family.setter
def family(self, value):
        if not isinstance(value, netlink.AddressFamily):
            value = netlink.AddressFamily(value)
capi.rtnl_addr_set_family(self._rtnl_addr, int(value))
#####################################################################
# scope
@netlink.nlattr('address.scope', type=int, fmt=util.num)
@property
def scope(self):
"""Address scope"""
scope = capi.rtnl_addr_get_scope(self._rtnl_addr)
return capi.rtnl_scope2str(scope, 32)[0]
@scope.setter
def scope(self, value):
if type(value) is str:
value = capi.rtnl_str2scope(value)
capi.rtnl_addr_set_scope(self._rtnl_addr, value)
#####################################################################
# local address
@netlink.nlattr('address.local', type=str, immutable=True,
fmt=util.addr)
@property
def local(self):
"""Local address"""
a = capi.rtnl_addr_get_local(self._rtnl_addr)
return netlink.AbstractAddress(a)
@local.setter
def local(self, value):
a = netlink.AbstractAddress(value)
capi.rtnl_addr_set_local(self._rtnl_addr, a._nl_addr)
# local is immutable but we assume that if _orig does not
# have a local address specified, it was meant to be given here
if capi.rtnl_addr_get_local(self._orig) is None:
capi.rtnl_addr_set_local(self._orig, a._nl_addr)
#####################################################################
# Peer address
@netlink.nlattr('address.peer', type=str, fmt=util.addr)
@property
def peer(self):
"""Peer address"""
a = capi.rtnl_addr_get_peer(self._rtnl_addr)
return netlink.AbstractAddress(a)
@peer.setter
def peer(self, value):
a = netlink.AbstractAddress(value)
capi.rtnl_addr_set_peer(self._rtnl_addr, a._nl_addr)
#####################################################################
# Broadcast address
@netlink.nlattr('address.broadcast', type=str, fmt=util.addr)
@property
def broadcast(self):
"""Broadcast address"""
a = capi.rtnl_addr_get_broadcast(self._rtnl_addr)
return netlink.AbstractAddress(a)
@broadcast.setter
def broadcast(self, value):
a = netlink.AbstractAddress(value)
capi.rtnl_addr_set_broadcast(self._rtnl_addr, a._nl_addr)
#####################################################################
# Multicast address
@netlink.nlattr('address.multicast', type=str, fmt=util.addr)
@property
def multicast(self):
"""multicast address"""
a = capi.rtnl_addr_get_multicast(self._rtnl_addr)
return netlink.AbstractAddress(a)
@multicast.setter
def multicast(self, value):
try:
a = netlink.AbstractAddress(value)
except ValueError as err:
raise AttributeError('multicast', err)
capi.rtnl_addr_set_multicast(self._rtnl_addr, a._nl_addr)
#####################################################################
# Anycast address
@netlink.nlattr('address.anycast', type=str, fmt=util.addr)
@property
def anycast(self):
"""anycast address"""
a = capi.rtnl_addr_get_anycast(self._rtnl_addr)
return netlink.AbstractAddress(a)
@anycast.setter
def anycast(self, value):
a = netlink.AbstractAddress(value)
capi.rtnl_addr_set_anycast(self._rtnl_addr, a._nl_addr)
#####################################################################
# Valid lifetime
@netlink.nlattr('address.valid_lifetime', type=int, immutable=True,
fmt=util.num)
@property
def valid_lifetime(self):
"""Valid lifetime"""
msecs = capi.rtnl_addr_get_valid_lifetime(self._rtnl_addr)
if msecs == 0xFFFFFFFF:
return None
else:
return datetime.timedelta(seconds=msecs)
@valid_lifetime.setter
def valid_lifetime(self, value):
capi.rtnl_addr_set_valid_lifetime(self._rtnl_addr, int(value))
#####################################################################
# Preferred lifetime
@netlink.nlattr('address.preferred_lifetime', type=int,
immutable=True, fmt=util.num)
@property
def preferred_lifetime(self):
"""Preferred lifetime"""
msecs = capi.rtnl_addr_get_preferred_lifetime(self._rtnl_addr)
if msecs == 0xFFFFFFFF:
return None
else:
return datetime.timedelta(seconds=msecs)
@preferred_lifetime.setter
def preferred_lifetime(self, value):
capi.rtnl_addr_set_preferred_lifetime(self._rtnl_addr, int(value))
#####################################################################
# Creation Time
@netlink.nlattr('address.create_time', type=int, immutable=True,
fmt=util.num)
@property
def create_time(self):
"""Creation time"""
hsec = capi.rtnl_addr_get_create_time(self._rtnl_addr)
return datetime.timedelta(milliseconds=10*hsec)
#####################################################################
# Last Update
@netlink.nlattr('address.last_update', type=int, immutable=True,
fmt=util.num)
@property
def last_update(self):
"""Last update"""
hsec = capi.rtnl_addr_get_last_update_time(self._rtnl_addr)
return datetime.timedelta(milliseconds=10*hsec)
#####################################################################
# add()
def add(self, socket=None, flags=None):
if not socket:
socket = netlink.lookup_socket(netlink.NETLINK_ROUTE)
if not flags:
flags = netlink.NLM_F_CREATE
ret = capi.rtnl_addr_add(socket._sock, self._rtnl_addr, flags)
if ret < 0:
raise netlink.KernelError(ret)
#####################################################################
# delete()
def delete(self, socket):
"""Attempt to delete this link in the kernel"""
        ret = capi.rtnl_addr_delete(socket._sock, self._rtnl_addr)
if ret < 0:
raise netlink.KernelError(ret)
###################################################################
# private properties
#
# Used for formatting output. USE AT OWN RISK
@property
def _flags(self):
return ','.join(self.flags)
###################################################################
#
# format(details=False, stats=False)
#
def format(self, details=False, stats=False, nodev=False, indent=''):
"""Return address as formatted text"""
fmt = util.MyFormatter(self, indent)
buf = fmt.format('{a|local!b}')
if not nodev:
buf += fmt.format(' {a|ifindex}')
buf += fmt.format(' {a|scope}')
if self.label:
buf += fmt.format(' "{a|label}"')
buf += fmt.format(' <{a|_flags}>')
if details:
buf += fmt.nl('\t{t|broadcast} {t|multicast}') \
+ fmt.nl('\t{t|peer} {t|anycast}')
if self.valid_lifetime:
buf += fmt.nl('\t{s|valid-lifetime!k} '\
'{a|valid_lifetime}')
if self.preferred_lifetime:
buf += fmt.nl('\t{s|preferred-lifetime!k} '\
'{a|preferred_lifetime}')
if stats and (self.create_time or self.last_update):
            buf += fmt.nl('\t{s|created!k} {a|create_time}'\
' {s|last-updated!k} {a|last_update}')
return buf
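# Putting the pieces together -- an illustrative, untested way to add an
# address with this module (interface name and address are placeholders):
#
#     sock = netlink.lookup_socket(netlink.NETLINK_ROUTE)
#     addr = Address()
#     addr.local = '10.0.0.2/24'
#     addr.ifindex = 'eth0'   # resolved to a link via Link.resolve()
#     addr.add(sock)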
|
thermokarst/qiime2 | refs/heads/master | qiime2/core/testing/type.py | 1 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import qiime2.plugin as plugin
IntSequence1 = plugin.SemanticType('IntSequence1')
IntSequence2 = plugin.SemanticType('IntSequence2')
Mapping = plugin.SemanticType('Mapping')
FourInts = plugin.SemanticType('FourInts')
SingleInt = plugin.SemanticType('SingleInt')
Kennel = plugin.SemanticType('Kennel', field_names='pet')
Dog = plugin.SemanticType('Dog', variant_of=Kennel.field['pet'])
Cat = plugin.SemanticType('Cat', variant_of=Kennel.field['pet'])
# Kennel[Dog | Cat]
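# Illustrative composition (hypothetical variable names; the type algebra is
# the standard qiime2 one, as hinted by the comment above):
#   kennel_of_dogs = Kennel[Dog]
#   kennel_of_pets = Kennel[Dog | Cat]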
C1 = plugin.SemanticType('C1', field_names='first')
C2 = plugin.SemanticType('C2', field_names=['first', 'second'],
variant_of=C1.field['first'],
field_members={'first': [C1], 'second': [C1]})
C3 = plugin.SemanticType('C3', field_names=['first', 'second', 'third'],
variant_of=[C1.field['first'], C2.field['first'],
C2.field['second']],
field_members={'first': [C1, C2],
'second': [C1, C2],
'third': [C1, C2]})
_variants = [
C1.field['first'], C2.field['first'], C3.field['first'],
C2.field['second'], C3.field['second'],
C3.field['third']
]
Foo = plugin.SemanticType('Foo', variant_of=_variants)
Bar = plugin.SemanticType('Bar', variant_of=_variants)
Baz = plugin.SemanticType('Baz', variant_of=_variants)
# C1[C2[C3[Foo, Bar, Baz], C1[Foo]]] ... etc
|
garethsion/Bad_Boids | refs/heads/master | refactored_boids_no_classes.py | 1 | """
A deliberately bad implementation of [Boids](http://dl.acm.org/citation.cfm?doid=37401.37406)
for use as an exercise on refactoring.
"""
from matplotlib import pyplot as plt
from matplotlib import animation
import random
from numpy import array
# Deliberately terrible code for teaching purposes
number_of_boids = 50 # removed unnecessary repetition of this number
x_pos_bounds = array([-450, 50.0])
y_pos_bounds = array([300.0, 600.0])
x_vel_bounds = array([0, 10.0])
y_vel_bounds = array([-20.0, 20.0])
x_axis_limits = [-500,1500]
y_axis_limits = [-500,1500]
anim_frame_rate = 50
anim_interval = 50
boids_x=[random.uniform(x_pos_bounds[0],x_pos_bounds[1]) for x in range(number_of_boids)]
boids_y=[random.uniform(y_pos_bounds[0],y_pos_bounds[1]) for x in range(number_of_boids)]
boid_x_velocities=[random.uniform(x_vel_bounds[0],x_vel_bounds[1]) for x in range(number_of_boids)]
boid_y_velocities=[random.uniform(y_vel_bounds[0], y_vel_bounds[1]) for x in range(number_of_boids)]
boids=(boids_x,boids_y,boid_x_velocities,boid_y_velocities)
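# The flock state is a 4-tuple of parallel lists: x positions, y positions,
# x velocities, y velocities; update_boids below mutates these lists in place.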
def update_boids(boids):
    xs,ys,xvs,yvs=boids
    fly_to_mid_weight = 0.01
    nearby_boid_dist = 100    # squared distance below which boids repel
    flying_dist = 10000       # squared distance within which boids match speed
    group_flying_weight = 0.125
    for i in range(number_of_boids):
        for j in range(number_of_boids):
            x_separation = (xs[j] - xs[i])
            y_separation = (ys[j] - ys[i])
            # Fly towards the middle
            xvs[i]+=x_separation*fly_to_mid_weight/number_of_boids
            yvs[i]+=y_separation*fly_to_mid_weight/number_of_boids
            # Fly away from nearby boids
            if x_separation**2 + y_separation**2 < nearby_boid_dist:
                xvs[i]-=x_separation
                yvs[i]-=y_separation
            # Try to match speed with nearby boids
            x_velocity_separation = (xvs[j]-xvs[i])
            y_velocity_separation = (yvs[j]-yvs[i])
            if x_separation**2 + y_separation**2 < flying_dist:
                xvs[i]+=x_velocity_separation*group_flying_weight/number_of_boids
                yvs[i]+=y_velocity_separation*group_flying_weight/number_of_boids
    # Move according to velocities
    for i in range(number_of_boids):
        xs[i]+=xvs[i]
        ys[i]+=yvs[i]
figure=plt.figure()
axes=plt.axes(xlim=(x_axis_limits[0],x_axis_limits[1]), ylim=(y_axis_limits[0],y_axis_limits[1]))
scatter=axes.scatter(boids[0],boids[1])
def animate(frame):
update_boids(boids)
scatter.set_offsets(zip(boids[0],boids[1]))
anim = animation.FuncAnimation(figure, animate,
frames=anim_frame_rate, interval=anim_interval)
if __name__ == "__main__":
plt.show()
|
pablorecio/djangae | refs/heads/master | djangae/contrib/gauth/sql/backends.py | 14 | from djangae.contrib.gauth.common.backends import BaseAppEngineUserAPIBackend
class AppEngineUserAPIBackend(BaseAppEngineUserAPIBackend):
pass
|
dpnishant/appmon | refs/heads/master | appmon.py | 1 | #!/usr/bin/python
###
# Copyright (c) 2016 Nishant Das Patnaik.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import os, sys, argparse, time, codecs, binascii, frida, json, traceback, subprocess, tempfile
from flask import Flask, request, render_template
from termcolor import colored
import database as db
import platform as platform_module
print("""
___ .______ .______ .___ ___. ______ .__ __.
/ \ | _ \ | _ \ | \/ | / __ \ | \ | |
/ ^ \ | |_) | | |_) | | \ / | | | | | | \| |
/ /_\ \ | ___/ | ___/ | |\/| | | | | | | . ` |
/ _____ \ | | | | | | | | | `--' | | |\ |
/__/ \__\ | _| | _| |__| |__| \______/ |__| \__|
github.com/dpnishant
""")
app = Flask(__name__, static_url_path='/static')
#app.debug = True
device = ''
session = ''
temp_dir = tempfile.mkdtemp()
merged_script_path = os.path.join(temp_dir,'merged.js')
APP_LIST = []
@app.after_request
def add_header(r):
"""
    Add headers to force the latest IE rendering engine or Chrome Frame,
    and to prevent the rendered page from being cached.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
@app.route('/api/fetch', methods=['GET'])
def serve_json():
index = request.args.get('id')
if request.args.get('reportdb'):
db_name = request.args.get('reportdb')
else:
db_name = request.args.get('app')
response = db.read_from_database(db_name, index)
#response = open('static/data.json').read()
return response
@app.route('/monitor/', methods=['GET'])
def monitor_page():
app_name = request.args.get('app')
return render_template('monitor.html', app_name=app_name)
@app.route('/', methods=['GET'])
def landing_page():
global APP_LIST, DB_MAP
app_dumps_dir = os.path.join('.','app_dumps')
for root, dirs, files in os.walk(app_dumps_dir):
path = root.split(os.sep)
for file in files:
file_path = os.path.join(root, file)
if file_path.endswith('.db'):
APP_LIST.append(file.replace('.db', ''))
return render_template('index.html', apps=APP_LIST)
def init_opts():
parser = argparse.ArgumentParser()
parser.add_argument('-a', action='store', dest='app_name', default='',
help='''Process Name;
Accepts "Twitter" for iOS;
"com.twitter.android" for Android; "Twitter" for macOS''')
parser.add_argument('--spawn', action='store', dest='spawn', default=0,
help='''Optional; Accepts 1=Spawn, 0=Attach; Needs "-p PLATFORM"''')
parser.add_argument('-p', action='store', dest='platform',
help='Platform Type; Accepts "ios", "iossim", "android" or "macos"')
parser.add_argument('-s', action='store', dest='script_path', default='',
help='''Path to agent script file;
Can be relative/absolute path for a file or directory;
Multiple scripts in a directory shall be merged;
Needs "-a APP_NAME"''')
parser.add_argument('-o', action='store', dest='output_dir',
help='''(Optional) Path to store any dumps/logs;
Accepts relative/absolute paths''')
parser.add_argument('-r', action='store', dest='report',
help='Report database name (Default is <appname>.db')
parser.add_argument('-ls', action='store', dest='list_apps', default=0,
help='''Optional; Accepts 1 or 0; Lists running Apps on target device; Needs "-p PLATFORM"''')
parser.add_argument('-v', action='version', version='AppMon Sniffer v0.1, Nishant Das Patnaik, 2016')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
global output_dir, report_name
results = parser.parse_args()
app_name = results.app_name
platform = results.platform
script_path = results.script_path
list_apps = int(results.list_apps)
spawn = int(results.spawn)
output_dir = results.output_dir if results.output_dir else os.path.join('.','app_dumps')
report_name = results.report if results.report else app_name
    if app_name == '' and list_apps == 0:
parser.print_help()
sys.exit(1)
return app_name, platform, script_path, list_apps, output_dir, spawn
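# Illustrative invocations (hypothetical package names and script paths):
#   python appmon.py -a com.twitter.android -p android -s scripts/android/
#   python appmon.py -a Twitter -p ios --spawn 1 -s scripts/ios/intercept.js
#   python appmon.py -p android -ls 1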
def merge_scripts(path):
global merged_script_path
script_source = ''
for root, dirs, files in os.walk(path):
path = root.split('/')
for file in files:
script_path = os.path.join(root, file)
if script_path.endswith('.js'):
source = ''
with codecs.open(script_path, 'r', 'utf-8') as f:
source = f.read()
script_source += '/* ____%s/%s____ */\n\n' % (os.path.basename(root), file) + source + '\n\n'
with codecs.open(merged_script_path, "w", "utf-8") as f:
f.write(script_source)
return merged_script_path
def _exit_():
print((colored('[INFO] Exiting...', 'green')))
try:
os.remove(merged_script_path)
except Exception as e:
pass
sys.exit(0)
def writeBinFile(fname, data):
with codecs.open(fname, "a", "utf-8") as f:
f.write(data + '\r\n\r\n')
def list_processes(device):
    print('PID\tProcesses\n===\t=========')
    for app in device.enumerate_processes():
        print(("%s\t%s" % (app.pid, app.name)))
def on_detached():
print((colored('[WARNING] "%s" has terminated!' % (app_name), 'red')))
def on_message(message, data):
os_string = platform_module.system()
if os_string == "Windows":
current_time = time.strftime('%b %d %Y %I:%M %p', time.localtime())
else:
current_time = time.strftime('%b %d %Y %l:%M %p', time.localtime())
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if message['type'] == 'send':
writePath = os.path.join(output_dir, str(report_name) + '.db')
db.save_to_database(writePath, message['payload'])
#writePath = os.path.join(output_dir, app_name + '.json')
#writeBinFile(writePath, message['payload']) #writeBinFile(writePath, binascii.unhexlify(message['payload']))
print((colored('[%s] Dumped to %s' % (current_time, writePath), 'green')))
elif message['type'] == 'error':
print((message['stack']))
def generate_injection():
injection_source = ''
if os.path.isfile(script_path):
with codecs.open(script_path, 'r', 'utf-8') as f:
injection_source = f.read()
elif os.path.isdir(script_path):
with codecs.open(merge_scripts(script_path), 'r', 'utf-8') as f:
injection_source = f.read()
print((colored('[INFO] Building injection...', 'yellow')))
return injection_source
def getDisplayName(session, app_name, platform):
try:
str_script = ""
if platform == "ios":
str_script = """/* ____CFBundleDisplayName Getter for iOS Gadget____ */
'use strict';
rpc.exports = {
gadgetdisplayname: function () {
if (ObjC.available) {
var dict = ObjC.classes.NSBundle.mainBundle().infoDictionary();
var iter = dict.keyEnumerator();
var key = "";
while ((key = iter.nextObject()) !== null) {
if (key.toString() === "CFBundleDisplayName") {
return dict.objectForKey_(key).toString();
}
}
} else { return null; }
}
};
"""
script = session.create_script(str_script)
script.load()
if script.exports.gadgetdisplayname:
app_name = script.exports.gadgetdisplayname()
script.unload()
return app_name
elif platform == "android":
str_script = """/* ____ getPackageName Getter for Android Gadget____ */
'use strict';
rpc.exports = {
gadgetdisplayname: function () {
var appName = "";
Java.perform(function(argument) {
const ActivityThread = Java.use('android.app.ActivityThread');
const app = ActivityThread.currentApplication();
appName = app.toString().split("@")[0];
});
return appName;
}};
"""
script = session.create_script(str_script)
script.load()
if script.exports.gadgetdisplayname:
app_name = script.exports.gadgetdisplayname()
script.unload()
return app_name
except Exception as e:
print((colored("[ERROR] " + str(e), "red")))
traceback.print_exc()
def getBundleID(device, app_name, platform):
try:
session = device.attach(app_name)
session.on('detached', on_detached)
script = session.create_script("""'use strict';
rpc.exports = {
iosbundleid: function () {
return ObjC.classes.NSBundle.mainBundle().bundleIdentifier().toString();
},
macosbundleid: function () {
return ObjC.classes.NSBundle.mainBundle().executablePath().toString();
}
};
""")
script.load()
if platform == 'ios':
bundleID = script.exports.iosbundleid()
elif platform == 'macos':
bundleID = script.exports.macosbundleid()
script.unload()
session.detach()
return bundleID
except Exception as e:
print((colored("[ERROR] " + str(e), "red")))
traceback.print_exc()
def init_session():
try:
session = None
if platform == 'ios' or platform == 'android':
try:
device = frida.get_usb_device(3) # added timeout to wait for 3 seconds
except Exception as e:
print((colored(str(e), "red")))
traceback.print_exc()
if platform == 'android':
print((colored("Troubleshooting Help", "blue")))
print((colored("HINT: Is USB Debugging enabled?", "blue")))
print((colored("HINT: Is `frida-server` running on mobile device (with +x permissions)?", "blue")))
print((colored("HINT: Is `adb` daemon running?", "blue")))
sys.exit(1)
elif platform == "ios":
print((colored("Troubleshooting Help", "blue")))
print((colored("HINT: Have you installed `frida` module from Cydia?", "blue")))
print((colored("HINT: Have used `ipa_installer` to inject the `FridaGadget` shared lbrary?", "blue")))
sys.exit(1)
elif platform == 'iossim':
try:
device = frida.get_remote_device()
except Exception as e:
print((colored("Troubleshooting Help", "blue")))
print((colored("HINT: Have you successfully integrated the FridaGadget dylib with the XCode Project?", "blue")))
print((colored("HINT: Do you see a message similar to \"[Frida INFO] Listening on 127.0.0.1 TCP port 27042\" on XCode console logs?", "blue")))
sys.exit(1)
elif platform == 'macos':
device = frida.get_local_device()
else:
print((colored('[ERROR] Unsupported Platform', 'red')))
sys.exit(1)
pid = None
if app_name:
try:
if platform == 'android' and spawn == 1:
print((colored("Now Spawning %s" % app_name, "green")))
pid = device.spawn([app_name])
#time.sleep(5)
session = device.attach(pid)
#time.sleep(5)
elif (platform == 'ios' or platform == 'macos') and spawn == 1:
bundleID = getBundleID(device, app_name, platform)
if bundleID:
print((colored("Now Spawning %s" % bundleID, "green")))
pid = device.spawn([bundleID])
#time.sleep(5)
session = device.attach(pid)
else:
print((colored("[ERROR] Can't spawn %s" % app_name, "red")))
traceback.print_exc()
sys.exit(1)
else:
arg_to_attach = app_name
if app_name.isdigit():
arg_to_attach = int(app_name)
session = device.attach(arg_to_attach)
except Exception as e:
print((colored('[ERROR] ' + str(e), 'red')))
traceback.print_exc()
if session:
print((colored('[INFO] Attached to %s' % (app_name), 'yellow')))
session.on('detached', on_detached)
except Exception as e:
print((colored('[ERROR] ' + str(e), 'red')))
traceback.print_exc()
sys.exit(1)
return device, session, pid
try:
app_name, platform, script_path, list_apps, output_dir, spawn = init_opts()
device, session, pid = init_session()
if int(list_apps) == 1:
list_processes(device)
sys.exit(0)
if session:
if app_name == "Gadget":
app_name = getDisplayName(session, app_name, platform)
script = session.create_script(generate_injection())
if script:
print((colored('[INFO] Instrumentation started...', 'yellow')))
script.on('message', on_message)
script.load()
if spawn == 1 and pid:
device.resume(pid)
app.run() #Start WebServer
except Exception as e:
print((colored('[ERROR] ' + str(e), 'red')))
traceback.print_exc()
sys.exit(1)
try:
while True:
pass
except KeyboardInterrupt:
script.unload()
session.detach()
_exit_()
|
enriquepablo/nl | refs/heads/master | nl/examples/modality.py | 1 | # -*- coding: utf-8 -*-
# Copyright (c) 2007-2012 by Enrique Pérez Arnaud <[email protected]>
#
# This file is part of nlproject.
#
# The nlproject is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The nlproject is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with any part of the nlproject.
# If not, see <http://www.gnu.org/licenses/>.
from nl import *
from people import *
class Wants(Exists):
"""
the verb Wants has a person proper name as subject
and can take as 'to' modifier a verb prhase
"""
subject = Person
mods = {'to': Exists}
class Can(Exists):
"""
the verb Can has a person proper name as subject
and can take as 'what' modifier a verb prhase
"""
subject = Person
mods = {'what': Exists}
class Must(Exists):
"""
the verb Can has a person proper name as subject
and can take as 'what' modifier a verb prhase
"""
subject = Person
mods = {'what': Exists}
|
fe11x/dpark | refs/heads/master | dpark/moosefs/master.py | 14 | import os
import socket
import threading
import Queue
import time
import struct
import logging
from consts import *
from utils import *
logger = logging.getLogger(__name__)
# mfsmaster need to been patched with dcache
ENABLE_DCACHE = False
class StatInfo:
def __init__(self, totalspace, availspace, trashspace,
reservedspace, inodes):
self.totalspace = totalspace
self.availspace = availspace
self.trashspace = trashspace
self.reservedspace = reservedspace
self.inodes = inodes
class Chunk:
def __init__(self, id, length, version, csdata):
self.id = id
self.length = length
self.version = version
self.addrs = self._parse(csdata)
def _parse(self, csdata):
return [(socket.inet_ntoa(csdata[i:i+4]),
unpack("H", csdata[i+4:i+6])[0])
for i in range(len(csdata))[::6]]
def __repr__(self):
return "<Chunk(%d, %d, %d)>" % (self.id, self.version, self.length)
def try_again(f):
def _(self, *a, **kw):
for i in range(3):
try:
return f(self, *a, **kw)
except IOError, e:
self.close()
logger.warning("mfs master connection: %s", e)
time.sleep(2**i*0.1)
else:
raise
return _
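# Note: try_again retries a failed master call up to three times, closing the
# stale connection and backing off 0.1s, 0.2s, then 0.4s between attempts; if
# every attempt fails, the last IOError is re-raised.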
def spawn(target, *args, **kw):
t = threading.Thread(target=target, name=target.__name__, args=args, kwargs=kw)
t.daemon = True
t.start()
return t
class MasterConn:
def __init__(self, host='mfsmaster', port=9421):
self.host = host
self.port = port
self.uid = os.getuid()
self.gid = os.getgid()
self.sessionid = 0
self.conn = None
self.packetid = 0
self.fail_count = 0
self.dcache = {}
self.dstat = {}
self.lock = threading.RLock()
self.reply = Queue.Queue()
self.is_ready = False
spawn(self.heartbeat)
spawn(self.recv_thread)
def heartbeat(self):
while True:
try:
self.nop()
except Exception, e:
self.close()
time.sleep(2)
def connect(self):
if self.conn is not None:
return
for _ in range(10):
try:
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.conn.connect((self.host, self.port))
break
except socket.error, e:
self.conn = None
#self.next_try = time.time() + 1.5 ** self.fail_count
self.fail_count += 1
time.sleep(1.5 ** self.fail_count)
if not self.conn:
raise IOError("mfsmaster not availbale")
regbuf = pack(CUTOMA_FUSE_REGISTER, FUSE_REGISTER_BLOB_NOACL,
self.sessionid, VERSION)
self.send(regbuf)
recv = self.recv(8)
cmd, i = unpack("II", recv)
if cmd != MATOCU_FUSE_REGISTER:
raise Exception("got incorrect answer from mfsmaster %s" % cmd)
if i not in (1, 4):
raise Exception("got incorrect size from mfsmaster")
data = self.recv(i)
if i == 1:
code, = unpack("B", data)
if code != 0:
raise Exception("mfsmaster register error: "
+ mfs_strerror(code))
if self.sessionid == 0:
self.sessionid, = unpack("I", data)
self.is_ready = True
def close(self):
with self.lock:
if self.conn:
self.conn.close()
self.conn = None
self.dcache.clear()
self.is_ready = False
def send(self, buf):
with self.lock:
conn = self.conn
if not conn:
raise IOError("not connected")
n = conn.send(buf)
while n < len(buf):
sent = conn.send(buf[n:])
if not sent:
self.close()
raise IOError("write to master failed")
n += sent
def nop(self):
with self.lock:
self.connect()
msg = pack(ANTOAN_NOP, 0)
self.send(msg)
def recv(self, n):
with self.lock:
conn = self.conn
if not conn:
raise IOError("not connected")
r = conn.recv(n)
while len(r) < n:
rr = conn.recv(n - len(r))
if not rr:
self.close()
raise IOError("unexpected error: need %d" % (n-len(r)))
r += rr
return r
def recv_cmd(self):
d = self.recv(12)
cmd, size = unpack("II", d)
data = self.recv(size-4) if size > 4 else ''
while cmd in (ANTOAN_NOP, MATOCU_FUSE_NOTIFY_ATTR, MATOCU_FUSE_NOTIFY_DIR):
if cmd == ANTOAN_NOP:
pass
elif cmd == MATOCU_FUSE_NOTIFY_ATTR:
while len(data) >= 43:
parent, inode = unpack("II", data)
attr = data[8:43]
if parent in self.dcache:
cache = self.dcache[parent]
for name in cache:
if cache[name].inode == inode:
cache[name] = attrToFileInfo(inode, attr)
break
data = data[43:]
elif cmd == MATOCU_FUSE_NOTIFY_DIR:
while len(data) >= 4:
inode, = unpack("I", data)
if inode in self.dcache:
del self.dcache[inode]
with self.lock:
self.send(pack(CUTOMA_FUSE_DIR_REMOVED, 0, inode))
data = data[4:]
d = self.recv(12)
cmd, size = unpack("II", d)
data = self.recv(size-4) if size > 4 else ''
return d, data
def recv_thread(self):
while True:
with self.lock:
if not self.is_ready:
time.sleep(0.01)
continue
try:
r = self.recv_cmd()
self.reply.put(r)
except IOError, e:
self.reply.put(e)
@try_again
def sendAndReceive(self, cmd, *args):
#print 'sendAndReceive', cmd, args
self.packetid += 1
msg = pack(cmd, self.packetid, *args)
with self.lock:
self.connect()
while not self.reply.empty():
self.reply.get_nowait()
self.send(msg)
r = self.reply.get()
if isinstance(r, Exception):
raise r
h, d = r
rcmd, size, pid = unpack("III", h)
if rcmd != cmd+1 or pid != self.packetid or size <= 4:
self.close()
raise Exception("incorrect answer (%s!=%s, %s!=%s, %d<=4",
rcmd, cmd+1, pid, self.packetid, size)
if len(d) == 1 and ord(d[0]) != 0:
raise Error(ord(d[0]))
return d
def statfs(self):
ans = self.sendAndReceive(CUTOMA_FUSE_STATFS)
return StatInfo(*unpack("QQQQI", ans))
# def access(self, inode, modemask):
# return self.sendAndReceive(CUTOMA_FUSE_ACCESS, inode,
# self.uid, self.gid, uint8(modemask))
#
def lookup(self, parent, name):
if ENABLE_DCACHE:
cache = self.dcache.get(parent)
if cache is None and self.dstat.get(parent, 0) > 1:
cache = self.getdirplus(parent)
if cache is not None:
return cache.get(name), None
self.dstat[parent] = self.dstat.get(parent, 0) + 1
ans = self.sendAndReceive(CUTOMA_FUSE_LOOKUP, parent,
uint8(len(name)), name, 0, 0)
if len(ans) == 1:
return None, ""
if len(ans) != 39:
raise Exception("bad length")
inode, = unpack("I", ans)
return attrToFileInfo(inode, ans[4:]), None
def getattr(self, inode):
ans = self.sendAndReceive(CUTOMA_FUSE_GETATTR, inode,
self.uid, self.gid)
return attrToFileInfo(inode, ans)
def readlink(self, inode):
ans = self.sendAndReceive(CUTOMA_FUSE_READLINK, inode)
length, = unpack("I", ans)
if length+4 != len(ans):
raise Exception("invalid length")
return ans[4:-1]
def getdir(self, inode):
"return: {name: (inode,type)}"
ans = self.sendAndReceive(CUTOMA_FUSE_GETDIR, inode,
self.uid, self.gid)
p = 0
names = {}
while p < len(ans):
length, = unpack("B", ans[p:p+1])
p += 1
if length + p + 5 > len(ans):
break
name = ans[p:p+length]
p += length
inode, type = unpack("IB", ans)
names[name] = (inode, type)
p += 5
return names
def getdirplus(self, inode):
"return {name: FileInfo()}"
if ENABLE_DCACHE:
infos = self.dcache.get(inode)
if infos is not None:
return infos
flag = GETDIR_FLAG_WITHATTR
if ENABLE_DCACHE:
flag |= GETDIR_FLAG_DIRCACHE
ans = self.sendAndReceive(CUTOMA_FUSE_GETDIR, inode,
self.uid, self.gid, uint8(flag))
p = 0
infos = {}
while p < len(ans):
length, = unpack("B", ans[p:p+1])
p += 1
name = ans[p:p+length]
p += length
i, = unpack("I", ans[p:p+4])
attr = ans[p+4:p+39]
infos[name] = attrToFileInfo(i, attr, name)
p += 39
if ENABLE_DCACHE:
self.dcache[inode] = infos
return infos
def opencheck(self, inode, flag=1):
ans = self.sendAndReceive(CUTOMA_FUSE_OPEN, inode,
self.uid, self.gid, uint8(flag))
return ans
def readchunk(self, inode, index):
ans = self.sendAndReceive(CUTOMA_FUSE_READ_CHUNK, inode, index)
n = len(ans)
if n < 20 or (n-20)%6 != 0:
raise Exception("read chunk: invalid length: %s" % n)
length, id, version = unpack("QQI", ans)
return Chunk(id, length, version, ans[20:])
def test():
m = MasterConn("mfsmaster")
m.connect()
m.close()
#print m.get_attr(1)
while True:
print m.getdir(1)
print m.getdirplus(1)
time.sleep(60)
info, err = m.lookup(1, "test.csv")
print info, err
#print m.opencheck(info.inode)
chunks = m.readchunk(info.inode, 0)
print chunks, chunks.addrs
for i in range(1000):
info, err = m.lookup(1, "test.csv")
chunks = m.readchunk(info.inode, 0)
print i,err, chunks
time.sleep(10)
m.close()
if __name__ == '__main__':
test()
|
uwcirg/true_nth_usa_portal | refs/heads/develop | portal/models/role.py | 1 | """Role module
Role data lives in the `roles` table, populated via:
`flask seed`
To restrict access to a given role, use the ROLE object:
@roles_required(ROLE.ADMIN.value)
To extend the list of roles, add name: description pairs to the
STATIC_ROLES dict within.
"""
from enum import Enum
from ..database import db
class Role(db.Model):
"""SQLAlchemy class for `roles` table"""
__tablename__ = 'roles'
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
description = db.Column(db.Text)
def __str__(self):
return "Role {}".format(self.name)
def as_json(self):
return {
'name': self.name,
'description': self.description,
'display_name': self.display_name}
@property
def display_name(self):
"""Generate and return 'Title Case' version of name 'title_case' """
if not self.name:
return
word_list = self.name.split('_')
return ' '.join([n.title() for n in word_list])
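    # Illustrative (hypothetical instance): Role(name='staff_admin').display_name
    # evaluates to 'Staff Admin'.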
# Source definition for roles, as dictionary {name: description,}
STATIC_ROLES = {
'access_on_verify':
'Provides access prior to registration, on verification',
'admin':
'Administrator privileges, i.e. carte blanche',
'analyst':
'Grants view permissions for reporting data (does not include PHI)',
'anon':
'Anonymous role - exclusive to accounts generated prior to '
'user authentication',
'application_developer':
'Gives users permission to add/view/edit TrueNTH applications',
'content_manager':
'Gives user permission to add/view/edit associated content '
'managment systems',
'intervention_staff':
'Grants user permission to view patient information (name, DOB, etc) '
'from relevant intervention patients',
'partner':
"An intimate partner, use the partner relationship to define "
"whom the patient's partner is",
'patient':
'Default role for all patients, may only view their own '
'patient data',
'promote_without_identity_challenge':
'Users with "write_only" may be promoted without the standard '
'identity challenge if they are also have this role',
'researcher':
'Gives user access to the Research page',
'staff':
'Health care provider or staff at a TrueNTH-collaborating clinic',
'staff_admin':
'Staff administrator, with access to both patients and staff '
'from their organization tree',
'service':
'Reserved for automated service accounts needing API access',
'test':
'Designates a testing user',
'write_only':
'Limited access account, write only, cannot view data from '
'previous sessions',
}
ROLE = Enum('ROLE', {r.upper(): r for r in STATIC_ROLES})
ALL_BUT_WRITE_ONLY = [r.value for r in ROLE if r.value != 'write_only']
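# Illustrative lookups (values follow from the dict comprehension above):
#   ROLE.ADMIN.value == 'admin'
#   ROLE.STAFF_ADMIN.name == 'STAFF_ADMIN'
#   'write_only' not in ALL_BUT_WRITE_ONLY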
def add_static_roles():
"""Seed database with default static roles
Idempotent - run anytime to pick up any new roles in existing dbs
"""
for r in STATIC_ROLES:
if not Role.query.filter_by(name=r).first():
db.session.add(Role(name=r, description=STATIC_ROLES[r]))
|
qedsoftware/commcare-hq | refs/heads/master | corehq/apps/callcenter/tests/test_owner_options_view.py | 1 | import json
import math
from django.test import TestCase
from elasticsearch import ConnectionError
from corehq.apps.callcenter.views import CallCenterOwnerOptionsView
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.groups.models import Group
from corehq.apps.locations.models import LocationType
from corehq.apps.locations.tests.util import make_loc
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.elastic import get_es_new, send_to_elasticsearch
from corehq.pillows.mappings.group_mapping import GROUP_INDEX_INFO
from corehq.pillows.mappings.user_mapping import USER_INDEX_INFO
from corehq.toggles import CALL_CENTER_LOCATION_OWNERS, NAMESPACE_DOMAIN
from corehq.util import reverse
from corehq.util.elastic import ensure_index_deleted
from corehq.util.test_utils import trap_extra_setup
from django_digest.test import Client
from pillowtop.es_utils import initialize_index_and_mapping
TEST_DOMAIN = "cc-location-owner-test-domain"
CASE_TYPE = "cc-case-type"
LOCATION_TYPE = "my-location"
class CallCenterLocationOwnerOptionsViewTest(TestCase):
@classmethod
def setUpClass(cls):
super(CallCenterLocationOwnerOptionsViewTest, cls).setUpClass()
with trap_extra_setup(ConnectionError, msg="cannot connect to elasicsearch"):
es = get_es_new()
ensure_index_deleted(USER_INDEX_INFO.index)
ensure_index_deleted(GROUP_INDEX_INFO.index)
initialize_index_and_mapping(es, USER_INDEX_INFO)
initialize_index_and_mapping(es, GROUP_INDEX_INFO)
# Create domain
cls.domain = create_domain(TEST_DOMAIN)
cls.domain.save()
CALL_CENTER_LOCATION_OWNERS.set(cls.domain.name, True, NAMESPACE_DOMAIN)
cls.username = "foo"
cls.password = "bar"
cls.web_user = WebUser.create(cls.domain.name, cls.username, cls.password)
cls.web_user.save()
# Create case sharing groups
cls.groups = []
for i in range(2):
group = Group(domain=TEST_DOMAIN, name="group{}".format(i), case_sharing=True)
group.save()
send_to_elasticsearch('groups', group.to_json())
cls.groups.append(group)
es.indices.refresh(GROUP_INDEX_INFO.index)
cls.group_ids = {g._id for g in cls.groups}
# Create locations
LocationType.objects.get_or_create(
domain=cls.domain.name,
name=LOCATION_TYPE,
shares_cases=True,
)
cls.locations = [
make_loc('loc{}'.format(i), type=LOCATION_TYPE, domain=TEST_DOMAIN) for i in range(4)
]
cls.location_ids = {l._id for l in cls.locations}
# Create users
cls.users = [CommCareUser.create(TEST_DOMAIN, 'user{}'.format(i), '***') for i in range(3)]
for user in cls.users:
send_to_elasticsearch('users', user.to_json())
es.indices.refresh(USER_INDEX_INFO.index)
cls.user_ids = {u._id for u in cls.users}
@classmethod
def tearDownClass(cls):
super(CallCenterLocationOwnerOptionsViewTest, cls).tearDownClass()
for user in cls.users:
user.delete()
CALL_CENTER_LOCATION_OWNERS.set(cls.domain.name, False, NAMESPACE_DOMAIN)
cls.domain.delete()
cls.web_user.delete()
ensure_index_deleted(USER_INDEX_INFO.index)
ensure_index_deleted(GROUP_INDEX_INFO.index)
def test_pages(self):
"""
Confirm that all the groups/locations/users appear on the correct pages
"""
client = Client()
client.login(username=self.username, password=self.password)
# expected_id_sets is a list of sets.
# expected_id_sets is constructed such that
# For option with index x yielded by the view:
# the option's id should be in expected_ids[x]
expected_id_sets = [{"user_location"}, {"user_parent_location"}]
for i in self.groups:
expected_id_sets.append(self.group_ids)
for i in self.locations:
expected_id_sets.append(self.location_ids)
for i in self.users:
expected_id_sets.append(self.user_ids)
        page_size = 3  # a small page size yields more pages, making pagination bugs more likely to surface
expected_num_pages = int(math.ceil(len(expected_id_sets) / float(page_size)))
for i in range(expected_num_pages):
page = i + 1
response = client.get(reverse(
CallCenterOwnerOptionsView.url_name, args=[self.domain.name]),
data={"page": page, "page_limit": page_size, "q": ""}
)
response_json = json.loads(response.content)
self.assertEqual(response_json['total'], len(expected_id_sets))
for item_index, item in enumerate(response_json['results']):
id_ = item['id']
option_index = ((page - 1) * page_size) + item_index
self.assertTrue(
id_ in expected_id_sets[option_index],
"Unexpected item {} at index {}.".format(item, option_index)
)
|
mikepea/fullerite | refs/heads/master | src/diamond/collectors/dseopscenter/dseopscenter.py | 29 | # coding=utf-8
"""
Collect the DataStax OpsCenter metrics
#### Dependencies
* urlib2
"""
import urllib2
import datetime
try:
import json
except ImportError:
import simplejson as json
import diamond.collector
class DseOpsCenterCollector(diamond.collector.Collector):
last_run_time = 0
column_families = None
last_schema_sync_time = 0
def get_default_config_help(self):
config_help = super(DseOpsCenterCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
'cluster_id': "Set cluster ID/name.\n",
'metrics': "You can list explicit metrics if you like,\n"
" by default all know metrics are included.\n",
'node_group': "Set node group name, any by default\n",
'default_tail_opts': "Chaning these is not recommended.",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DseOpsCenterCollector, self).get_default_config()
metrics = [
'cf-bf-false-positives',
'cf-bf-false-ratio',
'cf-bf-space-used',
'cf-keycache-hit-rate',
'cf-keycache-hits',
'cf-keycache-requests',
'cf-live-disk-used',
'cf-live-sstables',
'cf-pending-tasks',
'cf-read-latency-op',
'cf-read-ops',
'cf-rowcache-hit-rate',
'cf-rowcache-hits',
'cf-rowcache-requests',
'cf-total-disk-used',
'cf-write-latency-op',
'cf-write-ops',
'cms-collection-count',
'cms-collection-time',
'data-load',
'heap-committed',
'heap-max',
'heap-used',
'key-cache-hit-rate',
'key-cache-hits',
'key-cache-requests',
'nonheap-committed',
'nonheap-max',
'nonheap-used',
'pending-compaction-tasks',
'pending-flush-sorter-tasks',
'pending-flushes',
'pending-gossip-tasks',
'pending-hinted-handoff',
'pending-internal-responses',
'pending-memtable-post-flushers',
'pending-migrations',
'pending-misc-tasks',
'pending-read-ops',
'pending-read-repair-tasks',
'pending-repair-tasks',
'pending-repl-on-write-tasks',
'pending-request-responses',
'pending-streams',
'pending-write-ops',
'read-latency-op',
'read-ops',
'row-cache-hit-rate',
'row-cache-hits',
'row-cache-requests',
'solr-avg-time-per-req',
'solr-errors',
'solr-requests',
'solr-timeouts',
'total-bytes-compacted',
'total-compactions-completed',
'write-latency-op',
'write-ops',
]
config.update({
'host': '127.0.0.1',
'port': 8888,
'path': 'cassandra',
'node_group': '*',
'metrics': ','.join(metrics),
'default_tail_opts': '&forecast=0&node_aggregation=1',
})
return config
def _get_schema(self):
time_now = int(datetime.datetime.utcnow().strftime('%s'))
        if (self.column_families is not None
                and (time_now - self.last_schema_sync_time < 3600)):
            return False
url = 'http://%s:%i/%s/keyspaces' % (self.config['host'],
int(self.config['port']),
self.config['cluster_id'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
try:
result = json.load(response)
column_families = []
for ks in result:
i = []
for cf in result[ks]['column_families']:
i.append("%s.%s" % (ks, cf))
column_families.append(i)
self.column_families = ','.join(sum(column_families, []))
self.log.debug('DseOpsCenterCollector columnfamilies = %s',
self.column_families)
self.last_schema_sync_time = time_now
return True
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
def _get(self, start, end, step=60):
self._get_schema()
url = ('http://%s:%i/%s/new-metrics?node_group=%s&columnfamilies=%s'
'&metrics=%s&start=%i&end=%i&step=%i%s') % (
self.config['host'],
int(self.config['port']),
self.config['cluster_id'],
self.config['node_group'],
self.column_families,
self.config['metrics'],
start, end, step,
self.config['default_tail_opts'])
try:
response = urllib2.urlopen(url)
except Exception, err:
self.log.error('%s: %s', url, err)
return False
self.log.debug('DseOpsCenterCollector metrics url = %s', url)
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error(
"Unable to parse response from opscenter as a json object")
return False
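    # For illustration (hypothetical host and cluster id), _get above assembles
    # a request URL such as:
    #   http://127.0.0.1:8888/MyCluster/new-metrics?node_group=*
    #     &columnfamilies=ks1.cf1&metrics=read-ops,write-ops
    #     &start=1400000000&end=1400000060&step=60&forecast=0&node_aggregation=1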
def collect(self):
metrics = {}
if json is None:
self.log.error('Unable to import json')
return None
time_now = int(datetime.datetime.utcnow().strftime('%s'))
self.log.debug('DseOpsCenterCollector last_run_time = %i',
self.last_run_time)
if self.last_run_time == 0:
self.last_run_time = time_now - 60
if time_now - self.last_run_time >= 60:
result = self._get(self.last_run_time, time_now)
self.last_run_time = time_now
if not result:
return None
self.log.debug('DseOpsCenterCollector result = %s', result)
for data in result['data'][self.config['node_group']]:
if data['data-points'][0][0] is not None:
if 'columnfamily' in data:
k = '.'.join([data['metric'],
data['columnfamily']])
metrics[k] = data['data-points'][0][0]
else:
metrics[data['metric']] = data['data-points'][0][0]
self.log.debug('DseOpsCenterCollector metrics = %s', metrics)
for key in metrics:
self.publish(key, metrics[key])
else:
self.log.debug(
"DseOpsCenterCollector can only run once every minute")
return None
|
marcosdiez/ansible-modules-extras | refs/heads/devel | notification/flowdock.py | 91 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Coddington <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: flowdock
version_added: "1.2"
author: "Matt Coddington (@mcodd)"
short_description: Send a message to a flowdock
description:
- Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
options:
token:
description:
- API token.
required: true
type:
description:
- Whether to post to 'inbox' or 'chat'
required: true
choices: [ "inbox", "chat" ]
msg:
description:
- Content of the message
required: true
tags:
description:
- tags of the message, separated by commas
required: false
external_user_name:
description:
- (chat only - required) Name of the "user" sending the message
required: false
from_address:
description:
- (inbox only - required) Email address of the message sender
required: false
source:
description:
- (inbox only - required) Human readable identifier of the application that uses the Flowdock API
required: false
subject:
description:
- (inbox only - required) Subject line of the message
required: false
from_name:
description:
- (inbox only) Name of the message sender
required: false
reply_to:
description:
- (inbox only) Email address for replies
required: false
project:
description:
- (inbox only) Human readable identifier for more detailed message categorization
required: false
link:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
requirements: [ ]
'''
EXAMPLES = '''
- flowdock: type=inbox
token=AAAAAA
[email protected]
source='my cool app'
msg='test from ansible'
subject='test subject'
- flowdock: type=chat
token=AAAAAA
external_user_name=testuser
msg='test from ansible'
tags=tag1,tag2,tag3
'''
import urllib
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
msg=dict(required=True),
type=dict(required=True, choices=["inbox","chat"]),
external_user_name=dict(required=False),
from_address=dict(required=False),
source=dict(required=False),
subject=dict(required=False),
from_name=dict(required=False),
reply_to=dict(required=False),
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
type = module.params["type"]
token = module.params["token"]
if type == 'inbox':
url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
else:
url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
params = {}
# required params
params['content'] = module.params["msg"]
# required params for the 'chat' type
if module.params['external_user_name']:
if type == 'inbox':
module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
else:
params['external_user_name'] = module.params["external_user_name"]
    elif type == 'chat':
        module.fail_json(msg="external_user_name is required for the 'chat' type")
# required params for the 'inbox' type
for item in [ 'from_address', 'source', 'subject' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
elif type == 'inbox':
module.fail_json(msg="%s is required for the 'inbox' type" % item)
# optional params
if module.params["tags"]:
params['tags'] = module.params["tags"]
# optional params for the 'inbox' type
for item in [ 'from_name', 'reply_to', 'project', 'link' ]:
if module.params[item]:
if type == 'chat':
module.fail_json(msg="%s is not valid for the 'chat' type" % item)
else:
params[item] = module.params[item]
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=False)
# Send the data to Flowdock
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: %s" % info['msg'])
module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|