a sparse 'd' matrix of size (0,n).
b is a dense 'd' matrix of size (p,1). The default value is a
dense 'd' matrix of size (0,1).
Optionally, the interface can write a .task file, required for
support questions on the MOSEK solver.
Return values
solsta is a MOSEK solution status key.
If solsta is mosek.solsta.optimal,
then (x, zl, zq, zs) contains the primal-dual solution.
    If solsta is mosek.solsta.prim_infeas_cer,
then (x, zl, zq, zs) is a certificate of dual infeasibility.
    If solsta is mosek.solsta.dual_infeas_cer,
then (x, zl, zq, zs) is a certificate of primal infeasibility.
If solsta is mosek.solsta.unknown,
then (x, zl, zq, zs) are all None
Other return values for solsta include:
mosek.solsta.dual_feas
mosek.solsta.near_dual_feas
mosek.solsta.near_optimal
mosek.solsta.near_prim_and_dual_feas
mosek.solsta.near_prim_feas
mosek.solsta.prim_and_dual_feas
mosek.solsta.prim_feas
in which case the (x,y,z) value may not be well-defined.
x, z the primal-dual solution.
Options are passed to MOSEK solvers via the msk.options dictionary,
e.g., the following turns off output from the MOSEK solvers
>>> msk.options = {mosek.iparam.log:0}
see the MOSEK Python API manual.
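    A sketch combining an integer and a double parameter (the double
    parameter name is illustrative; check the MOSEK manual for the exact
    attribute, the dispatch below accepts iparam, dparam and sparam keys):
    >>> msk.options = {mosek.iparam.log: 0,
    ...                mosek.dparam.intpnt_co_tol_rel_gap: 1e-9}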
"""
with mosek.Env() as env:
if dims is None:
(solsta, x, y, z) = lp(c, G, h)
return (solsta, x, z, None)
N, n = G.size
ml, mq, ms = dims['l'], dims['q'], [ k*k for k in dims['s'] ]
cdim = ml + sum(mq) + sum(ms)
        if cdim == 0: raise ValueError("ml+mq+ms cannot be 0")
# Data for kth 'q' constraint are found in rows indq[k]:indq[k+1] of G.
indq = [ dims['l'] ]
for k in dims['q']: indq = indq + [ indq[-1] + k ]
# Data for the kth 's' constraint are found in rows indq[-1] + (inds[k]:inds[k+1]) of G.
inds = [ 0 ]
for k in dims['s']: inds = inds + [ inds[-1] + k*k ]
if type(h) is not matrix or h.typecode != 'd' or h.size[1] != 1:
raise TypeError("'h' must be a 'd' matrix with 1 column")
if type(G) is matrix or type(G) is spmatrix:
if G.typecode != 'd' or G.size[0] != cdim:
raise TypeError("'G' must be a 'd' matrix with %d rows " %cdim)
if h.size[0] != cdim:
raise TypeError("'h' must have %d rows" %cdim)
else:
raise TypeError("'G' must be a matrix")
if len(dims['q']) and min(dims['q'])<1: raise TypeError(
"dimensions of quadratic cones must be positive")
if len(dims['s']) and min(dims['s'])<1: raise TypeError(
"dimensions of semidefinite cones must be positive")
bkc = n*[ mosek.boundkey.fx ]
blc = list(-c)
buc = list(-c)
dimx = ml + sum(mq)
bkx = ml*[ mosek.boundkey.lo ] + sum(mq)*[ mosek.boundkey.fr ]
blx = ml*[ 0.0 ] + sum(mq)*[ -inf ]
bux = dimx*[ +inf ]
c = list(-h)
cl, cs = c[:dimx], sparse(c[dimx:])
Gl, Gs = sparse(G[:dimx,:]), sparse(G[dimx:,:])
colptr, asub, acof = Gl.T.CCS
aptrb, aptre = colptr[:-1], colptr[1:]
with env.Task(0,0) as task:
task.set_Stream (mosek.streamtype.log, streamprinter)
# set MOSEK options
for (param, val) in options.items():
if str(param)[:6] == "iparam":
task.putintparam(param, val)
elif str(param)[:6] == "dparam":
task.putdouparam(param, val)
elif str(param)[:6] == "sparam":
task.putstrparam(param, val)
else:
raise ValueError("invalid MOSEK parameter: "+str(param))
task.inputdata (n, # number of constraints
dimx, # number of variables
cl, # linear objective coefficients
0.0, # objective fixed value
list(aptrb),
list(aptre),
list(asub),
list(acof),
bkc,
blc,
buc,
bkx,
blx,
bux)
task.putobjsense(mosek.objsense.maximize)
numbarvar = len(dims['s'])
task.appendbarvars(dims['s'])
barcsubj, barcsubk, barcsubl = (inds[-1])*[ 0 ], (inds[-1])*[ 0 ], (inds[-1])*[ 0 ]
barcval = [ -h[indq[-1]+k] for k in range(inds[0], inds[-1])]
for s in range(numbarvar):
for (k,idx) in enumerate(range(inds[s],inds[s+1])):
                    barcsubk[idx] = k // dims['s'][s]
barcsubl[idx] = k % dims['s'][s]
barcsubj[idx] = s
# filter out upper triangular part
trilidx = [ idx for idx in range(len(barcsubk)) if barcsubk[idx] >= barcsubl[idx] ]
barcsubj = [ barcsubj[k] for k in trilidx ]
barcsubk = [ barcsubk[k] for k in trilidx ]
barcsubl = [ barcsubl[k] for k in trilidx ]
barcval = [ barcval[k] for k in trilidx ]
task.putbarcblocktriplet(len(trilidx), barcsubj, barcsubk, barcsubl, barcval)
Gst = Gs.T
barasubi = len(Gst)*[ 0 ]
barasubj = len(Gst)*[ 0 ]
barasubk = len(Gst)*[ 0 ]
barasubl = len(Gst)*[ 0 ]
baraval = len(Gst)*[ 0.0 ]
colptr, row, val = Gst.CCS
for s in range(numbarvar):
for j in range(ms[s]):
for idx in range(colptr[inds[s]+j], colptr[inds[s]+j+1]):
barasubi[idx] = row[idx]
barasubj[idx] = s
                        barasubk[idx] = j // dims['s'][s]
barasubl[idx] = j % dims['s'][s]
baraval[idx] = val[idx]
# filter out upper triangular part
trilidx = [ idx for (idx, (k,l)) in enumerate(zip(barasubk,barasubl)) if k >= l ]
barasubi = [ barasubi[k] for k in trilidx ]
barasubj = [ barasubj[k] for k in trilidx ]
barasubk = [ barasubk[k] for k in trilidx ]
barasubl = [ barasubl[k] for k in trilidx ]
baraval = [ baraval[k] for k in trilidx ]
task.putbarablocktriplet(len(trilidx), barasubi, barasubj, barasubk, barasubl, baraval)
for k in range(len(mq)):
task.appendcone(mosek.conetype.quad, 0.0,
range(ml+sum(mq[:k]),ml+sum(mq[:k+1])))
if taskfile:
task.writetask(taskfile)
task.optimize()
task.solutionsummary (mosek.streamtype.msg);
solsta = task.getsolsta(mosek.soltype.itr)
xu, xl, zq = n*[ 0.0 ], n*[ 0.0 ], sum(mq)*[ 0.0 ]
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.slc, 0, n, xl)
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.suc, 0, n, xu)
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, ml, dimx, zq)
x = matrix(xu)-matrix(xl)
zq = matrix(zq)
for s in range(numbarvar):
xx = (dims['s'][s]*(dims['s'][s] + 1) >> 1)*[0.0]
task.getbarxj(mosek.soltype.itr, s, xx)
xs = matrix(0.0, (dims['s'][s], dims['s'][s]))
idx = 0
for j in range(dims['s'][s]):
for i in range(j,dims['s'][s]):
xs[i,j] = xx[idx]
if i != j:
xs[j,i] = xx[idx]
idx += 1
zq = matrix([zq, xs[:]])
if ml:
zl = ml*[ 0.0 ]
task.getsolutionslice(mosek.soltype.itr, mosek.solitem.xx, 0, ml, zl)
zl = matrix(zl)
else:
zl = matrix(0.0, (0,1))
if (solsta is mosek.solsta.unknown):
return (solsta, None, None)
else:
return (solsta, x, matrix([zl, zq]))
def socp(c, Gl=None, hl=None, Gq=None, hq=None, taskfile=None):
"""
Solves a pair of primal and dual SOCPs
minimize c'*x
subject to Gl*x + sl = hl
Gq[k]*x + sq[k] = hq[k], k = 0, ..., N-1
sl >= 0,
sq[k] >= 0, k = 0, ..., N-1
maximize -hl'*zl - sum_k hq[k]'*zq[k]
subject to Gl'*zl + sum_k Gq[k]'*zq[k] + c = 0
zl >= 0, zq[k] >= 0, k = 0, ..., N-1.
using MOSEK 8.0.
solsta, x, zl, zq = socp(c, Gl = None, hl = None, Gq = None, hq = None, taskfile=None)
Return values
solsta is a MOSEK solution status key.
If solsta is mosek.solsta.optimal,
then (x, zl, zq) contains the primal-dual solution.
If solsta is mosek.solsta.prim_infeas_cer,
then (x, zl, zq) is a certificate of dual infeasibility.
If solsta is mosek.solsta.dual_infeas_cer,
then (x, zl, zq) is a certificate of primal infeasibility.
If solsta is mosek.solsta.unknown,
then (x, zl, zq) are all None
Other return values for solsta include:
mosek.solsta.dual_feas
mosek.solsta.near_dual_feas
mosek.solsta.near_optimal
mosek.solsta.near_prim_and_dual_feas
mosek.solsta.near_prim_feas
mosek.solsta.prim_and_dual_feas
mosek.solsta.prim_feas
in which case the (x,y,z) value may not be well-defined.
x, zl, zq the primal-dual solution.
Options are passed to MOSEK solvers via the msk.options dictionary,
e.g., the following turns off output from the MOSEK solvers
>>> msk.options = {mosek.iparam.log: 0}
see the MOSEK Python API manual.
Optionally, the interface can write a .task file, required for
support questions on the MOSEK solver.
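    A minimal usage sketch following the call form above (c, Gl, hl are
    assumed to be cvxopt matrices and Gq, hq lists of cvxopt matrices of
    compatible sizes):
    >>> solsta, x, zl, zq = socp(c, Gl, hl, Gq, hq)
    >>> if solsta is mosek.solsta.optimal:
    ...     print(x)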
"""
with mosek.Env() as env:
if type(c) is not matrix or c.typecode != 'd' or c.size[1] != 1:
raise TypeError("'c' must be a dense column matrix")
n = c.size[0]
if n < 1: raise ValueError("number of variables must be at least 1")
if Gl is None: Gl = spmatrix([], [], [], (0,n), tc='d')
if (type(Gl) is not matrix and type(Gl) is not spmatrix) or \
Gl.typecode != 'd' or Gl.size[1] != n:
raise TypeError("'Gl' must be a dense or sparse 'd' matrix "\
"with %d columns" %n)
ml = Gl.size[0]
if hl is None: hl = matrix(0.0, (0,1))
        if type(hl) is not matrix or hl.typecode != 'd' or
            mental, mentalIndef, recvMental = self.getHistoryRelatedColumnData(phIndex,
"MentalHealthProblem", "has_mental_health_problem",
"mental_health_indefinite", "receive_mental_health_services")
substance, substanceIndef, recvSubstance \
= self.getHistoryRelatedColumnData(phIndex,
"SubstanceAbuseProblem", "has_substance_abuse_problem",
"substance_abuse_indefinite", "receive_substance_abuse_services")
violence,violenceOccured = self.getHistoryRelatedColumnData(phIndex,
"DomesticViolence",
"domestic_violence_survivor", "dv_occurred")
employed, hoursLastWk, tenure, looking \
= self.getHistoryRelatedColumnData(phIndex, "Employment",
"currently_employed", "hours_worked_last_week",
"employment_tenure", "looking_for_work")
inSchool = self.getHistoryRelatedColumnData(phIndex,
"CurrentlyInSchool", "currently_in_school")
vocational = self.getHistoryRelatedColumnData(phIndex,
"VocationalTraining", "vocational_training")
highestSchool = self.getHistoryRelatedColumnData(phIndex,
"HighestSchoolLevel", "highest_school_level")
(degreeNum, degreeStr) = self.getHistoryRelatedColumnData(phIndex,
"Degree", "degree_id_id_num", "degree_id_id_str")
degree = self.chooseId(degreeNum, degreeStr)
healthStatus = self.getHistoryRelatedColumnData(phIndex,
"HealthStatus", "health_status")
pregnant, dueDate = self.getHistoryRelatedColumnData(phIndex,
"Pregnancy", "pregnancy_status", "due_date")
dueDate = dueDate
serviceEra = self.getHistoryRelatedColumnData(phIndex,
"VeteranServiceEra", "service_era")
serviceDur = self.getHistoryRelatedColumnData(phIndex,
"VeteranMilitaryServiceDuration", "military_service_duration")
servedInWz = self.getHistoryRelatedColumnData(phIndex,
"VeteranServedInWarZone", "served_in_war_zone")
wzNum, wzMonths, wzFire = self.getHistoryRelatedColumnData(phIndex,
"VeteranWarzonesServed", "war_zone_id_id_id_num",
"months_in_war_zone", "received_fire")
warZone = wzNum
branch, discharge = self.getHistoryRelatedColumnData(phIndex,
"VeteranMilitaryBranches", "military_branch", "discharge_status")
cesIndex, childInSchool, school, mvLiason, schoolType, lastSchoolDt \
= self.getHistoryRelatedColumnData(phIndex, "ChildEnrollmentStatus",
"id", "child_currently_enrolled_in_school",
"child_school_name", "child_mckinney_vento_liason",
"child_school_type", "child_last_enrolled_date")
# Get fields from subtables non-simply related to person_historical table:
schoolBarrier = self.getSchoolBarrier(cesIndex)
except:
print("Unable to interpret data from client_historical table!")
raise
# TBD: Other fields to implement:
orgId = None
programId = None
siteId = None
assessDate = None
dateUpdated = None
# Build data row list:
dataRow = \
[
self.outputStr(32, personId),
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputDate(assessDate),
self.outputDate(dateUpdated),
self.outputMoney(monthlyIncome),
self.outputStr(2, income30),
self.outputStr(2, noncash30),
self.outputStr(1, physDis),
self.outputStr(1, recvPhysDis),
self.outputStr(1, devDis),
self.outputStr(1, recvDevDis),
self.outputStr(1, chronicCond),
self.outputStr(1, recvChronic),
self.outputStr(1, hivAids),
self.outputStr(1, recvHivAids),
self.outputStr(1, mental),
self.outputStr(1, mentalIndef),
self.outputStr(1, recvMental),
self.outputStr(1, substance),
self.outputStr(1, substanceIndef),
self.outputStr(1, recvSubstance),
self.outputStr(1, violence),
self.outputStr(1, violenceOccured),
self.outputStr(1, employed),
self.outputInt(hoursLastWk),
self.outputStr(1, tenure),
self.outputStr(1, looking),
self.outputStr(1, inSchool),
self.outputStr(1, vocational),
self.outputStr(1, highestSchool),
self.outputStr(1, degree),
self.outputStr(1, healthStatus),
self.outputStr(1, pregnant),
self.outputDate(dueDate),
self.outputStr(1, serviceEra),
self.outputInt(serviceDur),
self.outputStr(1, servedInWz),
self.outputStr(1, warZone),
self.outputInt(wzMonths),
self.outputStr(1, wzFire),
self.outputStr(1, branch),
self.outputStr(1, discharge),
self.outputStr(1, childInSchool),
self.outputStr(100, school),
self.outputStr(1, mvLiason),
self.outputStr(1, schoolType),
self.outputDate(lastSchoolDt),
self.outputInt(schoolBarrier),
self.outputStr(32, self.exportId)
]
try:
print("\n* DataRow (ClientHistorical)= ", dataRow)
self.historicalWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["historical"])
raise
self.createIncomeBenefitsRecs(phIndex, personId)
def createClientRecs(self, exportId):
for person in self.getPersonData(exportId):
try:
# Get the person index id to be used to get related data:
personIndex = person.id
# Get the fields in person table:
personId = self.chooseId(person.person_id_id_num,
person.person_id_id_str)
personId = self.chooseId(personId, person.person_id_hashed)
firstName = person.person_legal_first_name_unhashed
middleName = person.person_legal_middle_name_unhashed
lastName = person.person_legal_last_name_unhashed
nameSuffix = person.person_legal_suffix_unhashed
ssn = person.person_social_security_number_unhashed
ssnQual = person.person_social_sec_number_quality_code
dob = person.person_date_of_birth_unhashed
ethnicity = person.person_ethnicity_unhashed
gender = person.person_gender_unhashed
releaseOfInfo = self.getReleaseGrantedData(personIndex)
except:
print("Unable to interpret data from person table!")
raise
(primaryRace, secondaryRace) = self.getRacesData(personIndex)
# TBD: Other fields to implement:
orgId = None
dobQual = None
dateAdded = None
dateUpdated = None
updateOrDelete = None
idVerification = None
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputStr(32, personId),
self.outputStr(30, firstName),
self.outputStr(30, middleName),
self.outputStr(30, lastName),
self.outputStr(30, nameSuffix),
self.outputStr(11, ssn),
self.outputStr(1, ssnQual),
self.outputDate(dob),
self.outputStr(1, dobQual),
self.outputStr(1, primaryRace),
self.outputStr(1, secondaryRace),
self.outputStr(1, ethnicity),
self.outputStr(1, gender),
self.outputDate(dateAdded),
self.outputDate(dateUpdated),
self.outputStr(1, updateOrDelete),
self.outputStr(1, idVerification),
self.outputStr(1, releaseOfInfo),
self.outputStr(32, exportId)
]
try:
if self.debug:
print("\n* DataRow (Client)= ", dataRow)
self.clientWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["client"])
raise
self.createClientHistoricalRecs(personIndex, personId)
self.createParticipationRecs(personIndex, personId)
def createBedInventoryRecs(self, siteService, orgId):
for inventory in self.getInventoryData(siteService.id):
try:
# Get the fields in site_service table:
programId = siteService.service_id
siteId = siteService.site_id
# Get the fields in inventory table:
assetListId = inventory.inventory_id_id_num
assetListName = inventory.inventory_id_id_str
householdType = inventory.household_type
bedType = inventory.bed_type
bedAvail = inventory.bed_availability
bedInv = inventory.bed_inventory
chInv = inventory.chronic_homeless_bed
unitInv = inventory.unit_inventory
invStart = inventory.inventory_effective_period_start_date
invEnd = inventory.inventory_effective_period_end_date
hmisPartBeds = inventory.hmis_participating_beds
hmisStart = inventory.hmis_participation_period_start_date
hmisEnd = inventory.hmis_participation_period_end_date
# TBD: Other fields to implement:
dateUpdated = None
except:
print("Unable to interpret data from inventory tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputInt(programId),
self.outputInt(siteId),
self.outputStr(10, assetListId),
self.outputStr(30, assetListName),
self.outputStr(1, householdType),
self.outputStr(1, bedType),
self.outputStr(1, bedAvail),
self.outputInt(bedInv),
self.outputInt(chInv),
self.outputInt(unitInv),
self.outputDate(invStart),
self.outputDate(invEnd),
self.outputInt(hmisPartBeds),
self.outputDate(hmisStart),
self.outputDate(hmisEnd),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (Inventory)= ", dataRow)
self.inventoryWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["inventory"])
raise
def createRegionsRecs(self, siteService, orgId):
for region in self.getRegionData(siteService.key):
try:
# Get the fields in site_service table:
siteId = siteService.site_id
# Get the fields in region table:
#TBD: Which field is ID?
regionId = region.id
regionType = region.region_type
descript = region.region_description
# TBD: Other fields to implement:
dateUpdated = None
except:
print("Unable to interpret data from region tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputInt(siteId),
self.outputStr(2, regionId),
self.outputStr(8, regionType),
self.outputStr(30, descript),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (Regions)= ", dataRow)
self.regionsWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["regions"])
raise
def createSiteInformationRecs(self, site, orgId):
for siteService in self.getSiteServiceData(site.id):
try:
# Get the fields in site table:
siteId = site.airs_key
address = site.physical_address_line_1
city = site.physical_address_city
state = site.physical_address_state
zipCode = site.physical_address_zip_code
# Get the fields in site_service table:
geoCode = siteService.geographic_code
siteServiceType = siteService.site_service_type
housingType = siteService.housing_type
# TBD: Other fields to implement:
dateUpdated = None
except:
print("Unable to interpret data from site, site_service tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputInt(siteId),
self.outputStr(30, address),
self.outputStr(30, city),
self.outputStr(2, state),
self.outputStr(5, zipCode),
self.outputInt(geoCode),
self.outputStr(1, siteServiceType),
self.outputStr(1, housingType),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (SiteInfo)= ", dataRow)
self.siteInfoWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["siteInfo"])
raise
self.createRegionsRecs(siteService, orgId)
self.createBedInventoryRecs(siteService, orgId)
def createAgencyProgramRecs(self, exportIndex):
orgId = None
for agency, service, site in self.getAgencyProgramData(exportIndex):
try:
# Get the fields in agency table:
#agencyIndex = agency.id
orgId = agency.airs_key
orgName = agency.airs_name
# Get the fields in service table:
serviceIndex = service.id
programId = service.airs_key
programName = service.airs_name
directServiceCode = service.direct_service_code
programTypeCode = service.service_type
targetPopulationA = service.target_population_a
targetPopulationB = service.target_population_b
trackingMethod = service.residential_tracking_method
granteeIdentifier = service.grantee_identifier
# Get the fields in site table:
siteId = site.airs_key
# Get the fields in related funding_source table:
receivesMcKFunding = self.getReceivesMcKinneyFundingData(serviceIndex)
# TBD: Other fields to implement:
dateCreated = None
dateUpdated = None
except:
print("Unable to interpret data from agency, service, and/or site tables!")
raise
# Build data row list:
dataRow = \
[
self.outputInt(orgId),
self.outputStr(30, orgName),
self.outputInt(programId),
self.outputStr(30, programName),
self.outputStr(1, directServiceCode),
self.outputInt(siteId),
self.outputStr(1, programTypeCode),
self.outputStr(1, targetPopulationA),
self.outputStr(2, targetPopulationB),
self.outputStr(2, trackingMethod),
self.outputStr(10, granteeIdentifier),
self.outputStr(1, receivesMcKFunding),
self.outputDate(dateCreated),
self.outputDate(dateUpdated),
self.outputStr(32, self.exportId)
]
try:
if self.debug:
print("\n* DataRow (AgencyProgram)= ", dataRow)
self.agencyWriter.writerow(dataRow)
except:
print("Unable to write record to CSV file %s!" \
% HmisCSV30Writer.files["agency"])
raise
self.createSiteInformationRecs(site, orgId)
def createExportRecs(self):
        self.exportId = None
for export in self.getExportData():
try:
exportIndex = export.export_id
self.exportId = export.export_id
expDate = export.export_date
perStart = export.export_period_start_date
perEnd = export.export_period_end_date
# TBD: These moved to source for 3.0:
#swVendor = export.export_software_vendor
#swVersion = export.export_software_version
except:
print("Unable to interpret data from export table!")
raise
source = self.getSourceData(self.exportId)
try:
sourceId = getattr(source, "source_id", None)
sourceName = getattr(source, "source_name", None)
contactFirst = getattr(source, "source_contact_first", None)
contactLast = getattr(source, "source_contact_last", None)
contactPhone = getattr(source, "source_contact_phone", None)
contactExt = getattr(source, "source_contact_extension", None)
contactEmail = getattr(source, "source_email", None)
# TBD: These are moved from export for 3.0:
swVendor = getattr(source, "software_vendor", None)
swVersion = getattr(source, "software_version", None)
except:
print("Unable to interpret data from source table!")
raise
# TBD: Other fields to implement:
self.exportHashing = None
deltaRefresh = None
# Build data row list:
dataRow = \
[
self.outputStr(32, self.exportId),
self.outputStr(32, sourceId),
                self.outputStr(50,
% rf o rpi = r>
label(X,Z,rpi) :- label(X,Y,rf), label(Y,Z,rpi).
% rf o rd = rd
label(X,Z,rd) :- label(X,Y,rf), label(Y,Z,rd).
% rf o rdi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rf), label(Y,Z,rdi).
% rf o rs = rd
label(X,Z,rd) :- label(X,Y,rf), label(Y,Z,rs).
% rf o rsi = r> mi oi
label(X,Z,rpi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rf), label(Y,Z,rsi).
% rf o rf = rf
label(X,Z,rf) :- label(X,Y,rf), label(Y,Z,rf).
% rf o rfi = r= f fi
label(X,Z,req) | label(X,Z,rf) | label(X,Z,rfi) :- label(X,Y,rf), label(Y,Z,rfi).
% rf o rm = rm
label(X,Z,rm) :- label(X,Y,rf), label(Y,Z,rm).
% rf o rmi = r>
label(X,Z,rpi) :- label(X,Y,rf), label(Y,Z,rmi).
% rf o ro = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rf), label(Y,Z,ro).
% rf o roi = r> mi oi
label(X,Z,rpi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rf), label(Y,Z,roi).
% rfi o req = rfi
label(X,Z,rfi) :- label(X,Y,rfi), label(Y,Z,req).
% rfi o rp = r<
label(X,Z,rp) :- label(X,Y,rfi), label(Y,Z,rp).
% rfi o rpi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rfi), label(Y,Z,rpi).
% rfi o rd = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rfi), label(Y,Z,rd).
% rfi o rdi = rdi
label(X,Z,rdi) :- label(X,Y,rfi), label(Y,Z,rdi).
% rfi o rs = ro
label(X,Z,ro) :- label(X,Y,rfi), label(Y,Z,rs).
% rfi o rsi = rdi
label(X,Z,rdi) :- label(X,Y,rfi), label(Y,Z,rsi).
% rfi o rf = r= f fi
label(X,Z,req) | label(X,Z,rf) | label(X,Z,rfi) :- label(X,Y,rfi), label(Y,Z,rf).
% rfi o rfi = rfi
label(X,Z,rfi) :- label(X,Y,rfi), label(Y,Z,rfi).
% rfi o rm = rm
label(X,Z,rm) :- label(X,Y,rfi), label(Y,Z,rm).
% rfi o rmi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rfi), label(Y,Z,rmi).
% rfi o ro = ro
label(X,Z,ro) :- label(X,Y,rfi), label(Y,Z,ro).
% rfi o roi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,rfi), label(Y,Z,roi).
% rm o req = rm
label(X,Z,rm) :- label(X,Y,rm), label(Y,Z,req).
% rm o rp = r<
label(X,Z,rp) :- label(X,Y,rm), label(Y,Z,rp).
% rm o rpi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,rm), label(Y,Z,rpi).
% rm o rd = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rm), label(Y,Z,rd).
% rm o rdi = r<
label(X,Z,rp) :- label(X,Y,rm), label(Y,Z,rdi).
% rm o rs = rm
label(X,Z,rm) :- label(X,Y,rm), label(Y,Z,rs).
% rm o rsi = rm
label(X,Z,rm) :- label(X,Y,rm), label(Y,Z,rsi).
% rm o rf = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rm), label(Y,Z,rf).
% rm o rfi = r<
label(X,Z,rp) :- label(X,Y,rm), label(Y,Z,rfi).
% rm o rm = r<
label(X,Z,rp) :- label(X,Y,rm), label(Y,Z,rm).
% rm o rmi = r= f fi
label(X,Z,req) | label(X,Z,rf) | label(X,Z,rfi) :- label(X,Y,rm), label(Y,Z,rmi).
% rm o ro = r<
label(X,Z,rp) :- label(X,Y,rm), label(Y,Z,ro).
% rm o roi = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,rm), label(Y,Z,roi).
% rmi o req = rmi
label(X,Z,rmi) :- label(X,Y,rmi), label(Y,Z,req).
% rmi o rp = r< di fi m o
label(X,Z,rp) | label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rmi), label(Y,Z,rp).
% rmi o rpi = r>
label(X,Z,rpi) :- label(X,Y,rmi), label(Y,Z,rpi).
% rmi o rd = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,rmi), label(Y,Z,rd).
% rmi o rdi = r>
label(X,Z,rpi) :- label(X,Y,rmi), label(Y,Z,rdi).
% rmi o rs = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,rmi), label(Y,Z,rs).
% rmi o rsi = r>
label(X,Z,rpi) :- label(X,Y,rmi), label(Y,Z,rsi).
% rmi o rf = rmi
label(X,Z,rmi) :- label(X,Y,rmi), label(Y,Z,rf).
% rmi o rfi = rmi
label(X,Z,rmi) :- label(X,Y,rmi), label(Y,Z,rfi).
% rmi o rm = r= s si
label(X,Z,req) | label(X,Z,rs) | label(X,Z,rsi) :- label(X,Y,rmi), label(Y,Z,rm).
% rmi o rmi = r>
label(X,Z,rpi) :- label(X,Y,rmi), label(Y,Z,rmi).
% rmi o ro = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,rmi), label(Y,Z,ro).
% rmi o roi = r>
label(X,Z,rpi) :- label(X,Y,rmi), label(Y,Z,roi).
% ro o req = ro
label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,req).
% ro o rp = r<
label(X,Z,rp) :- label(X,Y,ro), label(Y,Z,rp).
% ro o rpi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,ro), label(Y,Z,rpi).
% ro o rd = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,rd).
% ro o rdi = r< di fi m o
label(X,Z,rp) | label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,rdi).
% ro o rs = ro
label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,rs).
% ro o rsi = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,rsi).
% ro o rf = rd s o
label(X,Z,rd) | label(X,Z,rs) | label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,rf).
% ro o rfi = r< m o
label(X,Z,rp) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,rfi).
% ro o rm = r<
label(X,Z,rp) :- label(X,Y,ro), label(Y,Z,rm).
% ro o rmi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,ro), label(Y,Z,rmi).
% ro o ro = r< m o
label(X,Z,rp) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,ro), label(Y,Z,ro).
% ro o roi = r= d di s si f fi o oi
label(X,Z,req) | label(X,Z,rd) | label(X,Z,rdi) | label(X,Z,rs) | label(X,Z,rsi) | label(X,Z,rf) | label(X,Z,rfi) | label(X,Z,ro) | label(X,Z,roi) :- label(X,Y,ro), label(Y,Z,roi).
% roi o req = roi
label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,req).
% roi o rp = r< di fi m o
label(X,Z,rp) | label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,roi), label(Y,Z,rp).
% roi o rpi = r>
label(X,Z,rpi) :- label(X,Y,roi), label(Y,Z,rpi).
% roi o rd = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,rd).
% roi o rdi = r> di si mi oi
label(X,Z,rpi) | label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,rdi).
% roi o rs = rd f oi
label(X,Z,rd) | label(X,Z,rf) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,rs).
% roi o rsi = r> mi oi
label(X,Z,rpi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,rsi).
% roi o rf = roi
label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,rf).
% roi o rfi = rdi si oi
label(X,Z,rdi) | label(X,Z,rsi) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,rfi).
% roi o rm = rdi fi o
label(X,Z,rdi) | label(X,Z,rfi) | label(X,Z,ro) :- label(X,Y,roi), label(Y,Z,rm).
% roi o rmi = r>
label(X,Z,rpi) :- label(X,Y,roi), label(Y,Z,rmi).
% roi o ro = r= d di s si f fi o oi
label(X,Z,req) | label(X,Z,rd) | label(X,Z,rdi) | label(X,Z,rs) | label(X,Z,rsi) | label(X,Z,rf) | label(X,Z,rfi) | label(X,Z,ro) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,ro).
% roi o roi = r> mi oi
label(X,Z,rpi) | label(X,Z,rmi) | label(X,Z,roi) :- label(X,Y,roi), label(Y,Z,roi).
"""
output = """
% A Disjunctive Logic Program for IA Constraint Networks
% relations
rel(req).
rel(rp).
rel(rpi).
rel(rd).
rel(rdi).
rel(ro).
rel(roi).
rel(rm).
rel(rmi).
rel(rs).
rel(rsi).
rel(rf).
rel(rfi).
% Choice rule for clasp
1 <= {label(X,Y,L) : rel(L)} <= 1 :- node1(X), node2(Y), X<Y.
:- label(X,Y,L), lc(X,Y,L), node1(X), node2(Y), rel(L).
% Composition table
% req o req = r=
label(X,Z,req) :- label(X,Y,req), label(Y,Z,req).
% req o rp = r<
label(X,Z,rp) :- label(X,Y,req), label(Y,Z,rp).
% req o rpi = r>
label(X,Z,rpi) :- label(X,Y,req), label(Y,Z,rpi).
% req o rd = rd
label(X,Z,rd) :- label(X,Y,req), label(Y,Z,rd).
% req o rdi = rdi
label(X,Z,rdi) :- label(X,Y,req), label(Y,Z,rdi).
% req o rs = rs
label(X,Z,rs) :- label(X,Y,req), label(Y,Z,rs).
% req o rsi = rsi
label(X,Z,rsi) :- label(X,Y,req), label(Y,Z,rsi).
% req o rf = rf
label(X,Z,rf) :- label(X,Y,req), label(Y,Z,rf).
% req o rfi = rfi
label(X,Z,rfi) :- label(X,Y,req), label(Y,Z,rfi).
% req o rm = rm
label(X,Z,rm) :- label(X,Y,req), label(Y,Z,rm).
% req o rmi = rmi
label(X,Z,rmi) :- label(X,Y,req), label(Y,Z,rmi).
% req o ro = ro
label(X,Z,ro) :- label(X,Y,req), label(Y,Z,ro).
% req o roi = roi
label(X,Z,roi) :- label(X,Y,req), label(Y,Z,roi).
% rp o req = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,req).
% rp o rp = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,rp).
% rp o rpi = r= < > d di s si f fi m mi o oi
label(X,Z,req) | label(X,Z,rp) | label(X,Z,rpi) | label(X,Z,rd) | label(X,Z,rdi) | label(X,Z,rs) | label(X,Z,rsi) | label(X,Z,rf) | label(X,Z,rfi) | label(X,Z,rm) | label(X,Z,rmi) | label(X,Z,ro) | label(X,Z,roi) :- label(X,Y,rp), label(Y,Z,rpi).
% rp o rd = r< d s m o
label(X,Z,rp) | label(X,Z,rd) | label(X,Z,rs) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rp), label(Y,Z,rd).
% rp o rdi = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,rdi).
% rp o rs = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,rs).
% rp o rsi = r<
label(X,Z,rp) :- label(X,Y,rp), label(Y,Z,rsi).
% rp o rf = r< d s m o
label(X,Z,rp) | label(X,Z,rd) | label(X,Z,rs) | label(X,Z,rm) | label(X,Z,ro) :- label(X,Y,rp), label(Y,Z,rf).
% rp
from .solver1d import *
# p coefficient functions
class Solver2d(Solver1d):
def __init__(self, equation):
super().__init__(equation)
# Equation dependent functions
self.flux_y = equation.flux_y
self.spectral_radius_y = equation.spectral_radius_y
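    # fd2 below reads as a fully discrete, staggered central scheme of
    # Jiang-Tadmor type (a 2-D Nessyahu-Tadmor variant): limited slopes
    # build the staggered cell average un_half, the midpoint predictor
    # u_star advances half a time step, and the corrector applies flux
    # differences on the staggered grid, alternating the stagger via
    # self.odd.  This summary is inferred from the code, not taken from
    # original documentation.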
def fd2(self, u):
u_star = np.ones(u.shape)
un_half = np.ones(u.shape)
u_prime_x = np.ones(u.shape)
u_prime_y = np.ones(u.shape)
u_prime_x[1:-1, 1:-1] = limiter_x(u)
u_prime_y[1:-1, 1:-1] = limiter_y(u)
if self.odd:
un_half[1:-2, 1:-2] = 0.25 * (
(u[1:-2, 1:-2] + u[2:-1, 1:-2] + u[1:-2, 2:-1] + u[2:-1, 2:-1])
+ 0.25
* (
(u_prime_x[1:-2, 1:-2] - u_prime_x[2:-1, 1:-2])
+ (u_prime_x[1:-2, 2:-1] - u_prime_x[2:-1, 2:-1])
+ (u_prime_y[1:-2, 1:-2] - u_prime_y[1:-2, 2:-1])
+ (u_prime_y[2:-1, 1:-2] - u_prime_y[2:-1, 2:-1])
)
)
else:
un_half[2:-1, 2:-1] = 0.25 * (
(u[1:-2, 1:-2] + u[2:-1, 1:-2] + u[1:-2, 2:-1] + u[2:-1, 2:-1])
+ 0.25
* (
(u_prime_x[1:-2, 1:-2] - u_prime_x[2:-1, 1:-2])
+ (u_prime_x[1:-2, 2:-1] - u_prime_x[2:-1, 2:-1])
+ (u_prime_y[1:-2, 1:-2] - u_prime_y[1:-2, 2:-1])
+ (u_prime_y[2:-1, 1:-2] - u_prime_y[2:-1, 2:-1])
)
)
f = self.flux_x(u)
g = self.flux_y(u)
f_prime_x = limiter_x(f)
g_prime_y = limiter_y(g)
u_star[1:-1, 1:-1] = u[1:-1, 1:-1] - 0.5 * self.dt * (
f_prime_x / self.dx + g_prime_y / self.dy
)
self.boundary_conditions(u_star)
f_star = self.flux_x(u_star)
g_star = self.flux_y(u_star)
if self.odd:
u[1:-2, 1:-2] = (
un_half[1:-2, 1:-2]
- 0.5
* self.dt
/ self.dx
* (
(f_star[2:-1, 1:-2] - f_star[1:-2, 1:-2])
+ (f_star[2:-1, 2:-1] - f_star[1:-2, 2:-1])
)
- 0.5
* self.dt
/ self.dy
* (
(g_star[1:-2, 2:-1] - g_star[1:-2, 1:-2])
+ (g_star[2:-1, 2:-1] - g_star[2:-1, 1:-2])
)
)
else:
u[2:-1, 2:-1] = (
un_half[2:-1, 2:-1]
- 0.5
* self.dt
/ self.dx
* (
(f_star[2:-1, 1:-2] - f_star[1:-2, 1:-2])
+ (f_star[2:-1, 2:-1] - f_star[1:-2, 2:-1])
)
- 0.5
* self.dt
/ self.dy
* (
(g_star[1:-2, 2:-1] - g_star[1:-2, 1:-2])
+ (g_star[2:-1, 2:-1] - g_star[2:-1, 1:-2])
)
)
self.boundary_conditions(u)
self.odd = not self.odd
return u
#################
# SD2
#################
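    # The sd2 pipeline reads as a semi-discrete second-order central
    # scheme: piecewise-linear reconstruction of N/S/E/W interface values,
    # a Rusanov / local Lax-Friedrichs-type numerical flux built from the
    # local spectral radius, and a two-stage (Heun) time update in sd2().
    # Again inferred from the code below rather than from original docs.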
def reconstruction_sd2(self, u):
u_N, u_S, u_E, u_W = np.ones((4,) + u.shape)
ux = limiter_x(u[1:-1, 1:-1])
uy = limiter_y(u[1:-1, 1:-1])
u_N[j0, j0], u_S[j0, j0], u_E[j0, j0], u_W[j0, j0] = u[None, j0, j0] + np.array(
[0.5 * uy, -0.5 * uy, 0.5 * ux, -0.5 * ux]
)
list(map(self.boundary_conditions, [u_N, u_S, u_E, u_W]))
return u_N, u_S, u_E, u_W
def Hx_flux_sd2(self, u_E, u_W):
a = np.maximum(self.spectral_radius_x(u_E), self.spectral_radius_x(u_W))
f_E = self.flux_x(u_E)
f_W = self.flux_x(u_W)
if u_W.shape == a.shape:
return 0.5 * (f_W + f_E) - 0.5 * a * (u_W - u_E) # scalar
else:
return 0.5 * (f_W + f_E) - 0.5 * np.multiply(
a[:, :, None], (u_W - u_E)
) # systems
def Hy_flux_sd2(self, u_E, u_W):
a = np.maximum(self.spectral_radius_y(u_E), self.spectral_radius_y(u_W))
f_E = self.flux_y(u_E)
f_W = self.flux_y(u_W)
if u_W.shape == a.shape:
return 0.5 * (f_W + f_E) - 0.5 * a * (u_W - u_E) # scalar
else:
return 0.5 * (f_W + f_E) - 0.5 * np.multiply(
a[:, :, None], (u_W - u_E)
) # systems
def c_flux_sd2(self, u_N, u_S, u_E, u_W):
Hx_halfm = self.Hx_flux_sd2(u_E[jm, j0], u_W[j0, j0])
Hx_halfp = self.Hx_flux_sd2(u_E[j0, j0], u_W[jp, j0])
Hy_halfm = self.Hy_flux_sd2(u_N[j0, jm], u_S[j0, j0])
Hy_halfp = self.Hy_flux_sd2(u_N[j0, j0], u_S[j0, jp])
return -self.dt / self.dx * (Hx_halfp - Hx_halfm) - self.dt / self.dy * (
Hy_halfp - Hy_halfm
)
def sd2(self, u):
self.boundary_conditions(u)
u_N, u_S, u_E, u_W = self.reconstruction_sd2(u)
C0 = self.c_flux_sd2(u_N, u_S, u_E, u_W)
u[j0, j0] += C0
self.boundary_conditions(u)
u_N, u_S, u_E, u_W = self.reconstruction_sd2(u)
C1 = self.c_flux_sd2(u_N, u_S, u_E, u_W)
u[j0, j0] += 0.5 * (C1 - C0)
self.boundary_conditions(u)
return u
#################
# SD3
#################
# indicators: indicators_2d_sd3, indicators_diag_2d_sd3
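    # The third-order variant computes WENO-style smoothness indicators
    # from one-sided and centered polynomial coefficients (px_coefs,
    # py_coefs, pdx_coefs, pdy_coefs and eps are assumed to come from the
    # solver1d star import) and blends the candidate reconstructions with
    # the usual alpha = c / (eps + IS)**2 weights.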
def indicators_sd3(self, u):
u_norm = np.sqrt(self.dx * self.dy) * np.linalg.norm(u[j0, j0])
pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = px_coefs(u)
ISl = pl1 ** 2 / (u_norm + eps)
IScx = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcx2 ** 2 + pcx1 ** 2)
ISr = pr1 ** 2 / (u_norm + eps)
pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = py_coefs(u)
ISb = pb1 ** 2 / (u_norm + eps)
IScy = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcy2 ** 2 + pcy1 ** 2)
ISt = pt1 ** 2 / (u_norm + eps)
return ISl, IScx, ISr, ISb, IScy, ISt
def indicators_diag_sd3(self, u):
u_norm = np.sqrt(self.dx * self.dy) * np.linalg.norm(u[j0, j0])
pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = pdx_coefs(u)
dISl = pl1 ** 2 / (u_norm + eps)
dIScx = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcx2 ** 2 + pcx1 ** 2)
dISr = pr1 ** 2 / (u_norm + eps)
pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = pdy_coefs(u)
dISb = pb1 ** 2 / (u_norm + eps)
dIScy = 1.0 / (u_norm + eps) * ((13.0 / 3.0) * pcy2 ** 2 + pcy1 ** 2)
dISt = pt1 ** 2 / (u_norm + eps)
return dISl, dIScx, dISr, dISb, dIScy, dISt
# reconstruction: reconstruction_2d_sd3, reconstruction_diag_2d_sd3
def reconstruction_sd3(self, u, ISl, IScx, ISr, ISb, IScy, ISt):
u_N, u_S, u_E, u_W = np.ones((4,) + u.shape)
cl = 0.25
ccx = 0.5
cr = 0.25
cb = 0.25
ccy = 0.5
ct = 0.25
pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = px_coefs(u)
alpl = cl / ((eps + ISl) ** 2)
alpcx = ccx / ((eps + IScx) ** 2)
alpr = cr / ((eps + ISr) ** 2)
alp_sum = alpl + alpcx + alpr
wl = alpl / alp_sum
wcx = alpcx / alp_sum
wr = alpr / alp_sum
pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = py_coefs(u)
alpb = cb / ((eps + ISb) ** 2)
alpcy = ccy / ((eps + IScy) ** 2)
alpt = ct / ((eps + ISt) ** 2)
alp_sum = alpb + alpcy + alpt
wb = alpb / alp_sum
wcy = alpcy / alp_sum
wt = alpt / alp_sum
u_N[j0, j0] = (
wb * (pb0 + 0.5 * pb1)
+ wcy * (pcy0 + 0.5 * pcy1 + 0.25 * pcy2)
+ wt * (pt0 + 0.5 * pt1)
)
u_S[j0, j0] = (
wb * (pb0 - 0.5 * pb1)
+ wcy * (pcy0 - 0.5 * pcy1 + 0.25 * pcy2)
+ wt * (pt0 - 0.5 * pt1)
)
u_E[j0, j0] = (
wl * (pl0 + 0.5 * pl1)
+ wcx * (pcx0 + 0.5 * pcx1 + 0.25 * pcx2)
+ wr * (pr0 + 0.5 * pr1)
)
u_W[j0, j0] = (
wl * (pl0 - 0.5 * pl1)
+ wcx * (pcx0 - 0.5 * pcx1 + 0.25 * pcx2)
+ wr * (pr0 - 0.5 * pr1)
)
return u_N, u_S, u_E, u_W
def reconstruction_diag_sd3(self, u, dISl, dIScx, dISr, dISb, dIScy, dISt):
u_NE, u_SE, u_NW, u_SW = np.ones((4,) + u.shape)
cl = 0.25
ccx = 0.5
cr = 0.25
cb = 0.25
ccy = 0.5
ct = 0.25
pl0, pl1, pr0, pr1, pcx0, pcx1, pcx2 = pdx_coefs(u)
alpl = cl / (eps + dISl) ** 2
alpcx = ccx / (eps + dIScx) ** 2
alpr = cr / (eps + dISr) ** 2
alp_sum = alpl + alpcx + alpr
wl = alpl / alp_sum
wcx = alpcx / alp_sum
wr = alpr / alp_sum
pb0, pb1, pt0, pt1, pcy0, pcy1, pcy2 = pdy_coefs(u)
alpb = cb / (eps + dISb) ** 2
alpcy = ccy / (eps + dIScy) ** 2
alpt = ct / (eps + dISt) ** 2
alp_sum = alpb + alpcy + alpt
wb = alpb / alp_sum
wcy = alpcy / alp_sum
wt = alpt / alp_sum
u_NW[j0, j0] = (
wb * (pb0 + 0.5 * pb1)
+ wcy * (pcy0 + 0.5 * pcy1 + 0.25 * pcy2)
+ wt * (pt0 + 0.5 * pt1)
)
u_SE[j0, j0] = (
wb * (pb0 - 0.5 * pb1)
+ wcy * (pcy0 - 0.5 * pcy1 + 0.25 * pcy2)
+ wt * (pt0 - 0.5 * pt1)
)
u_NE[j0, j0] = (
            wl * (pl0
# Repository: woodrow/pyoac
from pypy.tool.udir import udir
class AppTestMarshal:
def setup_class(cls):
tmpfile = udir.join('AppTestMarshal.tmp')
cls.w_tmpfile = cls.space.wrap(str(tmpfile))
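    # Each generated test below round-trips one `case` value through
    # marshal.dumps/loads and marshal.dump/load (via StringIO) and asserts
    # that both the value and its exact type survive.  The method bodies
    # are app-level Python 2 code, hence the print statements and the
    # 42L / sys.maxint literals.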
def test_None(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = None
print "case: %-30s func=None" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_False(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = False
print "case: %-30s func=False" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_True(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = True
print "case: %-30s func=True" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_StopIteration(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = StopIteration
print "case: %-30s func=StopIteration" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_Ellipsis(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = Ellipsis
print "case: %-30s func=Ellipsis" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_42(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = 42
print "case: %-30s func=42" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__minus_17(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = -17
print "case: %-30s func=_minus_17" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_sys_dot_maxint(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = sys.maxint
print "case: %-30s func=sys_dot_maxint" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__minus_1_dot_25(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = -1.25
print "case: %-30s func=_minus_1_dot_25" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__minus_1_dot_25__2(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = -1.25 #2
print "case: %-30s func=_minus_1_dot_25__2" % (case, )
s = marshal.dumps(case, 2); assert len(s) in (9, 17)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_2_plus_5j(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = 2+5j
print "case: %-30s func=2_plus_5j" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_2_plus_5j__2(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = 2+5j #2
print "case: %-30s func=2_plus_5j__2" % (case, )
s = marshal.dumps(case, 2); assert len(s) in (9, 17)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_42L(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = 42L
print "case: %-30s func=42L" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__minus_1234567890123456789012345678901234567890L(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = -1234567890123456789012345678901234567890L
print "case: %-30s func=_minus_1234567890123456789012345678901234567890L" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test_hello_____not_interned(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = hello # not interned
print "case: %-30s func=hello_____not_interned" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__Quote_hello_Quote_(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = "hello"
print "case: %-30s func=_Quote_hello_Quote_" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__brace__ecarb_(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = ()
print "case: %-30s func=_brace__ecarb_" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__brace_1_comma__2_ecarb_(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = (1, 2)
print "case: %-30s func=_brace_1_comma__2_ecarb_" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__list__tsil_(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = []
print "case: %-30s func=_list__tsil_" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
f.seek(0)
x = marshal.load(f)
assert x == case and type(x) is type(case)
def test__list_3_comma__4_tsil_(self):
import sys
hello = "he"
hello += "llo"
def func(x):
return lambda y: x+y
scopefunc = func(42)
import marshal, StringIO
case = [3, 4]
print "case: %-30s func=_list_3_comma__4_tsil_" % (case, )
s = marshal.dumps(case)
x = marshal.loads(s)
assert x == case and type(x) is type(case)
f = StringIO.StringIO()
marshal.dump(case, f)
"""
dataclasses
===========
Wrappers around Python's dataclasses to support slots and defaults.
License
-------
Copyright 2019 NEM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import contextlib
import copy
import dataclasses
import inspect
import typing
__all__ = [
'Field',
'FrozenInstanceError',
'InitVar',
'MISSING',
'dataclass',
'is_dataclass',
]
# HELPERS
Field = dataclasses.Field
FrozenInstanceError = dataclasses.FrozenInstanceError
InitVar = dataclasses.InitVar
MISSING = dataclasses.MISSING
is_dataclass = dataclasses.is_dataclass
# ForwardRef isn't part of the public API.
# If it's not present, just skip it.
ForwardRef = getattr(typing, 'ForwardRef', str)
Vars = typing.Dict[str, typing.Any]
DictType = typing.Mapping[str, typing.Any]
TupleType = typing.Sequence[typing.Any]
DictFactory = typing.Callable[..., DictType]
TupleFactory = typing.Callable[..., TupleType]
# NEW METHODS
def get_argnames(func):
"""Get the argument names from a function."""
# Get all positional arguments in __init__, including named
# and named optional arguments. `co_varnames` stores all argument
# names (including local variable names) in order, starting with
# function arguments, so only grab `co_argcount` varnames.
code = func.__code__
argcount = code.co_argcount
return code.co_varnames[:argcount]
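# For example, get_argnames(lambda x, y=1, *args, **kw: None) returns
# ('x', 'y'): co_argcount covers only positional/named parameters, so
# locals, *args and **kw are excluded.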
def set_defaults(
cls: typing.Type,
defaults: Vars,
) -> None:
"""Set and validate optional default arguments."""
if not defaults:
return
# Need to validate we aren't adding defaults for interior items.
init = cls.__init__
varnames = get_argnames(init)
count = len(defaults)
if not all(i in defaults for i in varnames[-count:]):
raise SyntaxError("non-default argument follows default argument")
if init.__defaults__ is not None:
raise SyntaxError("__defaults__ should be none.")
init.__defaults__ = tuple(defaults.values())
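# Sketch of the effect: for a wrapped `class Point` with fields `x: int`
# and `y: int`, set_defaults(Point, {'y': 0}) assigns
# Point.__init__.__defaults__ = (0,), so Point(1) becomes valid with
# y == 0, while defaults for non-trailing arguments raise SyntaxError.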
def fix_annotation(type, global_vars: Vars, local_vars: Vars):
"""Fix annotation for field with type."""
if isinstance(type, str):
try:
# Silence a warning about sec, all the arguments passed.
# Eval only gets passed for internal type annotations.
return eval(type, global_vars, local_vars) # nosec
except NameError:
return type
elif isinstance(type, ForwardRef):
arg = fix_annotation(type.__forward_arg__, global_vars, local_vars)
is_argument = type.__forward_is_argument__
if isinstance(arg, str):
return ForwardRef(arg, is_argument)
return arg
elif hasattr(type, '__args__'):
args = type.__args__
args = tuple((fix_annotation(i, global_vars, local_vars) for i in args))
type.__args__ = args
return type
def fix_annotations(
cls: typing.Type,
clsdict: Vars,
global_vars: Vars,
local_vars: Vars,
) -> None:
"""Fix any forward references to variables defined in the callee scope."""
# Don't care except when enforcing types.
if typing.TYPE_CHECKING:
annotations = clsdict['__annotations__']
for field, type in annotations.items():
type = fix_annotation(type, global_vars, local_vars)
annotations[field] = type
def is_classvar(x, global_vars: Vars, local_vars: Vars) -> bool:
"""Determine if x is a ClassVar."""
if isinstance(x, str):
# Silence a warning about sec, all the arguments passed.
# Eval only gets passed for internal type annotations.
x = eval(x, global_vars, local_vars) # nosec
return getattr(x, '__origin__', None) is typing.ClassVar
def set_slots(
cls: typing.Type,
clsdict: Vars,
slots: bool,
global_vars: Vars,
local_vars: Vars,
) -> None:
"""Set default __slots__ implementation."""
if not slots or '__slots__' in clsdict:
return
annotations = clsdict['__annotations__']
is_cv = lambda x: is_classvar(x, global_vars, local_vars)
slots = (k for k, v in annotations.items() if not is_cv(v))
clsdict['__slots__'] = tuple(slots)
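# For example, annotations {'x': int, 'y': typing.ClassVar[int]} yield
# __slots__ == ('x',): ClassVar entries are skipped so class-level
# attributes are not shadowed by slot descriptors.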
def set_copy(
cls: typing.Type,
clsdict: Vars,
copy: bool,
) -> None:
"""Set default __copy__ implementation."""
if not copy or '__copy__' in clsdict:
return
def func(self):
return type(self)(**replace_dict(self))
func.__name__ = '__copy__'
func.__qualname__ = f'{cls.__qualname__}.__copy__'
func.__module__ = cls.__module__
clsdict['__copy__'] = func
def set_deepcopy(
cls: typing.Type,
clsdict: Vars,
deepcopy: bool,
) -> None:
"""Set default __deepcopy__ implementation."""
if not deepcopy or '__deepcopy__' in clsdict:
return
def func(self, memo=None):
data = copy.deepcopy(replace_dict(self), memo)
return type(self)(**data)
func.__name__ = '__deepcopy__'
func.__qualname__ = f'{cls.__qualname__}.__deepcopy__'
func.__module__ = cls.__module__
clsdict['__deepcopy__'] = func
def shallow_asdict(self, dict_factory: DictFactory = dict) -> DictType:
names = [i.name for i in dataclasses.fields(self)]
return dict_factory([(i, getattr(self, i)) for i in names])
def deep_asdict(self, dict_factory: DictFactory = dict) -> DictType:
return dataclasses.asdict(self, dict_factory=dict_factory)
def set_asdict(
cls: typing.Type,
clsdict: Vars,
asdict: bool,
) -> None:
"""Set default asdict implementation."""
if not asdict or 'asdict' in clsdict:
return
def func(
self,
recurse: bool = True,
dict_factory: DictFactory = dict,
) -> DictType:
if recurse:
return deep_asdict(self, dict_factory=dict_factory)
return shallow_asdict(self, dict_factory=dict_factory)
func.__name__ = 'asdict'
func.__qualname__ = f'{cls.__qualname__}.asdict'
func.__module__ = cls.__module__
clsdict['asdict'] = func
def shallow_astuple(self, tuple_factory: TupleFactory = tuple) -> TupleType:
names = [i.name for i in dataclasses.fields(self)]
return tuple_factory([getattr(self, i) for i in names])
def deep_astuple(self, tuple_factory: TupleFactory = tuple) -> TupleType:
return dataclasses.astuple(self, tuple_factory=tuple_factory)
def set_astuple(
cls: typing.Type,
clsdict: Vars,
astuple: bool,
) -> None:
"""Set default astuple implementation."""
if not astuple or 'astuple' in clsdict:
return
def func(
self,
recurse: bool = True,
tuple_factory: TupleFactory = tuple,
) -> TupleType:
if recurse:
return deep_astuple(self, tuple_factory=tuple_factory)
return shallow_astuple(self, tuple_factory=tuple_factory)
func.__name__ = 'astuple'
func.__qualname__ = f'{cls.__qualname__}.astuple'
func.__module__ = cls.__module__
clsdict['astuple'] = func
def set_fields(
cls: typing.Type,
clsdict: Vars,
fields: bool,
) -> None:
"""Set default fields implementation."""
if not fields or 'fields' in clsdict:
return
def func(self) -> tuple:
return dataclasses.fields(self)
func.__name__ = 'fields'
func.__qualname__ = f'{cls.__qualname__}.fields'
func.__module__ = cls.__module__
clsdict['fields'] = func
def replace_dict(self):
# This is a smart-replace, any fields that are not defined
# in the initializer are ignored, since it is assumed
# sensible defaults are automatically provided for those.
varnames = get_argnames(self.__init__.__func__)[1:]
return {k: getattr(self, k) for k in varnames}
def set_replace(
cls: typing.Type,
clsdict: Vars,
replace: bool,
) -> None:
"""Set default replace implementation."""
if not replace or 'replace' in clsdict:
return
def func(self, **changes):
# Convert to a dictionary and then call __init__.
asdict = replace_dict(self)
asdict.update(changes)
return self.__class__(**asdict)
func.__name__ = 'replace'
func.__qualname__ = f'{cls.__qualname__}.replace'
func.__module__ = cls.__module__
clsdict['replace'] = func
def set_miscellaneous(cls: typing.Type, clsdict: Vars) -> None:
"""Set miscellaneous data for the class."""
clsdict['_set'] = object.__setattr__
# DATACLASS METACLASS
def wrap_dataclass(
cls: typing.Type,
global_vars: typing.Dict[str, typing.Any],
local_vars: typing.Dict[str, typing.Any],
slots: bool = True,
copy: bool = True,
deepcopy: bool = True,
asdict: bool = True,
astuple: bool = True,
fields: bool = True,
replace: bool = True,
) -> typing.Type:
"""Wrap a dataclass base with the desired methods."""
mcls = cls.__class__
name = cls.__name__
bases = cls.__bases__
clsdict = cls.__dict__.copy()
clsdict.setdefault('__annotations__', {})
fix_annotations(cls, clsdict, global_vars, local_vars)
set_slots(cls, clsdict, slots, global_vars, local_vars)
set_copy(cls, clsdict, copy)
set_deepcopy(cls, clsdict, deepcopy)
set_asdict(cls, clsdict, asdict)
set_astuple(cls, clsdict, astuple)
set_fields(cls, clsdict, fields)
set_replace(cls, clsdict, replace)
set_miscellaneous(cls, clsdict)
return mcls.__new__(mcls, name, bases, clsdict)
# PATCHES
def update_method(
cls: typing.Type,
new_cls: typing.Type,
func,
) -> None:
"""
Due to our dynamic creation of a new class, our old class may still
be present in some function closures, through `super()/__class__`.
We should not have remnants of the old class anywhere else, since
that would require hard-coding the actual class name, which should
not be done for obvious reasons.
To rectify this, we can check if '__class__' is in nonlocal vars
for each function (`func.__code__.co_freevars`), and if it is,
and the old class is bound, update it.
This is all well-documented in the Python data model:
__code__: Contains compiled function bytecode.
co_freevars: Contains free variables referenced inside closure.
__closure__: None or tuple of cells for the functions free variables.
cell_contents: Get value of cell.
Since we want a value injected locally (a free variable), with the
name `__class__`, we can use these attributes to determine if the old
class is bound to `__class__`, and if so, overwrite it.
"""
code = func.__code__
closure = func.__closure__
freevars = code.co_freevars
if closure and freevars and '__class__' in freevars:
# Have a specified class in freevars, must fix in closure.
# Only change if the cell_contents is the old cls,
# which we need to replace with the new cls.
for cell in closure:
if cell.cell_contents is cls:
cell.cell_contents = new_cls
def update_methods(cls: typing.Type, new_cls: typing.Type) -> None:
"""Replace all instances of `super()/__class__` with the new class."""
funcs = inspect.getmembers(new_cls, inspect.isroutine)
for _, func in funcs:
# Unwrap method if applicable.
func = getattr(func, '__func__', func)
with contextlib.suppress(AttributeError):
# Builtin functions won't have __code__.
update_method(cls, new_cls, func)
def update_classvars(cls: typing.Type, new_cls: typing.Type) -> None:
"""Replace all instances of the old class in class variables."""
    # We're going to cheat,
    if rssi > -68:
        # "moc bardzo dobra" = very good signal strength
        return "moc bardzo dobra (" + str(rssi) + ")"
    if rssi > -71:
        # "moc dobra" = good signal strength
        return "moc dobra (" + str(rssi) + ")"
    if rssi > -81:
        # "moc słaba" = weak signal strength
        return "moc słaba (" + str(rssi) + ")"
    if rssi > -91:
        # "moc bardzo słaba" = very weak signal strength
        return "moc bardzo słaba (" + str(rssi) + ")"
    return info
def _wifi_frequency_info(mhz):
if str(mhz).startswith("2"):
return "2.4 GHz"
elif str(mhz).startswith("5"):
return "5 GHz"
return str(mhz)
def _publish_wifi_status(hass, service):
wifis = json.loads(service.data["payload"])
ais_global.GLOBAL_SCAN_WIFI_ANSWER = wifis
wifis_names = [ais_global.G_EMPTY_OPTION]
for item in wifis["ScanResult"]:
if len(item["ssid"]) > 0:
wifis_names.append(
item["ssid"]
+ "; "
+ _wifi_rssi_to_info(item["rssi"])
+ "; "
+ item["capabilities"]
+ "; "
+ _wifi_frequency_info(item["frequency_mhz"])
+ "; MAC: "
+ item["bssid"]
)
hass.async_run_job(
hass.services.call(
"input_select",
"set_options",
{
"entity_id": "input_select.ais_android_wifi_network",
"options": wifis_names,
},
)
)
return len(wifis_names) - 1
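# Example payload shape for the "ais/wifi_scan_info" topic, reconstructed from
# the fields read by _publish_wifi_status above; the concrete values below are
# invented for illustration only:
#
#   {"ScanResult": [{"ssid": "MyNetwork", "rssi": -62,
#                    "capabilities": "[WPA2-PSK-CCMP][ESS]",
#                    "frequency_mhz": 2437, "bssid": "aa:bb:cc:dd:ee:ff"}]}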
def _process_command_from_frame(hass, service):
# process the message from frame
if "topic" not in service.data:
return
if service.data["topic"] == "ais/speech_command":
hass.async_run_job(
hass.services.async_call(
"conversation", "process", {"text": service.data["payload"]}
)
)
return
elif service.data["topic"] == "ais/key_command":
_process_code(hass, json.loads(service.data["payload"]))
return
elif service.data["topic"] == "ais/speech_text":
_say_it(hass, service.data["payload"])
return
elif service.data["topic"] == "ais/speech_status":
# AIS service.data["payload"] can be: START -> DONE/ERROR
event_data = {"status": str(service.data["payload"])}
hass.bus.fire("ais_speech_status", event_data)
hass.states.async_set(
"sensor.ais_speech_status", str(service.data["payload"]), {}
)
_LOGGER.debug("speech_status: " + str(service.data["payload"]))
return
elif service.data["topic"] == "ais/add_bookmark":
try:
bookmark = json.loads(service.data["payload"])
hass.async_run_job(
hass.services.call(
"ais_bookmarks",
"add_bookmark",
{
"attr": {
"media_title": bookmark["media_title"],
"source": bookmark["media_source"],
"media_position": bookmark["media_position"],
"media_content_id": bookmark["media_content_id"],
"media_stream_image": bookmark["media_stream_image"],
}
},
)
)
except Exception as e:
_LOGGER.info("problem to add_bookmark: " + str(e))
return
elif service.data["topic"] == "ais/player_speed":
# speed = json.loads(service.data["payload"])
# _say_it(hass, "prędkość odtwarzania: " + str(speed["currentSpeed"]))
# hass.services.call(
# 'input_number',
# 'set_value', {
# "entity_id": "input_number.media_player_speed",
# "value": round(speed["currentSpeed"], 2)})
return
elif service.data["topic"] == "ais/wifi_scan_info":
len_wifis = _publish_wifi_status(hass, service)
info = "Mamy dostępne " + str(len_wifis) + " wifi."
_say_it(hass, info)
return
elif service.data["topic"] == "ais/iot_scan_info":
iot = json.loads(service.data["payload"])
iot_names = [ais_global.G_EMPTY_OPTION]
for item in iot["ScanResult"]:
if len(item["ssid"]) > 0:
iot_names.append(
item["ssid"]
+ "; "
+ _wifi_rssi_to_info(item["rssi"])
+ "; "
+ item["capabilities"]
)
hass.async_run_job(
hass.services.async_call(
"input_select",
"set_options",
{
"entity_id": "input_select.ais_iot_devices_in_network",
"options": iot_names,
},
)
)
if len(iot_names) == 1:
info = "Nie znaleziono żadnego nowego urządzenia"
elif len(iot_names) == 2:
if item["model"] == ais_global.G_MODEL_SONOFF_S20:
info = "Znaleziono nowe inteligentne gniazdo"
elif item["model"] == ais_global.G_MODEL_SONOFF_SLAMPHER:
info = "Znaleziono nową oprawkę"
elif item["model"] == ais_global.G_MODEL_SONOFF_TOUCH:
info = "Znaleziono nowy przełącznik dotykowy"
elif item["model"] == ais_global.G_MODEL_SONOFF_TH:
info = "Znaleziono nowy przełącznik z czujnikami"
elif item["model"] == ais_global.G_MODEL_SONOFF_B1:
info = "Znaleziono nową żarówkę"
elif item["model"] == ais_global.G_MODEL_SONOFF_POW:
info = "Znaleziono nowy przełącznik z pomiarem mocy"
elif item["model"] == ais_global.G_MODEL_SONOFF_DUAL:
info = "Znaleziono nowy podwójny przełącznik"
elif item["model"] == ais_global.G_MODEL_SONOFF_BASIC:
info = "Znaleziono nowy przełącznik"
elif item["model"] == ais_global.G_MODEL_SONOFF_IFAN:
info = "Znaleziono nowy wentylator sufitowy"
elif item["model"] == ais_global.G_MODEL_SONOFF_T11:
info = "Znaleziono nowy przełącznik dotykowy pojedynczy"
elif item["model"] == ais_global.G_MODEL_SONOFF_T12:
info = "Znaleziono nowy przełącznik dotykowy podwójny"
elif item["model"] == ais_global.G_MODEL_SONOFF_T13:
info = "Znaleziono nowy przełącznik dotykowy potrójny"
else:
info = "Znaleziono nowe inteligentne urządzenie"
else:
info = "Znaleziono " + str(len(iot_names) - 1) + " nowe urządzenia"
# check if we are doing this from remote
if (
len(iot_names) > 1
and CURR_ENTITIE
in (
"sensor.ais_connect_iot_device_info",
"script.ais_scan_iot_devices_in_network",
)
and CURR_BUTTON_CODE == 23
):
info = (
info
+ ". Sprawdź wszystkie parametry, naciśnij strzałkę w prawo, by przejść dalej. "
"Na koniec uruchom: Dołącz nowe urządzenie."
)
# prepare form data
set_curr_entity(hass, "script.ais_scan_iot_devices_in_network")
hass.async_run_job(
hass.services.async_call(
"input_select",
"select_next",
{"entity_id": "input_select.ais_iot_devices_in_network"},
)
)
_say_it(hass, info)
return
elif service.data["topic"] == "ais/wifi_status_info":
_publish_wifi_status(hass, service)
return
elif service.data["topic"] == "ais/ais_gate_req_answer":
cci = json.loads(service.data["payload"])
ais_global.set_ais_gate_req(cci["req_id"], cci["req_answer"])
return
elif service.data["topic"] == "ais/wifi_connection_info":
# current connection info
cci = json.loads(service.data["payload"])
attr = {
"friendly_name": "Prędkość połączenia",
"unit_of_measurement": "MB",
"icon": "mdi:speedometer",
}
desc = ""
speed = 0
if "ais_gate_id" in cci:
pass
# ais_global.G_AIS_SECURE_ANDROID_ID_DOM = cci["ais_gate_id"]
if "pass" in cci:
ais_global.set_my_wifi_pass(cci["pass"])
if "ssid" in cci:
ais_global.set_my_ssid(cci["ssid"])
attr["ssid"] = cci["ssid"]
if cci["ssid"] == "<unknown ssid>":
desc += "brak informacji o połączeniu"
else:
desc += cci["ssid"]
if "link_speed_mbps" in cci:
desc += (
"; prędkość: "
+ str(cci["link_speed_mbps"])
+ " megabitów na sekundę"
)
attr["link_speed_mbps"] = cci["link_speed_mbps"]
speed = cci["link_speed_mbps"]
if "rssi" in cci:
desc += "; " + _wifi_rssi_to_info(cci["rssi"])
attr["rssi"] = cci["rssi"]
if "frequency_mhz" in cci:
desc += "; " + _wifi_frequency_info(cci["frequency_mhz"])
attr["frequency_mhz"] = cci["frequency_mhz"]
attr["description"] = desc
hass.states.async_set(
"sensor.ais_wifi_service_current_network_info", speed, attr
)
return
elif service.data["topic"] == "ais/wifi_state_change_info":
# current connection info
cci = json.loads(service.data["payload"])
ais_global.set_my_ssid(cci["ssid"])
# check if we are now online
if ais_global.GLOBAL_MY_IP == "127.0.0.1":
ais_global.set_global_my_ip(None)
if ais_global.GLOBAL_MY_IP != "127.0.0.1":
pass
# if yes then try to reload the cloud and other components
# TODO reload invalid components
# hass.async_run_job(async_load_platform(hass, 'sun', 'sun', {}, {}))
# hass.async_run_job(async_load_platform(hass, 'ais_cloud', 'ais_cloud', {}, {}))
# hass.async_run_job(async_load_platform(hass, 'ais_yt_service', 'ais_yt_service', {}, {}))
# hass.async_run_job(async_load_platform(hass, 'ais_knowledge_service', 'ais_knowledge_service'...
return
elif service.data["topic"] == "ais/go_to_player":
go_to_player(hass, False)
elif service.data["topic"] == "ais/ip_state_change_info":
pl = json.loads(service.data["payload"])
ais_global.set_global_my_ip(pl["ip"])
icon = "mdi:access-point-network"
friendly_name = "Lokalny adres IP"
if "type" in pl:
# see android ConnectivityManager
if type == "-1":
# TYPE_NONE
icon = "mdi:lan-disconnect"
friendly_name = "Lokalny adres IP - "
elif type == "9":
# TYPE_ETHERNET
icon = "mdi:ethernet"
friendly_name = "Lokalny adres IP (ethernet)"
elif type == "1":
# TYPE_WIFI
icon = "mdi:wifi-strength-4-lock"
friendly_name = "Lokalny adres IP (wifi)"
hass.states.async_set(
"sensor.internal_ip_address",
pl["ip"],
{"friendly_name": friendly_name, "icon": icon},
)
elif service.data["topic"] == "ais/player_status":
# try to get current volume
try:
message = json.loads(service.data["payload"])
ais_global.G_AIS_DAY_MEDIA_VOLUME_LEVEL = (
message.get("currentVolume", 0) / 100
)
except Exception:
_LOGGER.info(
"ais_global.G_AIS_DAY_MEDIA_VOLUME_LEVEL: "
+ str(ais_global.G_AIS_DAY_MEDIA_VOLUME_LEVEL)
)
if "ais_gate_client_id" in service.data:
json_string = json.dumps(service.data["payload"])
else:
json_string = service.data["payload"]
hass.async_run_job(
hass.services.async_call(
"media_player",
"play_media",
{
"entity_id": ais_global.G_LOCAL_EXO_PLAYER_ENTITY_ID,
"media_content_type": "exo_info",
"media_content_id": json_string,
},
)
)
elif service.data["topic"] == "ais/execute_script":
hass.services.call(
"ais_shell_command", "execute_script", {"script": service.data["payload"]}
)
elif service.data["topic"] == "ais/tts_voice":
# this is done only once on start to set the voice on hass from android
voice = service.data["payload"]
set_voice = "Jola lokalnie"
if voice == "pl-pl-x-oda-network":
set_voice = "Jola online"
elif voice == "pl-pl-x-oda#female_1-local":
set_voice = "Celina"
elif voice == "pl-pl-x-oda#female_2-local":
set_voice = "Anżela"
elif voice == "pl-pl-x-oda#female_3-local":
set_voice = "Asia"
elif voice == "pl-pl-x-oda#male_1-local":
set_voice = "Sebastian"
elif voice == "pl-pl-x-oda#male_2-local":
set_voice = "Bartek"
elif voice == "pl-pl-x-oda#male_3-local":
set_voice = "Andrzej"
current_voice = hass.states.get("input_select.assistant_voice").state
if current_voice != set_voice:
# we will inform the frame about change in EVENT_STATE_CHANGED listener
hass.async_run_job(
hass.services.async_call(
"input_select",
"select_option",
{"entity_id": "input_select.assistant_voice", "option": set_voice},
)
)
else:
# EVENT_STATE_CHANGED listener will not notice this change - publish info to frame about voice
hass.services.call(
"ais_ai_service",
"publish_command_to_frame",
{"key": "setTtsVoice", "val": voice},
)
elif service.data["topic"] == "ais/trim_memory":
_LOGGER.warning("trim_memory " + str(service.data["payload"]))
try:
import os
if str(service.data["payload"]) == "15":
# TRIM_MEMORY_RUNNING_CRITICAL
tot_m, used_m, free_m = map(
int, os.popen("free -t -m").readlines()[-1].split()[1:]
)
_LOGGER.warning(
"TRIM_MEMORY_RUNNING_CRITICAL, used memory: " + str(used_m)
)
# check if we can clear database
if "dbUrl" in ais_global.G_DB_SETTINGS_INFO:
if ais_global.G_DB_SETTINGS_INFO["dbUrl"].startswith(
"sqlite:///:memory:"
):
_LOGGER.warning("recorder -> purge keep_days: 0")
hass.services.call(
"recorder", "purge", {"keep_days": 0, "repack": True}
)
else:
# try to kill some heavy process
                # Get a list of all running processes sorted by highest memory usage
list_of_proc_objects = []
# Iterate over the list
for proc in psutil.process_iter():
try:
# Fetch process details as dict
pinfo = proc.as_dict(attrs=["pid", "name", "username"])
pinfo["vms"] = proc.memory_info().vms / (1024 * 1024)
# Append dict to list
list_of_proc_objects.append(pinfo)
except (
psutil.NoSuchProcess,
psutil.AccessDenied,
psutil.ZombieProcess,
):
pass
# Sort list of dict by key vms i.e. memory usage
list_of_proc_objects = sorted(
list_of_proc_objects,
key=lambda proc_obj: proc_obj["vms"],
reverse=True,
)
                # print the top 5 processes by memory usage
for elem in list_of_proc_objects[:5]:
_LOGGER.error("We should kill: " + str(elem))
except Exception as e:
pass
elif service.data["topic"] == "ais/trim_storage":
_LOGGER.warning("trim_storage " + str(service.data["payload"]))
_LOGGER.warning("ACTION_DEVICE_STORAGE_LOW report form Android")
# check if we can clear database
if hass.services.has_service("recorder", "purge"):
_LOGGER.warning("recorder -> purge keep_days: 0")
hass.services.call("recorder", "purge", {"keep_days": 0, "repack": True})
_LOGGER.warning("ais -> flush_logs")
hass.services.call("ais_shell_command", "flush_logs")
elif service.data["topic"] == "ais/sip_event":
event_data = {"event": str(service.data["payload"])}
hass.bus.fire("ais_sip_event", event_data)
_LOGGER.info("sip_event " + str(event_data))
else:
# TODO process this without mqtt
# player_status and speech_status
mqtt.async_publish(hass, service.data["topic"], service.data["payload"], 2)
# TODO
return
def _post_message(
message,
"
r"the values are in the correct form with the acceptable values\)"
r": \['CBMC_Nth'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"CBMC_Nth": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['CBMC_Ang'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"CBMC_Ang": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['CBMC_Dih'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"CBMC_Dih": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutputName'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutputName": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['CoordinatesFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"CoordinatesFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['RestartFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"RestartFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['CheckpointFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"CheckpointFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['ConsoleFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"ConsoleFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['BlockAverageFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"BlockAverageFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['HistogramFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"HistogramFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['DistName'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"DistName": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['HistName'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"HistName": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['RunNumber'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"RunNumber": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['RunLetter'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"RunLetter": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['SampleFreq'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"SampleFreq": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutEnergy'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutEnergy": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutPressure'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutPressure": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutMolNumber'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutMolNumber": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutDensity'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutDensity": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutVolume'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutVolume": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['OutSurfaceTension'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_2.conf",
"NVT",
10,
300,
input_variables_dict={"OutSurfaceTension": []},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['FreeEnergyCalc'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_1.conf",
"NVT",
10,
300,
input_variables_dict={
"FreeEnergyCalc": [],
"MoleculeType": ["ETH", 1],
"InitialState": 1,
"LambdaVDW": [0.1, 0.2, 0.4, 0.9],
"LambdaCoulomb": [0.1, 0.3, 0.8, 0.8],
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['MoleculeType'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_1.conf",
"NVT",
10,
300,
input_variables_dict={
"FreeEnergyCalc": [True, 10000],
"MoleculeType": ["ETH", []],
"InitialState": 1,
"LambdaVDW": [0.1, 0.2, 0.4, 0.9],
"LambdaCoulomb": [0.1, 0.3, 0.8, 0.8],
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['MoleculeType'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_1.conf",
"NVT",
10,
300,
input_variables_dict={
"FreeEnergyCalc": [True, 10000],
"MoleculeType": [["ETH"], 1],
"InitialState": 1,
"LambdaVDW": [0.1, 0.2, 0.4, 0.9],
"LambdaCoulomb": [0.1, 0.3, 0.8, 0.8],
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['MoleculeType'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_1.conf",
"NVT",
10,
300,
input_variables_dict={
"FreeEnergyCalc": [True, 10000],
"MoleculeType": [{"ETH": "1"}, 1],
"InitialState": 1,
"LambdaVDW": [0.1, 0.2, 0.4, 0.9],
"LambdaCoulomb": [0.1, 0.3, 0.8, 0.8],
},
)
with pytest.raises(
ValueError,
match=r"ERROR: The following input variables have "
r"bad values \(check spelling and for empty spaces in the keys or that "
r"the values are in the correct form with the acceptable values\)"
r": \['InitialState'\]",
):
gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_1.conf",
"NVT",
10,
300,
input_variables_dict={
"FreeEnergyCalc": [True, 10000],
"MoleculeType": ["ETH", 1],
"InitialState": [],
"LambdaVDW": [0.1, 0.2, | |
and reduced traces.
Positional arguments:
r: QRegistry
state: numpy array with the expected state of r
rdm0: numpy array with the expected reduced density matrix after
tracing out qubit 0.
rdm1: numpy array with the expected reduced density matrix after
tracing out qubit 1.
    Keyword arguments:
srt0: expected value for squared reduced trace after tracing out 0
srt1: expected value for squared reduced trace after tracing out 1
verbose: if messages with extra information should be printed
"""
if not np.allclose(r.get_state(), state):
if verbose:
print(r.get_state())
print(state)
print(r.get_state() == state)
print(" <NAME> visited your simulator...")
return False
dm = state * state.reshape((4, 1))
qdm = r.density_matrix()
if not np.allclose(qdm[:], dm):
if verbose:
print(r.density_matrix()[:])
print(dm)
print(r.density_matrix()[:] == dm)
print(" <NAME> visited your simulator...")
return False
qrdm0 = qdm.partial_trace(0)
if not np.allclose(qrdm0[:], rdm0):
if verbose:
print("RDM0")
print(qrdm0[:])
print(rdm0)
print(qrdm0[:] == rdm0)
print(" <NAME> visited your simulator...")
return False
qrdm1 = qdm.partial_trace(1)
if not np.allclose(qrdm1[:], rdm1):
if verbose:
print("RDM1")
print(qrdm1[:])
print(rdm1)
print(qrdm1[:] == rdm1)
print(" <NAME> visited your simulator...")
return False
qsrt0 = (qrdm0 @ qrdm0).trace()
if not np.allclose(qsrt0, srt0):
if verbose:
print("SRT0")
print(qrdm0[:])
print(qsrt0)
print(srt0)
print(qsrt0 == srt0)
print(" <NAME> visited your simulator...")
return False
qsrt1 = (qrdm1 @ qrdm1).trace()
if not np.allclose(qsrt1, srt1):
if verbose:
print("SRT1")
print(qsrt1)
print(srt1)
print(qsrt1 == srt1)
print(" <NAME> visited your simulator...")
return False
return True
def tool_test(verbose=False):
"""Test QRegistry states, density matrix, reduced dm and reduced trace."""
if verbose:
print(" Tools for QRegistry:")
reg = qj.QRegistry(2)
state = np.array([1, 0, 0, 0])
rdm0 = np.array([1, 0, 0, 0]).reshape((2, 2))
rdm1 = rdm0[:]
if not compare_state(reg, state, rdm0, rdm1, verbose=verbose):
del reg
raise AssertionError("Error on first step checking tools")
del state
del rdm0
del rdm1
reg2 = reg.apply_gate("H", targets=0)
del reg
reg = reg2
state = np.array([1/np.sqrt(2), 1/np.sqrt(2), 0, 0])
rdm0 = np.array([1, 0, 0, 0]).reshape((2, 2))
rdm1 = np.array([0.5, 0.5, 0.5, 0.5]).reshape((2, 2))
if not compare_state(reg, state, rdm0, rdm1, verbose=verbose):
del reg
raise AssertionError("Error on second step checking tools")
del state
del rdm0
del rdm1
reg2 = reg.apply_gate("X", targets=1, controls=0)
del reg
reg = reg2
state = np.array([1/np.sqrt(2), 0, 0, 1/np.sqrt(2)])
rdm0 = np.eye(2) * 0.5
rdm1 = rdm0[:]
if not compare_state(reg, state, rdm0, rdm1, srt0=0.5, srt1=0.5,
verbose=verbose):
del reg
raise AssertionError("Error on third step checking tools")
if verbose:
print(" Noice")
del reg
def measure_system_tests(nq, entangle=False, remove=False, verbose=False):
"""Test measurement with QSystem."""
if verbose:
print(f" Measure QSystem tests with entangle={entangle}:")
for id in range(nq):
reg = qj.QSystem(nq)
if entangle:
for control in range(1, nq, 2):
reg2 = reg.apply_gate("X", targets=control-1, controls=control)
if nq % 2 == 1:
reg2 = reg.apply_gate("X", targets=nq-2, controls=nq-1)
del reg
reg = reg2
reg2 = reg.apply_gate("X", targets=id)
del reg
reg = reg2
aux1, mes = reg.measure({id})
if nq > 1:
aux2, mes2 = aux1.measure({i for i in range(nq) if i != id})
del reg
if (not mes[id]
or (nq > 1 and any(mes2[i] for i in range(nq) if i != id))
or aux1.usable[id]
or not all(aux1.usable[i] for i in range(nq) if i != id)
or (nq > 1 and any(aux2.usable))):
if verbose:
print("M1:", mes)
print("M2:", mes2)
print("Check1:", not mes[id])
print("Check2:", (nq > 1 and any(mes2[i]
for i in range(nq)
if i != id)))
print("Check3:", aux1.usable[id])
print("Check4:", not all(aux1.usable[i]
for i in range(nq) if i != id))
print("Check5:", (nq > 1 and any(aux2.usable)))
print(" <NAME> visited your simulator...")
aux1.free()
if nq > 1:
aux2.free()
raise AssertionError("Error measuring states")
aux1.free()
if nq > 1:
aux2.free()
if verbose:
print(" Noice")
def add_operation_tests(qdesign, verbose=False):
"""Test add_line method of the given qstruct object."""
if verbose:
print(" add_line tests with " + qdesign.__name__ + ":")
qdes = qdesign(5, "Test")
cons = {1, 3}
acons = {2, 4}
qdes.add_operation("X", targets=0, controls=cons, anticontrols=acons)
if len(qdes.get_operations()) != 1:
raise AssertionError("Wrong operations list size: " +
f"{len(qdes.get_operations())}")
op_data = qdes.get_operations()[0]
if op_data["gate"]._str != "X" or op_data["targets"] != [0] \
or op_data["controls"] != cons or op_data["anticontrols"] != acons:
if verbose:
print(op_data)
print(" Michael Bay visited your simulator...")
raise AssertionError("Wrong operation added")
if verbose:
print(" Noice")
def _deutsch_aux(executor, nq, gate):
circuit = DJAlgCircuit(nq, gate)
mess = executor.execute(circuit)
mes = mess[0][0]
reg2 = qj.QSystem(nq) # Qubits (x1, ..., xn, y) initialized to zero
aux = reg2.apply_gate("X", targets=nq-1) # Qubit y set to one
del reg2
reg2 = aux
# Apply Hadamard to all qubits
for i in range(nq):
aux = reg2.apply_gate("H", targets=i)
del reg2
reg2 = aux
# Applied U (oracle)
aux = reg2.apply_gate(gate, targets=[i for i in range(nq)])
del reg2
reg2 = aux
# Applied Hadamard to (x1, ..., xn), nothing applied to y qubit
for i in range(nq - 1):
aux = reg2.apply_gate("H", targets=i)
del reg2
reg2 = aux
# We measure (x1, ..., xn) qubits
aux, mes2 = reg2.measure({i for i in range(nq - 1)})
del reg2
del aux
# If any qubit (x1, ..., xn) is 1, balanced. Otherwise constant.
return mes, mes2
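# Reminder of why the check above works (standard Deutsch-Jozsa argument, not
# part of the original comments): with k = nq - 1 input qubits, the final
# amplitude of |0...0> on the input register is
#     (1 / 2**k) * sum over x of (-1)**f(x),
# which is +/-1 when f is constant and 0 when f is balanced. Hence measuring
# any input qubit as 1 certifies a balanced oracle, while all zeros (with
# certainty) indicates a constant one.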
def deutschTests(nq, verbose=False, useSystem=False, optimize=False):
"""Test Deutsch-Jozsa algorithm for the specified number of qubits."""
if verbose:
print(" Deutsch circuit (" + (qj.QSystem.__name__ if useSystem
else qj.QRegistry.__name__) + "):")
executor = qj.Drewom(qmachine="doki",
extra={"num_threads": -1,
"random_generator": np.random.rand,
"use_system": useSystem})
for id in range(nq - 1):
gate = Bal(nq, id)
mes, mes2 = _deutsch_aux(executor, nq, gate)
if not mes == mes2 or not any(mes):
if verbose:
print(mes)
print(mes2)
print(mes == mes2)
print(" <NAME> visited your simulator...")
raise AssertionError("Error checking DJ results")
for id in range(2):
gate = Const(nq, twice=(id == 1))
mes, mes2 = _deutsch_aux(executor, nq, gate)
if not mes == mes2 or any(mes):
if verbose:
print(mes)
print(mes2)
print(mes == mes2)
print(" <NAME> visited your simulator...")
raise AssertionError("Error checking DJ results")
if verbose:
print(" Noice")
def teleportation_tests(verbose=False, useSystem=False, optimize=False):
"""Execute teleportation algorithm related tests."""
rands = np.random.rand(3) * 2 * np.pi - np.pi
gate = "U(" + ",".join([str(angle)
for angle in rands]) + ")"
initialValue = rnd.randrange(2)
if verbose:
print(" Teleportation circuit (" + (qj.QSystem.__name__ if useSystem
else qj.QRegistry.__name__) + "):")
print(" Gate: " + gate)
print(" Initial value: " + str(initialValue))
executor = qj.Drewom(qmachine="doki",
extra={"num_threads": -1,
"random_generator": np.random.rand,
"use_system": useSystem,
"return_struct": True})
circuit = TeleportationCircuit(gate)
mess = executor.execute(circuit)
reg, mes = mess[0]
reg2 = qj.QRegistry(1)
aux = reg2.apply_gate(gate)
del reg2
reg2 = aux
if not np.allclose(reg.get_state(), reg2.get_state()):
if verbose:
print("Ops:", circuit.get_operations())
print(reg.get_state())
print(reg2.get_state())
print(reg.get_state() == reg2.get_state())
print(mes)
print(" <NAME> visited your simulator...")
del reg
del reg2
raise AssertionError("Error checking teleportation result!")
else:
if verbose:
print(" Noice")
del reg
del reg2
def all_gate_tests(seed=None, verbose=False):
"""Execute all gate tests."""
if not (seed is None):
qj.set_seed(seed)
rnd.seed(seed)
np.random.seed(seed)
result = [(0, 0) for i in range(15)] # We have 15 tests
# H gate tests
result[0] = gate_tests("H", verbose=verbose, hasInv=False, nArgs=0)
# X gate tests
result[1] = gate_tests("X", verbose=verbose, hasInv=False, nArgs=0)
# Y gate tests
result[2] = gate_tests("Y", verbose=verbose, hasInv=False, nArgs=0)
# Z gate tests
result[3] = gate_tests("Z", verbose=verbose, hasInv=False, nArgs=0)
# SqrtX gate tests
result[4] = gate_tests("SqrtX", verbose=verbose, hasInv=True, nArgs=0)
# RX gate tests
result[5] = gate_tests("RX", verbose=verbose, hasInv=True, nArgs=1)
# RY gate tests
result[6] = gate_tests("RY", verbose=verbose, hasInv=True, nArgs=1)
# RZ gate tests
result[7] = gate_tests("RZ", verbose=verbose, hasInv=True, nArgs=1)
# Phase shift gate tests
result[8] = gate_tests("R", verbose=verbose, hasInv=True, nArgs=1)
# Roots of unity gate tests
result[9] = gate_tests("RUnity", verbose=verbose, hasInv=True, nArgs=1)
# Partial Deutsch gate tests
result[10] = gate_tests("HalfDeutsch", verbose=verbose,
hasInv=True, nArgs=1)
# U gate tests
result[11] = gate_tests("U", verbose=verbose, hasInv=True, nArgs=3)
# U3 gate tests
result[12] = gate_tests("U3", verbose=verbose, hasInv=True, nArgs=3)
# U2 gate tests
result[13] = gate_tests("U2", verbose=verbose, hasInv=True, nArgs=2)
# U1 gate tests
result[14] = gate_tests("U1", verbose=verbose, hasInv=True, nArgs=1)
return result
def data_structure_tests(minqubits, maxqubits, seed=None, verbose=False,
QItem=qj.QRegistry):
"""Execute all data structure tests."""
if not (seed is None):
rnd.seed(seed)
np.random.seed(seed)
for nq in range(minqubits, maxqubits + 1):
if verbose:
print("Testing with " + str(nq) + " qubit " + QItem.__name__)
one_gate_tests(nq, verbose=verbose, QItem=QItem)
if nq >= 2:
two_gate_tests(nq, verbose=verbose, QItem=QItem)
controlled_gate_tests(nq, verbose=verbose, QItem=QItem)
if QItem == qj.QRegistry:
measure_registry_tests(nq, verbose=verbose)
else:
measure_system_tests(nq, entangle=False, verbose=verbose)
if nq >= 2:
                measure_system_tests(nq,
freq2:
if icpu not in freq1:
equal=False
break
ff1=float(freq1[icpu])
ff2=float(freq2[icpu])
if ff2!=0:
diff=ff1/ff2
else:
equal=False
break
if diff<0.98 or diff>1.02:
equal=False
break
if not equal:
if o=='con':
ck.out('CPU frequency changed over iterations:')
ck.out('')
i['fail_reason']='frequency changed during experiments'
i['fail']='yes'
else:
ck.out('CPU frequency did not change ...')
ck.out('')
###############################################################################################################
# PIPELINE SECTION: Extract cTuning/MILEPOST static program features
cs='yes'
extracted_milepost_features=False
if i.get('fail','')!='yes' and milepost=='yes' and \
(compile_only_once!='yes' or ai==0) and \
(srn==0 or (srn>0 and i.get('repeat_compilation','')=='yes')):
if o=='con':
ck.out(sep)
ck.out('Extract MILEPOST/cTuning static program features ...')
ck.out('')
# Check that milepost repo exists
rmil=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['repo'],
'data_uoa':cfg['repo_deps']['reproduce-milepost-project']})
if rmil['return']>0:
if rmil['return']!=16: return rmil
# Suggest to install MILEPOST repo
if o=='con':
rx=ck.inp({'text':'You need CK repo "reproduce-milepost-project" to extract static features. Install (Y/n)? '})
x=rx['string'].strip().lower()
ck.out('')
if x!='n':
ck.out(sep)
rmil=ck.access({'action':'pull',
'module_uoa':cfg['module_deps']['repo'],
'data_uoa':'reproduce-milepost-project',
'out':'con'})
if rmil['return']>0: return rmil
ck.out(sep)
# Set milepost tag to compiler deps
mcdeps=copy.deepcopy(cdeps)
mcdeps['compiler']={
"local": "yes",
"sort": 1,
"tags": "compiler,lang-c,ctuning-cc"}
mflags=flags
if mflags!='': mflags+=' '
mflags='-O3 --ct-extract-features'
cl=i.get('clean','')
if cl=='' and i.get('no_clean','')!='yes' and skip_exec!='yes' and srn==0: cl='yes'
if meta.get('no_compile','')!='yes' and no_compile!='yes':
if o=='con' and cl=='yes':
ck.out('Cleaning working directory ...')
ck.out('')
ii={'sub_action':'compile',
'target':target,
'target_os':tos,
'device_id':tdid,
'host_os':hos,
'path':pdir,
'meta':meta,
'deps':mcdeps,
'deps_cache':deps_cache,
'reuse_deps':reuse_deps,
'generate_rnd_tmp_dir':grtd,
'tmp_dir':tdir,
'clean':cl,
'skip_clean_after':sca,
'compile_type':ctype,
'flags':mflags,
'lflags':lflags,
'statistical_repetition':srn,
'autotuning_iteration':ati,
'console':cons,
'env':env,
'extra_env':eenv,
'compiler_vars':cv,
'no_vars':ncv,
'remove_compiler_vars':rcv,
'extra_env_for_compilation':eefc,
'compile_timeout':xcto,
'speed':espeed,
'add_rnd_extension_to_bin':are,
'add_save_extension_to_bin':ase,
'out':oo}
r=process_in_dir(ii)
if r['return']>0: return r
misc=r['misc']
tdir=misc.get('tmp_dir','')
cs=misc.get('compilation_success','')
if cs=='no':
x='MILEPOST feature extraction failed'
if misc.get('fail_reason','')!='': x+=' - '+misc['fail_reason']
i['fail_reason']=x
i['fail']='yes'
# Process features
if o=='con' and cl=='yes':
ck.out('')
ck.out('Converting MILEPOST/cTuning features to JSON ...')
ck.out('')
feat={}
x1=os.path.join(pdir, tdir)
x=os.listdir(x1)
for y in x:
if os.path.isfile(y) and y.startswith('ici_features_function.') and y.endswith('.fre.ft'):
fun=y[22:-7]
feat1={}
rz=ck.load_text_file({'text_file':y})
if rz['return']==0:
x2=rz['string'].strip().split(',')
for z in x2:
z1=z.split('=')
if len(z1)>1:
zk=z1[0].strip()[2:]
zv=z1[1].strip()
feat1[zk]=float(zv)
feat[fun]=feat1
features['program_static_milepost_features']=feat
extracted_milepost_features=True
###############################################################################################################
# PIPELINE SECTION: Compile program
cs='yes'
if i.get('fail','')!='yes' and no_compile!='yes' and \
(compile_only_once!='yes' or ai==0) and \
(srn==0 or (srn>0 and i.get('repeat_compilation','')=='yes')):
if o=='con':
ck.out(sep)
ck.out('Compile program ...')
ck.out('')
cl=i.get('clean','')
if cl=='' and i.get('no_clean','')!='yes' and skip_exec!='yes' and srn==0: cl='yes'
if meta.get('no_compile','')!='yes' and no_compile!='yes':
if o=='con' and cl=='yes':
ck.out('Cleaning working directory ...')
ck.out('')
ii={'sub_action':'compile',
'target':target,
'target_os':tos,
'device_id':tdid,
'host_os':hos,
'path':pdir,
'meta':meta,
'deps':cdeps,
'generate_rnd_tmp_dir':grtd,
'tmp_dir':tdir,
'clean':cl,
'skip_clean_after':sca,
'compile_type':ctype,
'flags':flags,
'lflags':lflags,
'statistical_repetition':srn,
'autotuning_iteration':ati,
'console':cons,
'env':env,
'speed':espeed,
'params':params,
'extra_env':eenv,
'compiler_vars':cv,
'no_vars':ncv,
'remove_compiler_vars':rcv,
'extra_env_for_compilation':eefc,
'compile_timeout':xcto,
'compute_platform_id':compute_platform_id,
'compute_device_id':compute_device_id,
'add_rnd_extension_to_bin':are,
'add_save_extension_to_bin':ase,
'out':oo}
r=process_in_dir(ii)
if r['return']>0: return r
misc=r['misc']
if 'add_to_state' in misc:
state.update(misc['add_to_state'])
del(misc['add_to_state'])
if 'add_to_features' in misc:
features.update(misc['add_to_features'])
del(misc['add_to_features'])
if 'add_to_choices' in misc:
choices.update(misc['add_to_choices'])
del(misc['add_to_choices'])
tdir=misc.get('tmp_dir','')
if tdir!='': state['tmp_dir']=tdir
cch=r['characteristics']
cch['joined_compiler_flags']=flags # Add joint compilation flags as string from previous sections
chars['compile']=cch
xct=cch.get('compilation_time',-1)
xos=cch.get('obj_size',-1)
cs=misc.get('compilation_success','')
if cs=='no':
x='compilation failed'
if misc.get('fail_reason','')!='': x+=' - '+misc['fail_reason']
i['fail_reason']=x
i['fail']='yes'
if last_md5!='':
md5=cch.get('md5_sum','')
if md5!='' and md5==last_md5:
i['fail_reason']=last_md5_fail_text
i['fail']='yes'
texe=misc.get('target_exe','')
state['target_exe']=texe
###############################################################################################################
# PIPELINE SECTION: check if record MILEPOST features (after clean)
if extracted_milepost_features and milepost_out_file!='':
r=ck.save_json_to_file({'json_file':milepost_out_file, 'dict':feat})
if r['return']>0: return r
###############################################################################################################
# PIPELINE SECTION: Check if dataset is the same
sdc='no'
if tsd=='yes' and (ati!=0 or srn!=0):
sdc='yes'
###############################################################################################################
# PIPELINE SECTION: perf
perf_tmp=''
if perf=='yes':
if o=='con':
ck.out(sep)
ck.out('Preparing perf ...')
rx=ck.gen_tmp_file({'prefix':'tmp-', 'remove_dir':'yes'})
if rx['return']>0: return rx
perf_tmp=rx['file_name']
prcmd+='perf stat -x, -o '+perf_tmp
if perf_tmp not in rof: rof.append(perf_tmp)
###############################################################################################################
# PIPELINE SECTION: Intel vTune
vtune_tmp=''
vtune_tmp1=''
if vtune=='yes':
if o=='con':
ck.out(sep)
ck.out('Preparing vtune ...')
if 'vtune_profiler' not in cdeps:
cdeps['vtune_profiler']={'local':'yes', 'tags':'perf,intel,vtune'}
if hplat=='win':
eenv='\nrd /S /Q r000\n'+eenv
else:
eenv='\nrm -rf r000\n'+eenv
prcmd+='amplxe-runss -r r000 -event-based-counts --'
rx=ck.gen_tmp_file({'prefix':'tmp-', 'remove_dir':'yes'})
if rx['return']>0: return rx
vtune_tmp=rx['file_name']
vtune_tmp1=vtune_tmp+'.csv'
if vtune_tmp1 not in rof: rof.append(vtune_tmp1)
eppc+='\namplxe-cl -report hw-events -r r000 -report-output='+vtune_tmp+' -format csv -csv-delimiter=comma -filter module=$#ONLY_BIN_FILE#$'
###############################################################################################################
# PIPELINE SECTION: Preload dividiti's OpenCL profiler.
if odp=='yes':
if hplat=='win':
return {'return':1, 'error':'dividiti\'s OpenCL profiler is currently not supported under Windows'}
if 'dvdt_prof' not in cdeps:
cdeps['dvdt_prof']={'local':'yes', 'tags':'tool,opencl,dvdt,prof,dvdt-prof2'}
eenv='export LD_PRELOAD="${CK_ENV_TOOL_DVDT_PROF_DYNAMIC_NAME_FULL}"; '+eenv
fodp='tmp-dvdt-prof-output.json'
if os.path.isfile(fodp): os.remove(fodp)
###############################################################################################################
# PIPELINE SECTION: Set MALI HWC counter collector
if mali_hwc=='yes':
# Call process output vector
r=ck.access({'action':'run',
'module_uoa':cfg['module_deps']['script'],
'data_uoa': cfg['data_deps']['mali_hwc'], #'mali-hwc',
'code':'process',
'func':'config'})
if r['return']>0:
return {'return':r['return'], 'error':'Problem with MALI HWC script ('+r['error']+')'}
###############################################################################################################
# PIPELINE SECTION: Valgrind
if valgrind=='yes':
if o=='con':
ck.out(sep)
ck.out('Adding valgrind ...')
x=cdeps['tool-valgrind'].get('cus',{}).get('cmd_prefix',{}).get(tplat,'')
if x=='':
return {'return':1, 'error':'command line for architecture simulator is not defined'}
prcmd+=x
###############################################################################################################
# PIPELINE SECTION: Architecture simulator
if sim=='yes':
if o=='con':
ck.out(sep)
ck.out('Adding architecture simulator ...')
x=cdeps['arch-sim'].get('cus',{}).get('cmd_prefix',{}).get(tplat,'')
if x=='':
return {'return':1, 'error':'command line for architecture simulator is not defined'}
prcmd+=x
###############################################################################################################
# PIPELINE SECTION: Run program
xdeps={}
if i.get('fail','')!='yes' and cs!='no' and no_run!='yes':
if o=='con':
ck.out(sep)
ck.out('Running program ...')
# Remove some keys from env
if meta.get('skip_remove_run_env_keys','')!='yes':
for k in list(env.keys()):
if k.startswith('CK_AR_') or k.startswith('CK_CC_') or \
k.startswith('CK_CXX_') or k.startswith('CK_CMAKE_') or \
k.startswith('CK_COMPILER_') or k.startswith('CK_LINKER_'):
del(env[k])
# Check run cmd keys subst
for k in choices:
if k.startswith('run_cmd_key_'):
rcsub[k]=choices[k]
ii={'sub_action':'run',
'target':target,
'target_os':tos,
'device_id':tdid,
'host_os':hos,
'path':pdir,
'console':cons,
'meta':meta,
'deps':cdeps,
'deps_cache':deps_cache,
'reuse_deps':reuse_deps,
'cmd_key':kcmd,
'dataset_uoa':dduoa,
'dataset_file':ddfile,
'generate_rnd_tmp_dir':grtd,
'tmp_dir':tdir,
'skip_clean_after':sca,
'compile_type':ctype,
'speed':espeed,
'sudo':isd,
'energy':sme,
'affinity':aff,
'flags':flags,
'lflags':lflags,
'repeat':repeat,
'pre_run_cmd':prcmd,
'run_output_files':rof,
'skip_calibration':rsc,
'calibration_time':rct,
'calibration_max':rcm,
'params':params,
'post_process_script_uoa':pp_uoa,
'post_process_subscript':pp_name,
'post_process_params':pp_params,
'statistical_repetition':srn,
'autotuning_iteration':ati,
'compute_platform_id':compute_platform_id,
'compute_device_id':compute_device_id,
'skip_output_validation':vout_skip,
'output_validation_repo':vout_repo,
'program_output_uoa':program_output_uoa,
'overwrite_reference_output':vout_over,
'skip_dataset_copy':sdc,
'skip_exec':skip_exec,
'env':env,
'extra_env':eenv,
'extra_run_cmd':ercmd,
'debug_run_cmd':drcmd,
'extra_post_process_cmd':eppc,
'run_cmd_substitutes':rcsub,
'compiler_vars':cv,
'no_vars':ncv,
'skip_print_timers':sptimers,
'remove_compiler_vars':rcv,
'extra_env_for_compilation':eefc,
'run_timeout':xrto,
'out':oo}
# Check if need to clean (when no_compile in meta and clean is yes)
cl=i.get('clean','')
if cl=='' and i.get('no_clean','')!='yes' and skip_exec!='yes' and srn==0: cl='yes'
if cl=='yes' and meta.get('no_compile','')=='yes':
ii['clean']='yes'
r=process_in_dir(ii)
if r['return']>0: return r
misc=r['misc']
if 'add_to_state' in misc:
state.update(misc['add_to_state'])
del(misc['add_to_state'])
if 'add_to_features' in misc:
features.update(misc['add_to_features'])
del(misc['add_to_features'])
if 'add_to_choices' in misc:
choices.update(misc['add_to_choices'])
del(misc['add_to_choices'])
tdir=misc.get('tmp_dir','')
if tdir!='': state['tmp_dir']=tdir
rch=r['characteristics']
chars['run']=rch
xdeps=r.get('deps',{})
if len(xdeps)>0:
if 'dependencies' not in i:
i['dependencies']={}
i['dependencies'].update(xdeps)
csuc=misc.get('calibration_success',True)
rs=misc.get('run_success','')
rsf=misc.get('fail_reason','')
repeat=rch.get('repeat','')
if repeat!='':
state['repeat']=repeat
choices['repeat']=repeat
if rs=='no' or not csuc:
x='execution failed'
if rsf!='': x+=' - '+rsf
i['fail_reason']=x
i['fail']='yes'
###############################################################################################################
# PIPELINE SECTION: set CPU frequency to ondemand to "calm" system (if supported)
if scpuf!='' and sic!='yes':
if o=='con':
ck.out(sep)
ck.out('Attempting to set CPU frequency to ondemand to "calm" system (if supported) ...')
ck.out('')
ii={'action':'set_freq',
'module_uoa':cfg['module_deps']['platform.cpu'],
'value':'ondemand',
'target':target,
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'skip_print_os':'yes',
'skip_device_init':sdi,
'skip_info_collection':sic,
'out':oo}
r=ck.access(ii)
if r['return']>0: return r
###############################################################################################################
# PIPELINE SECTION: set GPU frequency to ondemand to "calm" system (if supported)
if sgpuf!='' and sic!='yes':
if o=='con':
ck.out(sep)
ck.out('Attempting to set GPU frequency to ondemand to "calm" system (if supported) ...')
ck.out('')
ii={'action':'set_freq',
'module_uoa':cfg['module_deps']['platform.gpu'],
'value':'ondemand',
'target':target,
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'skip_print_os':'yes',
'skip_device_init':sdi,
'skip_info_collection':sic,
'out':oo}
r=ck.access(ii)
if r['return']>0: return r
###############################################################################################################
# PIPELINE SECTION: finish vtune
if vtune=='yes':
if o=='con':
ck.out(sep)
ck.out('Processing Intel vTune output ...')
ck.out('')
if os.path.isfile(vtune_tmp1):
import csv
clk='Hardware Event Count:CPU_CLK_UNHALTED.THREAD:Self'
inst='Hardware Event Count:INST_RETIRED.ANY:Self'
f=open(vtune_tmp1, 'rb')
c=csv.reader(f, delimiter=',')
hc=[]
val={}
first=True
for q in c:
if first:
first=False
if len(q)>1:
for k in range(2,len(q)):
hc.append(q[k])
else:
func=q[0]
module=q[1]
if len(q)>1:
for k in range(2,len(q)):
val[hc[k-2]]=q[k]
break
f.close()
if sca!='yes':
os.remove(vtune_tmp1)
if len(val)>0:
if clk in val and inst in val:
cpi=0
try:
cpi=float(val[clk])/float(val[inst])
except ValueError:
pass
if cpi!=0:
chars['run']['global_cpi']=cpi
chars['run']['global_clock']=float(val[clk])
chars['run']['global_instructions_retired']=float(val[inst])
###############################################################################################################
# PIPELINE SECTION: finish perf
if perf=='yes':
if o=='con':
ck.out(sep)
ck.out('Processing perf output ...')
ck.out('')
if os.path.isfile(perf_tmp):
ii={'text_file':perf_tmp,
'split_to_list':'yes',
'encoding':sys.stdin.encoding}
# if sca!='yes':
# ii['delete_after_read']='yes'
rx=ck.load_text_file(ii)
if rx['return']>0: return rx
glst=rx['lst']
clk='cycles'
inst='instructions'
val={}
found=False
for lx in glst:
l=lx.strip()
if found:
x=l.find(',')
if x>0:
v=l[0:x]
try:
v=float(v)
except ValueError:
pass
y=l.rfind(',')
if y>x:
key=l[y+1:]
val[key]=v
elif l.find(',task-clock')>0:
found=True
if sca!='yes':
os.remove(perf_tmp)
if len(val)>0:
if clk in val and inst in val:
cpi=0
try:
cpi=val[clk]/val[inst]
except ValueError:
pass
if cpi!=0:
chars['run']['global_cpi']=cpi
chars['run']['global_clock']=val[clk]
chars['run']['global_instructions_retired']=val[inst]
chars['run']['perf']=val
###############################################################################################################
# PIPELINE SECTION: finish gprof
if gprof=='yes':
if o=='con':
ck.out(sep)
ck.out('Processing gprof output ...')
ck.out('')
if os.path.isfile(cfg['gprof_file']):
ii={'text_file':gprof_tmp,
'split_to_list':'yes',
'encoding':sys.stdin.encoding}
if sca!='yes':
ii['delete_after_read']='yes'
rx=ck.load_text_file(ii)
if rx['return']>0: return rx
glst=rx['lst']
process=False
cgprof={}
for g in glst:
if g.startswith(' time'):
process=True
continue
if process:
if g=='':
break
gg1=g.strip().split(' ')
gg=[]
for g1 in gg1:
g1x=g1.strip()
if g1x!='': gg.append(g1x)
igg=len(gg)
if igg>0:
x1=gg[igg-1]
cgprof[x1]=gg
ck.out(' * '+str(x1)+' : '+str(gg[0])+' % ')
chars['run']['gprof']=cgprof
chars['run']['gprof_list']=glst
else:
ck.out('WARNING: gprof output was not found ...')
ck.out('')
###############################################################################################################
# PIPELINE SECTION: Post-process MALI HWC counters
if mali_hwc=='yes':
# Call process output vector
r=ck.access({'action':'run',
'module_uoa':cfg['module_deps']['script'],
'data_uoa': cfg['data_deps']['mali_hwc'], #'mali-hwc',
'code':'process',
'func':'read'})
if r['return']>0:
            return {'return':r['return'],
import unittest
import datetime
from contextlib import contextmanager
import rfc3339
from falcon_heavy.core.context import (
make_specification_conversion_context,
make_request_conversion_context
)
from falcon_heavy.core.openapi import (
SchemaObjectType,
ComponentsObjectType
)
from falcon_heavy.core.factories import TypeFactory
from falcon_heavy.core.types import Error, SchemaError, Path
class FactoriesTest(unittest.TestCase):
@staticmethod
def _load(object_type, specification):
return object_type().convert(
specification,
Path(''),
**make_specification_conversion_context(
'', specification)
)
@staticmethod
def _generate_type(spec):
return TypeFactory().generate(spec)
@staticmethod
def _convert(type_, payload, strict=True):
return type_.convert(
payload,
Path(''),
strict=strict,
**make_request_conversion_context()
)
@contextmanager
def assertSchemaErrorRaises(self, expected_errors=None):
with self.assertRaises(SchemaError) as ctx:
yield
if expected_errors is not None:
self.assertEqual(len(expected_errors), len(ctx.exception.errors))
for path, message in expected_errors.items():
self.assertTrue(
Error(Path(path), message) in ctx.exception.errors,
msg="An error at %s with message \"%s\" was expected, but these errors were received:\n%s" % (
path, message, ctx.exception.errors)
)
def test_generate_one_of(self):
spec = {
'x-schemas': {
'Cat': {
'type': 'object',
'additionalProperties': False,
'properties': {
'name': {
'type': 'string'
}
}
},
'Dog': {
'type': 'object',
'additionalProperties': False,
'properties': {
'nickname': {
'type': 'string'
}
}
}
},
'type': 'object',
'properties': {
'pet': {
'oneOf': [
{
'$ref': '#/x-schemas/Cat'
},
{
'$ref': '#/x-schemas/Dog'
}
]
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
cat_payload = {
'pet': {
'name': 'Misty'
}
}
cat = self._convert(type_, cat_payload)
self.assertEqual(cat['pet']['name'], 'Misty')
dog_payload = {
'pet': {
'nickname': 'Max'
}
}
dog = self._convert(type_, dog_payload)
self.assertEqual(dog['pet']['nickname'], 'Max')
def test_generate_oneof_with_implicit_discriminator(self):
spec = {
'schemas': {
'Cat': {
'type': 'object',
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'name': {
'type': 'string'
}
}
},
'Dog': {
'type': 'object',
'additionalProperties': False,
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'nickname': {
'type': 'string'
}
}
},
'Pet': {
'type': 'object',
'properties': {
'pet': {
'oneOf': [
{
'$ref': '#/schemas/Cat'
},
{
'$ref': '#/schemas/Dog'
}
],
'discriminator': {
'propertyName': 'pet_type'
}
}
}
}
}
}
spec = self._load(ComponentsObjectType, spec)
type_ = self._generate_type(spec['schemas']['Pet'])
cat_payload = {
'pet': {
'pet_type': 'Cat',
'name': 'Misty'
}
}
cat = self._convert(type_, cat_payload)
self.assertEqual(cat['pet']['name'], 'Misty')
self.assertEqual(cat['pet']['pet_type'], 'Cat')
ambiguous_cat_payload = {
'pet': {
'pet_type': '',
'name': 'Misty'
}
}
with self.assertSchemaErrorRaises({
'#/pet': "The discriminator value must be equal to one of the following values: 'Cat', 'Dog'"
}):
self._convert(type_, ambiguous_cat_payload)
dog_with_cat_properties = {
'pet': {
'pet_type': 'Dog',
'name': 'Misty'
}
}
with self.assertSchemaErrorRaises({
'#/pet': "When `additionalProperties` is False, no unspecified properties are allowed. "
"The following unspecified properties were found: 'name'"
}):
self._convert(type_, dog_with_cat_properties)
def test_generate_one_of_with_semi_explicit_discriminator(self):
spec = {
'schemas': {
'Cat': {
'type': 'object',
'additionalProperties': False,
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'name': {
'type': 'string'
}
}
},
'Dog': {
'type': 'object',
'additionalProperties': False,
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'nickname': {
'type': 'string'
}
}
},
'Pet': {
'type': 'object',
'properties': {
'pet': {
'oneOf': [
{
'$ref': '#/schemas/Cat'
},
{
'$ref': '#/schemas/Dog'
}
],
'discriminator': {
'propertyName': 'pet_type',
'mapping': {
'1': '#/schemas/Cat',
}
}
}
}
}
}
}
spec = self._load(ComponentsObjectType, spec)
type_ = self._generate_type(spec['schemas']['Pet'])
cat_payload = {
'pet': {
'pet_type': '1',
'name': 'Misty'
}
}
cat = self._convert(type_, cat_payload)
self.assertEqual(cat['pet']['name'], 'Misty')
dog_payload = {
'pet': {
'pet_type': 'Dog',
'nickname': 'Max'
}
}
dog = self._convert(type_, dog_payload)
self.assertEqual(dog['pet']['nickname'], 'Max')
unknown_payload = {
'pet': {
'pet_type': '2',
'nickname': 'Max'
}
}
with self.assertSchemaErrorRaises({
'#/pet': "The discriminator value must be equal to one of the following values: '1', 'Cat', 'Dog'"
}):
self._convert(type_, unknown_payload)
def test_generate_discriminator_with_inline_schemas(self):
# Discriminator with inline schemas
spec = {
'x-schemas': {
'Cat': {
'type': 'object',
'additionalProperties': False,
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'name': {
'type': 'string'
}
}
},
'Dog': {
'type': 'object',
'additionalProperties': False,
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'nickname': {
'type': 'string'
}
}
}
},
'type': 'object',
'properties': {
'pet': {
'oneOf': [
{
'$ref': '#/x-schemas/Cat'
},
{
'$ref': '#/x-schemas/Dog'
},
{
'type': 'object',
'additionalProperties': False,
'required': [
'pet_type'
],
'properties': {
'pet_type': {
'type': 'string'
},
'last_name': {
'type': 'string'
}
}
}
],
'discriminator': {
'propertyName': 'pet_type'
}
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
inline_payload = {
'pet': {
'pet_type': 2,
'last_name': 'Misty'
}
}
with self.assertSchemaErrorRaises({
'#/pet': "The discriminator value must be equal to one of the following values: 'Cat', 'Dog'"
}):
self._convert(type_, inline_payload)
def test_generate_anyOf(self):
spec = {
'x-schemas': {
'Cat': {
'type': 'object',
'additionalProperties': False,
'properties': {
'name': {
'type': 'string'
}
}
},
'Dog': {
'type': 'object',
'additionalProperties': False,
'properties': {
'nickname': {
'type': 'string'
}
}
}
},
'type': 'object',
'properties': {
'pet': {
'anyOf': [
{
'$ref': '#/x-schemas/Cat'
},
{
'$ref': '#/x-schemas/Dog'
}
]
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
cat_payload = {
'pet': {
'name': 'Misty'
}
}
cat = self._convert(type_, cat_payload)
self.assertEqual(cat['pet']['name'], 'Misty')
dog_payload = {
'pet': {
'nickname': 'Max'
}
}
dog = self._convert(type_, dog_payload)
self.assertEqual(dog['pet']['nickname'], 'Max')
not_any_payload = {
'pet': {
'weight': '10kg'
}
}
with self.assertSchemaErrorRaises({
'#/pet': "Does not match any schemas from `anyOf`",
'#/pet/0': "When `additionalProperties` is False, no unspecified properties are allowed. "
"The following unspecified properties were found: 'weight'",
'#/pet/1': "When `additionalProperties` is False, no unspecified properties are allowed. "
"The following unspecified properties were found: 'weight'"
}):
self._convert(type_, not_any_payload)
spec = {
'x-schemas': {
'Cat': {
'type': 'object',
'properties': {
'name': {
'type': 'string'
}
}
},
'Dog': {
'type': 'object',
'properties': {
'nickname': {
'type': 'string'
}
}
}
},
'type': 'object',
'properties': {
'pet': {
'anyOf': [
{
'$ref': '#/x-schemas/Cat'
},
{
'$ref': '#/x-schemas/Dog'
}
]
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
cat_dog_payload = {
'pet': {
'name': 'Misty',
'nickname': 'Max'
}
}
cat_dog = self._convert(type_, cat_dog_payload)
self.assertEqual(cat_dog['pet']['name'], 'Misty')
self.assertEqual(cat_dog['pet']['nickname'], 'Max')
def test_generate_allOf(self):
spec = {
'x-schemas': {
'Cat': {
'type': 'object',
'properties': {
'name': {
'type': 'string'
},
's': {
'type': 'integer'
}
}
},
'Dog': {
'type': 'object',
'properties': {
'nickname': {
'type': 'string'
}
}
}
},
'type': 'object',
'properties': {
'pet': {
'allOf': [
{
'$ref': '#/x-schemas/Cat'
},
{
'$ref': '#/x-schemas/Dog'
}
]
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
cat_dog_payload = {
'pet': {
'name': 'Misty',
'nickname': 'Max',
's': '45'
}
}
cat_dog = self._convert(type_, cat_dog_payload, strict=False)
self.assertEqual(cat_dog['pet']['name'], 'Misty')
self.assertEqual(cat_dog['pet']['nickname'], 'Max')
self.assertTrue(isinstance(cat_dog['pet']['s'], int))
def test_generate_merged(self):
spec = {
'x-schemas': {
'Cat': {
'type': 'object',
'properties': {
'name': {
'type': 'string'
},
's': {
'type': 'integer'
}
}
},
'Dog': {
'type': 'object',
'required': ['nickname'],
'properties': {
'nickname': {
'type': 'string'
}
},
'additionalProperties': False,
'x-patternProperties': {
'r.*': {
'type': 'integer'
}
}
}
},
'type': 'object',
'properties': {
'pet': {
'allOf': [
{
'$ref': '#/x-schemas/Cat'
},
{
'$ref': '#/x-schemas/Dog'
}
],
'x-merge': True
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
cat_dog_payload = {
'pet': {
'name': 'Misty',
'nickname': 'Max',
's': '45'
}
}
cat_dog = self._convert(type_, cat_dog_payload, strict=False)
self.assertEqual(cat_dog['pet']['name'], 'Misty')
self.assertEqual(cat_dog['pet']['nickname'], 'Max')
self.assertTrue(isinstance(cat_dog['pet']['s'], int))
invalid_cat_dog_payload = {
'pet': {
'name': 'Misty',
'nickname': 'Max',
's': 45,
'owner': 'Peter'
}
}
with self.assertSchemaErrorRaises({
'#/pet': "When `additionalProperties` is False, no unspecified properties are allowed. "
"The following unspecified properties were found: 'owner'"
}):
self._convert(type_, invalid_cat_dog_payload)
def test_generate_recursive_property(self):
spec = {
'properties': {
'payload': {},
'nested_nodes': {
'type': 'array',
'items': {
'$ref': '#/'
}
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
payload = {
'adsad': 'sdsd',
'payload': {
'ddd': 34
},
'nested_nodes': [
{
'payload': {},
'nested_nodes': [
{
'payload': {
'fdf': 54
}
}
]
},
{
'payload': {
'ff': 'dd'
}
}
]
}
root = self._convert(type_, payload)
self.assertEqual(root['adsad'], 'sdsd')
def test_defaults(self):
spec = {
'properties': {
'with_default': {
'type': 'integer',
'default': 5
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
payload = {}
obj = self._convert(type_, payload)
self.assertEqual(obj['with_default'], 5)
spec = {
'properties': {
'with_default': {
'type': 'string',
'pattern': r'^\+?\d{7,20}$',
'default': 'sdfdf'
}
}
}
spec = self._load(SchemaObjectType, spec)
type_ = self._generate_type(spec)
payload = {}
with self.assertSchemaErrorRaises({
'#/with_default': "Does not match the pattern"
}):
self._convert(type_, payload)
def | |
"dv": Variable(
"Meridional velocity tendency", V_GRID + TIMESTEPS, "m/s", "Meridional velocity tendency", write_to_restart=True
),
"du_cor": Variable("Change of u by Coriolis force", U_GRID, "m/s^2", "Change of u due to Coriolis force"),
"dv_cor": Variable("Change of v by Coriolis force", V_GRID, "m/s^2", "Change of v due to Coriolis force"),
"du_mix": Variable(
"Change of u by vertical mixing", U_GRID, "m/s^2", "Change of u due to implicit vertical mixing"
),
"dv_mix": Variable(
"Change of v by vertical mixing", V_GRID, "m/s^2", "Change of v due to implicit vertical mixing"
),
"du_adv": Variable("Change of u by advection", U_GRID, "m/s^2", "Change of u due to advection"),
"dv_adv": Variable("Change of v by advection", V_GRID, "m/s^2", "Change of v due to advection"),
"p_hydro": Variable("Hydrostatic pressure", T_GRID, "m^2/s^2", "Hydrostatic pressure"),
"kappaM": Variable("Vertical viscosity", T_GRID, "m^2/s", "Vertical viscosity"),
"kappaH": Variable("Vertical diffusivity", W_GRID, "m^2/s", "Vertical diffusivity"),
"surface_taux": Variable(
"Surface wind stress",
U_HOR,
"N/m^2",
"Zonal surface wind stress",
),
"surface_tauy": Variable(
"Surface wind stress",
V_HOR,
"N/m^2",
"Meridional surface wind stress",
),
"forc_rho_surface": Variable("Surface density flux", T_HOR, "kg / (m^2 s)", "Surface potential density flux"),
"psi": Variable(
"Streamfunction",
ZETA_HOR + TIMESTEPS,
"m^3/s",
"Barotropic streamfunction",
write_to_restart=True,
mask=ZETA_HOR_ERODED,
),
"dpsi": Variable(
"Streamfunction tendency", ZETA_HOR + TIMESTEPS, "m^3/s^2", "Streamfunction tendency", write_to_restart=True
),
"land_map": Variable("Land map", T_HOR, "", "Land map", dtype="int32"),
"isle": Variable("Island number", ISLE, "", "Island number"),
"psin": Variable(
"Boundary streamfunction",
ZETA_HOR + ISLE,
"m^3/s",
"Boundary streamfunction",
time_dependent=False,
mask=ZETA_HOR_ERODED,
),
"dpsin": Variable(
"Boundary streamfunction factor",
ISLE + TIMESTEPS,
"m^3/s^2",
"Boundary streamfunction factor",
write_to_restart=True,
),
"line_psin": Variable(
"Boundary line integrals", ISLE + ISLE, "m^4/s^2", "Boundary line integrals", time_dependent=False
),
"boundary_mask": Variable("Boundary mask", T_HOR + ISLE, "", "Boundary mask", time_dependent=False, dtype="bool"),
"line_dir_south_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"line_dir_north_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"line_dir_east_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"line_dir_west_mask": Variable(
"Line integral mask", T_HOR + ISLE, "", "Line integral mask", time_dependent=False, dtype="bool"
),
"K_gm": Variable("Skewness diffusivity", W_GRID, "m^2/s", "GM diffusivity, either constant or from EKE model"),
"K_iso": Variable("Isopycnal diffusivity", W_GRID, "m^2/s", "Along-isopycnal diffusivity"),
"K_diss_v": Variable(
"Dissipation of kinetic Energy",
W_GRID,
"m^2/s^3",
"Kinetic energy dissipation by vertical, rayleigh and bottom friction",
write_to_restart=True,
),
"K_diss_bot": Variable(
"Dissipation of kinetic Energy", W_GRID, "m^2/s^3", "Mean energy dissipation by bottom and rayleigh friction"
),
"K_diss_h": Variable(
"Dissipation of kinetic Energy", W_GRID, "m^2/s^3", "Kinetic energy dissipation by horizontal friction"
),
"K_diss_gm": Variable(
"Dissipation of mean energy",
W_GRID,
"m^2/s^3",
"Mean energy dissipation by GM (TRM formalism only)",
),
"P_diss_v": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by vertical diffusion"
),
"P_diss_nonlin": Variable(
"Dissipation of potential Energy",
W_GRID,
"m^2/s^3",
"Potential energy dissipation by nonlinear equation of state",
),
"P_diss_iso": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by isopycnal mixing"
),
"P_diss_skew": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by GM (w/o TRM)"
),
"P_diss_hmix": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by horizontal mixing"
),
"P_diss_adv": Variable(
"Dissipation of potential Energy", W_GRID, "m^2/s^3", "Potential energy dissipation by advection"
),
"P_diss_sources": Variable(
"Dissipation of potential Energy",
W_GRID,
"m^2/s^3",
"Potential energy dissipation by external sources (e.g. restoring zones)",
),
"u_wgrid": Variable("U on W grid", W_GRID, "m/s", "Zonal velocity interpolated to W grid points"),
"v_wgrid": Variable("V on W grid", W_GRID, "m/s", "Meridional velocity interpolated to W grid points"),
"w_wgrid": Variable("W on W grid", W_GRID, "m/s", "Vertical velocity interpolated to W grid points"),
"xt": Variable(
"Zonal coordinate (T)",
XT,
lambda settings: "degrees_east" if settings.coord_degree else "km",
"Zonal (x) coordinate of T-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"xu": Variable(
"Zonal coordinate (U)",
XU,
lambda settings: "degrees_east" if settings.coord_degree else "km",
"Zonal (x) coordinate of U-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"yt": Variable(
"Meridional coordinate (T)",
YT,
lambda settings: "degrees_north" if settings.coord_degree else "km",
"Meridional (y) coordinate of T-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"yu": Variable(
"Meridional coordinate (U)",
YU,
lambda settings: "degrees_north" if settings.coord_degree else "km",
"Meridional (y) coordinate of U-grid point",
time_dependent=False,
scale=lambda settings: 1 if settings.coord_degree else 1e-3,
),
"temp_source": Variable(
"Source of temperature",
T_GRID,
"K/s",
"Non-conservative source of temperature",
active=lambda settings: settings.enable_tempsalt_sources,
),
"salt_source": Variable(
"Source of salt",
T_GRID,
"g/(kg s)",
"Non-conservative source of salt",
active=lambda settings: settings.enable_tempsalt_sources,
),
"u_source": Variable(
"Source of zonal velocity",
U_GRID,
"m/s^2",
"Non-conservative source of zonal velocity",
active=lambda settings: settings.enable_momentum_sources,
),
"v_source": Variable(
"Source of meridional velocity",
V_GRID,
"m/s^2",
"Non-conservative source of meridional velocity",
active=lambda settings: settings.enable_momentum_sources,
),
"K_11": Variable(
"Isopycnal mixing coefficient",
T_GRID,
"m^2/s",
"Isopycnal mixing tensor component",
active=lambda settings: settings.enable_neutral_diffusion,
),
"K_22": Variable(
"Isopycnal mixing coefficient",
T_GRID,
"m^2/s",
"Isopycnal mixing tensor component",
active=lambda settings: settings.enable_neutral_diffusion,
),
"K_33": Variable(
"Isopycnal mixing coefficient",
T_GRID,
"m^2/s",
"Isopycnal mixing tensor component",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_ez": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Vertical isopycnal diffusion coefficient on eastern face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_nz": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Vertical isopycnal diffusion coefficient on northern face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_bx": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Zonal isopycnal diffusion coefficient on bottom face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"Ai_by": Variable(
"Isopycnal diffusion coefficient",
T_GRID + TENSOR_COMP,
"Meridional isopycnal diffusion coefficient on bottom face of T cell",
"1",
active=lambda settings: settings.enable_neutral_diffusion,
),
"B1_gm": Variable(
"Zonal component of GM streamfunction",
V_GRID,
"m^2/s",
"Zonal component of GM streamfunction",
active=lambda settings: settings.enable_skew_diffusion,
),
"B2_gm": Variable(
"Meridional component of GM streamfunction",
U_GRID,
"m^2/s",
"Meridional component of GM streamfunction",
active=lambda settings: settings.enable_skew_diffusion,
),
"r_bot_var_u": Variable(
"Bottom friction coeff.",
U_HOR,
"1/s",
"Zonal bottom friction coefficient",
active=lambda settings: settings.enable_bottom_friction_var,
),
"r_bot_var_v": Variable(
"Bottom friction coeff.",
V_HOR,
"1/s",
"Meridional bottom friction coefficient",
active=lambda settings: settings.enable_bottom_friction_var,
),
"kappa_gm": Variable(
"Vertical diffusivity",
W_GRID,
"m^2/s",
"Vertical diffusivity",
active=lambda settings: settings.enable_TEM_friction,
),
"tke": Variable(
"Turbulent kinetic energy",
W_GRID + TIMESTEPS,
"m^2/s^2",
"Turbulent kinetic energy",
write_to_restart=True,
active=lambda settings: settings.enable_tke,
),
"sqrttke": Variable(
"Square-root of TKE",
W_GRID,
"m/s",
"Square-root of TKE",
active=lambda settings: settings.enable_tke,
),
"dtke": Variable(
"Turbulent kinetic energy tendency",
W_GRID + TIMESTEPS,
"m^2/s^3",
"Turbulent kinetic energy tendency",
write_to_restart=True,
active=lambda settings: settings.enable_tke,
),
"Prandtlnumber": Variable(
"Prandtl number",
W_GRID,
"",
"Prandtl number",
active=lambda settings: settings.enable_tke,
),
"mxl": Variable(
"Mixing length",
W_GRID,
"m",
"Mixing length",
active=lambda settings: settings.enable_tke,
),
"forc_tke_surface": Variable(
"TKE surface flux",
T_HOR,
"m^3/s^3",
"TKE surface flux",
active=lambda settings: settings.enable_tke,
),
"tke_diss": Variable(
"TKE dissipation",
W_GRID,
"m^2/s^3",
"TKE dissipation",
active=lambda settings: settings.enable_tke,
),
"tke_surf_corr": Variable(
"Correction of TKE surface flux",
T_HOR,
"m^3/s^3",
"Correction of TKE surface flux",
active=lambda settings: settings.enable_tke,
),
"eke": Variable(
"meso-scale energy",
W_GRID + TIMESTEPS,
"m^2/s^2",
"meso-scale energy",
write_to_restart=True,
active=lambda settings: settings.enable_eke,
),
"deke": Variable(
"meso-scale energy tendency",
W_GRID + TIMESTEPS,
"m^2/s^3",
"meso-scale energy tendency",
write_to_restart=True,
active=lambda settings: settings.enable_eke,
),
"sqrteke": Variable(
"square-root of eke",
W_GRID,
"m/s",
"square-root of eke",
active=lambda settings: settings.enable_eke,
),
"L_rossby": Variable(
"Rossby radius",
T_HOR,
"m",
"Rossby radius",
active=lambda settings: settings.enable_eke,
),
"L_rhines": Variable(
"Rhines scale",
W_GRID,
"m",
"Rhines scale",
active=lambda settings: settings.enable_eke,
),
"eke_len": Variable(
"Eddy length scale",
T_GRID,
"m",
"Eddy length scale",
active=lambda settings: settings.enable_eke,
),
"eke_diss_iw": Variable(
"Dissipation of EKE to IW",
W_GRID,
"m^2/s^3",
"Dissipation of EKE to internal waves",
active=lambda settings: settings.enable_eke,
),
"eke_diss_tke": Variable(
"Dissipation of EKE to TKE",
W_GRID,
"m^2/s^3",
"Dissipation of EKE to TKE",
active=lambda settings: settings.enable_eke,
),
"E_iw": Variable(
"Internal wave energy",
W_GRID + TIMESTEPS,
"m^2/s^2",
"Internal wave energy",
write_to_restart=True,
active=lambda settings: settings.enable_idemix,
),
"dE_iw": Variable(
"Internal wave energy tendency",
W_GRID + TIMESTEPS,
"m^2/s^2",
"Internal wave energy tendency",
write_to_restart=True,
active=lambda settings: settings.enable_idemix,
),
"c0": Variable(
"Vertical IW group velocity",
W_GRID,
"m/s",
"Vertical internal wave group velocity",
active=lambda settings: settings.enable_idemix,
),
"v0": Variable(
"Horizontal IW group velocity",
W_GRID,
"m/s",
"Horizontal internal wave group velocity",
active=lambda settings: settings.enable_idemix,
),
"alpha_c": Variable(
"?",
W_GRID,
"?",
"?",
active=lambda settings: settings.enable_idemix,
),
"iw_diss": Variable(
"IW dissipation",
W_GRID,
"m^2/s^3",
"Internal wave | |
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.Name is not None:
showIndent(outfile, level)
outfile.write('Name=%s,\n' % quote_python(self.Name).encode(ExternalEncoding))
if self.Physical_Address is not None:
showIndent(outfile, level)
outfile.write('Physical_Address=%s,\n' % quote_python(self.Physical_Address).encode(ExternalEncoding))
if self.Virtual_Address is not None:
showIndent(outfile, level)
outfile.write('Virtual_Address=%s,\n' % quote_python(self.Virtual_Address).encode(ExternalEncoding))
if self.Size_Of_Raw_Data is not None:
showIndent(outfile, level)
outfile.write('Size_Of_Raw_Data=%s,\n' % quote_python(self.Size_Of_Raw_Data).encode(ExternalEncoding))
if self.Pointer_To_Raw_Data is not None:
showIndent(outfile, level)
outfile.write('Pointer_To_Raw_Data=%s,\n' % quote_python(self.Pointer_To_Raw_Data).encode(ExternalEncoding))
if self.Pointer_To_Relocations is not None:
showIndent(outfile, level)
outfile.write('Pointer_To_Relocations=%s,\n' % quote_python(self.Pointer_To_Relocations).encode(ExternalEncoding))
if self.Pointer_To_Linenumbers is not None:
showIndent(outfile, level)
outfile.write('Pointer_To_Linenumbers=%s,\n' % quote_python(self.Pointer_To_Linenumbers).encode(ExternalEncoding))
if self.Number_Of_Relocations is not None:
showIndent(outfile, level)
outfile.write('Number_Of_Relocations=%s,\n' % quote_python(self.Number_Of_Relocations).encode(ExternalEncoding))
if self.Number_Of_Linenumbers is not None:
showIndent(outfile, level)
outfile.write('Number_Of_Linenumbers=%s,\n' % quote_python(self.Number_Of_Linenumbers).encode(ExternalEncoding))
if self.Characteristics is not None:
showIndent(outfile, level)
outfile.write('Characteristics=%s,\n' % quote_python(self.Characteristics).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Name':
obj_ = common.StringObjectAttributeType.factory()
obj_.build(child_)
self.set_Name(obj_)
elif nodeName_ == 'Physical_Address':
obj_ = common.StringObjectAttributeType.factory()
obj_.build(child_)
self.set_Physical_Address(obj_)
elif nodeName_ == 'Virtual_Address':
Virtual_Address_ = child_.text
Virtual_Address_ = self.gds_validate_string(Virtual_Address_, node, 'Virtual_Address')
self.Virtual_Address = Virtual_Address_
elif nodeName_ == 'Size_Of_Raw_Data':
Size_Of_Raw_Data_ = child_.text
Size_Of_Raw_Data_ = self.gds_validate_string(Size_Of_Raw_Data_, node, 'Size_Of_Raw_Data')
self.Size_Of_Raw_Data = Size_Of_Raw_Data_
elif nodeName_ == 'Pointer_To_Raw_Data':
Pointer_To_Raw_Data_ = child_.text
Pointer_To_Raw_Data_ = self.gds_validate_string(Pointer_To_Raw_Data_, node, 'Pointer_To_Raw_Data')
self.Pointer_To_Raw_Data = Pointer_To_Raw_Data_
elif nodeName_ == 'Pointer_To_Relocations':
Pointer_To_Relocations_ = child_.text
Pointer_To_Relocations_ = self.gds_validate_string(Pointer_To_Relocations_, node, 'Pointer_To_Relocations')
self.Pointer_To_Relocations = Pointer_To_Relocations_
elif nodeName_ == 'Pointer_To_Linenumbers':
Pointer_To_Linenumbers_ = child_.text
Pointer_To_Linenumbers_ = self.gds_validate_string(Pointer_To_Linenumbers_, node, 'Pointer_To_Linenumbers')
self.Pointer_To_Linenumbers = Pointer_To_Linenumbers_
elif nodeName_ == 'Number_Of_Relocations':
Number_Of_Relocations_ = child_.text
Number_Of_Relocations_ = self.gds_validate_string(Number_Of_Relocations_, node, 'Number_Of_Relocations')
self.Number_Of_Relocations = Number_Of_Relocations_
elif nodeName_ == 'Number_Of_Linenumbers':
Number_Of_Linenumbers_ = child_.text
Number_Of_Linenumbers_ = self.gds_validate_string(Number_Of_Linenumbers_, node, 'Number_Of_Linenumbers')
self.Number_Of_Linenumbers = Number_Of_Linenumbers_
elif nodeName_ == 'Characteristics':
Characteristics_ = common.HexBinaryObjectAttributeType.factory()
Characteristics_.build(child_)
self.set_Characteristics(Characteristics_)
# end class PESectionHeaderStructType
class DOSHeaderType(GeneratedsSuper):
"""The DOSHeaderType type is a container for the attributes present in
the _IMAGE_DOS_HEADER structure, which can be found in Winnt.h
and pe.h. See http://www.csn.ul.ie/~caolan/pub/winresdump/winres
dump/doc/pefile.html for more information about the winnt.h
file, and http://www.tavi.co.uk/phobos/exeformat.html for even
more clarification."""
subclass = None
superclass = None
def __init__(self, e_magic=None, e_cblp=None, e_cp=None, e_crlc=None, e_cparhdr=None, e_minalloc=None, e_maxalloc=None, e_ss=None, e_sp=None, e_csum=None, e_ip=None, e_cs=None, e_lfarlc=None, e_ovro=None, reserved1=None, e_oemid=None, e_oeminfo=None, reserved2=None, e_lfanew=None, Hashes=None):
self.e_magic = e_magic
self.e_cblp = e_cblp
self.e_cp = e_cp
self.e_crlc = e_crlc
self.e_cparhdr = e_cparhdr
self.e_minalloc = e_minalloc
self.e_maxalloc = e_maxalloc
self.e_ss = e_ss
self.e_sp = e_sp
self.e_csum = e_csum
self.e_ip = e_ip
self.e_cs = e_cs
self.e_lfarlc = e_lfarlc
self.e_ovro = e_ovro
if reserved1 is None:
self.reserved1 = []
else:
self.reserved1 = reserved1
self.e_oemid = e_oemid
self.e_oeminfo = e_oeminfo
self.reserved2 = reserved2
self.e_lfanew = e_lfanew
self.Hashes = Hashes
def factory(*args_, **kwargs_):
if DOSHeaderType.subclass:
return DOSHeaderType.subclass(*args_, **kwargs_)
else:
return DOSHeaderType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_e_magic(self): return self.e_magic
def set_e_magic(self, e_magic): self.e_magic = e_magic
def get_e_cblp(self): return self.e_cblp
def set_e_cblp(self, e_cblp): self.e_cblp = e_cblp
def get_e_cp(self): return self.e_cp
def set_e_cp(self, e_cp): self.e_cp = e_cp
def get_e_crlc(self): return self.e_crlc
def set_e_crlc(self, e_crlc): self.e_crlc = e_crlc
def get_e_cparhdr(self): return self.e_cparhdr
def set_e_cparhdr(self, e_cparhdr): self.e_cparhdr = e_cparhdr
def get_e_minalloc(self): return self.e_minalloc
def set_e_minalloc(self, e_minalloc): self.e_minalloc = e_minalloc
def get_e_maxalloc(self): return self.e_maxalloc
def set_e_maxalloc(self, e_maxalloc): self.e_maxalloc = e_maxalloc
def get_e_ss(self): return self.e_ss
def set_e_ss(self, e_ss): self.e_ss = e_ss
def get_e_sp(self): return self.e_sp
def set_e_sp(self, e_sp): self.e_sp = e_sp
def get_e_csum(self): return self.e_csum
def set_e_csum(self, e_csum): self.e_csum = e_csum
def get_e_ip(self): return self.e_ip
def set_e_ip(self, e_ip): self.e_ip = e_ip
def get_e_cs(self): return self.e_cs
def set_e_cs(self, e_cs): self.e_cs = e_cs
def get_e_lfarlc(self): return self.e_lfarlc
def set_e_lfarlc(self, e_lfarlc): self.e_lfarlc = e_lfarlc
def get_e_ovro(self): return self.e_ovro
def set_e_ovro(self, e_ovro): self.e_ovro = e_ovro
def get_reserved1(self): return self.reserved1
def set_reserved1(self, reserved1): self.reserved1 = reserved1
def add_reserved1(self, value): self.reserved1.append(value)
def insert_reserved1(self, index, value): self.reserved1[index] = value
def get_e_oemid(self): return self.e_oemid
def set_e_oemid(self, e_oemid): self.e_oemid = e_oemid
def get_e_oeminfo(self): return self.e_oeminfo
def set_e_oeminfo(self, e_oeminfo): self.e_oeminfo = e_oeminfo
def get_reserved2(self): return self.reserved2
def set_reserved2(self, reserved2): self.reserved2 = reserved2
def get_e_lfanew(self): return self.e_lfanew
def set_e_lfanew(self, e_lfanew): self.e_lfanew = e_lfanew
def get_Hashes(self): return self.Hashes
def set_Hashes(self, Hashes): self.Hashes = Hashes
def export(self, outfile, level, namespace_='WinExecutableFileObj:', name_='DOSHeaderType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='DOSHeaderType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='WinExecutableFileObj:', name_='DOSHeaderType'):
pass
def exportChildren(self, outfile, level, namespace_='WinExecutableFileObj:', name_='DOSHeaderType', fromsubclass_=False):
if self.e_magic is not None:
self.e_magic.export(outfile, level, namespace_, name_='e_magic')
if self.e_cblp is not None:
self.e_cblp.export(outfile, level, namespace_, name_='e_cblp')
if self.e_cp is not None:
self.e_cp.export(outfile, level, namespace_, name_='e_cp')
if self.e_crlc is not None:
self.e_crlc.export(outfile, level, namespace_, name_='e_crlc')
if self.e_cparhdr is not None:
self.e_cparhdr.export(outfile, level, namespace_, name_='e_cparhdr')
if self.e_minalloc is not None:
self.e_minalloc.export(outfile, level, namespace_, name_='e_minalloc')
if self.e_maxalloc is not None:
self.e_maxalloc.export(outfile, level, namespace_, name_='e_maxalloc')
if self.e_ss is not None:
self.e_ss.export(outfile, level, namespace_, name_='e_ss')
if self.e_sp is not None:
self.e_sp.export(outfile, level, namespace_, name_='e_sp')
if self.e_csum is not None:
self.e_csum.export(outfile, level, namespace_, name_='e_csum')
if self.e_ip is not None:
self.e_ip.export(outfile, level, namespace_, name_='e_ip')
if self.e_cs is not None:
self.e_cs.export(outfile, level, namespace_, name_='e_cs')
if self.e_lfarlc is not None:
self.e_lfarlc.export(outfile, level, namespace_, name_='e_lfarlc')
if self.e_ovro is not None:
self.e_ovro.export(outfile, level, namespace_, name_='e_ovro')
for reserved1_ in self.reserved1:
reserved1_.export(outfile, level, namespace_, name_='reserved1')
if self.e_oemid is not None:
self.e_oemid.export(outfile, level, namespace_, name_='e_oemid')
if self.e_oeminfo is not None:
self.e_oeminfo.export(outfile, level, namespace_, name_='e_oeminfo')
if self.reserved2 is not None:
self.reserved2.export(outfile, level, namespace_, name_='reserved2')
if self.e_lfanew is not None:
self.e_lfanew.export(outfile, level, namespace_, name_='e_lfanew')
if self.Hashes is not None:
self.Hashes.export(outfile, level, namespace_, name_='Hashes')
def hasContent_(self):
if (
self.e_magic is not None or
self.e_cblp is not None or
self.e_cp is not None or
self.e_crlc is not None or
self.e_cparhdr is not None or
self.e_minalloc is not None or
self.e_maxalloc is not None or
self.e_ss is not None or
self.e_sp is not None or
self.e_csum is not None or
self.e_ip is not None or
self.e_cs is not None or
self.e_lfarlc is not None or
self.e_ovro is not None or
self.reserved1 or
self.e_oemid is not None or
self.e_oeminfo is not None or
self.reserved2 is not None or
self.e_lfanew is not None or
self.Hashes is not None
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='DOSHeaderType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.e_magic is not None:
showIndent(outfile, level)
outfile.write('e_magic=%s,\n' % quote_python(self.e_magic).encode(ExternalEncoding))
if self.e_cblp is not None:
showIndent(outfile, level)
outfile.write('e_cblp=%s,\n' % quote_python(self.e_cblp).encode(ExternalEncoding))
if self.e_cp is not None:
showIndent(outfile, level)
outfile.write('e_cp=%s,\n' % quote_python(self.e_cp).encode(ExternalEncoding))
if self.e_crlc is not None:
showIndent(outfile, level)
outfile.write('e_crlc=%s,\n' % quote_python(self.e_crlc).encode(ExternalEncoding))
if self.e_cparhdr is not None:
showIndent(outfile, level)
outfile.write('e_cparhdr=%s,\n' % quote_python(self.e_cparhdr).encode(ExternalEncoding))
if self.e_minalloc is not None:
showIndent(outfile, level)
outfile.write('e_minalloc=%s,\n' % quote_python(self.e_minalloc).encode(ExternalEncoding))
if self.e_maxalloc is not None:
showIndent(outfile, level)
outfile.write('e_maxalloc=%s,\n' % quote_python(self.e_maxalloc).encode(ExternalEncoding))
if self.e_ss is not None:
showIndent(outfile, level)
outfile.write('e_ss=%s,\n' % quote_python(self.e_ss).encode(ExternalEncoding))
if self.e_sp is not None:
showIndent(outfile, level)
outfile.write('e_sp=%s,\n' % quote_python(self.e_sp).encode(ExternalEncoding))
if self.e_csum is not None:
showIndent(outfile, level)
outfile.write('e_csum=%s,\n' % quote_python(self.e_csum).encode(ExternalEncoding))
if self.e_ip is not None:
showIndent(outfile, level)
outfile.write('e_ip=%s,\n' % quote_python(self.e_ip).encode(ExternalEncoding))
if self.e_cs is not None:
showIndent(outfile, level)
outfile.write('e_cs=%s,\n' % quote_python(self.e_cs).encode(ExternalEncoding))
if self.e_lfarlc is not None:
showIndent(outfile, level)
outfile.write('e_lfarlc=%s,\n' % quote_python(self.e_lfarlc).encode(ExternalEncoding))
if self.e_ovro is not None:
showIndent(outfile, level)
outfile.write('e_ovro=%s,\n' % quote_python(self.e_ovro).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('reserved1=[\n')
level += 1
for reserved1_ in self.reserved1:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(reserved1_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.e_oemid is not None:
showIndent(outfile, level)
outfile.write('e_oemid=%s,\n' % quote_python(self.e_oemid).encode(ExternalEncoding))
if self.e_oeminfo is not None:
showIndent(outfile, level)
outfile.write('e_oeminfo=%s,\n' % quote_python(self.e_oeminfo).encode(ExternalEncoding))
if self.reserved2 is not None:
showIndent(outfile, level)
outfile.write('reserved2=%s,\n' % quote_python(self.reserved2).encode(ExternalEncoding))
if self.e_lfanew is not None:
showIndent(outfile, level)
outfile.write('e_lfanew=%s,\n' % quote_python(self.e_lfanew).encode(ExternalEncoding))
if self.Hashes is not None:
showIndent(outfile, level)
outfile.write('Hashes=%s,\n' % quote_python(self.Hashes).encode(ExternalEncoding))
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'e_magic':
e_magic_ = child_.text
e_magic_ = self.gds_validate_string(e_magic_, node, 'e_magic')
self.e_magic = e_magic_
elif nodeName_ | |
(3759*mckin**10)/mbkin**10)*q_cut**5)/mbkin**10 +
((-1710 + (271*mckin**2)/mbkin**2 + (11609*mckin**4)/mbkin**4 +
(16225*mckin**6)/mbkin**6 + (2709*mckin**8)/mbkin**8)*q_cut**6)/
mbkin**12 - ((-465 + (689*mckin**2)/mbkin**2 + (3797*mckin**4)/
mbkin**4 + (939*mckin**6)/mbkin**6)*q_cut**7)/mbkin**14 +
((-63 + (115*mckin**2)/mbkin**2 + (138*mckin**4)/mbkin**4)*q_cut**8)/
mbkin**16 - (4*(2 + (5*mckin**2)/mbkin**2)*q_cut**9)/mbkin**18 +
(8*q_cut**10)/mbkin**20)*rE - ((-1 + mckin**2/mbkin**2)**2 -
(2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 + q_cut**2/mbkin**4)*
(-4*(-((-1 + mckin**2/mbkin**2)**2*(-731 + (4718*mckin**2)/mbkin**2 -
(4699*mckin**4)/mbkin**4 - (3384*mckin**6)/mbkin**6 -
(2189*mckin**8)/mbkin**8 + (1882*mckin**10)/mbkin**10 +
(83*mckin**12)/mbkin**12)) + ((-2179 + (9001*mckin**2)/
mbkin**2 - (4197*mckin**4)/mbkin**4 - (9313*mckin**6)/
mbkin**6 - (10973*mckin**8)/mbkin**8 - (5289*mckin**10)/
mbkin**10 + (5381*mckin**12)/mbkin**12 + (289*mckin**14)/
mbkin**14)*q_cut)/mbkin**2 - (2*(-694 + (1014*mckin**2)/
mbkin**2 - (1881*mckin**4)/mbkin**4 - (304*mckin**6)/
mbkin**6 + (612*mckin**8)/mbkin**8 + (1866*mckin**10)/
mbkin**10 + (107*mckin**12)/mbkin**12)*q_cut**2)/mbkin**4 -
(2*(-856 - (348*mckin**2)/mbkin**2 + (691*mckin**4)/mbkin**4 +
(2447*mckin**6)/mbkin**6 + (2013*mckin**8)/mbkin**8 +
(181*mckin**10)/mbkin**10)*q_cut**3)/mbkin**6 +
((-2641 - (1196*mckin**2)/mbkin**2 + (6054*mckin**4)/mbkin**4 +
(6404*mckin**6)/mbkin**6 + (715*mckin**8)/mbkin**8)*q_cut**4)/
mbkin**8 - ((-1103 + (405*mckin**2)/mbkin**2 + (2415*mckin**4)/
mbkin**4 + (427*mckin**6)/mbkin**6)*q_cut**5)/mbkin**10 +
(4*(-28 + (27*mckin**2)/mbkin**2 + (19*mckin**4)/mbkin**4)*q_cut**6)/
mbkin**12 + (4*(-1 + mckin**2/mbkin**2)*q_cut**7)/mbkin**14 +
(2*q_cut**8)/mbkin**16)*rG - 4*(-((-1 + mckin**2/mbkin**2)**2*
(351 - (1796*mckin**2)/mbkin**2 - (5235*mckin**4)/mbkin**4 -
(5200*mckin**6)/mbkin**6 - (7135*mckin**8)/mbkin**8 +
(4404*mckin**10)/mbkin**10 + (211*mckin**12)/mbkin**12)) +
((909 - (1687*mckin**2)/mbkin**2 - (18237*mckin**4)/mbkin**4 -
(23505*mckin**6)/mbkin**6 - (8845*mckin**8)/mbkin**8 -
(19449*mckin**10)/mbkin**10 + (12477*mckin**12)/mbkin**12 +
(737*mckin**14)/mbkin**14)*q_cut)/mbkin**2 -
(6*(57 + (175*mckin**2)/mbkin**2 - (475*mckin**4)/mbkin**4 +
(270*mckin**6)/mbkin**6 - (725*mckin**8)/mbkin**8 +
(1403*mckin**10)/mbkin**10 + (95*mckin**12)/mbkin**12)*q_cut**2)/
mbkin**4 - (2*(390 + (1352*mckin**2)/mbkin**2 + (3467*mckin**4)/
mbkin**4 + (6483*mckin**6)/mbkin**6 + (5015*mckin**8)/
mbkin**8 + (413*mckin**10)/mbkin**10)*q_cut**3)/mbkin**6 +
((561 + (4034*mckin**2)/mbkin**2 + (13860*mckin**4)/mbkin**4 +
(15570*mckin**6)/mbkin**6 + (1655*mckin**8)/mbkin**8)*q_cut**4)/
mbkin**8 - (3*(-49 + (463*mckin**2)/mbkin**2 + (1961*mckin**4)/
mbkin**4 + (313*mckin**6)/mbkin**6)*q_cut**5)/mbkin**10 +
(2*(-69 + (139*mckin**2)/mbkin**2 + (64*mckin**4)/mbkin**4)*q_cut**6)/
mbkin**12 + (4*(-3 + (5*mckin**2)/mbkin**2)*q_cut**7)/mbkin**14 +
(6*q_cut**8)/mbkin**16)*sB - 1968*sE + (14432*mckin**2*sE)/mbkin**
2 - (26560*mckin**4*sE)/mbkin**4 + (35616*mckin**6*sE)/mbkin**6 -
(28000*mckin**8*sE)/mbkin**8 - (15712*mckin**10*sE)/mbkin**10 +
(32448*mckin**12*sE)/mbkin**12 - (9760*mckin**14*sE)/mbkin**14 -
(496*mckin**16*sE)/mbkin**16 + (5760*q_cut*sE)/mbkin**2 -
(18112*mckin**2*q_cut*sE)/mbkin**4 - (21504*mckin**4*q_cut*sE)/mbkin**6 -
(21120*mckin**6*q_cut*sE)/mbkin**8 - (44800*mckin**8*q_cut*sE)/mbkin**
10 - (25920*mckin**10*q_cut*sE)/mbkin**12 + (31872*mckin**12*q_cut*
sE)/mbkin**14 + (1664*mckin**14*q_cut*sE)/mbkin**16 -
(3424*q_cut**2*sE)/mbkin**4 + (2160*mckin**2*q_cut**2*sE)/mbkin**6 +
(1488*mckin**4*q_cut**2*sE)/mbkin**8 + (12992*mckin**6*q_cut**2*sE)/mbkin**
10 - (28704*mckin**8*q_cut**2*sE)/mbkin**12 - (25776*mckin**10*q_cut**2*
sE)/mbkin**14 - (976*mckin**12*q_cut**2*sE)/mbkin**16 -
(4656*q_cut**3*sE)/mbkin**6 - (7456*mckin**2*q_cut**3*sE)/mbkin**8 -
(9952*mckin**4*q_cut**3*sE)/mbkin**10 + (6432*mckin**6*q_cut**3*sE)/mbkin**
12 - (16816*mckin**8*q_cut**3*sE)/mbkin**14 - (2752*mckin**10*q_cut**3*
sE)/mbkin**16 + (6336*q_cut**4*sE)/mbkin**8 + (11504*mckin**2*q_cut**4*
sE)/mbkin**10 + (18864*mckin**4*q_cut**4*sE)/mbkin**12 +
(32784*mckin**6*q_cut**4*sE)/mbkin**14 + (4880*mckin**8*q_cut**4*sE)/mbkin**
16 - (1936*q_cut**5*sE)/mbkin**10 - (2976*mckin**2*q_cut**5*sE)/mbkin**
12 - (12816*mckin**4*q_cut**5*sE)/mbkin**14 - (2944*mckin**6*q_cut**5*
sE)/mbkin**16 - (144*q_cut**6*sE)/mbkin**12 + (512*mckin**2*q_cut**6*
sE)/mbkin**14 + (656*mckin**4*q_cut**6*sE)/mbkin**16 -
(64*mckin**2*q_cut**7*sE)/mbkin**16 + (32*q_cut**8*sE)/mbkin**16 -
57*sqB + (338*mckin**2*sqB)/mbkin**2 + (5756*mckin**4*sqB)/mbkin**
4 - (10002*mckin**6*sqB)/mbkin**6 + (3590*mckin**8*sqB)/mbkin**
8 - (2098*mckin**10*sqB)/mbkin**10 + (3012*mckin**12*sqB)/mbkin**
12 - (526*mckin**14*sqB)/mbkin**14 - (13*mckin**16*sqB)/mbkin**
16 + (75*q_cut*sqB)/mbkin**2 + (713*mckin**2*q_cut*sqB)/mbkin**4 -
(13683*mckin**4*q_cut*sqB)/mbkin**6 - (18057*mckin**6*q_cut*sqB)/mbkin**
8 - (7867*mckin**8*q_cut*sqB)/mbkin**10 - (3177*mckin**10*q_cut*sqB)/
mbkin**12 + (1635*mckin**12*q_cut*sqB)/mbkin**14 + (41*mckin**14*q_cut*
sqB)/mbkin**16 + (116*q_cut**2*sqB)/mbkin**4 - (1062*mckin**2*q_cut**2*
sqB)/mbkin**6 + (2688*mckin**4*q_cut**2*sqB)/mbkin**8 -
(2668*mckin**6*q_cut**2*sqB)/mbkin**10 - (1236*mckin**8*q_cut**2*sqB)/
mbkin**12 - (1182*mckin**10*q_cut**2*sqB)/mbkin**14 -
(16*mckin**12*q_cut**2*sqB)/mbkin**16 - (138*q_cut**3*sqB)/mbkin**6 -
(868*mckin**2*q_cut**3*sqB)/mbkin**8 + (1346*mckin**4*q_cut**3*sqB)/mbkin**
10 - (18*mckin**6*q_cut**3*sqB)/mbkin**12 - (1024*mckin**8*q_cut**3*sqB)/
mbkin**14 - (82*mckin**10*q_cut**3*sqB)/mbkin**16 - (171*q_cut**4*sqB)/
mbkin**8 + (1028*mckin**2*q_cut**4*sqB)/mbkin**10 + (1686*mckin**4*
q_cut**4*sqB)/mbkin**12 + (1716*mckin**6*q_cut**4*sqB)/mbkin**14 +
(125*mckin**8*q_cut**4*sqB)/mbkin**16 + (251*q_cut**5*sqB)/mbkin**10 -
(201*mckin**2*q_cut**5*sqB)/mbkin**12 - (663*mckin**4*q_cut**5*sqB)/mbkin**
14 - (67*mckin**6*q_cut**5*sqB)/mbkin**16 - (66*q_cut**6*sqB)/mbkin**
12 + (56*mckin**2*q_cut**6*sqB)/mbkin**14 + (14*mckin**4*q_cut**6*sqB)/
mbkin**16 - (12*q_cut**7*sqB)/mbkin**14 - (4*mckin**2*q_cut**7*sqB)/mbkin**
16 + (2*q_cut**8*sqB)/mbkin**16)))/mbkin**6) -
6*np.sqrt(0j + (mbkin**4 - 2*mbkin**2*mckin**2 + mckin**4 - 2*mbkin**2*q_cut -
2*mckin**2*q_cut + q_cut**2)/mbkin**4)*
(32*(-((-1 + mckin**2/mbkin**2)**4*(-11 + (131*mckin**2)/mbkin**2 +
(343*mckin**4)/mbkin**4 - (4901*mckin**6)/mbkin**6 -
(3931*mckin**8)/mbkin**8 + (4513*mckin**10)/mbkin**10 +
(4201*mckin**12)/mbkin**12 - (3931*mckin**14)/mbkin**14 -
(842*mckin**16)/mbkin**16 + (108*mckin**18)/mbkin**18)) +
((-1 + mckin**2/mbkin**2)**2*(-69 + (686*mckin**2)/mbkin**2 +
(1998*mckin**4)/mbkin**4 - (17384*mckin**6)/mbkin**6 -
(28414*mckin**8)/mbkin**8 - (7896*mckin**10)/mbkin**10 +
(25876*mckin**12)/mbkin**12 + (14624*mckin**14)/mbkin**14 -
(19869*mckin**16)/mbkin**16 - (4814*mckin**18)/mbkin**18 +
(702*mckin**20)/mbkin**20)*q_cut)/mbkin**2 -
(2*(-80 + (683*mckin**2)/mbkin**2 + (1558*mckin**4)/mbkin**4 -
(10930*mckin**6)/mbkin**6 - (12701*mckin**8)/mbkin**8 -
(15107*mckin**10)/mbkin**10 - (14557*mckin**12)/mbkin**12 +
(20503*mckin**14)/mbkin**14 + (17401*mckin**16)/mbkin**16 -
(16868*mckin**18)/mbkin**18 - (5317*mckin**20)/mbkin**20 +
(855*mckin**22)/mbkin**22)*q_cut**2)/mbkin**4 +
(2*(-60 + (337*mckin**2)/mbkin**2 + (688*mckin**4)/mbkin**4 -
(2600*mckin**6)/mbkin**6 - (1620*mckin**8)/mbkin**8 +
(3788*mckin**10)/mbkin**10 + (14332*mckin**12)/mbkin**12 -
(9532*mckin**14)/mbkin**14 - (15876*mckin**16)/mbkin**16 -
(2417*mckin**18)/mbkin**18 + (720*mckin**20)/mbkin**20)*q_cut**3)/
mbkin**6 + (2*(-75 + (292*mckin**2)/mbkin**2 + (3006*mckin**4)/mbkin**
4 + (1155*mckin**6)/mbkin**6 - (7341*mckin**8)/mbkin**8 -
(10120*mckin**10)/mbkin**10 - (1458*mckin**12)/mbkin**12 -
(8299*mckin**14)/mbkin**14 - (3536*mckin**16)/mbkin**16 +
(720*mckin**18)/mbkin**18)*q_cut**4)/mbkin**8 +
((378 - (886*mckin**2)/mbkin**2 - (8966*mckin**4)/mbkin**4 -
(4238*mckin**6)/mbkin**6 + (22414*mckin**8)/mbkin**8 +
(43870*mckin**10)/mbkin**10 + (48834*mckin**12)/mbkin**12 +
(10862*mckin**14)/mbkin**14 - (4284*mckin**16)/mbkin**16)*q_cut**5)/
mbkin**10 + (2*(-126 + (313*mckin**2)/mbkin**2 + (2593*mckin**4)/
mbkin**4 - (837*mckin**6)/mbkin**6 - (12889*mckin**8)/mbkin**8 -
(16866*mckin**10)/mbkin**10 - (3746*mckin**12)/mbkin**12 +
(1638*mckin**14)/mbkin**14)*q_cut**6)/mbkin**12 +
(2*(-30 - (317*mckin**2)/mbkin**2 - (672*mckin**4)/mbkin**4 +
(2136*mckin**6)/mbkin**6 + (6062*mckin**8)/mbkin**8 +
(2953*mckin**10)/mbkin**10 + (180*mckin**12)/mbkin**12)*q_cut**7)/
mbkin**14 - ((-195 - (551*mckin**2)/mbkin**2 + (491*mckin**4)/mbkin**
4 + (3933*mckin**6)/mbkin**6 + (5026*mckin**8)/mbkin**8 +
(2340*mckin**10)/mbkin**10)*q_cut**8)/mbkin**16 +
((-125 - (234*mckin**2)/mbkin**2 + (913*mckin**4)/mbkin**4 +
(2412*mckin**6)/mbkin**6 + (1710*mckin**8)/mbkin**8)*q_cut**9)/
mbkin**18 + ((36 + (36*mckin**2)/mbkin**2 - (446*mckin**4)/mbkin**4 -
(558*mckin**6)/mbkin**6)*q_cut**10)/mbkin**20 +
((-4 + (72*mckin**4)/mbkin**4)*q_cut**11)/mbkin**22)*rE +
((-1 + mckin**2/mbkin**2)**2 - (2*(mbkin**2 + mckin**2)*q_cut)/mbkin**4 +
q_cut**2/mbkin**4)*((180*mckin**2*muG**2)/mbkin**2 - (1452*mckin**4*muG**2)/
mbkin**4 + (4512*mckin**6*muG**2)/mbkin**6 + (1584*mckin**8*muG**2)/
mbkin**8 - (38424*mckin**10*muG**2)/mbkin**10 +
(160680*mckin**12*muG**2)/mbkin**12 - (250128*mckin**14*muG**2)/
mbkin**14 + (135888*mckin**16*muG**2)/mbkin**16 -
(924*mckin**18*muG**2)/mbkin**18 - (14076*mckin**20*muG**2)/mbkin**20 +
(2160*mckin**22*muG**2)/mbkin**22 + (60*mckin**2*muG*mupi)/mbkin**2 -
(996*mckin**4*muG*mupi)/mbkin**4 + (3360*mckin**6*muG*mupi)/mbkin**6 +
(1488*mckin**8*muG*mupi)/mbkin**8 - (25224*mckin**10*muG*mupi)/
mbkin**10 - (24648*mckin**12*muG*mupi)/mbkin**12 +
(124176*mckin**14*muG*mupi)/mbkin**14 - (90960*mckin**16*muG*mupi)/
mbkin**16 + (8652*mckin**18*muG*mupi)/mbkin**18 +
(4524*mckin**20*muG*mupi)/mbkin**20 - (432*mckin**22*muG*mupi)/
mbkin**22 - (240*mckin**2*muG**2*q_cut)/mbkin**4 +
(1032*mckin**4*muG**2*q_cut)/mbkin**6 - (1344*mckin**6*muG**2*q_cut)/
mbkin**8 + (16128*mckin**8*muG**2*q_cut)/mbkin**10 -
(66624*mckin**10*muG**2*q_cut)/mbkin**12 - (32784*mckin**12*muG**2*q_cut)/
mbkin**14 - (183936*mckin**14*muG**2*q_cut)/mbkin**16 +
(23040*mckin**16*muG**2*q_cut)/mbkin**18 + (47088*mckin**18*muG**2*q_cut)/
mbkin**20 - (9720*mckin**20*muG**2*q_cut)/mbkin**22 -
(240*mckin**2*muG*mupi*q_cut)/mbkin**4 + (3288*mckin**4*muG*mupi*q_cut)/
mbkin**6 - (7680*mckin**6*muG*mupi*q_cut)/mbkin**8 -
(24000*mckin**8*muG*mupi*q_cut)/mbkin**10 + (73920*mckin**10*muG*mupi*
q_cut)/mbkin**12 + (84624*mckin**12*muG*mupi*q_cut)/mbkin**14 +
(129216*mckin**14*muG*mupi*q_cut)/mbkin**16 -
(38976*mckin**16*muG*mupi*q_cut)/mbkin**18 -
(14736*mckin**18*muG*mupi*q_cut)/mbkin**20 +
(1944*mckin**20*muG*mupi*q_cut)/mbkin**22 - (240*mckin**2*muG**2*q_cut**2)/
mbkin**6 + (3000*mckin**4*muG**2*q_cut**2)/mbkin**8 -
(5208*mckin**6*muG**2*q_cut**2)/mbkin**10 - (20472*mckin**8*muG**2*q_cut**2)/
mbkin**12 - (9768*mckin**10*muG**2*q_cut**2)/mbkin**14 +
(34152*mckin**12*muG**2*q_cut**2)/mbkin**16 + (22008*mckin**14*muG**2*q_cut**2)/
mbkin**18 - (53352*mckin**16*muG**2*q_cut**2)/mbkin**20 +
(12600*mckin**18*muG**2*q_cut**2)/mbkin**22 + (240*mckin**2*muG*mupi*q_cut**2)/
mbkin**6 - (2520*mckin**4*muG*mupi*q_cut**2)/mbkin**8 +
(3384*mckin**6*muG*mupi*q_cut**2)/mbkin**10 +
(16152*mckin**8*muG*mupi*q_cut**2)/mbkin**12 +
(15048*mckin**10*muG*mupi*q_cut**2)/mbkin**14 -
(46152*mckin**12*muG*mupi*q_cut**2)/mbkin**16 +
(19752*mckin**14*muG*mupi*q_cut**2)/mbkin**18 +
(13896*mckin**16*muG*mupi*q_cut**2)/mbkin**20 -
(2520*mckin**18*muG*mupi*q_cut**2)/mbkin**22 - (240*mckin**2*muG**2*q_cut**3)/
mbkin**8 - (1080*mckin**4*muG**2*q_cut**3)/mbkin**10 +
(4224*mckin**6*muG**2*q_cut**3)/mbkin**12 - (96840*mckin**8*muG**2*q_cut**3)/
mbkin**14 - (119760*mckin**10*muG**2*q_cut**3)/mbkin**16 -
(151272*mckin**12*muG**2*q_cut**3)/mbkin**18 +
(20160*mckin**14*muG**2*q_cut**3)/mbkin**20 + (6120*mckin**16*muG**2*q_cut**3)/
mbkin**22 + (240*mckin**2*muG*mupi*q_cut**3)/mbkin**8 -
(3624*mckin**4*muG*mupi*q_cut**3)/mbkin**10 +
(7104*mckin**6*muG*mupi*q_cut**3)/mbkin**12 +
(66216*mckin**8*muG*mupi*q_cut**3)/mbkin**14 +
(105168*mckin**10*muG*mupi*q_cut**3)/mbkin**16 +
(71112*mckin**12*muG*mupi*q_cut**3)/mbkin**18 +
(1536*mckin**14*muG*mupi*q_cut**3)/mbkin**20 -
(1224*mckin**16*muG*mupi*q_cut**3)/mbkin**22 + (1560*mckin**2*muG**2*q_cut**4)/
mbkin**10 - (8400*mckin**4*muG**2*q_cut**4)/mbkin**12 -
(7416*mckin**6*muG**2*q_cut**4)/mbkin**14 + (114336*mckin**8*muG**2*q_cut**4)/
mbkin**16 + (167928*mckin**10*muG**2*q_cut**4)/mbkin**18 +
(12960*mckin**12*muG**2*q_cut**4)/mbkin**20 - (29160*mckin**14*muG**2*q_cut**4)/
mbkin**22 - (600*mckin**2*muG*mupi*q_cut**4)/mbkin**10 +
(8112*mckin**4*muG*mupi*q_cut**4)/mbkin**12 -
(12072*mckin**6*muG*mupi*q_cut**4)/mbkin**14 -
(95712*mckin**8*muG*mupi*q_cut**4)/mbkin**16 -
(93048*mckin**10*muG*mupi*q_cut**4)/mbkin**18 -
(10560*mckin**12*muG*mupi*q_cut**4)/mbkin**20 +
(5832*mckin**14*muG*mupi*q_cut**4)/mbkin**22 - (720*mckin**2*muG**2*q_cut**5)/
mbkin**12 + (7032*mckin**4*muG**2*q_cut**5)/mbkin**14 -
(15648*mckin**6*muG**2*q_cut**5)/mbkin**16 - (98928*mckin**8*muG**2*q_cut**5)/
mbkin**18 - (46224*mckin**10*muG**2*q_cut**5)/mbkin**20 +
(21240*mckin**12*muG**2*q_cut**5)/mbkin**22 + (240*mckin**2*muG*mupi*q_cut**5)/
mbkin**12 - (4824*mckin**4*muG*mupi*q_cut**5)/mbkin**14 +
(11808*mckin**6*muG*mupi*q_cut**5)/mbkin**16 +
(40560*mckin**8*muG*mupi*q_cut**5)/mbkin**18 +
(11376*mckin**10*muG*mupi*q_cut**5)/mbkin**20 -
(4248*mckin**12*muG*mupi*q_cut**5)/mbkin**22 - (1200*mckin**2*muG**2*q_cut**6)/
mbkin**14 + (10728*mckin**4*muG**2*q_cut**6)/mbkin**16 +
(57048*mckin**6*muG**2*q_cut**6)/mbkin**18 + (64872*mckin**8*muG**2*q_cut**6)/
mbkin**20 + (6120*mckin**10*muG**2*q_cut**6)/mbkin**22 +
(240*mckin**2*muG*mupi*q_cut**6)/mbkin**14 - (1608*mckin**4*muG*mupi*
q_cut**6)/mbkin**16 - (11832*mckin**6*muG*mupi*q_cut**6)/mbkin**18 -
(11784*mckin**8*muG*mupi*q_cut**6)/mbkin**20 -
(1224*mckin**10*muG*mupi*q_cut**6)/mbkin**22 + (1200*mckin**2*muG**2*q_cut**7)/
mbkin**16 - (18600*mckin**4*muG**2*q_cut**7)/mbkin**18 -
(40608*mckin**6*muG**2*q_cut**7)/mbkin**20 - (16200*mckin**8*muG**2*q_cut**7)/
mbkin**22 - (240*mckin**2*muG*mupi*q_cut**7)/mbkin**16 +
(3720*mckin**4*muG*mupi*q_cut**7)/mbkin**18 +
(7584*mckin**6*muG*mupi*q_cut**7)/mbkin**20 +
(3240*mckin**8*muG*mupi*q_cut**7)/mbkin**22 - (300*mckin**2*muG**2*q_cut**8)/
mbkin**18 + (9180*mckin**4*muG**2*q_cut**8)/mbkin**20 +
(8280*mckin**6*muG**2*q_cut**8)/mbkin**22 + (60*mckin**2*muG*mupi*q_cut**8)/
mbkin**18 - (1836*mckin**4*muG*mupi*q_cut**8)/mbkin**20 -
(1656*mckin**6*muG*mupi*q_cut**8)/mbkin**22 - (1440*mckin**4*muG**2*q_cut**9)/
mbkin**22 + (288*mckin**4*muG*mupi*q_cut**9)/mbkin**22 -
24*mckin**2*muG*((-1 + mckin**2/mbkin**2)**2*(-5 + (73*mckin**2)/
mbkin**2 - (129*mckin**4)/mbkin**4 - (455*mckin**6)/mbkin**6 +
(1321*mckin**8)/mbkin**8 + (5151*mckin**10)/mbkin**10 -
(1367*mckin**12)/mbkin**12 - (305*mckin**14)/mbkin**14 +
(36*mckin**16)/mbkin**16) - (2*(-10 + (137*mckin**2)/mbkin**2 -
(320*mckin**4)/mbkin**4 - (1000*mckin**6)/mbkin**6 +
(3080*mckin**8)/mbkin**8 + (3526*mckin**10)/mbkin**10 +
(5384*mckin**12)/mbkin**12 - (1624*mckin**14)/mbkin**14 -
(614*mckin**16)/mbkin**16 + (81*mckin**18)/mbkin**18)*q_cut)/
mbkin**2 + (2*(-10 + (105*mckin**2)/mbkin**2 - (141*mckin**4)/
mbkin**4 - (673*mckin**6)/mbkin**6 - (627*mckin**8)/mbkin**8 +
(1923*mckin**10)/mbkin**10 - (823*mckin**12)/mbkin**12 -
(579*mckin**14)/mbkin**14 + (105*mckin**16)/mbkin**16)*q_cut**2)/
mbkin**4 + (2*(-10 + (151*mckin**2)/mbkin**2 - (296*mckin**4)/
mbkin**4 - (2759*mckin**6)/mbkin**6 - (4382*mckin**8)/mbkin**8 -
(2963*mckin**10)/mbkin**10 - (64*mckin**12)/mbkin**12 +
(51*mckin**14)/mbkin**14)*q_cut**3)/mbkin**6 +
((50 - (676*mckin**2)/mbkin**2 + (1006*mckin**4)/mbkin**4 +
(7976*mckin**6)/mbkin**6 + (7754*mckin**8)/mbkin**8 +
(880*mckin**10)/mbkin**10 - (486*mckin**12)/mbkin**12)*q_cut**4)/
mbkin**8 + ((-20 + (402*mckin**2)/mbkin**2 - (984*mckin**4)/
mbkin**4 - (3380*mckin**6)/mbkin**6 - (948*mckin**8)/mbkin**8 +
(354*mckin**10)/mbkin**10)*q_cut**5)/mbkin**10 +
(2*(-10 + (67*mckin**2)/mbkin**2 + (493*mckin**4)/mbkin**4 +
(491*mckin**6)/mbkin**6 + (51*mckin**8)/mbkin**8)*q_cut**6)/
mbkin**12 - (2*(-10 + (155*mckin**2)/mbkin**2 + (316*mckin**4)/
mbkin**4 + (135*mckin**6)/mbkin**6)*q_cut**7)/mbkin**14 +
((-5 + (153*mckin**2)/mbkin**2 + (138*mckin**4)/mbkin**4)*q_cut**8)/
mbkin**16 - (24*mckin**2*q_cut**9)/mbkin**20) -
4*(-((-1 + mckin**2/mbkin**2)**2*(-43 + (438*mckin**2)/mbkin**2 +
(1390*mckin**4)/mbkin**4 - (15994*mckin**6)/mbkin**6 +
(14652*mckin**8)/mbkin**8 + (24394*mckin**10)/mbkin**10 +
(8738*mckin**12)/mbkin**12 - (6774*mckin**14)/mbkin**14 -
| |
import os
import pytz
import time
import datetime
import asyncio
import traceback
import subprocess
import importlib
import requests
from typing import Union, List
from pyrogram.types import Message
from pyrogram.errors import YouBlockedUser, MessageIdInvalid, PeerIdInvalid
from pyrogram.enums import ParseMode, ChatType
class RawFunctions(object):
async def aexec(
self,
code: str
):
"""
params:
            1. code: str :: the Python code you want to run
        use:
            use this function to execute Python code at runtime
ex: (async)
await app.aexec("print('Hello, World !')")
"""
exec(
f"async def __aexec(self, m): "
+ "".join(f"\n {l}" for l in code.split("\n"))
)
return await locals()["__aexec"](self, self.m)
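    # Illustrative note: for code = "print('hi')", the exec() call above builds and runs a
    # wrapper roughly equivalent to:
    #
    #     async def __aexec(self, m):
    #         print('hi')
    #
    # which is then awaited with the current update passed in as `m`.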
def showdate(
self
):
"""
params:
None
use:
            use this function to get the current date for your configured time zone
        ex:
            app.showdate()
"""
today = pytz.timezone(self.TIME_ZONE)
get_date = datetime.datetime.now(today)
mydate = get_date.strftime("%d %b %Y")
return mydate
def showtime(
self
):
"""
params:
None
use:
            use this function to get the current time for your configured time zone
        ex:
            app.showtime()
"""
today = pytz.timezone(self.TIME_ZONE)
get_time = datetime.datetime.now(today)
mytime = get_time.strftime("%r")
return mytime
async def error(
self,
e,
edit_error: bool=True
):
"""
params:
            1. e :: the exception that occurred
            2. edit_error: bool, default=True :: if True, edits/sends a short error message in the chat
usage:
use this function at the end of try/except block
ex: (async)
try:
await app.send_message(message.chat.id, "This is a test")
except Exception as e:
await app.error(e, edit_error=False)
"""
teks = f"**Traceback Report:**\n\n"
teks += f"**Date:** `{self.showdate()}`\n**Time:** `{self.showtime()}`\n\n"
        teks += f"`This may be an error in tronuserbot. If you want, you can forward this to @tronuserbot_support.`\n\n"
teks += f"**Command:** `{self.m.text}`\n\n"
teks += "`-`" * 30 + "\n\n"
teks += f"**SHORT:** \n\n`{e}`\n\n"
teks += f"**FULL:** \n\n`{traceback.format_exc()}`"
try:
if edit_error:
if hasattr(e, "MESSAGE"):
await self.send_edit(f"`{e.MESSAGE}`")
else:
await self.send_edit(e.args[0] if e.args else None)
await self.send_message(self.LOG_CHAT, teks)
print(e)
except PeerIdInvalid:
self.log.error(teks)
except Exception as err:
self.log.error(err)
return True
async def sleep(
self,
sec: int=0,
delmsg=False
):
"""
params:
1. sec :: time to sleep in seconds
            2. delmsg, default=False :: delete the message if it is True
use:
this function deletes the message after sleeping for a given time,
this function blocks the code
ex: (async)
await app.sleep(10, delmsg=True)
"""
msg = None
await asyncio.sleep(sec)
if delmsg and self.m.from_user.is_self:
msg = await self.m.delete()
return msg
async def delete_message(
self,
sec: int=0
):
"""
params:
            1. sec: int, default=0 :: how long to wait (in seconds) before deleting
        use:
            this function deletes the current message after the given time period,
            without blocking the rest of the execution
        ex: (async)
            await app.delete_message(10)
"""
if sec <= 600: # 10 min
asyncio.create_task(self.sleep(sec=sec, delmsg=True))
return True
else:
            self.log.error("delete_message can only wait for up to 10 minutes (600 seconds)")
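    # Note the difference between the two helpers above (illustrative):
    #     await app.sleep(10, delmsg=True)   # blocks the current handler for 10s, then deletes
    #     await app.delete_message(10)       # returns immediately; deletion runs in a background task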
async def data(
self,
modules: str
):
"""
params:
            1. modules: str :: module name whose information is stored in the app.CMD_HELP dict
use:
use this function to get information about a module
ex: (async)
await app.data("admin")
"""
try:
            module_data = []
for x, y in zip(
self.CMD_HELP.get(modules)[1].keys(),
self.CMD_HELP.get(modules)[1].values()
):
module_data.append(
f"CMD: `{self.PREFIX}{x}`\nINFO: `{y}`\n\n"
)
return module_data
except Exception as e:
self.log.error(e)
return None
async def send_edit(
self,
text: str,
parse_mode=ParseMode.DEFAULT,
disable_web_page_preview=False,
delme : int=0,
text_type: list=[],
disable_notification: bool=False,
reply_to_message_id: int=0,
schedule_date: int=0,
protect_content: bool=False,
reply_markup=None,
entities=None
):
"""
params:
1. text: str :: text to be edited or sent instead of editing
            2. disable_web_page_preview: bool, default=False :: if True, the web page preview is hidden
            3. delme: int, default=0 :: sleeps for the given time and then deletes the message
            4. text_type: list, default=[] :: text formatting to apply, e.g. ["mono"], ["bold"], ["italic"], ["underline"]
use:
            use this function to edit the current message, or send a new message if editing is not possible
ex: (async)
await app.send_edit(
"This text is sent or edited",
disable_web_page_preview=True,
delme=5,
                text_type=["mono"]
)
"""
msg = None
if self.m.from_user.is_self:
msg = await self.m.edit(
text=self.FormatText(text, format=text_type),
parse_mode=parse_mode,
disable_web_page_preview=disable_web_page_preview,
reply_markup=reply_markup,
entities=entities
)
else:
msg = await self.send_message(
chat_id=self.m.chat.id,
text=self.FormatText(text, format=text_type),
disable_web_page_preview=disable_web_page_preview,
parse_mode=parse_mode,
reply_to_message_id=reply_to_message_id,
schedule_date=schedule_date,
protect_content=protect_content,
reply_markup=reply_markup,
entities=entities
)
try:
if delme > 0:
asyncio.create_task(self.sleep(sec=delme, delmsg=True))
except Exception as err:
await self.error(err)
return msg
async def check_private(
self
):
"""
params:
None
use:
            use this to tell the user that a command can't be used in private chats
        ex: (async)
            await app.check_private()
"""
if self.m.chat.type == ChatType.PRIVATE:
await self.send_edit(
"Please use these commands in groups.",
text_type=["mono"],
delme=4
)
return True
return False
def long(
self
):
"""
params:
None
use:
            this function returns the number of words in the message (text split on spaces)
ex:
if app.long() == 1:
print("there is one word in message.text")
"""
        text_length = len((self.m.text or self.m.caption or "").split())
        return text_length or None
def textlen(
self
):
"""
params:
None
use:
            this function returns the number of characters in message.text (or the caption)
ex:
if app.textlen() > 4096:
print("Text is too long")
"""
        return len(self.m.text or self.m.caption or "")
async def create_file(
self,
filename: str,
content: str,
send: bool=True,
caption: str=None
):
"""
params:
            1. filename: str :: a file name, with or without an extension
            2. content: str :: the contents to be written to the file
use:
use this function to create files with any type of extension (.txt, .py, .java, .html, etc),
this function also sends the created file.
ex: (async)
await app.create_file("sample.txt", "This file was created by app.create_file() method")
"""
try:
            path = f"./downloads/{filename}"
            with open(path, "w+") as file:
                file.write(content)
if send:
await self.send_document(
self.m.chat.id,
path,
caption = caption if caption else f"**Uploaded By:** {self.UserMention()}"
)
if os.path.exists(path):
os.remove(path)
else:
return path
except Exception as e:
await self.error(e)
def rem_dual(
self,
list1: list,
list2: list
):
"""
params:
            1. list1: list :: the list you want to filter
            2. list2: list :: the list of elements to remove from list1
use:
use this function to remove duplicates from lists
ex:
app.rem_dual([1, 1, 1, 2, 3], [1])
"""
return list(set(list1) - set(list2))
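    # Because rem_dual() goes through set(), duplicates in list1 are collapsed and the original
    # order is not guaranteed, e.g. app.rem_dual([1, 1, 1, 2, 3], [1]) returns [2, 3] (in some order).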
async def kick_user(
self,
chat_id: Union[str, int],
user_id: Union[str, int],
ban_time: int=30
):
"""
params:
1. chat_id: int :: chat id of the chat where this method is used
2. user_id: int :: user id of the user you want to kick from chat
use:
use this function to kick a member from your chat
ex: (async)
await app.kick_user(chat_id, user_id, ban_time=120)
"""
try:
return await self.ban_chat_member(chat_id, user_id, int(time.time()) + ban_time)
except Exception as e:
await self.error(e)
def is_str(
self,
element
):
"""
params:
            1. element: [str, bool, int, float] :: any type of data
use:
use this function to check if the element is string or not
ex:
app.is_str(data)
"""
return isinstance(element, str)
def is_bool(
self,
element
):
"""
params:
            1. element: [str, bool, int, float] :: any type of data
use:
use this function to check if the element is boolean or not
ex:
app.is_bool(data)
"""
return isinstance(element, bool)
def is_float(
self,
element
):
"""
params:
            1. element: [str, bool, int, float] :: any type of data
use:
use this function to check if the element is float or not
ex:
app.is_float(data)
"""
return isinstance(element, float)
def is_int(
self,
element
):
"""
params:
            1. element: [str, bool, int, float] :: any type of data
use:
use this function to check if the element is integer or not
ex:
app.is_int(data)
"""
return isinstance(element, int)
async def get_last_msg(
self,
chat_id,
reverse: bool=False
):
"""
params:
1. chat_id: int :: chat id of group or user
2. reverse: bool, default=False :: if reverse is True you'll get the oldest message in chat
use:
use this function to get last message of the chat or user
ex: (async)
await app.get_last_msg(chat_id, reverse=True)
"""
return await self.get_chat_history(chat_id, limit=1, reverse=reverse)
async def toggle_inline(
self,
):
"""
params:
None
use:
use this function to turn on | off inline mode of your bot
ex: (async)
await app.toggle_inline()
"""
try:
botname = "BotFather"
await self.send_edit("Processing command . . .", text_type=["mono"])
await self.send_message(botname, "/mybots") # BotFather (93372553)
await asyncio.sleep(0.50) # floodwaits
data = await self.get_last_msg(botname)
usernames = list(data[0].reply_markup.inline_keyboard)[0]
            unames = []
for x in usernames:
unames.append(x.text)
await self.send_edit("Choosing bot . . . ", text_type=["mono"])
if self.bot.username in unames:
await data[0].click(self.bot.username)
else:
                return await self.send_edit("Looks like you don't have a bot. Please use your own bot.", text_type=["mono"], delme=4)
data = await self.get_last_msg(botname)
await self.send_edit("Pressing Bot Settings . . . ", text_type=["mono"])
await data[0].click("Bot Settings")
data = await self.get_last_msg(botname)
await self.send_edit("checking whether inline mode is On or Off . . . ", text_type=["mono"])
await data[0].click("Inline Mode")
data = await self.get_last_msg(botname)
# Turn on inline mode
if "Turn on" in str(data[0]):
await self.send_edit("Turning Inline mode on . . . ", text_type=["mono"])
await data[0].click("Turn on")
await self.send_edit("Inline mode is now turned On.", text_type=["mono"], delme=4)
# Turn inline mode off
elif "Turn inline mode off" in str(data[0]):
await self.send_edit("Turning Inline mode Off . . .", text_type=["mono"])
await data[0].click("Turn inline mode off")
await self.send_edit("Inline mode is now turned Off.", text_type=["mono"], delme=4)
except YouBlockedUser:
await self.unblock_user(botname) # unblock & continue
await self.toggle_inline() # keep process going
except Exception as err:
await self.error(err)
def quote(
self
):
"""
params:
None
use:
            use this function to get a random anime quote
ex:
app.quote()
"""
results = requests.get("https://animechan.vercel.app/api/random").json()
msg = f"❝ {results.get('quote')} ❞"
msg += f" [ {results.get('anime')} ]\n\n"
msg += f"- {results.get('character')}\n\n"
return msg
def ialive_pic(
self
):
"""
params:
None
use:
use this function to get inline alive pic url
ex:
app.ialive_pic()
"""
return self.getdv("USER_PIC") or self.UserPic() or None
def get_file_id(
self,
message: Message
):
"""
params:
1. message (update) :: incoming update
use:
use this function to get file_id of any media in telegram
ex:
app.get_file_id(message)
"""
media = ["photo", "video", "audio", "document", "sticker", "animation"]
for x in media:
if message and message[x]:
if message["caption"]:
return {"data":(message[x]).file_id, "caption":message.caption, "type":x}
else:
return {"data":(message[x]).file_id, "caption":None, "type":x}
elif message["text"]:
return {"data":message.text, "caption":None, "type":"text"}
return {"data":None, "caption":None, "type":None}
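    # For example (illustrative), a captioned photo message comes back roughly as
    #     {"data": "<photo file_id>", "caption": "my caption", "type": "photo"}
    # and a plain text message as
    #     {"data": "<message text>", "caption": None, "type": "text"}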
def clear_screen(
self
):
"""
params:
None
use:
use this function to clear terminal screen
ex:
app.clear_screen()
"""
        # "cls" is a cmd.exe builtin, so it needs shell=True on Windows
        subprocess.call("clear" if os.name == "posix" else "cls", shell=(os.name != "posix"))
async def add_users(
self,
user_id: Union[int, str, List[int], List[str]],
chat_id: Union[int, str]
):
"""
params:
            1. user_id :: a Telegram user id, or a list of user ids
            2. chat_id :: chat id of a group or channel
        use:
            use this function to add users to a group / channel
ex: (async)
await app.add_users(user_id, chat_id)
"""
try:
done = await self.add_chat_members(chat_id, user_id)
return True if done else False
except Exception as e:
self.log.error(e)
async def user_exists(
self,
user_id: Union[int, str],
chat_id: Union[int, str]
):
"""
params:
1. user_id: int :: id of a telegram user
            2. chat_id :: id of the Telegram chat
use:
use this | |
import argparse
import logging
import requests
import sqlite3
import sys
import time
from voussoirkit import backoff
from voussoirkit import betterhelp
from voussoirkit import httperrors
from voussoirkit import mutables
from voussoirkit import operatornotify
from voussoirkit import ratelimiter
from voussoirkit import sqlhelpers
from voussoirkit import threadpool
from voussoirkit import vlogging
log = vlogging.getLogger(__name__, 'hnarchive')
VERSION = 1
HEADERS = {
'User-Agent': f'voussoir/hnarchive v{VERSION}.',
}
session = requests.Session()
session.headers.update(HEADERS)
DB_INIT = '''
PRAGMA user_version = 1;
CREATE TABLE IF NOT EXISTS items(
id INT PRIMARY KEY NOT NULL,
deleted INT,
type TEXT,
author TEXT,
time INT,
text TEXT,
dead INT,
parent TEXT,
poll TEXT,
url TEXT,
score INT,
title TEXT,
descendants INT,
retrieved INT
);
CREATE INDEX IF NOT EXISTS index_items_id on items(id);
CREATE INDEX IF NOT EXISTS index_items_parent on items(parent);
CREATE INDEX IF NOT EXISTS index_items_time on items(time);
CREATE INDEX IF NOT EXISTS index_items_type_time on items(type, time);
CREATE INDEX IF NOT EXISTS index_items_age_at_retrieval on items(retrieved - time);
'''
COLUMNS = sqlhelpers.extract_table_column_map(DB_INIT)
ITEMS_COLUMNS = COLUMNS['items']
sql = sqlite3.connect('hnarchive.db')
sql.executescript(DB_INIT)
# HELPERS ##########################################################################################
def ctrlc_commit(function):
def wrapped(*args, **kwargs):
try:
return function(*args, **kwargs)
except KeyboardInterrupt:
commit()
return 1
return wrapped
def int_or_none(x):
if x is None:
return x
return int(x)
# API ##############################################################################################
def get(url, retries=1):
bo = backoff.Quadratic(a=0.2, b=0, c=1, max=10)
while retries > 0:
try:
log.loud(url)
response = session.get(url, timeout=2)
httperrors.raise_for_status(response)
return response
except (
httperrors.HTTP429,
httperrors.HTTP5XX,
requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
):
# Any other 4XX should raise.
retries -= 1
log.loud('Request failed, %d tries remain.', retries)
time.sleep(bo.next())
raise RuntimeError(f'Ran out of retries on {url}.')
def get_item(id):
url = f'https://hacker-news.firebaseio.com/v0/item/{id}.json'
response = get(url, retries=8)
item = response.json()
if item is None:
return None
if 'time' not in item:
# For example, 78692 from the api shows {"id": 78692, "type": "story"},
# but the web says "No such item."
# https://hacker-news.firebaseio.com/v0/item/78692.json
# https://news.ycombinator.com/item?id=78692
return None
return item
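# Illustrative only -- a story item returned by the API is a JSON object roughly of the form:
#   {"id": ..., "type": "story", "by": "<username>", "time": <unix timestamp>,
#    "title": "...", "url": "...", "score": ..., "descendants": ...}
# insert_item() below maps these field names onto the items table columns
# ("by" -> author, "descendants" -> descendants, and so on).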
def get_items(ids, threads=None):
if threads:
return get_items_multithreaded(ids, threads)
else:
return get_items_singlethreaded(ids)
def get_items_multithreaded(ids, threads):
pool = threadpool.ThreadPool(threads, paused=True)
job_gen = ({'function': get_item, 'kwargs': {'id': id}} for id in ids)
pool.add_generator(job_gen)
for job in pool.result_generator(buffer_size=250):
if job.exception:
raise job.exception
if job.value is not None:
yield job.value
def get_items_singlethreaded(ids):
for id in ids:
item = get_item(id)
if item is not None:
yield item
def get_latest_id():
url = 'https://hacker-news.firebaseio.com/v0/maxitem.json'
response = get(url)
latest_id = int(response.text)
return latest_id
def livestream():
bo = backoff.Linear(m=2, b=5, max=60)
id = select_latest_id() or 1
# missed_loops:
# Usually, livestream assumes that `item is None` means the requested item
# id hasn't been published yet. But, if that item is actually just deleted,
# we would be stuck waiting for it forever. missed_loops is used to
    # occasionally check get_latest_id to see if new items are available, so we
# know that the current id is really just deleted.
# Items are released in small batches of < ~10 at a time. It is important
# that the number in `latest > id+XXX` is big enough that we are sure the
# requested item is really dead and not just part of a fresh batch that
# beat our check in a race condition (consider that between the last
# iteration which triggered the check and the call to get_latest_id, the
# item we were waiting for is published in a new batch). I chose 50 because
# catching up with 50 items is not a big deal.
missed_loops = 0
while True:
item = get_item(id)
if item is None:
log.debug('%s does not exist yet.', id)
missed_loops += 1
if missed_loops % 5 == 0:
latest = get_latest_id()
if latest > (id+50):
log.debug('Skipping %s because future ids exist.', id)
id += 1
continue
time.sleep(bo.next())
continue
id += 1
missed_loops = 0
bo.rewind(2)
yield item
# DATABASE #########################################################################################
def commit():
log.info('Committing.')
sql.commit()
def insert_item(data):
id = data['id']
retrieved = int(time.time())
existing = select_item(id)
if existing is None:
row = {
'id': id,
'deleted': bool(data.get('deleted', False)),
'type': data['type'],
'author': data.get('by', None),
'time': int(data['time']),
'text': data.get('text', None),
'dead': bool(data.get('dead', False)),
'parent': data.get('parent', None),
'poll': data.get('poll', None),
'url': data.get('url', None),
'score': int_or_none(data.get('score', None)),
'title': data.get('title', None),
'descendants': int_or_none(data.get('descendants', None)),
'retrieved': retrieved,
}
log.info('Inserting item %s.', id)
(qmarks, bindings) = sqlhelpers.insert_filler(ITEMS_COLUMNS, row, require_all=True)
query = f'INSERT INTO items VALUES({qmarks})'
sql.execute(query, bindings)
log.loud('Inserted item %s.', id)
else:
row = {
'id': id,
'deleted': bool(data.get('deleted', False)),
'type': data['type'],
'author': data.get('by', existing.get('author', None)),
'time': int(data['time']),
'text': data.get('text', existing.get('text', None)),
'dead': bool(data.get('dead', False)),
'parent': data.get('parent', None),
'poll': data.get('poll', existing.get('poll', None)),
'url': data.get('url', existing.get('url', None)),
'score': int_or_none(data.get('score', existing.get('score', None))),
'title': data.get('title', existing.get('title', None)),
'descendants': int_or_none(data.get('descendants', None)),
'retrieved': retrieved,
}
log.info('Updating item %s.', id)
(qmarks, bindings) = sqlhelpers.update_filler(row, where_key='id')
query = f'UPDATE items {qmarks}'
sql.execute(query, bindings)
log.loud('Updated item %s.', id)
return {'row': row, 'is_new': existing is None}
def insert_items(items, commit_period=200):
ticker = 0
for item in items:
insert_item(item)
ticker = (ticker + 1) % commit_period
if ticker == 0:
commit()
commit()
def select_item(id):
cur = sql.execute('SELECT * FROM items WHERE id == ?', [id])
row = cur.fetchone()
if row is None:
return None
item = dict(zip(ITEMS_COLUMNS, row))
return item
def select_latest_id():
cur = sql.execute('SELECT id FROM items ORDER BY id DESC LIMIT 1')
row = cur.fetchone()
if row is None:
return None
return row[0]
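# A minimal illustrative helper (not used by the CLI below): ad-hoc queries against the
# same database are easy, e.g. counting archived items per type.
def example_type_counts():
    cur = sql.execute('SELECT type, COUNT(*) FROM items GROUP BY type')
    return dict(cur.fetchall())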
# COMMAND LINE #####################################################################################
DOCSTRING = '''
hnarchive.py
============
{get}
{livestream}
{update}
{update_items}
TO SEE DETAILS ON EACH COMMAND, RUN
> hnarchive.py <command> --help
'''.lstrip()
SUB_DOCSTRINGS = dict(
get='''
get:
Get items between two IDs, inclusive.
flags:
--lower id:
Lower bound item ID. If omitted, starts from 1.
--upper id:
Upper bound item ID. If omitted, ends at newest post.
--threads X:
Use X threads to download items. Default = 1 thread.
--commit_period X:
Commit the database after every X insertions. Default = 200.
'''.strip(),
livestream='''
livestream:
Watch for new items in an infinite loop.
flags:
--commit_period X:
Commit the database after every X insertions. Default = 200.
'''.strip(),
update='''
update:
Get new items, from the highest ID in the database to the present.
flags:
--threads X:
Use X threads to download items. Default = 1 thread.
--commit_period X:
Commit the database after every X insertions. Default = 200.
'''.strip(),
update_items='''
update_items:
Redownload items to update their scores, descendant counts, etc.
flags:
--days X:
Update items where the retrieval date is less than X days ahead of the
submission date.
Stories are only open for comments for 14 days, so the `descendants`
count of any story younger than 14 days should be considered volatile.
It seems the upvote button does not disappear at any age, though I
don't know whether votes on old submissions will actually count.
Regardless, votes and comments tend to solidify within a day or two
after submission so a small number should be sufficient.
--threads X:
Use X threads to download items. Default = 1 thread.
--only_mature:
If True, only update items where the submission date is more than 14
days ago. Without this, you will be updating items which are very close
to the present time, an effort which you may find wasteful.
--commit_period X:
Commit the database after every X insertions. Default = 200.
'''.strip(),
)
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
@ctrlc_commit
def get_argparse(args):
lower = args.lower or 1
upper = args.upper or get_latest_id()
ids = range(lower, upper+1)
items = get_items(ids, threads=args.threads)
insert_items(items, commit_period=args.commit_period)
return 0
@ctrlc_commit
def livestream_argparse(args):
NOTIFY_EVERY_LINE.set(True)
insert_items(livestream(), commit_period=args.commit_period)
return 0
@ctrlc_commit
def update_argparse(args):
while True:
lower = select_latest_id() or 1
upper = get_latest_id()
if lower == upper:
break
ids = range(lower, upper+1)
items = get_items(ids, threads=args.threads)
insert_items(items, commit_period=args.commit_period)
return 0
@ctrlc_commit
def update_items_argparse(args):
seconds = args.days * 86400
if args.only_mature:
then = time.time() - (86400 * 14)
query = 'SELECT id FROM items WHERE retrieved - time <= ? AND time < ?'
bindings = [seconds, then]
else:
query = 'SELECT id FROM items WHERE retrieved - time <= ?'
bindings = [seconds]
cur = sql.execute(query, bindings)
ids = cur.fetchall()
log.info('Updating %d items.', len(ids))
if not ids:
return 0
ids = [id for (id,) in ids]
items = get_items(ids, threads=args.threads)
insert_items(items, commit_period=args.commit_period)
return 0
NOTIFY_EVERY_LINE = mutables.Boolean(False)
@operatornotify.main_decorator(subject='hnarchive.py', notify_every_line=NOTIFY_EVERY_LINE)
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=DOCSTRING)
subparsers = parser.add_subparsers()
p_get = subparsers.add_parser('get')
p_get.add_argument('--lower', type=int, default=None)
p_get.add_argument('--upper', type=int, default=None)
p_get.add_argument('--threads', type=int, default=None)
p_get.add_argument('--commit_period', '--commit-period', type=int, default=200)
p_get.set_defaults(func=get_argparse)
p_livestream = subparsers.add_parser('livestream')
p_livestream.add_argument('--commit_period', '--commit-period', type=int, default=200)
p_livestream.set_defaults(func=livestream_argparse)
p_update = subparsers.add_parser('update')
p_update.add_argument('--threads', type=int, default=None)
p_update.add_argument('--commit_period', '--commit-period', type=int, default=200)
p_update.set_defaults(func=update_argparse)
p_update_items = subparsers.add_parser('update_items', aliases=['update-items'])
p_update_items.add_argument('--days', type=float, required=True)
p_update_items.add_argument('--threads', type=int, default=None)
p_update_items.add_argument('--only_mature', | |
from enum import Enum
import random
from time import perf_counter
from .controller import ControllerTypes
from .utils import replace_subarray
class SwitchResponses(Enum):
NO_DATA = -1
MALFORMED = -2
TOO_SHORT = -3
UNKNOWN_SUBCOMMAND = -4
REQUEST_DEVICE_INFO = 2
SET_SHIPMENT = 0x08
SPI_READ = 0x10
SET_MODE = 0x03
TRIGGER_BUTTONS = 0x04
TOGGLE_IMU = 0x40
ENABLE_VIBRATION = 0x48
SET_PLAYER = 0x30
SET_NFC_IR_STATE = 0x22
SET_NFC_IR_CONFIG = 0x21
class ControllerProtocol():
CONTROLLER_INFO = {
ControllerTypes.JOYCON_L: {
"id": 0x01,
"connection_info": 0x0E
},
ControllerTypes.JOYCON_R: {
"id": 0x02,
"connection_info": 0x0E
},
ControllerTypes.PRO_CONTROLLER: {
"id": 0x03,
"connection_info": 0x00
}
}
VIBRATOR_BYTES = [0xA0, 0xB0, 0xC0, 0x90]
def __init__(self, controller_type, bt_address, report_size=50,
colour_body=None, colour_buttons=None):
"""Initializes the protocol for the controller.
:param controller_type: The type of controller (Joy-Con (L),
Pro Controller, etc)
:type controller_type: ControllerTypes
:param bt_address: A colon-separated Bluetooth MAC address
:type bt_address: string
:param report_size: The size of the protocol report, defaults to 50
:type report_size: int, optional
:param colour_body: Sets the body colour of the controller, defaults
to None
:type colour_body: list of bytes, optional
:param colour_buttons: Sets the colour of the controller buttons,
defaults to None
:type colour_buttons: list of bytes, optional
:raises ValueError: On unknown controller type
"""
self.bt_address = bt_address
if controller_type in self.CONTROLLER_INFO.keys():
self.controller_type = controller_type
else:
raise ValueError("Unknown controller type specified")
self.report = None
self.report_size = report_size
self.set_empty_report()
# Input report mode
self.mode = None
# Player number
self.player_number = None
# Setting if the controller has been asked for device info
# Enables buttons/stick output for the standard full report
self.device_info_queried = False
# Standard Input Report Properties
# Timestamp to generate timer byte ticks
self.timer = 0
self.timestamp = None
# High/Low Nibble
self.battery_level = 0x90
self.connection_info = (
self.CONTROLLER_INFO[self.controller_type]["connection_info"])
self.button_status = [0x00] * 3
# Disable left stick if we have a right Joy-Con
if self.controller_type == ControllerTypes.JOYCON_R:
self.left_stick_centre = [0x00] * 3
else:
# Center values which are also reported under
# SPI Stick calibration reads
self.left_stick_centre = [0x6F, 0xC8, 0x77]
# Disable right stick if we have a left Joy-Con
if self.controller_type == ControllerTypes.JOYCON_L:
self.right_stick_centre = [0x00] * 3
else:
# Center values which are also reported under
# SPI Stick calibration reads
self.right_stick_centre = [0x16, 0xD8, 0x7D]
self.vibration_enabled = False
self.vibrator_report = random.choice(self.VIBRATOR_BYTES)
# IMU (Six Axis Sensor) State
self.imu_enabled = False
# Controller colours
# Body Colour
if not colour_body:
self.colour_body = [0x82] * 3
else:
self.colour_body = colour_body
if not colour_buttons:
self.colour_buttons = [0x0F] * 3
else:
self.colour_buttons = colour_buttons
def get_report(self):
report = bytes(self.report)
# Clear report
self.set_empty_report()
return report
def process_commands(self, data):
# Parsing the Switch's message
message = SwitchReportParser(data)
# Responding to the parsed message
if message.response == SwitchResponses.REQUEST_DEVICE_INFO:
self.device_info_queried = True
self.set_subcommand_reply()
self.set_device_info()
elif message.response == SwitchResponses.SET_SHIPMENT:
self.set_subcommand_reply()
self.set_shipment()
elif message.response == SwitchResponses.SPI_READ:
self.set_subcommand_reply()
self.spi_read(message)
elif message.response == SwitchResponses.SET_MODE:
self.set_subcommand_reply()
self.set_mode(message)
elif message.response == SwitchResponses.TRIGGER_BUTTONS:
self.set_subcommand_reply()
self.set_trigger_buttons()
elif message.response == SwitchResponses.TOGGLE_IMU:
self.set_subcommand_reply()
self.toggle_imu(message)
elif message.response == SwitchResponses.ENABLE_VIBRATION:
self.set_subcommand_reply()
self.enable_vibration()
elif message.response == SwitchResponses.SET_PLAYER:
self.set_subcommand_reply()
self.set_player_lights(message)
elif message.response == SwitchResponses.SET_NFC_IR_STATE:
self.set_subcommand_reply()
self.set_nfc_ir_state()
elif message.response == SwitchResponses.SET_NFC_IR_CONFIG:
self.set_subcommand_reply()
self.set_nfc_ir_config()
# Bad Packet handling statements
elif message.response == SwitchResponses.UNKNOWN_SUBCOMMAND:
# Currently set so that the controller ignores any unknown
# subcommands. This is better than sending a NACK response
# since we'd just get stuck in an infinite loop arguing
# with the Switch.
self.set_full_input_report()
elif message.response == SwitchResponses.NO_DATA:
self.set_full_input_report()
elif message.response == SwitchResponses.TOO_SHORT:
self.set_full_input_report()
elif message.response == SwitchResponses.MALFORMED:
self.set_full_input_report()
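    # Illustrative usage (assumption, not part of this class): a transport layer would
    # typically alternate the two calls above, e.g.
    #     protocol.process_commands(data_from_switch)  # parse the message, stage a reply
    #     reply = protocol.get_report()                # bytes to send back (report is then cleared)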
def set_empty_report(self):
empty_report = [0] * self.report_size
empty_report[0] = 0xA1
self.report = empty_report
def set_subcommand_reply(self):
# Input Report ID
self.report[1] = 0x21
# TODO: Find out what the vibrator byte is doing.
# This is a hack in an attempt to semi-emulate
# actions of the vibrator byte as it seems to change
# when a subcommand reply is sent.
self.vibrator_report = random.choice(self.VIBRATOR_BYTES)
self.set_standard_input_report()
def set_unknown_subcommand(self, subcommand_id):
        # Set NACK (0x00); the report buffer is zero-initialised, so this just makes the intent explicit
        self.report[14] = 0x00
# Set unknown subcommand ID
self.report[15] = subcommand_id
def set_timer(self):
# If the timer hasn't been set before
if not self.timestamp:
self.timestamp = perf_counter()
self.report[2] = 0x00
return
# Get the time that has passed since the last timestamp
# in milliseconds
now = perf_counter()
delta_t = (now - self.timestamp) * 1000
# Get how many ticks have passed in hex with overflow at 255
# Joy-Con uses 4.96ms as the timer tick rate
elapsed_ticks = int(delta_t * 4)
self.timer = (self.timer + elapsed_ticks) & 0xFF
self.report[2] = self.timer
self.timestamp = now
def set_full_input_report(self):
# Setting Report ID to full standard input report ID
self.report[1] = 0x30
self.set_standard_input_report()
self.set_imu_data()
def set_standard_input_report(self):
self.set_timer()
if self.device_info_queried:
self.report[3] = self.battery_level + self.connection_info
self.report[4] = self.button_status[0]
self.report[5] = self.button_status[1]
self.report[6] = self.button_status[2]
self.report[7] = self.left_stick_centre[0]
self.report[8] = self.left_stick_centre[1]
self.report[9] = self.left_stick_centre[2]
self.report[10] = self.right_stick_centre[0]
self.report[11] = self.right_stick_centre[1]
self.report[12] = self.right_stick_centre[2]
self.report[13] = self.vibrator_report
def set_button_inputs(self, upper, shared, lower):
self.report[4] = upper
self.report[5] = shared
self.report[6] = lower
def set_left_stick_inputs(self, left):
self.report[7] = left[0]
self.report[8] = left[1]
self.report[9] = left[2]
def set_right_stick_inputs(self, right):
self.report[10] = right[0]
self.report[11] = right[1]
self.report[12] = right[2]
def set_device_info(self):
# ACK Reply
self.report[14] = 0x82
# Subcommand Reply
self.report[15] = 0x02
# Firmware version
self.report[16] = 0x03
self.report[17] = 0x8B
# Controller ID
self.report[18] = self.CONTROLLER_INFO[self.controller_type]["id"]
# Unknown Byte, always 2
self.report[19] = 0x02
# Controller Bluetooth Address
address = self.bt_address.strip().split(":") # Getting from adapter
address_location = 20
for address_byte_str in address:
# Converting string address bytes to hex
# and assigning to report
address_byte = int(address_byte_str, 16)
self.report[address_location] = address_byte
address_location += 1
# Unknown byte, always 1
self.report[26] = 0x01
# Controller colours location (read from SPI)
self.report[27] = 0x01
def set_shipment(self):
# ACK Reply
self.report[14] = 0x80
# Subcommand reply
self.report[15] = 0x08
def toggle_imu(self, message):
if message.subcommand[1] == 0x01:
self.imu_enabled = True
else:
self.imu_enabled = False
# ACK Reply
self.report[14] = 0x80
# Subcommand reply
self.report[15] = 0x40
def set_imu_data(self):
if not self.imu_enabled:
return
imu_data = [0x75, 0xFD, 0xFD, 0xFF, 0x09, 0x10, 0x21, 0x00, 0xD5, 0xFF,
0xE0, 0xFF, 0x72, 0xFD, 0xF9, 0xFF, 0x0A, 0x10, 0x22, 0x00,
0xD5, 0xFF, 0xE0, 0xFF, 0x76, 0xFD, 0xFC, 0xFF, 0x09, 0x10,
0x23, 0x00, 0xD5, 0xFF, 0xE0, 0xFF]
replace_subarray(self.report, 14, 49, replace_arr=imu_data)
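    # The canned payload above is 36 bytes: per the community reverse-engineering notes this is
    # read as 3 IMU frames of 12 bytes each (3-axis accelerometer + 3-axis gyroscope,
    # 2 bytes per axis), which is the layout the 0x30 full input report expects.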
def spi_read(self, message):
addr_top = message.subcommand[2]
addr_bottom = message.subcommand[1]
read_length = message.subcommand[5]
# ACK byte
self.report[14] = 0x90
# Subcommand reply
self.report[15] = 0x10
# Read address
self.report[16] = addr_bottom
self.report[17] = addr_top
# Read length
self.report[20] = read_length
# Stick Parameters
# Params are generally the same for all sticks
# Notable difference is the deadzone (10% Joy-Con vs 15% Pro Con)
params = [0x0F, 0x30, 0x61, # Unused
0x96, 0x30, 0xF3, # Dead Zone/Range Ratio
0xD4, 0x14, 0x54, # X/Y ?
0x41, 0x15, 0x54, # X/Y ?
0xC7, 0x79, 0x9C, # X/Y ?
0x33, 0x36, 0x63] # X/Y ?
# Adjusting deadzone for Joy-Cons
if not self.controller_type == ControllerTypes.PRO_CONTROLLER:
params[3] = 0xAE
# Serial Number read
if addr_top == 0x60 and addr_bottom == 0x00:
# Switch will take this as no serial number
replace_subarray(self.report, 21, 16, 0xFF)
# Colours
elif addr_top == 0x60 and addr_bottom == 0x50:
# Body colour
replace_subarray(
self.report, 21, 3,
replace_arr=self.colour_body)
# Buttons colour
replace_subarray(
self.report, 24, 3,
replace_arr=self.colour_buttons)
# Left/right grip colours (Pro controller)
replace_subarray(self.report, 27, 7, 0xFF)
# Factory sensor/stick device parameters
elif addr_top == 0x60 and addr_bottom == 0x80:
# Six-Axis factory parameters
if self.controller_type == ControllerTypes.PRO_CONTROLLER:
self.report[21] = 0x50
self.report[22] = 0xFD
self.report[23] = 0x00
self.report[24] = 0x00
self.report[25] = 0xC6
self.report[26] = 0x0F
else:
self.report[21] = 0x5E
self.report[22] = 0x01
self.report[23] = 0x00
self.report[24] = 0x00
if self.controller_type == ControllerTypes.JOYCON_L:
self.report[25] = 0xF1
self.report[26] = 0x0F
else:
self.report[25] = 0x0F
self.report[26] = 0xF0
replace_subarray(self.report, 27, 18, replace_arr=params)
# Stick device parameters 2
elif addr_top == 0x60 and addr_bottom == 0x98:
# Setting same params since controllers always
# have duplicates of stick params 1 for stick params 2
replace_subarray(self.report, 21, 18, replace_arr=params)
# User analog stick calibration
elif addr_top == | |
dist.write(' It is not required for loading .wav, .ogg or .opus files, which Panda3D can read out of the box.">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.ffmpeg.pkg"/>\n')
dist.write(' </choice>\n')
#if not PkgSkip("OPENAL"):
# dist.write(' <choice id="openal" title="OpenAL Audio Plug-In" tooltip="OpenAL audio output plug-in" description="This package contains the OpenAL audio plug-in, which is an open-source library for playing sounds.">\n')
# dist.write(' <pkg-ref id="org.panda3d.panda3d.openal.pkg"/>\n')
# dist.write(' </choice>\n')
if not PkgSkip("FMODEX"):
dist.write(' <choice id="fmodex" title="FMOD Ex Plug-In" tooltip="FMOD Ex audio output plug-in" description="This package contains the FMOD Ex audio plug-in, which is a commercial library for playing sounds. It is an optional component as Panda3D can use the open-source alternative OpenAL instead.">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.fmodex.pkg"/>\n')
dist.write(' </choice>\n')
if os.path.isdir("samples"):
dist.write(' <choice id="samples" title="Sample Programs" tooltip="Python sample programs that use Panda3D" description="This package contains the Python sample programs that can help you with learning how to use Panda3D. Location: /Developer/Examples/Panda3D/">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.samples.pkg"/>\n')
dist.write(' </choice>\n')
dist.write(' <choice id="headers" title="C++ Header Files" tooltip="Header files for C++ development with Panda3D" description="This package contains the C++ header files that are needed in order to do C++ development with Panda3D. You don\'t need this if you want to develop in Python. Location: /Developer/Panda3D/include/" start_selected="false">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.headers.pkg"/>\n')
dist.write(' </choice>\n')
for pkg in pkgs:
size = GetDirectorySize("dstroot/" + pkg) // 1024
dist.write(' <pkg-ref id="org.panda3d.panda3d.%s.pkg" installKBytes="%d" version="1" auth="Root">file:./Contents/Packages/%s.pkg</pkg-ref>\n' % (pkg, size, pkg))
dist.write('</installer-script>\n')
dist.close()
oscmd('hdiutil create Panda3D-rw.dmg -volname "Panda3D SDK %s" -srcfolder dstroot/Panda3D' % (version))
oscmd('hdiutil convert Panda3D-rw.dmg -format UDBZ -o %s' % (dmg_name))
oscmd('rm -f Panda3D-rw.dmg')
def MakeInstallerFreeBSD(version, runtime=False, **kwargs):
outputdir = GetOutputDir()
oscmd("rm -rf targetroot +DESC pkg-plist +MANIFEST")
oscmd("mkdir targetroot")
# Invoke installpanda.py to install it into a temporary dir
if runtime:
InstallRuntime(destdir="targetroot", prefix="/usr/local", outputdir=outputdir)
else:
InstallPanda(destdir="targetroot", prefix="/usr/local", outputdir=outputdir)
if not os.path.exists("/usr/sbin/pkg"):
exit("Cannot create an installer without pkg")
plist_txt = ''
for root, dirs, files in os.walk("targetroot/usr/local/", True):
for f in files:
plist_txt += os.path.join(root, f)[21:] + "\n"
if not runtime:
plist_txt += "@postexec /sbin/ldconfig -m /usr/local/lib/panda3d\n"
plist_txt += "@postunexec /sbin/ldconfig -R\n"
for remdir in ("lib/panda3d", "share/panda3d", "include/panda3d"):
for root, dirs, files in os.walk("targetroot/usr/local/" + remdir, False):
for d in dirs:
plist_txt += "@dir %s\n" % os.path.join(root, d)[21:]
plist_txt += "@dir %s\n" % remdir
oscmd("echo \"`pkg config abi | tr '[:upper:]' '[:lower:]' | cut -d: -f1,2`:*\" > " + outputdir + "/tmp/architecture.txt")
pkg_arch = ReadFile(outputdir+"/tmp/architecture.txt").strip()
dependencies = ''
if not PkgSkip("PYTHON"):
        # If this version of Python was installed from a package or ports, let's mark it as a dependency.
oscmd("rm -f %s/tmp/python_dep" % outputdir)
if "PYTHONVERSION" in SDK:
pyver_nodot = SDK["PYTHONVERSION"][6:9:2]
else:
pyver_nodot = "%d%d" % (sys.version_info[:2])
oscmd("pkg query \"\n\t%%n : {\n\t\torigin : %%o,\n\t\tversion : %%v\n\t},\n\" python%s > %s/tmp/python_dep" % (pyver_nodot, outputdir), True)
if os.path.isfile(outputdir + "/tmp/python_dep"):
python_pkg = ReadFile(outputdir + "/tmp/python_dep")
if python_pkg:
dependencies += python_pkg
manifest_txt = INSTALLER_PKG_MANIFEST_FILE[1:].replace("NAME", 'panda3d' if not runtime else 'panda3d-runtime')
manifest_txt = manifest_txt.replace("VERSION", version)
manifest_txt = manifest_txt.replace("ARCH", pkg_arch)
manifest_txt = manifest_txt.replace("ORIGIN", 'devel/panda3d' if not runtime else 'graphics/panda3d-runtime')
manifest_txt = manifest_txt.replace("DEPENDS", dependencies)
manifest_txt = manifest_txt.replace("INSTSIZE", str(GetDirectorySize("targetroot") / 1024 / 1024))
WriteFile("pkg-plist", plist_txt)
WriteFile("+DESC", INSTALLER_PKG_DESCR_FILE[1:] if not runtime else RUNTIME_INSTALLER_PKG_DESCR_FILE[1:])
WriteFile("+MANIFEST", manifest_txt)
oscmd("pkg create -p pkg-plist -r %s -m . -o . %s" % (os.path.abspath("targetroot"), "--verbose" if GetVerbose() else "--quiet"))
def MakeInstallerAndroid(version, **kwargs):
outputdir = GetOutputDir()
oscmd("rm -rf apkroot")
oscmd("mkdir apkroot")
# Also remove the temporary apks.
apk_unaligned = os.path.join(outputdir, "tmp", "panda3d-unaligned.apk")
apk_unsigned = os.path.join(outputdir, "tmp", "panda3d-unsigned.apk")
if os.path.exists(apk_unaligned):
os.unlink(apk_unaligned)
if os.path.exists(apk_unsigned):
os.unlink(apk_unsigned)
# Compile the Java classes into a Dalvik executable.
dx_cmd = "dx --dex --output=apkroot/classes.dex "
if GetOptimize() <= 2:
dx_cmd += "--debug "
if GetVerbose():
dx_cmd += "--verbose "
if "ANDROID_API" in SDK:
dx_cmd += "--min-sdk-version=%d " % (SDK["ANDROID_API"])
dx_cmd += os.path.join(outputdir, "classes")
oscmd(dx_cmd)
# Copy the libraries one by one. In case of library dependencies, strip
    # off any version suffix (e.g. libfile.so.1.0), as Android does not support them.
source_dir = os.path.join(outputdir, "lib")
target_dir = os.path.join("apkroot", "lib", SDK["ANDROID_ABI"])
oscmd("mkdir -p %s" % (target_dir))
# Determine the library directories we should look in.
libpath = [source_dir]
for dir in os.environ.get("LD_LIBRARY_PATH", "").split(':'):
dir = os.path.expandvars(dir)
dir = os.path.expanduser(dir)
if os.path.isdir(dir):
dir = os.path.realpath(dir)
if not dir.startswith("/system") and not dir.startswith("/vendor"):
libpath.append(dir)
def copy_library(source, base):
# Copy file to destination, stripping version suffix.
target = os.path.join(target_dir, base)
if not target.endswith('.so'):
target = target.rpartition('.so.')[0] + '.so'
if os.path.isfile(target):
# Already processed.
return
oscmd("cp %s %s" % (source, target))
# Walk through the library dependencies.
oscmd("ldd %s | grep .so > %s/tmp/otool-libs.txt" % (target, outputdir), True)
for line in open(outputdir + "/tmp/otool-libs.txt", "r"):
line = line.strip()
if not line:
continue
if '.so.' in line:
dep = line.rpartition('.so.')[0] + '.so'
oscmd("patchelf --replace-needed %s %s %s" % (line, dep, target), True)
else:
dep = line
# Find it on the LD_LIBRARY_PATH.
for dir in libpath:
fulldep = os.path.join(dir, dep)
if os.path.isfile(fulldep):
copy_library(os.path.realpath(fulldep), dep)
break
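    # For instance, copy_library strips version suffixes with rpartition:
    #   "libfoo.so.1.0".rpartition(".so.") -> ("libfoo", ".so.", "1.0"), so the copied target
    #   becomes "libfoo.so", and the same renaming is applied to the dependency entries
    #   via the patchelf --replace-needed call above.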
# Now copy every lib in the lib dir, and its dependencies.
for base in os.listdir(source_dir):
if not base.startswith('lib'):
continue
if not base.endswith('.so') and '.so.' not in base:
continue
source = os.path.join(source_dir, base)
if os.path.islink(source):
continue
copy_library(source, base)
# Same for Python extension modules. However, Android is strict about
# library naming, so we have a special naming scheme for these, in
# conjunction with a custom import hook to find these modules.
if not PkgSkip("PYTHON"):
suffix = GetExtensionSuffix()
source_dir = os.path.join(outputdir, "panda3d")
for base in os.listdir(source_dir):
if not base.endswith(suffix):
continue
modname = base[:-len(suffix)]
if '.' not in modname:
source = os.path.join(source_dir, base)
copy_library(source, "libpy.panda3d.{}.so".format(modname))
# Same for standard Python modules.
import _ctypes
source_dir = os.path.dirname(_ctypes.__file__)
for base in os.listdir(source_dir):
if not base.endswith('.so'):
continue
modname = base.partition('.')[0]
source = os.path.join(source_dir, base)
copy_library(source, "libpy.{}.so".format(modname))
def copy_python_tree(source_root, target_root):
for source_dir, dirs, files in os.walk(source_root):
if 'site-packages' in dirs:
dirs.remove('site-packages')
if not any(base.endswith('.py') for base in files):
continue
target_dir = os.path.join(target_root, os.path.relpath(source_dir, source_root))
target_dir = os.path.normpath(target_dir)
os.makedirs(target_dir, 0o755)
for base in files:
if base.endswith('.py'):
target = os.path.join(target_dir, base)
shutil.copy(os.path.join(source_dir, base), target)
# Copy the Python standard library to the .apk as well.
from distutils.sysconfig import get_python_lib
stdlib_source = get_python_lib(False, True)
stdlib_target = os.path.join("apkroot", "lib", "python{0}.{1}".format(*sys.version_info))
copy_python_tree(stdlib_source, stdlib_target)
# But also copy over our custom site.py.
shutil.copy("panda/src/android/site.py", os.path.join(stdlib_target, "site.py"))
# And now make a site-packages directory containing our direct/panda3d/pandac modules.
for tree in "panda3d", "direct", "pandac":
copy_python_tree(os.path.join(outputdir, tree), os.path.join(stdlib_target, "site-packages", tree))
# Copy the models and config files to the virtual assets filesystem.
oscmd("mkdir apkroot/assets")
oscmd("cp -R %s apkroot/assets/models" % (os.path.join(outputdir, "models")))
oscmd("cp -R %s apkroot/assets/etc" % (os.path.join(outputdir, "etc")))
# Make an empty res folder. It's needed for the apk to be installable, apparently.
oscmd("mkdir apkroot/res")
# Now package up the application
oscmd("cp panda/src/android/pview_manifest.xml apkroot/AndroidManifest.xml")
aapt_cmd = "aapt package"
aapt_cmd += " -F %s" % (apk_unaligned)
aapt_cmd += " -M apkroot/AndroidManifest.xml"
aapt_cmd += " -A apkroot/assets -S apkroot/res"
aapt_cmd += " -I $PREFIX/share/aapt/android.jar"
oscmd(aapt_cmd)
# And add all the libraries to it.
oscmd("cd apkroot && aapt add ../%s classes.dex" % (apk_unaligned))
for path, dirs, files in os.walk('apkroot/lib'):
if files:
rel = os.path.relpath(path, 'apkroot')
oscmd("cd apkroot && aapt add ../%s %s/*" % (apk_unaligned, rel))
# Now align the .apk, which is necessary for Android to load it.
oscmd("zipalign -v -p 4 %s %s" % (apk_unaligned, apk_unsigned))
# Finally, sign it using a debug key. This is generated if it doesn't exist.
oscmd("apksigner debug.ks %s panda3d.apk" % (apk_unsigned))
# Clean up.
oscmd("rm -rf apkroot")
os.unlink(apk_unaligned)
os.unlink(apk_unsigned)
def MakeInstaller(version, **kwargs):
target = GetTarget()
if target == 'windows':
fn = "Panda3D-"
dir = "Panda3D-" + version
runtime = kwargs.get('runtime', False)
if runtime:
fn += "Runtime-"
title = "Panda3D " + version
else:
title = "Panda3D SDK " + version
fn += version
if "PYTHONVERSION" in SDK:
pyver = SDK["PYTHONVERSION"][6:9]
else:
pyver = "%d.%d" % (sys.version_info[:2])
if not runtime and pyver != "2.7":
fn += '-py' + pyver
if GetOptimize() <= 2:
fn += "-dbg"
if GetTargetArch() == 'x64':
fn += '-x64'
dir += '-x64'
MakeInstallerNSIS(version, fn + '.exe', title, 'C:\\' + dir, **kwargs)
if not runtime:
MakeDebugSymbolArchive(fn + '-pdb.zip', dir)
| |
== 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = ''
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
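# A couple of concrete cases (illustrative):
#   quote_xml('a < b & c')                  -> 'a &lt; b &amp; c'
#   quote_xml('a < b <![CDATA[x < y]]> c')  -> 'a &lt; b <![CDATA[x < y]]> c'
# i.e. markup characters outside a CDATA section are escaped, the section itself is kept verbatim.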
def quote_xml_aux(inStr):
s1 = inStr.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
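# quote_attrib() also picks the surrounding quote style (illustrative):
#   a value without quotes      -> wrapped in double quotes
#   a value containing "        -> wrapped in single quotes instead
#   a value containing " and '  -> " is escaped as &quot; and the result is double-quoted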
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name_=name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class IntlRateV2Request(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, USERID=None, Revision=None, Package=None, **kwargs_):
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.USERID = _cast(None, USERID)
self.Revision = Revision
if Package is None:
self.Package = []
else:
self.Package = Package
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, IntlRateV2Request)
if subclass is not None:
return subclass(*args_, **kwargs_)
if IntlRateV2Request.subclass:
return IntlRateV2Request.subclass(*args_, **kwargs_)
else:
return IntlRateV2Request(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Revision(self):
return self.Revision
def set_Revision(self, Revision):
self.Revision = Revision
def get_Package(self):
return self.Package
def set_Package(self, Package):
self.Package = Package
def add_Package(self, value):
self.Package.append(value)
def insert_Package_at(self, index, value):
self.Package.insert(index, value)
def replace_Package_at(self, index, value):
self.Package[index] = value
def get_USERID(self):
return self.USERID
def set_USERID(self, USERID):
self.USERID = USERID
def hasContent_(self):
if (
self.Revision is not None or
self.Package
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='IntlRateV2Request', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('IntlRateV2Request')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='IntlRateV2Request')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='IntlRateV2Request', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='IntlRateV2Request'):
if self.USERID is not None and 'USERID' not in already_processed:
already_processed.add('USERID')
outfile.write(' USERID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.USERID), input_name='USERID')), ))
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='IntlRateV2Request', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Revision is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sRevision>%s</%sRevision>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Revision), input_name='Revision')), namespaceprefix_ , eol_))
for Package_ in self.Package:
Package_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Package', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('USERID', node)
if value is not None and 'USERID' not in already_processed:
already_processed.add('USERID')
self.USERID = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if
180, 270):
raise ValueError("bad rotate value")
r = Rect(rect)
if r.isEmpty or r.isInfinite:
raise ValueError("rect must be finite and not empty")
_imgpointer = None
# -------------------------------------------------------------------------
# Calculate the matrix for image insertion.
# -------------------------------------------------------------------------
# If aspect ratio must be kept, we need to know image width and height.
# Easy for pixmaps. For file and stream cases, we make an fz_image and
# take those values from it. In this case, we also hand the fz_image over
# to the actual C-level function (_imgpointer), and set all other
# parameters to None.
# -------------------------------------------------------------------------
if keep_proportion is True: # for this we need the image dimension
if pixmap: # this is the easy case
w = pixmap.width
h = pixmap.height
digest = calc_hash(pixmap.samples)
elif stream: # use tool to access the information
# we also pass through the generated fz_image address
if type(stream) is io.BytesIO:
stream = stream.getvalue()
img_prof = TOOLS.image_profile(stream, keep_image=True)
w, h = img_prof["width"], img_prof["height"]
digest = calc_hash(stream)
stream = None # make sure this arg is NOT used
_imgpointer = img_prof["image"] # pointer to fz_image
else: # worst case: must read the file
stream = open(filename, "rb").read()
digest = calc_hash(stream)
img_prof = TOOLS.image_profile(stream, keep_image=True)
w, h = img_prof["width"], img_prof["height"]
stream = None # make sure this arg is NOT used
filename = None # make sure this arg is NOT used
_imgpointer = img_prof["image"] # pointer to fz_image
maxf = max(w, h)
fw = w / maxf
fh = h / maxf
else:
fw = fh = 1.0
clip = r * ~page.transformationMatrix # target rect in PDF coordinates
matrix = calc_matrix(fw, fh, clip, rotate=rotate) # calculate matrix
# Create a unique image reference name.
ilst = [i[7] for i in doc.getPageImageList(page.number)]
n = "Im" # 'fitz image'
i = 0
_imgname = n + "0" # first name candidate
while _imgname in ilst:
i += 1
_imgname = n + str(i) # try new name
xref = doc.InsertedImages.get(digest, 0) # reuse any previously inserted image
xref = page._insertImage(
filename=filename, # image in file
pixmap=pixmap, # image in pixmap
stream=stream, # image in memory
imask=mask,
matrix=matrix, # generated matrix
overlay=overlay,
oc=oc, # optional content object
xref=xref,
_imgname=_imgname, # generated PDF resource name
_imgpointer=_imgpointer, # address of fz_image
)
if xref > 0:
doc.InsertedImages[digest] = xref
def searchFor(*args, **kwargs) -> list:
"""Search for a string on a page.
Args:
text: string to be searched for
clip: restrict search to this rectangle
quads: (bool) return quads instead of rectangles
        flags: bit switches, default: join hyphenated words
Returns:
a list of rectangles or quads, each containing one occurrence.
"""
if len(args) != 2:
raise ValueError("bad number of positional parameters")
page, text = args
quads = kwargs.get("quads", 0)
clip = kwargs.get("clip")
flags = kwargs.get("flags", TEXT_DEHYPHENATE)
CheckParent(page)
if flags is None:
flags = TEXT_DEHYPHENATE
tp = page.getTextPage(clip=clip, flags=flags) # create TextPage
rlist = tp.search(text, quads=quads)
tp = None
return rlist
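# Illustrative usage of the search helper above (not part of the module):
# assumes an open fitz.Document and the classic binding as Page.searchFor.
def _example_search_for():
    import fitz

    doc = fitz.open("report.pdf")                   # hypothetical file
    page = doc[0]
    rects = page.searchFor("revenue")               # list of fitz.Rect hits
    quads = page.searchFor("revenue", quads=True)   # same hits as fitz.Quad
    return rects, quads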
def searchPageFor(
doc: Document,
pno: int,
text: str,
hit_max: int = 0,
quads: bool = False,
clip: rect_like = None,
flags: int = TEXT_DEHYPHENATE,
) -> list:
"""Search for a string on a page.
Args:
pno: page number
text: string to be searched for
clip: restrict search to this rectangle
quads: (bool) return quads instead of rectangles
        flags: bit switches, default: join hyphenated words
Returns:
a list of rectangles or quads, each containing an occurrence.
"""
return doc[pno].searchFor(
text,
quads=quads,
clip=clip,
flags=flags,
)
def getTextBlocks(
page: Page,
clip: rect_like = None,
flags: OptInt = None,
) -> list:
"""Return the text blocks on a page.
Notes:
Lines in a block are concatenated with line breaks.
Args:
flags: (int) control the amount of data parsed into the textpage.
Returns:
A list of the blocks. Each item contains the containing rectangle
coordinates, text lines, block type and running block number.
"""
CheckParent(page)
if flags is None:
flags = TEXT_PRESERVE_WHITESPACE + TEXT_PRESERVE_IMAGES
tp = page.getTextPage(clip=clip, flags=flags)
blocks = tp.extractBLOCKS()
del tp
return blocks
def getTextWords(
page: Page,
clip: rect_like = None,
flags: OptInt = None,
) -> list:
"""Return the text words as a list with the bbox for each word.
Args:
flags: (int) control the amount of data parsed into the textpage.
"""
CheckParent(page)
if flags is None:
flags = TEXT_PRESERVE_WHITESPACE
tp = page.getTextPage(clip=clip, flags=flags)
words = tp.extractWORDS()
del tp
return words
def getTextbox(
page: Page,
rect: rect_like,
) -> str:
rc = page.getText("text", clip=rect, flags=0)
if rc.endswith("\n"):
rc = rc[:-1]
return rc
def getTextSelection(
page: Page,
p1: point_like,
p2: point_like,
clip: rect_like = None,
):
CheckParent(page)
tp = page.getTextPage(clip=clip, flags=TEXT_DEHYPHENATE)
rc = tp.extractSelection(p1, p2)
del tp
return rc
def getText(
page: Page,
option: str = "text",
clip: rect_like = None,
flags: OptInt = None,
):
"""Extract text from a page or an annotation.
This is a unifying wrapper for various methods of the TextPage class.
Args:
option: (str) text, words, blocks, html, dict, json, rawdict, xhtml or xml.
clip: (rect-like) restrict output to this area.
flags: bitfield to e.g. exclude images.
Returns:
the output of methods getTextWords / getTextBlocks or TextPage
methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT,
        extractXHTML or extractXML respectively.
        Unknown or misspelled options fall back to "text".
"""
formats = {
"text": 0,
"html": 1,
"json": 1,
"rawjson": 1,
"xml": 0,
"xhtml": 1,
"dict": 1,
"rawdict": 1,
"words": 0,
"blocks": 1,
}
option = option.lower()
if option not in formats:
option = "text"
if flags is None:
flags = TEXT_PRESERVE_WHITESPACE
if formats[option] == 1:
flags += TEXT_PRESERVE_IMAGES
if option == "words":
return getTextWords(page, clip=clip, flags=flags)
if option == "blocks":
return getTextBlocks(page, clip=clip, flags=flags)
CheckParent(page)
    if clip is not None:
clip = Rect(clip)
tp = page.getTextPage(clip=clip, flags=flags) # TextPage with or without images
if option == "json":
t = tp.extractJSON()
elif option == "rawjson":
t = tp.extractRAWJSON()
elif option == "dict":
t = tp.extractDICT()
elif option == "rawdict":
t = tp.extractRAWDICT()
elif option == "html":
t = tp.extractHTML()
elif option == "xml":
t = tp.extractXML()
elif option == "xhtml":
t = tp.extractXHTML()
else:
t = tp.extractText()
del tp
return t
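# Sketch of the option dispatch above (illustrative; assumes a fitz.Page
# object named "page" and the classic Page.getText binding).
def _example_get_text(page):
    plain = page.getText("text")        # plain text with default flags
    words = page.getText("words")       # word tuples incl. bounding boxes
    blocks = page.getText("blocks")     # block tuples incl. bbox and text
    info = page.getText("dict")         # structured dict, images included
    return plain, words, blocks, info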
def getPageText(
doc: Document,
pno: int,
option: str = "text",
clip: rect_like = None,
flags: OptInt = None,
) -> typing.Any:
"""Extract a document page's text by page number.
Notes:
Convenience function calling page.getText().
Args:
pno: page number
option: (str) text, words, blocks, html, dict, json, rawdict, xhtml or xml.
Returns:
output from page.TextPage().
"""
return doc[pno].getText(option, clip=clip, flags=flags)
def getPixmap(page: Page, **kw) -> Pixmap:
"""Create pixmap of page.
Args:
matrix: Matrix for transformation (default: Identity).
colorspace: (str/Colorspace) cmyk, rgb, gray - case ignored, default csRGB.
clip: (irect-like) restrict rendering to this area.
alpha: (bool) whether to include alpha channel
annots: (bool) whether to also render annotations
"""
CheckParent(page)
matrix = kw.get("matrix", Identity)
colorspace = kw.get("colorspace", csRGB)
clip = kw.get("clip")
alpha = bool(kw.get("alpha", False))
annots = bool(kw.get("annots", True))
if type(colorspace) is str:
if colorspace.upper() == "GRAY":
colorspace = csGRAY
elif colorspace.upper() == "CMYK":
colorspace = csCMYK
else:
colorspace = csRGB
if colorspace.n not in (1, 3, 4):
raise ValueError("unsupported colorspace")
dl = page.getDisplayList(annots=annots)
pix = dl.getPixmap(matrix=matrix, colorspace=colorspace, alpha=alpha, clip=clip)
dl = None
return pix
# doc = page.parent
# return page._makePixmap(doc, matrix, colorspace, alpha, annots, clip)
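# Rendering sketch (illustrative; assumes the classic Page.getPixmap and
# Pixmap.writePNG names): render a page at 2x zoom in RGB without alpha.
def _example_get_pixmap(page):
    import fitz

    mat = fitz.Matrix(2, 2)                 # zoom factor 2 in x and y
    pix = page.getPixmap(matrix=mat, colorspace="rgb", alpha=False)
    pix.writePNG("page.png")                # hypothetical output file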
def getPagePixmap(
doc: Document,
pno: int,
matrix: matrix_like = Identity,
colorspace: Colorspace = csRGB,
clip: rect_like = None,
alpha: bool = False,
annots: bool = True,
) -> Pixmap:
"""Create pixmap of document page by page number.
Notes:
Convenience function calling page.getPixmap.
Args:
pno: (int) page number
matrix: Matrix for transformation (default: Identity).
        colorspace: (str, Colorspace) rgb, cmyk, gray - case ignored, default csRGB.
clip: (irect-like) restrict rendering to this area.
alpha: (bool) include alpha channel
annots: (bool) also render annotations
"""
return doc[pno].getPixmap(
matrix=matrix, colorspace=colorspace, clip=clip, alpha=alpha, annots=annots
)
def getLinkDict(ln) -> dict:
nl = {"kind": ln.dest.kind, "xref": 0}
try:
nl["from"] = ln.rect
except:
pass
pnt = Point(0, 0)
if ln.dest.flags & LINK_FLAG_L_VALID:
pnt.x = ln.dest.lt.x
if ln.dest.flags & LINK_FLAG_T_VALID:
pnt.y = ln.dest.lt.y
    if ln.dest.kind ==
"""
Utility functions for QS files.
"""
import traceback
import re
import math
import sys
import threading
import os
from PyQt6 import QtCore
from pineboolib.application import types, qsadictmodules
from pineboolib.core.utils import utils_base, logging
from pineboolib import application
from typing import (
Any,
Optional,
Union,
Match,
List,
Generator,
Callable,
Iterable,
Dict,
TYPE_CHECKING,
)
if TYPE_CHECKING:
from sqlalchemy.engine import ( # type: ignore [import] # noqa: F401, F821
base,
) # pragma: no cover
from pineboolib.interfaces import isession # pragma: no cover
LOGGER = logging.get_logger(__name__)
TIMERS: List[QtCore.QTimer] = []
class Switch(object):
"""
Switch emulation class.
from: http://code.activestate.com/recipes/410692/
This class provides the functionality we want. You only need to look at
this if you want to know how this works. It only needs to be defined
once, no need to muck around with its internals.
"""
def __init__(self, value: Any):
"""Construct new witch from initial value."""
self.value = value
self.fall = False
def __iter__(self) -> Generator:
"""Return the match method once, then stop."""
yield self.match
def match(self, *args: List[Any]) -> bool:
"""Indicate whether or not to enter a case suite."""
if self.fall or not args:
return True
elif self.value in args:
self.fall = True
return True
else:
return False
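# Usage sketch for the Switch recipe above (the values are made up): iterate
# once over the object and test cases; an empty case() acts as the default.
# For example, _switch_example("c") returns "got b or c".
def _switch_example(value: Any) -> str:
    for case in Switch(value):
        if case("a"):
            return "got a"
        if case("b", "c"):
            return "got b or c"
        if case():  # default branch
            return "no match"
    return "no match"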
class QsaRegExp(object):
"""
Regexp emulation class.
"""
result_: Optional[Match[str]]
def __init__(self, str_re: str, is_global: bool = False):
"""Create new regexp."""
self.str_re = str_re
self.pattern = re.compile(self.str_re)
self.is_global = is_global
self.result_ = None
def search(self, text: str) -> Optional[Match[str]]:
"""Return Match from search."""
self.result_ = None
if self.pattern is not None:
self.result_ = self.pattern.search(text)
return self.result_
def replace(self, target: str, new_value: str) -> str:
"""Replace string using regex."""
count = 1 if not self.is_global else 0
return self.pattern.sub(new_value, target, count)
def cap(self, i: int) -> Optional[str]:
"""Return regex group number "i"."""
if self.result_ is None:
return None
try:
return self.result_.group(i)
except Exception:
LOGGER.exception("Error calling cap(%s)" % i)
return None
def get_global(self) -> bool:
"""Return if regex is global."""
return self.is_global
def set_global(self, state: bool) -> None:
"""Set regex global flag."""
self.is_global = state
global_ = property(get_global, set_global)
def reg_exp(str_re: str) -> QsaRegExp:
"""
    Return a QsaRegExp object built from the given pattern string.
    @param str_re. Text string, optionally written as /pattern/ or /pattern/g
    @return the resulting QsaRegExp instance
"""
is_global = False
if str_re[-2:] == "/g":
str_re = str_re[:-2]
is_global = True
elif str_re[-1:] == "/":
str_re = str_re[:-1]
if str_re[:1] == "/":
str_re = str_re[1:]
return QsaRegExp(str_re, is_global)
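# Examples of the QSA-style pattern strings reg_exp() accepts (illustrative):
#   reg_exp(r"/\d+/g").replace("a1b22c333", "#")  -> "a#b#c#"    (global)
#   reg_exp(r"/\d+/").replace("a1b22c333", "#")   -> "a#b22c333" (first match only)
def _regexp_example() -> None:
    rx_all = reg_exp(r"/\d+/g")       # trailing "/g" sets the global flag
    assert rx_all.get_global() is True
    assert rx_all.replace("a1b22c333", "#") == "a#b#c#"
    rx_one = reg_exp(r"/\d+/")        # no "g": only the first match is replaced
    assert rx_one.replace("a1b22c333", "#") == "a#b22c333"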
class MathClass(object):
"""QSA Math emulation class."""
def abs(self, value: Union[int, float]) -> Union[int, float]:
"""Get absolute value."""
return math.fabs(value)
def acos(self, value: Union[float, int]) -> float:
"""Return the arc cosine."""
return math.acos(value)
def cos(self, value: Union[float, int]) -> float:
"""Return the cosine of value."""
return math.cos(value)
def asin(self, value: Union[float, int]) -> float:
"""Return the arc sine of value."""
return math.asin(value)
def sin(self, value: Union[float, int]) -> float:
"""Return the sine of value."""
return math.sin(value)
def atan(self, value: Union[float, int]) -> float:
"""Return the arc tangent of value."""
return math.atan(value)
def atan2(self, value_y: Union[float, int], value_x: Union[float, int]) -> float:
"""Return the arc tangent."""
return math.atan2(value_y, value_x)
def tan(self, value: Union[float, int]) -> float:
"""Return the tangent of value."""
return math.tan(value)
def exp(self, value: Union[float, int]) -> float:
"""Return e raised to the power of value."""
return math.exp(value)
def ceil(self, value: float) -> int:
"""Round number to its ceiling."""
return math.ceil(value)
def floor(self, value: float) -> int:
"""Round number to its floor."""
return math.floor(value)
def log(self, value: Union[float, int]) -> float:
"""Return the logarithm of value to the given base."""
return math.log(value)
def random(self) -> float:
"""Return a pseudo-random floating point number between 0 and 1."""
import random
return random.random()
def max(self, number1: Union[float, int], number2: Union[float, int]) -> Union[float, int]:
"""Return the largest of number1 and number2."""
return max([number1, number2])
def min(self, number1: Union[float, int], number2: Union[float, int]) -> Union[float, int]:
"""Return the smallest of number1 and number2."""
return min([number1, number2])
def pow(self, base_: float, exp: float) -> float:
"""Raise base to the power of exp."""
return math.pow(base_, exp)
def round(self, value_1: float, value_2: int = 2) -> float:
"""Round a number x to y decimal places."""
return round(float(value_1), value_2)
def sqrt(self, value: Union[float, int]) -> float:
"""Return the square root of the number passed in the parameter."""
return math.sqrt(value)
def _get_pi(self) -> float:
"""Return PI value."""
return 3.141592653589793
def _get_eulen(self) -> float:
"""Return eulers constant. The base for natural logarithms."""
return 2.718281828459045
def _get_ln2(self) -> float:
"""Return natural logarithm of 2."""
return 0.6931471805599453
def _get_ln10(self) -> float:
"""Return natural logarithm of 10."""
return 2.302585092994046
def _get_log2e(self) -> float:
"""Return base 2 logarithm of E."""
return 1.44269504089
def _get_log10e(self) -> float:
"""Return base 2 logarithm of E."""
return 0.4342944819
def _get_sqrt1_2(self) -> float:
"""Return square root of 1/2."""
return 0.7071067811865476
def _get_sqrt2(self) -> float:
"""Return square root of 2."""
return 1.4142135623730951
PI = property(_get_pi)
E = property(_get_eulen)
LN2 = property(_get_ln2)
LN10 = property(_get_ln10)
LOG2E = property(_get_log2e)
LOG10E = property(_get_log10e)
SQRT1_2 = property(_get_sqrt1_2)
SQRT2 = property(_get_sqrt2)
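# Quick illustration of the MathClass facade above (not part of the module API):
def _math_example() -> None:
    qsa_math = MathClass()
    assert qsa_math.max(2, 7) == 7
    assert qsa_math.round(3.14159, 2) == 3.14
    assert abs(qsa_math.PI - 3.141592653589793) < 1e-12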
def parse_float(value: Any) -> float:
"""
Convert to float from almost any value.
    @param value. Value to convert
    @return float value, or NaN if the value cannot be converted
"""
ret = 0.00
try:
if isinstance(value, str) and value.find(":") > -1:
            # Convert "hh:mm:ss" strings to hours
            list_ = value.split(":")
            value = float(list_[0])  # hours
            value += float(list_[1]) / 60  # minutes to hours
            value += float(list_[2]) / 3600  # seconds to hours
if isinstance(value, str):
try:
return float(value)
except Exception:
value = value.replace(".", "")
value = value.replace(",", ".")
try:
return float(value)
except Exception:
return float("nan")
else:
ret = 0.0 if value in (None, "") else float(value)
if ret == int(ret):
return int(ret)
return ret
except Exception:
LOGGER.exception("parseFloat: Error converting %s to float", value)
return float("nan")
def parse_string(obj: Any) -> str:
"""
Convert to string almost any value.
    @param obj. Value to convert
    @return str of the given object
"""
return obj.toString() if hasattr(obj, "toString") else str(obj)
def parse_int(value: Union[float, int, str], base_: int = 10) -> int:
"""
    Convert almost any value to int.
    @param value. Value to convert
    @return integer value
"""
ret_ = 0
tmp_value = str(value)
if tmp_value.find(".") > -1:
tmp_value = tmp_value[0 : tmp_value.find(".")]
if tmp_value.find(",") > -1:
tmp_value = tmp_value[0 : tmp_value.find(",")]
if value is not None:
# x = float(x)
ret_ = int(tmp_value, base_)
# ret_ = int(str(x), base)
return ret_
def length(obj: Any = "") -> int:
"""
Get length of any object.
    @param obj, object whose length is requested
    @return length of the object
"""
if obj is None:
return 0
if hasattr(obj, "length"):
if isinstance(obj.length, int):
return obj.length
else:
return obj.length()
else:
if isinstance(obj, dict) and "result" in obj.keys():
return len(obj) - 1
else:
return len(obj)
def text(obj: Any) -> str:
"""
Get text property from object.
    @param obj. Object to process
    @return value of obj.text or obj.text()
"""
try:
return obj.text()
except Exception:
return obj.text
def start_timer(time: int, fun: Callable) -> "QtCore.QTimer":
"""Create new timer that calls a function."""
global TIMERS
timer = QtCore.QTimer()
timer.timeout.connect(fun) # type: ignore [attr-defined] # noqa: F821
timer.start(time)
TIMERS.append(timer)
return timer
def kill_timer(timer: Optional["QtCore.QTimer"] = None) -> None:
"""Stop a given timer."""
global TIMERS
if timer is not None:
timer.stop()
TIMERS.remove(timer)
def kill_timers() -> None:
"""Stop and deletes all timers that have been created with startTimer()."""
global TIMERS
for timer in TIMERS:
timer.stop()
TIMERS = []
def debug(txt: Union[bool, str, int, float]) -> None:
"""
Debug for QSA messages.
    @param txt. Message.
"""
from pineboolib import application
application.PROJECT.message_manager().send("debug", None, [utils_base.ustr(txt)])
def format_exc(exc: Optional[int] = None) -> str:
"""Format a traceback."""
return traceback.format_exc(exc)
def is_nan(value: Any) -> bool:
"""
Check if value is NaN.
    @param value. Numeric value
    @return True or False
"""
if value in [None, ""]:
return True
    if isinstance(value, str) and value.find(":") > -1:
value = value.replace(":", "")
try:
value = float(value)
return math.isnan(value)
except ValueError:
return True
def isnan(value: Any) -> bool:
"""Return if a number is NaN."""
return is_nan(value)
def replace(source: str, search: Any, replace: str) -> str:
"""Replace for QSA where detects if "search" is a Regexp."""
if isinstance(search, str):
return source.replace(search, str(replace))
else:
return search.replace(source, replace)
def splice(*args: Any) -> Any:
must be either standard, daily, or anime")
elif method_name in ["sonarr_season", "sonarr_search", "sonarr_cutoff_search"]:
self.sonarr_options[method_name[7:]] = util.get_bool(method_name, method_data)
elif method_name == "sonarr_tag":
self.sonarr_options["tag"] = util.get_list(method_data)
elif method_final in plex.searches:
self.methods.append(("plex_search", [self.build_filter("plex_search", {"any": {method_name: method_data}})]))
elif method_name == "plex_all":
self.methods.append((method_name, [""]))
elif method_name == "anidb_popular":
list_count = util.regex_first_int(method_data, "List Size", default=40)
if 1 <= list_count <= 30:
self.methods.append((method_name, [list_count]))
else:
logger.warning("Collection Error: anidb_popular must be an integer between 1 and 30 defaulting to 30")
self.methods.append((method_name, [30]))
elif method_name == "mal_id":
self.methods.append((method_name, util.get_int_list(method_data, "MyAnimeList ID")))
elif method_name in ["anidb_id", "anidb_relation"]:
self.methods.append((method_name, config.AniDB.validate_anidb_list(util.get_int_list(method_data, "AniDB ID"), self.library.Plex.language)))
elif method_name in ["anilist_id", "anilist_relations", "anilist_studio"]:
self.methods.append((method_name, config.AniList.validate_anilist_ids(util.get_int_list(method_data, "AniList ID"), studio=method_name == "anilist_studio")))
elif method_name == "trakt_list":
self.methods.append((method_name, config.Trakt.validate_trakt(util.get_list(method_data))))
elif method_name == "trakt_list_details":
valid_list = config.Trakt.validate_trakt(util.get_list(method_data))
item = config.Trakt.standard_list(valid_list[0])
if hasattr(item, "description") and item.description:
self.summaries[method_name] = item.description
self.methods.append((method_name[:-8], valid_list))
elif method_name in ["trakt_watchlist", "trakt_collection"]:
self.methods.append((method_name, config.Trakt.validate_trakt(util.get_list(method_data), trakt_type=method_name[6:], is_movie=self.library.is_movie)))
elif method_name == "imdb_list":
new_list = []
for imdb_list in util.get_list(method_data, split=False):
if isinstance(imdb_list, dict):
dict_methods = {dm.lower(): dm for dm in imdb_list}
if "url" in dict_methods and imdb_list[dict_methods["url"]]:
imdb_url = config.IMDb.validate_imdb_url(imdb_list[dict_methods["url"]], self.library.Plex.language)
else:
raise Failed("Collection Error: imdb_list attribute url is required")
if "limit" in dict_methods and imdb_list[dict_methods["limit"]]:
list_count = util.regex_first_int(imdb_list[dict_methods["limit"]], "List Limit", default=0)
else:
list_count = 0
else:
imdb_url = config.IMDb.validate_imdb_url(str(imdb_list), self.library.Plex.language)
list_count = 0
new_list.append({"url": imdb_url, "limit": list_count})
self.methods.append((method_name, new_list))
elif method_name == "icheckmovies_list":
valid_lists = []
for icheckmovies_list in util.get_list(method_data, split=False):
valid_lists.append(config.ICheckMovies.validate_icheckmovies_list(icheckmovies_list, self.library.Plex.language))
self.methods.append((method_name, valid_lists))
elif method_name == "icheckmovies_list_details":
valid_lists = []
for icheckmovies_list in util.get_list(method_data, split=False):
valid_lists.append(config.ICheckMovies.validate_icheckmovies_list(icheckmovies_list, self.library.Plex.language))
self.methods.append((method_name[:-8], valid_lists))
self.summaries[method_name] = config.ICheckMovies.get_list_description(method_data, self.library.Plex.language)
elif method_name == "letterboxd_list":
self.methods.append((method_name, util.get_list(method_data, split=False)))
elif method_name == "letterboxd_list_details":
values = util.get_list(method_data, split=False)
self.summaries[method_name] = config.Letterboxd.get_list_description(values[0], self.library.Plex.language)
self.methods.append((method_name[:-8], values))
elif method_name in dictionary_builders:
for dict_data in util.get_list(method_data):
if isinstance(dict_data, dict):
def get_int(parent, int_method, data_in, methods_in, default_in, minimum=1, maximum=None):
if int_method not in methods_in:
logger.warning(f"Collection Warning: {parent} {int_method} attribute not found using {default_in} as default")
elif not data_in[methods_in[int_method]]:
logger.warning(f"Collection Warning: {parent} {methods_in[int_method]} attribute is blank using {default_in} as default")
elif isinstance(data_in[methods_in[int_method]], int) and data_in[methods_in[int_method]] >= minimum:
if maximum is None or data_in[methods_in[int_method]] <= maximum:
return data_in[methods_in[int_method]]
else:
logger.warning(f"Collection Warning: {parent} {methods_in[int_method]} attribute {data_in[methods_in[int_method]]} invalid must an integer <= {maximum} using {default_in} as default")
else:
logger.warning(f"Collection Warning: {parent} {methods_in[int_method]} attribute {data_in[methods_in[int_method]]} invalid must an integer >= {minimum} using {default_in} as default")
return default_in
if method_name == "filters":
validate = True
if "validate" in dict_data:
if dict_data["validate"] is None:
raise Failed("Collection Error: validate filter attribute is blank")
if not isinstance(dict_data["validate"], bool):
raise Failed("Collection Error: validate filter attribute must be either true or false")
validate = dict_data["validate"]
for filter_method, filter_data in dict_data.items():
filter_attr, modifier, filter_final = self._split(filter_method)
if filter_final not in all_filters:
raise Failed(f"Collection Error: {filter_final} is not a valid filter attribute")
elif filter_final in movie_only_filters and self.library.is_show:
raise Failed(f"Collection Error: {filter_final} filter attribute only works for movie libraries")
elif filter_final in show_only_filters and self.library.is_movie:
raise Failed(f"Collection Error: {filter_final} filter attribute only works for show libraries")
elif filter_final is None:
raise Failed(f"Collection Error: {filter_final} filter attribute is blank")
else:
self.filters.append((filter_final, self.validate_attribute(filter_attr, modifier, f"{filter_final} filter", filter_data, validate)))
elif method_name == "plex_collectionless":
new_dictionary = {}
dict_methods = {dm.lower(): dm for dm in dict_data}
prefix_list = []
if "exclude_prefix" in dict_methods and dict_data[dict_methods["exclude_prefix"]]:
if isinstance(dict_data[dict_methods["exclude_prefix"]], list):
prefix_list.extend([exclude for exclude in dict_data[dict_methods["exclude_prefix"]] if exclude])
else:
prefix_list.append(str(dict_data[dict_methods["exclude_prefix"]]))
exact_list = []
if "exclude" in dict_methods and dict_data[dict_methods["exclude"]]:
if isinstance(dict_data[dict_methods["exclude"]], list):
exact_list.extend([exclude for exclude in dict_data[dict_methods["exclude"]] if exclude])
else:
exact_list.append(str(dict_data[dict_methods["exclude"]]))
if len(prefix_list) == 0 and len(exact_list) == 0:
raise Failed("Collection Error: you must have at least one exclusion")
exact_list.append(self.name)
new_dictionary["exclude_prefix"] = prefix_list
new_dictionary["exclude"] = exact_list
self.methods.append((method_name, [new_dictionary]))
elif method_name == "plex_search":
self.methods.append((method_name, [self.build_filter("plex_search", dict_data)]))
elif method_name == "tmdb_discover":
new_dictionary = {"limit": 100}
for discover_name, discover_data in dict_data.items():
discover_final = discover_name.lower()
if discover_data:
if (self.library.is_movie and discover_final in tmdb.discover_movie) or (self.library.is_show and discover_final in tmdb.discover_tv):
if discover_final == "language":
if re.compile("([a-z]{2})-([A-Z]{2})").match(str(discover_data)):
new_dictionary[discover_final] = str(discover_data)
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final}: {discover_data} must match pattern ([a-z]{{2}})-([A-Z]{{2}}) e.g. en-US")
elif discover_final == "region":
if re.compile("^[A-Z]{2}$").match(str(discover_data)):
new_dictionary[discover_final] = str(discover_data)
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final}: {discover_data} must match pattern ^[A-Z]{{2}}$ e.g. US")
elif discover_final == "sort_by":
if (self.library.is_movie and discover_data in tmdb.discover_movie_sort) or (self.library.is_show and discover_data in tmdb.discover_tv_sort):
new_dictionary[discover_final] = discover_data
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final}: {discover_data} is invalid")
elif discover_final == "certification_country":
if "certification" in dict_data or "certification.lte" in dict_data or "certification.gte" in dict_data:
new_dictionary[discover_final] = discover_data
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final}: must be used with either certification, certification.lte, or certification.gte")
elif discover_final in ["certification", "certification.lte", "certification.gte"]:
if "certification_country" in dict_data:
new_dictionary[discover_final] = discover_data
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final}: must be used with certification_country")
elif discover_final in ["include_adult", "include_null_first_air_dates", "screened_theatrically"]:
if discover_data is True:
new_dictionary[discover_final] = discover_data
elif discover_final in tmdb.discover_dates:
new_dictionary[discover_final] = util.check_date(discover_data, f"{method_name} attribute {discover_final}", return_string=True)
elif discover_final in ["primary_release_year", "year", "first_air_date_year"]:
new_dictionary[discover_final] = util.check_number(discover_data, f"{method_name} attribute {discover_final}", minimum=1800, maximum=self.current_year + 1)
elif discover_final in ["vote_count.gte", "vote_count.lte", "vote_average.gte", "vote_average.lte", "with_runtime.gte", "with_runtime.lte"]:
new_dictionary[discover_final] = util.check_number(discover_data, f"{method_name} attribute {discover_final}", minimum=1)
elif discover_final in ["with_cast", "with_crew", "with_people", "with_companies", "with_networks", "with_genres", "without_genres", "with_keywords", "without_keywords", "with_original_language", "timezone"]:
new_dictionary[discover_final] = discover_data
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final} not supported")
elif discover_final == "limit":
if isinstance(discover_data, int) and discover_data > 0:
new_dictionary[discover_final] = discover_data
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final}: must be a valid number greater then 0")
else:
raise Failed(f"Collection Error: {method_name} attribute {discover_final} not supported")
else:
raise Failed(f"Collection Error: {method_name} parameter {discover_final} is blank")
if len(new_dictionary) > 1:
self.methods.append((method_name, [new_dictionary]))
else:
raise Failed(f"Collection Error: {method_name} had no valid fields")
elif "tautulli" in method_name:
new_dictionary = {}
if method_name == "tautulli_popular":
new_dictionary["list_type"] = "popular"
elif method_name == "tautulli_watched":
new_dictionary["list_type"] = "watched"
else:
raise Failed(f"Collection Error: {method_name} attribute not supported")
dict_methods = {dm.lower(): dm for dm in dict_data}
new_dictionary["list_days"] = get_int(method_name, "list_days", dict_data, dict_methods, 30)
new_dictionary["list_size"] = get_int(method_name, "list_size", dict_data, dict_methods, 10)
new_dictionary["list_buffer"] = get_int(method_name, "list_buffer", dict_data, dict_methods, 20)
self.methods.append((method_name, [new_dictionary]))
elif method_name == "mal_season":
new_dictionary = {"sort_by": "anime_num_list_users"}
dict_methods = {dm.lower(): dm for dm in dict_data}
if "sort_by" not in dict_methods:
logger.warning("Collection Warning: mal_season sort_by attribute not found using members as default")
elif not dict_data[dict_methods["sort_by"]]:
logger.warning("Collection Warning: mal_season sort_by attribute is blank using members as default")
elif dict_data[dict_methods["sort_by"]] not in mal.season_sort:
logger.warning(f"Collection Warning: mal_season sort_by attribute {dict_data[dict_methods['sort_by']]} invalid must be either 'members' or 'score' using members as default")
else:
new_dictionary["sort_by"] = mal.season_sort[dict_data[dict_methods["sort_by"]]]
if self.current_time.month in [1, 2, 3]: new_dictionary["season"] = "winter"
elif self.current_time.month in [4, 5, 6]: new_dictionary["season"] = "spring"
elif self.current_time.month in [7, 8, 9]: new_dictionary["season"] = "summer"
elif self.current_time.month in [10, 11, 12]: new_dictionary["season"] = "fall"
if "season" not in dict_methods:
logger.warning(f"Collection Warning: mal_season season attribute not found using the current season: {new_dictionary['season']} as default")
elif not dict_data[dict_methods["season"]]:
logger.warning(f"Collection Warning: mal_season season attribute is blank using the current season: {new_dictionary['season']} as default")
elif dict_data[dict_methods["season"]] not in util.pretty_seasons:
logger.warning(f"Collection Warning: mal_season season attribute {dict_data[dict_methods['season']]} invalid must be either 'winter', 'spring', 'summer' or 'fall' using the current season: {new_dictionary['season']} as default")
else:
new_dictionary["season"] = dict_data[dict_methods["season"]]
new_dictionary["year"] = get_int(method_name, "year", dict_data, dict_methods, self.current_time.year, minimum=1917, maximum=self.current_time.year + 1)
new_dictionary["limit"] = get_int(method_name, "limit", dict_data, dict_methods, 100, maximum=500)
self.methods.append((method_name, [new_dictionary]))
elif method_name == "mal_userlist":
new_dictionary = {"status": "all", "sort_by": "list_score"}
dict_methods = {dm.lower(): dm for dm in dict_data}
if "username" not in dict_methods:
raise Failed("Collection Error: mal_userlist username attribute is required")
elif not dict_data[dict_methods["username"]]:
raise Failed("Collection Error: mal_userlist username attribute is blank")
else:
new_dictionary["username"] = dict_data[dict_methods["username"]]
if "status" not in dict_methods:
logger.warning("Collection Warning: mal_season status attribute not found using all as default")
elif not dict_data[dict_methods["status"]]:
logger.warning("Collection Warning: mal_season status attribute is blank using all as default")
elif dict_data[dict_methods["status"]] not in mal.userlist_status:
logger.warning(f"Collection Warning: mal_season status attribute {dict_data[dict_methods['status']]} invalid must be either 'all', 'watching', 'completed', 'on_hold', 'dropped' or 'plan_to_watch' using all as default")
else:
new_dictionary["status"] = mal.userlist_status[dict_data[dict_methods["status"]]]
if "sort_by" not in dict_methods:
logger.warning("Collection Warning: mal_season sort_by attribute not found using score as default")
elif not dict_data[dict_methods["sort_by"]]:
logger.warning("Collection Warning: mal_season sort_by attribute is blank | |
= (
(0.5 + (self.bin_temp[:,12*year:12*(year+1)] -
self.modelprms['tsnow_threshold']) / 2) * bin_precsnow[:,12*year:12*(year+1)])
self.bin_acc[:,12*year:12*(year+1)] = (
bin_precsnow[:,12*year:12*(year+1)] - self.bin_prec[:,12*year:12*(year+1)])
# if temperature above maximum threshold, then all rain
(self.bin_prec[:,12*year:12*(year+1)]
[self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold'] + 1]) = (
bin_precsnow[:,12*year:12*(year+1)]
[self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold'] + 1])
(self.bin_acc[:,12*year:12*(year+1)]
[self.bin_temp[:,12*year:12*(year+1)] > self.modelprms['tsnow_threshold'] + 1]) = 0
# if temperature below minimum threshold, then all snow
(self.bin_acc[:,12*year:12*(year+1)]
[self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold'] - 1]) = (
bin_precsnow[:,12*year:12*(year+1)]
[self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold'] - 1])
(self.bin_prec[:,12*year:12*(year+1)]
[self.bin_temp[:,12*year:12*(year+1)] <= self.modelprms['tsnow_threshold'] - 1]) = 0
# ENTER MONTHLY LOOP (monthly loop required since surface type changes)
for month in range(0,12):
# Step is the position as a function of year and month, which improves readability
step = 12*year + month
# ACCUMULATION, MELT, REFREEZE, AND CLIMATIC MASS BALANCE
# Snowpack [m w.e.] = snow remaining + new snow
if step == 0:
self.bin_snowpack[:,step] = self.bin_acc[:,step]
else:
self.bin_snowpack[:,step] = self.snowpack_remaining[:,step-1] + self.bin_acc[:,step]
# MELT [m w.e.]
# energy available for melt [degC day]
if pygem_prms.option_ablation == 1:
# option 1: energy based on monthly temperature
melt_energy_available = self.bin_temp[:,step]*self.dayspermonth[step]
melt_energy_available[melt_energy_available < 0] = 0
elif pygem_prms.option_ablation == 2:
# Seed randomness for repeatability, but base it on step to ensure the daily variability is not
# the same for every single time step
np.random.seed(step)
# option 2: monthly temperature superimposed with daily temperature variability
# daily temperature variation in each bin for the monthly timestep
bin_tempstd_daily = np.repeat(
np.random.normal(loc=0, scale=self.glacier_gcm_tempstd[step],
size=self.dayspermonth[step])
.reshape(1,self.dayspermonth[step]), heights.shape[0], axis=0)
# daily temperature in each bin for the monthly timestep
bin_temp_daily = self.bin_temp[:,step][:,np.newaxis] + bin_tempstd_daily
# remove negative values
bin_temp_daily[bin_temp_daily < 0] = 0
# Energy available for melt [degC day] = sum of daily energy available
melt_energy_available = bin_temp_daily.sum(axis=1)
# SNOW MELT [m w.e.]
self.bin_meltsnow[:,step] = self.surfacetype_ddf_dict[2] * melt_energy_available
# snow melt cannot exceed the snow depth
self.bin_meltsnow[self.bin_meltsnow[:,step] > self.bin_snowpack[:,step], step] = (
self.bin_snowpack[self.bin_meltsnow[:,step] > self.bin_snowpack[:,step], step])
# GLACIER MELT (ice and firn) [m w.e.]
# energy remaining after snow melt [degC day]
melt_energy_available = (
melt_energy_available - self.bin_meltsnow[:,step] / self.surfacetype_ddf_dict[2])
# remove low values of energy available caused by rounding errors in the step above
melt_energy_available[abs(melt_energy_available) < pygem_prms.tolerance] = 0
# DDF based on surface type [m w.e. degC-1 day-1]
for surfacetype_idx in self.surfacetype_ddf_dict:
self.surfacetype_ddf[self.surfacetype == surfacetype_idx] = (
self.surfacetype_ddf_dict[surfacetype_idx])
# Debris enhancement factors in ablation area (debris in accumulation area would submerge)
if surfacetype_idx == 1 and pygem_prms.include_debris:
self.surfacetype_ddf[self.surfacetype == 1] = (
self.surfacetype_ddf[self.surfacetype == 1] * self.debris_ed[self.surfacetype == 1])
self.bin_meltglac[glac_idx_t0,step] = (
self.surfacetype_ddf[glac_idx_t0] * melt_energy_available[glac_idx_t0])
# TOTAL MELT (snow + glacier)
# off-glacier need to include melt of refreeze because there are no glacier dynamics,
# but on-glacier do not need to account for this (simply assume refreeze has same surface type)
self.bin_melt[:,step] = self.bin_meltglac[:,step] + self.bin_meltsnow[:,step]
# REFREEZING
if pygem_prms.option_refreezing == 'HH2015':
if step > 0:
self.tl_rf[:,:,step] = self.tl_rf[:,:,step-1]
self.te_rf[:,:,step] = self.te_rf[:,:,step-1]
# Refreeze based on heat conduction approach (Huss and Hock 2015)
# refreeze time step (s)
rf_dt = 3600 * 24 * self.dayspermonth[step] / pygem_prms.rf_dsc
if pygem_prms.option_rf_limit_meltsnow == 1:
bin_meltlimit = self.bin_meltsnow.copy()
else:
bin_meltlimit = self.bin_melt.copy()
# Debug lowest bin
if self.debug_refreeze:
gidx_debug = np.where(heights == heights[glac_idx_t0].min())[0]
# Loop through each elevation bin of glacier
for nbin, gidx in enumerate(glac_idx_t0):
# COMPUTE HEAT CONDUCTION - BUILD COLD RESERVOIR
# If no melt, then build up cold reservoir (compute heat conduction)
if self.bin_melt[gidx,step] < pygem_prms.rf_meltcrit:
if self.debug_refreeze and gidx == gidx_debug and step < 12:
print('\nMonth ' + str(self.dates_table.loc[step,'month']),
'Computing heat conduction')
# Set refreeze equal to 0
self.refr[gidx] = 0
# Loop through multiple iterations to converge on a solution
# -> this will loop through 0, 1, 2
for h in np.arange(0, pygem_prms.rf_dsc):
# Compute heat conduction in layers (loop through rows)
# go from 1 to rf_layers-1 to avoid indexing errors with "j-1" and "j+1"
# "j+1" is set to zero, which is fine for temperate glaciers but inaccurate for
# cold/polythermal glaciers
for j in np.arange(1, pygem_prms.rf_layers-1):
# Assume temperature of first layer equals air temperature
# assumption probably wrong, but might still work at annual average
# Since next line uses tl_rf for all calculations, set tl_rf[0] to present mean
# monthly air temperature to ensure the present calculations are done with the
# present time step's air temperature
self.tl_rf[0, gidx,step] = self.bin_temp[gidx,step]
# Temperature for each layer
self.te_rf[j,gidx,step] = (self.tl_rf[j,gidx,step] +
rf_dt * self.rf_layers_k[j] / self.rf_layers_ch[j] / pygem_prms.rf_dz**2 *
0.5 * ((self.tl_rf[j-1,gidx,step] - self.tl_rf[j,gidx,step]) -
(self.tl_rf[j,gidx,step] - self.tl_rf[j+1,gidx,step])))
# Update previous time step
self.tl_rf[:,gidx,step] = self.te_rf[:,gidx,step]
if self.debug_refreeze and gidx == gidx_debug and step < 12:
print('tl_rf:', ["{:.2f}".format(x) for x in self.tl_rf[:,gidx,step]])
# COMPUTE REFREEZING - TAP INTO "COLD RESERVOIR" or potential refreezing
else:
if self.debug_refreeze and gidx == gidx_debug and step < 12:
print('\nMonth ' + str(self.dates_table.loc[step,'month']), 'Computing refreeze')
# Refreezing over firn surface
if (self.surfacetype[gidx] == 2) or (self.surfacetype[gidx] == 3):
nlayers = pygem_prms.rf_layers-1
# Refreezing over ice surface
else:
# Approximate number of layers of snow on top of ice
smax = np.round((self.bin_snowpack[gidx,step] / (self.rf_layers_dens[0] / 1000) +
pygem_prms.pp) / pygem_prms.rf_dz, 0)
# if there is very little snow on the ground (SWE > 0.06 m for pp=0.3),
# then still set smax (layers) to 1
if self.bin_snowpack[gidx,step] > 0 and smax == 0:
smax=1
# if no snow on the ground, then set to rf_cold to NoData value
if smax == 0:
self.rf_cold[gidx] = 0
# if smax greater than the number of layers, set to max number of layers minus 1
if smax > pygem_prms.rf_layers - 1:
smax = pygem_prms.rf_layers - 1
nlayers = int(smax)
# Compute potential refreeze, "cold reservoir", from temperature in each layer
# only calculate potential refreezing first time it starts melting each year
if self.rf_cold[gidx] == 0 and self.tl_rf[:,gidx,step].min() < 0:
if self.debug_refreeze and gidx == gidx_debug and step < 12:
print('calculating potential refreeze from ' + str(nlayers) + ' layers')
for j in np.arange(0,nlayers):
j += 1
# units: (degC) * (J K-1 m-3) * (m) * (kg J-1) * (m3 kg-1)
rf_cold_layer = (self.tl_rf[j,gidx,step] * self.rf_layers_ch[j] *
pygem_prms.rf_dz / pygem_prms.Lh_rf / pygem_prms.density_water)
self.rf_cold[gidx] -= rf_cold_layer
if self.debug_refreeze and gidx == gidx_debug and step < 12:
print('j:', j, 'tl_rf @ j:', np.round(self.tl_rf[j,gidx,step],2),
'ch @ j:', np.round(self.rf_layers_ch[j],2),
'rf_cold_layer @ j:', np.round(rf_cold_layer,2),
'rf_cold @ j:', np.round(self.rf_cold[gidx],2))
if self.debug_refreeze and gidx == gidx_debug and step < 12:
print('rf_cold:', np.round(self.rf_cold[gidx],2))
# Compute refreezing
# If melt and liquid prec < potential refreeze, then refreeze all melt and liquid prec
if (bin_meltlimit[gidx,step] + self.bin_prec[gidx,step]) < self.rf_cold[gidx]:
self.refr[gidx] = bin_meltlimit[gidx,step] + self.bin_prec[gidx,step]
# otherwise, refreeze equals the potential refreeze
elif self.rf_cold[gidx] > 0:
self.refr[gidx] = self.rf_cold[gidx]
else:
self.refr[gidx] = 0
# Track the remaining potential refreeze
self.rf_cold[gidx] -= (bin_meltlimit[gidx,step] + self.bin_prec[gidx,step])
# if potential refreeze consumed, set to 0 and set temperature to 0 (temperate firn)
if self.rf_cold[gidx] < 0:
self.rf_cold[gidx] = 0
self.tl_rf[:,gidx,step] = 0
# Record refreeze
self.bin_refreeze[gidx,step] = self.refr[gidx]
if self.debug_refreeze and step < 12 and gidx == gidx_debug:
print('Month ' + str(self.dates_table.loc[step,'month']),
'Rf_cold remaining:', np.round(self.rf_cold[gidx],2),
'Snow depth:', np.round(self.bin_snowpack[glac_idx_t0[nbin],step],2),
'Snow melt:', np.round(self.bin_meltsnow[glac_idx_t0[nbin],step],2),
'Rain:', np.round(self.bin_prec[glac_idx_t0[nbin],step],2),
'Rfrz:', np.round(self.bin_refreeze[gidx,step],2))
elif pygem_prms.option_refreezing == 'Woodward':
# Refreeze based on annual air temperature (Woodward etal. 1997)
# R(m) = (-0.69 * Tair + 0.0096) * 1 m / 100 cm
# calculate annually and place potential refreeze in user defined month
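                    # Worked example (illustrative): an annual mean Tair of -5 degC gives
                    # (-0.69 * (-5) + 0.0096) / 100 ~= 0.035 m w.e. of potential refreeze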
if step%12 == 0:
bin_temp_annual = annualweightedmean_array(self.bin_temp[:,12*year:12*(year+1)],
self.dates_table.iloc[12*year:12*(year+1),:])
bin_refreezepotential_annual = (-0.69 * bin_temp_annual + 0.0096) / 100
# Remove negative refreezing values
bin_refreezepotential_annual[bin_refreezepotential_annual < 0] = 0
self.bin_refreezepotential[:,step] = bin_refreezepotential_annual
# Reset refreeze potential every year
if self.bin_refreezepotential[:,step].max() > 0:
refreeze_potential = self.bin_refreezepotential[:,step]
if self.debug_refreeze:
print('Year ' + str(year) + ' Month ' + str(self.dates_table.loc[step,'month']),
'Refreeze potential:', np.round(refreeze_potential[glac_idx_t0[0]],3),
'Snow depth:', np.round(self.bin_snowpack[glac_idx_t0[0],step],2),
'Snow melt:', np.round(self.bin_meltsnow[glac_idx_t0[0],step],2),
'Rain:', np.round(self.bin_prec[glac_idx_t0[0],step],2))
# Refreeze [m w.e.]
# refreeze cannot exceed rain and melt (snow & glacier melt)
self.bin_refreeze[:,step] = self.bin_meltsnow[:,step] + self.bin_prec[:,step]
                    # refreeze cannot
# greykite/common/viz/timeseries_plotting_mpl.py
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>
"""Plotting functions in matplotlib."""
import colorsys
import matplotlib
import numpy as np

matplotlib.use("agg")  # noqa: E402
from matplotlib import pyplot as plt
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()
def plt_compare_timeseries(
df_dict,
time_col,
value_col,
legends_dict=None,
colors_dict=None,
start_time=None,
end_time=None,
transform=lambda x: x,
transform_name="",
plt_title="",
alpha=0.6,
linewidth=4):
"""Compare a collection by of timeseries (given in `df_dict`)
by overlaying them in the specified period between
``start_time`` and ``end_time``.
:param df_dict: Dict[str, pd.DataFrame]
The keys are the arbitrary labels for each dataframe provided by the user.
The values are dataframes each containing a timeseries
with `time_col` and `value_col` as columns.
:param time_col: str
The column denoting time in datetime format.
:param value_col: str
The value column of interest for the y axis.
:param legends_dict: Optional[Dict[str, str]]
Labels for plot legend.
The keys are the df_dict labels (or a subset of them).
The values are the labels appearing in the plot legend.
If not provided or None, the `df_dict` keys will be used.
:param colors_dict: Optional[Dict[str, str]]
A dictionary determining the color for each series in `df_dict`.
The keys are the df_dict labels (or a subset of them).
The values are the colors appearing for each curve in the plot.
If not provided or None, the colors will be generated.
:param start_time: Optional[datetime.datetime]
The start time of the series plot.
:param end_time: Optional[datetime.datetime]
The end time of the series plot.
:param transform: Optional[func]
A function to transform the y values before plotting.
:param transform_name: Optional[str]
The name of the transformation for using in the title.
:param plt_title: str
Plot title.
:param alpha: Optional[float]
Transparency of the curves.
:param linewidth: Optional[float]
The width of the curves.
"""
if start_time is not None:
for label, df in df_dict.items():
df_dict[label] = df[df[time_col] >= start_time]
if end_time is not None:
for label, df in df_dict.items():
df_dict[label] = df[df[time_col] <= end_time]
n = len(df_dict)
labels = list(df_dict.keys())
if colors_dict is None:
hsv_tuples = [(x * 1.0 / n, 0.5, 0.5) for x in range(n)]
rgb_tuples = [(lambda x: colorsys.hsv_to_rgb(*x))(x) for x in hsv_tuples]
colors_dict = {labels[i]: rgb_tuples[i] for i in range(n)}
if legends_dict is None:
legends_dict = {label: label for label in labels}
fig, ax = plt.subplots()
legend_patches = []
for label in labels:
df = df_dict[label]
color = colors_dict[label]
legend = legends_dict.get(label)
# Avoids the following issue:
# "ValueError: view limit minimum -36864.55 is less than 1 and is an invalid
# Matplotlib date value. This often happens if you pass a non-datetime value
# to an axis that has datetime units".
dates = df[time_col].astype("O")
# we add a legend for the given series if legend is not None
# for that series
if legend is not None:
ax.plot(
dates,
transform(df[value_col]),
alpha=alpha,
color=color,
label=legend,
linewidth=linewidth)
patch = matplotlib.patches.Patch(color=color, label=legend)
legend_patches.append(patch)
else:
ax.plot(
dates,
transform(df[value_col]),
alpha=alpha,
color=color,
linewidth=linewidth)
legends = list(legends_dict.values())
ax.legend(
labels=[legend for legend in legends if legend is not None],
handles=legend_patches)
fig.autofmt_xdate() # rotates the dates
ax.set_title(plt_title + " " + transform_name)
plt.show()
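# Hypothetical usage of plt_compare_timeseries: the frames, column names and
# legend labels below are invented for illustration only.
def _example_compare_timeseries():
    import pandas as pd

    dates = pd.date_range("2020-01-01", periods=60, freq="D")
    df_actual = pd.DataFrame({"ts": dates, "y": range(60)})
    df_model = pd.DataFrame({"ts": dates, "y": [v + 3 for v in range(60)]})
    plt_compare_timeseries(
        df_dict={"actual": df_actual, "model": df_model},
        time_col="ts",
        value_col="y",
        legends_dict={"actual": "Observed", "model": "Model"},
        colors_dict={"actual": "gray", "model": "red"},
        plt_title="Observed vs. model")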
def plt_overlay_long_df(
df,
x_col,
y_col,
split_col,
agg_dict=None,
agg_col_names=None,
overlay_color="black",
agg_col_colors=None,
plt_title=None):
"""Overlay by splitting wrt values of a column (split_col).
If some agg metrics (specified by agg_dict) are also desired,
we overlay the aggregate metrics as well.
:param df: pd.DataFrame
data frame which includes the data
:param x_col: str
the column for the values for the x-axis
:param y_col: str
the column for the values for the y-axis
:param split_col: str
        the column which is used to split the data into the various
        parts to be overlaid
:param agg_dict: Optional[dict]
a dictionary to specify aggregations.
we could calculate multiple metrics for example mean and median
:param agg_col_names: optional[list[str]]
names for the aggregated columns.
if not provided it will be generated during aggregations
:param overlay_color: Optional[str]
the color of the overlayed curves.
The color will be transparent so we can see the curves overlap
:param agg_col_colors: Optional[str]
the color of the aggregate curves
:param plt_title: Optional[str]
the title of the plot.
It will default to y_col if not provided
"""
g = df.groupby([split_col], as_index=False)
df_num = len(g)
alpha = 5.0 / df_num
for name, df0 in g:
plt.plot(
df0[x_col],
df0[y_col],
color=overlay_color,
alpha=alpha,
label=None)
agg_df = None
if agg_dict is not None:
g2 = df.groupby([x_col], as_index=False)
agg_df = g2.agg(agg_dict)
agg_df.columns = [" ".join(col).strip() for col in agg_df.columns.values]
if agg_col_names is not None:
agg_df.columns = [x_col] + agg_col_names
if agg_col_colors is None:
n = len(agg_col_names)
hsv_tuples = [(x * 1.0 / n, 0.5, 0.5) for x in range(n)]
rgb_tuples = [colorsys.hsv_to_rgb(*x) for x in hsv_tuples]
agg_col_colors = rgb_tuples
legend_patches = []
for i in range(len(agg_col_names)):
col = agg_col_names[i]
color = agg_col_colors[i]
plt.plot(agg_df[x_col], agg_df[col], label=col, color=color)
patch = matplotlib.patches.Patch(color=color, label=col)
legend_patches.append(patch)
plt.legend(labels=agg_col_names, handles=legend_patches)
if plt_title is None:
plt_title = y_col
plt.title(plt_title)
plt.xlabel(x_col)
plt.ylabel(y_col)
return agg_df
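# Hypothetical call overlaying one curve per "unit" plus a mean curve; the
# frame and column names are invented for illustration only.
def _example_overlay_long_df():
    import pandas as pd

    long_df = pd.DataFrame({
        "hour": list(range(24)) * 3,
        "load": [h % 7 for h in range(72)],
        "unit": ["u1"] * 24 + ["u2"] * 24 + ["u3"] * 24,
    })
    return plt_overlay_long_df(
        df=long_df,
        x_col="hour",
        y_col="load",
        split_col="unit",
        agg_dict={"load": [np.nanmean]},
        agg_col_names=["mean load"],
        plt_title="Hourly load by unit")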
def plt_overlay_with_bands(
df,
x_col,
y_col,
split_col,
perc=(25, 75),
overlay_color="black",
agg_col_colors=("blue", "black", "red"),
plt_title=None):
"""Overlay by splitting wrt a column and plot wrt time.
We also add quantile (percentile) bands
:param df: pd.DataFrame
data frame which includes the data
:param x_col: str
the column for the values for the x-axis
:param y_col: str
the column for the values for the y-axis
:param split_col: str
the column which is used to split the data
into various partitions to be overlaid
:param perc: tuple[float, float]
the percentiles for the bands.
The default is 25 and 75 percentiles
:param overlay_color: Optional[str]
the color of the overlaid curves.
The color will be transparent so we can see the curves overlap
:param agg_col_colors: Optional[list[str]]
the colors of the aggregate curves
:param plt_title: Optional[str]
the title of the plot.
It will default to y_col if not provided
"""
def quantile_fcn(q):
return lambda x: np.nanpercentile(a=x, q=q)
agg_dict = {
y_col: [np.nanmean, quantile_fcn(perc[0]), quantile_fcn(perc[1])]
}
res = plt_overlay_long_df(
df=df,
x_col=x_col,
y_col=y_col,
split_col=split_col,
agg_dict=agg_dict,
agg_col_names=["mean", ("Q" + str(perc[0])), ("Q" + str(perc[1]))],
overlay_color=overlay_color,
agg_col_colors=agg_col_colors,
plt_title=plt_title)
return res
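# Illustrative usage of plt_overlay_with_bands on a hypothetical long-format frame
# with one row per (series, time) pair; all column names below are made up.
def _example_overlay_with_bands():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    df = pd.DataFrame({
        "series_id": np.repeat(np.arange(20), 50),
        "t": np.tile(np.arange(50), 20),
        "y": rng.normal(size=20 * 50),
    })
    # Overlays the 20 individual curves and adds mean / Q25 / Q75 aggregate curves.
    return plt_overlay_with_bands(
        df=df, x_col="t", y_col="y", split_col="series_id", perc=(25, 75))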
def plt_longterm_ts_agg(
df,
time_col,
window_col,
value_col,
color_col=None,
agg_func=np.nanmean,
plt_title="",
color="blue",
choose_color_func=None):
"""Make a longterm avg plot by taking the average in a window
and moving that across the time.
That window can be a week, month, year etc
:param df: pd.DataFrame
The data frame with the data
:param time_col: str
The column with timestamps
:param window_col: str
This is a column which represents a coarse time granularity,
e.g. "2018-01" represents year-month.
In this case the avg across that month is calculated
:param value_col: str
The column denoting the values
:param color_col: Optional[str]
The column denoting the color for each point.
We allow the curve color to change.
When aggregating for each window the first color
appearing in that window will be used by default.
:param agg_func: Optional[func]
the aggregation function to be used across the window
:param plt_title: Optional[str]
Plot title
:param color: Optional[str]
Color of the curve if it is not provided
val_f_ifWirelessSSIDAuthChangedCols: If op_ifWirelessSSIDAuthChangedCols is specified, the field named in this input will be compared to the value in ifWirelessSSIDAuthChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifWirelessSSIDAuthChangedCols must be specified if op_ifWirelessSSIDAuthChangedCols is specified.
:type val_f_ifWirelessSSIDAuthChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifWirelessSSIDAuthChangedCols: If op_ifWirelessSSIDAuthChangedCols is specified, this value will be compared to the value in ifWirelessSSIDAuthChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifWirelessSSIDAuthChangedCols must be specified if op_ifWirelessSSIDAuthChangedCols is specified.
:type val_c_ifWirelessSSIDAuthChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifWirelessSSIDAuthEndTime: The operator to apply to the field ifWirelessSSIDAuthEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifWirelessSSIDAuthEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifWirelessSSIDAuthEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifWirelessSSIDAuthEndTime: If op_ifWirelessSSIDAuthEndTime is specified, the field named in this input will be compared to the value in ifWirelessSSIDAuthEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifWirelessSSIDAuthEndTime must be specified if op_ifWirelessSSIDAuthEndTime is specified.
:type val_f_ifWirelessSSIDAuthEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifWirelessSSIDAuthEndTime: If op_ifWirelessSSIDAuthEndTime is specified, this value will be compared to the value in ifWirelessSSIDAuthEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifWirelessSSIDAuthEndTime must be specified if op_ifWirelessSSIDAuthEndTime is specified.
:type val_c_ifWirelessSSIDAuthEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifWirelessSSIDAuthStartTime: The operator to apply to the field ifWirelessSSIDAuthStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifWirelessSSIDAuthStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifWirelessSSIDAuthStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifWirelessSSIDAuthStartTime: If op_ifWirelessSSIDAuthStartTime is specified, the field named in this input will be compared to the value in ifWirelessSSIDAuthStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifWirelessSSIDAuthStartTime must be specified if op_ifWirelessSSIDAuthStartTime is specified.
:type val_f_ifWirelessSSIDAuthStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifWirelessSSIDAuthStartTime: If op_ifWirelessSSIDAuthStartTime is specified, this value will be compared to the value in ifWirelessSSIDAuthStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifWirelessSSIDAuthStartTime must be specified if op_ifWirelessSSIDAuthStartTime is specified.
:type val_c_ifWirelessSSIDAuthStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifWirelessSSIDAuthTimestamp: The operator to apply to the field ifWirelessSSIDAuthTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifWirelessSSIDAuthTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifWirelessSSIDAuthTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifWirelessSSIDAuthTimestamp: If op_ifWirelessSSIDAuthTimestamp is specified, the field named in this input will be compared to the value in ifWirelessSSIDAuthTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifWirelessSSIDAuthTimestamp must be specified if op_ifWirelessSSIDAuthTimestamp is specified.
:type val_f_ifWirelessSSIDAuthTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifWirelessSSIDAuthTimestamp: If op_ifWirelessSSIDAuthTimestamp is specified, this value will be compared to the value in ifWirelessSSIDAuthTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifWirelessSSIDAuthTimestamp must be specified if op_ifWirelessSSIDAuthTimestamp is specified.
:type val_c_ifWirelessSSIDAuthTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the if wireless ssid auths as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of if wireless ssid auth methods. The listed methods will be called on each if wireless ssid auth returned and included in the output. Available methods are: device, interface, vlan.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, vlan.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IfWirelessSSIDAuthID
:param sort: The data field(s) to use for sorting the output. Default is IfWirelessSSIDAuthID. Valid values are IfWirelessSSIDAuthID, DataSourceID, DeviceID, InterfaceID, ifWirelessSSIDAuthStartTime, ifWirelessSSIDAuthEndTime, ifWirelessSSIDAuthChangedCols, ifWirelessSSIDAuthTimestamp, SSIDIndex, SSIDAlgorithmIndex, SSIDAuthEnabledInd, SSIDEAPRequiredInd, SSIDEAPMethod, SSIDMACAuthRequiredInd, SSIDMACAuthMethod, SSIDDefaultVlanIndex, VlanID, SSIDAuthAlgorithm.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfWirelessSSIDAuth. Valid values are IfWirelessSSIDAuthID, DataSourceID, DeviceID, InterfaceID, ifWirelessSSIDAuthStartTime, ifWirelessSSIDAuthEndTime, ifWirelessSSIDAuthChangedCols, ifWirelessSSIDAuthTimestamp, SSIDIndex, SSIDAlgorithmIndex, SSIDAuthEnabledInd, SSIDEAPRequiredInd, SSIDEAPMethod, SSIDMACAuthRequiredInd, SSIDMACAuthMethod, SSIDDefaultVlanIndex, VlanID, SSIDAuthAlgorithm. If empty or omitted, all attributes will be returned.
type: {type: map, values: double}}
- 3
- {fcn: u.mylt}
fcns:
mylt:
params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["1", "5", "3"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argmaxNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["1", "5", "3"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argminNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- {fcn: u.mylt}
fcns:
mylt:
params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["4", "0", "6"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argminNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["4", "0", "6"])
def testToSet(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: int}
output: {type: map, values: int}
action:
- {map.toset: [input]}
''')
self.assertEqual(engine.action([1, 2, 3, 4, 5]), {"BA==": 2, "Ag==": 1, "Bg==": 3, "Cg==": 5, "CA==": 4})
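# The keys produced by map.toset appear to be the base64 of the Avro
# (zig-zag varint) encoding of each value, e.g. 1 -> 0x02 -> "Ag==",
# 4 -> 0x08 -> "CA==". One key can be checked by hand with:
#   import base64; base64.b64encode(bytes([2 * 4])).decode()  # 'CA==' for the int 4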
def testFromSet(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: int}
action:
- {map.fromset: [input]}
''')
self.assertEqual(set(engine.action({"BA==": 2, "Ag==": 1, "Bg==": 3, "Cg==": 5, "CA==": 4})), set([1, 2, 3, 4, 5]))
engine, = PFAEngine.fromYaml('''
input: {type: map, values: string}
output: {type: array, items: string}
action:
- {map.fromset: [input]}
''')
self.assertEqual(set(engine.action({"BA==": "two", "Ag==": "one", "Bg==": "three", "Cg==": "five", "CA==": "four"})), set(["one", "two", "three", "four", "five"]))
def testIn(self):
engine, = PFAEngine.fromYaml('''
input: int
output: boolean
action:
map.in:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- input
''')
self.assertTrue(engine.action(2))
self.assertFalse(engine.action(0))
def testUnion(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.union:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([1, 2, 3, 4, 5, 6, 7, 8]))
def testIntersection(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.intersection:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([4, 5]))
def testDiff(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.diff:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([1, 2, 3]))
def testSymDiff(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.symdiff:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([1, 2, 3, 6, 7, 8]))
def testSubset(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: int}
output: boolean
action:
map.subset:
- {map.toset: input}
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
''')
self.assertTrue(engine.action([1, 2, 3]))
self.assertFalse(engine.action([1, 2, 3, 999]))
self.assertFalse(engine.action([888, 999]))
def testDisjoint(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: int}
output: boolean
action:
map.disjoint:
- {map.toset: input}
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
''')
self.assertFalse(engine.action([1, 2, 3]))
self.assertFalse(engine.action([1, 2, 3, 999]))
self.assertTrue(engine.action([888, 999]))
def testMap(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: string}
output: {type: map, values: int}
action:
map.map:
- input
- params: [{x: string}]
ret: int
do: {parse.int: [x, 10]}
''')
self.assertEqual(engine.action({"a": "1", "b": "2", "c": "3", "d": "4", "e": "5"}), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})
def testMapWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: string}
output: {type: map, values: int}
action:
map.mapWithKey:
- input
- params: [{key: string}, {value: string}]
ret: int
do:
if: {">": [key, {string: "c"}]}
then: {+: [{parse.int: [value, 10]}, 1000]}
else: {parse.int: [value, 10]}
''')
self.assertEqual(engine.action({"a": "1", "b": "2", "c": "3", "d": "4", "e": "5"}), {"a": 1, "b": 2, "c": 3, "d": 1004, "e": 1005})
def testFilter(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filter:
- input
- params: [{x: int}]
ret: boolean
do: {"<": [x, 3]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"a": 1, "b": 2})
def testFilterWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filterWithKey:
- input
- params: [{key: string}, {value: int}]
ret: boolean
do: {"&&": [{"<": [value, 3]}, {"==": [key, {string: "a"}]}]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"a": 1})
def testFilterMap(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filterMap:
- input
- params: [{value: int}]
ret: [int, "null"]
do:
if: {"==": [{"%": [value, 2]}, 0]}
then: {"+": [value, 1000]}
else: null
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"b": 1002, "d": 1004})
def testFilterMapWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filterMapWithKey:
- input
- params: [{key: string}, {value: int}]
ret: [int, "null"]
do:
if: {"&&": [{"==": [{"%": [value, 2]}, 0]}, {"==": [key, {string: "b"}]}]}
then: {"+": [value, 1000]}
else: null
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"b": 1002})
def testFlatMap(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.flatMap:
- input
- params: [{value: int}]
ret: {type: map, values: int}
do:
if: {">": [value, 2]}
then:
- let: {out: {value: {}, type: {type: map, values: int}}}
- set:
out:
map.add:
- out
- {s.int: value}
- value
- set:
out:
map.add:
- out
- {s.concat: [{s.int: value}, {s.int: value}]}
- value
- out
else:
{value: {}, type: {type: map, values: int}}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"3": 3, "4": 4, "5": 5, "33": 3, "44": 4, "55": 5})
def testFlatMapWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.flatMapWithKey:
- input
- params: [{key: string}, {value: int}]
ret: {type: map, values: int}
do:
map.add:
- map.add:
- {value: {}, type: {type: map, values: int}}
- key
- value
- {s.concat: [key, key]}
- {+: [100, value]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "aa": 101, "bb": 102, "cc": 103, "dd": 104, "ee": 105})
def testZipMap(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmap:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- params: [{a: string}, {b: int}]
ret: string
do: {s.concat: [a, {s.int: b}]}
''')[0].action(None), {"0": "x101", "1": "y102", "2": "z103"})
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmap:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- {value: {"0": "a", "1": "b", "2": "c"}, type: {type: map, values: string}}
- params: [{a: string}, {b: int}, {c: string}]
ret: string
do: {s.concat: [{s.concat: [a, {s.int: b}]}, c]}
''')[0].action(None), {"0": "x101a", "1": "y102b", "2": "z103c"})
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmap:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- {value: {"0": "a", "1": "b", "2": "c"}, type: {type: map, values: string}}
- {value: {"0": true, "1": false, "2": true}, type: {type: map, values: boolean}}
- params: [{a: string}, {b: int}, {c: string}, {d: boolean}]
ret: string
do: {s.concat: [{s.concat: [{s.concat: [a, {s.int: b}]}, c]}, {if: d, then: {string: "-up"}, else: {string: "-down"}}]}
''')[0].action(None), {"0": "x101a-up", "1": "y102b-down", "2": "z103c-up"})
def testZipMapWithKey(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmapWithKey:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, | |
= output[0]
return output
reconBaseline = ReconstructionModel(RECON_BASELINE)
reconModels = [ReconstructionModel(f) for f in NETWORKS]
reconPostModels = [ReconstructionModelPostTrain(name, file, inpainting)
for (name, file, inpainting) in POSTTRAIN_NETWORKS]
allReconModels = reconModels + reconPostModels
NETWORK_COMBINATIONS = \
[(importanceBaseline1, reconBaseline), (importanceBaseline2, reconBaseline)] + \
[(importanceBaselineLuminance, reconBaseline)] + \
[(importanceBaseline1, reconNet) for reconNet in allReconModels] + \
[(importanceBaseline2, reconNet) for reconNet in allReconModels] + \
[(importanceBaselineLuminance, reconNet) for reconNet in allReconModels] + \
[(importanceNet, reconBaseline) for importanceNet in importanceModels] + \
list(zip(importanceModels, reconModels)) + \
[(importanceNet, reconPostModel) for importanceNet in importanceModels for reconPostModel in reconPostModels]
#NETWORK_COMBINATIONS = list(zip(importanceModels, reconModels))
print("Network combinations:")
for (i,r) in NETWORK_COMBINATIONS:
print(" %s - %s"%(i.name(), r.name()))
# load sampling patterns
print("load sampling patterns")
with h5py.File(SAMPLING_FILE, 'r') as f:
sampling_pattern = dict([(name, torch.from_numpy(f[name][...]).to(device)) \
for name in SAMPLING_PATTERNS])
# create shading
shading = ScreenSpaceShading(device)
shading.fov(30)
shading.ambient_light_color(np.array([0.1,0.1,0.1]))
shading.diffuse_light_color(np.array([1.0, 1.0, 1.0]))
shading.specular_light_color(np.array([0.0, 0.0, 0.0]))
shading.specular_exponent(16)
shading.light_direction(np.array([0.1,0.1,1.0]))
shading.material_color(np.array([1.0, 0.3, 0.0]))
AMBIENT_OCCLUSION_STRENGTH = 1.0
shading.ambient_occlusion(1.0)
shading.inverse_ao = False
#heatmap
HEATMAP_CFG = [(min, mean) for min in HEATMAP_MIN for mean in HEATMAP_MEAN if min<mean]
print("heatmap configs:", HEATMAP_CFG)
#########################
# DEFINE STATISTICS
#########################
ssimLoss = SSIM(size_average=False)
ssimLoss.to(device)
psnrLoss = PSNR()
psnrLoss.to(device)
lpipsColor = lpips.PerceptualLoss(model='net-lin', net='alex', use_gpu=True)
MIN_FILLING = 0.05
NUM_BINS = 200
class Statistics:
def __init__(self):
self.histogram_color_withAO = np.zeros(NUM_BINS, dtype=np.float64)
self.histogram_color_noAO = np.zeros(NUM_BINS, dtype=np.float64)
self.histogram_depth = np.zeros(NUM_BINS, dtype=np.float64)
self.histogram_normal = np.zeros(NUM_BINS, dtype=np.float64)
self.histogram_mask = np.zeros(NUM_BINS, dtype=np.float64)
self.histogram_ao = np.zeros(NUM_BINS, dtype=np.float64)
self.histogram_counter = 0
def create_datasets(self,
hdf5_file : h5py.File,
stats_name : str, histo_name : str,
num_samples : int,
extra_info : dict):
self.expected_num_samples = num_samples
stats_shape = (num_samples, len(list(StatField)))
self.stats_file = hdf5_file.require_dataset(
stats_name, stats_shape, dtype='f', exact=True)
self.stats_file.attrs['NumFields'] = len(list(StatField))
for field in list(StatField):
self.stats_file.attrs['Field%d'%field.value] = field.name
for key, value in extra_info.items():
self.stats_file.attrs[key] = value
self.stats_index = 0
histo_shape = (NUM_BINS, len(list(HistoField)))
self.histo_file = hdf5_file.require_dataset(
histo_name, histo_shape, dtype='f', exact=True)
self.histo_file.attrs['NumFields'] = len(list(HistoField))
for field in list(HistoField):
self.histo_file.attrs['Field%d'%field.value] = field.name
for key, value in extra_info.items():
self.histo_file.attrs[key] = value
def add_timestep_sample(self, pred_mnda, gt_mnda, sampling_mask):
"""
adds a timestep sample:
pred_mnda: prediction: mask, normal, depth, AO
gt_mnda: ground truth: mask, normal, depth, AO
"""
B = pred_mnda.shape[0]
#shading
shading.ambient_occlusion(AMBIENT_OCCLUSION_STRENGTH)
pred_color_withAO = shading(pred_mnda)
gt_color_withAO = shading(gt_mnda)
shading.ambient_occlusion(0.0)
pred_color_noAO = shading(pred_mnda)
gt_color_noAO = shading(gt_mnda)
#apply border
pred_mnda = pred_mnda[:,:,LOSS_BORDER:-LOSS_BORDER,LOSS_BORDER:-LOSS_BORDER]
pred_color_withAO = pred_color_withAO[:,:,LOSS_BORDER:-LOSS_BORDER,LOSS_BORDER:-LOSS_BORDER]
pred_color_noAO = pred_color_noAO[:,:,LOSS_BORDER:-LOSS_BORDER,LOSS_BORDER:-LOSS_BORDER]
gt_mnda = gt_mnda[:,:,LOSS_BORDER:-LOSS_BORDER,LOSS_BORDER:-LOSS_BORDER]
gt_color_withAO = gt_color_withAO[:,:,LOSS_BORDER:-LOSS_BORDER,LOSS_BORDER:-LOSS_BORDER]
gt_color_noAO = gt_color_noAO[:,:,LOSS_BORDER:-LOSS_BORDER,LOSS_BORDER:-LOSS_BORDER]
mask = gt_mnda[:,0:1,:,:] * 0.5 + 0.5
# PSNR
psnr_mask = psnrLoss(pred_mnda[:,0:1,:,:], gt_mnda[:,0:1,:,:]).cpu().numpy()
psnr_normal = psnrLoss(pred_mnda[:,1:4,:,:], gt_mnda[:,1:4,:,:], mask=mask).cpu().numpy()
psnr_depth = psnrLoss(pred_mnda[:,4:5,:,:], gt_mnda[:,4:5,:,:], mask=mask).cpu().numpy()
psnr_ao = psnrLoss(pred_mnda[:,5:6,:,:], gt_mnda[:,5:6,:,:], mask=mask).cpu().numpy()
psnr_color_withAO = psnrLoss(pred_color_withAO, gt_color_withAO, mask=mask).cpu().numpy()
psnr_color_noAO = psnrLoss(pred_color_noAO, gt_color_noAO, mask=mask).cpu().numpy()
# SSIM
ssim_mask = ssimLoss(pred_mnda[:,0:1,:,:], gt_mnda[:,0:1,:,:]).cpu().numpy()
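# Blend the prediction toward the ground truth wherever the mask is ~0, so the
# SSIM scores computed below only reflect differences inside the foreground mask.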
pred_mnda = gt_mnda + mask * (pred_mnda - gt_mnda)
ssim_normal = ssimLoss(pred_mnda[:,1:4,:,:], gt_mnda[:,1:4,:,:]).cpu().numpy()
ssim_depth = ssimLoss(pred_mnda[:,4:5,:,:], gt_mnda[:,4:5,:,:]).cpu().numpy()
ssim_ao = ssimLoss(pred_mnda[:,5:6,:,:], gt_mnda[:,5:6,:,:]).cpu().numpy()
ssim_color_withAO = ssimLoss(pred_color_withAO, gt_color_withAO).cpu().numpy()
ssim_color_noAO = ssimLoss(pred_color_noAO, gt_color_noAO).cpu().numpy()
# Perceptual
lpips_color_withAO = torch.cat([lpipsColor(pred_color_withAO[b], gt_color_withAO[b], normalize=True) for b in range(B)], dim=0).cpu().numpy()
lpips_color_noAO = torch.cat([lpipsColor(pred_color_noAO[b], gt_color_noAO[b], normalize=True) for b in range(B)], dim=0).cpu().numpy()
# Samples
samples = torch.mean(sampling_mask, dim=(1,2,3)).cpu().numpy()
# Write samples to file
for b in range(B):
assert self.stats_index < self.expected_num_samples, "Adding more samples than specified"
self.stats_file[self.stats_index, :] = np.array([
psnr_mask[b], psnr_normal[b], psnr_depth[b], psnr_ao[b], psnr_color_noAO[b], psnr_color_withAO[b],
ssim_mask[b], ssim_normal[b], ssim_depth[b], ssim_ao[b], ssim_color_noAO[b], ssim_color_withAO[b],
lpips_color_noAO[b], lpips_color_withAO[b],
samples[b]], dtype='f')
self.stats_index += 1
# Histogram
self.histogram_counter += 1
mask_diff = F.l1_loss(gt_mnda[:,0,:,:], pred_mnda[:,0,:,:], reduction='none')
histogram,_ = np.histogram(mask_diff.cpu().numpy(), bins=NUM_BINS, range=(0,1), density=True)
self.histogram_mask += (histogram/(NUM_BINS*B) - self.histogram_mask)/self.histogram_counter
#normal_diff = (-F.cosine_similarity(gt_mnda[0,1:4,:,:], pred_mnda[0,1:4,:,:], dim=0)+1)/2
normal_diff = F.l1_loss(gt_mnda[:,1:4,:,:], pred_mnda[:,1:4,:,:], reduction='none').sum(dim=0) / 6
histogram,_ = np.histogram(normal_diff.cpu().numpy(), bins=NUM_BINS, range=(0,1), density=True)
self.histogram_normal += (histogram/(NUM_BINS*B) - self.histogram_normal)/self.histogram_counter
depth_diff = F.l1_loss(gt_mnda[:,4,:,:], pred_mnda[:,4,:,:], reduction='none')
histogram,_ = np.histogram(depth_diff.cpu().numpy(), bins=NUM_BINS, range=(0,1), density=True)
self.histogram_depth += (histogram/(NUM_BINS*B) - self.histogram_depth)/self.histogram_counter
ao_diff = F.l1_loss(gt_mnda[:,5,:,:], pred_mnda[:,5,:,:], reduction='none')
histogram,_ = np.histogram(ao_diff.cpu().numpy(), bins=NUM_BINS, range=(0,1), density=True)
self.histogram_ao += (histogram/(NUM_BINS*B) - self.histogram_ao)/self.histogram_counter
color_diff = F.l1_loss(gt_color_withAO[:,0,:,:], pred_color_withAO[:,0,:,:], reduction='none')
histogram,_ = np.histogram(color_diff.cpu().numpy(), bins=NUM_BINS, range=(0,1), density=True)
self.histogram_color_withAO += (histogram/(NUM_BINS*B) - self.histogram_color_withAO)/self.histogram_counter
color_diff = F.l1_loss(gt_color_noAO[:,0,:,:], pred_color_noAO[:,0,:,:], reduction='none')
histogram,_ = np.histogram(color_diff.cpu().numpy(), bins=NUM_BINS, range=(0,1), density=True)
self.histogram_color_noAO += (histogram/(NUM_BINS*B) - self.histogram_color_noAO)/self.histogram_counter
def close_stats_file(self):
self.stats_file.attrs['NumEntries'] = self.stats_index
def write_histogram(self):
"""
After every sample for the current dataset was processed, write
a histogram of the errors in a new file
"""
for i in range(NUM_BINS):
self.histo_file[i,:] = np.array([
i / NUM_BINS, (i+1) / NUM_BINS,
self.histogram_mask[i],
self.histogram_normal[i],
self.histogram_depth[i],
self.histogram_ao[i],
self.histogram_color_withAO[i],
self.histogram_color_noAO[i]
])
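# Tiny illustrative sketch (synthetic data) of the incremental running-mean
# histogram update used in add_timestep_sample above.
def _example_running_histogram(num_bins=NUM_BINS):
    running = np.zeros(num_bins, dtype=np.float64)
    counter = 0
    for _ in range(10):
        errors = np.abs(np.random.randn(1024)) % 1.0  # fake per-pixel errors in [0, 1)
        hist, _ = np.histogram(errors, bins=num_bins, range=(0, 1), density=True)
        counter += 1
        running += (hist / num_bins - running) / counter  # running mean over batches
    return running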
#########################
# DATASET
#########################
class FullResDataset(torch.utils.data.Dataset):
def __init__(self, file):
self.hdf5_file = h5py.File(file, 'r')
self.dset = self.hdf5_file['gt']
print("Dataset shape:", self.dset.shape)
def __len__(self):
return self.dset.shape[0]
def num_timesteps(self):
return self.dset.shape[1]
def __getitem__(self, idx):
return (self.dset[idx,...], np.array(idx))
#########################
# COMPUTE STATS for each dataset
#########################
for dataset_name, dataset_file in DATASETS:
dataset_file = os.path.join(DATASET_PREFIX, dataset_file)
print("Compute statistics for", dataset_name)
# init luminance importance map
importanceBaselineLuminance.setTestFile(dataset_file)
if importanceBaselineLuminance.isAvailable():
print("Luminance-contrast importance map is available")
# create output file
os.makedirs(OUTPUT_FOLDER, exist_ok = True)
output_file = os.path.join(OUTPUT_FOLDER, dataset_name+'.hdf5')
print("Save to", output_file)
with h5py.File(output_file, 'a') as output_hdf5_file:
# load dataset
set = FullResDataset(dataset_file)
data_loader = torch.utils.data.DataLoader(set, batch_size=BATCH_SIZE, shuffle=False)
# define statistics
StatsCfg = collections.namedtuple(
"StatsCfg",
"stats importance recon heatmin heatmean pattern")
statistics = []
for (inet,rnet) in NETWORK_COMBINATIONS:
for (heatmin, heatmean) in HEATMAP_CFG:
for pattern in SAMPLING_PATTERNS:
stats_info = {
'importance' : inet.name(),
'reconstruction' : rnet.name(),
'heatmin' : heatmin,
'heatmean' : heatmean,
'pattern' : pattern
}
stats_filename = "Stats_%s_%s_%03d_%03d_%s"%(
inet.name(), rnet.name(), heatmin*100, heatmean*100, pattern)
histo_filename = "Histogram_%s_%s_%03d_%03d_%s"%(
inet.name(), rnet.name(), heatmin*100, heatmean*100, pattern)
s = Statistics()
s.create_datasets(
output_hdf5_file,
stats_filename, histo_filename,
len(set) * set.num_timesteps(),
stats_info)
statistics.append(StatsCfg(
stats = s,
importance = inet,
recon = rnet,
heatmin = heatmin,
heatmean = heatmean,
pattern = pattern
))
print(len(statistics), " different combinations are performed per sample")
# compute statistics
try:
with torch.no_grad():
num_minibatch = len(data_loader)
pg = ProgressBar(num_minibatch, 'Evaluation', length=50)
for iteration, (batch, batch_indices) in enumerate(data_loader, 0):
pg.print_progress_bar(iteration)
batch = batch.to(device)
importanceBaselineLuminance.setIndices(batch_indices)
B, T, C, H, W = batch.shape
# try out each combination
for s in statistics:
#print(s)
# get input to evaluation
importanceNetUpscale = s.importance.networkUpscaling()
importancePostUpscale = UPSCALING // importanceNetUpscale
crop_low = torch.nn.functional.interpolate(
batch.reshape(B*T, C, H, W), scale_factor=1/UPSCALING,
mode='area').reshape(B, T, C, H//UPSCALING, W//UPSCALING)
pattern = sampling_pattern[s.pattern][:H, :W]
crop_high = batch
# loop over timesteps
pattern = pattern.unsqueeze(0).unsqueeze(0)
previous_importance = None
previous_output = None
reconstructions = []
for j in range(T):
importanceBaselineLuminance.setTime(j)
# extract flow (always the last two channels of crop_high)
flow = crop_high[:,j,C-2:,:,:]
# compute importance map
importance_input = crop_low[:,j,:5,:,:]
if j==0 or s.importance.disableTemporal:
previous_input = torch.zeros(
B,1,
importance_input.shape[2]*importanceNetUpscale,
importance_input.shape[3]*importanceNetUpscale,
dtype=crop_high.dtype, device=crop_high.device)
else:
flow_low = F.interpolate(flow, scale_factor = 1/importancePostUpscale)
previous_input = models.VideoTools.warp_upscale(
previous_importance,
flow_low,
1,
False)
importance_map = s.importance.call(importance_input, previous_input)
if len(importance_map.shape)==3:
importance_map = importance_map.unsqueeze(1)
previous_importance = importance_map
target_mean = s.heatmean
if USE_BINARY_SEARCH_ON_MEAN:
# For regular sampling, the normalization does not work properly,
# use binary search on the heatmean instead
def f(x):
postprocess = importance.PostProcess(
s.heatmin, x, importancePostUpscale,
LOSS_BORDER//importancePostUpscale,
'basic')
importance_map2 = postprocess(importance_map)[0].unsqueeze(1)
sampling_mask = (importance_map2 >= pattern).to(dtype=importance_map.dtype)
samples = torch.mean(sampling_mask).item()
return samples
target_mean = binarySearch(f, s.heatmean, s.heatmean, 10, 0, 1)
#print("Binary search for #samples, mean start={}, result={} with samples={}, original={}".
# format(s.heatmean, s.heatmean, f(target_mean), f(s.heatmean)))
# normalize and upscale importance map
postprocess = importance.PostProcess(
s.heatmin, target_mean, importancePostUpscale,
LOSS_BORDER//importancePostUpscale,
'basic')
importance_map = postprocess(importance_map)[0].unsqueeze(1)
#print("mean:", torch.mean(importance_map).item())
# create samples
sample_mask = (importance_map >= pattern).to(dtype=importance_map.dtype)
reconstruction_input = torch.cat((
sample_mask * crop_high[:,j,0:5,:,:], # mask, normal x, normal y, normal z, depth
sample_mask * torch.ones(B,1,H,W, dtype=crop_high.dtype, device=crop_high.device), # ao
sample_mask), # sample mask
dim = 1)
# warp previous output
if j==0 or s.recon.disableTemporal:
previous_input = torch.zeros(B,6,H,W, dtype=crop_high.dtype, device=crop_high.device)
else:
previous_input = models.VideoTools.warp_upscale(
previous_output,
flow,
1, False)
# run reconstruction network
reconstruction = s.recon.call(reconstruction_input, sample_mask, previous_input)
# clamp
reconstruction_clamped = torch.cat([
torch.clamp(reconstruction[:,0:1,:,:], -1, +1), # mask
ScreenSpaceShading.normalize(reconstruction[:,1:4,:,:], dim=1),
torch.clamp(reconstruction[:,4:5,:,:], 0, +1), # depth
torch.clamp(reconstruction[:,5:6,:,:], 0, +1) # ao
], dim=1)
reconstructions.append(reconstruction_clamped)
# save for next frame
previous_output = reconstruction_clamped
#endfor: timesteps
# compute statistics
reconstructions = torch.cat(reconstructions, dim=0)
crops_high = torch.cat([crop_high[:,j,:6,:,:] for j in range(T)], dim=0)
sample_masks = torch.cat([sample_mask]*T, dim=0)
s.stats.add_timestep_sample(
reconstructions,
crops_high,
sample_masks)
# endfor: statistic
# endfor: batch
pg.print_progress_bar(num_minibatch)
# end no_grad()
finally:
# close files
for s in statistics:
s.stats.write_histogram()
s.stats.close_stats_file()
# end with: hdf5 file
# end for: loop over datasets
if __name__ == "__main__":
run()
#import pprofile
#prof | |
import numpy as np
import pandas as pd
from pylab import *
import pickle
import tensorflow as tf
import random
import os
from sklearn.model_selection import train_test_split
import matplotlib.lines as mlines
from random import randint
from sklearn import preprocessing
from sklearn.model_selection import KFold
import keras
from keras.models import Sequential
import itertools
from itertools import product
import glob
import os.path
from os import path
def find_best(network, K):
# For every file saved during the cross validation, it picks the one that returns the lowest loss in the test set
# and returns the best parameters for the network and the corresponding loss associated with it
# The argument "network" is 1 for the outcome mechanism and 2 for the treatment mechanism
all_filenames = glob.glob("*network{}.csv".format(network))
losses = dict()
keywords = []
for f in all_filenames:
df = pd.read_csv(f)
loss = np.array(df["1"])
key = f.split("/")[-1]
key = key[:-4]
key = "-".join(key.split("-")[1:])
if key not in losses:
losses[key] = []
losses[key].append(loss[~np.isnan(loss)][-1])
best = list(losses.keys())[0]
current = np.inf
for key in losses.keys():
if np.mean(losses[key]) < current:
current = np.mean(losses[key])
best = key
f = open("K0-" + best + ".pkl", "rb")
parameters = pickle.load(f)
return parameters, current
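# Illustrative only: once cross-validation files "*network1.csv" / "K0-*.pkl" exist
# in the working directory, the best hyperparameters can be recovered with e.g.
#   params_outcome, loss_outcome = find_best(network=1, K=5)
# (K is kept for symmetry with the rest of the pipeline but is not used above.)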
def divide_data(M, k, seed):
# The argument M is the data corresponding to matrix M in the main file and k is the number of folds
# This splits the data into k random folds, as the nuisance parameters have to be learnt with one part of the data
# and the ATE/ATT coefficients have to be learnt with the other part. The part indexed by "train" is used to
# learn the nuisances parameters and the part "test" is used to learn the parameters of interest (ATE/ATT)
# This data is used later in the neural_net function
X_test = []
Y_test = []
X_train = []
Y_train = []
kf = KFold(n_splits=k, random_state=seed, shuffle=True)
for train_index, test_index in kf.split(M):
x_train = M[train_index][:, :-1]
y_train = M[train_index][:, -1]
x_test = M[test_index][:, :-1]
y_test = M[test_index][:, -1]
X_train.append(x_train)
Y_train.append(y_train)
X_test.append(x_test)
Y_test.append(y_test)
return X_train, Y_train, X_test, Y_test
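# Quick illustrative check of divide_data on a hypothetical matrix whose last
# column is the outcome; only the fold count and shapes are verified here.
def _example_divide_data():
    M = np.random.randn(100, 6)  # 5 covariates + 1 outcome column (made up)
    X_train, Y_train, X_test, Y_test = divide_data(M, k=5, seed=0)
    assert len(X_train) == 5 and X_train[0].shape[1] == 5
    return X_train, Y_train, X_test, Y_test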
def weights_biases(perm):
# Returns the weights given the dimensions specified in the argument
# These weights are then used in the MLP function where they weight
# each input
initializer = tf.compat.v1.keras.initializers.glorot_normal()
weights = {}
for i in range(len(perm) - 1):
weights["h" + str(i)] = tf.Variable(
initializer([perm[i], perm[i + 1]]), trainable=True
)
weights["b" + str(i)] = tf.Variable(tf.zeros([1, perm[i + 1]]), trainable=True)
return weights
def train(
X_train, y_train, X_test, y_test, epoch, batchSize, optimizer, cost, x, y, sess
):
# Trains the neural network given the train and test data and specifications
# in the arguments
# For every batch computes the loss and gives the overall loss in both, the
# train set and the test set. The cost function is defined in the neural_net
# function below.
L = []
L_test = []
for e in range(epoch):
K = []
for k in range(len(X_test) // batchSize):
batchX_test = X_test[k * batchSize : (k + 1) * batchSize]
batchY_test = y_test[k * batchSize : (k + 1) * batchSize]
K.append(sess.run(cost, feed_dict={x: batchX_test, y: batchY_test}))
L_test.append(np.mean(K))
permutation = np.random.permutation(len(X_train))
for i in range(len(X_train) // batchSize):
I = permutation[i * batchSize : (i + 1) * batchSize]
sess.run(optimizer, feed_dict={x: X_train[I], y: y_train[I]})
L.append(sess.run(cost, feed_dict={x: X_train[I], y: y_train[I]}))
if i % 10 == 0:
print("Step " + str(i) + ", Minibatch Loss= " + "{:.6f}".format(L[-1]))
return L, L_test
def predict(X, batchSize, x, pred, sess):
# Gives the predictions of the output given the input X
P = []
print(len(X))
for i in range(len(X) // batchSize):
P.append(sess.run(pred, feed_dict={x: X[i * batchSize : (i + 1) * batchSize]}))
return np.concatenate(P)
def MLP(x, weights):
# Gives the output from the network. In each layer of the network, the input is
# multiplied by the corresponding weight and transformed with the ReLU non-linearity.
# It also returns the regularized l2 loss. The non-linearity can be changed to
# "leaky_relu" or "sigmoid"
layer = tf.matmul(x, weights["h0"]) + weights["b0"]
reg_loss = tf.nn.l2_loss(weights["h0"])
for i in range(1, len(weights) // 2):
layer = (
tf.matmul(tf.nn.relu(layer), weights["h" + str(i)]) + weights["b" + str(i)]
)
reg_loss = reg_loss + tf.nn.l2_loss(weights["h" + str(i)])
return tf.squeeze(layer), reg_loss
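# Minimal sketch (graph-mode TF1 style, matching train/predict above) of how
# weights_biases and MLP compose into a network. The layer sizes and the
# squared-error cost here are arbitrary assumptions, not the settings used in
# neural_net below.
def _example_build_network(input_dim=5):
    tf.compat.v1.disable_eager_execution()
    x = tf.compat.v1.placeholder(tf.float32, [None, input_dim])
    y = tf.compat.v1.placeholder(tf.float32, [None])
    weights = weights_biases([input_dim, 32, 32, 1])  # two hidden layers of 32 units
    pred, reg_loss = MLP(x, weights)
    cost = tf.reduce_mean(tf.square(pred - y)) + 0.01 * reg_loss
    optimizer = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(cost)
    return x, y, pred, cost, optimizer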
def save_data(
q,
nr_layers,
perm,
batch_size,
lr,
reg_constant,
loss,
network,
L,
L_test,
y_test1,
pred_y_test,
):
# This function saves the data in files with the name indicating the k fold,
# the set of parameters used, and the network (the network is 1 for the
# outcome network or 2 for the treatment network)
filename = (
"K{}-Nr_Layers{}-perm{}-batch_size{}-lr{}-reg_constant{}-loss{}-network{}"
)
description = filename.format(
q, nr_layers, perm, batch_size, lr, reg_constant, loss, network
)
# In each csv file, it saves the train and test loss, the actual values of the
# output and the predicted ones
df1 = pd.DataFrame({"Loss_Train": L})
df2 = pd.DataFrame({"Loss_test": L_test})
df3 = pd.DataFrame({"Actual_values": y_test1})
df4 = pd.DataFrame({"Predicted_Values": pred_y_test})
df5 = pd.DataFrame({"Description": description}, index=[0])
df = pd.concat([df1, df2, df3, df4, df5], ignore_index=True, axis=1)
df.to_csv(description + ".csv")
# Creates pickle files for each of the csv files.
f = open(description + ".pkl", "wb")
pickle.dump(
{
"Nr_Layers": nr_layers,
"neurons": perm,
"batch_sizes": batch_size,
"lrs": lr,
"reg_constants": reg_constant,
"losses": loss,
},
f,
)
f.close()
def do_i_exist(q, nr_layers, perm, batch_size, lr, reg_constant, loss, network):
# Checks if the file is already saved so that it does not repeat the training
# for the same hyperparameters during the cross validation procedure later
filename = (
"K{}-Nr_Layers{}-perm{}-batch_size{}-lr{}-reg_constant{}-loss{}-network{}"
)
description = filename.format(
q, nr_layers, perm, batch_size, lr, reg_constant, loss, network
)
file_name = description + ".pkl"
return path.exists(file_name)
def neural_net(
Y_max,
Y_min,
k,
X_neural,
Y_neural,
X_theta,
Y_theta,
network,
cross_validate,
batch_sizes,
Nr_Layers,
neurons,
lrs,
reg_constants,
losses,
):
# The main neural network function, which given the input data and the
# hyperparameters returns the output from both, the first and the second
# network. This output is then to be used in the main file for the
# computation of the ATE/ATT and their standard errors.
# The data indexed by "neural" is used to learn the nuisance parameters
# and the part indexed by "theta" is used to compute the ATE/ATT
config = tf.ConfigProto(
intra_op_parallelism_threads=20,
inter_op_parallelism_threads=20,
allow_soft_placement=True,
device_count={"CPU": 20},
)
# Set the number of epochs
epochs = 50
# G0 are the predicted values of the first network (for the outcome mechanism)
# with the treatment D set to 0
# G1 are the predicted values of the first network (for the outcome mechanism)
# with the treatment D set to 1
# G are the predicted values for the first network (for the outcome mechanism)
# without changing the original input
# D is the treatment variable
# Y is the outcome variable
# M is the predicted outcome for the second network (for the treatment mechanism)
G_0 = []
G_1 = []
G = []
D = []
Y = []
M = []
if cross_validate:
# Takes all possible combinations of the hyperparameters set by the user and
# cross validates to find the best combination
possibilities = product(
batch_sizes, neurons, lrs, reg_constants, losses, Nr_Layers
)
else:
# Uses the best combinations of the hyperparameters after the cross validation
possibilities = product(
[batch_sizes], [neurons], [lrs], [reg_constants], [losses], [Nr_Layers]
)
for batch_size, neuron, lr, reg_constant, loss, nr_layers in possibilities:
for q in range(k):
perm = (neuron) * nr_layers
# For every fold q, check if for that particular combination of hyperparameters
# the file exists with the do_i_exist function defined before. If it exists it
# tries the next combination, if not it performs the training below
if (
do_i_exist(
q, nr_layers, perm, batch_size, lr, reg_constant, loss, network
)
and cross_validate
):
continue
x_neural, x_theta = X_neural[q], X_theta[q]
y_theta = Y_theta[q]
y_neural = Y_neural[q]
X_train = x_neural
X_test = x_theta
y_train = y_neural
y_test = y_theta
if network == 2:
# If network is 1 you use the whole input X (which includes treatment D as
# the last column) to predict the outcome Y.
| |
<reponame>Hartorn/airflow<filename>airflow/providers/google/cloud/hooks/spanner.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Spanner Hook.
"""
from typing import Callable, List, Optional
from google.api_core.exceptions import AlreadyExists, GoogleAPICallError
from google.cloud.spanner_v1.client import Client
from google.cloud.spanner_v1.database import Database
from google.cloud.spanner_v1.instance import Instance
from google.cloud.spanner_v1.transaction import Transaction
from google.longrunning.operations_grpc_pb2 import Operation # noqa: F401
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class SpannerHook(GoogleBaseHook):
"""
Hook for Google Cloud Spanner APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(self, gcp_conn_id: str = 'google_cloud_default', delegate_to: Optional[str] = None) -> None:
super().__init__(gcp_conn_id, delegate_to)
self._client = None
def _get_client(self, project_id: str) -> Client:
"""
Provides a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the GCP project.
:type project_id: str
:return: Client
:rtype: google.cloud.spanner_v1.client.Client
"""
if not self._client:
self._client = Client(
project=project_id,
credentials=self._get_credentials(),
client_info=self.client_info
)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def get_instance(
self,
instance_id: str,
project_id: str,
) -> Instance:
"""
Gets information about a particular instance.
:param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:return: Spanner instance
:rtype: google.cloud.spanner_v1.instance.Instance
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
return None
return instance
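# Illustrative usage (not part of the hook; connection id and names are examples):
#   hook = SpannerHook(gcp_conn_id="google_cloud_default")
#   instance = hook.get_instance(instance_id="my-instance", project_id="my-project")
#   if instance is None:
#       raise AirflowException("Instance my-instance not found")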
def _apply_to_instance(
self, project_id: str,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
func: Callable[[Instance], Operation]
) -> None:
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable[google.cloud.spanner_v1.instance.Instance]
"""
# noinspection PyUnresolvedReferences
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id, configuration_name=configuration_name,
node_count=node_count, display_name=display_name)
try:
operation = func(instance) # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
@GoogleBaseHook.fallback_to_default_project_id
def create_instance(
self,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str,
) -> None:
"""
Creates a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.create())
@GoogleBaseHook.fallback_to_default_project_id
def update_instance(
self,
instance_id: str,
configuration_name: str,
node_count: int,
display_name: str,
project_id: str,
) -> None:
"""
Updates an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.update())
@GoogleBaseHook.fallback_to_default_project_id
def delete_instance(self, instance_id: str, project_id: str) -> None:
"""
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
try:
instance.delete()
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
@GoogleBaseHook.fallback_to_default_project_id
def get_database(
self,
instance_id: str,
database_id: str,
project_id: str,
) -> Optional[Database]:
"""
Retrieves a database in Cloud Spanner. If the database does not exist
in the specified instance, it returns None.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: Database object or None if database does not exist
:rtype: google.cloud.spanner_v1.database.Database or None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
return None
return database
@GoogleBaseHook.fallback_to_default_project_id
def create_database(
self,
instance_id: str,
database_id: str,
ddl_statements: List[str],
project_id: str,
) -> None:
"""
Creates a new database in Cloud Spanner.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database to create in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id,
ddl_statements=ddl_statements)
try:
operation = database.create() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
@GoogleBaseHook.fallback_to_default_project_id
def update_database(
self,
instance_id: str,
database_id: str,
ddl_statements: List[str],
project_id: str,
operation_id: Optional[str] = None
) -> None:
"""
Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the | |
configFromProvisioningServer(self, serverBaseURL, deviceId, disableServerValidation=False):
if disableServerValidation:
ssl._create_default_https_context = ssl._create_unverified_context
url = serverBaseURL + '/GetWeaveProvisioningInfo?macAddr=%016X' % deviceId
with urllib.request.urlopen(url) as resp:
respData = resp.read()
respPlist = plistlib.loads(respData, fmt=plistlib.FMT_XML)
try:
if self.deviceCert == None:
self.deviceCert = base64.b64decode(respPlist['nlWeaveCertificate'], validate=True)
if self.devicePrivateKey == None:
self.devicePrivateKey = base64.b64decode(respPlist['nlWeavePrivateKey'], validate=True)
if self.pairingCode == None:
self.pairingCode = respPlist['nlWeavePairingCode']
except binascii.Error as ex:
raise binascii.Error('Invalid base-64 data returned by provisioning server: %s' % (str(ex)))
def _configFromProvisioningCSVRow(self, row):
if self.serialNum == None:
serialNum = row.get('Serial_Num', None)
if serialNum != None:
self.serialNum = serialNum
if self.deviceCert == None:
deviceCert = row.get('Certificate', None)
if deviceCert != None:
self.deviceCert = base64.b64decode(deviceCert, validate=True)
if self.devicePrivateKey == None:
devicePrivateKey = row.get('Private_Key', None)
if devicePrivateKey == None:
devicePrivateKey = row.get('Private Key', None)
if devicePrivateKey != None:
self.devicePrivateKey = base64.b64decode(devicePrivateKey, validate=True)
if self.pairingCode == None:
pairingCode = row.get('Pairing_Code', None)
if pairingCode == None:
pairingCode = row.get('Pairing Code', None)
if pairingCode != None:
self.pairingCode = pairingCode
if self.productRev == None:
productRev = row.get('Product_Rev', None)
if productRev != None:
self.productRev = int(productRev, base=0)
if self.mfgDate == None:
mfgDate = row.get('Mfg_Date', None)
if mfgDate != None:
self.mfgDate = mfgDate
def _setField(self, tag, value):
if value != None:
self._fields[tag] = value
else:
self._fields.pop(tag, None)
class ESPSerialDeviceControler(object):
'''Supports provisioning ESP32 devices using the ESP32 serial bootloader protocol and
Espressif's esptool.py command.'''
def __init__(self, target, loadAddr=None):
self._target = target
self._esptoolCmd = 'esptool.py'
self._comPort = target.comPort
self._comSpeed = target.comSpeed
self._loadAddr = _applyDefault(loadAddr, target.defaultLoadAddr)
def provisionDevice(self, provData):
# Encode the provisioning data
encodedProvData = provData.encode()
# Verify that the load address is valid and that no part of the provisioning data will
# fall outside the valid address range for the target device.
self._target.validateLoadAddress(self._loadAddr, len(encodedProvData))
# Create an ESP32 firmware image consisting of two segments:
# - a data segment containing the provisioning data.
# - an executable segment containing a small program to reboot the device.
provImage = encodeESP32FirmwareImage(
entryPoint=ESPSerialDeviceControler._rebootProgEntryPoint,
segments=[
(ESPSerialDeviceControler._rebootProgLoadAddr, ESPSerialDeviceControler._rebootProg),
(self._loadAddr, encodedProvData)
])
        print('Writing provisioning data to device')
with _WipedNamedTemporaryFile(suffix='.bin') as imageFile:
# Write the provisioning image into a temporary file
imageFile.write(provImage)
imageFile.flush()
            # Construct a command to invoke the 'load_ram' operation of the esptool,
            # passing the name of the temporary file as the input.
esptoolCmd = [
self._esptoolCmd,
'--no-stub',
'--chip', 'esp32',
'--port', self._comPort,
'--baud', self._comSpeed,
'load_ram', imageFile.name
]
if _verbosity > 0:
print(' Running "%s"' % ' '.join(esptoolCmd))
if _verbosity > 1:
print(' ESP32 Image file (%s):' % imageFile.name)
imageFile.seek(0)
_hexDumpFile(imageFile, ' ')
# Invoke the esptool command and capture its output.
esptoolProc = subprocess.Popen(esptoolCmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(esptoolOut, esptoolErr) = esptoolProc.communicate()
esptoolOut = esptoolOut.decode('utf-8')
esptoolOut = esptoolOut.strip()
if esptoolProc.returncode != 0 or _verbosity > 0:
print(' esptool command exited with %d' % esptoolProc.returncode)
print(_prefixLines(esptoolOut, ' | '))
return esptoolProc.returncode == 0
@classmethod
def addArguments(cls, argParser):
argParser.add_argument('--esptool-cmd', metavar='<path-name>', help='Path to esptool command. Defaults to \'esptool.py\'.')
        argParser.add_argument('--port', metavar='<path-name>', help='COM port device name for ESP32. Defaults to /dev/ttyUSB0.')
argParser.add_argument('--speed', metavar='<int>', help='Baud rate for COM port. Defaults to 115200.')
def configFromArguments(self, argValues):
self._esptoolCmd = _applyDefault(argValues.esptool_cmd, self._esptoolCmd)
self._comPort = _applyDefault(argValues.port, self._comPort)
self._comSpeed = _applyDefault(argValues.speed, self._comSpeed)
# Small ESP32 program to initiate a reboot of the device.
_rebootProg = bytearray([
0x70, 0x80, 0xF4, 0x3F, 0xFF, 0xFF, 0xFF, 0x3F, 0xA4, 0x80, 0xF4, 0x3F, 0xA1, 0x3A, 0xD8, 0x50,
0x8C, 0x80, 0xF4, 0x3F, 0x00, 0x0C, 0x00, 0xC0, 0x90, 0x80, 0xF4, 0x3F, 0xF0, 0x49, 0x02, 0x00,
0xA0, 0x80, 0xF4, 0x3F, 0x00, 0x00, 0x00, 0x80, 0x36, 0x41, 0x00, 0x91, 0xF5, 0xFF, 0x81, 0xF5,
0xFF, 0xC0, 0x20, 0x00, 0xA8, 0x09, 0x80, 0x8A, 0x10, 0xC0, 0x20, 0x00, 0x89, 0x09, 0x91, 0xF3,
0xFF, 0x81, 0xF1, 0xFF, 0xC0, 0x20, 0x00, 0x99, 0x08, 0x91, 0xF2, 0xFF, 0x81, 0xF1, 0xFF, 0xC0,
0x20, 0x00, 0x99, 0x08, 0x91, 0xF2, 0xFF, 0x81, 0xF0, 0xFF, 0xC0, 0x20, 0x00, 0x99, 0x08, 0x91,
0xF1, 0xFF, 0x81, 0xEF, 0xFF, 0xC0, 0x20, 0x00, 0x99, 0x08, 0x06, 0xFF, 0xFF,
])
_rebootProgLoadAddr = 0x40080000
_rebootProgEntryPoint = 0x40080028
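# The provisionDevice() method above ultimately shells out to esptool; with the
# esp32 target defaults defined later in this file, the invocation has roughly
# the following shape (the image file is a wiped temporary file, so its name
# will differ on every run):
#
#   esptool.py --no-stub --chip esp32 --port /dev/ttyUSB0 --baud 115200 load_ram /tmp/provdata.bin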
class JLinkDeviceControler(object):
'''Supports provisioning devices using a SEGGER J-Link debug probe and SEGGER's JLinkExe command.'''
def __init__(self, target, loadAddr=None):
self._target = target
self._jlinkCmd = 'JLinkExe'
self._jLinkDeviceName = target.jlinkDeviceName
self._jlinkInterface = target.jlinkInterface
self._jlinkSpeed = target.jlinkSpeed
self._jlinkSN = None
self._loadAddr = _applyDefault(loadAddr, target.defaultLoadAddr)
def provisionDevice(self, provData):
# Encode the provisioning data
encodedProvData = provData.encode()
# Verify that the load address is valid and that no part of the provisioning data will
# fall outside the valid address range for the target device.
self._target.validateLoadAddress(self._loadAddr, len(encodedProvData))
        print('Writing provisioning data to device')
with tempfile.NamedTemporaryFile(mode='w+') as scriptFile:
with _WipedNamedTemporaryFile(suffix='.bin') as provDataFile:
# Write the provisioning data set into a temporary file
provDataFile.write(provData.encode())
provDataFile.flush()
# Write a command script containing the commands:
# r
# loadfile <data-set-file> <data-set-address>
# go
# q
scriptFile.write('r\nloadfile %s 0x%08X\ngo\nq\n' % (provDataFile.name, self._loadAddr))
scriptFile.flush()
jlinkCmd = self._FormJLinkCommand(scriptFile.name)
if _verbosity > 0:
print(' Running "%s"' % ' '.join(jlinkCmd))
if _verbosity > 1:
print(' Script file (%s):' % scriptFile.name)
scriptFile.seek(0)
print(_prefixLines(scriptFile.read().strip(), ' | '))
print(' Provisioning data file (%s):' % provDataFile.name)
provDataFile.seek(0)
_hexDumpFile(provDataFile, ' ')
# Run the jlink command and collect its output.
jlinkProc = subprocess.Popen(jlinkCmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(jlinkOut, jlinkErr) = jlinkProc.communicate()
jlinkOut = jlinkOut.decode('utf-8')
jlinkOut = jlinkOut.strip()
if jlinkProc.returncode != 0 or _verbosity > 0:
print(' J-Link command exited with %d' % jlinkProc.returncode)
print(_prefixLines(jlinkOut, ' | '))
return jlinkProc.returncode == 0
def _FormJLinkCommand(self, scriptFileName):
cmd = [
self._jlinkCmd,
'-Device', self._jLinkDeviceName,
'-If', self._jlinkInterface,
'-Speed', self._jlinkSpeed,
'-ExitOnError', '1',
'-AutoConnect', '1'
]
if self._jlinkSN != None:
cmd += [ '-SelectEmuBySn', self._jlinkSN ]
cmd += [ '-CommandFile', scriptFileName ]
return cmd
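    # For example, with the nrf52840 target defaults defined later in this file,
    # _FormJLinkCommand() yields roughly the following command line (the
    # -SelectEmuBySn pair is appended only when --jlink-sn is given):
    #
    #   JLinkExe -Device NRF52840_XXAA -If SWD -Speed 8000 -ExitOnError 1 -AutoConnect 1 -CommandFile <script-file>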
@classmethod
def addArguments(cls, argParser):
argParser.add_argument('--jlink-cmd', metavar='<path-name>', help='Path to JLink command. Defaults to \'JLinkExe\'.')
argParser.add_argument('--jlink-if', metavar='SWD|JTAG', help='J-Link interface type. Defaults to SWD.',
choices=[ 'SWD', 'JTAG'])
argParser.add_argument('--jlink-speed', metavar='<int>|adaptive|auto', help='J-Link interface speed.')
argParser.add_argument('--jlink-sn', metavar='<string>', help='J-Link probe serial number.')
def configFromArguments(self, argValues):
self._jlinkCmd = _applyDefault(argValues.jlink_cmd, self._jlinkCmd)
self._jlinkInterface = _applyDefault(argValues.jlink_if, self._jlinkInterface)
self._jlinkSpeed = _applyDefault(argValues.jlink_speed, self._jlinkSpeed)
self._jlinkSN = _applyDefault(argValues.jlink_sn, self._jlinkSN)
class Target(object):
'''Describes a class of target devices and the parameters needed to control or communicate with them.'''
def __init__(self, controllerClass,
jlinkDeviceName=None, jlinkInterface=None, jlinkSpeed=None,
comPort = None, comSpeed=None,
defaultLoadAddr=None, validAddrRanges=None):
self.controllerClass = controllerClass
self.jlinkDeviceName = jlinkDeviceName
self.jlinkInterface = jlinkInterface
self.jlinkSpeed = jlinkSpeed
self.comPort = comPort
self.comSpeed = comSpeed
self.defaultLoadAddr = defaultLoadAddr
self.validAddrRanges = validAddrRanges
def isValidLoadAddress(self, addr, dataLen):
if self.validAddrRanges != None:
addrEnd = addr + dataLen
for (addrRangeStart, addrRangeEnd) in self.validAddrRanges:
if addr >= addrRangeStart and addr <= addrRangeEnd and addrEnd >= addrRangeStart and addrEnd <= addrRangeEnd:
return True
return False
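    # Worked example: for the nrf52840 target defined below, valid RAM spans
    # 0x20000000-0x2003FFFF and the default load address is 0x2003E000, so by
    # the check above a provisioning blob fits only while
    # 0x2003E000 + len(data) <= 0x2003FFFF, i.e. for data up to 0x1FFF bytes.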
def validateLoadAddress(self, addr, dataLen):
if not self.isValidLoadAddress(addr, dataLen):
            raise _UsageError('ERROR: Invalid provisioning data load address\nSome or all of the data would fall outside of the valid memory ranges for the target device')
Target_nrf52840 = Target(controllerClass=JLinkDeviceControler,
jlinkDeviceName='NRF52840_XXAA',
jlinkInterface='SWD',
jlinkSpeed='8000',
defaultLoadAddr=0x2003E000,
validAddrRanges=[
(0x20000000, 0x2003FFFF) # Data RAM region
])
Target_esp32 = Target(controllerClass=ESPSerialDeviceControler,
comPort='/dev/ttyUSB0',
comSpeed='115200',
defaultLoadAddr=(0x40000000 - 0x400),
validAddrRanges=[
(0x3FFAE000, 0x3FFFFFFF) # Internal SRAM 1 + Internal SRAM 2
])
Targets = {
'nrf52840' : Target_nrf52840,
'esp32' : Target_esp32
}
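# A minimal sketch of how a Target entry would be wired to its controller (the
# variable names and values below are hypothetical; ProvisioningData setup and
# argument parsing are not shown here):
#
#   target = Targets['nrf52840']
#   controller = target.controllerClass(target, loadAddr=0x2003E000)
#   controller.configFromArguments(argValues)   # argValues from argparse
#   controller.provisionDevice(provData)        # provData: a ProvisioningData instance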
def main():
try:
class CustomArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise _UsageError(message)
argParser = CustomArgumentParser(description='Tool for factory provisioning of devices running OpenWeave firmware.')
argParser.add_argument('--target', metavar='<string>', help='Target device type. Choices are: %s' % (', '.join(Targets.keys())))
argParser.add_argument('--load-addr', metavar='<hex-string>', help='Address in device memory at which provisioning data will be written.',
type=functools.partial(_parseIntArg, min=0, max=0xFFFFFFFF, argDesc='provisioning data address'))
argParser.add_argument('--verbose', '-v', action='count', help='Adjust output verbosity level. Use multiple arguments to increase verbosity.')
ProvisioningData.addArguments(argParser)
JLinkDeviceControler.addArguments(argParser)
ESPSerialDeviceControler.addArguments(argParser)
argParser.add_argument('--prov-csv-file', metavar='<file-name>', help='Read device provisioning data from a provisioning CSV file.')
argParser.add_argument('--prov-server', metavar='<url>', help='Read device provisioning data from a provisioning server.')
argParser.add_argument('--disable-server-validation', action='store_true', help='When using HTTPS, disable validation of the certificate presented by the provisioning server.')
argValues = argParser.parse_args()
if argValues.target == None:
raise _UsageError('Please specify a target device type using the --target argument.')
if argValues.target not in Targets:
raise _UsageError('Unrecognized target device type: %s\nValid device types are: %s' % (argValues.target, ', '.join(Targets.keys())))
if argValues.verbose != None:
global _verbosity
_verbosity = argValues.verbose
<= 0)
m.c1972 = Constraint(expr= m.x1971 - m.b3020 <= 0)
m.c1973 = Constraint(expr= m.x1972 - m.b3020 <= 0)
m.c1974 = Constraint(expr= m.x1973 - m.b3020 <= 0)
m.c1975 = Constraint(expr= m.x1974 - m.b3020 <= 0)
m.c1976 = Constraint(expr= m.x1975 - m.b3020 <= 0)
m.c1977 = Constraint(expr= m.x1976 - m.b3020 <= 0)
m.c1978 = Constraint(expr= m.x1977 - m.b3020 <= 0)
m.c1979 = Constraint(expr= m.x1978 - m.b3020 <= 0)
m.c1980 = Constraint(expr= m.x1979 - m.b3020 <= 0)
m.c1981 = Constraint(expr= m.x1980 - m.b3020 <= 0)
m.c1982 = Constraint(expr= m.x1981 - m.b3020 <= 0)
m.c1983 = Constraint(expr= m.x1982 - m.b3020 <= 0)
m.c1984 = Constraint(expr= m.x1983 - m.b3020 <= 0)
m.c1985 = Constraint(expr= m.x1984 - m.b3020 <= 0)
m.c1986 = Constraint(expr= m.x1985 - m.b3020 <= 0)
m.c1987 = Constraint(expr= m.x1986 - m.b3020 <= 0)
m.c1988 = Constraint(expr= m.x1987 - m.b3020 <= 0)
m.c1989 = Constraint(expr= m.x1988 - m.b3020 <= 0)
m.c1990 = Constraint(expr= m.x1989 - m.b3020 <= 0)
m.c1991 = Constraint(expr= m.x1990 - m.b3020 <= 0)
m.c1992 = Constraint(expr= m.x1991 - m.b3020 <= 0)
m.c1993 = Constraint(expr= m.x1992 - m.b3020 <= 0)
m.c1994 = Constraint(expr= m.x1993 - m.b3020 <= 0)
m.c1995 = Constraint(expr= m.x1994 - m.b3020 <= 0)
m.c1996 = Constraint(expr= m.x1995 - m.b3020 <= 0)
m.c1997 = Constraint(expr= m.x1996 - m.b3020 <= 0)
m.c1998 = Constraint(expr= m.x1997 - m.b3020 <= 0)
m.c1999 = Constraint(expr= m.x1998 - m.b3020 <= 0)
m.c2000 = Constraint(expr= m.x1999 - m.b3020 <= 0)
m.c2001 = Constraint(expr= m.x2000 - m.b3020 <= 0)
m.c2002 = Constraint(expr= m.x2001 - m.b3021 <= 0)
m.c2003 = Constraint(expr= m.x2002 - m.b3021 <= 0)
m.c2004 = Constraint(expr= m.x2003 - m.b3021 <= 0)
m.c2005 = Constraint(expr= m.x2004 - m.b3021 <= 0)
m.c2006 = Constraint(expr= m.x2005 - m.b3021 <= 0)
m.c2007 = Constraint(expr= m.x2006 - m.b3021 <= 0)
m.c2008 = Constraint(expr= m.x2007 - m.b3021 <= 0)
m.c2009 = Constraint(expr= m.x2008 - m.b3021 <= 0)
m.c2010 = Constraint(expr= m.x2009 - m.b3021 <= 0)
m.c2011 = Constraint(expr= m.x2010 - m.b3021 <= 0)
m.c2012 = Constraint(expr= m.x2011 - m.b3021 <= 0)
m.c2013 = Constraint(expr= m.x2012 - m.b3021 <= 0)
m.c2014 = Constraint(expr= m.x2013 - m.b3021 <= 0)
m.c2015 = Constraint(expr= m.x2014 - m.b3021 <= 0)
m.c2016 = Constraint(expr= m.x2015 - m.b3021 <= 0)
m.c2017 = Constraint(expr= m.x2016 - m.b3021 <= 0)
m.c2018 = Constraint(expr= m.x2017 - m.b3021 <= 0)
m.c2019 = Constraint(expr= m.x2018 - m.b3021 <= 0)
m.c2020 = Constraint(expr= m.x2019 - m.b3021 <= 0)
m.c2021 = Constraint(expr= m.x2020 - m.b3021 <= 0)
m.c2022 = Constraint(expr= m.x2021 - m.b3021 <= 0)
m.c2023 = Constraint(expr= m.x2022 - m.b3021 <= 0)
m.c2024 = Constraint(expr= m.x2023 - m.b3021 <= 0)
m.c2025 = Constraint(expr= m.x2024 - m.b3021 <= 0)
m.c2026 = Constraint(expr= m.x2025 - m.b3021 <= 0)
m.c2027 = Constraint(expr= m.x2026 - m.b3021 <= 0)
m.c2028 = Constraint(expr= m.x2027 - m.b3021 <= 0)
m.c2029 = Constraint(expr= m.x2028 - m.b3021 <= 0)
m.c2030 = Constraint(expr= m.x2029 - m.b3021 <= 0)
m.c2031 = Constraint(expr= m.x2030 - m.b3021 <= 0)
m.c2032 = Constraint(expr= m.x2031 - m.b3021 <= 0)
m.c2033 = Constraint(expr= m.x2032 - m.b3021 <= 0)
m.c2034 = Constraint(expr= m.x2033 - m.b3021 <= 0)
m.c2035 = Constraint(expr= m.x2034 - m.b3021 <= 0)
m.c2036 = Constraint(expr= m.x2035 - m.b3021 <= 0)
m.c2037 = Constraint(expr= m.x2036 - m.b3021 <= 0)
m.c2038 = Constraint(expr= m.x2037 - m.b3021 <= 0)
m.c2039 = Constraint(expr= m.x2038 - m.b3021 <= 0)
m.c2040 = Constraint(expr= m.x2039 - m.b3021 <= 0)
m.c2041 = Constraint(expr= m.x2040 - m.b3021 <= 0)
m.c2042 = Constraint(expr= m.x2041 - m.b3021 <= 0)
m.c2043 = Constraint(expr= m.x2042 - m.b3021 <= 0)
m.c2044 = Constraint(expr= m.x2043 - m.b3021 <= 0)
m.c2045 = Constraint(expr= m.x2044 - m.b3021 <= 0)
m.c2046 = Constraint(expr= m.x2045 - m.b3021 <= 0)
m.c2047 = Constraint(expr= m.x2046 - m.b3021 <= 0)
m.c2048 = Constraint(expr= m.x2047 - m.b3021 <= 0)
m.c2049 = Constraint(expr= m.x2048 - m.b3021 <= 0)
m.c2050 = Constraint(expr= m.x2049 - m.b3021 <= 0)
m.c2051 = Constraint(expr= m.x2050 - m.b3021 <= 0)
m.c2052 = Constraint(expr= m.x2051 - m.b3021 <= 0)
m.c2053 = Constraint(expr= m.x2052 - m.b3021 <= 0)
m.c2054 = Constraint(expr= m.x2053 - m.b3021 <= 0)
m.c2055 = Constraint(expr= m.x2054 - m.b3021 <= 0)
m.c2056 = Constraint(expr= m.x2055 - m.b3021 <= 0)
m.c2057 = Constraint(expr= m.x2056 - m.b3021 <= 0)
m.c2058 = Constraint(expr= m.x2057 - m.b3021 <= 0)
m.c2059 = Constraint(expr= m.x2058 - m.b3021 <= 0)
m.c2060 = Constraint(expr= m.x2059 - m.b3021 <= 0)
m.c2061 = Constraint(expr= m.x2060 - m.b3021 <= 0)
m.c2062 = Constraint(expr= m.x2061 - m.b3021 <= 0)
m.c2063 = Constraint(expr= m.x2062 - m.b3021 <= 0)
m.c2064 = Constraint(expr= m.x2063 - m.b3021 <= 0)
m.c2065 = Constraint(expr= m.x2064 - m.b3021 <= 0)
m.c2066 = Constraint(expr= m.x2065 - m.b3021 <= 0)
m.c2067 = Constraint(expr= m.x2066 - m.b3021 <= 0)
m.c2068 = Constraint(expr= m.x2067 - m.b3021 <= 0)
m.c2069 = Constraint(expr= m.x2068 - m.b3021 <= 0)
m.c2070 = Constraint(expr= m.x2069 - m.b3021 <= 0)
m.c2071 = Constraint(expr= m.x2070 - m.b3021 <= 0)
m.c2072 = Constraint(expr= m.x2071 - m.b3021 <= 0)
m.c2073 = Constraint(expr= m.x2072 - m.b3021 <= 0)
m.c2074 = Constraint(expr= m.x2073 - m.b3021 <= 0)
m.c2075 = Constraint(expr= m.x2074 - m.b3021 <= 0)
m.c2076 = Constraint(expr= m.x2075 - m.b3021 <= 0)
m.c2077 = Constraint(expr= m.x2076 - m.b3021 <= 0)
m.c2078 = Constraint(expr= m.x2077 - m.b3021 <= 0)
m.c2079 = Constraint(expr= m.x2078 - m.b3021 <= 0)
m.c2080 = Constraint(expr= m.x2079 - m.b3021 <= 0)
m.c2081 = Constraint(expr= m.x2080 - m.b3021 <= 0)
m.c2082 = Constraint(expr= m.x2081 - m.b3021 <= 0)
m.c2083 = Constraint(expr= m.x2082 - m.b3021 <= 0)
m.c2084 = Constraint(expr= m.x2083 - m.b3021 <= 0)
m.c2085 = Constraint(expr= m.x2084 - m.b3021 <= 0)
m.c2086 = Constraint(expr= m.x2085 - m.b3021 <= 0)
m.c2087 = Constraint(expr= m.x2086 - m.b3021 <= 0)
m.c2088 = Constraint(expr= m.x2087 - m.b3021 <= 0)
m.c2089 = Constraint(expr= m.x2088 - m.b3021 <= 0)
m.c2090 = Constraint(expr= m.x2089 - m.b3021 <= 0)
m.c2091 = Constraint(expr= m.x2090 - m.b3021 <= 0)
m.c2092 = Constraint(expr= m.x2091 - m.b3021 <= 0)
m.c2093 = Constraint(expr= m.x2092 - m.b3021 <= 0)
m.c2094 = Constraint(expr= m.x2093 - m.b3021 <= 0)
m.c2095 = Constraint(expr= m.x2094 - m.b3021 <= 0)
m.c2096 = Constraint(expr= m.x2095 - m.b3021 <= 0)
m.c2097 = Constraint(expr= m.x2096 - m.b3021 <= 0)
m.c2098 = Constraint(expr= m.x2097 - m.b3021 <= 0)
m.c2099 = Constraint(expr= m.x2098 - m.b3021 <= 0)
m.c2100 = Constraint(expr= m.x2099 - m.b3021 <= 0)
m.c2101 = Constraint(expr= m.x2100 - m.b3021 <= 0)
m.c2102 = Constraint(expr= m.x2101 - m.b3022 <= 0)
m.c2103 = Constraint(expr= m.x2102 - m.b3022 <= 0)
m.c2104 = Constraint(expr= m.x2103 - m.b3022 <= 0)
m.c2105 = Constraint(expr= m.x2104 - m.b3022 <= 0)
m.c2106 = Constraint(expr= m.x2105 - m.b3022 <= 0)
m.c2107 = Constraint(expr= m.x2106 - m.b3022 <= 0)
m.c2108 = Constraint(expr= m.x2107 - m.b3022 <= 0)
m.c2109 = Constraint(expr= m.x2108 - m.b3022 <= 0)
m.c2110 = Constraint(expr= m.x2109 - m.b3022 <= 0)
m.c2111 = Constraint(expr= m.x2110 - m.b3022 <= 0)
m.c2112 = Constraint(expr= m.x2111 - m.b3022 <= 0)
m.c2113 = Constraint(expr= m.x2112 - m.b3022 <= 0)
m.c2114 = Constraint(expr= m.x2113 - m.b3022 <= 0)
m.c2115 = Constraint(expr= m.x2114 - m.b3022 <= 0)
m.c2116 = Constraint(expr= m.x2115 - m.b3022 <= 0)
m.c2117 = Constraint(expr= m.x2116 - m.b3022 <= 0)
m.c2118 = Constraint(expr= m.x2117 - m.b3022 <= 0)
m.c2119 = Constraint(expr= m.x2118 - m.b3022 <= 0)
m.c2120 = Constraint(expr= m.x2119 - m.b3022 <= 0)
m.c2121 = Constraint(expr= m.x2120 - m.b3022 <= 0)
m.c2122 = Constraint(expr= m.x2121 - m.b3022 <= 0)
m.c2123 = Constraint(expr= m.x2122 - m.b3022 <= 0)
m.c2124 = Constraint(expr= m.x2123 - m.b3022 <= 0)
m.c2125 = Constraint(expr= m.x2124 - m.b3022 <= 0)
m.c2126 = Constraint(expr= m.x2125 - m.b3022 <= 0)
m.c2127 = Constraint(expr= m.x2126 - m.b3022 <= 0)
m.c2128 = Constraint(expr= m.x2127 - m.b3022 <= 0)
m.c2129 = Constraint(expr= m.x2128 - m.b3022 <= 0)
m.c2130 = Constraint(expr= m.x2129 - m.b3022 <= 0)
m.c2131 = Constraint(expr= m.x2130 - m.b3022 <= 0)
m.c2132 = Constraint(expr= m.x2131 - m.b3022 <= 0)
m.c2133 = Constraint(expr= m.x2132 - m.b3022 <= 0)
m.c2134 = Constraint(expr= m.x2133 - m.b3022 <= 0)
m.c2135 = Constraint(expr= m.x2134 - m.b3022 <= 0)
m.c2136 = Constraint(expr= m.x2135 - m.b3022 <= 0)
m.c2137 = Constraint(expr= m.x2136 - m.b3022 <= 0)
m.c2138 = Constraint(expr= m.x2137 - m.b3022 <= 0)
m.c2139 = Constraint(expr= m.x2138 - m.b3022 <= 0)
m.c2140 = Constraint(expr= m.x2139 - m.b3022 <= 0)
m.c2141 = Constraint(expr= m.x2140 - m.b3022 <= 0)
m.c2142 = Constraint(expr= m.x2141 - m.b3022 <= 0)
m.c2143 = Constraint(expr= m.x2142 - m.b3022 <= 0)
m.c2144 = Constraint(expr= m.x2143 - m.b3022 <= 0)
m.c2145 = Constraint(expr= m.x2144 - m.b3022 <= 0)
m.c2146 = Constraint(expr= m.x2145 - m.b3022 <= 0)
m.c2147 = Constraint(expr= m.x2146 - m.b3022 <= 0)
m.c2148 = Constraint(expr= m.x2147 - m.b3022 <= 0)
m.c2149 = Constraint(expr= m.x2148 - m.b3022 <= 0)
m.c2150 = Constraint(expr= m.x2149 - m.b3022 <= 0)
m.c2151 = Constraint(expr= m.x2150 - m.b3022 <= 0)
m.c2152 = Constraint(expr= m.x2151 - m.b3022 <= 0)
m.c2153 = Constraint(expr= m.x2152 - m.b3022 <= 0)
m.c2154 = Constraint(expr= m.x2153 - m.b3022 <= 0)
], 1 , 1 , 4 , 0 , 92 , (3, 0, None, None) , 0 , )),
(( 'RemoveHalftone' , ), 22, (22, (), [ ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( 'SmoothCharacters' , 'Type' , ), 23, (23, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 100 , (3, 0, None, None) , 0 , )),
(( 'CleanNoise' , 'NoiseSize' , ), 24, (24, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( 'AutoInvertImage' , 'Threshold' , ), 25, (25, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 108 , (3, 0, None, None) , 0 , )),
(( 'AutoInvertBlocks' , 'MinWidth' , 'MinHeight' , ), 26, (26, (), [ (3, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 1088 , )),
(( 'ReconstructLines' , 'Direction' , ), 29, (29, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 116 , (3, 0, None, None) , 0 , )),
(( 'AutoCrop' , 'NewLeftMargin' , 'NewTopMargin' , 'NewRightMargin' , 'NewBottomMargin' ,
), 30, (30, (), [ (3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( 'AutoRegister' , 'NewLeftMargin' , 'NewTopMargin' , ), 31, (31, (), [ (3, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 124 , (3, 0, None, None) , 0 , )),
(( 'DeleteLines' , 'Direction' , 'bRepair' , ), 40, (40, (), [ (3, 1, None, None) ,
(3, 49, '65535', None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( 'BorderExtract' , 'Flags' , 'Algorithm' , ), 41, (41, (), [ (3, 49, '3', None) ,
(3, 49, '2', None) , ], 1 , 1 , 4 , 0 , 132 , (3, 0, None, None) , 0 , )),
(( 'ClearBackground' , 'ThrLevel' , ), 42, (42, (), [ (5, 49, '30.0', None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( 'Resize' , 'PageSize' , 'PageOrientation' , 'ImageAlignment' , 'Width' ,
'Height' , 'Unit' , ), 43, (43, (), [ (3, 1, None, None) , (3, 49, '0', None) ,
(3, 49, '0', None) , (5, 49, '8.5', None) , (5, 49, '11.0', None) , (3, 49, '1', None) , ], 1 , 1 , 4 , 0 , 140 , (3, 0, None, None) , 0 , )),
(( 'IsBlankImage' , 'reserved0' , 'reserved1' , 'reserved2' , 'pVal' ,
), 44, (44, (), [ (3, 49, '0', None) , (5, 49, '0.0', None) , (5, 49, '0.0', None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( 'cx2l' , 'i' , 'pVal' , ), 45, (45, (), [ (3, 1, None, None) ,
(16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 148 , (3, 0, None, None) , 1088 , )),
(( 'cx2l' , 'i' , 'pVal' , ), 45, (45, (), [ (3, 1, None, None) ,
(3, 1, None, None) , ], 1 , 4 , 4 , 0 , 152 , (3, 0, None, None) , 1088 , )),
(( 'cx3d' , 'i' , 'pVal' , ), 46, (46, (), [ (3, 1, None, None) ,
(16389, 10, None, None) , ], 1 , 2 , 4 , 0 , 156 , (3, 0, None, None) , 1088 , )),
(( 'cx3d' , 'i' , 'pVal' , ), 46, (46, (), [ (3, 1, None, None) ,
(5, 1, None, None) , ], 1 , 4 , 4 , 0 , 160 , (3, 0, None, None) , 1088 , )),
(( 'AdvancedBinarize' , 'targetDpi' , 'reserved1' , 'reserved2' , ), 47, (47, (), [
(3, 49, '0', None) , (5, 49, '0.0', None) , (5, 49, '0.0', None) , ], 1 , 1 , 4 , 0 , 164 , (3, 0, None, None) , 0 , )),
(( 'MinimizeBitsPerPixel' , 'reserved1' , 'reserved2' , ), 48, (48, (), [ (5, 49, '0.0', None) ,
(5, 49, '0.0', None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 1088 , )),
(( 'CleanNoiseExt' , 'Flags' , 'maxNoiseSizeHorz' , 'maxNoiseSizeVert' , 'minObjectDistance' ,
'reserved0' , ), 50, (50, (), [ (3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) ,
(3, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 172 , (3, 0, None, None) , 0 , )),
(( 'o' , 'pVal' , ), 51, (51, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 176 , (3, 0, None, None) , 1088 , )),
(( 'o' , 'pVal' , ), 51, (51, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 180 , (3, 0, None, None) , 1088 , )),
]
ICiServer_vtables_dispatch_ = 1
ICiServer_vtables_ = [
(( 'CreateImage' , 'ppInt' , ), 1, (1, (), [ (16393, 10, None, "IID('{F2BCF189-0B27-11D4-B5F5-9CC767000000}')") , ], 1 , 1 , 4 , 0 , 28 , (3, 0, None, None) , 0 , )),
(( 'CreateBarcodePro' , 'ppInt' , ), 2, (2, (), [ (16393, 10, None, "IID('{BDDB0244-0CFD-11D4-B5F8-B89D57000000}')") , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( 'OpenExt' , 'hModule' , 'MasterId' , 'pParam' , ), 10, (10, (), [
(3, 1, None, None) , (3, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 36 , (3, 0, None, None) , 0 , )),
(( 'CreateRect' , 'left' , 'top' , 'right' , 'bottom' ,
'ppInt' , ), 11, (11, (), [ (3, 49, '0', None) , (3, 49, '0', None) , (3, 49, '0', None) ,
(3, 49, '0', None) , (16393, 10, None, "IID('{4ED88244-0BE1-11D4-B5F6-009FC6000000}')") , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( 'VerMajor' , 'pVal' , ), 12, (12, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 44 , (3, 0, None, None) , 0 , )),
(( 'VerMinor' , 'pVal' , ), 13, (13, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( 'VerRelease' , 'pVal' , ), 14, (14, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 52 , (3, 0, None, None) , 0 , )),
(( 'CreateRepair' , 'ppInt' , ), 5, (5, (), [ (16393, 10, None, "IID('{63F6480C-997E-4FDE-AD63-A24E5F0FFDC7}')") , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
	(( 'CreateTools' , 'ppInt' , ), 6, (6, (), [ (16393, 10, None, "IID('{316BC128-8995-471D-985D-B3E68E87C084}')") , ], 1 , 1 , 4 , 0 , 60 , (3, 0, None, None) , 0 , )),
from testbase import PersistTest
import testbase
import unittest, sys, datetime
import sqlalchemy.databases.sqlite as sqllite
import tables
from sqlalchemy import *
from sqlalchemy.engine import ResultProxy, RowProxy
from sqlalchemy import exceptions
class QueryTest(PersistTest):
def setUpAll(self):
global users, addresses, metadata
metadata = BoundMetaData(testbase.db)
users = Table('query_users', metadata,
Column('user_id', INT, primary_key = True),
Column('user_name', VARCHAR(20)),
)
addresses = Table('query_addresses', metadata,
Column('address_id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('query_users.user_id')),
Column('address', String(30)))
metadata.create_all()
def setUp(self):
self.users = users
def tearDown(self):
self.users.delete().execute()
def tearDownAll(self):
metadata.drop_all()
def testinsert(self):
self.users.insert().execute(user_id = 7, user_name = 'jack')
print repr(self.users.select().execute().fetchall())
def testupdate(self):
self.users.insert().execute(user_id = 7, user_name = 'jack')
print repr(self.users.select().execute().fetchall())
self.users.update(self.users.c.user_id == 7).execute(user_name = 'fred')
print repr(self.users.select().execute().fetchall())
def testrowiteration(self):
self.users.insert().execute(user_id = 7, user_name = 'jack')
self.users.insert().execute(user_id = 8, user_name = 'ed')
self.users.insert().execute(user_id = 9, user_name = 'fred')
r = self.users.select().execute()
l = []
for row in r:
l.append(row)
self.assert_(len(l) == 3)
def test_compiled_execute(self):
s = select([self.users], self.users.c.user_id==bindparam('id')).compile()
c = testbase.db.connect()
print repr(c.execute(s, id=7).fetchall())
def test_global_metadata(self):
t1 = Table('table1', Column('col1', Integer, primary_key=True),
Column('col2', String(20)))
t2 = Table('table2', Column('col1', Integer, primary_key=True),
Column('col2', String(20)))
assert t1.c.col1
global_connect(testbase.db)
default_metadata.create_all()
try:
assert t1.count().scalar() == 0
finally:
default_metadata.drop_all()
default_metadata.clear()
@testbase.supported('postgres')
def testpassiveoverride(self):
"""primarily for postgres, tests that when we get a primary key column back
from reflecting a table which has a default value on it, we pre-execute
that PassiveDefault upon insert, even though PassiveDefault says
"let the database execute this", because in postgres we must have all the primary
        key values in memory before insert; otherwise we can't locate the just-inserted row."""
try:
meta = BoundMetaData(testbase.db)
testbase.db.execute("""
CREATE TABLE speedy_users
(
speedy_user_id SERIAL PRIMARY KEY,
user_name VARCHAR NOT NULL,
user_password VARCHAR NOT NULL
);
""", None)
t = Table("speedy_users", meta, autoload=True)
            t.insert().execute(user_name='user', user_password='lala')
l = t.select().execute().fetchall()
self.assert_(l == [(1, 'user', 'lala')])
finally:
testbase.db.execute("drop table speedy_users", None)
@testbase.supported('postgres')
def testschema(self):
meta1 = BoundMetaData(testbase.db)
test_table = Table('my_table', meta1,
Column('id', Integer, primary_key=True),
Column('data', String(20), nullable=False),
schema='alt_schema'
)
test_table.create()
try:
# plain insert
test_table.insert().execute(data='test')
meta2 = BoundMetaData(testbase.db)
test_table = Table('my_table', meta2, autoload=True, schema='alt_schema')
test_table.insert().execute(data='test')
finally:
test_table.drop()
def test_repeated_bindparams(self):
"""test that a BindParam can be used more than once.
this should be run for dbs with both positional and named paramstyles."""
self.users.insert().execute(user_id = 7, user_name = 'jack')
self.users.insert().execute(user_id = 8, user_name = 'fred')
u = bindparam('uid')
s = self.users.select(or_(self.users.c.user_name==u, self.users.c.user_name==u))
r = s.execute(uid='fred').fetchall()
assert len(r) == 1
def test_bindparam_shortname(self):
"""test the 'shortname' field on BindParamClause."""
self.users.insert().execute(user_id = 7, user_name = 'jack')
self.users.insert().execute(user_id = 8, user_name = 'fred')
u = bindparam('uid', shortname='someshortname')
s = self.users.select(self.users.c.user_name==u)
r = s.execute(someshortname='fred').fetchall()
assert len(r) == 1
def testdelete(self):
self.users.insert().execute(user_id = 7, user_name = 'jack')
self.users.insert().execute(user_id = 8, user_name = 'fred')
print repr(self.users.select().execute().fetchall())
self.users.delete(self.users.c.user_name == 'fred').execute()
print repr(self.users.select().execute().fetchall())
def testselectlimit(self):
self.users.insert().execute(user_id=1, user_name='john')
self.users.insert().execute(user_id=2, user_name='jack')
self.users.insert().execute(user_id=3, user_name='ed')
self.users.insert().execute(user_id=4, user_name='wendy')
self.users.insert().execute(user_id=5, user_name='laura')
self.users.insert().execute(user_id=6, user_name='ralph')
self.users.insert().execute(user_id=7, user_name='fido')
r = self.users.select(limit=3, order_by=[self.users.c.user_id]).execute().fetchall()
self.assert_(r == [(1, 'john'), (2, 'jack'), (3, 'ed')], repr(r))
@testbase.unsupported('mssql')
def testselectlimitoffset(self):
self.users.insert().execute(user_id=1, user_name='john')
self.users.insert().execute(user_id=2, user_name='jack')
self.users.insert().execute(user_id=3, user_name='ed')
self.users.insert().execute(user_id=4, user_name='wendy')
self.users.insert().execute(user_id=5, user_name='laura')
self.users.insert().execute(user_id=6, user_name='ralph')
self.users.insert().execute(user_id=7, user_name='fido')
r = self.users.select(limit=3, offset=2, order_by=[self.users.c.user_id]).execute().fetchall()
self.assert_(r==[(3, 'ed'), (4, 'wendy'), (5, 'laura')])
r = self.users.select(offset=5, order_by=[self.users.c.user_id]).execute().fetchall()
self.assert_(r==[(6, 'ralph'), (7, 'fido')])
@testbase.supported('mssql')
def testselectlimitoffset_mssql(self):
try:
r = self.users.select(limit=3, offset=2, order_by=[self.users.c.user_id]).execute().fetchall()
assert False # InvalidRequestError should have been raised
except exceptions.InvalidRequestError:
pass
@testbase.unsupported('mysql')
def test_scalar_select(self):
"""test that scalar subqueries with labels get their type propigated to the result set."""
# mysql and/or mysqldb has a bug here, type isnt propigated for scalar subquery.
datetable = Table('datetable', metadata,
Column('id', Integer, primary_key=True),
Column('today', DateTime))
datetable.create()
try:
datetable.insert().execute(id=1, today=datetime.datetime(2006, 5, 12, 12, 0, 0))
s = select([datetable.alias('x').c.today], scalar=True)
s2 = select([datetable.c.id, s.label('somelabel')])
#print s2.c.somelabel.type
assert isinstance(s2.execute().fetchone()['somelabel'], datetime.datetime)
finally:
datetable.drop()
def test_column_accessor(self):
self.users.insert().execute(user_id=1, user_name='john')
self.users.insert().execute(user_id=2, user_name='jack')
r = self.users.select(self.users.c.user_id==2).execute().fetchone()
self.assert_(r.user_id == r['user_id'] == r[self.users.c.user_id] == 2)
self.assert_(r.user_name == r['user_name'] == r[self.users.c.user_name] == 'jack')
def test_keys(self):
self.users.insert().execute(user_id=1, user_name='foo')
r = self.users.select().execute().fetchone()
self.assertEqual(r.keys(), ['user_id', 'user_name'])
def test_items(self):
self.users.insert().execute(user_id=1, user_name='foo')
r = self.users.select().execute().fetchone()
self.assertEqual(r.items(), [('user_id', 1), ('user_name', 'foo')])
def test_len(self):
self.users.insert().execute(user_id=1, user_name='foo')
r = self.users.select().execute().fetchone()
self.assertEqual(len(r), 2)
r.close()
r = testbase.db.execute('select user_name, user_id from query_users', {}).fetchone()
self.assertEqual(len(r), 2)
r.close()
r = testbase.db.execute('select user_name from query_users', {}).fetchone()
self.assertEqual(len(r), 1)
r.close()
def test_cant_execute_join(self):
try:
users.join(addresses).execute()
except exceptions.ArgumentError, e:
assert str(e) == """Not an executeable clause: query_users JOIN query_addresses ON query_users.user_id = query_addresses.user_id"""
def test_functions(self):
x = testbase.db.func.current_date().execute().scalar()
y = testbase.db.func.current_date().select().execute().scalar()
z = testbase.db.func.current_date().scalar()
assert x == y == z
def test_conn_functions(self):
conn = testbase.db.connect()
try:
x = conn.execute(func.current_date()).scalar()
y = conn.execute(func.current_date().select()).scalar()
z = conn.scalar(func.current_date())
finally:
conn.close()
assert x == y == z
def test_update_functions(self):
"""test sending functions and SQL expressions to the VALUES and SET clauses of INSERT/UPDATE instances,
and that column-level defaults get overridden"""
meta = BoundMetaData(testbase.db)
t = Table('t1', meta,
Column('id', Integer, primary_key=True),
Column('value', Integer)
)
t2 = Table('t2', meta,
Column('id', Integer, primary_key=True),
Column('value', Integer, default="7"),
Column('stuff', String(20), onupdate="thisisstuff")
)
meta.create_all()
try:
t.insert().execute(value=func.length("one"))
assert t.select().execute().fetchone()['value'] == 3
t.update().execute(value=func.length("asfda"))
assert t.select().execute().fetchone()['value'] == 5
r = t.insert(values=dict(value=func.length("sfsaafsda"))).execute()
id = r.last_inserted_ids()[0]
assert t.select(t.c.id==id).execute().fetchone()['value'] == 9
t.update(values={t.c.value:func.length("asdf")}).execute()
assert t.select().execute().fetchone()['value'] == 4
t2.insert().execute()
t2.insert().execute(value=func.length("one"))
t2.insert().execute(value=func.length("asfda") + -19, stuff="hi")
assert select([t2.c.value, t2.c.stuff]).execute().fetchall() == [(7,None), (3,None), (-14,"hi")]
t2.update().execute(value=func.length("asdsafasd"), stuff="some stuff")
assert select([t2.c.value, t2.c.stuff]).execute().fetchall() == [(9,"some stuff"), (9,"some stuff"), (9,"some stuff")]
t2.delete().execute()
t2.insert(values=dict(value=func.length("one") + 8)).execute()
assert t2.select().execute().fetchone()['value'] == 11
t2.update(values=dict(value=func.length("asfda"))).execute()
assert select([t2.c.value, t2.c.stuff]).execute().fetchone() == (5, "thisisstuff")
t2.update(values={t2.c.value:func.length("asfdaasdf"), t2.c.stuff:"foo"}).execute()
print "HI", select([t2.c.value, t2.c.stuff]).execute().fetchone()
assert select([t2.c.value, t2.c.stuff]).execute().fetchone() == (9, "foo")
finally:
meta.drop_all()
@testbase.supported('postgres')
def test_functions_with_cols(self):
x = testbase.db.func.current_date().execute().scalar()
y = testbase.db.func.current_date().select().execute().scalar()
z = testbase.db.func.current_date().scalar()
w = select(['*'], from_obj=[testbase.db.func.current_date()]).scalar()
# construct a column-based FROM object out of a function, like in [ticket:172]
s = select([column('date', type=DateTime)], from_obj=[testbase.db.func.current_date()])
q = s.execute().fetchone()[s.c.date]
r = s.alias('datequery').select().scalar()
assert x == y == z == w == q == r
def test_column_order_with_simple_query(self):
# should return values in column definition order
self.users.insert().execute(user_id=1, user_name='foo')
r = self.users.select(self.users.c.user_id==1).execute().fetchone()
self.assertEqual(r[0], 1)
self.assertEqual(r[1], 'foo')
self.assertEqual(r.keys(), ['user_id', 'user_name'])
self.assertEqual(r.values(), [1, 'foo'])
def test_column_order_with_text_query(self):
# should return values in query order
self.users.insert().execute(user_id=1, user_name='foo')
r = testbase.db.execute('select user_name, user_id from query_users', {}).fetchone()
self.assertEqual(r[0], 'foo')
self.assertEqual(r[1], 1)
self.assertEqual(r.keys(), ['user_name', 'user_id'])
self.assertEqual(r.values(), ['foo', 1])
@testbase.unsupported('oracle', 'firebird')
def test_column_accessor_shadow(self):
meta = BoundMetaData(testbase.db)
shadowed = Table('test_shadowed', meta,
Column('shadow_id', INT, primary_key = True),
Column('shadow_name', VARCHAR(20)),
Column('parent', VARCHAR(20)),
Column('row', VARCHAR(40)),
Column('__parent', VARCHAR(20)),
Column('__row', VARCHAR(20)),
)
shadowed.create()
try:
shadowed.insert().execute(shadow_id=1, shadow_name='The Shadow', parent='The Light', row='Without light there is no shadow', __parent='Hidden parent', __row='Hidden row')
r = shadowed.select(shadowed.c.shadow_id==1).execute().fetchone()
self.assert_(r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1)
self.assert_(r.shadow_name == r['shadow_name'] == r[shadowed.c.shadow_name] == 'The Shadow')
self.assert_(r.parent == r['parent'] == r[shadowed.c.parent] == 'The Light')
self.assert_(r.row == r['row'] == r[shadowed.c.row] == 'Without light there is no shadow')
self.assert_(r['__parent'] == 'Hidden parent')
self.assert_(r['__row'] == 'Hidden row')
try:
print r.__parent, r.__row
self.fail('Should not allow access to private attributes')
except AttributeError:
pass # expected
r.close()
finally:
shadowed.drop()
class CompoundTest(PersistTest):
"""test compound statements like UNION, INTERSECT, particularly their ability to nest on
different databases."""
def setUpAll(self):
global metadata, t1, t2, t3
metadata = BoundMetaData(testbase.db)
t1 = Table('t1', metadata,
Column('col1', Integer, primary_key=True),
Column('col2', String(30)),
Column('col3', String(40)),
Column('col4', String(30))
)
t2 = Table('t2', metadata,
Column('col1', Integer, primary_key=True),
Column('col2', String(30)),
Column('col3', String(40)),
Column('col4', String(30)))
t3 = Table('t3', metadata,
Column('col1', Integer, primary_key=True),
Column('col2', String(30)),
Column('col3', String(40)),
Column('col4', String(30)))
metadata.create_all()
t1.insert().execute([
dict(col2="t1col2r1", col3="aaa", col4="aaa"),
dict(col2="t1col2r2", col3="bbb", col4="bbb"),
dict(col2="t1col2r3", col3="ccc", col4="ccc"),
])
t2.insert().execute([
dict(col2="t2col2r1", col3="aaa", col4="bbb"),
dict(col2="t2col2r2", col3="bbb", col4="ccc"),
dict(col2="t2col2r3", col3="ccc", col4="aaa"),
])
t3.insert().execute([
dict(col2="t3col2r1", col3="aaa", col4="ccc"),
dict(col2="t3col2r2", col3="bbb", col4="aaa"),
dict(col2="t3col2r3", col3="ccc", col4="bbb"),
])
def tearDownAll(self):
metadata.drop_all()
def test_union(self):
(s1, s2) = (
select([t1.c.col3, t1.c.col4], t1.c.col2.in_("t1col2r1", "t1col2r2")),
select([t2.c.col3, t2.c.col4], t2.c.col2.in_("t2col2r2", "t2col2r3"))
)
u = union(s1, s2)
assert u.execute().fetchall() == [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
assert u.alias('bar').select().execute().fetchall() == [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
@testbase.unsupported('mysql')
def test_intersect(self):
i = intersect(
select([t2.c.col3, t2.c.col4]),
select([t2.c.col3, t2.c.col4], t2.c.col4==t3.c.col3)
)
assert i.execute().fetchall() == [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
assert i.alias('bar').select().execute().fetchall() == [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
@testbase.unsupported('mysql')
def test_except_style1(self):
e = except_(union(
# nutils/mesh.py
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The mesh module provides mesh generators: methods that return a topology and an
accompanying geometry function. Meshes can either be generated on the fly, e.g.
:func:`rectilinear`, or read from an externally prepared file,
:func:`gmsh`, and converted to nutils format. Note that no mesh writers are
provided at this point; output is handled by the :mod:`nutils.plot` module.
"""
from . import topology, function, util, element, numpy, numeric, transform, log, warnings, types, _
import os, itertools, pathlib
# MESH GENERATORS
@log.title
def rectilinear(richshape, periodic=(), name='rect'):
'rectilinear mesh'
ndims = len(richshape)
shape = []
offset = []
scale = []
uniform = True
for v in richshape:
if numeric.isint(v):
assert v > 0
shape.append(v)
scale.append(1)
offset.append(0)
elif numpy.equal(v, numpy.linspace(v[0],v[-1],len(v))).all():
shape.append(len(v)-1)
scale.append((v[-1]-v[0]) / float(len(v)-1))
offset.append(v[0])
else:
shape.append(len(v)-1)
uniform = False
root = transform.Identifier(ndims, name)
axes = [topology.DimAxis(0,n,idim in periodic) for idim, n in enumerate(shape)]
topo = topology.StructuredTopology(root, axes)
if uniform:
if all(o == offset[0] for o in offset[1:]):
offset = offset[0]
if all(s == scale[0] for s in scale[1:]):
scale = scale[0]
geom = function.rootcoords(ndims) * scale + offset
else:
funcsp = topo.basis('spline', degree=1, periodic=())
coords = numeric.meshgrid(*richshape).reshape(ndims, -1)
geom = (funcsp * coords).sum(-1)
return topo, geom
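# A minimal usage sketch (values are illustrative only): a uniform 4x3-element
# grid covering [0,4]x[0,3], and a variant with explicit, non-uniform vertex
# positions along the second axis.
#
#   topo, geom = rectilinear([4, 3])
#   topo, geom = rectilinear([numpy.linspace(0, 1, 5), [0, 0.1, 0.3, 1.0]])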
def line(nodes, periodic=False, bnames=None):
if isinstance(nodes, int):
uniform = True
assert nodes > 0
nelems = nodes
scale = 1
offset = 0
else:
nelems = len(nodes)-1
scale = (nodes[-1]-nodes[0]) / nelems
offset = nodes[0]
uniform = numpy.equal(nodes, offset + numpy.arange(nelems+1) * scale).all()
root = transform.Identifier(1, 'line')
domain = topology.StructuredLine(root, 0, nelems, periodic=periodic, bnames=bnames)
geom = function.rootcoords(1) * scale + offset if uniform else domain.basis('std', degree=1, periodic=False).dot(nodes)
return domain, geom
def newrectilinear(nodes, periodic=None, bnames=[['left','right'],['bottom','top'],['front','back']]):
if periodic is None:
periodic = numpy.zeros(len(nodes), dtype=bool)
else:
periodic = numpy.asarray(periodic)
assert len(periodic) == len(nodes) and periodic.ndim == 1 and periodic.dtype == bool
dims = [line(nodesi, periodici, bnamesi) for nodesi, periodici, bnamesi in zip(nodes, periodic, tuple(bnames)+(None,)*len(nodes))]
domain, geom = dims.pop(0)
for domaini, geomi in dims:
domain = domain * domaini
geom = function.concatenate(function.bifurcate(geom,geomi))
return domain, geom
@log.title
def multipatch(patches, nelems, patchverts=None, name='multipatch'):
'''multipatch rectilinear mesh generator
Generator for a :class:`~nutils.topology.MultipatchTopology` and geometry.
The :class:`~nutils.topology.MultipatchTopology` consists of a set patches,
where each patch is a :class:`~nutils.topology.StructuredTopology` and all
patches have the same number of dimensions.
The ``patches`` argument, a :class:`numpy.ndarray`-like with shape
``(npatches, 2*ndims)`` or ``(npatches,)+(2,)*ndims``, defines the
connectivity by labelling the patch vertices. For example, three
one-dimensional patches can be connected at one edge by::
# connectivity: 3
# │
# 1──0──2
patches=[[0,1], [0,2], [0,3]]
Or two two-dimensional patches along an edge by::
# connectivity: 3──4──5
# │ │ │
# 0──1──2
patches=[[[0,3],[1,4]], [[1,4],[2,5]]]
The geometry is specified by the ``patchverts`` argument: a
:class:`numpy.ndarray`-like with shape ``(nverts,ngeomdims)`` specifying for
each vertex a coordinate. Note that the dimension of the geometry may be
higher than the dimension of the patches. The created geometry is a
patch-wise linear interpolation of the vertex coordinates. If the
``patchverts`` argument is omitted the geometry describes a unit hypercube
per patch.
The ``nelems`` argument is either an :class:`int` defining the number of
elements per patch per dimension, or a :class:`dict` with edges (a pair of
vertex numbers) as keys and the number of elements (:class:`int`) as values,
with key ``None`` specifying the default number of elements. Example::
# connectivity: 3─────4─────5
# │ 4x3 │ 8x3 │
# 0─────1─────2
patches=[[[0,3],[1,4]], [[1,4],[2,5]]]
nelems={None: 4, (1,2): 8, (4,5): 8, (0,3): 3, (1,4): 3, (2,5): 3}
Since the patches are structured topologies, the number of elements per
patch per dimension should be unambiguous. In above example specifying
``nelems={None: 4, (1,2): 8}`` will raise an exception because the patch on
the right has 8 elements along edge ``(1,2)`` and 4 along ``(4,5)``.
Example
-------
An L-shaped domain can be generated by::
# connectivity: 2──5
# │ |
# 1──4─────7 y
# │ │ │ │
# 0──3─────6 └──x
domain, geom = mesh.multipatch(
patches=[[0,1,3,4], [1,2,4,5], [3,4,6,7]],
patchverts=[[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [3,0], [3,1]],
nelems={None: 4, (3,6): 8, (4,7): 8})
The number of elements is chosen such that all elements in the domain have
the same size.
A topology and geometry describing the surface of a sphere can be generated
by creating a multipatch cube surface and inflating the cube to a sphere::
# connectivity: 3────7
# ╱│ ╱│
# 2────6 │ y
# │ │ │ │ │
# │ 1──│─5 │ z
# │╱ │╱ │╱
# 0────4 *────x
topo, cube = multipatch(
patches=[
# The order of the vertices is chosen such that normals point outward.
[2,3,0,1],
[4,5,6,7],
[4,6,0,2],
[1,3,5,7],
[1,5,0,4],
[2,6,3,7],
],
patchverts=tuple(itertools.product(*([[-1,1]]*3))),
nelems=10,
)
sphere = cube / function.sqrt((cube**2).sum(0))
Args
----
patches:
      A sequence of patches, with each patch being a list of vertex indices
      (the accepted array shapes are described above).
patchverts:
A sequence of coordinates of the vertices.
nelems:
Either an :class:`int` specifying the number of elements per patch per
dimension, or a :class:`dict` with edges (a pair of vertex numbers) as
keys and the number of elements (:class:`int`) as values, with key
``None`` specifying the default number of elements.
Returns
-------
:class:`nutils.topology.MultipatchTopology`:
The multipatch topology.
:class:`nutils.function.Array`:
The geometry defined by the ``patchverts`` or a unit hypercube per patch
if ``patchverts`` is not specified.
'''
patches = numpy.array(patches)
if patches.dtype != int:
raise ValueError('`patches` should be an array of ints.')
if patches.ndim < 2 or patches.ndim == 2 and patches.shape[-1] % 2 != 0:
raise ValueError('`patches` should be an array with shape (npatches,2,...,2) or (npatches,2*ndims).')
elif patches.ndim > 2 and patches.shape[1:] != (2,) * (patches.ndim - 1):
raise ValueError('`patches` should be an array with shape (npatches,2,...,2) or (npatches,2*ndims).')
patches = patches.reshape(patches.shape[0], -1)
# determine topological dimension of patches
ndims = 0
while 2**ndims < patches.shape[1]:
ndims += 1
if 2**ndims > patches.shape[1]:
raise ValueError('Only hyperrectangular patches are supported: ' \
'number of patch vertices should be a power of two.')
patches = patches.reshape([patches.shape[0]] + [2]*ndims)
# group all common patch edges (and/or boundaries?)
if isinstance(nelems, int):
nelems = {None: nelems}
elif isinstance(nelems, dict):
nelems = {(k and frozenset(k)): v for k, v in nelems.items()}
else:
raise ValueError('`nelems` should be an `int` or `dict`')
# create patch topologies, geometries
if patchverts is not None:
patchverts = numpy.array(patchverts)
indices = set(patches.flat)
if tuple(sorted(indices)) != tuple(range(len(indices))):
raise ValueError('Patch vertices in `patches` should be numbered consecutively, starting at 0.')
if len(patchverts) != len(indices):
raise ValueError('Number of `patchverts` does not equal number of vertices specified in `patches`.')
if len(patchverts.shape) != 2:
raise ValueError('Every patch vertex should be an array of dimension 1.')
topos = []
coords = []
for i, patch in enumerate(patches):
# find shape of patch and local patch coordinates
shape = []
for dim in range(ndims):
nelems_sides = []
sides = [(0,1)]*ndims
sides[dim] = slice(None),
for side in itertools.product(*sides):
sideverts = frozenset(patch[side])
if sideverts in nelems:
nelems_sides.append(nelems[sideverts])
else:
nelems_sides.append(nelems[None])
if len(set(nelems_sides)) != 1:
raise ValueError('duplicate number of elements specified for patch {} in dimension {}'.format(i, dim))
shape.append(nelems_sides[0])
    # create patch
#!/usr/bin/env python
# File: dataset_misr.py
# Author: <NAME>, 5/7/13
#
# Readers and plotters for MISR data sets
#
# Copyright 2013-2015, by the California Institute of Technology. ALL
# RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all applicable U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
import os, fnmatch, sys, pickle
import matplotlib.pyplot as plt
import numpy as np
import pylab
from scipy.stats import nanmean
from pyhdf import HDF, SD
from dataset import Dataset
import matplotlib
#import matplotlib.pyplot as plt
import datetime
################### MISR ##############
### Analysis per pixel (location). ###
#######################################
class MISRData(Dataset):
# Contains code needed to load, plot, and interpret MISR data.
def __init__(self, rawdirname=None, AODdirname=None, filename=None, force_read=False):
"""MISRData(rawdirname="", AODdirname="", filename="")
Read in raw and AOD MISR data (pickled) from filename.
If it doesn't exist, read in the full data from
the appropriate dirnames, save it out, and proceed.
"""
Dataset.__init__(self, filename, "misr", '')
if (not os.path.exists(filename)) or force_read:
MISRData.read_misr_dir(rawdirname, AODdirname, filename)
# Read in the data
self.readin()
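    # A minimal usage sketch (paths are hypothetical): the first call builds the
    # pickle from the raw and AOD directories, later calls just reload it.
    #
    #   misr = MISRData(rawdirname='misr/raw/', AODdirname='misr/aod/',
    #                   filename='misr_data.pkl')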
def readin(self):
"""readin()
Read in MISR data (pickled) from self.filename.
"""
print "Loading in MISR data from pickle..."
inf = open(self.filename, 'r')
(self.data, self.rgbimages, self.along_track, self.cross_track,
self.latlons, self.datestr) = \
pickle.load(inf)
inf.close()
print "...done."
self.xlabel = 'Date'
self.ylabel = 'AOD (tau)'
self.xvals = np.arange(self.data.shape[1]).reshape(-1,1)
self.labels = self.latlons
@classmethod
def orbit_to_date(cls, orbit):
"""orbit_to_date(orbit) -> date string
Convert MISR orbit number to a date string and return it.
        Thanks to <NAME> for this routine.
Examples:
>>> MISRData.orbit_to_date(1)
'12/18/99 09:36'
>>> MISRData.orbit_to_date(1506)
'03/30/00 17:58'
"""
# First MISR orbit: Dec. 18.4, 1999
first_orbit = datetime.datetime(1999, 12, 18, 9, 36, 0)
# Compute elapsed time (delta)
num_orbits = orbit - 1
num_secs = num_orbits * 5933.14
num_days = num_secs / (60.*60.*24.)
delta = datetime.timedelta(num_days)
now = first_orbit + delta
return now.strftime('%m/%d/%y %H:%M')
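        # Worked example (matches the doctest above): orbit 1506 is 1505 orbits
        # after the first, i.e. 1505 * 5933.14 s ~= 103.35 days past
        # 1999-12-18 09:36, which lands on 2000-03-30 at about 17:58.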
@classmethod
def read_misr_dir(cls, rawdirname, AODdirname, outfile):
"""read_misr_dir(rawdirname, AODdirname, outfile)
Read in raw MISR data from .hdf files in rawdirname,
and AOD data from all .hdf files in AODdirname.
Pickle the result and save it to outfile.
Note: does NOT update object fields.
Follow this with a call to readin().
"""
# Get the meta-information
#meta = sd.attributes()
# for val in ['Origin_block.ulc.x',
# 'Origin_block.ulc.y',
# 'Local_mode_site_name']:
#info[val] = meta[val]
# Get orbit parameters?
data = []
rgbimages = []
datestr = []
datestr2 = []
i = 0
# Read in the AOD (from local mode) data; this is what we'll analyze
files = sorted(os.listdir(AODdirname))
for f in files:
if fnmatch.fnmatch(f, '*.hdf'):
print " %d / %d " % (i, len(files)),
i += 1
filename = AODdirname + f
# Check that filename exists and is an HDF file
if HDF.ishdf(filename) != 1:
print "File %s cannot be found or is not an HDF-4 file." % filename
continue
orbit = int(filename.split('_')[5].split('O')[1])
thisdate = MISRData.orbit_to_date(orbit)
print "orbit: %d -> %s " % (orbit, thisdate)
datestr = datestr + [thisdate]
sd = SD.SD(filename)
# This is 3 (SOMBlock) x 32 (x) x 128 (y) x 4 (bands)
dataset = sd.select('RegBestEstimateSpectralOptDepth')
dim = dataset.dimensions()
# Get all of the data for the green band (band = 1)
along_track = dim['SOMBlockDim:RegParamsAer'] * dim['XDim:RegParamsAer']
cross_track = dim['YDim:RegParamsAer']
data_now = dataset.get((0,0,0,1),(dim['SOMBlockDim:RegParamsAer'],
dim['XDim:RegParamsAer'],
dim['YDim:RegParamsAer'],
1)).squeeze()
# Reshape to concatenate blocks
nrows = data_now.shape[0]*data_now.shape[1]
ncols = data_now.shape[2]
data_now = data_now.reshape((nrows, ncols))
# Set -9999 values to NaN
naninds = np.equal(data_now, -9999)
# Visualize this timeslice
#pylab.imshow(data_now)
#pylab.title(thisdate)
#pylab.axis('off')
#pylab.savefig(filename + '.png')
# Set -9999 values to NaN
data_now[naninds] = float('NaN')
data_now = data_now.reshape((-1, 1))
#print type(data_now)
#print data_now.shape
if data == []:
data = [data_now]
else:
data.append(data_now)
# Close the file
sd.end()
print '.',
sys.stdout.flush()
data = np.asarray(data).squeeze().T
print data.shape
print
# Data is now n x d, where n = # pixels and d = # timepts
print 'Read data set with %d pixels, %d time points.' % data.shape
# TODO: Add lat/lon coords here
latlons = ['Unknown'] * data.shape[0]
# Read in the raw data (for later visualization)
files = sorted(os.listdir(rawdirname + 'AN/'))
print "+++++++++++++"
print len(files)
iii = 0
for f in files:
if fnmatch.fnmatch(f, '*.hdf'):
filename = rawdirname + 'AN/' + f
#print filename
print " %d / %d " % (iii, len(files)),
iii += 1
# Check that filename exists and is an HDF file
if HDF.ishdf(filename) != 1:
print "File %s cannot be found or is not an HDF-4 file." % filename
continue
# We'll assume there's a one-to-one correspondence
# with the AOD data. But print it out anyway as a check.
orbit = int(filename.split('_')[6].split('O')[1])
thisdate = MISRData.orbit_to_date(orbit)
print "orbit: %d -> %s " % (orbit, thisdate)
datestr2 = datestr2 + [thisdate]
sd = SD.SD(filename)
##################################################################################################################################################################
dataset = sd.select('Green Radiance/RDQI')
dim = dataset.dimensions()
data_g = dataset.get((60,0,0),
(4, dim['XDim:GreenBand'], dim['YDim:GreenBand']),
(1, 1, 1)
).reshape([2048, 2048])
mountains = np.equal(data_g, 65511)
padding = np.equal(data_g, 65515)
hlines = np.equal(data_g, 65523)
data_g[data_g == 65515] = 0 # PADDING
conv_factor_ds = sd.select('GreenConversionFactor')
dim = conv_factor_ds.dimensions()
conv_factor = conv_factor_ds.get((60,0,0),
(4, dim['XDim:BRF Conversion Factors'], dim['YDim:BRF Conversion Factors']),
(1, 1, 1)
).reshape((32, 32))
conv_factor[conv_factor < 0] = 0
for x in range(0,data_g.shape[0],64):
for y in range(0,data_g.shape[1],64):
converted = np.multiply(data_g[x:x+64,y:y+64],
conv_factor[x/64,y/64])
data_g[x:x+64,y:y+64] = converted
dataset = sd.select('Red Radiance/RDQI')
dim = dataset.dimensions()
data_r = dataset.get((60,0,0),
(4, dim['XDim:RedBand'], dim['YDim:RedBand']),
(1, 1, 1)
).reshape([2048, 2048])
data_r[data_r == 65515] = 0 # PADDING
conv_factor_ds = sd.select('RedConversionFactor')
dim = conv_factor_ds.dimensions()
conv_factor = conv_factor_ds.get((60,0,0),
(4, dim['XDim:BRF Conversion Factors'], dim['YDim:BRF Conversion Factors']),
(1, 1, 1)
).reshape((32, 32))
conv_factor[conv_factor < 0] = 0
for x in range(0,data_r.shape[0],64):
for y in range(0,data_r.shape[1],64):
converted = np.multiply(data_r[x:x+64,y:y+64],
conv_factor[x/64,y/64])
data_r[x:x+64,y:y+64] = converted
dataset = sd.select('Blue Radiance/RDQI')
dim = dataset.dimensions()
data_b = dataset.get((60,0,0),
(4, dim['XDim:BlueBand'], dim['YDim:BlueBand']),
(1, 1, 1)
).reshape([2048, 2048])
data_b[data_b == 65515] = 0 # PADDING
conv_factor_ds = sd.select('BlueConversionFactor')
dim = conv_factor_ds.dimensions()
conv_factor = conv_factor_ds.get((60,0,0),
(4, dim['XDim:BRF Conversion Factors'], dim['YDim:BRF Conversion Factors']),
(1, 1, 1)
).reshape((32, 32))
conv_factor[conv_factor < 0] = 0
for x in range(0,data_b.shape[0],64):
for y in range(0,data_b.shape[1],64):
converted = np.multiply(data_b[x:x+64,y:y+64],
conv_factor[x/64,y/64])
data_b[x:x+64,y:y+64] = converted
im = np.zeros([2048, 2048, 3])
data_r = data_r / float(data_r.max()) * 256
data_g = data_g / float(data_g.max()) * 256
data_b = data_b / float(data_b.max()) * 256
im[...,0] = data_r
im[...,1] = data_g
im[...,2] = data_b
im = im.astype('uint8')
im[np.equal(im, 0)] = 255
im[0:512, 64:, :] = im[0:512, :-64, :]
im[1024:, :-64, :] = im[1024:, 64:, :]
im[1536:, :-64, :] = im[1536:, 64:, :]
isnotwhite = np.not_equal(im, 255)
isnotwhiterows = isnotwhite.sum(1)
isnotwhitecols = isnotwhite.sum(0)
goodrows = [i for i in range(im.shape[0]) if isnotwhiterows[i, :].sum() > 0]
goodcols = [i for i in range(im.shape[1]) if isnotwhitecols[i, :].sum() > 0]
im = im[goodrows[0]:goodrows[-1], goodcols[0]:goodcols[-1], :]
rgbimages.append(im)
# Close the file
sd.end()
print '.',
sys.stdout.flush()
outf = open(outfile, 'w')
print len(datestr)
# Assert that the raw and AOD sequences are corresponding
for i in range(len(datestr)):
if datestr[i] != datestr2[i]:
print "ERROR! Date sequences do not align."
print " detected at index %d: AOD %s, raw %s" % (i, datestr[i], datestr2[i])
pickle.dump((data, rgbimages, along_track, cross_track,
latlons, datestr), outf)
#pickle.dump((data, along_track, cross_track,
# latlons, datestr), outf)
outf.close()
################### MISR ##############
### Analysis per time point. ###
#######################################
class MISRDataTime(MISRData):
# Contains code needed to load, plot, and interpret MISR data.
# Here, each item is one time point, so data is plotted spatially.
def __init__(self, rawdirname=None, AODdirname=None, filename=None):
MISRData.__init__(self, rawdirname, AODdirname, filename)
def readin(self, filename=None):
"""readin(filename="")
Read in MISR data (pickled) from filename.
"""
super(MISRDataTime, self).readin()
# data is pixels x timepts
self.name = self.name + | |
"location". This defaults to the empty string,
indicating that the application corresponds to the "root" of the
server.
protocol: The protocol to use for the URL scheme
(default: 'http')
port (int): The TCP port to simulate. Defaults to
the standard port used by the given scheme (i.e., 80 for 'http'
and 443 for 'https'). A string may also be passed, as long as
it can be parsed as an int.
params (dict): A dictionary of query string parameters,
where each key is a parameter name, and each value is
either a ``str`` or something that can be converted
into a ``str``, or a list of such values. If a ``list``,
the value will be converted to a comma-delimited string
of values (e.g., 'thing=1,2,3').
params_csv (bool): Set to ``False`` to encode list values
in query string params by specifying multiple instances
of the parameter (e.g., 'thing=1&thing=2&thing=3').
Otherwise, parameters will be encoded as comma-separated
values (e.g., 'thing=1,2,3'). Defaults to ``True``.
query_string (str): A raw query string to include in the
request (default: ``None``). If specified, overrides
`params`.
content_type (str): The value to use for the Content-Type header in
the request. If specified, this value will take precedence over
any value set for the Content-Type header in the
`headers` keyword argument. The ``falcon`` module provides a number
of :ref:`constants for common media types <media_type_constants>`.
headers (dict): Extra headers as a dict-like (Mapping) object, or an
iterable yielding a series of two-member (*name*, *value*)
iterables. Each pair of strings provides the name and value
for an HTTP header. If desired, multiple header values may be
combined into a single (*name*, *value*) pair by joining the values
with a comma when the header in question supports the list
format (see also RFC 7230 and RFC 7231). Header names are not
case-sensitive.
body (str): The body of the request (default ''). The value will be
encoded as UTF-8 in the WSGI environ. Alternatively, a byte string
may be passed, in which case it will be used as-is.
json(JSON serializable): A JSON document to serialize as the
body of the request (default: ``None``). If specified,
overrides `body` and sets the Content-Type header to
``'application/json'``, overriding any value specified by either
the `content_type` or `headers` arguments.
host(str): A string to use for the hostname part of the fully
qualified request URL (default: 'falconframework.org')
remote_addr (str): A string to use as the remote IP address for the
request (default: '127.0.0.1'). For WSGI, this corresponds to
the 'REMOTE_ADDR' environ variable. For ASGI, this corresponds
to the IP address used for the 'client' field in the connection
scope.
http_version (str): The HTTP version to simulate. Must be either
'2', '2.0', '1.1', '1.0', or '1' (default '1.1'). If set to '1.0',
the Host header will not be added to the scope.
asgi_chunk_size (int): The maximum number of bytes that will be
sent to the ASGI app in a single 'http.request' event (default
4096).
asgi_disconnect_ttl (int): The maximum number of seconds to wait
since the request was initiated, before emitting an
'http.disconnect' event when the app calls the
receive() function (default 300).
extras (dict): Additional values to add to the WSGI
``environ`` dictionary or the ASGI scope for the request
(default: ``None``)
Returns:
:py:class:`~.Result`: The result of the request
"""
path, query_string, headers, body, extras = _prepare_sim_args(
path, query_string, params, params_csv, content_type, headers, body, json, extras)
# ---------------------------------------------------------------------
# NOTE(kgriffs): 'http' scope
# ---------------------------------------------------------------------
content_length = None
if body is not None:
if isinstance(body, str):
body = body.encode()
content_length = len(body)
http_scope = helpers.create_scope(
path=path,
query_string=query_string,
method=method,
headers=headers,
host=host,
scheme=protocol,
port=port,
http_version=http_version,
remote_addr=remote_addr,
root_path=root_path,
content_length=content_length,
)
if 'method' in extras and extras['method'] != method.upper():
raise ValueError(
'ASGI scope extras may not override the request method. '
'Please use the method parameter.'
)
http_scope.update(extras)
# ---------------------------------------------------------------------
disconnect_at = time.time() + max(0, asgi_disconnect_ttl)
req_event_emitter = helpers.ASGIRequestEventEmitter(
(body or b''),
chunk_size=asgi_chunk_size,
disconnect_at=disconnect_at,
)
resp_event_collector = helpers.ASGIResponseEventCollector()
if not _one_shot:
task_req = create_task(
app(http_scope, req_event_emitter, resp_event_collector))
if _stream_result:
# NOTE(kgriffs): Wait until the response has been started and give
# the task a chance to progress. Otherwise, we won't have a
# status or headers to pass to StreamedResult.
while not resp_event_collector.status:
await asyncio.sleep(0)
return StreamedResult(resp_event_collector.body_chunks,
code_to_http_status(
resp_event_collector.status),
resp_event_collector.headers,
task_req,
req_event_emitter)
req_event_emitter.disconnect()
await task_req
return Result(resp_event_collector.body_chunks,
code_to_http_status(resp_event_collector.status),
resp_event_collector.headers)
# ---------------------------------------------------------------------
# NOTE(kgriffs): 'lifespan' scope
# ---------------------------------------------------------------------
lifespan_scope = {
'type': ScopeType.LIFESPAN,
'asgi': {
'version': '3.0',
'spec_version': '2.0',
},
}
shutting_down = asyncio.Condition()
lifespan_event_emitter = helpers.ASGILifespanEventEmitter(shutting_down)
lifespan_event_collector = helpers.ASGIResponseEventCollector()
# ---------------------------------------------------------------------
async def conductor():
# NOTE(kgriffs): We assume this is a Falcon ASGI app, which supports
# the lifespan protocol and thus we do not need to catch
# exceptions that would signify no lifespan protocol support.
task_lifespan = get_running_loop().create_task(
app(lifespan_scope, lifespan_event_emitter, lifespan_event_collector)
)
await _wait_for_startup(lifespan_event_collector.events)
task_req = create_task(
app(http_scope, req_event_emitter, resp_event_collector))
req_event_emitter.disconnect()
await task_req
# NOTE(kgriffs): Notify lifespan_event_emitter that it is OK
# to proceed.
async with shutting_down:
shutting_down.notify()
await _wait_for_shutdown(lifespan_event_collector.events)
await task_lifespan
await conductor()
return Result(resp_event_collector.body_chunks,
code_to_http_status(resp_event_collector.status),
resp_event_collector.headers)
class ASGIConductor:
"""Test conductor for ASGI apps.
This class provides more control over the lifecycle of a simulated
request as compared to :class:`~.TestClient`. In addition, the conductor's
asynchronous interface affords interleaved requests and the testing of
streaming protocols such as server-sent events (SSE) and WebSocket.
:class:`~.ASGIConductor` is implemented as a context manager. Upon
entering and exiting the context, the appropriate ASGI lifespan events
will be simulated.
Within the context, HTTP requests can be simulated using an interface
that is similar to :class:`~.TestClient`, except that all ``simulate_*()``
methods are coroutines::
async with testing.ASGIConductor(some_app) as conductor:
async def post_events():
for i in range(100):
await conductor.simulate_post('/events', json={'id': i})
await asyncio.sleep(0.01)
async def get_events_sse():
# Here, we will get only some of the single server-sent events
# because the non-streaming method is "single-shot". In other
# words, simulate_get() will emit a client disconnect event
# into the app before returning.
result = await conductor.simulate_get('/events')
# Alternatively, we can use simulate_get_stream() as a context
# manager to perform a series of reads on the result body that
# are interleaved with the execution of the post_events()
# coroutine.
async with conductor.simulate_get_stream('/events') as sr:
while some_condition:
# Read next body chunk that was received (if any).
chunk = await sr.stream.read()
if chunk:
# TODO: Do something with the chunk
pass
# Exiting the context causes the request event emitter to
# begin emitting 'http.disconnect' events and then awaits
# the completion of the asyncio task that is running the
# simulated ASGI request.
await asyncio.gather(post_events(), get_events_sse())
Note:
Because the :class:`~.ASGIConductor` interface uses coroutines,
it cannot be used directly with synchronous testing frameworks such as
pytest.
As a workaround, the test can be adapted by wrapping it in
an inline async function and then invoking it via
:meth:`falcon.invoke_coroutine_sync` or decorating the test function
with :meth:`falcon.runs_sync`.
Alternatively, you can try searching PyPI to see if an async plugin is
available for your testing framework of choice. For example, the
``pytest-asyncio`` plugin is available for ``pytest`` users.
Args:
app (callable): An ASGI application to target when simulating
requests.
Keyword Arguments:
headers (dict): Default headers to set on every request (default
``None``). These defaults may be overridden by passing values
for the same headers to one of the ``simulate_*()`` methods.
Attributes:
app: The app that this client instance was configured to use.
"""
def __init__(self, app, headers=None):
if not _is_asgi_app(app):
raise CompatibilityError(
'ASGIConductor may only be used with an ASGI app')
self.app = app
self._default_headers = headers
self._shutting_down = asyncio.Condition()
self._lifespan_event_collector = helpers.ASGIResponseEventCollector()
self._lifespan_task = None
async def __aenter__(self):
lifespan_scope = {
'type': ScopeType.LIFESPAN,
'asgi': {
'version': '3.0',
'spec_version': '2.0',
},
}
lifespan_event_emitter = helpers.ASGILifespanEventEmitter(
self._shutting_down)
# NOTE(kgriffs): We assume this is a Falcon ASGI app, which supports
# the lifespan protocol and thus we do not need to catch
# exceptions that would signify no lifespan protocol support.
self._lifespan_task | |
"▁Walker": 7032,
"▁850": 7033,
"▁1938": 7034,
"DM": 7035,
"▁Massachusetts": 7036,
"▁Pur": 7037,
"HI": 7038,
"▁2017-2018": 7039,
"▁55%": 7040,
"▁Liber": 7041,
"7,6": 7042,
"▁Harris": 7043,
"▁Hamburg": 7044,
"▁Bla": 7045,
"▁Mak": 7046,
"Ri": 7047,
"zh": 7048,
"▁Team": 7049,
"Tra": 7050,
"oka": 7051,
"▁Gau": 7052,
"▁Roland": 7053,
"gus": 7054,
"mic": 7055,
"▁1996.": 7056,
"City": 7057,
"nik": 7058,
"▁Howard": 7059,
"▁LGBT": 7060,
"▁ten": 7061,
"▁Rie": 7062,
"sol": 7063,
"iga": 7064,
"▁1-": 7065,
"▁But": 7066,
"Jo": 7067,
"▁Ac": 7068,
"3,5%": 7069,
"lari": 7070,
"EX": 7071,
"mir": 7072,
"0,1%": 7073,
"tti": 7074,
"▁McG": 7075,
"rai": 7076,
"▁Gal": 7077,
"▁Rama": 7078,
"▁CPU": 7079,
"▁Standard": 7080,
"lie": 7081,
"AMA": 7082,
"▁Lever": 7083,
"2004": 7084,
"XO": 7085,
"ise": 7086,
"▁rap": 7087,
"▁Hon": 7088,
"▁Hotel": 7089,
"chop": 7090,
"▁collage": 7091,
"▁Congo": 7092,
"▁DS": 7093,
"▁Wan": 7094,
"RS": 7095,
"▁feet": 7096,
"▁Electronic": 7097,
"▁Teen": 7098,
"▁225": 7099,
"▁pro": 7100,
"▁Paraguay": 7101,
"ASE": 7102,
"▁FED": 7103,
"▁ar": 7104,
"aggio": 7105,
"▁single": 7106,
"Pa": 7107,
"yuk": 7108,
"▁1992.": 7109,
"▁make": 7110,
"▁Cast": 7111,
"850": 7112,
"▁Lewis": 7113,
"▁gene": 7114,
"▁poster": 7115,
"▁Matthew": 7116,
"posit": 7117,
"▁Ajax": 7118,
"▁Bruno": 7119,
"▁Villar": 7120,
"▁Rhod": 7121,
"▁Net": 7122,
"aca": 7123,
"made": 7124,
"aw": 7125,
"old": 7126,
"IT": 7127,
"▁Infinit": 7128,
"▁Ama": 7129,
"borough": 7130,
"▁KB": 7131,
"TAT": 7132,
"ota": 7133,
"Hay": 7134,
"▁Book": 7135,
"▁Pepe": 7136,
"5-1": 7137,
"andra": 7138,
"▁Omar": 7139,
"right": 7140,
"▁Cab": 7141,
"▁Cau": 7142,
"▁Das": 7143,
"▁Irina": 7144,
"issa": 7145,
"▁Walter": 7146,
"▁Vá": 7147,
"nd": 7148,
"▁VIDEO": 7149,
"▁21%": 7150,
"▁Lug": 7151,
"▁Pad": 7152,
"hem": 7153,
"▁Lisa": 7154,
"▁Firefox": 7155,
"WHO": 7156,
"▁NAM": 7157,
"▁Warner": 7158,
"bay": 7159,
"steiger": 7160,
"▁Hum": 7161,
"▁Take": 7162,
"▁clo": 7163,
"bert": 7164,
"▁Ethiopia": 7165,
"cent": 7166,
"ISS": 7167,
"▁Nur": 7168,
"▁Iz": 7169,
"▁blogger": 7170,
"▁Azerbaijan": 7171,
"▁Moro": 7172,
"Si": 7173,
"▁Stephan": 7174,
"▁Garcia": 7175,
"form": 7176,
"▁Smartphone": 7177,
"ille": 7178,
"▁8)": 7179,
"▁Grammy": 7180,
"LM": 7181,
"▁Salman": 7182,
"▁Mats": 7183,
"▁Acer": 7184,
"▁Channel": 7185,
"▁2018:": 7186,
"▁Marcelo": 7187,
"▁Guill": 7188,
"▁Surface": 7189,
"▁131": 7190,
"▁3-6": 7191,
"▁BA": 7192,
"▁Jen": 7193,
"yer": 7194,
"▁Low": 7195,
"▁8-9": 7196,
"IoT": 7197,
"▁Dụng": 7198,
"▁Hua": 7199,
"VE": 7200,
"▁3.2": 7201,
"rian": 7202,
"▁Him": 7203,
"▁Opera": 7204,
"▁ko": 7205,
"quet": 7206,
"▁Kend": 7207,
"its": 7208,
"New": 7209,
"▁Laurent": 7210,
"▁Douglas": 7211,
"▁139": 7212,
"▁Has": 7213,
"▁Was": 7214,
"more": 7215,
"Har": 7216,
"▁Berg": 7217,
"LOS": 7218,
"▁Bio": 7219,
"▁Sale": 7220,
"▁ami": 7221,
"▁Billboard": 7222,
"dus": 7223,
"▁Rus": 7224,
"lev": 7225,
"999": 7226,
"▁2016-2017": 7227,
"▁Market": 7228,
"oli": 7229,
"uch": 7230,
"lash": 7231,
"rium": 7232,
"Ka": 7233,
"SAT": 7234,
"Te": 7235,
"▁Dow": 7236,
"ume": 7237,
"Mos": 7238,
"▁1937": 7239,
"▁LA": 7240,
"UN": 7241,
"cati": 7242,
"▁Well": 7243,
"mét": 7244,
"lock": 7245,
"▁152": 7246,
"▁Nikola": 7247,
"teur": 7248,
"▁Capel": 7249,
"care": 7250,
"vát": 7251,
"uca": 7252,
"▁1991.": 7253,
"▁Cay": 7254,
"pto": 7255,
"3.8": 7256,
"ins": 7257,
"▁129": 7258,
"▁Dance": 7259,
"▁Non": 7260,
"450": 7261,
"▁Lú": 7262,
"▁911": 7263,
"▁Kop": 7264,
"▁titan": 7265,
"ala": 7266,
"VA": 7267,
"▁Bran": 7268,
"▁Singh": 7269,
"▁Maha": 7270,
"▁Alvar": 7271,
"lich": 7272,
"gna": 7273,
"▁Jorge": 7274,
"yla": 7275,
"▁Cle": 7276,
"▁Phantom": 7277,
"▁Basel": 7278,
"ada": 7279,
"▁Football": 7280,
"▁Rubi": 7281,
"▁Herre": 7282,
"▁Syn": 7283,
"▁Emily": 7284,
"▁168": 7285,
"▁Yuri": 7286,
"▁Rani": 7287,
"halli": 7288,
"▁Sum": 7289,
"UNC": 7290,
"▁Row": 7291,
"▁Om": 7292,
"sper": 7293,
"▁Connect": 7294,
"▁cara": 7295,
"china": 7296,
"▁Bull": 7297,
"grad": 7298,
"AT": 7299,
"lú": 7300,
"▁Holl": 7301,
"▁break": 7302,
"rê": 7303,
"▁Blade": 7304,
"gne": 7305,
"▁185": 7306,
"▁Wen": 7307,
"stin": 7308,
"▁Rat": 7309,
"▁0-3": 7310,
"▁Rihanna": 7311,
"▁hybrid": 7312,
"vit": 7313,
"▁Stein": 7314,
"mal": 7315,
"pore": 7316,
"are": 7317,
"vít": 7318,
"▁Owen": 7319,
"US": 7320,
"▁Bez": 7321,
"HO": 7322,
"▁para": 7323,
"▁23%": 7324,
"ous": 7325,
"sic": 7326,
"tah": 7327,
"▁136": 7328,
"▁1993.": 7329,
"▁Korea": 7330,
"▁Radio": 7331,
"▁Chap": 7332,
"▁Trust": 7333,
"▁LL": 7334,
"1-8": 7335,
"▁Force": 7336,
"▁Ort": 7337,
"▁Bart": 7338,
"▁Arizona": 7339,
"▁HI": 7340,
"▁Jess": 7341,
"▁Vienna": 7342,
"▁É": 7343,
"here": 7344,
"▁Susan": 7345,
"▁short": 7346,
"rod": 7347,
"▁PA": 7348,
"▁iPod": 7349,
"▁Hat": 7350,
"▁Denis": 7351,
"▁NO": 7352,
"ale": 7353,
"▁People": 7354,
"jan": 7355,
"▁Aviv": 7356,
"▁Kaspersky": 7357,
"nim": 7358,
"morph": 7359,
"IA": 7360,
"▁Mich": 7361,
"FI": 7362,
"mine": 7363,
"▁Panda": 7364,
"▁Latvia": 7365,
"▁Minnesota": 7366,
"▁Grande": 7367,
"▁Middle": 7368,
"ip": 7369,
"▁WA": 7370,
"▁Schol": 7371,
"yahoo": 7372,
"▁Got": 7373,
"▁Evans": 7374,
"▁Macedonia": 7375,
"▁táp": 7376,
"▁Dal": 7377,
"▁Rico": 7378,
"▁pe": 7379,
"▁Nike": 7380,
"kaya": 7381,
"▁Jersey": 7382,
"antha": 7383,
"▁Dominic": 7384,
"▁Emer": 7385,
"Settings": 7386,
"321": 7387,
"▁Studio": 7388,
"▁Naga": 7389,
"▁Of": 7390,
"stic": 7391,
"▁Tayyip": 7392,
"7,7": 7393,
"▁India": 7394,
"ora": 7395,
"▁Gib": 7396,
"ef": 7397,
"Sport": 7398,
"▁Maxim": 7399,
"▁po": 7400,
"▁om": 7401,
"▁0,9": 7402,
"▁AB": 7403,
"dio": 7404,
"him": 7405,
"▁Eo": 7406,
"▁Moz": 7407,
"▁cent": 7408,
"▁hack": 7409,
"▁Gel": 7410,
"ONG": 7411,
"▁Shanghai": 7412,
"▁2009)": 7413,
"Ụ": 7414,
"▁Gan": 7415,
"▁Naka": 7416,
"▁146": 7417,
"▁Championship": 7418,
"lina": 7419,
"▁Cape": 7420,
"bó": 7421,
"▁5000": 7422,
"ith": 7423,
"▁Link": 7424,
"-47": 7425,
"dic": 7426,
"▁Rá": 7427,
"▁Ble": 7428,
"ici": 7429,
"▁Dry": 7430,
"hed": 7431,
"▁omega": 7432,
"▁Jur": 7433,
"▁Toma": 7434,
"▁estrogen": 7435,
"▁Yi": 7436,
"essa": 7437,
"ving": 7438,
"ím": 7439,
"▁Ted": 7440,
"▁Tung": 7441,
"▁2010)": 7442,
"VB": 7443,
"▁167": 7444,
"▁soda": 7445,
"but": 7446,
"▁Sco": 7447,
"▁ró": 7448,
"▁420": 7449,
"▁Beauty": 7450,
"▁Alba": 7451,
"5,9": 7452,
"tai": 7453,
"7,4": 7454,
"▁vir": 7455,
"AG": 7456,
"rena": 7457,
"ula": 7458,
"▁1994.": 7459,
"Star": 7460,
"▁hún": 7461,
"Da": 7462,
"ddy": 7463,
"NEWS": 7464,
"ICE": 7465,
"sko": 7466,
"▁Alice": 7467,
"▁net": 7468,
"tero": 7469,
"IB": 7470,
"▁Independent": 7471,
"▁133": 7472,
"pod": 7473,
"8,9": 7474,
"▁Glen": 7475,
"iro": 7476,
"py": 7477,
"hia": 7478,
"ishi": 7479,
"▁Digital": 7480,
"▁mag": 7481,
"inga": 7482,
"▁Hans": 7483,
"▁Gut": 7484,
"▁Lily": 7485,
"▁1080": 7486,
"▁College": 7487,
"▁Dai": 7488,
"eh": 7489,
"▁Julian": 7490,
"▁Titan": 7491,
"ops": 7492,
"Bar": 7493,
"Ó": 7494,
"chan": 7495,
"ese": 7496,
"▁Jama": 7497,
"▁Julia": 7498,
"dier": 7499,
"eve": 7500,
"vad": 7501,
"▁Shak": 7502,
"/2001": 7503,
"▁Norman": 7504,
"▁Venus": 7505,
"▁Javier": 7506,
"Germain": 7507,
"jin": 7508,
"rat": 7509,
"▁Arthur": 7510,
"▁Blu": 7511,
"▁Giu": 7512,
"▁ion": 7513,
"zel": 7514,
"zza": 7515,
"▁Aus": 7516,
"mmer": 7517,
"itz": 7518,
"▁Puerto": 7519,
"001": 7520,
"▁1929": 7521,
"▁0,5%": 7522,
"2007": 7523,
"▁Bernard": 7524,
"▁19%": 7525,
"▁Little": 7526,
"▁Suk": 7527,
"660": 7528,
"▁Skri": 7529,
"phra": 7530,
"nikov": 7531,
"RIM": 7532,
"tý": 7533,
"1,6": 7534,
"▁Hey": 7535,
"▁Cooper": 7536,
"▁2.3": 7537,
"▁5.0": 7538,
"GAS": 7539,
"Claude": 7540,
"▁Guinness": 7541,
"Ex": 7542,
"▁Chat": 7543,
"▁Richter": 7544,
"▁Log": 7545,
"▁Science": 7546,
"▁Ahmed": 7547,
"SK": 7548,
"▁Slovenia": 7549,
"▁144": 7550,
"▁Taj": 7551,
"▁Abdullah": 7552,
"▁Hab": 7553,
"▁Toshiba": 7554,
"NR": 7555,
"hill": 7556,
"▁142": 7557,
"lik": 7558,
"▁Back": 7559,
"Sy": 7560,
"▁Sala": 7561,
"▁2017:": 7562,
"tino": 7563,
"ern": 7564,
"▁shi": 7565,
"▁32.": 7566,
"▁SSD": 7567,
"▁Spring": 7568,
"▁Yuk": 7569,
"▁Vincent": 7570,
"idi": 7571,
"▁Janeiro": 7572,
"▁Najib": 7573,
"▁Harvey": 7574,
"ali": 7575,
"las": 7576,
"▁Tanzania": 7577,
"▁ang": 7578,
"ec": 7579,
"mura": 7580,
"1-6": 7581,
"3)": 7582,
"oh": 7583,
"▁Marcus": 7584,
"▁des": 7585,
"OA": 7586,
"▁VÀ": 7587,
"▁showroom": 7588,
"gó": 7589,
"▁Talent": 7590,
"▁Rand": 7591,
"149": 7592,
"Cho": 7593,
"▁DO": 7594,
"bro": 7595,
"vai": 7596,
"▁Kom": 7597,
"suke": 7598,
"▁2.1": 7599,
"art": 7600,
"▁Ade": 7601,
"▁Nan": 7602,
"Sha": 7603,
"tap": 7604,
"toni": 7605,
"HR": 7606,
"RV": 7607,
"ambi": 7608,
"eira": 7609,
"of": 7610,
"▁SNS": 7611,
"▁Jimmy": 7612,
"ania": 7613,
"1997": 7614,
"mg": 7615,
"▁Hein": 7616,
"AL": 7617,
"▁Moore": 7618,
"ches": 7619,
"mol": 7620,
"▁Financial": 7621,
"▁Mars": 7622,
"▁Stalin": 7623,
"view": 7624,
"▁Hos": 7625,
"▁Philippe": 7626,
"PU": 7627,
"▁Yun": 7628,
"▁Legend": 7629,
"▁pizza": 7630,
"▁137": 7631,
"Col": 7632,
"▁VS": 7633,
"▁Miguel": 7634,
"gger": 7635,
"▁insulin": 7636,
"▁Mate": 7637,
"▁Ian": 7638,
"▁Simeon": 7639,
"▁163": 7640,
"▁Glass": 7641,
"▁777": 7642,
"▁Ao": 7643,
"▁DU": 7644,
"▁Uganda": 7645,
"eda": 7646,
"▁380": 7647,
"▁Civic": 7648,
"▁stylist": 7649,
"▁Kin": 7650,
"▁Bruce": 7651,
"▁Page": 7652,
"▁XIX": 7653,
"uz": 7654,
"▁Future": 7655,
"▁Kra": 7656,
"▁View": 7657,
"▁mõ": 7658,
"▁156": 7659,
"▁Brent": 7660,
"yen": 7661,
"▁Walt": 7662,
"▁8-10": 7663,
"▁Boko": 7664,
"ÀI": 7665,
"▁Amy": 7666,
"/02/": 7667,
"travel": 7668,
"bur": 7669,
"bio": 7670,
"▁Mul": 7671,
| |
#!/usr/bin/env python
"""
A headless open source tool for synchronizing users' name and emails between a
MySQL database and mailman server. Correct login credentials for the database
and mailman server must be provided in a '.env' file.
Python2 & Python3 compatible.
Author: <NAME>
"""
from __future__ import print_function, division, unicode_literals
import argparse
import os
import re
import sys
import time
from builtins import range
import dotenv
import splinter
import pymysql
def login_webserv(site, uri, pswd):
"""
Logs into the webserv by navigating to the URL, inputting credentials, and
clicking the login button.
Args:
site (splinter.driver.webdriver): Instance of the splinter browser.
uri (str): Web address to navigate with splinter browser.
pswd (str): Password required to login to the webserv.
"""
site.visit(uri)
assert site.is_text_present(VERSION_NUMBER), "Incorrect version number\nAborting"
site.fill('adminpw', pswd)
site.find_by_name('admlogin').click()
assert site.is_text_not_present('Authorization failed.'), "Login failed\nAborting"
def logout_webserv(site, uri):
"""
Logs out of the webserv.
Args:
site (splinter.driver.webdriver): Instance of the splinter browser.
uri (str): Web address to navigate with splinter browser.
"""
site.visit(uri + '/logout')
site.quit()
def get_db_content(HOST, UNAME, DBPSWD, NAME):
"""
Collects the emails from the "database" to be used later.
Args:
HOST (str): Uri for mysql database
UNAME (str): Username for mysql database
DBPSWD (str): Password for mysql database
NAME (str): Name of table in mysql database
Attributes:
db (pymysql.connections.Connection): Database connection
cursor (pymysql.cursors.Cursor): Used to interact with database
data (tuple): All users returned from fetching from database
content (dict): Data in the expected form of a database
Returns:
dict: Content attribute that contains all of the users on the database.
"""
# Open database connection
db = pymysql.connect(host=HOST, user=UNAME, password=DBPSWD, database=NAME)
# prepare a cursor object using cursor() method
cursor = db.cursor()
# execute SQL query using execute() method.
cursor.execute("SELECT CONCAT(p.lname, ' ', p.fname) AS name, p.email FROM \
ac_person p\
WHERE p.deleted IS NULL AND p.onnoticelist = 't'\
ORDER BY p.lname ASC, p.fname ASC")
# Fetch all rows using the fetchall() method.
data = cursor.fetchall() # data = (("lname fname", "email"))
content = dict()
try: #python2 version
for user in data:
content[unicode(user[1], "utf-8")] = unicode(user[0], "utf-8")
except NameError: #python3
for user in data:
content[user[1]] = user[0]
# disconnect from server
db.close()
log("Database data is collected")
return content
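# Shape of the returned dict (illustrative values only):
#   {'jdoe@example.com': 'Doe Jane', 'rroe@example.com': 'Roe Richard'}
# i.e. email -> "lname fname", mirroring the CONCAT(p.lname, ' ', p.fname) column above.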
def get_web_emails(site, uri):
"""
Scrapes the webserv for all of the users uploaded.
Args:
site (splinter.driver.webdriver): Instance of the splinter browser.
uri (str): Web address to navigate with splinter browser.
Attributes:
letters (list): Contains every letter representing a tab in the html.
web_emails (list): Stores all of the scraped emails.
maxUsers (int): Stores the total number of emails on the webserv. Used
for logging progress.
current (int): Counter for what percentage of emails have been scraped
rows (str): Unused variable that stores how many rows the terminal window is.
columns (int): Stores the number of columns wide the terminal window is
chunks (int): Stores the current "chunk" the scraper is at from the html.
Used for scraping all data if the webserv has organized it in
sublists.
links (splinter.element_list.ElementList): Stores splinter obj referring to all the matching
elements. Used to find all emails on current screen.
Returns:
list: Web_emails attribute that contains all emails scraped from the webserv.
"""
#navigate to membership management
site.visit(uri + '/members/list')
letters = list(map(str, re.findall(r'/members\?letter=[a-z0-9]', site.html)))
if letters != []:
letters = list(set(letters))
web_emails = list()
maxUsers = int(re.search('<em>([0-9]*) members total', site.html).group(1))
current = 0
rows, columns = os.popen('stty size', 'r').read().split()
columns = min(int(columns) - len("] 100% complete "), 80)
log("Scraping data from webserv")
if letters != []: #found letters
for letter in letters:
site.visit(uri + letter)
chunks = len(site.find_link_by_partial_href('&chunk='))
for chunk in range(chunks+1):
site.visit(uri + letter + '&chunk=' + str(chunk))
links = site.find_link_by_partial_href('--at--')
for link in links:
web_emails.append(link.value)
ratio = len(web_emails)/maxUsers
current = int(round(ratio*columns))
if not args.quiet:
sys.stdout.write("\r\033[93m" + '['\
+ '#'*(current) + ' '*(columns - current) \
+ "] " + str(int(round(ratio*100)))\
+ "% complete\033[0m")
sys.stdout.flush()
if not args.quiet:
sys.stdout.write('\n')
else: #all on one page
site.visit(uri + '/members/list')
links = site.find_link_by_partial_href('--at--')
for link in links:
web_emails.append(link.value)
log("Webserv data is collected")
return web_emails
def compare_datasets(webmail, db_content):
"""
Compares email lists and appends data to appropriate add/rm_email data
structs.
Examples:
if (email in database but not webserv) add;
if (email in webserv but not database) remove;
Args:
webmail (list): List of all emails scraped from the webserv.
db_content (dict): Dictionary, used as a set of keys (emails), containing
all the users in the database.
Attributes:
add_users (str): Newline separated emails to be added of the format:
"lname fname <email>\\n".
rm_emails (str): Newline separated emails to be removed of the format:
"email\\n".
Returns:
tuple: Contains the emails to add and remove from the webserv
"""
add_users = ""
rm_emails = ""
log("Comparing emails found on webserv with emails in database")
#compares every email from the webserv to those found in the database
for web_data in webmail:
if web_data not in db_content: #if true, then that email must be removed
rm_emails += web_data + '\n'
#compares every email from the database to those found in the webserv
for db_data in db_content:
if db_data not in webmail: #if true, then that email must be added
add_users += db_content[db_data] + ' <' + db_data + '>\n'
return tuple([add_users, rm_emails])
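# Illustrative comparison (hypothetical addresses):
#   webmail    = ['old@example.com']
#   db_content = {'new@example.com': 'Doe Jane'}
# yields add_users = 'Doe Jane <new@example.com>\n' and rm_emails = 'old@example.com\n'.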
def update_webserv(site, uri, data):
"""
Updates the webserv by adding and removing emails based on discrepancies
between the webserv and the database.
Args:
site (splinter.driver.webdriver): Instance of the splinter browser.
uri (str): Web address to navigate with splinter browser.
data (tuple): First index is a list of emails to add. Second index is a
list of emails to remove.
Attributes:
added_users (str): Newline-separated users to be added to the webserv.
removed_emails (str): Newline-separated emails to be removed from the webserv.
"""
log("Synchronizing data on the webserv")
added_users, removed_emails = data
add_webserv_emails(site, uri, added_users)
remove_webserv_emails(site, uri, removed_emails)
log("Webserv and database are synced")
def add_webserv_emails(site, uri, users):
"""
Takes users that have been passed in and navigates to subscription page
of the webserv that adds content to the webserv.
Args:
site (splinter.driver.webdriver): Instance of the splinter browser.
uri (str): Web address to navigate with splinter browser.
users (str): All emails that are to be added to the webserv.
Format: "lname fname <email>\\n"
Attributes:
users (list): Converted emails string (args) to list.
"""
if not args.dryrun:
# emails: string of emails newline separated
site.visit(uri + '/members/add')
site.choose('send_welcome_msg_to_this_batch', '0')
site.fill('subscribees', users)
site.find_by_name('setmemberopts_btn').click()
users = users.split('\n')
if users[-1] == "":
users.pop()
for user in users:
log("\033[32madded\033[0m " + user)
def remove_webserv_emails(site, uri, emails):
"""
Takes emails that have been passed in and navigates to unsubscription page
of the webserv that removes all matching content in the webserv.
Args:
site (splinter.driver.webdriver): Instance of the splinter browser.
uri (str): Web address to navigate with splinter browser.
emails (str): All emails that are to be removed from the webserv.
Format: "email\\n"
Attributes:
emails (list): Converted emails string (args) to list.
"""
if not args.dryrun:
# emails: string of emails newline separated
site.visit(uri + '/members/remove')
site.choose('send_unsub_ack_to_this_batch', '0')
site.fill('unsubscribees', emails)
site.find_by_name('setmemberopts_btn').click()
emails = emails.split('\n')
if emails[-1] == '':
emails.pop()
for email in emails:
log("\033[34mremoved\033[0m " + email)
def log(message):
"""
Outputs to stdout in the format of:
"YYYY-mm-dd hh:mm:ss <message>"
Args:
message (str): Content to output with timestamp.
"""
if not args.quiet:
print(time.strftime("%Y-%m-%d %H:%M:%S") + ' ' + message)
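# Example output (illustrative timestamp): "2021-06-24 13:05:17 Webserv data is collected";
# nothing is printed when --quiet is passed.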
if __name__ == "__main__":
# argparse used to generate help menu and easy commandline argument parsing
parser = argparse.ArgumentParser(description="A headless open source tool\
for synchronizing users' name and email between a mysql database\
and mailman server. Correct login credentials for the database and\
mailman server must be provided in a '.env' file. This script is\
python2 and python3 compatible.", epilog="Author: <NAME>")
parser.add_argument("-q", "--quiet", help="suppress output", action="store_true")
parser.add_argument("-v", "--verbose", help="use the headed firefox browser",
action="store_true")
parser.add_argument("-d", "--dryrun", help="perform a dry run by not \
changing the listserv", action="store_true")
args = parser.parse_args()
if args.verbose:
browser = splinter.Browser()
else:
browser = splinter.Browser('phantomjs')
VERSION_NUMBER = "version 2.1.24"
#collect login data collected from .env
dotenv.load_dotenv(dotenv.find_dotenv())
URI = os.getenv('LISTSERV_URI')
PSWD = os.getenv('PASSWORD')
HOST = os.getenv('HOST')
UNAME | |
m.c22 = Constraint(expr= - m.x18 + m.x33 + m.x36 + m.x39 + m.x42 + m.x45 - m.x48 - m.x60 - m.x75 - m.x119 + m.x120 == 0)
m.c23 = Constraint(expr= - m.x19 + m.x34 + m.x37 + m.x40 + m.x43 + m.x46 - m.x49 - m.x61 - m.x76 - m.x120 + m.x121 == 0)
m.c24 = Constraint(expr= - m.x3 - m.x21 - m.x33 + m.x48 + m.x51 + m.x54 + m.x57 - m.x63 - m.x78 - m.x122 + m.x123 == 0)
m.c25 = Constraint(expr= - m.x4 - m.x22 - m.x34 + m.x49 + m.x52 + m.x55 + m.x58 - m.x64 - m.x79 - m.x123 + m.x124 == 0)
m.c26 = Constraint(expr= - m.x6 - m.x36 + m.x60 + m.x63 + m.x66 + m.x69 + m.x72 - m.x81 - m.x125 + m.x126 == 0)
m.c27 = Constraint(expr= - m.x7 - m.x37 + m.x61 + m.x64 + m.x67 + m.x70 + m.x73 - m.x82 - m.x126 + m.x127 == 0)
m.c28 = Constraint(expr= - m.x9 - m.x24 - m.x39 - m.x51 - m.x66 + m.x75 + m.x78 + m.x81 + m.x84 + m.x87 - m.x128
+ m.x129 == 0)
m.c29 = Constraint(expr= - m.x10 - m.x25 - m.x40 - m.x52 - m.x67 + m.x76 + m.x79 + m.x82 + m.x85 + m.x88 - m.x129
+ m.x130 == 0)
m.c30 = Constraint(expr= - m.x12 - m.x27 - m.x42 - m.x54 - m.x69 - m.x84 - m.x131 + m.x132 == -0.17)
m.c31 = Constraint(expr= - m.x13 - m.x28 - m.x43 - m.x55 - m.x70 - m.x85 - m.x132 + m.x133 == -0.83)
m.c32 = Constraint(expr= - m.x15 - m.x30 - m.x45 - m.x57 - m.x72 - m.x87 - m.x134 + m.x135 == -0.39)
m.c33 = Constraint(expr= - m.x16 - m.x31 - m.x46 - m.x58 - m.x73 - m.x88 - m.x135 + m.x136 == -0.8)
m.c34 = Constraint(expr=m.x90*m.x120 - (m.x89*m.x119 + m.x92*m.x48 + m.x95*m.x60 + m.x98*m.x75 - (m.x89*m.x33 + m.x89*
m.x36 + m.x89*m.x39 + m.x89*m.x42 + m.x89*m.x45)) - 0.7*m.x18 == 0)
m.c35 = Constraint(expr=m.x91*m.x121 - (m.x90*m.x120 + m.x93*m.x49 + m.x96*m.x61 + m.x99*m.x76 - (m.x90*m.x34 + m.x90*
m.x37 + m.x90*m.x40 + m.x90*m.x43 + m.x90*m.x46)) - 0.7*m.x19 == 0)
m.c36 = Constraint(expr=m.x93*m.x123 - (m.x92*m.x122 + m.x89*m.x33 + m.x95*m.x63 + m.x98*m.x78 - (m.x92*m.x48 + m.x92*
m.x51 + m.x92*m.x54 + m.x92*m.x57)) - 0.7*m.x3 - 0.7*m.x21 == 0)
m.c37 = Constraint(expr=m.x94*m.x124 - (m.x93*m.x123 + m.x90*m.x34 + m.x96*m.x64 + m.x99*m.x79 - (m.x93*m.x49 + m.x93*
m.x52 + m.x93*m.x55 + m.x93*m.x58)) - 0.7*m.x4 - 0.7*m.x22 == 0)
m.c38 = Constraint(expr=m.x96*m.x126 - (m.x95*m.x125 + m.x89*m.x36 + m.x98*m.x81 - (m.x95*m.x60 + m.x95*m.x63 + m.x95*
m.x66 + m.x95*m.x69 + m.x95*m.x72)) - 0.7*m.x6 == 0)
m.c39 = Constraint(expr=m.x97*m.x127 - (m.x96*m.x126 + m.x90*m.x37 + m.x99*m.x82 - (m.x96*m.x61 + m.x96*m.x64 + m.x96*
m.x67 + m.x96*m.x70 + m.x96*m.x73)) - 0.7*m.x7 == 0)
m.c40 = Constraint(expr=m.x99*m.x129 - (m.x98*m.x128 + m.x89*m.x39 + m.x92*m.x51 + m.x95*m.x66 - (m.x98*m.x75 + m.x98*
m.x78 + m.x98*m.x81 + m.x98*m.x84 + m.x98*m.x87)) - 0.7*m.x9 - 0.7*m.x24 == 0)
m.c41 = Constraint(expr=m.x100*m.x130 - (m.x99*m.x129 + m.x90*m.x40 + m.x93*m.x52 + m.x96*m.x67 - (m.x99*m.x76 + m.x99*
m.x79 + m.x99*m.x82 + m.x99*m.x85 + m.x99*m.x88)) - 0.7*m.x10 - 0.7*m.x25 == 0)
m.c42 = Constraint(expr=m.x102*m.x120 - (m.x101*m.x119 + m.x104*m.x48 + m.x107*m.x60 + m.x110*m.x75 - (m.x101*m.x33 +
m.x101*m.x36 + m.x101*m.x39 + m.x101*m.x42 + m.x101*m.x45)) - 0.5*m.x18 == 0)
m.c43 = Constraint(expr=m.x103*m.x121 - (m.x102*m.x120 + m.x105*m.x49 + m.x108*m.x61 + m.x111*m.x76 - (m.x102*m.x34 +
m.x102*m.x37 + m.x102*m.x40 + m.x102*m.x43 + m.x102*m.x46)) - 0.5*m.x19 == 0)
m.c44 = Constraint(expr=m.x105*m.x123 - (m.x104*m.x122 + m.x101*m.x33 + m.x107*m.x63 + m.x110*m.x78 - (m.x104*m.x48 +
m.x104*m.x51 + m.x104*m.x54 + m.x104*m.x57)) - 0.1*m.x3 - 0.5*m.x21 == 0)
m.c45 = Constraint(expr=m.x106*m.x124 - (m.x105*m.x123 + m.x102*m.x34 + m.x108*m.x64 + m.x111*m.x79 - (m.x105*m.x49 +
m.x105*m.x52 + m.x105*m.x55 + m.x105*m.x58)) - 0.1*m.x4 - 0.5*m.x22 == 0)
m.c46 = Constraint(expr=m.x108*m.x126 - (m.x107*m.x125 + m.x101*m.x36 + m.x110*m.x81 - (m.x107*m.x60 + m.x107*m.x63 +
m.x107*m.x66 + m.x107*m.x69 + m.x107*m.x72)) - 0.1*m.x6 == 0)
m.c47 = Constraint(expr=m.x109*m.x127 - (m.x108*m.x126 + m.x102*m.x37 + m.x111*m.x82 - (m.x108*m.x61 + m.x108*m.x64 +
m.x108*m.x67 + m.x108*m.x70 + m.x108*m.x73)) - 0.1*m.x7 == 0)
m.c48 = Constraint(expr=m.x111*m.x129 - (m.x110*m.x128 + m.x101*m.x39 + m.x104*m.x51 + m.x107*m.x66 - (m.x110*m.x75 +
m.x110*m.x78 + m.x110*m.x81 + m.x110*m.x84 + m.x110*m.x87)) - 0.1*m.x9 - 0.5*m.x24 == 0)
m.c49 = Constraint(expr=m.x112*m.x130 - (m.x111*m.x129 + m.x102*m.x40 + m.x105*m.x52 + m.x108*m.x67 - (m.x111*m.x76 +
m.x111*m.x79 + m.x111*m.x82 + m.x111*m.x85 + m.x111*m.x88)) - 0.1*m.x10 - 0.5*m.x25 == 0)
m.c50 = Constraint(expr= m.x2 - m.b137 <= 0)
m.c51 = Constraint(expr= m.x3 - m.b138 <= 0)
m.c52 = Constraint(expr= m.x4 - m.b139 <= 0)
m.c53 = Constraint(expr= m.x5 - m.b140 <= 0)
m.c54 = Constraint(expr= m.x6 - m.b141 <= 0)
m.c55 = Constraint(expr= m.x7 - m.b142 <= 0)
m.c56 = Constraint(expr= m.x8 - m.b143 <= 0)
m.c57 = Constraint(expr= m.x9 - m.b144 <= 0)
m.c58 = Constraint(expr= m.x10 - m.b145 <= 0)
m.c59 = Constraint(expr= m.x11 - m.b146 <= 0)
m.c60 = Constraint(expr= m.x12 - m.b147 <= 0)
m.c61 = Constraint(expr= m.x13 - m.b148 <= 0)
m.c62 = Constraint(expr= m.x14 - m.b149 <= 0)
m.c63 = Constraint(expr= m.x15 - m.b150 <= 0)
m.c64 = Constraint(expr= m.x16 - m.b151 <= 0)
m.c65 = Constraint(expr= m.x17 - m.b152 <= 0)
m.c66 = Constraint(expr= m.x18 - m.b153 <= 0)
m.c67 = Constraint(expr= m.x19 - m.b154 <= 0)
m.c68 = Constraint(expr= m.x20 - m.b155 <= 0)
m.c69 = Constraint(expr= m.x21 - m.b156 <= 0)
m.c70 = Constraint(expr= m.x22 - m.b157 <= 0)
m.c71 = Constraint(expr= m.x23 - m.b158 <= 0)
m.c72 = Constraint(expr= m.x24 - m.b159 <= 0)
m.c73 = Constraint(expr= m.x25 - m.b160 <= 0)
m.c74 = Constraint(expr= m.x26 - m.b161 <= 0)
m.c75 = Constraint(expr= m.x27 - m.b162 <= 0)
m.c76 = Constraint(expr= m.x28 - m.b163 <= 0)
m.c77 = Constraint(expr= m.x29 - m.b164 <= 0)
m.c78 = Constraint(expr= m.x30 - m.b165 <= 0)
m.c79 = Constraint(expr= m.x31 - m.b166 <= 0)
m.c80 = Constraint(expr= m.x32 - m.b167 <= 0)
m.c81 = Constraint(expr= m.x33 - m.b168 <= 0)
m.c82 = Constraint(expr= m.x34 - m.b169 <= 0)
m.c83 = Constraint(expr= m.x35 - m.b170 <= 0)
m.c84 = Constraint(expr= m.x36 - m.b171 <= 0)
m.c85 = Constraint(expr= m.x37 - m.b172 <= 0)
m.c86 = Constraint(expr= m.x38 - m.b173 <= 0)
m.c87 = Constraint(expr= m.x39 - m.b174 <= 0)
m.c88 = Constraint(expr= m.x40 - m.b175 <= 0)
m.c89 = Constraint(expr= m.x41 - m.b176 <= 0)
m.c90 = Constraint(expr= m.x42 - m.b177 <= 0)
m.c91 = Constraint(expr= m.x43 - m.b178 <= 0)
m.c92 = Constraint(expr= m.x44 - m.b179 <= 0)
m.c93 = Constraint(expr= m.x45 - m.b180 <= 0)
m.c94 = Constraint(expr= m.x46 - m.b181 <= 0)
m.c95 = Constraint(expr= m.x47 - m.b182 <= 0)
m.c96 = Constraint(expr= m.x48 - m.b183 <= 0)
m.c97 = Constraint(expr= m.x49 - m.b184 <= 0)
m.c98 = Constraint(expr= m.x50 - m.b185 <= 0)
m.c99 = Constraint(expr= m.x51 - m.b186 <= 0)
m.c100 = Constraint(expr= m.x52 - m.b187 <= 0)
m.c101 = Constraint(expr= m.x53 - m.b188 <= 0)
m.c102 = Constraint(expr= m.x54 - m.b189 <= 0)
m.c103 = Constraint(expr= m.x55 - m.b190 <= 0)
m.c104 = Constraint(expr= m.x56 - m.b191 <= 0)
m.c105 = Constraint(expr= m.x57 - m.b192 <= 0)
m.c106 = Constraint(expr= m.x58 - m.b193 <= 0)
m.c107 = Constraint(expr= m.x59 - m.b194 <= 0)
m.c108 = Constraint(expr= m.x60 - m.b195 <= 0)
m.c109 = Constraint(expr= m.x61 - m.b196 <= 0)
m.c110 = Constraint(expr= m.x62 - m.b197 <= 0)
m.c111 = Constraint(expr= m.x63 - m.b198 <= 0)
m.c112 = Constraint(expr= m.x64 - m.b199 <= 0)
m.c113 = Constraint(expr= m.x65 - m.b200 <= 0)
m.c114 = Constraint(expr= m.x66 - m.b201 <= 0)
m.c115 = Constraint(expr= m.x67 - m.b202 <= 0)
m.c116 = Constraint(expr= m.x68 - m.b203 <= 0)
m.c117 = Constraint(expr= m.x69 - m.b204 <= 0)
m.c118 = Constraint(expr= m.x70 - m.b205 <= 0)
m.c119 = Constraint(expr= m.x71 - m.b206 <= 0)
m.c120 = Constraint(expr= m.x72 - m.b207 <= 0)
m.c121 = Constraint(expr= m.x73 - m.b208 <= 0)
m.c122 = Constraint(expr= m.x74 - m.b209 <= 0)
m.c123 = Constraint(expr= m.x75 - m.b210 <= 0)
m.c124 = Constraint(expr= m.x76 - m.b211 <= 0)
m.c125 = Constraint(expr= m.x77 - m.b212 <= 0)
m.c126 = Constraint(expr= m.x78 - m.b213 <= 0)
m.c127 = Constraint(expr= m.x79 - m.b214 <= 0)
m.c128 = Constraint(expr= m.x80 - m.b215 <= 0)
m.c129 = Constraint(expr= m.x81 - m.b216 <= 0)
m.c130 = Constraint(expr= m.x82 - m.b217 <= 0)
m.c131 = Constraint(expr= m.x83 - m.b218 <= 0)
m.c132 = Constraint(expr= m.x84 - m.b219 <= 0)
m.c133 = Constraint(expr= m.x85 - m.b220 <= 0)
m.c134 = Constraint(expr= m.x86 - m.b221 | |
"""
Sweeps all the apple pick bagfiles and saves all the plots of a certain variable into a pdf,
for a certain label (e.g. Successful or Failure)
Created by: <EMAIL>
References:
https://stackoverflow.com/questions/38938454/python-saving-multiple-subplot-figures-to-pdf
"""
# ... System related packages
import os
import time
from os.path import exists
from tqdm import tqdm
# ... File related packages
import pandas as pd
import csv
import bagpy
from bagpy import bagreader
# ... Math related packages
import math
import numpy as np
from scipy.ndimage.filters import gaussian_filter, median_filter
# ... Plot related packages
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
# sns.set() # Setting seaborn as default style even if use only matplotlib
plt.close('all')
def broken_axes(plot_number, axrray, time, variables, legends, e, f, g, h, title, y_label, label, y_lim_max, y_lim_min):
"""
Creates a plot with two subplots and a break in the x-axis. This is useful when a plot is very long and
you only need to focus on certain areas... in this case two: Grasp and Pick
Reference: https://stackoverflow.com/questions/32185411/break-in-x-axis-of-matplotlib
:param axrray:
:param plot_number:
:param y_label:
:param label: Label of the apple pick: Successful or Failure
:param time:
:param variables:
:param legends:
:param e:
:param f:
:param g:
:param h:
:return:
"""
# # fig, (ax, ax2) = plt.subplots(1, 2, sharey=True, facecolor='w', figsize=(16, 9))
# figu, (ax, ax2) = plt.subplots(1, 2, sharey=True, facecolor='w')
# Account for the fact that plot numbering starts at 1
pos = (plot_number - 1) % 4
# y_max is a reference number to know where to place the annotation within the plot
y_max = []
for i in range(len(variables)):
axrray[pos, 0].plot(time, variables[i], label=legends[i])
axrray[pos, 1].plot(time, variables[i], label=legends[i])
y_max.append(max(variables[i]))
ax = axrray[pos, 0]
ax2 = axrray[pos, 1]
# Place the labels 'Grasp' and 'Pick' at the top of the pdf page
if pos == 0:
ax.legend()
ax2.legend()
ymax = 1.7 * max(y_max)
ax.annotate('Grasp', xy=(e, ymax))
ax2.annotate('Pick', xy=(g, ymax))
ax.grid()
ax2.grid()
ax.set_xlim(e - 0.5, f + 0.5)
ax2.set_xlim(g - 0.5, h + 0.5)
ax.set_ylim(y_lim_max, y_lim_min)
ax2.set_ylim(y_lim_max, y_lim_min)
ax.spines['right'].set_visible(False)
ax2.spines['left'].set_visible(False)
ax.set_ylabel(y_label)
ax.yaxis.tick_left()
# ax.tick_params(labelright='off')
ax2.tick_params(labelleft=False)
ax2.yaxis.tick_right()
# d = .015
d = .02
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs)
ax.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)
kwargs.update(transform=ax2.transAxes)
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)
ax2.plot((-d, +d), (-d, +d), **kwargs)
# Place the label 'Time' at the bottom of the pdf page
if pos == 3:
plt.xlabel('Elapsed time [sec]')
# plt.suptitle(title + ' ' + f'$\\bf{label}$')
ax.set_title(title + ' ' + f'$\\bf{{{label}}}$', size=8, loc='left')
# # plt.savefig('plots/' + title + ' ' + label + '.png')
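# Minimal usage sketch (hypothetical variable names), matching the 4x2 axes grid this
# function indexes into (axrray[pos, 0] / axrray[pos, 1]):
#   fig, axarray = plt.subplots(4, 2, figsize=(8.5, 11))
#   broken_axes(1, axarray, elapsed, [net_force], ['net force'],
#               e, f, g, h, 'apple_pick_1', 'Force [N]', 'Successful', 0, 30)
# The left column zooms on the grasp window [e-0.5, f+0.5] and the right column on the
# pick window [g-0.5, h+0.5]; four calls (plot_number 1 to 4) fill one pdf page.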
def elapsed_time(variable, time_stamp):
"""
Simplifies the time axis by subtracting the initial time.
This is useful because the time stamps are usually given in a long format (i.e. on the order of 1e9)
:param variable: Reference variable to obtain the size of the time array
:param time_stamp: The time stamp array that is going to be simplified
:return: Simplified time as Elapsed Time
"""
elapsed = [None] * len(variable)
for i in range(len(variable)):
elapsed[i] = time_stamp[i] - time_stamp[0]
return elapsed
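# Example: elapsed_time([None]*3, [1624567800.0, 1624567800.5, 1624567802.0])
#          -> [0.0, 0.5, 2.0]
# The first argument is only used for its length; the ROS time stamps (~1.6e9 s)
# are shifted so the trial starts at t = 0.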
def event_times(trial_events_elapsed_time, event_indexes, f1, f2, f3, arm):
"""
Finds the times at which the hand's servos and the arm's motors start and stop moving.
These instants are important because they correspond to the periods when the Grasping and Pick happen.
Therefore, we would focus on the data within these values, and disregard the rest.
:param trial_events_elapsed_time:
:param event_indexes:
:param f1: Finger 1 [time, speed]
:param f2: Finger 2 [time, speed]
:param f3: Finger 3 [time, speed]
:param arm: Arm Joints' [time, speed]
:return: All the special instants: open hand, closed hand, move arm, stop arm
"""
# Initial Open Hand Event
open_hand_event_index = event_indexes[1]
open_hand_event_time = trial_events_elapsed_time[open_hand_event_index]
f1_state_elapsed_time = f1[0]
f1_state_speed = f1[1]
f2_state_elapsed_time = f2[0]
f2_state_speed = f2[1]
f3_state_elapsed_time = f3[0]
f3_state_speed = f3[1]
arm_joints_elapsed_time = arm[0]
joint_0_spd = arm[1]
joint_1_spd = arm[2]
joint_2_spd = arm[3]
joint_3_spd = arm[4]
joint_4_spd = arm[5]
joint_5_spd = arm[6]
if len(event_indexes) == 2:
# This was the case of real_apple_pick11
pulling_apple_event_index = event_indexes[1]
final_open_hand_event_index = event_indexes[1]
closing_hand_event_index = event_indexes[1]
elif len(event_indexes) == 4:
# This was the case of the real_apple_pick 1 to 10
pulling_apple_event_index = event_indexes[3]
final_open_hand_event_index = event_indexes[3]
closing_hand_event_index = event_indexes[2]
elif len(event_indexes) == 5:
# This was the case of the real_apple_pick 12 to 33
pulling_apple_event_index = event_indexes[3]
final_open_hand_event_index = event_indexes[4]
closing_hand_event_index = event_indexes[2]
elif len(event_indexes) == 6:
# This was the case of the real_apple_pick 34 to 77
pulling_apple_event_index = event_indexes[3]
final_open_hand_event_index = event_indexes[5]
closing_hand_event_index = event_indexes[2]
elif len(event_indexes) == 3:
# This is for the last improvement
pulling_apple_event_index = event_indexes[0]
final_open_hand_event_index = event_indexes[1]
closing_hand_event_index = event_indexes[2]
# Be careful when mapping ROS event indexes onto the hand's, because they are not the same
pulling_apple_event_time = trial_events_elapsed_time[pulling_apple_event_index]
final_open_hand_event_time = trial_events_elapsed_time[final_open_hand_event_index]
closing_hand_event_time = trial_events_elapsed_time[closing_hand_event_index]
a = open_hand_event_time
b = closing_hand_event_time
c = pulling_apple_event_time
d = final_open_hand_event_time
# Servos Start Moving Event
# Find the instance when the fingers' motors start moving (index and value)
# print('Point to start evaluating: ', closing_hand_event_time)
i1, e1 = find_instance(f1_state_speed, f1_state_elapsed_time, 0.01, closing_hand_event_time, 'starts')
i2, e2 = find_instance(f2_state_speed, f2_state_elapsed_time, 0.01, closing_hand_event_time, 'starts')
i3, e3 = find_instance(f3_state_speed, f3_state_elapsed_time, 0.01, closing_hand_event_time, 'starts')
e = min(e1, e2, e3)
# print('\nFinger servos start moving at: %.2f, %.2f and %.2f ' % (e1, e2, e3))
# print('The time delay between event and servo moving is: %.2f' % (e - b))
# Servos Stop Moving Event
# Find the instance when the finger's motors stop indeed moving
j1, f1 = find_instance(f1_state_speed, f1_state_elapsed_time, 0.01, e, 'stops')
j2, f2 = find_instance(f2_state_speed, f2_state_elapsed_time, 0.01, e, 'stops')
j3, f3 = find_instance(f3_state_speed, f3_state_elapsed_time, 0.01, e, 'stops')
f = max(f1, f2, f3)
# print('Finger servos stop moving at: %.2f, %.2f and %.2f' % (f1, f2, f3))
if len(event_indexes) == 4:
c = f
k0, g0 = find_instance(joint_0_spd, arm_joints_elapsed_time, 0.01, c, 'starts')
k1, g1 = find_instance(joint_1_spd, arm_joints_elapsed_time, 0.01, c, 'starts')
k2, g2 = find_instance(joint_2_spd, arm_joints_elapsed_time, 0.01, c, 'starts')
k3, g3 = find_instance(joint_3_spd, arm_joints_elapsed_time, 0.01, c, 'starts')
k4, g4 = find_instance(joint_4_spd, arm_joints_elapsed_time, 0.01, c, 'starts')
k5, g5 = find_instance(joint_5_spd, arm_joints_elapsed_time, 0.01, c, 'starts')
g = min(g0, g1, g2, g3, g4, g5)
# print(
# "The times at which the UR5 joints start are: %.2f, %.2f, %2.f, %2.f, %.2f and %.2f" % (g0, g1, g2, g3, g4, g5))
# print('\nUR5 starts moving at: %.2f ' % g)
if len(event_indexes) == 4:
c = g
k = max(g0, g1, g2, g3, g4, g5)
# print("The values of k are: %.2f, %.2f, %2.f, %2.f, %.2f and %.2f" % (k0, k1, k2, k3, k4, k5))
# Arm Stops pulling apple
l0, h0 = find_instance(joint_0_spd, arm_joints_elapsed_time, 0.001, g, 'stops')
l1, h1 = find_instance(joint_1_spd, arm_joints_elapsed_time, 0.001, g, 'stops')
l2, h2 = find_instance(joint_2_spd, arm_joints_elapsed_time, 0.001, g, 'stops')
l3, h3 = find_instance(joint_3_spd, arm_joints_elapsed_time, 0.001, g, 'stops')
l4, h4 = find_instance(joint_4_spd, arm_joints_elapsed_time, 0.001, g, 'stops')
l5, h5 = find_instance(joint_5_spd, arm_joints_elapsed_time, 0.001, g, 'stops')
h = max(h0, h1, h2, h3, h4, h5)
# print(
# "The times at which the UR5 joints stop are: %.2f, %.2f, %2.f, %2.f, %.2f and %.2f" % (h0, h1, h2, h3, h4, h5))
# print('UR5 stops moving at: %.2f' % h)
# A simpler approach: just use the state events.
# Grasp Events
start_grasp_apple_event_index = event_indexes[0]
e = trial_events_elapsed_time[start_grasp_apple_event_index]
f = e + 2.0
# Pick Events
start_pulling_apple_event_index = event_indexes[1]
finish_pulling_apple_event_index = event_indexes[2]
g = trial_events_elapsed_time[start_pulling_apple_event_index]
h = trial_events_elapsed_time[finish_pulling_apple_event_index]
return a, b, c, d, e, f, g, h
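# Summary of the returned instants (as computed above): a = initial open-hand event,
# b = closing-hand event, c = start of the apple pull, d = final open-hand event,
# [e, f] = grasp window (grasp event to grasp event + 2 s),
# [g, h] = pick window (start to finish of the pulling event).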
def filter_variables(variables, parameter):
"""
Applies a median filter to each array in a list of lists, since the built-in filters don't handle nested lists directly
"""
# Median Filter
variables_filtered = []
for i in range(len(variables)):
variable_filtered = median_filter(variables[i], parameter)
variables_filtered.append(variable_filtered)
return variables_filtered
def find_instance(array, time_array, threshold, initial_time, case='starts'):
"""
There are also some events that are important to spot, such as when the fingers actually start moving.
Since these are not published by the hand, we'll find those instances by finding | |
#fd[p].append("NAi1")
fd[p].append('')
else:
#fd[p].append("emmean nai")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['pec50mean']:
fd[p].append(v[s][gf]['pec50mean'][sf])
else:
#fd[p].append("NAi1")
fd[p].append('')
else:
#fd[p].append("pec50mean nai")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['emsem']:
fd[p].append(v[s][gf]['emsem'][sf])
else:
#fd[p].append("NAi1")
fd[p].append('')
else:
#fd[p].append("emsem nai")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['pec50sem']:
fd[p].append(v[s][gf]['pec50sem'][sf])
else:
#fd[p].append("NAi1")
fd[p].append('')
else:
#fd[p].append("pec50sem nai")
fd[p].append('')
# Loop over Bouvier
s = 'Bouvier'
for gf in distinct_g_families:
if gf in v[s]:
if v[s][gf]['max'] > threshold_primary_bouvier:
fd[p].append("primary")
elif v[s][gf]['max'] > threshold_secondary_bouvier:
fd[p].append("secondary")
else:
fd[p].append("NC")
else:
fd[p].append('NA') # This last empty one means not-available, NOT no-coupling
# Last bit adds values to subfamilies (i.e. subtypes)
for gf, sfs in distinct_g_subunit_families.items():
for sf in sfs:
if gf in v[s]:
if sf in v[s][gf]['emdn']:
fd[p].append(v[s][gf]['emdn'][sf])
else:
#fd[p].append("NAb1")
fd[p].append('')
else:
#fd[p].append("emdn nab")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['pec50dn']:
fd[p].append(v[s][gf]['pec50dn'][sf])
else:
#fd[p].append("NAb1")
fd[p].append('')
else:
#fd[p].append("pec50dn nab")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['emmean']:
fd[p].append(v[s][gf]['emmean'][sf])
else:
#fd[p].append("NAb1")
fd[p].append('')
else:
#fd[p].append("emmean nab")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['pec50mean']:
fd[p].append(v[s][gf]['pec50mean'][sf])
else:
#fd[p].append("NAb1")
fd[p].append('')
else:
#fd[p].append("pec50mean nab")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['emsem']:
fd[p].append(v[s][gf]['emsem'][sf])
else:
#fd[p].append("NAb1")
fd[p].append('')
else:
#fd[p].append("emsem nab")
fd[p].append('')
if gf in v[s]:
if sf in v[s][gf]['pec50sem']:
fd[p].append(v[s][gf]['pec50sem'][sf])
else:
#fd[p].append("NAb1")
fd[p].append('')
else:
#fd[p].append("pec50sem nab")
fd[p].append('')
# max Values for Gs, Gi/Go, Gq/G11, G12/13 and source Inoue
s = 'Aska'
for gf in distinct_g_families:
if gf in v[s]:
# fd[p].append(v[s][gf]['max'] + 100)
fd[p].append(v[s][gf]['max'])
else:
#fd[p].append("NAi max")
fd[p].append('')
# max Values for Gs, Gi/Go, Gq/G11, G12/13 and source Bouvier
s = 'Bouvier'
for gf in distinct_g_families:
if gf in v[s]:
#fd[p].append(v[s][gf]['max'] + 200)
fd[p].append(v[s][gf]['max'])
else:
#fd[p].append("NAb max")
fd[p].append('')
return fd
def GProtein(request, dataset="GuideToPharma"):
name_of_cache = 'gprotein_statistics_{}'.format(dataset)
context = cache.get(name_of_cache)
if context == None:
context = OrderedDict()
i = 0
gproteins = ProteinGProtein.objects.all().prefetch_related('proteingproteinpair_set')
slug_translate = {'001': "ClassA", '002': "ClassB1", '004': "ClassC", '006': "ClassF"}
selectivitydata = {}
for slug in slug_translate.keys():
jsondata = {}
for gp in gproteins:
# ps = gp.proteingproteinpair_set.all()
ps = gp.proteingproteinpair_set.filter(protein__family__slug__startswith=slug,
source=dataset).prefetch_related('protein')
# print(ps,len(ps))
if ps:
jsondata[str(gp)] = []
for p in ps:
if dataset == "Aska" and p.log_rai_mean < -1:
continue
if str(p.protein.entry_name).split('_')[0].upper() not in selectivitydata:
selectivitydata[str(p.protein.entry_name).split('_')[0].upper()] = []
selectivitydata[str(p.protein.entry_name).split('_')[0].upper()].append(str(gp))
# print(p.protein.family.parent.parent.parent)
jsondata[str(gp)].append(str(p.protein.entry_name) + '\n')
jsondata[str(gp)] = ''.join(jsondata[str(gp)])
context[slug_translate[slug]] = jsondata
context["selectivitydata"] = selectivitydata
cache.set(name_of_cache, context, 60 * 60 * 24 * 7) # seven days timeout on cache
return render(request,
'signprot/gprotein.html',
context
)
# @cache_page(60*60*24*2) # 2 days caching
def couplings(request, template_name='signprot/coupling_browser.html'):
"""
Presents coupling data between Receptors and G-proteins.
Data coming from Guide to Pharmacology, <NAME> and <NAME>
"""
context = OrderedDict()
threshold_primary = 0.5 # -0.1
threshold_secondary = 0.01 # -1
proteins = Protein.objects.filter(sequence_type__slug='wt',
family__slug__startswith='00',
species__common_name='Human').all().prefetch_related('family')
data = {}
class_names = {}
family_names = {}
for p in proteins:
p_class = p.family.slug.split('_')[0]
if p_class not in class_names:
class_names[p_class] = p.family.parent.parent.parent.name
family_names[p_class] = p.family.parent.name
p_class_name = class_names[p_class].replace('Class','').strip()
p_family_name = family_names[p_class].replace('receptors','').strip()
p_accession = p.accession
data[p.entry_short()] = {'class': p_class_name,
'family': p_family_name,
'accession': p_accession,
'pretty': p.short(),
'GuideToPharma': {},
'Aska': {},
'Bouvier': {}}
distinct_g_families = []
distinct_g_subunit_families = {}
distinct_sources = ['GuideToPharma', 'Aska', 'Bouvier']
couplings = ProteinGProteinPair.objects.all().prefetch_related('protein',
'g_protein_subunit',
'g_protein')
for c in couplings:
p = c.protein.entry_short()
s = c.source
t = c.transduction
m = c.emax_dnorm
gf = c.g_protein.name
gf = gf.replace(" family", "")
if gf not in distinct_g_families:
distinct_g_families.append(gf)
distinct_g_subunit_families[gf] = []
if c.g_protein_subunit:
g = c.g_protein_subunit.entry_name
g = g.replace("_human", "")
if g not in distinct_g_subunit_families[gf]:
distinct_g_subunit_families[gf].append(g)
distinct_g_subunit_families[gf] = sorted(distinct_g_subunit_families[gf])
if s not in data[p]:
data[p][s] = {}
if gf not in data[p][s]:
data[p][s][gf] = {}
# If transduction in GuideToPharma data
if t:
data[p][s][gf] = t
else:
if 'subunits' not in data[p][s][gf]:
data[p][s][gf] = {'subunits': {}, 'best': 0.00}
if m is None:
continue
data[p][s][gf]['subunits'][g] = round(Decimal(m), 2)
if round(Decimal(m), 2) == -0.00:
data[p][s][gf]['subunits'][g] = 0.00
# keep the highest value seen so far in 'best'
if m > data[p][s][gf]['best']:
data[p][s][gf]['best'] = round(Decimal(m), 2)
fd = {} # final data
# distinct_g_families = sorted(distinct_g_families)
distinct_g_families = ['Gs', 'Gi/Go', 'Gq/G11', 'G12/G13']
distinct_g_subunit_families = OrderedDict(
[('Gs', ['gnas2', 'gnal']),
('Gi/Go', ['gnai1', 'gnai2', 'gnai3', 'gnao', 'gnaz']),
('Gq/G11', ['gnaq', 'gna11', 'gna14', 'gna15']),
('G12/G13', ['gna12', 'gna13'])])
# This for loop, which could arguably be a function of its own (or a method of a Couplings class),
# merges the GuideToPharma, Aska and Bouvier data sets into one row per receptor.
for p, v in data.items():
fd[p] = [v['class'], v['family'], v['accession'], p, v['pretty']]
s = 'GuideToPharma'
# Merge
for gf in distinct_g_families:
values = []
if 'GuideToPharma' in v and gf in v['GuideToPharma']:
values.append(v['GuideToPharma'][gf])
if 'Aska' in v and gf in v['Aska']:
best = v['Aska'][gf]['best']
if best > threshold_primary:
values.append('primary')
elif best > threshold_secondary:
values.append('secondary')
if 'Bouvier' in v and gf in v['Bouvier']:
best = v['Bouvier'][gf]['best']
if best > threshold_primary:
values.append('primary')
elif best > threshold_secondary:
values.append('secondary')
if 'primary' in values:
fd[p].append('primary')
elif 'secondary' in values:
fd[p].append('secondary')
else:
fd[p].append('')
s = 'GuideToPharma'
# First loop over GuideToPharma
for gf in distinct_g_families:
if gf in v[s]:
fd[p].append(v[s][gf])
else:
fd[p].append("")
s = 'Aska'
for gf in distinct_g_families:
if gf in v[s]:
if v[s][gf]['best'] > threshold_primary:
fd[p].append("primary")
elif v[s][gf]['best'] > threshold_secondary:
fd[p].append("secondary")
else:
fd[p].append("No coupling")
else:
fd[p].append("")
s = 'Bouvier'
for gf in distinct_g_families:
if gf in v[s]:
if v[s][gf]['best'] > threshold_primary:
fd[p].append("primary")
elif v[s][gf]['best'] > threshold_secondary:
fd[p].append("secondary")
else:
fd[p].append("No coupling")
else:
fd[p].append("")
for gf, sfs in distinct_g_subunit_families.items():
for sf in sfs:
if gf in v[s]:
if sf in v[s][gf]['subunits']:
fd[p].append(v[s][gf]['subunits'][sf])
else:
fd[p].append("")
else:
fd[p].append("")
context['data'] = fd
context['distinct_gf'] = distinct_g_families
context['distinct_sf'] = distinct_g_subunit_families
return render(request,
template_name, context
)
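# Note (added; illustrative reading of the merge above, not part of the original view): each fd[p] row
# is a flat list laid out as [class, receptor family, accession, entry name, pretty name], followed by
# the merged primary/secondary call per G family (Gs, Gi/Go, Gq/G11, G12/G13), the GuideToPharma
# transduction per family, the Aska and Bouvier primary/secondary calls per family, and finally the
# Bouvier per-subunit emax_dnorm values in distinct_g_subunit_families order.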
@cache_page(60 * 60 * 24 * 2)
def familyDetail(request, slug):
# get family
pf = ProteinFamily.objects.get(slug=slug)
# get family list
ppf = pf
families = [ppf.name]
while ppf.parent.parent:
families.append(ppf.parent.name)
ppf = ppf.parent
families.reverse()
# number of proteins
proteins = Protein.objects.filter(family__slug__startswith=pf.slug, sequence_type__slug='wt')
no_of_proteins = proteins.count()
no_of_human_proteins = Protein.objects.filter(family__slug__startswith=pf.slug, species__id=1,
sequence_type__slug='wt').count()
list_proteins = list(proteins.values_list('pk', flat=True))
# get structures of this family
structures = SignprotStructure.objects.filter(protein__family__slug__startswith=slug)
complex_structures = SignprotComplex.objects.filter(protein__family__slug__startswith=slug)
mutations = MutationExperiment.objects.filter(protein__in=proteins).prefetch_related('residue__generic_number',
'exp_qual', 'ligand')
mutations_list = {}
for mutation in mutations:
if not mutation.residue.generic_number: continue  # can't map residues without display numbers
if mutation.residue.generic_number.label not in mutations_list: mutations_list[
mutation.residue.generic_number.label] = []
if mutation.ligand:
ligand = mutation.ligand.name
else:
ligand = ''
if mutation.exp_qual:
qual = mutation.exp_qual.qual
else:
qual = ''
mutations_list[mutation.residue.generic_number.label].append(
[mutation.foldchange, ligand.replace("'", "\\'"), qual])
interaction_list = {} ###FIXME - always empty
try:
pc = ProteinConformation.objects.get(protein__family__slug=slug, protein__sequence_type__slug='consensus')
except ProteinConformation.DoesNotExist:
try:
pc = ProteinConformation.objects.get(protein__family__slug=slug, protein__species_id=1,
protein__sequence_type__slug='wt')
except:
pc = None
p = pc.protein if pc else None
residues = Residue.objects.filter(protein_conformation=pc).order_by('sequence_number').prefetch_related(
'protein_segment', 'generic_number', 'display_generic_number')
jsondata = {}
jsondata_interaction = {}
for r in residues:
if r.generic_number:
if r.generic_number.label in mutations_list:
jsondata[r.sequence_number] = [mutations_list[r.generic_number.label]]
if r.generic_number.label in interaction_list:
jsondata_interaction[r.sequence_number] = interaction_list[r.generic_number.label]
# process residues and return them in chunks of 10
# this is done for easier scaling on smaller screens
chunk_size = 10
r_chunks = []
r_buffer = []
last_segment = False
border = False
title_cell_skip = 0
for i, r in enumerate(residues):
# title of segment to be written out for the first residue in each segment
segment_title = False
# keep track of last residues segment (for marking borders)
if r.protein_segment.slug != last_segment:
last_segment = r.protein_segment.slug
border = True
# if on a border, is there room to write out the title? If not, write title in next chunk
if i == 0 or (border and len(last_segment) <= (chunk_size - i % chunk_size)):
segment_title = True
border = False
title_cell_skip += len(last_segment) # skip cells following title (which has colspan > 1)
if i and i % chunk_size == 0:
r_chunks.append(r_buffer)
r_buffer = []
r_buffer.append((r, segment_title, title_cell_skip))
# update cell skip counter
if title_cell_skip > 0:
title_cell_skip -= 1
if r_buffer:
r_chunks.append(r_buffer)
context = {'pf': pf, 'families': families, 'structures': structures, 'no_of_proteins': no_of_proteins,
'no_of_human_proteins': no_of_human_proteins, 'mutations': mutations, 'r_chunks': r_chunks,
'chunk_size': chunk_size, 'p': p, 'complex_structures': complex_structures}
return render(request,
'signprot/family_details.html',
context
)
@cache_page(60 * 60 * 24 * 2)
def Ginterface(request, protein=None):
residuelist = Residue.objects.filter(protein_conformation__protein__entry_name=protein).prefetch_related(
'protein_segment', 'display_generic_number', 'generic_number')
SnakePlot = DrawSnakePlot(
residuelist, "Class A (Rhodopsin)", protein, nobuttons=1)
# TEST
gprotein_residues
expected
@pytest.mark.django_db
@pytest.mark.parametrize(
"dry_run, adjustment_type, adjustment_amount, expected_rent, expected_amount_left",
[
# Save amount left
# Discount
(False, RentAdjustmentType.DISCOUNT, 100, Decimal(1827), Decimal(0)),
(False, RentAdjustmentType.DISCOUNT, 10000, Decimal(0), Decimal(8073)),
# Increase
(False, RentAdjustmentType.INCREASE, 100, Decimal(2027), Decimal(0)),
(False, RentAdjustmentType.INCREASE, 10000, Decimal(11927), Decimal(0)),
# Don't save amount left
# Discount
(True, RentAdjustmentType.DISCOUNT, 100, Decimal(1827), Decimal(100)),
(True, RentAdjustmentType.DISCOUNT, 10000, Decimal(0), Decimal(10000)),
# Increase
(True, RentAdjustmentType.INCREASE, 100, Decimal(2027), Decimal(100)),
(True, RentAdjustmentType.INCREASE, 10000, Decimal(11927), Decimal(10000)),
],
)
def test_adjustment_type_amount_total(
lease_test_data,
rent_factory,
contract_rent_factory,
rent_adjustment_factory,
dry_run,
adjustment_type,
adjustment_amount,
expected_rent,
expected_amount_left,
):
lease = lease_test_data["lease"]
rent = rent_factory(
lease=lease,
cycle=RentCycle.JANUARY_TO_DECEMBER,
due_dates_type=DueDatesType.FIXED,
due_dates_per_year=1,
)
contract_rent = contract_rent_factory(
rent=rent,
intended_use_id=1,
amount=Decimal(100),
period=PeriodType.PER_YEAR,
base_amount=Decimal(100),
base_amount_period=PeriodType.PER_YEAR,
)
rent_adjustment = rent_adjustment_factory(
rent=rent,
intended_use=contract_rent.intended_use,
type=adjustment_type,
start_date=None,
end_date=None,
amount_type=RentAdjustmentAmountType.AMOUNT_TOTAL,
full_amount=adjustment_amount,
amount_left=adjustment_amount,
)
range_start = date(year=2018, month=1, day=1)
range_end = date(year=2018, month=12, day=31)
calculation_result = rent.get_amount_for_date_range(
range_start, range_end, dry_run=dry_run
)
assert calculation_result.get_total_amount() == expected_rent
rent_adjustment = RentAdjustment.objects.get(pk=rent_adjustment.id)
assert rent_adjustment.amount_left == expected_amount_left
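# Note (added; inferred from the parametrize cases above): with a 100/year contract rent the
# index-adjusted annual rent in these fixtures works out to 1927, so an AMOUNT_TOTAL DISCOUNT of 100
# yields 1827 and an INCREASE of 100 yields 2027; a 10000 DISCOUNT floors the rent at 0 and leaves
# 8073 unused. With dry_run=True the adjustment is not consumed, so amount_left keeps its full value.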
@pytest.mark.django_db
@pytest.mark.parametrize(
"adjustment_start_date1, adjustment_end_date1, adjustment_type1, adjustment_amount1, "
"adjustment_start_date2, adjustment_end_date2, adjustment_type2, adjustment_amount2, expected",
[
(
date(year=2018, month=1, day=1), # Adjustment 1 start date
date(year=2018, month=6, day=30), # Adjustment 1 end date
RentAdjustmentType.DISCOUNT, # Adjustment 1 type
50, # Adjustment 1 amount
date(year=2018, month=7, day=1), # Adjustment 2 start date
date(year=2018, month=12, day=31), # Adjustment 2 end date
RentAdjustmentType.DISCOUNT, # Adjustment 2 type
50, # Adjustment 2 amount
Decimal(600),
),
(
date(year=2018, month=1, day=1), # Adjustment 1 start date
date(year=2018, month=12, day=31), # Adjustment 1 end date
RentAdjustmentType.DISCOUNT, # Adjustment 1 type
50, # Adjustment 1 amount
date(year=2018, month=1, day=1), # Adjustment 2 start date
date(year=2018, month=12, day=31), # Adjustment 2 end date
RentAdjustmentType.DISCOUNT, # Adjustment 2 type
50, # Adjustment 2 amount
Decimal(300),
),
(
date(year=2018, month=3, day=1), # Adjustment 1 start date
date(year=2018, month=8, day=31), # Adjustment 1 end date
RentAdjustmentType.DISCOUNT, # Adjustment 1 type
50, # Adjustment 1 amount
date(year=2018, month=5, day=1), # Adjustment 2 start date
date(year=2018, month=10, day=31), # Adjustment 2 end date
RentAdjustmentType.DISCOUNT, # Adjustment 2 type
50, # Adjustment 2 amount
Decimal(700),
),
],
)
def test_get_amount_for_date_range_adjustments_two_in_series(
lease_test_data,
rent_factory,
contract_rent_factory,
rent_adjustment_factory,
adjustment_start_date1,
adjustment_end_date1,
adjustment_type1,
adjustment_amount1,
adjustment_start_date2,
adjustment_end_date2,
adjustment_type2,
adjustment_amount2,
expected,
):
lease = lease_test_data["lease"]
rent = rent_factory(
lease=lease,
type=RentType.FIXED,
cycle=RentCycle.JANUARY_TO_DECEMBER,
due_dates_type=DueDatesType.FIXED,
due_dates_per_year=1,
)
contract_rent = contract_rent_factory(
rent=rent,
intended_use_id=1,
amount=Decimal(1200),
period=PeriodType.PER_YEAR,
base_amount=Decimal(100),
base_amount_period=PeriodType.PER_YEAR,
)
rent_adjustment_factory(
rent=rent,
intended_use=contract_rent.intended_use,
type=adjustment_type1,
start_date=adjustment_start_date1,
end_date=adjustment_end_date1,
amount_type=RentAdjustmentAmountType.PERCENT_PER_YEAR,
full_amount=adjustment_amount1,
)
rent_adjustment_factory(
rent=rent,
intended_use=contract_rent.intended_use,
type=adjustment_type2,
start_date=adjustment_start_date2,
end_date=adjustment_end_date2,
amount_type=RentAdjustmentAmountType.PERCENT_PER_YEAR,
full_amount=adjustment_amount2,
)
range_start = date(year=2018, month=1, day=1)
range_end = date(year=2018, month=12, day=31)
calculation_result = rent.get_amount_for_date_range(range_start, range_end)
assert calculation_result.get_total_amount() == expected
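# Note (added): the expected totals follow from a 1200/year (100/month) rent with 50% discounts:
# two non-overlapping half-year discounts remove 2 * 300 = 600 (total 600); two full-year 50%
# discounts compound to 25% of the original (total 300); and the Mar-Aug / May-Oct pair leaves
# 4 undiscounted, 4 singly and 4 doubly discounted months (400 + 200 + 100 = 700).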
@pytest.mark.django_db
@pytest.mark.parametrize(
"adjustment_start_date1, adjustment_end_date1, adjustment_type1, adjustment_amount1, "
"adjustment_start_date2, adjustment_end_date2, adjustment_type2, adjustment_amount2, expected",
[
(
date(year=2018, month=1, day=1), # Adjustment 1 start date
date(year=2018, month=6, day=30), # Adjustment 1 end date
RentAdjustmentType.DISCOUNT, # Adjustment 1 type
50, # Adjustment 1 amount
date(year=2018, month=7, day=1), # Adjustment 2 start date
date(year=2018, month=12, day=31), # Adjustment 2 end date
RentAdjustmentType.DISCOUNT, # Adjustment 2 type
50, # Adjustment 2 amount
Decimal(600),
),
(
date(year=2018, month=1, day=1), # Adjustment 1 start date
date(year=2018, month=12, day=31), # Adjustment 1 end date
RentAdjustmentType.DISCOUNT, # Adjustment 1 type
50, # Adjustment 1 amount
date(year=2018, month=1, day=1), # Adjustment 2 start date
date(year=2018, month=12, day=31), # Adjustment 2 end date
RentAdjustmentType.DISCOUNT, # Adjustment 2 type
50, # Adjustment 2 amount
Decimal(300),
),
(
date(year=2018, month=3, day=1), # Adjustment 1 start date
date(year=2018, month=8, day=31), # Adjustment 1 end date
RentAdjustmentType.DISCOUNT, # Adjustment 1 type
50, # Adjustment 1 amount
date(year=2018, month=5, day=1), # Adjustment 2 start date
date(year=2018, month=10, day=31), # Adjustment 2 end date
RentAdjustmentType.DISCOUNT, # Adjustment 2 type
50, # Adjustment 2 amount
Decimal(700),
),
],
)
def test_get_amount_for_date_range_adjustments_two_in_series_fixed_initial_year_rent(
lease_test_data,
rent_factory,
contract_rent_factory,
fixed_initial_year_rent_factory,
rent_adjustment_factory,
adjustment_start_date1,
adjustment_end_date1,
adjustment_type1,
adjustment_amount1,
adjustment_start_date2,
adjustment_end_date2,
adjustment_type2,
adjustment_amount2,
expected,
):
lease = lease_test_data["lease"]
rent = rent_factory(
lease=lease,
type=RentType.FIXED,
cycle=RentCycle.JANUARY_TO_DECEMBER,
due_dates_type=DueDatesType.FIXED,
due_dates_per_year=1,
)
fixed_initial_year_rent = fixed_initial_year_rent_factory(
rent=rent, intended_use_id=1, amount=Decimal(1200)
)
rent_adjustment_factory(
rent=rent,
intended_use=fixed_initial_year_rent.intended_use,
type=adjustment_type1,
start_date=adjustment_start_date1,
end_date=adjustment_end_date1,
amount_type=RentAdjustmentAmountType.PERCENT_PER_YEAR,
full_amount=adjustment_amount1,
)
rent_adjustment_factory(
rent=rent,
intended_use=fixed_initial_year_rent.intended_use,
type=adjustment_type2,
start_date=adjustment_start_date2,
end_date=adjustment_end_date2,
amount_type=RentAdjustmentAmountType.PERCENT_PER_YEAR,
full_amount=adjustment_amount2,
)
range_start = date(year=2018, month=1, day=1)
range_end = date(year=2018, month=12, day=31)
calculation_result = rent.get_amount_for_date_range(range_start, range_end)
assert calculation_result.get_total_amount() == expected
@pytest.mark.django_db
@pytest.mark.parametrize(
"adjustment_start_date1, adjustment_end_date1, adjustment_type1, adjustment_amount1, expected",
[
(
date(year=2018, month=1, day=1),
date(year=2018, month=6, day=30),
RentAdjustmentType.DISCOUNT,
50,
Decimal(900),
)
],
)
def test_fixed_initial_year_rent_amount_for_date_range(
lease_test_data,
rent_factory,
fixed_initial_year_rent_factory,
rent_adjustment_factory,
adjustment_start_date1,
adjustment_end_date1,
adjustment_type1,
adjustment_amount1,
expected,
):
lease = lease_test_data["lease"]
rent = rent_factory(
lease=lease,
type=RentType.FIXED,
cycle=RentCycle.JANUARY_TO_DECEMBER,
due_dates_type=DueDatesType.FIXED,
due_dates_per_year=1,
)
fixed_initial_year_rent = fixed_initial_year_rent_factory(
rent=rent,
intended_use_id=1,
start_date=date(year=2018, month=1, day=1),
end_date=date(year=2018, month=12, day=31),
amount=Decimal(1200),
)
rent_adjustment_factory(
rent=rent,
intended_use=fixed_initial_year_rent.intended_use,
type=adjustment_type1,
start_date=adjustment_start_date1,
end_date=adjustment_end_date1,
amount_type=RentAdjustmentAmountType.PERCENT_PER_YEAR,
full_amount=adjustment_amount1,
)
range_start = date(year=2018, month=1, day=1)
range_end = date(year=2018, month=12, day=31)
calculation_result = rent.fixed_initial_year_rent_amount_for_date_range(
fixed_initial_year_rent.intended_use, range_start, range_end
)
assert calculation_result.get_total_amount() == expected
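# Note (added): 1200/year with a 50% discount over January-June removes 0.5 * 600 = 300, leaving 900.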
@pytest.mark.django_db
@pytest.mark.parametrize(
"rent_start_date, rent_end_date, period_start_date, period_end_date, expected",
[
(None, None, None, None, True),
(
None,
None,
date(year=1990, month=1, day=1),
date(year=1990, month=1, day=1),
True,
),
(
None,
None,
date(year=2017, month=1, day=1),
date(year=2019, month=12, day=31),
True,
),
(
date(year=2017, month=1, day=1),
date(year=2019, month=12, day=31),
date(year=2017, month=1, day=1),
date(year=2019, month=12, day=31),
True,
),
(
date(year=2000, month=1, day=1),
date(year=2000, month=12, day=31),
date(year=1990, month=1, day=1),
date(year=2020, month=1, day=1),
True,
),
(
date(year=1990, month=1, day=1),
date(year=2020, month=1, day=1),
date(year=2000, month=1, day=1),
date(year=2000, month=12, day=31),
True,
),
(
date(year=2000, month=1, day=1),
date(year=2000, month=12, day=31),
date(year=1999, month=12, day=15),
date(year=2000, month=1, day=15),
True,
),
(
date(year=2000, month=1, day=1),
date(year=2000, month=12, day=31),
date(year=2000, month=1, day=15),
date(year=2000, month=2, day=15),
True,
),
(
date(year=2017, month=1, day=1),
date(year=2019, month=12, day=31),
date(year=2020, month=1, day=1),
date(year=2020, month=12, day=31),
False,
),
(
date(year=1990, month=1, day=1),
date(year=1990, month=1, day=1),
date(year=2020, month=1, day=1),
date(year=2020, month=12, day=31),
False,
),
(
date(year=1990, month=1, day=1),
date(year=1990, month=1, day=1),
date(year=1990, month=1, day=2),
date(year=1990, month=1, day=2),
False,
),
],
)
def test_is_active_on_period(
lease_test_data,
rent_factory,
rent_start_date,
rent_end_date,
period_start_date,
period_end_date,
expected,
):
lease = lease_test_data["lease"]
rent = rent_factory(
lease=lease,
cycle=RentCycle.JANUARY_TO_DECEMBER,
due_dates_type=DueDatesType.FIXED,
due_dates_per_year=1,
start_date=rent_start_date,
end_date=rent_end_date,
)
assert rent.is_active_on_period(period_start_date, period_end_date) == expected
@pytest.mark.django_db
@pytest.mark.parametrize(
"start_date1, end_date1, start_date2, end_date2, expected",
[
(
date(year=2017, month=1, day=1),
date(year=2019, month=12, day=31),
date(year=2017, month=1, day=1),
date(year=2019, month=12, day=31),
[],
),
(
date(year=2017, month=1, day=1),
date(year=2017, month=12, day=31),
date(year=2018, month=1, day=1),
date(year=2018, month=3, day=31),
[(date(2018, 4, 1), date(2018, 8, 31))],
),
(
date(year=2018, month=1, day=1),
date(year=2018, month=3, day=31),
date(year=2018, month=8, day=1),
date(year=2018, month=12, day=31),
[(date(2018, 4, 1), date(2018, 7, 31))],
),
(
date(year=2017, month=1, day=1),
date(year=2018, month=6, day=30),
date(year=2017, month=1, day=1),
date(year=2018, month=6, day=30),
[(date(2018, 7, 1), date(2018, 8, 31))],
),
(
date(year=2017, month=1, day=1),
date(year=2017, month=1, day=1),
date(year=2017, month=1, day=1),
date(year=2017, month=1, day=1),
[],
),
(
date(year=2019, month=1, day=1),
date(year=2019, month=1, day=1),
date(year=2019, month=1, day=1),
date(year=2019, month=1, day=1),
[],
),
(
date(year=2018, month=3, day=1),
date(year=2018, month=3, day=31),
date(year=2018, month=3, day=1),
date(year=2018, month=3, day=31),
[(date(2018, 4, 1), date(2018, 8, 31))],
),
],
)
def test_fixed_initial_year_rent_for_date_range_remaining_ranges(
lease_test_data,
rent_factory,
contract_rent_factory,
fixed_initial_year_rent_factory,
start_date1,
end_date1,
start_date2,
end_date2,
expected,
):
lease = lease_test_data["lease"]
rent = rent_factory(
lease=lease,
cycle=RentCycle.JANUARY_TO_DECEMBER,
due_dates_type=DueDatesType.FIXED,
due_dates_per_year=1,
)
contract_rent = contract_rent_factory(
rent=rent,
intended_use_id=1,
amount=Decimal(100),
period=PeriodType.PER_YEAR,
base_amount=Decimal(100),
base_amount_period=PeriodType.PER_YEAR,
)
fixed_initial_year_rent_factory(
rent=rent,
intended_use=contract_rent.intended_use,
amount=Decimal(100),
start_date=start_date1,
end_date=end_date1,
)
fixed_initial_year_rent_factory(
rent=rent,
intended_use=contract_rent.intended_use,
amount=Decimal(100),
start_date=start_date2,
end_date=end_date2,
)
range_start = date(year=2018, month=3, day=1)
range_end = date(year=2018, month=8, day=31)
calculation_result = rent.fixed_initial_year_rent_amount_for_date_range(
contract_rent.intended_use, range_start, range_end
)
assert calculation_result.remaining_ranges == expected
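# Note (added; illustrative): remaining_ranges lists the parts of the queried March-August 2018 window
# not covered by any fixed initial year rent, e.g. when the fixed rents end on 2018-03-31 the remainder
# is (2018-04-01, 2018-08-31); an empty list means the window is fully covered.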
@pytest.mark.django_db
@pytest.mark.parametrize(
"rent_cycle, due_dates_per_year, billing_period, expected",
[
(
RentCycle.JANUARY_TO_DECEMBER,
0,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.JANUARY_TO_DECEMBER,
1,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.JANUARY_TO_DECEMBER,
4,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.JANUARY_TO_DECEMBER,
12,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.JANUARY_TO_DECEMBER,
1,
(date(year=2017, month=1, day=1), date(year=2017, month=12, day=31)),
True,
),
(
RentCycle.JANUARY_TO_DECEMBER,
2,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=30)),
False,
),
(
RentCycle.JANUARY_TO_DECEMBER,
2,
(date(year=2017, month=7, day=1), date(year=2017, month=12, day=31)),
True,
),
(
RentCycle.APRIL_TO_MARCH,
0,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.APRIL_TO_MARCH,
1,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.APRIL_TO_MARCH,
4,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.APRIL_TO_MARCH,
12,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=1)),
False,
),
(
RentCycle.APRIL_TO_MARCH,
1,
(date(year=2017, month=1, day=1), date(year=2017, month=12, day=31)),
True,
),
(
RentCycle.APRIL_TO_MARCH,
2,
(date(year=2017, month=1, day=1), date(year=2017, month=6, day=30)),
False,
),
(
RentCycle.APRIL_TO_MARCH,
2,
(date(year=2017, month=7, day=1), date(year=2017, month=12, day=31)),
True,
),
],
)
def test_is_the_last_billing_period(
lease_test_data,
rent_factory,
rent_cycle,
due_dates_per_year,
billing_period,
expected,
):
lease = lease_test_data["lease"]
rent = rent_factory(lease=lease)
rent.cycle = rent_cycle
rent.start_date = date(year=2000, month=1, day=1)
rent.end_date = date(year=2030, month=1, day=1)
rent.due_dates_type
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"])
#
celldata = {}
for v_n in selected_v_ns:
#try:
Printcolor.green("\tInterpolating. grid: {} it: {} v_n: {} ".format(d3intclass.new_grid.grid_type, it, v_n))
celldata[str(v_n)] = d3intclass.get_int(it, v_n)
# except:
# celldata[str(v_n)] = np.empty(0,)
# Printcolor.red("\tFailed to interpolate. grid: {} it: {}v_n: {} ".format(d3intclass.new_grid.type, it, v_n))
xf = d3intclass.new_grid.get_int_grid("xf")
yf = d3intclass.new_grid.get_int_grid("yf")
zf = d3intclass.new_grid.get_int_grid("zf")
Printcolor.green("\tProducing vtk. it: {} v_ns: {} ".format(it, selected_v_ns))
gridToVTK(fpath, xf, yf, zf, cellData=celldata)
Printcolor.blue("\tDone. File is saved: {}".format(fpath))
else:
print_colored_string(
["task:", "vtk", "grid:", d3intclass.new_grid.grid_type, "it:", str(it), "v_ns:", selected_v_ns, ":",
"skipping"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "blue"])
#
# celldata = {}
# for v_n in selected_v_ns:
# try:
# print_colored_string(["task:", "int", "grid:", d3intclass.new_grid.type, "it:", str(it), "v_n:", v_n, ":", "interpolating"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"])
# celldata[str(v_n)] = d3intclass.get_int(it, v_n)
# except:
# print_colored_string(
# ["task:", "int", "grid:", d3intclass.new_grid.type, "it:", str(it), "v_n:", v_n, ":",
# "failed"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
# Printcolor.green("Data for v_ns:{} is interpolated and preapred".format(selected_v_ns))
# # producing the vtk file
# try:
# print_colored_string(
# ["task:", "vtk", "grid:", d3intclass.new_grid.type, "it:", str(it), "v_ns:", selected_v_ns, ":", "computing"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"])
# xf = d3intclass.new_grid.get_int_grid("xf")
# yf = d3intclass.new_grid.get_int_grid("yf")
# zf = d3intclass.new_grid.get_int_grid("zf")
#
# gridToVTK(fpath, xf, yf, zf, cellData=celldata)
# except:
# print_colored_string(
# ["task:", "int", "grid:", d3intclass.new_grid.type, "it:", str(it), "v_ns:", selected_v_ns, ":",
# "failed"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
# else:
# print_colored_string(["task:", "prof slice", "it:", "{}".format(it), "plane:", plane, ":", "skipping"],
# ["blue", "green", "blue", "green", "blue", "green", "", "blue"])
def d3_interpolate_mjenclosed(d3intclass, glob_its, glob_masks, outdir, rewrite=False):
# getting cylindrical grid (same for any iteration)
dphi_cyl = d3intclass.new_grid.get_int_grid("dphi_cyl")
dr_cyl = d3intclass.new_grid.get_int_grid("dr_cyl")
dz_cyl = d3intclass.new_grid.get_int_grid("dz_cyl")
r_cyl = d3intclass.new_grid.get_int_grid("r_cyl")
#
for it in glob_its:
sys.stdout.flush()
_outdir = outdir + str(it) + '/'
if not os.path.isdir(_outdir):
os.mkdir(_outdir)
#
for mask in glob_masks:
__outdir = _outdir + mask + '/'
if not os.path.isdir(__outdir):
os.mkdir(__outdir)
#
fpath = __outdir + __d3intmjfname__
#
if True:
#
rho = d3intclass.get_int(it, "rho") # [rho_NS, rho_ATM]
lapse = d3intclass.get_int(it, "lapse") # [lapse_BH, dummy1]
if mask == "disk":
rho_lims = MASK_STORE.disk_mask_setup["rho"]
lapse_lims = MASK_STORE.disk_mask_setup["lapse"]
rho_mask = (rho > rho_lims[0]) & (rho < rho_lims[1])
lapse_mask = lapse > lapse_lims[0] # > BH
elif mask == "remnant":
rho_lims = MASK_STORE.disk_mask_setup["rho"]
lapse_lims = MASK_STORE.disk_mask_setup["lapse"]
rho_mask = rho > rho_lims[1]
lapse_mask = lapse > lapse_lims[0] # > BH
else:
raise NameError("No method for mask: {}".format(mask))
#
tot_mask = rho_mask & lapse_mask
#
if np.sum(tot_mask.astype(int)) == 0:
print_colored_string(["task:", "MJ_encl", "it:", "{}".format(it), "mask:", mask, ":", "Mask=0"],
["blue", "green", "blue", "green", "blue", "green", "", "red"])
if (os.path.isfile(fpath) and rewrite) or not os.path.isfile(fpath):
if os.path.isfile(fpath): os.remove(fpath)
print_colored_string(["task:", "MJ_encl", "it:", "{}".format(it), "mask:", mask, ":", "computing"],
["blue", "green", "blue", "green", "blue", "green", "", "green"])
#
dens_cyl = d3intclass.get_int(it, "density")
ang_mom_cyl = d3intclass.get_int(it, "ang_mom")
ang_mom_flux_cyl = d3intclass.get_int(it, "ang_mom_flux")
#
dens_cyl[~tot_mask] = 0.
ang_mom_cyl[~tot_mask] = 0.
ang_mom_flux_cyl[~tot_mask] = 0.
#
I_rc = 2 * np.sum(dens_cyl * r_cyl ** 2 * dz_cyl * dphi_cyl, axis=(1, 2))
D_rc = 2 * np.sum(dens_cyl * dz_cyl * dphi_cyl, axis=(1, 2)) # integrate over phi,z
J_rc = 2 * np.sum(ang_mom_cyl * dz_cyl * dphi_cyl, axis=(1, 2)) # integrate over phi,z
Jf_rc = 2 * np.sum(ang_mom_flux_cyl * dz_cyl * dphi_cyl, axis=(1, 2))
#
ofile = open(fpath, "w")
ofile.write("# 1:rcyl 2:drcyl 3:M 4:J 5:Jf 6:I\n")
for i in range(r_cyl.shape[0]):
ofile.write("{} {} {} {} {} {}\n".format(r_cyl[i, 0, 0], dr_cyl[i, 0, 0],
D_rc[i], J_rc[i], Jf_rc[i], I_rc[i]))
ofile.close()
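# Note (added): the file written above is a whitespace-separated table with one '#' header line, so it
# can be read back with something like (sketch, not part of this pipeline):
#     import numpy as np
#     r, dr, M, J, Jf, I = np.loadtxt(fpath, unpack=True)
# giving the cylindrical radius, radial cell width, enclosed mass, angular momentum, angular-momentum
# flux and moment of inertia profiles.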
#
d3intclass.delete_for_it(it=it, except_v_ns=[], rm_masks=True, rm_comp=True, rm_prof=False)
sys.stdout.flush()
#
else:
print_colored_string(["task:", "MJ_encl", "it:", "{}".format(it), "mask:", mask, ":", "skipping"],
["blue", "green", "blue", "green", "blue", "green", "", "blue"])
# except KeyboardInterrupt:
# exit(1)
# except IOError:
# print_colored_string(["task:", "MJ_encl", "it:", "{}".format(it), ":", "IOError"],
# ["blue", "green", "blue", "green", "", "red"])
# except:
# print_colored_string(["task:", "MJ_encl", "it:", "{}".format(it), ":", "failed"],
# ["blue", "green", "blue", "green", "", "red"])
def plot_d3_prof_slices(d3class, glob_its, glob_v_ns, resdir, figdir='module_slices/', rewritefigs=False):
iterations = select_number(glob_its, d3class.list_iterations)
v_ns = select_string(glob_v_ns, __d3sliceplotvns__, for_all="all")
# tmerg = d1class.get_par("tmerger_gw")
i = 1
for it in iterations:
for rl in __d3sliceplotrls__:
for v_n in v_ns:
# --- Getting XZ data ---
try:
data_arr = d3class.get_data(it, rl, "xz", v_n)
x_arr = d3class.get_data(it, rl, "xz", "x")
z_arr = d3class.get_data(it, rl, "xz", "z")
def_dic_xz = {'task': 'colormesh', 'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": z_arr, "zarr": data_arr,
'position': (1, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {'location': 'right .04 .2', 'label': r'$\rho$ [geo]', # 'fmt': '%.1e',
'labelsize': 14,
'fontsize': 14},
'v_n_x': 'x', 'v_n_y': 'z', 'v_n': 'rho',
'xmin': None, 'xmax': None, 'ymin': None, 'ymax': None, 'vmin': 1e-10, 'vmax': 1e-4,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'mask': None, 'cmap': 'inferno_r', 'norm': "log",
'fancyticks': True,
'title': {"text": r'$t-t_{merg}:$' + r'${:.1f}$'.format(0), 'fontsize': 14},
'sharex': True, # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
except KeyError:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n,
":", "KeyError in getting xz {}".format(v_n)],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
except NameError:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n,
":", "NameError in getting xz {}".format(v_n)],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
continue
# --- Getting XY data ---
try:
data_arr = d3class.get_data(it, rl, "xy", v_n)
x_arr = d3class.get_data(it, rl, "xy", "x")
y_arr = d3class.get_data(it, rl, "xy", "y")
def_dic_xy = {'task': 'colormesh', 'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": y_arr, "zarr": data_arr,
'position': (2, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': 'rho',
'xmin': None, 'xmax': None, 'ymin': None, 'ymax': None, 'vmin': 1e-10, 'vmax': 1e-4,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'mask': None, 'cmap': 'inferno_r', 'norm': "log",
'fancyticks': True,
'title': {},
'sharex': False, # removes angular ticks
'fontsize': 14,
'labelsize': 14
}
except KeyError:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n,
":", "KeyError in getting xy {} ".format(v_n)],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
continue
except NameError:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n,
":", "NameError in getting xy {} ".format(v_n)],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
continue
# "Q_eff_nua", "Q_eff_nue", "Q_eff_nux"
if v_n in ["Q_eff_nua", "Q_eff_nue", "Q_eff_nux"]:
dens_arr = d3class.get_data(it, rl, "xz", "density")
data_arr = d3class.get_data(it, rl, "xz", v_n)
data_arr = data_arr / dens_arr
x_arr = d3class.get_data(it, rl, "xz", "x")
z_arr = d3class.get_data(it, rl, "xz", "z")
def_dic_xz['xarr'], def_dic_xz['yarr'], def_dic_xz['zarr'] = x_arr, z_arr, data_arr
#
dens_arr = d3class.get_data(it, rl, "xy", "density")
data_arr = d3class.get_data(it, rl, "xy", v_n)
data_arr = data_arr / dens_arr
x_arr = d3class.get_data(it, rl, "xy", "x")
y_arr = d3class.get_data(it, rl, "xy", "y")
def_dic_xy['xarr'], def_dic_xy['yarr'], def_dic_xy['zarr'] = x_arr, y_arr, data_arr
if v_n == 'rho':
pass
elif v_n == 'w_lorentz':
def_dic_xy['v_n'] = 'w_lorentz'
def_dic_xy['vmin'] = 1
def_dic_xy['vmax'] = 1.3
def_dic_xy['norm'] = None
def_dic_xz['v_n'] = 'w_lorentz'
def_dic_xz['vmin'] = 1
def_dic_xz['vmax'] = 1.3
def_dic_xz['norm'] = None
elif v_n == 'vol':
def_dic_xy['v_n'] = 'vol'
def_dic_xy['vmin'] = 1
def_dic_xy['vmax'] = 10
# def_dic_xy['norm'] = None
def_dic_xz['v_n'] = 'vol'
def_dic_xz['vmin'] = 1
def_dic_xz['vmax'] = 10
# def_dic_xz['norm'] = None
elif v_n == 'press':
def_dic_xy['v_n'] = 'press'
def_dic_xy['vmin'] = 1e-12
def_dic_xy['vmax'] = 1e-6
def_dic_xz['v_n'] = 'press'
def_dic_xz['vmin'] = 1e-12
def_dic_xz['vmax'] = 1e-6
elif v_n == 'eps':
def_dic_xy['v_n'] = 'eps'
def_dic_xy['vmin'] = 5e-3
def_dic_xy['vmax'] = 5e-1
def_dic_xz['v_n'] = 'eps'
def_dic_xz['vmin'] = 5e-3
def_dic_xz['vmax'] = 5e-1
elif v_n == 'lapse':
def_dic_xy['v_n'] = 'lapse'
def_dic_xy['vmin'] = 0.15
def_dic_xy['vmax'] = 1
def_dic_xy['norm'] = None
def_dic_xz['v_n'] = 'lapse'
def_dic_xz['vmin'] = 0.15
def_dic_xz['vmax'] = 1
def_dic_xz['norm'] = None
elif v_n == 'velx':
def_dic_xy['v_n'] = 'velx'
def_dic_xy['vmin'] = 0.01
def_dic_xy['vmax'] = 1.
# def_dic_xy['norm'] = None
def_dic_xz['v_n'] = 'velx'
def_dic_xz['vmin'] = 0.01
def_dic_xz['vmax'] = 1.
# def_dic_xz['norm'] = None
elif v_n == 'vely':
def_dic_xy['v_n'] = 'vely'
def_dic_xy['vmin'] = 0.01
def_dic_xy['vmax'] = 1.
# def_dic_xy['norm'] = None
def_dic_xz['v_n']
' or ", if True shlex
"""
if shlex is None:
matchobj = SHLEXRGX.match(colspecfieldstr)
shlex = bool(matchobj)
retval = None
if shlex:
shlexoutput = _shlex.split(colspecfieldstr, comments=False, posix=True)
if shlexoutput:
shlexword1 = shlexoutput[0]
retval = shlexword1
else:
raise ValueError(colspecfieldstr)
else:
retval = colspecfieldstr
return retval
def build_column_map(colspecstr):
"""
Args:
colspecstr (str or OrderedDict): a colspecstrstr column-key to type-callable mapping
Returns:
OrderedDict: (colkey, type_func) mappings "column_map" # TODO
"""
#
if not colspecstr:
return collections.OrderedDict()
if hasattr(colspecstr, 'items'):
return colspecstr
return collections.OrderedDict(
parse_colspecstr(colspecstr, default=unicode)
)
def get_list_from_str(str_, idelim=',', typefunc=int):
"""
Split a string of things separated by commas & cast/wrap with typefunc
Args:
str_ (str (.strip, .split)): string to split
idelim (str): string to split by ("input delimiter")
typefunc (callable): wrap results with this callable
Returns:
list: list of typefunc-casted values
"""
return [typefunc(x.strip()) for x in str_.split(idelim)]
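# Example (added; illustrative): get_list_from_str("1, 2, 3") returns [1, 2, 3], while
# get_list_from_str("a;b", idelim=';', typefunc=str) returns ['a', 'b'].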
def sort_by(iterable,
sortstr=None,
reverse=False,
col_map=None,
default_type=None,
default_value=None):
"""sort an iterable, cast to ``col_map.get(colkey, default_type)``,
and default to ``default_value``.
Args:
iterable (iterable): iterable of lines/rows
Kwargs:
sortstr (None, str): comma separated list of column index (``1,2,3``)
reverse (bool): (True, Descending), (False, Ascending) default: False
col_map (None, dict): dict mapping column n to a typefunc
default_type (None, callable): type callable (default: None)
default_value (\*): default N/A value for columns not specified
in col_map (default: None)
Returns:
list: sorted list of lines/rows
"""
# raise Exception()
def keyfunc_iter(obj, sortstr=sortstr, col_map=col_map):
"""Parse and yield column values according to ``sortstr`` and ``col_map``
Args:
obj (object): obj to sort (as from ``sorted(keyfunc=thisfunc)``)
sortstr (str): sort string of comma-separated columns
col_map (None, dict): dict mapping column n to a typefunc (default: None)
Yields:
object: typecasted column value
"""
if sortstr:
column_sequence = get_list_from_str(sortstr, typefunc=int)
else:
column_sequence = xrange(len(obj.result))
log.debug(('column_sequence', column_sequence))
if col_map is None:
col_map = {}
for n in column_sequence:
type_func = col_map.get(str(n), default_type)
retval = default_value
if n < len(obj.result):
colvalue = obj.result[n]
if type_func:
try:
retval = type_func(colvalue)
except ValueError as e:
e.args = (str(e) + "\n" + repr((type_func, colvalue)),)
raise
else:
retval = colvalue
else:
retval = default_value
yield retval
def keyfunc(obj, sortstr=sortstr, col_map=col_map):
"""key function (e.g. for ``sorted(key=keyfunc)``)
Args:
obj (PylineResult): ``obj.result = ['col1', 'col2', 'coln']``
Returns:
tuple: (col2, col0, col1)
"""
keyvalue = tuple(keyfunc_iter(obj, sortstr, col_map))
errdata = [
(('keyvalue', keyvalue),
('sortstr', sortstr))]
log.debug((errdata,))
return keyvalue
sorted_values = sorted(iterable,
key=keyfunc,
reverse=reverse)
return sorted_values
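# Example (added; illustrative): given PylineResult-like objects whose .result is a list of column
# values, sort_by(results, sortstr='2,0', col_map={'2': int}) sorts by column 2 cast to int and then
# by column 0 as-is; columns missing from a short row fall back to default_value.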
def str2boolintorfloat(str_):
"""
Try to cast a string as a ``bool``, ``float``, ``int``,
or ``str_.__class__``.
Args:
str_ (basestring): string to try and cast
Returns:
object: casted ``{boot, float, int, or str_.__class__}``
"""
match = re.match(r'([\d\.]+)', str_)
type_ = None
if not match:
type_ = str_.__class__
value = str_
value_lower = value.strip().lower()
if value_lower == 'true':
type_ = bool
value = True
elif value_lower == 'false':
type_ = bool
value = False
return value
else:
try:
numstr = match.group(1)
if '.' in numstr:
type_ = float
value = type_(numstr)
else:
type_ = int
value = type_(numstr)
except (ValueError, NameError, IndexError) as e:
value = str_
log.exception((e, (type_, value)))
return value
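# Example (added; illustrative): str2boolintorfloat("true") -> True, str2boolintorfloat("42") -> 42,
# str2boolintorfloat("2.5") -> 2.5; anything else (e.g. "n/a") is returned unchanged as a string.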
def parse_formatstring(str_):
"""
Parse a format string like
``format:+isTrue,-isFalse,key0=value,key1=1.1,keyTrue=true``
Args:
str_ (basestring): _format
Returns:
OrderedDict_: {key: {True|False|float|int|str}}
.. code:: python
{
fmtkey: _formatstr,
argkey: argstr,
'key0': 'value0',
'key1': 1,
'key2': True,
'key2.1': 2.1,
}
Inspired by rdflib ``rdfpipe -h | grep 'FORMAT:'``::
FORMAT:(+)KW1,-KW2,KW3=VALUE
format
format:opt1
format:opt2=True
"""
fmtkey = '_output_format'
argkey = '_output_format_args'
strsplit = str_.split(':', 1)
if len(strsplit) == 1:
_format = strsplit[0]
_format = _format if _format else None
return OrderedDict_((
(fmtkey, _format),
(argkey, None),
))
else:
_format, argstr = strsplit
_format = _format if _format else None
opts = OrderedDict_()
opts[fmtkey] = _format
opts[argkey] = argstr if argstr else None
_args = [x.strip() for x in argstr.split(',')]
for arg in _args:
if not arg:
continue
key, value = None, None
if '=' in arg:
key, value = [x.strip() for x in arg.split('=', 1)]
else:
if arg[0] == '-':
key, value = arg[1:], False
elif arg[0] == '+':
key, value = arg[1:], True
else:
key, value = arg, True
if not isinstance(value, (bool, float, int)):
opts[key] = str2boolintorfloat(value)
else:
opts[key] = value
return opts
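# Example (added; illustrative): parse_formatstring("csv:+header,delimiter=|") returns an ordered
# dict like {'_output_format': 'csv', '_output_format_args': '+header,delimiter=|', 'header': True,
# 'delimiter': '|'}, while a bare "json" yields {'_output_format': 'json', '_output_format_args': None}.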
class ResultWriter(object):
OUTPUT_FILETYPES = {
'csv': ",",
'json': True,
'tsv': "\t",
'html': True,
'jinja': True,
"txt": True,
"checkbox": True,
"chk": True
}
output_format = None
def __init__(self, _output, *args, **kwargs):
self._output = _output
self._conf = kwargs
self.setup(_output, *args, **kwargs)
def setup(self, *args, **kwargs):
pass
def set_output(self, _output):
if _output and self._output is not None:
raise Exception()
else:
self._output = _output
def header(self, *args, **kwargs):
pass
def write(self, obj):
print(unicode(obj), file=self._output)
def write_numbered(self, obj):
print(obj, file=self._output)
def footer(self, *args, **kwargs):
pass
@classmethod
def is_valid_output_format(cls, _output_formatstr):
opts = parse_formatstring(_output_formatstr)
_output_format = opts.get('_output_format')
if _output_format in cls.OUTPUT_FILETYPES:
return _output_format
return False
@classmethod
def get_writer(cls, _output,
output_format="csv",
**kwargs):
"""get writer object for _output with the specified output_format
Args:
_output (file-like .write): output to write to
Kwargs:
output_format (str): a formatstring to be parsed by
:py:func:`parse_formatstring`
Filetypes::
txt | csv | tsv | json | html | jinja | checkbox
Returns:
ResultWriter: a configured ResultWriter subclass instance
"""
opts = parse_formatstring(output_format.strip())
_output_format = opts.pop('_output_format', None)
opts.update(kwargs)
if not cls.is_valid_output_format(_output_format):
raise ValueError("_output_format: %r" % _output_format)
writer = None
if _output_format == "txt":
writer = ResultWriter_txt(_output)
elif _output_format == "csv":
writer = ResultWriter_csv(_output, **opts)
elif _output_format == "tsv":
writer = ResultWriter_csv(_output, delimiter='\t', **opts)
elif _output_format == "json":
writer = ResultWriter_json(_output)
elif _output_format == "html":
writer = ResultWriter_html(_output, **opts)
elif _output_format.startswith("jinja"):
writer = ResultWriter_jinja(_output, **opts)
elif _output_format in ("checkbox", "chk"):
writer = ResultWriter_checkbox(_output, **opts)
else:
raise ValueError("_output_format: %r" % _output_format)
output_func = None
if kwargs.get('number_lines'):
output_func = writer.write_numbered
else:
output_func = writer.write
writer.output_func = output_func
return writer
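# Example (added; sketch of a typical caller, not part of this module):
#     writer = ResultWriter.get_writer(sys.stdout, output_format='tsv', number_lines=False)
#     writer.header(attrs=['line'])
#     for result in results:          # PylineResult-like objects with a .result sequence
#         writer.output_func(result)
#     writer.footer()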
class ResultWriter_txt(ResultWriter):
output_format = 'txt'
def write_numbered(self, obj):
self.write(obj._numbered_str(odelim='\t'))
class ResultWriter_csv(ResultWriter):
output_format = 'csv'
def setup(self, *args, **kwargs):
self.delimiter = kwargs.get(
'delimiter',
ResultWriter.OUTPUT_FILETYPES.get(
self.output_format,
','))
self._output_csv = csv.writer(self._output,
quoting=csv.QUOTE_NONNUMERIC,
delimiter=self.delimiter)
# doublequote=True)
def header(self, *args, **kwargs):
attrs = kwargs.get('attrs')
if attrs is not None:
self._output_csv.writerow(attrs)
def write(self, obj):
self._output_csv.writerow(obj.result)
def write_numbered(self, obj):
self._output_csv.writerow(tuple(obj._numbered()))
class ResultWriter_json(ResultWriter):
output_format = 'json'
def write(self, obj):
print(
json.dumps(
obj._asdict(),
indent=2),
end=',\n',
file=self._output)
write_numbered = write
class ResultWriter_html(ResultWriter):
output_format = 'html'
escape_func = staticmethod(cgi.escape)
def header(self, *args, **kwargs):
self._output.write("<table>")
self._output.write("<tr>")
attrs = kwargs.get('attrs')
if attrs is not None:
for col in attrs:
self._output.write(u"<th>%s</th>" % self.escape_func(col))
self._output.write("</tr>")
def _html_row(self, obj):
yield '\n<tr>'
for attr, col in obj._asdict().iteritems(): # TODO: zip(_fields, ...)
yield "<td%s>" % (
attr is not None and (' class="%s"' % attr) or '')
if hasattr(col, '__iter__'):
for value in col:
yield u'<span>%s</span>' % self.escape_func(value)
else:
# TODO
colvalue = (
col and hasattr(col, 'rstrip') and col.rstrip()
or str(col))
yield self.escape_func(colvalue)
yield "</td>"
yield "</tr>"
def write(self, obj):
return self._output.write(u''.join(self._html_row(obj,)))
def footer(self):
self._output.write('</table>\n')
class ResultWriter_jinja(ResultWriter):
output_format = 'jinja'
escape_func = staticmethod(cgi.escape)
def setup(self, *args, **kwargs):
log.debug(('args', args))
log.debug(('kwargs', kwargs))
import jinja2, os
self.escape_func = jinja2.escape
templatepath = kwargs.get('template', kwargs.get('tmpl'))
if templatepath is None:
raise ValueError(
"Specify at least a template= like "
"'jinja:+autoescape,template=./template.jinja2'")
self.templatepath = os.path.dirname(templatepath)
self.template = os.path.basename(templatepath)
self.loader = jinja2.FileSystemLoader(self.templatepath)
envargs = OrderedDict_()
envargs['autoescape'] = kwargs.get('autoescape', True)
# envargs['extensions'] = []
self.env = jinja2.Environment(**envargs)
self.tmpl = self.loader.load(self.env, self.template)
def write(self, obj):
context = OrderedDict_()
context['obj'] = obj
jinja2_output = self.tmpl.render(**context)
return self._output.write(jinja2_output)
class ResultWriter_checkbox(ResultWriter):
output_format = 'checkbox'
def _checkbox_row(self, obj, wrap=79):
yield u'\n'.join(textwrap.wrap(
unicode(obj),
initial_indent=u'- [ ] ',
subsequent_indent=u' '
))
yield '\n'
def write(self, obj):
return self._output.write(u''.join(self._checkbox_row(obj)))
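# Note (added; illustrative): the checkbox writer renders each result as a Markdown task-list item,
# e.g. "- [ ] some matching line of text", wrapping long lines onto indented continuation lines.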
def get_option_parser():
import optparse
prs = optparse.OptionParser(
usage=(
"%prog [-f<path>] [-o|--output-file=<path>] \n"
" [-F|--input-delim='\\t'] \n"
" [--max|--max-split=3] \n"
" [-d|--output-delimiter='||'] \n"
" [-n|--number-lines] \n"
" [-m|--modules=<mod2>] \n"
" [-p|--pathpy] [--pathlib] \n"
" [-r '<rgx>'|--regex='<rgx>'] \n"
" '<python_expression>' \n"
),
description=(
"Pyline is a UNIX command-line tool for line-based processing "
"in Python with regex and output transform features "
"similar to grep, sed, and awk."
),
epilog=EPILOG)
prs.add_option('-f', '--in', '--input-file',
dest='file',
action='store',
default='-',
help="Input file #default: '-' for stdin")
prs.add_option('-F', '--input-delim',
dest='idelim',
action='store',
default=None,
help=('words = line.split(-F)'
' #default: None (whitespace)'))
prs.add_option('--max', '--input-delim-split-max', '--max-split',
dest='idelim_split_max',
action='store',
default=-1,
type=int,
help='words = line.split(-F, --max)')
prs.add_option('--shlex',
action='store_true',
help='words
# -*- coding: utf-8 -*-
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/ethernet/switched-vlan/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State variables for VLANs
"""
__slots__ = ('_path_helper', '_extmethods', '__interface_mode','__native_vlan','__access_vlan','__trunk_vlans',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods = False
self.__interface_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
self.__native_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
self.__access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
self.__trunk_vlans = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['interfaces', 'interface', 'ethernet', 'switched-vlan', 'state']
def _get_interface_mode(self):
"""
Getter method for interface_mode, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/interface_mode (oc-vlan-types:vlan-mode-type)
YANG Description: Set the interface to access or trunk mode for
VLANs
"""
return self.__interface_mode
def _set_interface_mode(self, v, load=False):
"""
Setter method for interface_mode, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/interface_mode (oc-vlan-types:vlan-mode-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_mode() directly.
YANG Description: Set the interface to access or trunk mode for
VLANs
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_mode must be of a type compatible with oc-vlan-types:vlan-mode-type""",
'defined-type': "oc-vlan-types:vlan-mode-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)""",
})
self.__interface_mode = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_mode(self):
self.__interface_mode = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'ACCESS': {}, 'TRUNK': {}},), is_leaf=True, yang_name="interface-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-mode-type', is_config=False)
def _get_native_vlan(self):
"""
Getter method for native_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/native_vlan (oc-vlan-types:vlan-id)
YANG Description: Set the native VLAN id for untagged frames arriving on
a trunk interface. Tagged frames sent on an interface
configured with a native VLAN should have their tags
stripped prior to transmission. This configuration is only
valid on a trunk interface.
"""
return self.__native_vlan
def _set_native_vlan(self, v, load=False):
"""
Setter method for native_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/native_vlan (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_native_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_native_vlan() directly.
YANG Description: Set the native VLAN id for untagged frames arriving on
a trunk interface. Tagged frames sent on an interface
configured with a native VLAN should have their tags
stripped prior to transmission. This configuration is only
valid on a trunk interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """native_vlan must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__native_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_native_vlan(self):
self.__native_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="native-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
def _get_access_vlan(self):
"""
Getter method for access_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/access_vlan (oc-vlan-types:vlan-id)
YANG Description: Assign the access vlan to the access port.
"""
return self.__access_vlan
def _set_access_vlan(self, v, load=False):
"""
Setter method for access_vlan, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/access_vlan (oc-vlan-types:vlan-id)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_vlan() directly.
YANG Description: Assign the access vlan to the access port.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_vlan must be of a type compatible with oc-vlan-types:vlan-id""",
'defined-type': "oc-vlan-types:vlan-id",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)""",
})
self.__access_vlan = t
if hasattr(self, '_set'):
self._set()
def _unset_access_vlan(self):
self.__access_vlan = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}), is_leaf=True, yang_name="access-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='oc-vlan-types:vlan-id', is_config=False)
def _get_trunk_vlans(self):
"""
Getter method for trunk_vlans, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/trunk_vlans (union)
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y.
"""
return self.__trunk_vlans
def _set_trunk_vlans(self, v, load=False):
"""
Setter method for trunk_vlans, mapped from YANG variable /interfaces/interface/ethernet/switched_vlan/state/trunk_vlans (union)
If this variable is read-only (config: false) in the
source YANG file, then _set_trunk_vlans is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trunk_vlans() directly.
YANG Description: Specify VLANs, or ranges thereof, that the interface may
carry when in trunk mode. If not specified, all VLANs are
allowed on the interface. Ranges are specified in the form
x..y, where x<y - ranges are assumed to be inclusive (such
that the VLAN range is x <= range <= y.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """trunk_vlans must be of a type compatible with union""",
'defined-type': "openconfig-vlan:union",
'generated-type': """YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)""",
})
self.__trunk_vlans = t
if hasattr(self, '_set'):
self._set()
def _unset_trunk_vlans(self):
self.__trunk_vlans = YANGDynClass(unique=True, base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': ['1..4094']}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])\\.\\.(409[0-4]|40[0-8][0-9]|[1-3][0-9]{3}|[1-9][0-9]{1,2}|[1-9])$'}),]), is_leaf=False, yang_name="trunk-vlans", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='union', is_config=False)
interface_mode = __builtin__.property(_get_interface_mode)
native_vlan = __builtin__.property(_get_native_vlan)
access_vlan = __builtin__.property(_get_access_vlan)
trunk_vlans = __builtin__.property(_get_trunk_vlans)
_pyangbind_elements = OrderedDict([('interface_mode', interface_mode), ('native_vlan', native_vlan), ('access_vlan', access_vlan), ('trunk_vlans', trunk_vlans), ])
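# Note: _pyangbind_elements maps each Python-safe leaf name to its read-only
# property object; pyangbind's serialisation helpers iterate over this ordered
# mapping so that elements are emitted in YANG definition order.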
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/ethernet/switched-vlan/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State variables for VLANs
"""
__slots__ = ('_path_helper', '_extmethods', '__interface_mode','__native_vlan','__access_vlan','__trunk_vlans',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
helper = kwargs.pop("path_helper", None)
if helper is False:
self._path_helper = False
elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
self._path_helper = helper
elif hasattr(self, "_parent"):
helper = getattr(self._parent, "_path_helper", False)
self._path_helper = helper
else:
self._path_helper = False
self._extmethods
import argparse
import os
import logging
import random
import time
import torch
import torchvision
import torch.nn as nn
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix
from datahelper import *
from model import *
from util import plot
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Device configuration
print("[INFO] Utilized device as [{}]".format(device))
def train_model(args, model):
"""The model training subroutine, including epoch-wise eval
"""
# deploy the model to device if avail
model = model.to(device)
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(),lr=args['lr'])
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10,20,30,40,50], gamma=0.1)
path_to_trainset = args['dataset_dir'] + 'train/'
label_map = load_label(args['dataset_dir'] + 'train.csv')
dataset_loader = load_dataset(path_to_trainset)
# generate a dataset(str_dirname)
train_set, dev_set = next(dataset_loader)
tot_loss = []
tot_devacc = []
for epoch in range(1, args['epoches'] + 1): #args.num_epoches
ts = train_set[:]
random.shuffle(ts)
ds = dev_set[:]
random.shuffle(ds)
start_time = time.time()
flag = 1
counter = 1
epoch_loss = 0
while flag: # exploit training set by 'bags' ==> to generate samples with diverse classes
images,labels,tot = [],[],[]
# fetch 20 bags of samples and "shuffle" them
# Then feed to the NN
for i in range(args['bag_size']):
if not ts:
flag = 0
break
dirname = ts.pop()
tmp_images, tmp_labels = unpack_directory(dirname, path_to_trainset,label_map)
images.extend(tmp_images)
labels.extend(tmp_labels)
tot = list(zip(images,labels))
random.shuffle(tot)
if tot == []:
break
images[:], labels[:] = zip(*tot)
# Batch training, based on the index partition
# partition: batch (len=32(default)) starting-index of images ABOVE
partition = []
for i in range(0, len(images), args['batch_size']):
partition.append(i)
step = 0 # current 'bags'
for pt in range(len(partition)):
#print('[INFO] Now do training .. Epoch{} | Bag{} | miniBatch{}'
# .format(epoch, counter, step))
# A batch train
if pt == len(partition) - 1:
image_batch, label_batch = torch.cat(images[partition[pt]: ], dim=0), torch.cat(labels[partition[pt]: ],dim=0)
else:
image_batch, label_batch = torch.cat(images[partition[pt]:partition[pt+1]], dim=0), torch.cat(labels[partition[pt]:partition[pt+1] ],dim=0)
image_batch = image_batch.to(device)
label_batch = label_batch.to(device)
out = model(image_batch)
# To obtain the Gold label(multi-class)
v_length = len(label_batch)
#print('[DEBUG]out-shape:{},label-shape:{}'.format(out.shape,label_batch.shape))
loss = criterion(out.squeeze(), label_batch.squeeze())
optimizer.zero_grad()
loss.backward()
optimizer.step()
step += 1
epoch_loss += loss.item()
# Eval step-wise use batch-size in train set
samples_ = random.sample(range(len(images)), args['batch_size']) # random sample explored
sample_img = [images[idx] for idx in samples_]
sample_img = torch.cat(sample_img, dim=0).to(device)
sample_label = [labels[idx] for idx in samples_]
sample_label = torch.cat(sample_label, dim=0)
s_out = model(sample_img).detach()
s_out = s_out.cpu()
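# The block below derives hard multi-hot labels from the raw network scores:
# for each label the threshold is the midpoint between the max and min score
# in the sampled batch, and a sample counts as correct only if every label
# matches exactly (exact-match / subset accuracy).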
thresholds = (s_out.max(dim=0).values + s_out.min(dim=0).values) / 2
hard_label = np.array([[1 if score > thresholds[i] else 0 for i, score in enumerate(j)] for j in s_out])
_tmp = abs(sample_label - hard_label)
acc = 0
for row in _tmp:
_f = 1
for element in row:
if element > 0.0001:
_f = 0
if _f:
acc += 1
acc = float(acc) / args['batch_size']
current_time = time.time()
print('[LOGGER] Epoch[{}/{}], Step[{}]| Acc: {:.3f} | Time elapsed: {:.2f}/sec'
.format(epoch, args['epoches'], counter, acc, current_time - start_time)) # args.num_epoches
counter += 1
tot_loss.append(epoch_loss)
print('[INFO] Epoch[{}/{}] Ended| Loss {:.4f} | Time elapsed: {:.2f}/sec\nStarting Eval step...'
.format(epoch, args['epoches'], epoch_loss, current_time - start_time))
# save model
if epoch % args['steps_save_ckpt'] == 0:
torch.save(model, args['output_dir'] + 'epoch-{}.ckpt'.format(epoch))
# ==== Evaluate this epoch result using dev set ====
ts = train_set[:]
devacc = eval(args, model, ts, ds)
tot_devacc.append(devacc)
scheduler.step()
plt.plot(tot_loss)
plt.ylabel('Training loss per epoch')
plt.xlabel('Epochs')
plt.savefig(args['output_dir'] + 'loss.png')
plt.close()
plt.plot(tot_devacc)
plt.ylabel('Dev accuracy per epoch')
plt.xlabel('Epochs')
plt.savefig(args['output_dir'] + 'acc.png')
plt.close()
def eval(args, model, trainset, devset):
images,scores,labels,xtrues = [],[],[],[]
path_to_trainset = args['dataset_dir'] + 'train/'
label_map = load_label(args['dataset_dir'] + 'train.csv')
ds = devset[:]
ts = trainset[:]
# train the svm
while(ts):
# traverse each dir in dev set
dirname = ts.pop()
images, labels = unpack_directory(dirname, path_to_trainset, label_map)
random.shuffle(images)
x_gold = labels[0]
dir_score = []
# Predicted score
partition = []
for i in range(0, len(images), args['batch_size']):
partition.append(i)
# minibatch training
for pt in range(len(partition)):
# A batch train
if pt == len(partition) - 1:
image_batch = torch.cat(images[partition[pt]: ], dim=0)
else:
image_batch= torch.cat(images[partition[pt]:partition[pt+1]], dim=0)
image_batch = image_batch.to(device)
out = model(image_batch).detach()
out = out.cpu()
dir_score.append(out) # consider a bag at a time
dir_score = torch.cat(dir_score, dim=0)
dir_score = dir_score.mean(dim=0)
scores.append(dir_score)
xtrues.append(x_gold)
x_score = torch.stack(scores,dim=0)
x_true = torch.cat(xtrues,dim=0)
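# Fit an SVM-based decision function (svm_decision, presumably a one-vs-rest
# SVC given the imports above) on the bag-level mean scores of the training
# split; it replaces plain per-label thresholding when predicting hard labels
# for the dev set below.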
svm = svm_decision(x_score, x_true)
images,scores,labels,ytrues = [],[],[],[]
while(ds):
# traverse each dir in dev set
dirname = ds.pop()
images, labels = unpack_directory(dirname, path_to_trainset, label_map)
random.shuffle(images)
y_gold = labels[0]
dir_score = []
# Predicted score
partition = []
for i in range(0, len(images), args['batch_size']):
partition.append(i)
# minibatch training
for pt in range(len(partition)):
# A batch train
if pt == len(partition) - 1:
image_batch = torch.cat(images[partition[pt]: ], dim=0)
else:
image_batch= torch.cat(images[partition[pt]:partition[pt+1]], dim=0)
image_batch = image_batch.to(device)
out = model(image_batch).detach()
out = out.cpu()
dir_score.append(out) # consider a bag at a time
dir_score = torch.cat(dir_score, dim=0)
dir_score = dir_score.mean(dim=0)
scores.append(dir_score)
ytrues.append(y_gold)
# concat
y_score = torch.stack(scores,dim=0)
y_true = torch.cat(ytrues,dim=0)
# use MID value to represent thresh for each label
thresholds = (y_score.max(dim=0).values + y_score.min(dim=0).values) / 2
# To obtain the Gold label(multi-class)
#y_pred = torch.FloatTensor([[1 if score > thresholds[i] else 0 for i, score in enumerate(j)] for j in y_score])
y_pred = svm.predict(y_score)
# Acc record
diff = y_pred - y_true.numpy()
devacc = 0
for row in diff:
_f = 1
for element in row:
if abs(element.item()) > 0.0001:
_f = 0
if _f:
devacc += 1
devacc = float(devacc) / len(y_true)
# plot roc curve
plot(y_score, y_true, args['output_dir'])
# macro F1 Eval
f1_macro = metrics.f1_score(y_true, y_pred, average='macro')
# micro F1 Eval
f1_micro = metrics.f1_score(y_true, y_pred, average='micro')
# AUC Eval
try:
roc_auc = metrics.roc_auc_score(y_true, y_score, average='macro')
except ValueError:
print('[WARNING] Current dev set has not all of the labels')
roc_auc = -1
print('[INFO] Eval result:\n|ACC:{}|\n|AUC:{}|\n|F1 Macro:{}|\n|F1 Micro:{}|'.format(
devacc, roc_auc, f1_macro, f1_micro))
return devacc # for train subroutine
def predict(args, model, trainset):
model = model.to(device)
path_to_testset = args['dataset_dir'] + 'test/'
test_sets = listdir(path_to_testset)
path_to_trainset = args['dataset_dir'] + 'train/'
label_map = load_label(args['dataset_dir'] + 'train.csv')
# ===== train the svm =====
images,scores,labels,xtrues = [],[],[],[]
while trainset:
# traverse each dir in dev set
dirname = trainset.pop()
images, labels = unpack_directory(dirname, path_to_trainset, label_map)
random.shuffle(images)
x_gold = labels[0] # directory share the label
# Predicted score
dir_score = []
partition = [] # minibatch training
for i in range(0, len(images), args['batch_size']):
partition.append(i)
# minibatch training
for pt in range(len(partition)):
# A batch train
if pt == len(partition) - 1:
image_batch = torch.cat(images[partition[pt]: ], dim=0)
else:
image_batch= torch.cat(images[partition[pt]:partition[pt+1]], dim=0)
image_batch = image_batch.to(device)
out = model(image_batch).detach()
out = out.cpu()
dir_score.append(out) # consider a bag at a time
dir_score = torch.cat(dir_score, dim=0)
dir_score = dir_score.mean(dim=0)
scores.append(dir_score)
xtrues.append(x_gold)
x_score = torch.stack(scores, dim=0).numpy()
x_true = torch.cat(xtrues, dim=0).numpy()
print('Training set for SVM: ', x_score.shape)
svm = svm_decision(x_score, x_true)
# ===== predict the score =====
y_score = []
for dirname in test_sets:
# predict for each file
images = unpack_directory(dirname, path_to_testset)
dir_score = []
partition = []
for i in range(0, len(images), args['batch_size']):
partition.append(i)
for pt in range(len(partition)):
# minibatch train
if pt == len(partition) - 1:
image_batch = torch.cat(images[partition[pt]: ], dim=0)
else:
image_batch = torch.cat(images[partition[pt]:partition[pt+1]], dim=0)
image_batch = image_batch.to(device)
out = model(image_batch).detach()
out = out.cpu()
dir_score.append(out)
dir_scores = torch.cat(dir_score, dim=0)
if len(images) != dir_scores.shape[0]:
print('[WARNING] The read and write are not matched.')
dir_scores = dir_scores.mean(dim=0) # reduce dim=0 (shape=10)
y_score.append(dir_scores)
# row represents each dir
# column represents each label
y_score = torch.stack(y_score, dim=0)
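# y_score now holds one row per test directory: the per-label scores averaged
# over all images (and mini-batches) in that directory, i.e. simple mean
# pooling at the bag level.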
y_prob = y_score.numpy().round(4) # output, round=4
#thresholds = (y_scores.max(dim=0).values + y_scores.min(dim=0).values) / 2
#str_label = [[str(i) for i, score in enumerate(_scores) if score > thresholds[i]] for _scores in y_scores]
y_pred = svm.predict(y_score)
str_label = [[str(i) for i, pred_label in enumerate(row) if pred_label >= 0.99] for row in y_pred] # >=0.99 ~ ==1
str_prob = [[str(p) for p in list(_prob)] for _prob in y_prob]
# split using ;
print_score = [[dirname, ';'.join(_prob)] for dirname, _prob in zip(test_sets, str_prob)]
print_label = [[dirname, ';'.join(_label)] for dirname, _label in zip(test_sets, str_label)]
csv_record(args['output_dir'] + 'test_pred.csv', print_score)
csv_record(args['output_dir'] + 'test.csv', print_label)
print('[INFO] Predict done.')
def svm_decision(y_score, y_true):
"""
Args:
y_score: [batch x 10] score
def test_recommendTezConfigurations(self, os_listdir_mock, os_isdir_mock, os_exists_mock):
os_exists_mock.return_value = True
os_isdir_mock.return_value = True
os_listdir_mock.return_value = ['TEZ{0.7.0.2.3.0.0-2155}']
self.maxDiff = None
configurations = {
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192",
},
},
"capacity-scheduler": {
"properties": {
"yarn.scheduler.capacity.root.queues": "queue1,queue2"
}
}
}
clusterData = {
"cpu": 4,
"mapMemory": 3000,
"amMemory": 2000,
"reduceMemory": 2056,
"containers": 3,
"ramPerContainer": 256
}
expected = {
"capacity-scheduler": {
"properties": {
"yarn.scheduler.capacity.root.queues": "queue1,queue2"
}
},
"tez-site": {
"properties": {
"tez.task.resource.memory.mb": "768",
"tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
"tez.runtime.io.sort.mb": "202",
"tez.session.am.dag.submit.timeout.secs": "600",
"tez.runtime.unordered.output.buffer.size-mb": "57",
"tez.am.resource.memory.mb": "4000",
"tez.queue.name": "queue2",
}
},
"yarn-site": {
"properties": {
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "8192"
}
}
}
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"href": "/api/v1/stacks/HDP/versions/2.2/services/YARN",
"StackServices": {
"service_name": "YARN",
"service_version": "2.6.0.2.2",
"stack_name": "HDP",
"stack_version": "2.2"
},
"components": [
{
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "APP_TIMELINE_SERVER",
"display_name": "App Timeline Server",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "SLAVE",
"component_name": "NODEMANAGER",
"display_name": "NodeManager",
"is_client": "false",
"is_master": "false",
"hostnames": [
"c6403.ambari.apache.org"
]
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1-2",
"component_category": "MASTER",
"component_name": "RESOURCEMANAGER",
"display_name": "ResourceManager",
"is_client": "false",
"is_master": "true",
"hostnames": []
},
"dependencies": []
},
{
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1+",
"component_category": "CLIENT",
"component_name": "YARN_CLIENT",
"display_name": "YARN Client",
"is_client": "true",
"is_master": "false",
"hostnames": []
},
"dependencies": []
}
]
},
],
"configurations": configurations,
"changed-configurations": [ ],
"ambari-server-properties": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6402.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6402.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6402.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
},
{
"href" : "/api/v1/hosts/c6403.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "c6403.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6403.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
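# The code below first derives the Tez UI URL from the Ambari server host and
# then re-runs the recommendation with different java.home values: JDK 1.7 is
# expected to keep the ParallelGC launch options, while JDK 1.8+ should switch
# tez.*.launch.cmd-opts to G1GC (+ResizeTLAB).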
server_host = socket.getfqdn()
for host in hosts["items"]:
if server_host == host["Hosts"]["host_name"]:
server_host = host["Hosts"]["public_host_name"]
tez_ui_url = "http://" + server_host + ":8080/#/main/view/TEZ/tez_cluster_instance"
# Test JDK1.7
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.7.3_23'}
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.8
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.8_44'}
expected['tez-site']['properties']['tez.am.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.task.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Test JDK1.9
services['ambari-server-properties'] = {'java.home': '/usr/jdk64/jdk1.9.2_44'}
expected['tez-site']['properties']['tez.am.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.task.launch.cmd-opts'] = "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseG1GC -XX:+ResizeTLAB"
expected['tez-site']['properties']['tez.tez-ui.history-url.base'] = tez_ui_url
self.stackAdvisor.recommendTezConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_validateHiveConfigurations(self):
properties = {"hive_security_authorization": "None",
"hive.exec.orc.default.stripe.size": "8388608",
'hive.tez.container.size': '2048',
'hive.tez.java.opts': '-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.auto.convert.join.noconditionaltask.size': '1100000000'}
recommendedDefaults = {'hive.tez.container.size': '1024',
'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps',
'hive.auto.convert.join.noconditionaltask.size': '1000000000'}
configurations = {
"hive-site": {
"properties": {"hive.security.authorization.enabled": "true", 'hive.tez.java.opts': '-server -Djava.net.preferIPv4Stack=true'}
},
"hive-env": {
"properties": {"hive_security_authorization": "None"}
}
}
services = {
"services": []
}
# Test for 'ranger-hive-plugin-properties' not being in configs
res_expected = []
res = self.stackAdvisor.validateHiveConfigurations(properties, recommendedDefaults, configurations, services, {})
self.assertEquals(res, res_expected)
# This test intentionally calls all validate methods with
# incorrect parameters (empty configs)
def test_noRiskyDictLookups(self):
properties = {}
recommendedDefaults = {}
configurations = {"core-site": {"properties": {}}}
services = {
"services": [],
"Versions": {
"stack_name": "HDP",
"stack_version": "2.3"
},
"configurations": configurations
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/c6401.ambari.apache.org",
"Hosts" : {
"cpu_count" : 1,
"disk_info" : [
{
"available" : "4564632",
"used" : "5230344",
"percent" : "54%",
"size" : "10319160",
"type" : "ext4",
"mountpoint" : "/"
},
{
"available" : "1832436",
"used" : "0",
"percent" : "0%",
"size" : "1832436",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
],
"host_name" : "c6401.ambari.apache.org",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "c6401.ambari.apache.org",
"rack_info" : "/default-rack",
"total_mem" : 1922680
}
}
]
}
def return_c6401_hostname(services, service_name, component_name):
return ["c6401.ambari.apache.org"]
self.stackAdvisor.getComponentHostNames = return_c6401_hostname
validators = self.stackAdvisor.getServiceConfigurationValidators()
# Setting up empty configs and services info
for serviceName, validator in validators.items():
services["services"].extend([{"StackServices": {"service_name": serviceName},
"components": []}])
for siteName in validator.keys():
configurations[siteName] = {"properties": {}}
# Emulate enabled RANGER
services["services"].extend([{"StackServices": {"service_name": "RANGER"},
"components": []}])
configurations["ranger-hbase-plugin-properties"] = {
"ranger-hbase-plugin-enabled": "Yes"
}
exceptionThrown = False
try:
recommendations = self.stackAdvisor.recommendConfigurations(services, hosts)
except Exception as e:
exceptionThrown = True
self.assertTrue(exceptionThrown)
pass
def test_recommendRangerConfigurations(self):
clusterData = {}
# Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3
services = {
"Versions": {
"parent_stack_version": "2.2",
"stack_name": "HDP",
"stack_version": "2.3",
"stack_hierarchy": {
"stack_name": "HDP",
"stack_versions": ["2.2", "2.1", "2.0.6"]
}
},
"services": [
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0.2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX",
"StackServices": {
"service_name": "KNOX",
"service_version": "0.9.0.2.3",
"stack_name": "HDP",
"stack_version": "2.3"
},
"components": [
{
"href": "/api/v1/stacks/HDP/versions/2.3/services/KNOX/components/KNOX_GATEWAY",
"StackServiceComponents": {
"advertise_version": "false",
"cardinality": "1+",
"component_category": "MASTER",
"component_name": "KNOX_GATEWAY",
"display_name": "Knox Gateway",
"is_client": "false",
"is_master": "true",
"hostnames": ["c6401.ambari.apache.org"]
},
"dependencies": []
}
]
}
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
"ranger.sso.providerurl": "",
}
}
},
"ambari-server-properties": {
"ambari.ldap.isConfigured" : "true",
"authentication.ldap.bindAnonymously" : "false",
"authentication.ldap.baseDn" : "dc=apache,dc=org",
"authentication.ldap.groupNamingAttr" : "cn",
"authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:389",
"authentication.ldap.userObjectClass" : "posixAccount",
"authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:389",
"authentication.ldap.usernameAttribute" : "uid",
"authentication.ldap.dnAttribute" : "dn",
"authentication.ldap.useSSL" : "false",
"authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
"authentication.ldap.groupMembershipAttr" : "memberUid",
"authentication.ldap.groupObjectClass" : "posixGroup",
"authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
}
}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:7777'
}
},
'ranger-ugsync-site': {
'properties': {
'ranger.usersync.group.objectclass': 'posixGroup',
'ranger.usersync.group.nameattribute': 'cn',
'ranger.usersync.group.memberattributename': 'memberUid',
'ranger.usersync.ldap.binddn': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
'ranger.usersync.ldap.user.nameattribute': 'uid',
'ranger.usersync.ldap.user.objectclass': 'posixAccount',
'ranger.usersync.ldap.url': 'ldap://c6403.ambari.apache.org:389',
'ranger.usersync.ldap.searchBase': 'dc=apache,dc=org'
}
},
'ranger-admin-site': {
'properties': {
"ranger.audit.solr.zookeepers": "NONE",
"ranger.audit.source.type": "solr",
"ranger.sso.providerurl": "https://c6401.ambari.apache.org:8443/gateway/knoxsso/api/v1/websso"
}
},
'ranger-env': {
'properties': {
'ranger-storm-plugin-enabled': 'No',
}
},
'ranger-knox-security': {'properties': {}}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
# Recommend ranger.audit.solr.zookeepers when solrCloud is disabled
services['configurations']['ranger-env'] = {
"properties": {
"is_solrCloud_enabled": "false"
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations['ranger-admin-site']['properties']['ranger.audit.solr.zookeepers'], 'NONE')
def test_recommendRangerKMSConfigurations(self):
clusterData = {}
services = {
"ambari-server-properties": {
"ambari-server.user": "root"
},
"Versions": {
"stack_version" : "2.3",
},
"services": [
{
"StackServices": {
"service_name": "RANGER_KMS",
"service_version": "0.5.0.2.3"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_KMS_SERVER",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": {
"kms-env": {
"properties": {
"kms_user": "kmsname"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://host1:8020"
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-properties': {
'properties': {
'DB_FLAVOR': 'ORACLE',
'db_host' : 'c6401.ambari.apache.org:1521:XE',
'db_name' : "XE"
}
},
'cluster-env': {
'properties': {
'security_enabled': 'false'
}
}
},
"forced-configurations": []
}
expected = {
'kms-properties': {
'properties': {}
},
'dbks-site': {
'properties': {
"ranger.ks.jpa.jdbc.driver" : "oracle.jdbc.driver.OracleDriver",
"ranger.ks.jpa.jdbc.url" : "jdbc:oracle:thin:@c6401.ambari.apache.org:1521:XE"
}
},
'core-site': {
'properties': {
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-site': {
'properties': {
},
'property_attributes': {
'hadoop.kms.proxyuser.HTTP.hosts': {'delete': 'true'},
'hadoop.kms.proxyuser.HTTP.users': {'delete': 'true'},
'hadoop.kms.proxyuser.root.hosts': {'delete': 'true'},
'hadoop.kms.proxyuser.root.users': {'delete': 'true'}
}
}
}
# non kerberized cluster. There should be no proxyuser configs
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
# kerberized cluster
services['services'].append({
"StackServices": {
"service_name": "KERBEROS"
}
})
services['configurations']['cluster-env']['properties']['security_enabled'] = "true"
services['configurations']['cluster-env']['properties']['ambari_principal_name'] = "ambari-cl1@EXAMPLE.COM"
expected = {
'kms-properties': {
'properties': {}
},
'dbks-site': {
'properties': {
"ranger.ks.jpa.jdbc.driver" : "oracle.jdbc.driver.OracleDriver",
"ranger.ks.jpa.jdbc.url" : "jdbc:oracle:thin:@c6401.ambari.apache.org:1521:XE"
}
},
'core-site': {
'properties': {
'hadoop.proxyuser.kmsname.groups': '*'
}
},
'ranger-kms-audit': {
'properties': {
}
},
'kms-site': {
'properties': {
'hadoop.kms.proxyuser.HTTP.hosts': '*',
'hadoop.kms.proxyuser.HTTP.users': '*',
'hadoop.kms.proxyuser.ambari-cl1.hosts': '*',
'hadoop.kms.proxyuser.ambari-cl1.users': '*'
}
}
}
# on kerberized cluster property should be recommended
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerKMSConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected)
recommendedConfigurations = {}
services['changed-configurations'] = [
{
'type': 'kms-env',
'name': 'kms_user',
'old_value': 'kmsname'
}
]
services['configurations']['kms-env']['properties']['kms_user'] = 'kmsnew'
expected['core-site'] = {
'properties':
fit_mask_bad.append(b)
if len(user_mask) > 0:
for i in user_mask:
ibad = np.where((lam_gal / (1.0 + z) >= i[0]) & (lam_gal / (1.0 + z) <= i[1]))[0]
for b in ibad:
fit_mask_bad.append(b)
if mask_metal:
# galaxy = interpolate_metal(galaxy,noise)
metal_mask_bad = metal_masker(lam_gal, galaxy, noise, fits_file)
for b in metal_mask_bad:
fit_mask_bad.append(b)
fit_mask_bad = np.sort(np.unique(fit_mask_bad))
fit_mask_good = np.setdiff1d(np.arange(0, len(lam_gal), 1, dtype=int), fit_mask_bad)
###############################################################
c = 299792.458 # speed of light in km/s
frac = lam_gal[1] / lam_gal[0] # Constant lambda fraction per pixel
# dlam_gal = (frac - 1) * lam_gal # Size of every pixel in Angstrom
# print('\n Size of every pixel: %s (A)' % dlam_gal)
# wdisp = t['wdisp'][mask] # Intrinsic dispersion of every pixel, in pixels units
# fwhm_gal = 2.355 * wdisp * dlam_gal # Resolution FWHM of every pixel, in angstroms
fwhm_gal = t['fwhm_res'][mask]
velscale = np.log(frac) * c # Constant velocity scale in km/s per pixel
# If the galaxy is at significant redshift, one should bring the galaxy
# spectrum roughly to the rest-frame wavelength, before calling pPXF
# (See Sec2.4 of Cappellari 2017). In practice there is no
# need to modify the spectrum in any way, given that a red shift
# corresponds to a linear shift of the log-rebinned spectrum.
# One just needs to compute the wavelength range in the rest-frame
# and adjust the instrumental resolution of the galaxy observations.
# This is done with the following three commented lines:
#
lam_gal = lam_gal / (1.0 + z) # Compute approximate restframe wavelength
fwhm_gal = fwhm_gal / (1.0 + z) # Adjust resolution in Angstrom
# fwhm_gal = np.full_like(lam_gal,0.0)
# We pass this interp1d class to the fit_model function to correct for
# the instrumental resolution of emission lines in our model
# fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(1.e-10,1.e-10))
val, idx = find_nearest(lam_gal, 5175)
################################################################################
#################### Correct for galactic extinction ##################
galaxy = ccm_unred(lam_gal, galaxy, ebv)
#######################################################################
# Write to log
write_log((fits_file, ra, dec, z, cosmology, fit_min, fit_max, velscale, ebv), 'prepare_sdss_spec', run_dir)
################################################################################
if plot:
prepare_sdss_plot(lam_gal, galaxy, noise, fit_mask_bad, run_dir)
if verbose:
print('\n')
print('-----------------------------------------------------------')
print('{0:<30}{1:<30}'.format(' file:' , fits_file.name ))
print('{0:<30}{1:<30}'.format(' SDSS redshift:' , '%0.5f' % z ))
print('{0:<30}{1:<30}'.format(' fitting region' , '(%d,%d) [A]' % (fit_reg[0 ],fit_reg[1]) ))
print('{0:<30}{1:<30}'.format(' velocity scale' , '%0.2f [km/s/pixel]' % velscale ))
print('{0:<30}{1:<30}'.format(' Galactic E(B-V):', '%0.3f' % ebv ))
print('-----------------------------------------------------------')
################################################################################
return lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask_good,binnum,spaxelx,spaxely
##################################################################################
# Alias function
prepare_ifu_plot = prepare_sdss_plot
#### Prepare stellar templates ###################################################
def prepare_stellar_templates(galaxy, lam_gal, fit_reg, velscale, fwhm_gal,fit_mask, losvd_options, run_dir):
"""
Prepares stellar templates for convolution using pPXF.
This example is from Capellari's pPXF examples, the code
for which can be found here: https://www-astro.physics.ox.ac.uk/~mxc/.
"""
# Stellar template directory
if (losvd_options["library"]=="IndoUS"):
temp_dir = "badass_data_files/IndoUS/"
fwhm_temp = 1.35 # Indo-US Template Library FWHM in Å (linear)
if (losvd_options["library"]=="Vazdekis2010"):
temp_dir = "badass_data_files/Vazdekis2010/"
fwhm_temp = 2.51 # Vazdekis+10 spectra have a constant resolution FWHM of 2.51A (linear)
if (losvd_options["library"]=="eMILES"):
temp_dir = "badass_data_files/eMILES/"
fwhm_temp = 2.51 # eMILES spectra have a constant resolution FWHM of 2.51A (linear)
fit_min,fit_max = float(fit_reg[0]),float(fit_reg[1])
#
# Get a list of templates stored in temp_dir. We only include 50 stellar
# templates of various spectral type from the Indo-US Coude Feed Library of
# Stellar templates (https://www.noao.edu/cflib/). We choose this library
# because it is (1) empirical, (2) has a broad wavelength range with
# minimal number of gaps, and (3) is at a sufficiently high resolution (~1.35 Å)
# such that we can probe as high a redshift as possible with the SDSS. It may
# be advantageous to use a different stellar template library (such as the MILES
# library) depending on the science goals. BADASS only uses pPXF to measure stellar
# kinematics (i.e, stellar velocity and dispersion), and does NOT compute stellar
# population ages.
temp_list = natsort.natsorted(glob.glob(temp_dir + '/*.fits'))
# Extract the wavelength range and logarithmically rebin one spectrum
# to the same velocity scale of the input galaxy spectrum, to determine
# the size needed for the array which will contain the template spectra.
#
hdu = fits.open(temp_list[0])
ssp = hdu[0].data
h2 = hdu[0].header
hdu.close()
lam_temp = np.array(h2['CRVAL1'] + h2['CDELT1']*np.arange(h2['NAXIS1']))
# By cropping the templates we save some fitting time
mask_temp = ( (lam_temp > (fit_min-100.)) & (lam_temp < (fit_max+100.)) )
ssp = ssp[mask_temp]
lam_temp = lam_temp[mask_temp]
lamRange_temp = [np.min(lam_temp), np.max(lam_temp)]
sspNew = log_rebin(lamRange_temp, ssp, velscale=velscale)[0]
templates = np.empty((sspNew.size, len(temp_list)))
# Interpolates the galaxy spectral resolution at the location of every pixel
# of the templates. Outside the range of the galaxy spectrum the resolution
# will be extrapolated, but this is irrelevant as those pixels cannot be
# used in the fit anyway.
if isinstance(fwhm_gal,(list,np.ndarray)):
fwhm_gal_interp = np.interp(lam_temp, lam_gal, fwhm_gal)
elif isinstance(fwhm_gal,(int,float)):
fwhm_gal_interp = np.full_like(lam_temp,fwhm_gal)
# Convolve the whole Vazdekis library of spectral templates
# with the quadratic difference between the SDSS and the
# Vazdekis instrumental resolution. Logarithmically rebin
# and store each template as a column in the array TEMPLATES.
# Quadratic sigma difference in pixels Vazdekis --> SDSS
# The formula below is rigorously valid if the shapes of the
# instrumental spectral profiles are well approximated by Gaussians.
#
# In the line below, the fwhm_dif is set to zero when fwhm_gal < fwhm_tem.
# In principle it should never happen and a higher resolution template should be used.
#
fwhm_dif = np.sqrt((fwhm_gal_interp**2 - fwhm_temp**2).clip(0))
sigma = fwhm_dif/2.355/h2['CDELT1'] # Sigma difference in pixels
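# Illustrative numbers (not taken from the data): for fwhm_gal_interp = 2.76 Å
# and fwhm_temp = 1.35 Å, fwhm_dif = sqrt(2.76**2 - 1.35**2) ~ 2.41 Å; with
# CDELT1 = 0.4 Å/pixel this gives sigma ~ 2.41/2.355/0.4 ~ 2.6 pixels.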
for j, fname in enumerate(temp_list):
hdu = fits.open(fname)
ssp = hdu[0].data
ssp = ssp[mask_temp]
ssp = gaussian_filter1d(ssp, sigma) # perform convolution with variable sigma
sspNew,loglam_temp,velscale_temp = log_rebin(lamRange_temp, ssp, velscale=velscale)#[0]
templates[:, j] = sspNew/np.median(sspNew) # Normalizes templates
hdu.close()
# The galaxy and the template spectra do not have the same starting wavelength.
# For this reason an extra velocity shift DV has to be applied to the template
# to fit the galaxy spectrum. We remove this artificial shift by using the
# keyword VSYST in the call to PPXF below, so that all velocities are
# measured with respect to DV. This assume the redshift is negligible.
# In the case of a high-redshift galaxy one should de-redshift its
# wavelength to the rest frame before using the line below (see above).
#
c = 299792.458 # speed of light in km/s
vsyst = np.log(lam_temp[0]/lam_gal[0])*c # km/s
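# Illustrative example (assumed wavelengths): if the templates start at 3465 Å
# and the galaxy spectrum at 3800 Å, vsyst = ln(3465/3800)*c ~ -2.8e4 km/s;
# pPXF applies this shift internally via the vsyst keyword so the fitted
# velocities stay close to zero.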
npix = galaxy.shape[0] # number of output pixels
ntemp = np.shape(templates)[1]# number of templates
# Pre-compute FFT of templates, since they do not change (only the LOSVD and convolution changes)
temp_fft,npad = template_rfft(templates) # we will use this throughout the code
# If vel_const AND disp_const are True, there is no need to convolve during the
# fit, so we perform the convolution here and pass the convolved templates to fit_model.
if (losvd_options["vel_const"]["bool"]==True) & (losvd_options["disp_const"]["bool"]==True):
stel_vel = losvd_options["vel_const"]["val"]
stel_disp = losvd_options["disp_const"]["val"]
conv_temp = convolve_gauss_hermite(temp_fft,npad,float(velscale),\
[stel_vel, stel_disp],np.shape(lam_gal)[0],velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
stel_templates = conv_temp
# If vel_const OR disp_const is False, do not perform the convolution.
# Package the stellar templates, vsyst, and npad (everything needed for convolution)
# into a tuple called stel_templates, to be used in fit_model()
elif (losvd_options["vel_const"]["bool"]==False) | (losvd_options["disp_const"]["bool"]==False):
stel_templates = (temp_fft, npad, vsyst)
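# Note on the two return shapes: when both velocity and dispersion are held
# constant, stel_templates is already the convolved template array; otherwise
# it is the tuple (temp_fft, npad, vsyst) and the convolution is deferred to
# fit_model(), which must handle both cases.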
##############################################################################
return stel_templates
##################################################################################
#### Initialize Parameters #######################################################
def initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask_good,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='init',fit_stat="RCHI2",
fit_opt_feii=True,fit_uv_iron=True,fit_balmer=True,
fit_losvd=False,fit_host=True,fit_power=True,fit_poly=False,
fit_narrow=True,fit_broad=True,fit_outflow=True,fit_absorp=True,
tie_line_fwhm=False,tie_line_voff=False,remove_lines=False,verbose=True):
"""
Initializes all free parameters for the fit based on user input and options.
"""
# Issue warnings for dumb options
if ((fit_narrow==False) & (fit_outflow==True)): # why would you fit outflow without narrow lines?
raise ValueError('\n Why would you fit outflows without narrow lines? Turn on narrow line component! \n')
################################################################################
# Initial conditions for some parameters
max_flux = np.nanmax(galaxy)
median_flux = np.nanmedian(galaxy)
# Padding on the edges; any line(s) within this many angstroms is omitted
# from the fit so problems do not occur with the fit
edge_pad = 10.0
def get_init_amp(line_center):
line_center = float(line_center)
try:
return (np.max(galaxy[(lam_gal>line_center-10.) & (lam_gal<line_center+10.)]))
except ValueError:
return
import os
import requests
from typing import Optional, Union, Iterable, Mapping, Sequence
from platypush.plugins import Plugin, action
from platypush.schemas.mastodon import MastodonSchema, MastodonSearchSchema, MastodonAccountCreationSchema, \
MastodonAccountSchema, MastodonStatusSchema, MastodonFeaturedHashtagSchema, MastodonAccountListSchema, \
MastodonFilterSchema, MastodonMediaSchema, MastodonConversationSchema, MastodonListSchema, \
MastodonNotificationSchema
from platypush.utils import get_mime_type
class MastodonPlugin(Plugin):
"""
Plugin to interact with `Mastodon <https://mastodon.social/about>`_ instances.
It requires an active API token associated to an app registered on the instance.
In order to get one:
- Open ``https://<mastodon-base-url>/settings/applications/``
- Create a new application
- Select the scopes relevant for your specific usage.
- Take note of the token reported on the *Your access token* row.
The notifications subscription service requires the ``ngrok`` plugin and the
`http` backend to be enabled, since we need to expose an external URL that
the Mastodon instance can call when new events occur.
"""
class SubscriptionConfig:
tunnel_url: str
local_port: int
auth_secret: str
private_key: str
public_key: str
server_key: str
def __init__(self, base_url: str, access_token: Optional[str] = None, **kwargs):
"""
:param base_url: Base URL of the Mastodon web server, in the form of ``https://<domain-name>``.
:param access_token: Access token as reported on ``https://<base_url>/settings/applications/<app_id>``.
"""
super().__init__(**kwargs)
self._base_url = base_url
self._access_token = access_token
self._subscription_config = self.SubscriptionConfig()
def base_url(self, version: str, base_url: Optional[str] = None) -> str:
return f'{base_url or self._base_url}/api/{version}'
def _run(
self, path: str, method: str = 'get', version: str = 'v2', headers: Optional[dict] = None,
base_url: Optional[str] = None, access_token: Optional[str] = None,
schema: Optional[MastodonSchema] = None, **kwargs
) -> Optional[Union[dict, list]]:
headers = {
'Authorization': f'Bearer {access_token or self._access_token}',
'Accept': 'application/json',
**(headers or {}),
}
method = getattr(requests, method.lower())
rs = method(self.base_url(base_url=base_url, version=version) + '/' + path, headers=headers, **kwargs)
rs.raise_for_status()
rs = rs.json()
if schema:
rs = schema.dump(rs)
return rs
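# Example (illustrative): self._run('accounts/1234', version='v1') issues
# GET <base_url>/api/v1/accounts/1234 with the Bearer token in the
# Authorization header and returns the parsed (optionally schema-dumped)
# JSON body.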
# noinspection PyShadowingBuiltins
@action
def search(
self, query: str, type: Optional[str] = None, min_id: Optional[str] = None,
max_id: Optional[str] = None, limit: int = 20, offset: int = 0, following: bool = False,
**kwargs
) -> Mapping[str, Iterable[dict]]:
"""
Perform a search.
:param query: Search query.
:param type: Filter by type. Supported types:
- ``accounts``
- ``hashtags``
- ``statuses``
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param following: Only return results from accounts followed by the user (default: False).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonSearchSchema
"""
return self._run(
'search',
version='v2',
schema=MastodonSearchSchema(),
params={
'q': query,
**({'type': type} if type else {}),
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
**({'following': following} if following else {}),
}, **kwargs
)
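# Illustrative call (hypothetical values):
#   plugin.search(query='platypush', type='hashtags', limit=5)
# returns a dict typically containing 'accounts', 'hashtags' and 'statuses'
# lists, as described by MastodonSearchSchema.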
@action
def register_account(
self, username: str, email: str, password: str, locale: str = 'en',
reason: Optional[str] = None, **kwargs
) -> dict:
"""
Register a new account.
It requires the specified API token to have ``write:accounts`` permissions.
:param username: Username of the new account.
:param email: User's email address (must be a valid address).
:param password: Password to be used for the first login.
:param locale: Language/encoding for the confirmation email.
:param reason: Text that will be reviewed by moderators if registrations require manual approval.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountCreationSchema
"""
return self._run(
'accounts',
method='post',
version='v1',
schema=MastodonAccountCreationSchema(),
json={
'username': username,
'email': email,
'password': password,
'locale': locale,
'reason': reason,
'agreement': True,
}, **kwargs
)
@action
def update_account(
self, discoverable: Optional[bool] = None, bot: Optional[bool] = None,
display_name: Optional[str] = None, note: Optional[str] = None,
avatar: Optional[str] = None, header: Optional[str] = None,
locked: Optional[bool] = None, privacy: Optional[str] = None,
sensitive: Optional[bool] = None, language: Optional[str] = None,
metadata: Optional[Iterable[Mapping]] = None, **kwargs
) -> dict:
"""
Updates the properties of the account associated to the access token.
It requires the specified API token to have ``write:accounts`` permissions.
:param discoverable: Whether the account should be shown in the profile directory.
:param bot: Whether the account is a bot.
:param display_name: The display name to use for the profile.
:param note: The account bio (HTML is supported).
:param avatar: Path to an avatar image.
:param header: Path to a header image.
:param locked: Whether manual approval of follow requests is required.
:param privacy: Default post privacy for authored statuses.
:param sensitive: Whether to mark authored statuses as sensitive by default.
:param language: Default language to use for authored statuses (ISO 6391 code).
:param metadata: Profile metadata items with ``name`` and ``value``.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema
"""
avatar = os.path.expanduser(avatar) if avatar else None
header = os.path.expanduser(header) if header else None
return self._run(
'accounts/update_credentials',
method='patch',
version='v1',
schema=MastodonAccountSchema(),
data={
**({'discoverable': discoverable} if discoverable is not None else {}),
**({'bot': bot} if bot is not None else {}),
**({'display_name': display_name} if display_name is not None else {}),
**({'note': note} if note is not None else {}),
**({'locked': locked} if locked is not None else {}),
**({'source[privacy]': privacy} if privacy is not None else {}),
**({'source[sensitive]': sensitive} if sensitive is not None else {}),
**({'source[language]': language} if language is not None else {}),
**({'fields_attributes': metadata} if metadata is not None else {}),
},
files={
**({'avatar': (
os.path.basename(avatar), open(avatar, 'rb'), get_mime_type(avatar)
)} if avatar is not None else {}),
**({'header': (
os.path.basename(header), open(header, 'rb'), get_mime_type(header)
)} if header is not None else {}),
},
**kwargs
)
@action
def get_account(self, account_id: str, **kwargs) -> dict:
"""
Retrieve an account by ID.
It requires the specified API token to have ``read:accounts`` permissions.
:param account_id: Account ID to retrieve.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema
"""
return self._run(
f'accounts/{account_id}',
version='v1',
schema=MastodonAccountSchema(),
**kwargs
)
@action
def get_statuses(self, account_id: str, min_id: Optional[str] = None, max_id: Optional[str] = None,
limit: int = 20, offset: int = 0, **kwargs) -> Iterable[dict]:
"""
Retrieve statuses by account ID.
It requires the specified API token to have the ``read:statuses`` permission.
:param account_id: Account ID.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/statuses',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_followers(self, account_id: str, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20, offset: int = 0,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of followers of an account.
It requires the specified API token to have the ``read:accounts`` permission.
:param account_id: Account ID.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/followers',
version='v1',
schema=MastodonAccountSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_following(self, account_id: str, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20, offset: int = 0,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of accounts followed by a specified account.
It requires the specified API token to have the ``read:accounts`` permission.
:param account_id: Account ID.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/following',
version='v1',
schema=MastodonAccountSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_featured_tags(self, account_id: Optional[str] = None,
if any(check_rel):
QMessageBox().critical(self,
'Cell ID = Relative\'s ID', 'Some cells are '
'mother or bud of itself. Make sure that the Relative\'s ID'
' is different from the Cell ID!',
QMessageBox.Ok)
return None
elif any(check_buds_S):
QMessageBox().critical(self,
'Bud in S/G2/M not in 0 Generation number',
'Some buds '
'in S phase do not have 0 as Generation number!\n'
'Buds in S phase must have 0 as "Generation number"',
QMessageBox.Ok)
return None
elif any(check_mothers):
QMessageBox().critical(self,
'Mother not in >=1 Generation number',
'Some mother cells do not have >=1 as "Generation number"!\n'
'Mothers MUST have >1 "Generation number"',
QMessageBox.Ok)
return None
elif any(check_buds_G1):
QMessageBox().critical(self,
'Buds in G1!',
'Some buds are in G1 phase!\n'
'Buds MUST be in S/G2/M phase',
QMessageBox.Ok)
return None
elif num_moth_S != num_bud_S:
QMessageBox().critical(self,
'Number of mothers-buds mismatch!',
f'There are {num_moth_S} mother cells in "S/G2/M" phase,'
f'but there are {num_bud_S} bud cells.\n\n'
'The number of mothers and buds in "S/G2/M" '
'phase must be equal!',
QMessageBox.Ok)
return None
elif any(check_relID_S):
QMessageBox().critical(self,
'Relative\'s ID of cells in S/G2/M = -1',
'Some cells are in "S/G2/M" phase but have -1 as Relative\'s ID!\n'
'Cells in "S/G2/M" phase must have an existing '
'ID as Relative\'s ID!',
QMessageBox.Ok)
return None
else:
corrected_assignment = self.inputCca_df['corrected_assignment']
cca_df = pd.DataFrame({
'cell_cycle_stage': ccsValues,
'generation_num': genNumValues,
'relative_ID': relIDValues,
'relationship': relatValues,
'emerg_frame_i': emergFrameValues,
'division_frame_i': divisFrameValues,
'is_history_known': historyValues,
'corrected_assignment': corrected_assignment},
index=self.IDs)
cca_df.index.name = 'Cell_ID'
d = dict.fromkeys(cca_df.select_dtypes(np.int64).columns, np.int32)
cca_df = cca_df.astype(d)
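# The integer columns are downcast from int64 to int32 here, presumably to
# keep the saved cell-cycle annotation tables compact and consistent across
# platforms.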
return cca_df
def ok_cb(self, checked):
cca_df = self.getCca_df()
if cca_df is None:
return
self.cca_df = cca_df
self.cancel = False
self.close()
def cancel_cb(self, checked):
self.cancel = True
self.close()
def exec_(self):
self.show(block=True)
def show(self, block=False):
self.setWindowFlags(Qt.Dialog | Qt.WindowStaysOnTopHint)
super().show()
w = (
self.viewBox.minimumSizeHint().width()
+ 5*self.tableLayout.columnCount()
)
winGeometry = self.geometry()
l, t, h = winGeometry.left(), winGeometry.top(), winGeometry.height()
self.setGeometry(l, t, w, h)
if block:
self.loop = QEventLoop()
self.loop.exec_()
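# Blocking is emulated with a nested QEventLoop: exec_()/show(block=True)
# spins this loop, and closeEvent() calls loop.exit() so the caller resumes
# once the dialog is closed, without making the window modal.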
def eventFilter(self, object, event):
# Disable wheel scroll on widgets to allow scroll only on scrollarea
if event.type() == QEvent.Wheel:
event.ignore()
return True
return False
def clearComboboxFocus(self):
self.sender().clearFocus()
def closeEvent(self, event):
if hasattr(self, 'loop'):
self.loop.exit()
class askStopFrameSegm(QDialog):
def __init__(
self, user_ch_file_paths, user_ch_name,
concat_segm=False, parent=None
):
self.parent = parent
self.cancel = True
self.concat_segm = concat_segm
super().__init__(parent)
self.setWindowTitle('Enter stop frame')
mainLayout = QVBoxLayout()
formLayout = QFormLayout()
buttonsLayout = QHBoxLayout()
# Message
infoTxt = (
"""
<!DOCTYPE html>
<html>
<head>
<style>
p.big {
line-height: 1.2;
}
</style>
</head>
<body>
<p class="big">
Enter a <b>stop frame number</b> when to stop<br>
segmentation for each Position loaded:
</p>
</body>
</html>
"""
)
infoLabel = QLabel(infoTxt, self)
_font = QtGui.QFont()
_font.setPixelSize(13)
infoLabel.setFont(_font)
infoLabel.setAlignment(Qt.AlignCenter)
# padding: top, left, bottom, right
infoLabel.setStyleSheet("padding:0px 0px 8px 0px;")
self.dataDict = {}
# Form layout widget
for img_path in user_ch_file_paths:
pos_foldername = os.path.basename(
os.path.dirname(
os.path.dirname(img_path)
)
)
spinBox = QSpinBox()
posData = load.loadData(img_path, user_ch_name, QParent=parent)
posData.getBasenameAndChNames()
posData.buildPaths()
posData.loadImgData()
posData.loadOtherFiles(
load_segm_data=False,
load_metadata=True,
loadSegmInfo=True,
)
spinBox.setMaximum(posData.SizeT)
if posData.segmSizeT == 1:
spinBox.setValue(posData.SizeT)
else:
if self.concat_segm and posData.segmSizeT < posData.SizeT:
spinBox.setMinimum(posData.segmSizeT+1)
spinBox.setValue(posData.SizeT)
else:
spinBox.setValue(posData.segmSizeT)
spinBox.setAlignment(Qt.AlignCenter)
visualizeButton = QPushButton('Visualize')
visualizeButton.clicked.connect(self.visualize_cb)
formLabel = QLabel(f'{pos_foldername} ')
layout = QHBoxLayout()
layout.addWidget(formLabel, alignment=Qt.AlignRight)
layout.addWidget(spinBox)
layout.addWidget(visualizeButton)
self.dataDict[visualizeButton] = (spinBox, posData)
formLayout.addRow(layout)
self.formLayout = formLayout
mainLayout.addWidget(infoLabel, alignment=Qt.AlignCenter)
mainLayout.addLayout(formLayout)
okButton = widgets.okPushButton('Ok')
okButton.setShortcut(Qt.Key_Enter)
cancelButton = widgets.cancelPushButton('Cancel')
buttonsLayout.addWidget(okButton, alignment=Qt.AlignRight)
buttonsLayout.addWidget(cancelButton, alignment=Qt.AlignLeft)
buttonsLayout.setContentsMargins(0, 10, 0, 0)
okButton.clicked.connect(self.ok_cb)
cancelButton.clicked.connect(self.close)
mainLayout.addLayout(buttonsLayout)
self.setLayout(mainLayout)
# # self.setModal(True)
def saveSegmSizeT(self):
for spinBox, posData in self.dataDict.values():
posData.segmSizeT = spinBox.value()
posData.saveMetadata()
def ok_cb(self, event):
self.cancel = False
self.saveSegmSizeT()
self.close()
def visualize_cb(self, checked=True):
spinBox, posData = self.dataDict[self.sender()]
posData.frame_i = spinBox.value()-1
self.slideshowWin = imageViewer(
posData=posData, spinBox=spinBox
)
self.slideshowWin.update_img()
self.slideshowWin.framesScrollBar.setDisabled(True)
self.slideshowWin.show()
def exec_(self):
self.show(block=True)
def show(self, block=False):
self.setWindowFlags(Qt.Dialog | Qt.WindowStaysOnTopHint)
super().show()
if block:
self.loop = QEventLoop()
self.loop.exec_()
def closeEvent(self, event):
if hasattr(self, 'loop'):
self.loop.exit()
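# Illustrative usage sketch for askStopFrameSegm (assumption: a QApplication is
# already running; 'phase_contr' is a placeholder channel name):
#
#   win = askStopFrameSegm(user_ch_file_paths, user_ch_name='phase_contr')
#   win.exec_()
#   if not win.cancel:
#       # ok_cb already saved each Position's stop frame via saveSegmSizeT()
#       stop_frames = [spinBox.value() for spinBox, _ in win.dataDict.values()]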
class QLineEditDialog(QDialog):
def __init__(
self, title='Entry messagebox', msg='Entry value',
defaultTxt='', parent=None, allowedValues=None,
warnLastFrame=False, isInteger=False, isFloat=False
):
QDialog.__init__(self, parent)
self.loop = None
self.cancel = True
self.allowedValues = allowedValues
self.warnLastFrame = warnLastFrame
self.isFloat = isFloat
self.isInteger = isInteger
if allowedValues and warnLastFrame:
self.maxValue = max(allowedValues)
self.setWindowTitle(title)
# Layouts
mainLayout = QVBoxLayout()
LineEditLayout = QVBoxLayout()
buttonsLayout = QHBoxLayout()
# Widgets
msg = QLabel(msg)
_font = QtGui.QFont()
_font.setPixelSize(13)
msg.setFont(_font)
msg.setAlignment(Qt.AlignCenter)
# padding: top, left, bottom, right
msg.setStyleSheet("padding:0px 0px 3px 0px;")
if isFloat:
ID_QLineEdit = QDoubleSpinBox()
if allowedValues is not None:
_min, _max = allowedValues
ID_QLineEdit.setMinimum(_min)
ID_QLineEdit.setMaximum(_max)
else:
ID_QLineEdit.setMaximum(2**32)
if defaultTxt:
ID_QLineEdit.setValue(float(defaultTxt))
elif isInteger:
ID_QLineEdit = QSpinBox()
if allowedValues is not None:
_min, _max = allowedValues
ID_QLineEdit.setMinimum(_min)
ID_QLineEdit.setMaximum(_max)
else:
ID_QLineEdit.setMaximum(2147483647)
if defaultTxt:
ID_QLineEdit.setValue(int(defaultTxt))
else:
ID_QLineEdit = QLineEdit()
ID_QLineEdit.setText(defaultTxt)
ID_QLineEdit.textChanged[str].connect(self.ID_LineEdit_cb)
ID_QLineEdit.setFont(_font)
ID_QLineEdit.setAlignment(Qt.AlignCenter)
self.ID_QLineEdit = ID_QLineEdit
if allowedValues is not None:
notValidLabel = QLabel()
notValidLabel.setStyleSheet('color: red')
notValidLabel.setFont(_font)
notValidLabel.setAlignment(Qt.AlignCenter)
self.notValidLabel = notValidLabel
okButton = widgets.okPushButton('Ok')
okButton.setShortcut(Qt.Key_Enter)
cancelButton = widgets.cancelPushButton('Cancel')
# Events
okButton.clicked.connect(self.ok_cb)
cancelButton.clicked.connect(self.cancel_cb)
# Contents margins
buttonsLayout.setContentsMargins(0,10,0,0)
# Add widgets to layouts
LineEditLayout.addWidget(msg, alignment=Qt.AlignCenter)
LineEditLayout.addWidget(ID_QLineEdit)
if allowedValues is not None:
LineEditLayout.addWidget(notValidLabel, alignment=Qt.AlignCenter)
buttonsLayout.addStretch(1)
buttonsLayout.addWidget(cancelButton)
buttonsLayout.insertSpacing(1, 20)
buttonsLayout.addWidget(okButton)
# Add layouts
mainLayout.addLayout(LineEditLayout)
mainLayout.addLayout(buttonsLayout)
self.setLayout(mainLayout)
# self.setModal(True)
def ID_LineEdit_cb(self, text):
# Get inserted char
idx = self.ID_QLineEdit.cursorPosition()
if idx == 0:
return
newChar = text[idx-1]
# Allow only integers
try:
val = int(newChar)
if val > np.iinfo(np.uint16).max:
self.ID_QLineEdit.setText(str(np.iinfo(np.uint16).max))
if self.allowedValues is not None:
currentVal = int(self.ID_QLineEdit.text())
if currentVal not in self.allowedValues:
self.notValidLabel.setText(f'{currentVal} does not exist!')
else:
self.notValidLabel.setText('')
except Exception as e:
text = text.replace(newChar, '')
self.ID_QLineEdit.setText(text)
return
def warnValLessLastFrame(self, val):
msg = QMessageBox()
warn_txt = (f"""
<p style="font-size:12px">
WARNING: saving until a frame number below the last visited
frame ({self.maxValue})<br>
will result in <b>loss of information
about any edit or annotation you did on frames
{val}-{self.maxValue}.</b><br><br>
Are you sure you want to proceed?
</p>
""")
answer = msg.warning(
self, 'WARNING: Potential loss of information',
warn_txt, msg.Yes | msg.Cancel
)
return answer == msg.Cancel
def ok_cb(self, event):
if self.allowedValues:
if self.notValidLabel.text():
return
if self.isFloat or self.isInteger:
val = self.ID_QLineEdit.value()
else:
val = int(self.ID_QLineEdit.text())
if self.warnLastFrame and val < self.maxValue:
cancel = self.warnValLessLastFrame(val)
if cancel:
return
self.cancel = False
self.EntryID = val
self.close()
def cancel_cb(self, event):
self.cancel = True
self.close()
def exec_(self):
self.show(block=True)
def show(self, block=False):
self.setWindowFlags(Qt.Dialog | Qt.WindowStaysOnTopHint)
super().show()
if block:
self.loop = QEventLoop()
self.loop.exec_()
def closeEvent(self, event):
if hasattr(self, 'loop'):
self.loop.exit()
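# Illustrative usage sketch for QLineEditDialog (assumption: a QApplication is
# already running; the frame range below is a placeholder):
#
#   dlg = QLineEditDialog(
#       title='Stop frame', msg='Save until frame number:', defaultTxt='10',
#       allowedValues=(1, 100), warnLastFrame=True, isInteger=True
#   )
#   dlg.exec_()
#   if not dlg.cancel:
#       stop_frame = dlg.EntryID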
class editID_QWidget(QDialog):
def __init__(self, clickedID, IDs, parent=None):
self.IDs = IDs
self.clickedID = clickedID
self.cancel = True
self.how = None
super().__init__(parent)
self.setWindowTitle("Edit ID")
mainLayout = QVBoxLayout()
VBoxLayout = QVBoxLayout()
msg = QLabel(f'Replace ID {clickedID} with:')
_font = QtGui.QFont()
_font.setPixelSize(13)
msg.setFont(_font)
# padding: top, left, bottom, right
msg.setStyleSheet("padding:0px 0px 3px 0px;")
VBoxLayout.addWidget(msg, alignment=Qt.AlignCenter)
ID_QLineEdit = QLineEdit()
ID_QLineEdit.setFont(_font)
ID_QLineEdit.setAlignment(Qt.AlignCenter)
self.ID_QLineEdit = ID_QLineEdit
VBoxLayout.addWidget(ID_QLineEdit)
note = QLabel(
'NOTE: To replace multiple IDs at once\n'
'write "(old ID, new ID), (old ID, new ID)" etc.'
)
note.setFont(_font)
note.setAlignment(Qt.AlignCenter)
# padding: top, left, bottom, right
note.setStyleSheet("padding:12px 0px 0px 0px;")
VBoxLayout.addWidget(note, alignment=Qt.AlignCenter)
mainLayout.addLayout(VBoxLayout)
HBoxLayout = QHBoxLayout()
okButton = widgets.okPushButton('Ok')
okButton.setShortcut(Qt.Key_Enter)
HBoxLayout.addWidget(okButton, alignment=Qt.AlignRight)
cancelButton = widgets.cancelPushButton('Cancel')
# cancelButton.setShortcut(Qt.Key_Escape)
HBoxLayout.addWidget(cancelButton, alignment=Qt.AlignLeft)
HBoxLayout.setContentsMargins(0, 10, 0, 0)
mainLayout.addLayout(HBoxLayout)
self.setLayout(mainLayout)
# Connect events
self.prevText = ''
ID_QLineEdit.textChanged[str].connect(self.ID_LineEdit_cb)
okButton.clicked.connect(self.ok_cb)
cancelButton.clicked.connect(self.cancel_cb)
# self.setModal(True)
def ID_LineEdit_cb(self, text):
# Get inserted char
idx = self.ID_QLineEdit.cursorPosition()
if idx == 0:
return
newChar = text[idx-1]
# Do nothing if user is deleting text
if idx == 0 or len(text)<len(self.prevText):
self.prevText = text
return
# Do not allow chars except for "(", ")", "int", ","
m = re.search(r'\(|\)|\d|,', newChar)
if m is None:
self.prevText = text
text = text.replace(newChar, '')
self.ID_QLineEdit.setText(text)
return
# Cast integers greater than uint16 machine limit
m_iter = re.finditer(r'\d+', self.ID_QLineEdit.text())
for m in m_iter:
val = int(m.group())
uint16_max = np.iinfo(np.uint16).max
if val > uint16_max:
text = self.ID_QLineEdit.text()
text = f'{text[:m.start()]}{uint16_max}{text[m.end():]}'
self.ID_QLineEdit.setText(text)
# Automatically close ( bracket
if newChar == '(':
text += ')'
self.ID_QLineEdit.setText(text)
self.prevText = text
def ok_cb(self, event):
self.cancel = False
txt = self.ID_QLineEdit.text()
valid = False
# Check validity of inserted text
try:
ID = int(txt)
how = [(self.clickedID, ID)]
if ID in self.IDs:
warn_msg = (
f'ID {ID} already exists. If you continue, ID {ID} '
f'will be swapped with ID {self.clickedID}\n\n'
'Do you want to continue?'
)
msg = QMessageBox()
do_swap = msg.warning(
self, 'Invalid entry', warn_msg, msg.Yes | msg.Cancel
)
if do_swap == msg.Yes:
valid = True
else:
return
else:
import os
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import brainload as bl
import brainload.freesurferdata as fsd
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA_DIR = os.path.join(THIS_DIR, os.pardir, 'test_data')
# Respect the environment variable BRAINLOAD_TEST_DATA_DIR if it is set. If not, fall back to default.
TEST_DATA_DIR = os.getenv('BRAINLOAD_TEST_DATA_DIR', TEST_DATA_DIR)
FSAVERAGE_NUM_VERTS_PER_HEMISPHERE = 163842 # number of vertices of the 'fsaverage' subject from FreeSurfer 6.0
FSAVERAGE_NUM_FACES_PER_HEMISPHERE = 327680
SUBJECT1_SURF_LH_WHITE_NUM_VERTICES = 149244 # this number is quite arbitrary: the number of vertices is specific for this subject and surface.
SUBJECT1_SURF_LH_WHITE_NUM_FACES = 298484 # this number is quite arbitrary: the number of faces is specific for this subject and surface.
SUBJECT1_SURF_RH_WHITE_NUM_VERTICES = 153333 # this number is quite arbitrary: the number of vertices is specific for this subject and surface.
SUBJECT1_SURF_RH_WHITE_NUM_FACES = 306662 # this number is quite arbitrary: the number of faces is specific for this subject and surface.
def test_get_morphometry_data_suffix_for_surface_with_surf_white():
suffix = fsd._get_morphometry_data_suffix_for_surface('white')
assert suffix == ''
def test_get_morphometry_data_suffix_for_surface_with_surf_other():
suffix = fsd._get_morphometry_data_suffix_for_surface('pial')
assert suffix == '.pial'
def test_read_mgh_file_with_valid_fsaverage_file():
mgh_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.area.fsaverage.mgh')
mgh_data, mgh_meta_data = fsd.read_mgh_file(mgh_file)
assert mgh_meta_data['data_bytespervox'] == 4
assert len(mgh_meta_data) == 13
assert mgh_data.shape == (FSAVERAGE_NUM_VERTS_PER_HEMISPHERE, 1, 1)
def test_merge_meshes():
m1_vertex_coords = np.array([[0, 0, 0], [5, -5, 0], [5, 5, 0], [10, 5, 0]])
m1_faces = np.array([[0, 1, 2], [1, 2, 3]])
m2_vertex_coords = np.array([[0, 0, 0], [10, -10, 0], [10, 10, 0], [15, 10, 0]])
m2_faces = np.array([[0, 2, 1], [1, 3, 2]])
merged_verts, merged_faces = fsd._merge_meshes(np.array([[m1_vertex_coords, m1_faces], [m2_vertex_coords, m2_faces]]))
assert merged_verts.shape == (8, 3)
assert merged_faces.shape == (4, 3)
# test vertices
assert_allclose(np.array([0, 0, 0]), merged_verts[0])
assert_allclose(np.array([5, -5, 0]), merged_verts[1])
assert_allclose(np.array([0, 0, 0]), merged_verts[4])
assert_allclose(np.array([10, -10, 0]), merged_verts[5])
# test faces without vertex index shift
assert_allclose(np.array([0, 1, 2]), merged_faces[0])
assert_allclose(np.array([1, 2, 3]), merged_faces[1])
# test faces WITH vertex index shift (shift should be +4 because m1 has 4 vertices)
assert_allclose(np.array([4, 6, 5]), merged_faces[2])
assert_allclose(np.array([5, 7, 6]), merged_faces[3])
def test_merge_morphometry_data():
morph_data1 = np.array([0.0, 0.1, 0.2, 0.3])
morph_data2 = np.array([0.4])
morph_data3 = np.array([0.5, 0.6])
merged_data = fsd.merge_morphometry_data(np.array([morph_data1, morph_data2, morph_data3]))
assert merged_data.shape == (7,)
assert merged_data[0] == pytest.approx(0.0, 0.0001)
assert merged_data[4] == pytest.approx(0.4, 0.0001)
assert merged_data[6] == pytest.approx(0.6, 0.0001)
def test_read_fs_surface_file_and_record_meta_data_without_existing_metadata():
surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
vert_coords, faces, meta_data = fsd.read_fs_surface_file_and_record_meta_data(surf_file, 'lh')
assert meta_data['lh.num_vertices'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['lh.num_faces'] == SUBJECT1_SURF_LH_WHITE_NUM_FACES
assert meta_data['lh.surf_file'] == surf_file
assert vert_coords.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_LH_WHITE_NUM_FACES, 3)
assert len(meta_data) == 3
def test_read_fs_surface_file_and_record_meta_data_with_existing_metadata():
surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
vert_coords, faces, meta_data = fsd.read_fs_surface_file_and_record_meta_data(surf_file, 'lh', meta_data={'this_boy': 'still_exists'})
assert vert_coords.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_LH_WHITE_NUM_FACES, 3)
assert len(meta_data) == 4
assert meta_data['this_boy'] == 'still_exists'
def test_read_fs_surface_file_and_record_meta_data_raises_on_wrong_hemisphere_value():
surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
with pytest.raises(ValueError) as exc_info:
vert_coords, faces, meta_data = fsd.read_fs_surface_file_and_record_meta_data(surf_file, 'invalid_hemisphere')
assert 'hemisphere_label must be one of' in str(exc_info.value)
assert 'invalid_hemisphere' in str(exc_info.value)
def test_read_fs_morphometry_data_file_and_record_meta_data_with_subj1_curv_file_without_existing_metadata():
morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
per_vertex_data, meta_data = fsd.read_fs_morphometry_data_file_and_record_meta_data(morphometry_file, 'lh')
assert len(meta_data) == 3
assert meta_data['lh.morphometry_file'] == morphometry_file
assert meta_data['lh.morphometry_file_format'] == 'curv'
assert meta_data['lh.num_data_points'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert per_vertex_data.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES, )
def test_read_fs_morphometry_data_file_and_record_meta_data_with_subj1_curv_file_with_existing_metadata():
morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
per_vertex_data, meta_data = fsd.read_fs_morphometry_data_file_and_record_meta_data(morphometry_file, 'lh', meta_data={'this_boy': 'still_exists'})
assert len(meta_data) == 4
assert meta_data['this_boy'] == 'still_exists'
assert per_vertex_data.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES, )
def test_read_fs_morphometry_data_file_and_record_meta_data_with_fsavg_mgh_file_with_existing_metadata():
morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area.fsaverage.mgh')
per_vertex_data, meta_data = fsd.read_fs_morphometry_data_file_and_record_meta_data(morphometry_file, 'lh', format='mgh', meta_data={'this_boy': 'still_exists'})
assert len(meta_data) == 4
assert meta_data['this_boy'] == 'still_exists'
assert meta_data['lh.morphometry_file'] == morphometry_file
assert meta_data['lh.morphometry_file_format'] == 'mgh'
assert meta_data['lh.num_data_points'] == FSAVERAGE_NUM_VERTS_PER_HEMISPHERE
assert per_vertex_data.shape == (FSAVERAGE_NUM_VERTS_PER_HEMISPHERE, )
def test_read_fs_morphometry_data_file_and_record_meta_data_raises_on_wrong_hemisphere_value():
morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
with pytest.raises(ValueError) as exc_info:
per_vertex_data, meta_data = fsd.read_fs_morphometry_data_file_and_record_meta_data(morphometry_file, 'invalid_hemisphere')
assert 'hemisphere_label must be one of' in str(exc_info.value)
assert 'invalid_hemisphere' in str(exc_info.value)
def test_read_fs_morphometry_data_file_and_record_meta_data_raises_on_wrong_format_value():
morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
with pytest.raises(ValueError) as exc_info:
per_vertex_data, meta_data = fsd.read_fs_morphometry_data_file_and_record_meta_data(morphometry_file, 'lh', format='invalid_format')
assert 'format must be one of' in str(exc_info.value)
assert 'invalid_format' in str(exc_info.value)
def test_load_subject_mesh_files_raises_on_invalid_hemi():
lh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
rh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.white')
with pytest.raises(ValueError) as exc_info:
vert_coords, faces, meta_data = fsd.load_subject_mesh_files(lh_surf_file, rh_surf_file, hemi='invalid_hemisphere')
assert 'hemi must be one of' in str(exc_info.value)
assert 'invalid_hemisphere' in str(exc_info.value)
def test_load_subject_mesh_files():
lh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
rh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.white')
vert_coords, faces, meta_data = fsd.load_subject_mesh_files(lh_surf_file, rh_surf_file)
assert meta_data['lh.num_vertices'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['lh.num_faces'] == SUBJECT1_SURF_LH_WHITE_NUM_FACES
assert meta_data['lh.surf_file'] == lh_surf_file
assert meta_data['rh.num_vertices'] == SUBJECT1_SURF_RH_WHITE_NUM_VERTICES
assert meta_data['rh.num_faces'] == SUBJECT1_SURF_RH_WHITE_NUM_FACES
assert meta_data['rh.surf_file'] == rh_surf_file
assert vert_coords.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES + SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_LH_WHITE_NUM_FACES + SUBJECT1_SURF_RH_WHITE_NUM_FACES, 3)
assert len(meta_data) == 6
def test_load_subject_mesh_files_preserves_existing_meta_data():
lh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
rh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.white')
vert_coords, faces, meta_data = fsd.load_subject_mesh_files(lh_surf_file, rh_surf_file, hemi='both', meta_data={'this_boy': 'still_exists'})
assert vert_coords.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES + SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_LH_WHITE_NUM_FACES + SUBJECT1_SURF_RH_WHITE_NUM_FACES, 3)
assert meta_data['this_boy'] == 'still_exists'
assert len(meta_data) == 7
def test_load_subject_mesh_files_works_with_left_hemisphere_only():
lh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
vert_coords, faces, meta_data = fsd.load_subject_mesh_files(lh_surf_file, None, hemi='lh')
assert meta_data['lh.num_vertices'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['lh.num_faces'] == SUBJECT1_SURF_LH_WHITE_NUM_FACES
assert meta_data['lh.surf_file'] == lh_surf_file
assert vert_coords.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_LH_WHITE_NUM_FACES, 3)
assert len(meta_data) == 3
def test_load_subject_mesh_files_works_with_right_hemisphere_only():
rh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.white')
vert_coords, faces, meta_data = fsd.load_subject_mesh_files(None, rh_surf_file, hemi='rh')
assert meta_data['rh.num_vertices'] == SUBJECT1_SURF_RH_WHITE_NUM_VERTICES
assert meta_data['rh.num_faces'] == SUBJECT1_SURF_RH_WHITE_NUM_FACES
assert meta_data['rh.surf_file'] == rh_surf_file
assert vert_coords.shape == (SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_RH_WHITE_NUM_FACES, 3)
assert len(meta_data) == 3
def test_load_subject_morphometry_data_files():
lh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
rh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.area')
morphometry_data, meta_data = fsd.load_subject_morphometry_data_files(lh_morphometry_file, rh_morphometry_file)
assert meta_data['lh.morphometry_file'] == lh_morphometry_file
assert meta_data['lh.morphometry_file_format'] == 'curv'
assert meta_data['lh.num_data_points'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['rh.morphometry_file'] == rh_morphometry_file
assert meta_data['rh.morphometry_file_format'] == 'curv'
assert meta_data['rh.num_data_points'] == SUBJECT1_SURF_RH_WHITE_NUM_VERTICES
assert len(meta_data) == 6
assert morphometry_data.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES + SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, )
def test_load_subject_morphometry_data_files_preserves_existing_meta_data():
lh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
rh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.area')
morphometry_data, meta_data = fsd.load_subject_morphometry_data_files(lh_morphometry_file, rh_morphometry_file, meta_data={'this_boy': 'still_exists'})
assert meta_data['this_boy'] == 'still_exists'
assert len(meta_data) == 7
assert morphometry_data.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES + SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, )
def test_load_subject_morphometry_data_files_works_with_left_hemisphere_only():
lh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
morphometry_data, meta_data = fsd.load_subject_morphometry_data_files(lh_morphometry_file, None, hemi='lh')
assert meta_data['lh.morphometry_file'] == lh_morphometry_file
assert meta_data['lh.morphometry_file_format'] == 'curv'
assert meta_data['lh.num_data_points'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['rh.num_data_points'] == 0
assert len(meta_data) == 4
assert morphometry_data.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES, )
def test_load_subject_morphometry_data_files_works_with_right_hemisphere_only():
rh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.area')
morphometry_data, meta_data = fsd.load_subject_morphometry_data_files(None, rh_morphometry_file, hemi='rh')
assert meta_data['rh.morphometry_file'] == rh_morphometry_file
assert meta_data['rh.morphometry_file_format'] == 'curv'
assert meta_data['rh.num_data_points'] == SUBJECT1_SURF_RH_WHITE_NUM_VERTICES
assert meta_data['lh.num_data_points'] == 0
assert len(meta_data) == 4
assert morphometry_data.shape == (SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, )
def test_load_subject_morphometry_data_files_raises_on_invalid_format():
with pytest.raises(ValueError) as exc_info:
morphometry_data, meta_data = fsd.load_subject_morphometry_data_files('some_file', 'some_other_file', format='invalid_format')
assert 'format must be one of' in str(exc_info.value)
assert 'invalid_format' in str(exc_info.value)
def test_load_subject_morphometry_data_files_raises_on_invalid_hemisphere():
with pytest.raises(ValueError) as exc_info:
morphometry_data, meta_data = fsd.load_subject_morphometry_data_files('some_file', 'some_other_file', hemi='invalid_hemisphere')
assert 'hemi must be one of' in str(exc_info.value)
assert 'invalid_hemisphere' in str(exc_info.value)
def test_parse_subject():
vert_coords, faces, morphometry_data, meta_data = bl.subject('subject1', subjects_dir=TEST_DATA_DIR)
assert len(meta_data) == 20
expected_subjects_dir = TEST_DATA_DIR
expected_lh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
expected_rh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.white')
expected_lh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
expected_rh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'rh.area')
assert meta_data['lh.num_vertices'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['lh.num_faces'] == SUBJECT1_SURF_LH_WHITE_NUM_FACES
assert meta_data['lh.surf_file'] == expected_lh_surf_file
assert meta_data['rh.num_vertices'] == SUBJECT1_SURF_RH_WHITE_NUM_VERTICES
assert meta_data['rh.num_faces'] == SUBJECT1_SURF_RH_WHITE_NUM_FACES
assert meta_data['rh.surf_file'] == expected_rh_surf_file
assert meta_data['lh.morphometry_file'] == expected_lh_morphometry_file
assert meta_data['lh.morphometry_file_format'] == 'curv'
assert meta_data['lh.num_data_points'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['rh.morphometry_file'] == expected_rh_morphometry_file
assert meta_data['rh.morphometry_file_format'] == 'curv'
assert meta_data['rh.num_data_points'] == SUBJECT1_SURF_RH_WHITE_NUM_VERTICES
assert meta_data['subject_id'] == 'subject1'
assert meta_data['display_subject'] == 'subject1'
assert meta_data['subjects_dir'] == expected_subjects_dir
assert meta_data['surf'] == 'white'
assert meta_data['display_surf'] == 'white'
assert meta_data['measure'] == 'area'
assert meta_data['space'] == 'native_space'
assert meta_data['hemi'] == 'both'
assert vert_coords.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES + SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, 3)
assert faces.shape == (SUBJECT1_SURF_LH_WHITE_NUM_FACES + SUBJECT1_SURF_RH_WHITE_NUM_FACES, 3)
assert morphometry_data.shape == (SUBJECT1_SURF_LH_WHITE_NUM_VERTICES + SUBJECT1_SURF_RH_WHITE_NUM_VERTICES, )
def test_parse_subject_preserves_existing_meta_data():
vert_coords, faces, morphometry_data, meta_data = bl.subject('subject1', subjects_dir=TEST_DATA_DIR, meta_data={'this_boy': 'still_exists'})
assert len(meta_data) == 21
assert meta_data['this_boy'] == 'still_exists'
def test_parse_subject_raises_on_invalid_hemisphere():
with pytest.raises(ValueError) as exc_info:
vert_coords, faces, morphometry_data, meta_data = bl.subject('subject1', subjects_dir=TEST_DATA_DIR, hemi='invalid_hemisphere')
assert 'hemi must be one of' in str(exc_info.value)
assert 'invalid_hemisphere' in str(exc_info.value)
def test_parse_subject_works_with_left_hemisphere_only():
vert_coords, faces, morphometry_data, meta_data = bl.subject('subject1', subjects_dir=TEST_DATA_DIR, hemi='lh')
assert len(meta_data) == 15
expected_subjects_dir = TEST_DATA_DIR
expected_lh_surf_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.white')
expected_lh_morphometry_file = os.path.join(TEST_DATA_DIR, 'subject1', 'surf', 'lh.area')
assert meta_data['lh.num_vertices'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['lh.num_faces'] == SUBJECT1_SURF_LH_WHITE_NUM_FACES
assert meta_data['lh.surf_file'] == expected_lh_surf_file
assert meta_data['lh.morphometry_file'] == expected_lh_morphometry_file
assert meta_data['lh.morphometry_file_format'] == 'curv'
assert meta_data['lh.num_data_points'] == SUBJECT1_SURF_LH_WHITE_NUM_VERTICES
assert meta_data['rh.num_data_points'] == 0
assert meta_data['subject_id'] == 'subject1'
assert meta_data['subjects_dir'] == expected_subjects_dir
assert meta_data['surf'] == 'white'
assert meta_data['measure'] == 'area'
assert meta_data['space'] == 'native_space'
assert meta_data['hemi'] == 'lh'
"""
Script to define the classifiers from individual sensor streams
Input: Training and Test set
Output: Classified data frame
Date: Sep 18, 2014
Author: <NAME>
"""
import os
import time
import math
import pandas as pd
from django.conf import settings
from sklearn.externals import joblib
from django_pandas.io import read_frame
from common_imports import *
from energylenserver.core import audio
from energylenserver.core import location as lc
from energylenserver.models.models import *
from energylenserver.models.DataModels import WiFiTestData
from energylenserver.common_imports import *
from energylenserver.core import functions as func
from energylenserver.models import functions as mod_func
from energylenserver.preprocessing import wifi as pre_p_w
from energylenserver.preprocessing import audio as pre_p_a
from constants import lower_mdp_percent_change, upper_mdp_percent_change, no_test_data
# Offline processing
from energylenserver.common_offline import *
base_dir = settings.BASE_DIR
def get_trained_model(sensor, apt_no, phone_model):
"""
Get the trained model for the given sensor, or train one if no trained model exists yet
# Future TODO: Adding new localization models
"""
if sensor == "wifi":
# Get WiFi training data
user_list = mod_func.get_users_for_training(apt_no, phone_model)
data = mod_func.get_sensor_training_data("wifi", apt_no, user_list)
train_df = read_frame(data, verbose=False)
train_df.drop_duplicates(train_df.columns[1:], inplace=True)
if len(train_df) == 0:
return train_df
dst_folder = os.path.join(base_dir, 'energylenserver/trained_models/wifi/')
folder_listing = os.listdir(dst_folder)
for file_i in folder_listing:
filename_arr = file_i.split("_")
# Use model if exists
if filename_arr[0] == str(apt_no) and filename_arr[1] == phone_model:
n_records = int(filename_arr[2])
if n_records == len(train_df):
# Use existing
train_df = pd.read_csv(dst_folder + file_i)
return train_df
# Model folder empty -- No model exists - Create one
train_df = pre_p_w.format_train_data(train_df, apt_no, phone_model)
return train_df
if sensor in ["rawaudio", "audio"]:
dst_folder = os.path.join(base_dir, 'energylenserver/trained_models/audio/')
folder_listing = os.listdir(dst_folder)
for file_i in folder_listing:
filename_arr = file_i.split("_")
# Use model if exists
if filename_arr[0] == str(apt_no) and filename_arr[1] == phone_model:
n_trained_appl = int(filename_arr[2])
# Number of appliances in the metadata
data = mod_func.retrieve_metadata(apt_no)
metadata_df = read_frame(data, verbose=False)
metadata_df['appliance'] = metadata_df.appliance.apply(lambda s: s.split('_')[0])
# metadata_df = metadata_df[-metadata_df.appliance.isin(['Fridge'])]
m_appl_count = len(metadata_df.appliance.unique())
if n_trained_appl == m_appl_count:
# Use existing
model = joblib.load(dst_folder + file_i)
return model
# Model folder empty -- No model exists - Create one
model = audio.train_audio_classification_model(sensor, apt_no, phone_model)
return model
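# Illustrative usage sketch (assumption: apt_no and phone_model are placeholders
# that exist in the training tables). The model cached on disk is keyed by
# "<apt_no>_<phone_model>_<count>", so it is rebuilt automatically whenever the
# amount of training data (or the number of metadata appliances) changes:
#
#   wifi_train_df = get_trained_model("wifi", apt_no, phone_model)
#   audio_model = get_trained_model("audio", apt_no, phone_model)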
def localize_new_data(apt_no, start_time, end_time, user):
try:
pmodel = user.phone_model
dev_id = user.dev_id
sensor = "wifi"
# Get training data
train_df = get_trained_model(sensor, apt_no, pmodel)
if len(train_df) == 0:
location = "Unknown"
return location
# Get test data for the past x min - for better classification
s_time = start_time - 10 * 60 # 10 minutes
# Get queryset of filtering later
data_all = WiFiTestData.objects.all()
# Get test data - filter for all queryset
data = data_all.filter(dev_id__in=[dev_id],
timestamp__gte=s_time,
timestamp__lte=end_time)
test_df = read_frame(data, verbose=False)
test_df.drop_duplicates(test_df.columns[1:], inplace=True)
# Format data for classification
test_df = pre_p_w.format_data_for_classification(test_df)
# Classify
pred_label = lc.determine_location(train_df, test_df)
test_df['label'] = pred_label
# Save location label to the database
sliced_df = test_df[(test_df.time >= start_time) & (test_df.time <= end_time)]
if len(sliced_df) == 0:
location = "Unknown"
else:
location = func.get_max_class(sliced_df['label'])
data = data_all.filter(dev_id__in=[dev_id],
timestamp__gte=start_time,
timestamp__lte=end_time).update(label=location)
logger.debug("%s :: Test data between [%s] and [%s] :: %s",
user.name, time.ctime(start_time), time.ctime(end_time),
location)
return location
# Offline processing
return location, sliced_df
except Exception, e:
if str(e) == "(1205, 'Lock wait timeout exceeded; try restarting transaction')":
logger.error("[ClassifyNewLocationDataException]:: %s", e)
else:
logger.exception("[ClassifyNewLocationDataException]:: %s", e)
return location
def correct_label(label, pred_label, label_type, edge, act_location):
"""
Classified label correction using metadata.
Procedure: match against the metadata; if there is no match, correct the label based on the edge magnitude.
"""
logger.debug("[Correcting Labeling]")
logger.debug("-" * stars)
apt_no = edge.meter.apt_no
magnitude = math.fabs(edge.magnitude)
old_label = label
# Get Metadata
data = mod_func.retrieve_metadata(apt_no)
metadata_df = read_frame(data, verbose=False)
# Check if it matches with the metadata
if label_type == "location":
in_metadata, matched_md_l = func.exists_in_metadata(
apt_no, label, "all", magnitude, metadata_df, logger, "dummy_user")
else:
in_metadata, matched_md_l = func.exists_in_metadata(
apt_no, "all", label, magnitude, metadata_df, logger, "dummy_user")
# Indicates the (label, edge_mag) does not exist --> incorrect label
if not in_metadata:
logger.debug("Label not found in Metadata. Checking all entries..")
in_metadata, matched_md_list = func.exists_in_metadata(
apt_no, "not_all", "not_all", magnitude, metadata_df, logger, "dummy_user")
# Correction only if the inferred appliance is not audio
if in_metadata:
# Matched entries
matched_md = pd.concat(matched_md_list)
matched_md.reset_index(drop=True, inplace=True)
# From metadata, if inferred appliance is audio or presence based
metadata_df['appliance'] = metadata_df.appliance.apply(lambda s: s.split('_')[0])
md_df = metadata_df.ix[:, ['appliance', 'presence_based',
'audio_based']].drop_duplicates()
md_df.reset_index(inplace=True, drop=True)
md_audio = md_df.ix[0]['audio_based']
md_presence = md_df.ix[0]['presence_based']
# Non-audio based
if not md_audio:
# Presence based appliance
if md_presence and act_location != "dummy":
matched_md = matched_md[matched_md.md_loc == act_location]
appl_list = matched_md.md_appl.unique()
logger.debug("Entry in the same activity location %s:\n %s", act_location,
matched_md)
if len(appl_list) == 1:
label = appl_list[0]
logger.debug("Corrected Label: %s --> %s", old_label, label)
return label
# Correction process -- Select the one closest to the metadata
matched_md = matched_md[
matched_md.md_power_diff == matched_md.md_power_diff.min()]
logger.debug(
"Entry with least distance from the metadata:\n %s", matched_md)
if label_type == "location":
unique_label = matched_md.md_loc.unique().tolist()
else:
unique_label = matched_md.md_appl.unique().tolist()
if len(unique_label) == 1:
label = unique_label[0]
else:
# Multiple matched entries - select the one with the
# maximum count in the pred_label
idict = func.list_count_items(pred_label)
# Remove the entries not in unique label list
filtered_l = [key for key in idict.keys() if key in unique_label]
# Get the max count
new_label_list = []
for key in filtered_l:
for l in pred_label:
if key == l:
new_label_list.append(key)
label = func.get_max_class(pd.Series(new_label_list))
else:
# No matching metadata found
# Cause: different power consumption of an appliance
# Solution: Select the one with the highest value
logger.debug("No metadata found")
new_label_list = [l for l in pred_label if l != old_label]
label = func.get_max_class(pd.Series(new_label_list))
logger.debug("Corrected Label: %s --> %s", old_label, label)
return label
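# Worked example of the correction above (all values are illustrative):
# if the classifier predicts "Bedroom" for a 950 W edge but the metadata has no
# (Bedroom, ~950 W) entry, exists_in_metadata() is re-run against all entries.
# If (Kitchen, 1000 W) and (Hall, 900 W) both match within the allowed percent
# change, the entry with the smallest md_power_diff wins; if several labels
# remain, the one occurring most often in pred_label is returned.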
def classify_location(apt_no, start_time, end_time, user, edge, n_users_at_home):
logger.debug("[Classifying location] - %s [%d]", user, edge.magnitude)
logger.debug("-" * stars)
try:
pmodel = user.phone_model
dev_id = user.dev_id
sensor = "wifi"
# Get WiFi test data
data = mod_func.get_sensor_data("wifi", start_time, end_time, [dev_id])
test_df = read_frame(data, verbose=False)
test_df.drop_duplicates(test_df.columns[1:], inplace=True)
test_df.reset_index(drop=True, inplace=True)
# '''
location_list = test_df.label.unique()
logger.debug("Pre-labeled locations: %s", location_list)
if len(location_list) == 0:
logger.debug("Insufficient test data")
return False # For Offline processing
return no_test_data
if "none" not in location_list and "Unknown" not in location_list:
location = func.get_max_class(test_df['label'])
'''
Commented: Can't correct bcoz we need the user's location
and not appliance location as in EnergyLens
if n_users_at_home == 1:
location = correct_label(location, test_df['label'], 'location', edge)
data.update(label=location)
'''
# Offline processing - evaluation
'''
Check with ground truth and attribute reason
'''
match = match_location(apt_no, dev_id, edge.timestamp, location)
logger.debug("Match %s", match)
# Write location
write_classification_labels(apt_no, dev_id, edge.timestamp, "location", location)
if not match:
write_reason(apt_no, dev_id, edge.timestamp, "location", "localization")
else:
write_reason(apt_no, dev_id, edge.timestamp, "correct", "")
return location
# '''
'''
# Get queryset of filtering later
data_all = WiFiTestData.objects.all()
# Get test data - filter for all queryset
data = data_all.filter(dev_id__in=[dev_id],
timestamp__gte=s_time,
timestamp__lte=end_time)
'''
# Format data for classification
train_df = get_trained_model(sensor, apt_no, pmodel)
test_df = pre_p_w.format_data_for_classification(test_df)
# Classify
pred_label = lc.determine_location(train_df, test_df)
test_df['pred_label'] = pred_label
# Save location label to the database
sliced_df = test_df[
(test_df.time >= (start_time + 60)) & (test_df.time <= end_time)]
if len(sliced_df) == 0:
location = "Unknown"
else:
location = func.get_max_class(sliced_df['pred_label'])
# Offline processing - evaluation
'''
Check with ground truth and attribute reason
'''
match = match_location(apt_no, dev_id, edge.timestamp, location)
# Write location
write_classification_labels(apt_no, dev_id, edge.timestamp, "location", location)
if not match:
write_reason(apt_no, dev_id, edge.timestamp, "location", "localization")
else:
write_reason(apt_no, dev_id, edge.timestamp, "correct", "")
'''
Commented: Can't correct bcoz we need user's location for usage/detection
and not appliance location as in EnergyLens
if n_users_at_home == 1:
location = correct_label(location, sliced_df['pred_label'], 'location', edge)
'''
data.update(label=location)
# data = data_all.filter(dev_id__in=[dev_id],
# timestamp__gte=s_time,
# timestamp__lte=end_time).update(label=location)
# Update
# sliced_df = test_df[(test_df.time >= (start_time + 45)
# ) & (test_df.time) <= end_time]
# location = func.get_max_class(pred_label)
return location
except Exception, e:
if str(e) == "(1205, 'Lock wait timeout exceeded; try restarting transaction')":
logger.error("[ClassifyLocationException]:: %s", e)
else:
logger.exception("[ClassifyLocationException]:: %s", e)
return location
def classify_appliance(apt_no, start_time, end_time, user, edge, n_users_at_home):
"""
Classifies appliance based on audio or metadata
"""
logger.debug("[Classifying appliance] - %s [%d]", user, edge.magnitude)
logger.debug("-" * stars)
try:
appliance = "Unknown"
# Get Metadata
data = mod_func.retrieve_metadata(apt_no)
metadata_df = read_frame(data, verbose=False)
# Check for existence
in_metadata, matched_md = func.exists_in_metadata(
apt_no, "not_all", "not_all", math.fabs(edge.magnitude), metadata_df,
logger, user.dev_id)
if in_metadata:
# --Classify using metadata--
md_df = pd.concat(matched_md)
md_df.reset_index(drop=True, inplace=True)
md_audio = md_df.md_audio.unique()
md_presence = md_df.md_presence.unique()
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * <EMAIL>
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import sys # arg
import re # reg
import logging
import os
import shutil
import subprocess
import time
from multiprocessing import Process, Lock, SimpleQueue
from zipfile import ZipFile
#-----------------------------------------------------------
# OAI Testing modules
#-----------------------------------------------------------
import sshconnection as SSH
import helpreadme as HELP
import constants as CONST
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class Containerize():
def __init__(self):
self.ranRepository = ''
self.ranBranch = ''
self.ranAllowMerge = False
self.ranCommitID = ''
self.ranTargetBranch = ''
self.eNBIPAddress = ''
self.eNBUserName = ''
self.eNBPassword = ''
self.eNBSourceCodePath = ''
self.eNB1IPAddress = ''
self.eNB1UserName = ''
self.eNB1Password = ''
self.eNB1SourceCodePath = ''
self.eNB2IPAddress = ''
self.eNB2UserName = ''
self.eNB2Password = ''
self.eNB2SourceCodePath = ''
self.forcedWorkspaceCleanup = False
self.imageKind = ''
self.eNB_instance = 0
self.eNB_serverId = ['', '', '']
self.yamlPath = ['', '', '']
self.services = ['', '', '']
self.nb_healthy = [0, 0, 0]
self.exitStatus = 0
self.eNB_logFile = ['', '', '']
self.testCase_id = ''
self.flexranCtrlDeployed = False
self.flexranCtrlIpAddress = ''
self.cli = ''
self.cliBuildOptions = ''
self.dockerfileprefix = ''
self.host = ''
self.allImagesSize = {}
self.collectInfo = {}
self.pingContName = ''
self.pingOptions = ''
self.pingLossThreshold = ''
self.svrContName = ''
self.svrOptions = ''
self.cliContName = ''
self.cliOptions = ''
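# Illustrative driver sketch (assumption: the CI main script sets these fields;
# the repository URL, credentials and paths below are placeholders):
#
#   CONTAINERS = Containerize()
#   CONTAINERS.ranRepository = 'https://gitlab.example.org/oai/openairinterface5g.git'
#   CONTAINERS.ranBranch = 'develop'
#   CONTAINERS.ranCommitID = '<commit-sha>'
#   CONTAINERS.eNB_serverId[0] = '0'
#   CONTAINERS.eNBIPAddress = '192.168.0.1'
#   CONTAINERS.eNBUserName = 'oaici'
#   CONTAINERS.eNBPassword = '********'
#   CONTAINERS.eNBSourceCodePath = '/tmp/CI-build'
#   CONTAINERS.imageKind = 'all'
#   CONTAINERS.BuildImage(HTML)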
#-----------------------------------------------------------
# Container management functions
#-----------------------------------------------------------
def BuildImage(self, HTML):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
if self.eNB_serverId[self.eNB_instance] == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId[self.eNB_instance] == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
HELP.GenericHelp(CONST.Version)
sys.exit('Insufficient Parameter')
logging.debug('Building on server: ' + lIpAddr)
mySSH = SSH.SSHConnection()
mySSH.open(lIpAddr, lUserName, lPassWord)
# Checking the hostname to get adapted on cli and dockerfileprefixes
mySSH.command('hostnamectl', '\$', 5)
result = re.search('Ubuntu|Red Hat', mySSH.getBefore())
self.host = result.group(0)
if self.host == 'Ubuntu':
self.cli = 'docker'
self.dockerfileprefix = '.ubuntu18'
self.cliBuildOptions = '--no-cache'
elif self.host == 'Red Hat':
self.cli = 'sudo podman'
self.dockerfileprefix = '.rhel8.2'
self.cliBuildOptions = '--no-cache --disable-compression'
imageNames = []
result = re.search('eNB', self.imageKind)
# Creating a tuple with the imageName and the Dockerfile prefix pattern on obelix
if result is not None:
imageNames.append(('oai-enb', 'eNB'))
else:
result = re.search('gNB', self.imageKind)
if result is not None:
imageNames.append(('oai-gnb', 'gNB'))
else:
result = re.search('all', self.imageKind)
if result is not None:
imageNames.append(('oai-enb', 'eNB'))
imageNames.append(('oai-gnb', 'gNB'))
imageNames.append(('oai-lte-ue', 'lteUE'))
imageNames.append(('oai-nr-ue', 'nrUE'))
if self.host == 'Red Hat':
imageNames.append(('oai-physim', 'phySim'))
if self.host == 'Ubuntu':
imageNames.append(('oai-lte-ru', 'lteRU'))
if len(imageNames) == 0:
imageNames.append(('oai-enb', 'eNB'))
# Workaround for some servers, we need to erase completely the workspace
if self.forcedWorkspaceCleanup:
mySSH.command('echo ' + lPassWord + ' | sudo -S rm -Rf ' + lSourcePath, '\$', 15)
self.testCase_id = HTML.testCase_id
# on RedHat/CentOS .git extension is mandatory
result = re.search('([a-zA-Z0-9\:\-\.\/])+\.git', self.ranRepository)
if result is not None:
full_ran_repo_name = self.ranRepository.replace('git/', 'git')
else:
full_ran_repo_name = self.ranRepository + '.git'
mySSH.command('mkdir -p ' + lSourcePath, '\$', 5)
mySSH.command('cd ' + lSourcePath, '\$', 5)
mySSH.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + full_ran_repo_name + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
# Raphael: here add a check if git clone or git fetch went smoothly
mySSH.command('git config user.email "<EMAIL>"', '\$', 5)
mySSH.command('git config user.name "<NAME>"', '\$', 5)
mySSH.command('echo ' + lPassWord + ' | sudo -S git clean -x -d -ff', '\$', 30)
mySSH.command('mkdir -p cmake_targets/log', '\$', 5)
# if the commit ID is provided use it to point to it
if self.ranCommitID != '':
mySSH.command('git checkout -f ' + self.ranCommitID, '\$', 30)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already been checked earlier
imageTag = 'develop'
sharedTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
mySSH.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
mySSH.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
# if asterix, copy the entitlement and subscription manager configurations
if self.host == 'Red Hat':
mySSH.command('mkdir -p tmp/ca/', '\$', 5)
mySSH.command('mkdir -p tmp/entitlement/', '\$', 5)
mySSH.command('sudo cp /etc/rhsm/ca/redhat-uep.pem tmp/ca/', '\$', 5)
mySSH.command('sudo cp /etc/pki/entitlement/*.pem tmp/entitlement/', '\$', 5)
sharedimage = 'ran-build'
# Let's remove any previous run artifacts if still there
mySSH.command(self.cli + ' image prune --force', '\$', 30)
if (not self.ranAllowMerge):
mySSH.command(self.cli + ' image rm ' + sharedimage + ':' + sharedTag, '\$', 30)
for image,pattern in imageNames:
mySSH.command(self.cli + ' image rm ' + image + ':' + imageTag, '\$', 30)
# Build the shared image only on Push Events (not on Merge Requests)
if (not self.ranAllowMerge):
mySSH.command(self.cli + ' build ' + self.cliBuildOptions + ' --target ' + sharedimage + ' --tag ' + sharedimage + ':' + sharedTag + ' --file docker/Dockerfile.ran' + self.dockerfileprefix + ' --build-arg NEEDED_GIT_PROXY="http://proxy.eurecom.fr:8080" . > cmake_targets/log/ran-build.log 2>&1', '\$', 1600)
# First verify if the shared image was properly created.
status = True
mySSH.command(self.cli + ' image inspect --format=\'Size = {{.Size}} bytes\' ' + sharedimage + ':' + sharedTag, '\$', 5)
if mySSH.getBefore().count('o such image') != 0:
logging.error('\u001B[1m Could not build properly ran-build\u001B[0m')
status = False
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', mySSH.getBefore())
if result is not None:
imageSize = float(result.group('size'))
imageSize = imageSize / 1000
if imageSize < 1000:
logging.debug('\u001B[1m ran-build size is ' + ('%.0f' % imageSize) + ' kbytes\u001B[0m')
self.allImagesSize['ran-build'] = str(round(imageSize,1)) + ' kbytes'
else:
imageSize = imageSize / 1000
if imageSize < 1000:
logging.debug('\u001B[1m ran-build size is ' + ('%.0f' % imageSize) + ' Mbytes\u001B[0m')
self.allImagesSize['ran-build'] = str(round(imageSize,1)) + ' Mbytes'
else:
imageSize = imageSize / 1000
logging.debug('\u001B[1m ran-build size is ' + ('%.3f' % imageSize) + ' Gbytes\u001B[0m')
self.allImagesSize['ran-build'] = str(round(imageSize,1)) + ' Gbytes'
else:
logging.debug('ran-build size is unknown')
# If the shared image failed, no need to continue
if not status:
# Recover the name of the failed container?
mySSH.command(self.cli + ' ps --quiet --filter "status=exited" -n1 | xargs ' + self.cli + ' rm -f', '\$', 5)
mySSH.command(self.cli + ' image prune --force', '\$', 30)
mySSH.close()
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
sys.exit(1)
else:
# Recover build logs, for the moment only possible when build is successful
mySSH.command(self.cli + ' create --name test ' + sharedimage + ':' + sharedTag, '\$', 5)
mySSH.command('mkdir -p cmake_targets/log/ran-build', '\$', 5)
mySSH.command(self.cli + ' cp test:/oai-ran/cmake_targets/log/. cmake_targets/log/ran-build', '\$', 5)
mySSH.command(self.cli + ' rm -f test', '\$', 5)
# Build the target image(s)
for image,pattern in imageNames:
# the archived Dockerfiles have "ran-build:latest" as base image
# we need to update them with proper tag
mySSH.command('sed -i -e "s#' + sharedimage + ':latest#' + sharedimage + ':' + sharedTag + '#" docker/Dockerfile.' + pattern + self.dockerfileprefix, '\$', 5)
mySSH.command(self.cli + ' build ' + self.cliBuildOptions + ' --target ' + image + ' --tag ' + image + ':' + imageTag + ' --file docker/Dockerfile.' + pattern + self.dockerfileprefix + ' . > cmake_targets/log/' + image + '.log 2>&1', '\$', 1200)
# split the log
mySSH.command('mkdir -p cmake_targets/log/' + image, '\$', 5)
mySSH.command('python3 ci-scripts/docker_log_split.py --logfilename=cmake_targets/log/' + image + '.log', '\$', 5)
# checking the status of the build
mySSH.command(self.cli + ' image inspect --format=\'Size = {{.Size}} bytes\' ' + image + ':' + imageTag, '\$', 5)
if mySSH.getBefore().count('o such image') != 0:
logging.error('\u001B[1m Could not build properly ' + image + '\u001B[0m')
status = False
# Here we should check if the last container corresponds to a failed command and destroy it
mySSH.command(self.cli + ' ps --quiet --filter "status=exited" -n1 | xargs ' + self.cli + ' rm -f', '\$', 5)
self.allImagesSize[image] = 'N/A -- Build Failed'
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', mySSH.getBefore())
if result is not None:
imageSize = float(result.group('size'))
imageSize = imageSize / 1000
if imageSize < 1000:
logging.debug('\u001B[1m ' + image + ' size is ' + ('%.0f' % imageSize) + ' kbytes\u001B[0m')
self.allImagesSize[image] = str(round(imageSize,1)) + ' kbytes'
else:
imageSize = imageSize / 1000
if imageSize < 1000:
logging.debug('\u001B[1m ' + image + ' size is ' + ('%.0f' % imageSize) + ' Mbytes\u001B[0m')
self.allImagesSize[image] = str(round(imageSize,1)) + ' Mbytes'
# src/archive/tests/local_test_kfp_components.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def convert_csv_to_parquet_op(
train_paths: list,
valid_paths: list,
output_path: str,
columns: list,
cols_dtype: list,
sep: str,
gpus: str,
output_dataset: dict,
shuffle: str = None,
recursive: bool = False
):
'''
train_paths: list
List of paths to folders or files in GCS for training.
For recursive folder search, set the recursive variable to True
Format:
'<bucket_name>/<subfolder1>/<subfolder>/' or
'<bucket_name>/<subfolder1>/<subfolder>/flat_file.csv' or
a combination of both.
valid_paths: list
List of paths to folders or files in GCS for validation
For recursive folder search, set the recursive variable to True
Format:
'<bucket_name>/<subfolder1>/<subfolder>/' or
'<bucket_name>/<subfolder1>/<subfolder>/flat_file.csv' or
a combination of both.
output_path: str
Path to write the converted parquet files
Format:
'<bucket_name>/<subfolder1>/<subfolder>/'
gpus: str
GPUs available. Example:
If there are 4 gpus available, must be '0,1,2,3'
output_dataset: dict
Metadata pointing to the converted dataset
Format:
output_dataset['train'] = \
'<bucket_name>/<subfolder1>/<subfolder>/'
shuffle: str
How to shuffle the converted data, default to None.
Options:
PER_PARTITION
PER_WORKER
FULL
'''
# Standard Libraries
import logging
from pathlib import Path
import fsspec
import os
# External Dependencies
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import numpy as np
# NVTabular
from nvtabular.utils import device_mem_size, get_rmm_size
import nvtabular as nvt
from nvtabular.io.shuffle import Shuffle
logging.basicConfig(level=logging.INFO)
# Specify column dtypes (from numpy). Note that 'hex' means that
# the values will be hexadecimal strings that should be converted to int32
# ADDED for local testing
logging.info('Converting columns dtypes to numpy objects')
converted_col_dtype = {}
for col, dt in cols_dtype.items():
if dt == 'hex':
converted_col_dtype[col] = 'hex'
else:
converted_col_dtype[col] = getattr(np, dt)
fs_spec = fsspec.filesystem('gs')
rec_symbol = '**' if recursive else '*'
TRAIN_SPLIT_FOLDER = 'train'
VALID_SPLIT_FOLDER = 'valid'
if gpus:
logging.info('Creating a Dask CUDA cluster')
cluster = LocalCUDACluster(
n_workers=len(gpus.split(sep=',')),
CUDA_VISIBLE_DEVICES=gpus,
rmm_pool_size=get_rmm_size(0.8 * device_mem_size())
)
client = Client(cluster)
else:
raise Exception('Cannot create Cluster. \
Provide a list of available GPUs')
# CHANGED for local testing
for folder_name, data_paths in zip(
[TRAIN_SPLIT_FOLDER, VALID_SPLIT_FOLDER],
[train_paths, valid_paths]
):
valid_paths = []
for path in data_paths:
try:
if fs_spec.isfile(path):
valid_paths.append(
os.path.join('/gcs', fs_spec.info(path)['name'])
)
else:
path = os.path.join(
fs_spec.info(path)['name'], rec_symbol
)
for i in fs_spec.glob(path):
if fs_spec.isfile(i):
valid_paths.append(os.path.join('/gcs', i))
except FileNotFoundError as fnf_expt:
print(fnf_expt)
print('One of the paths provided are incorrect.')
except OSError as os_err:
print(os_err)
print(f'Verify access to the bucket.')
dataset = nvt.Dataset(
path_or_source = valid_paths,
engine='csv',
names=columns,
sep=sep,
dtypes=converted_col_dtype,
client=client
)
full_output_path = os.path.join('/gcs', output_path, folder_name)
logging.info(f'Writing parquet file(s) to {full_output_path}')
if shuffle:
shuffle = getattr(Shuffle, shuffle)
dataset.to_parquet(
full_output_path,
preserve_files=True,
shuffle=shuffle
)
# CHANGED for local testing
output_dataset[folder_name] = full_output_path
client.close()
# close cluster?
return output_dataset
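# Hypothetical local driver for convert_csv_to_parquet_op (not part of the
# original component; bucket paths, column names and dtypes are placeholders in
# the style of the Criteo dataset). Note that cols_dtype is consumed as a dict
# despite the list annotation in the signature:
def _example_convert_csv_to_parquet():
    columns = ['label'] + [f'I{i}' for i in range(1, 14)] + [f'C{i}' for i in range(1, 27)]
    cols_dtype = {'label': 'int32'}
    cols_dtype.update({f'I{i}': 'int32' for i in range(1, 14)})
    cols_dtype.update({f'C{i}': 'hex' for i in range(1, 27)})
    return convert_csv_to_parquet_op(
        train_paths=['my-bucket/criteo/train/'],
        valid_paths=['my-bucket/criteo/valid/'],
        output_path='my-bucket/criteo/converted/',
        columns=columns,
        cols_dtype=cols_dtype,
        sep='\t',
        gpus='0',
        output_dataset={},
        shuffle='PER_PARTITION',
    )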
def fit_dataset_op(
datasets: dict, # Input from previous
fitted_workflow: dict, # Output for next
workflow_path: str, # Location of the saved workflow
gpus: str,
split_name: str = 'train',
protocol: str = 'tcp',
device_limit_frac: float = 0.8,
device_pool_frac: float = 0.9,
part_mem_frac: float = 0.125
):
'''
datasets: dict
Input metadata from previus step. Stores the full path of the
converted datasets.
How to access:
full_path = dataset['train']
fitted_workflow: dict
Output metadata for next step. Stores the full path of the
converted dataset, and saved workflow with statistics.
workflow_path: str
Path to the current workflow, not fitted.
Format:
'<bucket_name>/<subfolder1>/<subfolder>/'
split_name: str
Which dataset to calculate the statistics.
'''
import logging
import nvtabular as nvt
import os
import fsspec
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from nvtabular.utils import device_mem_size
logging.basicConfig(level=logging.INFO)
# FIT_FOLDER = os.path.join('/gcs', workflow_path, 'fitted_workflow')
FIT_FOLDER = '.'
# Check if the `split_name` dataset is present
data_path = datasets.get(split_name, '')
if not data_path:
raise RuntimeError(f'Dataset does not have {split_name} split.')
# Dask Cluster defintions
device_size = device_mem_size()
device_limit = int(device_limit_frac * device_size)
device_pool_size = int(device_pool_frac * device_size)
part_size = int(part_mem_frac * device_size)
rmm_pool_size = (device_pool_size // 256) * 256
if gpus:
logging.info('Creating a Dask CUDA cluster')
cluster = LocalCUDACluster(
protocol=protocol,
n_workers=len(gpus.split(sep=',')),
CUDA_VISIBLE_DEVICES=gpus,
device_memory_limit=device_limit,
rmm_pool_size=rmm_pool_size
)
client = Client(cluster)
else:
raise Exception('Cannot create Cluster. \
Check cluster parameters')
# Load Transformation steps
# full_workflow_path = os.path.join('/gcs', workflow_path)
full_workflow_path = '/home/renatoleite/workspace/merlin-on-vertex/src/kfp_components/saved_workflow'
logging.info('Loading saved workflow')
workflow = nvt.Workflow.load(full_workflow_path, client)
fitted_dataset = nvt.Dataset(
data_path, engine="parquet", part_size=part_size
)
logging.info('Starting workflow fitting')
workflow.fit(fitted_dataset)
logging.info('Finished generating statistics for dataset.')
logging.info(f'Saving workflow to {FIT_FOLDER}')
# workflow.save(FIT_FOLDER)
# CHANGED to run locally
fitted_workflow['fitted_workflow'] = FIT_FOLDER
fitted_workflow['datasets'] = datasets
return fitted_workflow
def transform_dataset_op(
fitted_workflow: dict,
transformed_dataset: dict,
output_transformed: str,
gpus: str,
split_name: str = 'train',
shuffle: str = None,
protocol: str = 'tcp',
device_limit_frac: float = 0.8,
device_pool_frac: float = 0.9,
part_mem_frac: float = 0.125,
):
'''
fitted_workflow: dict
Input metadata from previous step. Stores the path of the fitted_workflow
and the location of the datasets (train and validation).
Usage:
            train_path = fitted_workflow['datasets']['train']
output: '<bucket_name>/<subfolder1>/<subfolder>/'
transformed_dataset: dict
Output metadata for next step. Stores the path of the transformed dataset
and the validation dataset.
output_transformed: str,
Path to write the transformed parquet files
Format:
'<bucket_name>/<subfolder1>/<subfolder>/'
gpus: str
GPUs available. Example:
If there are 4 gpus available, must be '0,1,2,3'
shuffle: str
How to shuffle the converted data, default to None.
Options:
PER_PARTITION
PER_WORKER
FULL
'''
import logging
import nvtabular as nvt
import os
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
from nvtabular.utils import device_mem_size
from nvtabular.io.shuffle import Shuffle
logging.basicConfig(level=logging.INFO)
# Define output path for transformed files
TRANSFORM_FOLDER = os.path.join('/gcs', output_transformed, split_name)
# Get path to dataset to be transformed
data_path = fitted_workflow.get('datasets').get(split_name, '')
if not data_path:
raise RuntimeError(f'Dataset does not have {split_name} split.')
    # Dask cluster definitions
device_size = device_mem_size()
device_limit = int(device_limit_frac * device_size)
device_pool_size = int(device_pool_frac * device_size)
part_size = int(part_mem_frac * device_size)
rmm_pool_size = (device_pool_size // 256) * 256
if gpus:
logging.info('Creating a Dask CUDA cluster')
cluster = LocalCUDACluster(
protocol=protocol,
n_workers=len(gpus.split(sep=',')),
CUDA_VISIBLE_DEVICES=gpus,
device_memory_limit=device_limit,
rmm_pool_size=rmm_pool_size
)
client = Client(cluster)
else:
raise Exception('Cannot create Cluster. \
Provide a list of available GPUs')
# Load Transformation steps
logging.info('Loading workflow and statistics')
workflow = nvt.Workflow.load(fitted_workflow['fitted_workflow'], client)
logging.info('Creating dataset definition')
dataset = nvt.Dataset(
data_path, engine="parquet", part_size=part_size
)
if shuffle:
shuffle = getattr(Shuffle, shuffle)
logging.info('Starting workflow transformation')
workflow.transform(dataset).to_parquet(
        output_files=len(gpus.split(sep=',')),
output_path=TRANSFORM_FOLDER,
shuffle=shuffle
)
logging.info('Finished transformation')
transformed_dataset['transformed_dataset'] = TRANSFORM_FOLDER
transformed_dataset['original_datasets'] = fitted_workflow.get('datasets')
return transformed_dataset
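# A minimal sketch (not part of the pipeline code) of chaining the two steps
# above locally; the GPU list, paths and metadata dicts are placeholders.
#
#   fitted = fit_dataset_op(
#       datasets={'train': '/gcs/<bucket>/train', 'valid': '/gcs/<bucket>/valid'},
#       fitted_workflow={}, workflow_path='<bucket>/workflow/', gpus='0')
#   transformed = transform_dataset_op(
#       fitted_workflow=fitted, transformed_dataset={},
#       output_transformed='<bucket>/transformed/', gpus='0')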
def export_parquet_from_bq_op(
output_path: str,
bq_project: str,
bq_dataset_id: str,
bq_table_train: str,
bq_table_valid: str,
output_dataset: dict,
location: str
):
'''
output_path: str
Path to write the exported parquet files
Format:
'gs://<bucket_name>/<subfolder1>/<subfolder>/'
bq_project: str
GCP project id
bq_dataset_id: str
Bigquery dataset id
bq_table_train: str
Bigquery table name for training dataset
bq_table_valid: str
BigQuery table name for validation dataset
output_dataset: dict
Output metadata for the next step. Stores the path in GCS
for the datasets.
Usage:
train_path = output_dataset['train']
# returns: bucket_name/subfolder/subfolder/
'''
# Standard Libraries
import logging
import os
from google.cloud import bigquery
logging.basicConfig(level=logging.INFO)
TRAIN_SPLIT_FOLDER = 'train'
VALID_SPLIT_FOLDER = 'valid'
client = bigquery.Client()
for folder_name, table_id in zip(
[TRAIN_SPLIT_FOLDER, VALID_SPLIT_FOLDER],
[bq_table_train, bq_table_valid]
):
bq_glob_path = os.path.join(
'gs://',
output_path,
folder_name,
f'{folder_name}-*.parquet'
)
dataset_ref = bigquery.DatasetReference(bq_project, bq_dataset_id)
table_ref = dataset_ref.table(table_id)
        logging.info(f'Extracting {table_ref} to {bq_glob_path}')
        # Export as Parquet (the extract default is CSV) and wait for the job
        # to finish before recording the output path.
        extract_job = client.extract_table(
            table_ref,
            bq_glob_path,
            location=location,
            job_config=bigquery.ExtractJobConfig(destination_format='PARQUET')
        )
        extract_job.result()
full_output_path = os.path.join('/gcs', output_path, folder_name)
logging.info(f'Saving metadata for {folder_name} path: {full_output_path}')
output_dataset[folder_name] = full_output_path
return output_dataset
def import_parquet_to_bq_op(
transform_dataset: dict, # Input[Dataset],
output_bq_table: dict, # Output[Dataset]
bq_project: str,
bq_dataset_id: str,
bq_dest_table_id: str
):
'''
    transform_dataset: dict
        Input metadata from the previous step. Stores the GCS path of the
        transformed dataset.
        Usage:
            data_path = transform_dataset['transformed_dataset']
            # returns: /gcs/<bucket_name>/<subfolder>/<split_name>
bq_project: str
GCP project id
bq_dataset_id: str
Bigquery dataset id
bq_dest_table_id: str
Bigquery destination table name
'''
# Standard Libraries
import logging
import os
from google.cloud import bigquery
logging.basicConfig(level=logging.INFO)
    # Strip the '/gcs/' fuse prefix and rebuild a gs:// URI for the load job.
    data_path = transform_dataset['transformed_dataset'][5:]
    full_data_path = os.path.join('gs://', data_path, '*.parquet')
# Construct a BigQuery client object.
client = bigquery.Client(project=bq_project)
table_id | |
# Repository: bgusach/influxdb-python
# -*- coding: utf-8 -*-
"""Unit tests for misc module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import timedelta
import json
import unittest
import warnings
import requests_mock
from influxdb.tests import skipIfPYpy, using_pypy
from nose.tools import raises
from .client_test import _mocked_session
if not using_pypy:
import pandas as pd
from pandas.util.testing import assert_frame_equal
from influxdb import DataFrameClient
@skipIfPYpy
class TestDataFrameClient(unittest.TestCase):
"""Set up a test DataFrameClient object."""
def setUp(self):
"""Instantiate a TestDataFrameClient object."""
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
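    # The 'expected' byte strings in the tests below are InfluxDB line protocol:
    #   <measurement>[,<tag_key>=<tag_value>...] <field_key>=<field_value>[,...] <timestamp>
    # String fields are quoted, integer fields carry an 'i' suffix, and the
    # trailing number is the timestamp in the requested precision
    # (nanoseconds unless time_precision says otherwise).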
def test_write_points_from_dataframe(self):
"""Test write points from df in TestDataFrameClient object."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"foo column_one=\"1\",column_two=1i,column_three=1.0 0\n"
b"foo column_one=\"2\",column_two=2i,column_three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected)
cli.write_points(dataframe, 'foo', tags=None)
self.assertEqual(m.last_request.body, expected)
def test_dataframe_write_points_with_whitespace_measurement(self):
"""write_points should escape white space in measurements."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"meas\\ with\\ space "
b"column_one=\"1\",column_two=1i,column_three=1.0 0\n"
b"meas\\ with\\ space "
b"column_one=\"2\",column_two=2i,column_three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'meas with space')
self.assertEqual(m.last_request.body, expected)
def test_dataframe_write_points_with_whitespace_in_column_names(self):
"""write_points should escape white space in column names."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column one", "column two",
"column three"])
expected = (
b"foo column\\ one=\"1\",column\\ two=1i,column\\ three=1.0 0\n"
b"foo column\\ one=\"2\",column\\ two=2i,column\\ three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_none(self):
"""Test write points from df in TestDataFrameClient object."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", None, 1.0], ["2", 2.0, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"foo column_one=\"1\",column_three=1.0 0\n"
b"foo column_one=\"2\",column_two=2.0,column_three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected)
cli.write_points(dataframe, 'foo', tags=None)
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_line_of_none(self):
"""Test write points from df in TestDataFrameClient object."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[[None, None, None], ["2", 2.0, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"foo column_one=\"2\",column_two=2.0,column_three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected)
cli.write_points(dataframe, 'foo', tags=None)
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_all_none(self):
"""Test write points from df in TestDataFrameClient object."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[[None, None, None], [None, None, None]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected)
cli.write_points(dataframe, 'foo', tags=None)
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_in_batches(self):
"""Test write points in batch from df in TestDataFrameClient object."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
self.assertTrue(cli.write_points(dataframe, "foo", batch_size=1))
def test_write_points_from_dataframe_with_tag_columns(self):
"""Test write points from df w/tag in TestDataFrameClient object."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, 1.0],
['red', 0, "2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["tag_one", "tag_two", "column_one",
"column_two", "column_three"])
expected = (
b"foo,tag_one=blue,tag_two=1 "
b"column_one=\"1\",column_two=1i,column_three=1.0 "
b"0\n"
b"foo,tag_one=red,tag_two=0 "
b"column_one=\"2\",column_two=2i,column_three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo',
tag_columns=['tag_one', 'tag_two'])
self.assertEqual(m.last_request.body, expected)
cli.write_points(dataframe, 'foo',
tag_columns=['tag_one', 'tag_two'], tags=None)
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_tag_cols_and_global_tags(self):
"""Test write points from df w/tag + cols in TestDataFrameClient."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, 1.0],
['red', 0, "2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["tag_one", "tag_two", "column_one",
"column_two", "column_three"])
expected = (
b"foo,global_tag=value,tag_one=blue,tag_two=1 "
b"column_one=\"1\",column_two=1i,column_three=1.0 "
b"0\n"
b"foo,global_tag=value,tag_one=red,tag_two=0 "
b"column_one=\"2\",column_two=2i,column_three=2.0 "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo',
tag_columns=['tag_one', 'tag_two'],
tags={'global_tag': 'value'})
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_tag_cols_and_defaults(self):
"""Test default write points from df w/tag in TestDataFrameClient."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, 1.0, 'hot'],
['red', 0, "2", 2, 2.0, 'cold']],
index=[now, now + timedelta(hours=1)],
columns=["tag_one", "tag_two", "column_one",
"column_two", "column_three",
"tag_three"])
expected_tags_and_fields = (
b"foo,tag_one=blue "
b"column_one=\"1\",column_two=1i "
b"0\n"
b"foo,tag_one=red "
b"column_one=\"2\",column_two=2i "
b"3600000000000\n"
)
expected_tags_no_fields = (
b"foo,tag_one=blue,tag_two=1 "
b"column_one=\"1\",column_two=1i,column_three=1.0,"
b"tag_three=\"hot\" 0\n"
b"foo,tag_one=red,tag_two=0 "
b"column_one=\"2\",column_two=2i,column_three=2.0,"
b"tag_three=\"cold\" 3600000000000\n"
)
expected_fields_no_tags = (
b"foo,tag_one=blue,tag_three=hot,tag_two=1 "
b"column_one=\"1\",column_two=1i,column_three=1.0 "
b"0\n"
b"foo,tag_one=red,tag_three=cold,tag_two=0 "
b"column_one=\"2\",column_two=2i,column_three=2.0 "
b"3600000000000\n"
)
expected_no_tags_no_fields = (
b"foo "
b"tag_one=\"blue\",tag_two=1i,column_one=\"1\","
b"column_two=1i,column_three=1.0,tag_three=\"hot\" "
b"0\n"
b"foo "
b"tag_one=\"red\",tag_two=0i,column_one=\"2\","
b"column_two=2i,column_three=2.0,tag_three=\"cold\" "
b"3600000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo',
field_columns=['column_one', 'column_two'],
tag_columns=['tag_one'])
self.assertEqual(m.last_request.body, expected_tags_and_fields)
cli.write_points(dataframe, 'foo',
tag_columns=['tag_one', 'tag_two'])
self.assertEqual(m.last_request.body, expected_tags_no_fields)
cli.write_points(dataframe, 'foo',
field_columns=['column_one', 'column_two',
'column_three'])
self.assertEqual(m.last_request.body, expected_fields_no_tags)
cli.write_points(dataframe, 'foo')
self.assertEqual(m.last_request.body, expected_no_tags_no_fields)
def test_write_points_from_dataframe_with_tag_escaped(self):
"""Test write points from df w/escaped tag in TestDataFrameClient."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(
data=[
['blue orange', "1", 1, 'hot=cold'], # space, equal
['red,green', "2", 2, r'cold\fire'], # comma, backslash
['some', "2", 2, ''], # skip empty
['some', "2", 2, None], # skip None
['', "2", 2, None], # all tags empty
],
index=pd.period_range(now, freq='H', periods=5),
columns=["tag_one", "column_one", "column_two", "tag_three"]
)
expected_escaped_tags = (
b"foo,tag_one=blue\\ orange,tag_three=hot\\=cold "
b"column_one=\"1\",column_two=1i "
b"0\n"
b"foo,tag_one=red\\,green,tag_three=cold\\\\fire "
b"column_one=\"2\",column_two=2i "
b"3600000000000\n"
b"foo,tag_one=some "
b"column_one=\"2\",column_two=2i "
b"7200000000000\n"
b"foo,tag_one=some "
b"column_one=\"2\",column_two=2i "
b"10800000000000\n"
b"foo "
b"column_one=\"2\",column_two=2i "
b"14400000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, 'foo',
field_columns=['column_one', 'column_two'],
tag_columns=['tag_one', 'tag_three'])
self.assertEqual(m.last_request.body, expected_escaped_tags)
def test_write_points_from_dataframe_with_numeric_column_names(self):
"""Test write points from df with numeric cols."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
# df with numeric column names
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)])
expected = (
b'foo,hello=there 0=\"1\",1=1i,2=1.0 0\n'
b'foo,hello=there 0=\"2\",1=2i,2=2.0 3600000000000\n'
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo", {"hello": "there"})
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_numeric_precision(self):
"""Test write points from df with numeric precision."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
# df with numeric column names
dataframe = pd.DataFrame(data=[["1", 1, 1.1111111111111],
["2", 2, 2.2222222222222]],
index=[now, now + timedelta(hours=1)])
expected_default_precision = (
b'foo,hello=there 0=\"1\",1=1i,2=1.11111111111 0\n'
b'foo,hello=there 0=\"2\",1=2i,2=2.22222222222 3600000000000\n'
)
expected_specified_precision = (
b'foo,hello=there 0=\"1\",1=1i,2=1.1111 0\n'
b'foo,hello=there 0=\"2\",1=2i,2=2.2222 3600000000000\n'
)
expected_full_precision = (
b'foo,hello=there 0=\"1\",1=1i,2=1.1111111111111 0\n'
b'foo,hello=there 0=\"2\",1=2i,2=2.2222222222222 3600000000000\n'
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo", {"hello": "there"})
self.assertEqual(m.last_request.body, expected_default_precision)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo", {"hello": "there"},
numeric_precision=4)
self.assertEqual(m.last_request.body, expected_specified_precision)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo", {"hello": "there"},
numeric_precision='full')
self.assertEqual(m.last_request.body, expected_full_precision)
def test_write_points_from_dataframe_with_period_index(self):
"""Test write points from df with period index."""
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[pd.Period('1970-01-01'),
pd.Period('1970-01-02')],
columns=["column_one", "column_two",
"column_three"])
expected = (
b"foo column_one=\"1\",column_two=1i,column_three=1.0 0\n"
b"foo column_one=\"2\",column_two=2i,column_three=2.0 "
b"86400000000000\n"
)
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo")
self.assertEqual(m.last_request.body, expected)
def test_write_points_from_dataframe_with_time_precision(self):
"""Test write points from df with time precision."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/write",
status_code=204)
cli = DataFrameClient(database='db')
measurement = "foo"
cli.write_points(dataframe, measurement, time_precision='h')
self.assertEqual(m.last_request.qs['precision'], ['h'])
self.assertEqual(
b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
b'column_one="2",column_two=2i,column_three=2.0 1\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='m')
self.assertEqual(m.last_request.qs['precision'], ['m'])
self.assertEqual(
b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
b'column_one="2",column_two=2i,column_three=2.0 60\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='s')
self.assertEqual(m.last_request.qs['precision'], ['s'])
self.assertEqual(
b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
b'column_one="2",column_two=2i,column_three=2.0 3600\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='ms')
self.assertEqual(m.last_request.qs['precision'], ['ms'])
self.assertEqual(
b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
b'column_one="2",column_two=2i,column_three=2.0 3600000\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='u')
self.assertEqual(m.last_request.qs['precision'], ['u'])
self.assertEqual(
b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
b'column_one="2",column_two=2i,column_three=2.0 3600000000\n',
m.last_request.body,
)
cli.write_points(dataframe, measurement, time_precision='n')
self.assertEqual(m.last_request.qs['precision'], ['n'])
self.assertEqual(
b'foo column_one="1",column_two=1i,column_three=1.0 0\n'
b'foo column_one="2",column_two=2i,column_three=2.0 '
b'3600000000000\n',
m.last_request.body,
)
@raises(TypeError)
def test_write_points_from_dataframe_fails_without_time_index(self):
"""Test failed write points from df without time index."""
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo")
@raises(TypeError)
def test_write_points_from_dataframe_fails_with_series(self):
"""Test failed write points from df with series."""
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.Series(data=[1.0, 2.0],
index=[now, now + timedelta(hours=1)])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series",
status_code=204)
cli = DataFrameClient(database='db')
cli.write_points(dataframe, "foo")
def test_query_into_dataframe(self):
"""Test query into df for TestDataFrameClient object."""
data = {
"results": [{
"series": [
{"measurement": "network",
"tags": {"direction": ""},
"columns": ["time", "value"],
| |
b"POST",
'{0}/__experiments/multiplot?from={1}&to={2}&points={3}'.format(
self.uri, '1412902262560', '1412988662560', 500),
json.dumps({'metrics': [{'entity_id': self.entity_id,
'check_id': check_id,
'metric': 'whut'}]}).encode("utf-8")))
self.assertEquals(resp.code, 200)
self.assertEquals(data['metrics'][0]['type'], 'unknown')
self.assertEquals(len(data['metrics'][0]['data']), 0)
def test_multiplot_malformatted_remote_metric(self):
"""
Multiplot metrics for remote checks must stuff the monitoring
zone in the front of the metric name, e.g., mzord.duration.
Requesting an incorrectly formatted metric name causes an unknown
metric and empty data to be returned.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"POST",
'{0}/__experiments/multiplot?from={1}&to={2}&points={3}'.format(
self.uri, '1412902262560', '1412988662560', 500),
json.dumps({'metrics': [{'entity_id': self.entity_id,
'check_id': self.check_id,
'metric': 'LOLWUT'}]}).encode("utf-8")))
self.assertEquals(resp.code, 200)
self.assertEquals(data['metrics'][0]['type'], 'unknown')
self.assertEquals(len(data['metrics'][0]['data']), 0)
def test_multiplot_nonexistent_metric(self):
"""
Getting multiplot metrics that Mimic doesn't know about cause
an unknown metric and empty data to be returned, if the check
type is one that Mimic knows about.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"POST",
'{0}/__experiments/multiplot?from={1}&to={2}&points={3}'.format(
self.uri, '1412902262560', '1412988662560', 500),
json.dumps({'metrics': [{'entity_id': self.entity_id,
'check_id': self.check_id,
'metric': 'mzord.nonexistent'}]}).encode("utf-8")))
self.assertEquals(resp.code, 200)
self.assertEquals(data['metrics'][0]['type'], 'unknown')
self.assertEquals(len(data['metrics'][0]['data']), 0)
def test_multiplot_single_point(self):
"""
Plotting a single point should not cause a server error.
"""
resp = self.successResultOf(
request(self, self.root, b"POST",
'{0}/__experiments/multiplot?from={1}&to={2}&points={3}'.format(
self.uri, '1412902262560', '1412988662560', 1),
json.dumps({'metrics': [{'entity_id': self.entity_id,
'check_id': self.check_id,
'metric': 'mzord.available'}]}).encode("utf-8")))
self.assertEquals(resp.code, 200)
def test_get_all_notification_plans(self):
"""
get all notification plans
"""
req = request(self, self.root, b"GET", self.uri + '/notification_plans')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(2, data['metadata']['count'])
def test_get_notification_plan(self):
"""
Get a specific notification plan
"""
req = request(self, self.root, b"GET", self.uri + '/notification_plans/' + self.np_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['id'], self.np_id)
req = request(self, self.root, b"GET",
self.uri + '/notification_plans/npTechnicalContactsEmail')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['id'], 'npTechnicalContactsEmail')
def test_get_all_notifications(self):
"""
Get all notification targets
"""
req = request(self, self.root, b"GET", self.uri + '/notifications')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(2, data['metadata']['count'])
def test_update_notification(self):
"""
Update a notification target
"""
postdata = {'id': self.nt_id, 'label': 'changed'}
req = request(self, self.root, b"PUT", self.uri + '/notifications/' + self.nt_id,
json.dumps(postdata).encode("utf-8"))
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/notifications')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
mynt = None
for nt in data['values']:
if nt['id'] == self.nt_id:
mynt = nt
break
self.assertIsNot(None, mynt)
self.assertEquals('changed', mynt['label'])
def test_update_missing_notification(self):
"""
Attempting to update a non-existing notification causes a 404.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"PUT",
'{0}/notifications/ntDoesNotExist'.format(self.uri),
json.dumps({'label': 'my awesome notification'}).encode("utf-8")))
self.assertEquals(resp.code, 404)
self.assertEquals(data['message'], 'Object does not exist')
def test_delete_notification(self):
"""
Delete a notification target
"""
req = request(self, self.root, b"DELETE", self.uri + '/notifications/' + self.nt_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/notifications')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
mynt = None
for nt in data['values']:
if nt['id'] == self.nt_id:
mynt = nt
break
self.assertEquals(None, mynt)
def test_delete_nonexistent_notification_404s(self):
"""
Deleting a notification that does not exist causes a 404.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"DELETE", '{0}/notifications/ntWhut'.format(self.uri)))
self.assertEquals(resp.code, 404)
self.assertEquals(data['details'], 'Object "Notification" with key "ntWhut" does not exist')
def test_update_notificationplan(self):
"""
Update a notification plan
"""
postdata = {'id': self.np_id, 'label': 'changed'}
req = request(self, self.root, b"PUT", self.uri + '/notification_plans/' + self.np_id,
json.dumps(postdata).encode("utf-8"))
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/notification_plans/' + self.np_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals('changed', data['label'])
def test_update_missing_notification_plan(self):
"""
Attempting to update a non-existing notification plan causes a 404.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"PUT",
'{0}/notification_plans/npDoesNotExist'.format(self.uri),
json.dumps({'label': 'WAT WAT WAT'}).encode("utf-8")))
self.assertEquals(resp.code, 404)
self.assertEquals(data['message'], 'Object does not exist')
def test_delete_notificationplan(self):
"""
Delete a notification plan
"""
req = request(self, self.root, b"DELETE", self.uri + '/notification_plans/' + self.np_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/notification_plans')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
mynp = None
for np in data['values']:
if np['id'] == self.np_id:
mynp = np
break
self.assertEquals(None, mynp)
def test_delete_nonexistent_notification_plan_404s(self):
"""
Deleting a notification plan that does not exist causes a 404.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"DELETE", '{0}/notification_plans/npWhut'.format(self.uri)))
self.assertEquals(resp.code, 404)
self.assertEquals(data['details'], 'Object "NotificationPlan" with key "npWhut" does not exist')
def test_get_notificationtypes(self):
"""
Get notification types
"""
req = request(self, self.root, b"GET", self.uri + '/notification_types')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(4, data['metadata']['count'])
def test_get_suppression(self):
"""
Get a specific suppression
"""
req = request(self, self.root, b"GET", self.uri + '/suppressions/' + self.sp_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['id'], self.sp_id)
self.assertEquals(data['notification_plans'], [])
self.assertEquals(data['entities'], [])
self.assertEquals(data['checks'], [])
self.assertEquals(data['alarms'], [])
def test_get_all_suppressions(self):
"""
Get all the suppressions
"""
req = request(self, self.root, b"GET", self.uri + '/suppressions')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(1, data['metadata']['count'])
self.assertEquals(self.sp_id, data['values'][0]['id'])
def test_update_suppression(self):
"""
        Update a suppression
"""
postdata = {'id': self.sp_id, 'label': 'changed'}
req = request(self, self.root, b"PUT", self.uri + '/suppressions/' + self.sp_id,
json.dumps(postdata).encode("utf-8"))
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/suppressions/' + self.sp_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals('changed', data['label'])
def test_update_missing_suppression(self):
"""
Attempting to update a non-existing suppression causes a 404.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"PUT",
'{0}/suppressions/spDoesNotExist'.format(self.uri),
json.dumps({'label': 'my-suppression'}).encode("utf-8")))
self.assertEquals(resp.code, 404)
self.assertEquals(data['message'], 'Object does not exist')
def test_delete_suppression(self):
"""
        Delete a suppression
"""
req = request(self, self.root, b"DELETE", self.uri + '/suppressions/' + self.sp_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/suppressions')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
mysp = None
for sp in data['values']:
if sp['id'] == self.sp_id:
mysp = sp
break
self.assertEquals(None, mysp)
def test_delete_nonexistent_suppression_404s(self):
"""
Deleting a suppression that does not exist causes a 404.
"""
(resp, data) = self.successResultOf(
json_request(self, self.root, b"DELETE", '{0}/suppressions/spWhut'.format(self.uri)))
self.assertEquals(resp.code, 404)
self.assertEquals(data['details'], 'Object "Suppression" with key "spWhut" does not exist')
def test_list_monitoring_zones(self):
"""
List the monitoring zones
"""
req = request(self, self.root, b"GET", self.uri + '/monitoring_zones')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
mz = data['values'][0]
self.assertEquals('mzdfw', mz['id'])
def test_list_alarm_examples(self):
"""
List the alarm examples
"""
req = request(self, self.root, b"GET", self.uri + '/alarm_examples')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
ax = data['values'][0]
self.assertEquals('remote.http_body_match_1', ax['id'])
def test_alarm_count_per_np(self):
"""
        The alarmCountsPerNp view reports the alarm count per notification plan.
"""
req = request(self, self.root, b"GET", self.uri + '/views/alarmCountsPerNp')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['values'][0]['alarm_count'], 1)
self.assertEquals(data['values'][0]['notification_plan_id'], 'npTechnicalContactsEmail')
def test_alarms_by_np(self):
"""
        The alarmsByNp view lists the alarms attached to a notification plan.
"""
req = request(self, self.root, b"GET", self.uri + '/views/overview')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
alarm = self.get_responsebody(resp)['values'][0]['alarms'][0]
alarm['notification_plan_id'] = self.np_id
req = request(self, self.root, b"PUT",
self.uri + '/entities/' + self.entity_id + '/alarms/' + self.alarm_id,
json.dumps(alarm).encode("utf-8"))
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"GET", self.uri + '/views/alarmsByNp/' + self.np_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['values'][0]['id'], self.alarm_id)
def test_delete_np_in_use(self):
"""
        Can't delete a notification plan that alarms still point to.
"""
req = request(self, self.root, b"GET", self.uri + '/views/overview')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
alarm = self.get_responsebody(resp)['values'][0]['alarms'][0]
alarm['notification_plan_id'] = self.np_id
req = request(self, self.root, b"PUT",
self.uri + '/entities/' + self.entity_id + '/alarms/' + self.alarm_id,
json.dumps(alarm).encode("utf-8"))
resp = self.successResultOf(req)
self.assertEquals(resp.code, 204)
req = request(self, self.root, b"DELETE", self.uri + '/notification_plans/' + self.np_id)
resp = self.successResultOf(req)
self.assertEquals(resp.code, 403)
data = self.get_responsebody(resp)
self.assertTrue(self.alarm_id in data['message'])
self.assertTrue(self.alarm_id in data['details'])
def test_reset_session(self):
"""
Reset session, remove all objects
"""
req = request(self, self.root, b"GET", self.uri + '/entities')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['metadata']['count'], 1)
req = request(self, self.root, b"GET", self.uri + '/mimic/reset')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
req = request(self, self.root, b"GET", self.uri + '/entities')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['metadata']['count'], 0)
def test_unicode_label(self):
"""
Create an entity with weird letters in the name.
"""
req = request(self, self.root, b"POST", self.uri + '/entities',
json.dumps({'label': u'\u0CA0_\u0CA0'}).encode("utf-8"))
resp = self.successResultOf(req)
self.assertEquals(resp.code, 201)
def test_overview_pagination(self):
"""
The overview call returns paginated results.
"""
self.createEntity('entity-2')
req = request(self, self.root, b"GET", self.uri + '/views/overview?limit=1')
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['values'][0]['entity']['label'], 'ItsAnEntity')
req = request(self, self.root, b"GET", self.uri +
'/views/overview?marker=' + data['metadata']['next_marker'])
resp = self.successResultOf(req)
self.assertEquals(resp.code, 200)
data = self.get_responsebody(resp)
self.assertEquals(data['values'][0]['entity']['label'], 'entity-2')
def test_overview_pagination_marker_not_found(self):
"""
If the pagination marker is not present in the entities list,
the paginated overview call returns results from the beginning.
"""
req = request(self, self.root, b"GET", self.uri + '/views/overview?marker=enDoesNotExist')
resp | |
#!/usr/bin/env python
import sys
import apsw
import shlex
import os
import csv
import re
import textwrap
import time
import codecs
import base64
if sys.platform=="win32":
_win_colour=False
try:
import colorama
colorama.init()
del colorama
_win_colour=True
except: # there are several failure reasons, ignore them all
pass
class Shell(object):
"""Implements a SQLite shell
:param stdin: Where to read input from (default sys.stdin)
:param stdout: Where to send output (default sys.stdout)
:param stderr: Where to send errors (default sys.stderr)
:param encoding: Default encoding for files opened/created by the
Shell. If you want stdin/out/err to use a particular encoding
then you need to provide them `already configured <http://docs.python.org/library/codecs.html#codecs.open>`__ that way.
:param args: This should be program arguments only (ie if
passing in sys.argv do not include sys.argv[0] which is the
       program name). You can also pass in None and then call
:meth:`process_args` if you want to catch any errors
in handling the arguments yourself.
    :param db: An existing :class:`Connection` you wish to use
The commands and behaviour are modelled after the `interactive
shell <https://sqlite.org/sqlite.html>`__ that is part of
SQLite.
You can inherit from this class to embed in your own code and user
interface. Internally everything is handled as unicode.
Conversions only happen at the point of input or output which you
can override in your own code.
This implementation fixes a number of bugs/quirks present in the
sqlite shell. Its control-C handling is also friendlier. Some
examples of issues not present in this implementation:
* https://sqlite.org/src/info/c25aab7e7e
* https://sqlite.org/src/info/7b61b6c6ce
* https://sqlite.org/src/info/ee19e690ec
* https://sqlite.org/src/info/2466653295
Errors and diagnostics are only ever sent to error output
(self.stderr) and never to the regular output (self.stdout). This
means using shell output is always easy and consistent.
Shell commands begin with a dot (eg .help). They are implemented
as a method named after the command (eg command_help). The method
is passed one parameter which is the list of arguments to the
command.
Output modes are implemented by functions named after the mode (eg
output_column).
When you request help the help information is automatically
generated from the docstrings for the command and output
functions.
You should not use a Shell object concurrently from multiple
threads. It is one huge set of state information which would
become inconsistent if used simultaneously, and then give baffling
errors. It is safe to call methods one at a time from different
threads. ie it doesn't care what thread calls methods as long as
you don't call more than one concurrently.
"""
class Error(Exception):
"""Class raised on errors. The expectation is that the error
will be displayed by the shell as text so there are no
specific subclasses as the distinctions between different
types of errors doesn't matter."""
pass
def __init__(self, stdin=None, stdout=None, stderr=None, encoding="utf8", args=None, db=None):
"""Create instance, set defaults and do argument processing."""
super(Shell, self).__init__()
# The parameter doc has to be in main class doc as sphinx
# ignores any described here
self.exceptions=False
self.history_file="~/.sqlite_history"
self._db=None
self.dbfilename=None
if db:
self.db=db, db.filename
else:
self.db=None, None
self.prompt= "sqlite> "
self.moreprompt=" ..> "
self.separator="|"
self.bail=False
self.echo=False
self.timer=False
self.header=False
self.nullvalue=""
self.output=self.output_list
self._output_table=self._fmt_sql_identifier("table")
self.widths=[]
# do we truncate output in list mode? (explain doesn't, regular does)
self.truncate=True
# a stack of previous outputs. turning on explain saves previous, off restores
self._output_stack=[]
# other stuff
self.set_encoding(encoding)
if stdin is None: stdin=sys.stdin
if stdout is None: stdout=sys.stdout
if stderr is None: stderr=sys.stderr
self.stdin=stdin
self.stdout=stdout
self._original_stdout=stdout
self.stderr=stderr
# we don't become interactive until the command line args are
# successfully parsed and acted upon
self.interactive=None
# current colouring object
self.command_colour() # set to default
self._using_readline=False
self._input_stack=[]
self.input_line_number=0
self.push_input()
self.push_output()
self._input_descriptions=[]
if args:
try:
self.process_args(args)
except:
if len(self._input_descriptions):
self._input_descriptions.append("Processing command line arguments")
self.handle_exception()
raise
if self.interactive is None:
self.interactive=getattr(self.stdin, "isatty", False) and self.stdin.isatty() and getattr(self.stdout, "isatty", False) and self.stdout.isatty()
def _ensure_db(self):
"The database isn't opened until first use. This function ensures it is now open."
if not self._db:
if not self.dbfilename:
self.dbfilename=":memory:"
self._db=apsw.Connection(self.dbfilename, flags=apsw.SQLITE_OPEN_URI | apsw.SQLITE_OPEN_READWRITE | apsw.SQLITE_OPEN_CREATE)
return self._db
def _set_db(self, newv):
"Sets the open database (or None) and filename"
(db, dbfilename)=newv
if self._db:
self._db.close(True)
self._db=None
self._db=db
self.dbfilename=dbfilename
db=property(_ensure_db, _set_db, None, "The current :class:`Connection`")
def process_args(self, args):
"""Process command line options specified in args. It is safe to
call this multiple times. We try to be compatible with SQLite shell
argument parsing.
:param args: A list of string options. Do not include the
program as args[0]
:returns: A tuple of (databasefilename, initfiles,
sqlncommands). This is provided for informational purposes
only - they have already been acted upon. An example use
is that the SQLite shell does not enter the main interactive
loop if any sql/commands were provided.
The first non-option is the database file name. Each
remaining non-option is treated as a complete input (ie it
isn't joined with others looking for a trailing semi-colon).
The SQLite shell uses single dash in front of options. We
allow both single and double dashes. When an unrecognized
argument is encountered then
:meth:`process_unknown_args` is called.
"""
# we don't use optparse as we need to use single dashes for
# options - all hand parsed
if not args:
return None, [], []
# are options still valid?
options=True
# have we seen the database name?
havedbname=False
# List of init files to read
inits=[]
# List of sql/dot commands
sqls=[]
while args:
if not options or not args[0].startswith("-"):
options=False
if not havedbname:
# grab new database
self.db=None, args[0]
havedbname=True
else:
sqls.append(args[0])
args=args[1:]
continue
# remove initial single or double dash
args[0]=args[0][1:]
if args[0].startswith("-"):
args[0]=args[0][1:]
if args[0]=="init":
if len(args)<2:
raise self.Error("You need to specify a filename after -init")
inits.append(args[1])
args=args[2:]
continue
if args[0]=="header" or args[0]=="noheader":
self.header=args[0]=="header"
args=args[1:]
continue
if args[0] in ("echo", "bail", "interactive"):
setattr(self, args[0], True)
args=args[1:]
continue
if args[0]=="batch":
self.interactive=False
args=args[1:]
continue
if args[0] in ("separator", "nullvalue", "encoding"):
if len(args)<2:
raise self.Error("You need to specify a value after -"+args[0])
getattr(self, "command_"+args[0])([args[1]])
args=args[2:]
continue
if args[0]=="version":
self.write(self.stdout, apsw.sqlitelibversion()+"\n")
# A pretty gnarly thing to do
sys.exit(0)
if args[0]=="help":
self.write(self.stderr, self.usage())
sys.exit(0)
if args[0] in ("no-colour", "no-color", "nocolour", "nocolor"):
self.colour_scheme="off"
self._out_colour()
args=args[1:]
continue
# only remaining known args are output modes
if getattr(self, "output_"+args[0], None):
self.command_mode(args[:1])
args=args[1:]
continue
newargs=self.process_unknown_args(args)
if newargs is None:
raise self.Error("Unrecognized argument '"+args[0]+"'")
args=newargs
for f in inits:
self.command_read([f])
for s in sqls:
self.process_complete_line(s)
return self.dbfilename, inits, sqls
def process_unknown_args(self, args):
"""This is called when :meth:`process_args` encounters an
argument it doesn't understand. Override this method if you
want to be able to understand additional command line arguments.
:param args: A list of the remaining arguments. The initial one will
have had the leading dashes removed (eg if it was --foo on the command
line then args[0] will be "foo"
:returns: None if you don't recognize the argument either. Otherwise
return the list of remaining arguments after you have processed
yours.
"""
return None
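    # A brief sketch (not part of the original shell) of a subclass that
    # accepts an extra, hypothetical "-myflag value" argument by overriding
    # process_unknown_args:
    #
    #   class MyShell(Shell):
    #       def process_unknown_args(self, args):
    #           if args and args[0] == "myflag" and len(args) >= 2:
    #               self.myflag = args[1]
    #               return args[2:]
    #           return None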
def usage(self):
"Returns the usage message. Make sure it is newline terminated"
msg="""
Usage: program [OPTIONS] FILENAME [SQL|CMD] [SQL|CMD]...
FILENAME is the name of a SQLite database. A new database is
created if the file does not exist.
OPTIONS include:
-init filename read/process named file
-echo print commands before execution
-[no]header turn headers on or off
-bail stop after hitting an error
-interactive force interactive I/O
-batch force batch I/O
-column set output mode to 'column'
-csv set output mode to 'csv'
-html set output mode to 'html'
-line set output mode to 'line'
-list set output mode to 'list'
-python set output mode to 'python'
-separator 'x' set output field separator (|)
-nullvalue 'text' set text string for NULL values
-version show SQLite version
-encoding 'name' the encoding to use for files
opened via .import, .read & .output
-nocolour disables colour output to screen
"""
return msg.lstrip()
###
### Value formatting routines. They take a value and return a
### text formatting of them. Mostly used by the various output's
### but also by random other pieces of code.
###
_binary_type = eval(("buffer", "bytes") [sys.version_info>=(3,0)])
_basestring = | |
# -*- coding:utf-8 -*-
"""
=============================================================================
Provide Material Studio markup file class which do operations on these files.
=============================================================================
Written by PytLab <<EMAIL>>, August 2015
Updated by PytLab <<EMAIL>>, November 2016
==============================================================
"""
from os import getcwd
import logging
import xml.etree.cElementTree as ET
import numpy as np
from vaspy import VasPy, LazyProperty
from vaspy.atomco import AtomCo
from vaspy.errors import UnmatchedDataShape
from vaspy.functions import str2list
class XsdFile(AtomCo):
def __init__(self, filename):
"""
Create a Material Studio *.xsd file class.
Example:
>>> a = XsdFile(filename='ts.xsd')
Class attributes descriptions
=======================================================================
Attribute Description
============ =======================================================
        filename      string, name of the file the direct coordinate data
stored in
natom int, the number of total atom number
atom_types list of strings, atom types
atom_numbers list of int, atom number of atoms in atoms
atom_names list of string,
Value of attribute 'Name' in Atom3d tag.
tf np.array, T & F info for atoms, dtype=np.string
data np.array, coordinates of atoms, dtype=float64
bases np.array, basis vectors of space, dtype=np.float64
============ =======================================================
"""
super(XsdFile, self).__init__(filename)
# Set logger.
self.__logger = logging.getLogger("vaspy.XsdFile")
# Load data in xsd.
self.load()
def load(self):
# get element tree
tree = ET.ElementTree(file=self.filename)
self.tree = tree
# MS version info
root = self.root = tree.getroot()
ms_version = root.get('Version')
if ms_version:
self.ms_version = ms_version
# add WrittenBy attr
if 'WrittenBy' in root.attrib:
root.set('WrittenBy', 'VASPy')
else:
root.attrib.setdefault('WrittenBy', 'VASPy')
# atom info
self.get_atom_info()
# lattice parameters
self.bases = self.get_bases()
# info in Name property.
self.get_name_info()
return
def get_bases(self):
"get bases from SpaceGroup element"
# lattice parameters
bases = []
for elem in self.tree.iter():
if elem.tag == 'SpaceGroup':
for attr in ['AVector', 'BVector', 'CVector']:
basis = elem.attrib[attr] # string
basis = [float(i.strip()) for i in basis.split(',')]
bases.append(basis)
break
bases = np.array(bases)
#set base constant as 1.0
self.bases_const = 1.0
return bases
def __get_identity_mappings(self):
"""
        Private helper to retrieve the IdentityMapping tag objects.
"""
# Find IdentityMapping tag using Xpath.
identity_mappings = self.tree.findall('.//IdentityMapping')
if not identity_mappings:
msg = 'No IdentityMapping tag found.'
self.__logger.warning(msg)
return
#raise ValueError(msg)
return identity_mappings
def get_atom_info(self):
"获取和原子相关的信息, 直接进行属性赋值"
# atom info
atomco_dict = {}
natoms_dict = {}
atom_types = []
tf = []
tf_dict = {}
atom_names = []
atom_components = []
atom_name_dict = {}
identity_mappings = self.__get_identity_mappings()
if identity_mappings is None:
atom3d_iter = self.root.findall('.//Atom3d')
else:
atom3d_iter = identity_mappings[0].iter('Atom3d')
# For each Atom3d tag
for elem in atom3d_iter:
# Atom name and number
atom = elem.attrib['Components']
atom_components.append(atom)
if atom not in natoms_dict:
natoms_dict.setdefault(atom, 1)
atom_types.append(atom)
else:
natoms_dict[atom] += 1
# Coordinates
# NOTE: In bulk the origin point may not have coordinate,
# so use '0.0,0.0,0.0' as default value.
if 'XYZ' not in elem.attrib:
xyz = '0.0,0.0,0.0'
msg = ("Found an Atom3d tag without 'XYZ' attribute" +
", set to {}").format(xyz)
self.__logger.info(msg)
else:
xyz = elem.attrib['XYZ']
coordinate = [float(i.strip()) for i in xyz.split(',')]
if atom not in atomco_dict:
atomco_dict.setdefault(atom, [coordinate])
else:
atomco_dict[atom].append(coordinate)
# T&F info
if 'RestrictedProperties' in elem.attrib:
tf_info = ['F', 'F', 'F']
else:
tf_info = ['T', 'T', 'T']
if atom not in tf_dict:
tf_dict.setdefault(atom, [tf_info])
else:
tf_dict[atom].append(tf_info)
# atom name
atom_name = elem.attrib.get('Name')
            # Sometimes a tag has no Name attribute (possibly because the
            # atom was copied from another file), so fall back to a custom name.
if not atom_name:
atom_name = atom + '_custom'
if atom not in atom_name_dict:
atom_name_dict.setdefault(atom, [atom_name])
else:
atom_name_dict[atom].append(atom_name)
atom_numbers = [natoms_dict[atm] for atm in atom_types]
coordinates = []
for atom in atom_types: # sorted by atoms
# combine all coordinates
coordinates += atomco_dict[atom]
# combine all tf info
tf += tf_dict[atom]
# combine all atom_names
atom_names += atom_name_dict[atom]
# set class attrs
self.natom = len(atom_names)
self.atom_numbers = atom_numbers
self.atom_types = atom_types
self.tf = np.array(tf)
self.atom_names = atom_names
self.atom_components = atom_components
self.atom_names_dict = atom_name_dict
self.data = np.array(coordinates)
def get_name_info(self):
"""
        Get the energy, force and related data stored in the file's Name attribute.
"""
# Get info string.
info = None
for elem in self.tree.iter("SymmetrySystem"):
info = elem.attrib.get('Name')
break
if info is None:
return
# Get thermo data.
fieldnames = ["energy", "force", "magnetism", "path"]
try:
for key, value in zip(fieldnames, info.split()):
if key != "path":
data = float(value.split(':')[-1].strip())
else:
data = value.split(":")[-1].strip()
setattr(self, key, data)
except:
# Set default values.
self.force, self.energy, self.magnetism = 0.0, 0.0, 0.0
msg = "No data info in Name property '{}'".format(info)
self.__logger.warning(msg)
finally:
self.path = getcwd()
def update(self):
"""
        Update the ElementTree content with the latest data.
"""
if self.natom != len(self.data):
raise UnmatchedDataShape(
'length of data is not equal to atom number.')
elif self.natom != len(self.tf):
raise UnmatchedDataShape(
'length of tf is not equal to atom number.')
elif self.natom != len(self.atom_names):
raise UnmatchedDataShape(
'length of atom names is not equal to atom number.')
# atoms info
self.update_atoms()
# space group
self.update_bases()
# Thermodynamic info.
self.update_name()
return
def update_atoms(self):
"""
        Update atom-related attribute values in the element tree.
"""
# Find <IdentityMapping> tag.
identity_mappings = self.__get_identity_mappings()
# Loop over all atom type.
for atom in self.atom_types:
# Index for atom with same type.
idx = 0
# Loop over all IdentityMapping tags.
for identity_mapping in identity_mappings:
# Loop over all Atom3d tags in IdentityMapping.
for elem in identity_mapping.iter('Atom3d'):
if elem.attrib['Components'] != atom:
continue
# XYZ value
xyz = self.atomco_dict[atom][idx] # list of float
xyz = ','.join([str(v) for v in xyz])
if 'XYZ' not in elem.attrib:
msg = ("Found an Atom3d tag without 'XYZ' attribute" +
", set to {}").format(xyz)
self.__logger.info(msg)
elem.attrib['XYZ'] = xyz
# TF value
tf = self.tf_dict[atom][idx]
tf = ','.join(tf)
if tf == 'F,F,F':
if 'RestrictedProperties' not in elem.attrib:
elem.attrib.setdefault('RestrictedProperties',
'FractionalXYZ')
elif tf == 'T,T,T':
if 'RestrictedProperties' in elem.attrib:
elem.attrib.pop('RestrictedProperties')
# Atom name.
elem.set('Name', self.atom_names_dict[atom][idx])
idx += 1
def update_name(self):
"""
        Update energy, force, job path and related info in the ElementTree.
"""
value = ""
for key, attr in zip(['E', 'F', 'M'], ["energy", "force", "magnetism"]):
data = getattr(self, attr, 0.0)
value += "{}:{} ".format(key, data)
value = value.strip()
# Get current path.
path = getcwd()
value = "{} {}:{}".format(value, "P", path)
for elem in self.tree.iter("SymmetrySystem"):
elem.set("Name", value)
break
def update_bases(self):
"update bases value in ElementTree"
bases = self.bases.tolist()
bases_str = []
# float -> string
for basis in bases:
xyz = ','.join([str(v) for v in basis]) # vector string
bases_str.append(xyz)
for elem in self.tree.iter('SpaceGroup'):
elem.set('AVector', bases_str[0])
elem.set('BVector', bases_str[1])
elem.set('CVector', bases_str[2])
break
def modify_color(self, atom_number, color=(255, 117, 51)):
'''
Modify color of atom.
Parameters
----------
atom_number: int, number of atom(start from 1)
color: tuple of int, RGB value of color
Example
-------
>>> a.modify_color(99, color=(255, 255, 255))
'''
# get atom type and number of this type
# [48, 48, 30, 14] -> [48, 96, 126, 140]
atoms_num_sum = [sum(self.atom_numbers[: i+1])
for i in range(len(self.atom_types))]
for idx, n in enumerate(atoms_num_sum):
if atom_number <= n:
atom_idx = idx
break
atom_type = self.atom_types[atom_idx]
type_atom_number = atom_number - atoms_num_sum[atom_idx-1] # start from 1
# go through tags to modify atom color
color_attr = '%d,%d,%d, 255' % color
i = 0 # atom number counter
for elem in self.tree.iter('Atom3d'):
if 'XYZ' in elem.attrib and elem.attrib['Components'] == atom_type:
i += 1
# locate tag
if i == type_atom_number:
# modify color attribute
if 'Color' not in elem.attrib:
elem.attrib.setdefault('Color', color_attr)
else:
elem.set('Color', color_attr)
break
def tofile(self, filename='./new.xsd'):
"XsdFile object to .xsd file."
self.update()
self.tree.write(filename)
return
class ArcFile(VasPy):
def __init__(self, filename):
"""
Create a Material Studio *.arc file class.
Example:
>>> a = ArcFile("00-05.arc")
Class attributes descriptions
================================================================
Attribute Description
=============== ==============================================
filename        string, name of arc file.
coords_iterator generator, yields Cartesian coordinates as
                numpy arrays, one frame at a time.
lengths         list of float, lengths of lattice axes.
angles          list of float, angles of lattice axes.
=============== ==============================================
"""
super(ArcFile, self).__init__(filename)
# Set logger.
self.__logger = logging.getLogger("vaspy.ArcFile")
@property
def coords_iterator(self):
"""
Return a generator over the Cartesian coordinates in the arc file,
yielding the coordinates of all atoms for each trajectory frame.
"""
with open(self.filename, "r") as f:
collecting = False
coords = []
for line in f:
line = line.strip()
if not collecting and line.startswith("PBC "): # NOTE: "PBC " (with a space) distinguishes the data line from the "PBC=..." header
collecting = True
elif collecting and line.startswith("end"):
collecting = False
yield np.array(coords)
coords = []
# Collect coordinates data.
elif collecting:
line_list = str2list(line)
coord = [float(c) for c in line_list[1: 4]]
coords.append(coord)
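# Usage sketch (illustrative, not part of the original source): each yielded
# item is a numpy array of shape (natom, 3), one per trajectory frame, e.g.
#     arc = ArcFile("00-05.arc")        # filename assumed for illustration
#     for frame in arc.coords_iterator:
#         print(frame.shape)            # -> (natom, 3)
# Frames are collected between a "PBC " line and the following "end" line.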
@LazyProperty
def lengths(self):
"""
Lengths of the supercell lattice vectors.
"""
with open(self.filename, "r")
# This program is in the public domain
"""
Join reflectivity datasets with matching intent/cross section.
"""
from __future__ import print_function
from copy import deepcopy
import numpy as np
from dataflow.lib import unit
from .refldata import Intent, ReflData, Environment
from .util import poisson_average, extend
from .resolution import divergence_simple, dTdL2dQ, TiTdL2Qxz
try:
#from typing import List, Dict, Union, Sequence
#Columns = Dict[str, List[np.ndarray]]
#StackedColumns = Dict[str, np.ndarray]
#IndexSet = List[int]
pass
except ImportError:
pass
def sort_files(datasets, key):
# type: (List[ReflData], str) -> List[ReflData]
"""
Order files by key.
key can be one of: file, time, theta, or slit
"""
if key == 'file':
keyfn = lambda data: data.name
elif key == 'time':
import datetime
keyfn = lambda data: (data.date + datetime.timedelta(seconds=data.monitor.start_time[0]))
elif key == "theta":
keyfn = lambda data: (data.sample.angle_x[0], data.detector.angle_x[0])
elif key == "slit":
keyfn = lambda data: (data.slit1.x, data.slit2.x)
elif key == "none":
return datasets
else:
raise ValueError("Unknown sort key %r: use file, time, theta or slit"
% key)
datasets = datasets[:]
datasets.sort(key=keyfn)
return datasets
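# Usage sketch (illustrative): callers typically sort a group before joining,
# e.g. sort_files(group, key="theta") orders by (sample, detector) angle and
# key="time" orders by acquisition start time; key="none" leaves the order
# unchanged and any other key raises ValueError.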
def join_datasets(group, Qtol, dQtol, by_Q=False):
# type: (List[ReflData], float, float, bool) -> ReflData
"""
Create a new dataset which joins the results of all datasets in the group.
This is a multistep operation with the various parts broken into separate
functions.
"""
#print "joining files in",group[0].path,group[0].name,group[0].entry
# Make sure all datasets are normalized by the same factor.
normbase = group[0].normbase
assert all(data.normbase == normbase for data in group), "can't mix time and monitor normalized data"
# Gather the columns
fields = get_fields(group)
env = get_env(group)
fields.update(env)
columns = stack_columns(group, fields)
columns = apply_mask(group, columns)
columns = set_QdQ(columns)
# Group points together, either by target angles, by actual angles or by Q
# TODO: maybe include target Q
# TODO: check background subtraction based on trajectoryData._q
# ----- it can't be right since Qz_target is not properly propagated
# ----- through the join...
if Qtol == 0. and dQtol == 0.:
targets = get_target_values(group)
targets = stack_columns(group, targets)
targets = apply_mask(group, targets)
groups = group_by_target_angles(targets)
elif by_Q:
groups = group_by_Q(columns, Qtol=Qtol, dQtol=dQtol)
else:
groups = group_by_actual_angles(columns, Qtol=Qtol, dQtol=dQtol)
# Join the data points in the individual columns
columns = merge_points(groups, columns, normbase)
# Sort so that points are in display order
# Column keys are:
# Qx, Qz, dQ: Q and resolution
# Td: detector theta
# Ti: incident (sample) theta
# dT: angular divergence
# Li: monochromator wavelength
# Ld: detector wavelength
# dL: wavelength dispersion
isslit = Intent.isslit(group[0].intent)
isrock = Intent.isrock(group[0].intent)
if isrock:
# Sort detector rocking curves so that small deviations in sample
# angle don't throw off the order in detector angle.
keys = ('Td', 'Ti', 'Ld', 'dT', 'dL')
elif isslit:
keys = ('dT', 'Ld', 'dL')
else:
keys = ('Ti', 'Td', 'Ld', 'dT', 'dL')
columns = sort_columns(columns, keys)
data = build_dataset(group, columns, normbase)
#print "joined",data.intent
return data
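# Pipeline sketch (illustrative; the tolerance values are assumptions, not
# recommendations): a caller holding a list of ReflData measurements with the
# same intent and cross section might combine them as
#     data = join_datasets(sort_files(group, key="file"), Qtol=0.5, dQtol=0.002)
# where Qtol and dQtol are the matching tolerances handed to the group_by_*
# helpers used above.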
def build_dataset(group, columns, norm):
# type: (List[ReflData], StackedColumns, str) -> ReflData
"""
Build a new dataset from a set of columns.
Metadata is set from the first dataset in the group.
If there are any sample environment columns they will be added to
data.sample.environment.
"""
head = group[0]
# Copy details of first file as metadata for the returned dataset.
# Note: using deepcopy since this is going to update subgroup
# data such as data.slit1.x.
data = deepcopy(group[0])
## Could instead do a semi-deep copy using info from the group:
#data = copy(group[0])
#for group_name, _ in data._groups:
# setattr(data, group_name, copy(getattr(data, group_name)))
# Clear the fields that are no longer defined
data.sample.angle_y = None
data.sample.rotation = None
data.detector.angle_y = None
data.detector.rotation = None
for k in range(1, 5):
slit = getattr(data, 'slit%d'%k)
slit.x = slit.y = slit.x_target = slit.y_target = None
# summary data derived from group, or copied from head if commented
#data.instrument
#data.geometry
#data.probe
data.path = None # no longer refers to head file
data.uri = None # TODO: does a reduction have a DOI?
data.points = len(columns['v'])
#data.channels # unused
#data.scale = 1.0 # unused
#data.name
#data.entry
#data.description
data.date = min(d.date for d in group) # set date to earliest date
data.duration = sum(d.duration for d in group) # cumulative duration
data.polarization = (head.polarization
if all(d.polarization == head.polarization for d in group)
else '')
#data.normbase
data.warnings = [] # initialize per-file history
#data.vlabel
#data.vunits
#data.vscale
#data.xlabel
#data.xunits
#data.xscale
data.mask = None # all points are active after join
#data.angular_resolution # averaged
data.Qz_basis = head.Qz_basis
data.scan_value = [] # TODO: may want Td, Ti as alternate scan axes
data.scan_label = []
data.scan_units = []
data.intent = head.intent # preserve intent
#data.x # read-only
#data.dx # read-only
#data.v # averaged
#data.dv # averaged
#data.Qz # read-only # TODO: if join by Q then Qx,Qz,dQ need to be set
#data.Qx # read-only
#data.dQ # read-only
# Fill in rates and monitors.
# Note: we are not tracking monitor variance, so assume it is equal
# to monitor counts (dm^2 = m). This does not account for monitor
# scaling due to dead time correction, etc. In practice it doesn't
# matter since we've already normalized the counts to a count rate
# and we don't need detector counts or variance.
v, dv, m, t = columns['v'], columns['dv'], columns['monitor'], columns['time']
dmsq = m
data.v = v
data.dv = dv
data.monitor.count_time = t
data.monitor.counts = m
data.monitor.counts_variance = dmsq
data.monitor.roi_counts = columns['roi']
data.monitor.roi_variance = columns['roi']
data.monitor.source_power = columns['source_power']
data.monitor.source_power_variance = columns['source_power_variance']
# Assign a value to detector counts. We need this if we norm after join.
if norm == "none":
# v = counts, dv = dcounts
data.detector.counts = v
data.detector.counts_variance = dv**2
elif norm == "time":
# v = counts/time, dv = dcounts/time
data.detector.counts = v * extend(t, v)
data.detector.counts_variance = (dv * extend(t, dv))**2
elif norm == "monitor":
# v = counts/monitor, (dv/v)^2 = (dcounts/counts)^2+(dmonitor/monitor)^2
# => dc^2 = (m dv)^2 - (v dm)^2
data.detector.counts = v * extend(m, v)
data.detector.counts_variance = (extend(m, dv)*dv)**2 - v**2*extend(dmsq,v)
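# Worked check of the variance inversion above (illustrative numbers only):
# take c = 100 detector counts and m = 400 monitor counts, so v = c/m = 0.25,
# dc^2 = c = 100 and dm^2 = m = 400. Forward propagation gives
#   dv^2 = v^2 ((dc/c)^2 + (dm/m)^2) = 0.0625 * (0.01 + 0.0025) = 0.00078125
# and inverting as in the elif branch above recovers
#   (m dv)^2 - (v dm)^2 = 160000*0.00078125 - 0.0625*400 = 125 - 25 = 100 = dc^2.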
# Fill in the fields we have averaged
data.sample.angle_x = columns['Ti']
data.detector.angle_x = columns['Td']
data.sample.angle_x_target = columns['Ti_target']
data.detector.angle_x_target = columns['Td_target']
data.slit1.x = columns['s1']
data.slit2.x = columns['s2']
data.slit3.x = columns['s3']
data.slit4.x = columns['s4']
# TODO: cleaner handling of candor data
# Angular resolution may be stored separately from dT in the joined set
# if it is multidimensional or dT is set to something else for grouping.
res = 'angular_resolution' if 'angular_resolution' in columns else 'dT'
data.angular_resolution = columns[res]
# Some fields may not have been in the original data
if data.Qz_target is not None:
data.Qz_target = columns['Qz_target']
if data.monochromator.wavelength is not None:
data.monochromator.wavelength = columns['Li']
if data.detector.wavelength is not None:
# On candor data.detector.wavelength has shape [1, 2, 54] since it is
# constant for all measurement points. Since Ld and dL need to be
# scalars for grouping it is inconvenient to maintain the
# full wavelength for each frame, so we instead assume that any time
# the wavelength is multidimensional then it is constant. Further,
# we assume that the constant is included in the group[0] metadata
# we use as the basis of our return value.
if getattr(data.detector.wavelength, 'ndim', 0) < 2:
data.detector.wavelength = columns['Ld']
data.detector.wavelength_resolution = columns['dL']
# Add in any sample environment fields
data.sample.environment = {}
for k, v in head.sample.environment.items():
if k in columns:
env = Environment()
env.units = v.units
env.average = columns[k]
data.sample.environment[k] = env
# TODO: could maybe join the environment logs
# ----- this would require setting them all to a common start time
return data
def get_fields(group):
# type: (List[ReflData]) -> Columns
"""
Extract geometry and counts from all files in group into separate fields.
Returns a map of columns: list of vectors, with one vector for each
dataset in the group.
"""
columns = dict(
s1=[data.slit1.x for data in group],
s2=[data.slit2.x for data in group],
s3=[data.slit3.x for data in group],
s4=[data.slit4.x for data in
#!/usr/bin/env python
import sys
import PyTango
from upgrade_utils import *
class Controller:
def __init__(self, type, file, kcls, name, id):
self.type = type
self.file = file
self.kcls = kcls
self.name = name
self.id = id
def __str__(self):
return "%s %s %s %s %s" % (self.type, self.file, self.kcls, self.name, self.id)
def get_controller_prop(self):
return [self.type, self.file, self.kcls, self.name, self.id]
class Up02To03(Upgrade):
def get_pool_controllers(self, serv, db=None):
"""Gets the list of Pool controllers from pool version 0.1.x"""
pool_serv_name = "Pool/%s" % serv
pool_dev_name = get_pool_device_from_server(serv)
db = db or get_db()
ctrls = db.get_device_property(
pool_dev_name, ["Controller"])["Controller"]
i = 0
ret = []
while i < len(ctrls):
type = ctrls[i]
file = ctrls[i + 1]
kcls = ctrls[i + 2]
name = ctrls[i + 3]
id = ctrls[i + 4]
i += 5
ret.append(Controller(type, file, kcls, name, id))
return ret
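# Illustrative example (property values assumed, not from a real database):
# given a flat "Controller" property such as
#     ["Motor", "IcePAPCtrl.py", "IcePAPController", "ipapctrl01", "1",
#      "CounterTimer", "NICtrl.py", "NI660XCounterCtrl", "nictrl01", "2"]
# get_pool_controllers() consumes it in groups of five (type, file, class,
# name, id) and returns two Controller objects.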
@classmethod
def fromTo(cls):
return ("0.2.x", "0.3.0")
@classmethod
def supports(cls, old_vers, new_vers):
return old_vers.startswith('0.2') and new_vers.startswith('0.3')
@classmethod
def supports_old(cls, old_vers):
return old_vers.startswith('0.2')
@classmethod
def supports_new(cls, new_vers):
return new_vers.startswith('0.3')
def upgrade(self, db, serv, old_vers, new_vers):
if not Up02To03.supports(old_vers, new_vers):
raise Exception("Unsupported upgrade")
yield "Upgrading %s from %s to %s... " % (serv, old_vers, new_vers), 1
id = 1
pending_put_properties = {}
# map used to store <id, name (alias)>
elem_map, reverse_elem_map = {}, {}
pool_serv_name = "Pool/%s" % serv
pool_dev_name = get_pool_device_from_server(serv, db=db)
# 1 - get controllers
# 2 - get elements
elems = db.get_device_class_list(pool_serv_name)
i = 0
elems_dict = {}
while i < len(elems):
dev, kcls = elems[i], elems[i + 1]
if not elems_dict.has_key(kcls):
elems_dict[kcls] = []
elems_dict[kcls].append(dev)
i += 2
# 3.1 - store motors
yield "Storing motor information...", 5
for m_dev_name in elems_dict.get("Motor", []):
props = db.get_device_property(
m_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
m_alias = db.get_alias(m_dev_name).lower()
elem_map[id] = m_alias
reverse_elem_map[m_alias] = id
# 3.2 - store counter timers
yield "Storing counter information...", 10
for expch_dev_name in elems_dict.get("CTExpChannel", []):
props = db.get_device_property(
expch_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
expch_alias = db.get_alias(expch_dev_name).lower()
elem_map[id] = expch_alias
reverse_elem_map[expch_alias] = id
# 3.3 - store 0D
yield "Storing 0D information...", 15
for expch_dev_name in elems_dict.get("ZeroDExpChannel", []):
props = db.get_device_property(
expch_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
expch_alias = db.get_alias(expch_dev_name).lower()
elem_map[id] = expch_alias
reverse_elem_map[expch_alias] = id
# 3.4 - store 1D
yield "Storing 1D information...", 20
for expch_dev_name in elems_dict.get("OneDExpChannel", []):
props = db.get_device_property(
expch_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
expch_alias = db.get_alias(expch_dev_name).lower()
elem_map[id] = expch_alias
reverse_elem_map[expch_alias] = id
# 3.5 - store 2D
yield "Storing 2D information...", 25
for expch_dev_name in elems_dict.get("TwoDExpChannel", []):
props = db.get_device_property(
expch_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
expch_alias = db.get_alias(expch_dev_name).lower()
elem_map[id] = expch_alias
reverse_elem_map[expch_alias] = id
# 3.6 - store communication channels
yield "Storing communication channel information...", 30
for comch_dev_name in elems_dict.get("CommunicationChannel", []):
props = db.get_device_property(
comch_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
comch_alias = db.get_alias(comch_dev_name).lower()
elem_map[id] = comch_alias
reverse_elem_map[comch_alias] = id
# 3.7 - store IO register
yield "Storing ioregister information...", 35
for ior_dev_name in elems_dict.get("IORegister", []):
props = db.get_device_property(
ior_dev_name, ("id", "ctrl_id", "axis"))
id = int(props["id"][0])
ior_alias = db.get_alias(ior_dev_name).lower()
elem_map[id] = ior_alias
reverse_elem_map[ior_alias] = id
# 3.8 - For MotorGroup remove 'motor_group_id' and add 'id'
yield "Storing MotorGroup information...", 40
for mg_dev_name in elems_dict.get("MotorGroup", []):
mg_dev_name = mg_dev_name.lower()
props = ('id', 'motor_list', 'motor_group_list',
'pseudo_motor_list', 'phys_group_elt', 'user_group_elt')
props = db.get_device_property(mg_dev_name, props)
id = int(props["id"][0])
mg_alias = db.get_alias(mg_dev_name).lower()
elem_map[id] = mg_alias
reverse_elem_map[mg_alias] = id
pending_put_properties[mg_dev_name] = {}
new_motor_list = []
skip = True
for name in props['motor_list']:
name = name.lower()
# if all are already IDs, skip the property
try:
int(name)
except Exception, e:
skip = False
new_motor_list.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'motor_list': new_motor_list})
new_motor_group_list = []
skip = True
for name in props['motor_group_list']:
name = name.lower()
# if all are already IDs, skip the property
try:
int(name)
except Exception, e:
skip = False
new_motor_group_list.append(
reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'motor_group_list': new_motor_group_list})
new_pseudo_motor_list = []
skip = True
for name in props['pseudo_motor_list']:
name = name.lower()
# if all are already IDs, skip the property
try:
int(name)
except Exception, e:
skip = False
new_pseudo_motor_list.append(
reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'pseudo_motor_list': new_pseudo_motor_list})
new_phys_group_elt = []
skip = True
for name in props['phys_group_elt']:
name = name.lower()
# if all are already IDs, skip the property
try:
int(name)
except Exception, e:
skip = False
new_phys_group_elt.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'phys_group_elt': new_phys_group_elt})
new_user_group_elt = []
skip = True
for name in props['user_group_elt']:
name = name.lower()
# if all are already IDs, skip the property
try:
int(name)
except Exception, e:
skip = False
new_user_group_elt.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'user_group_elt': new_user_group_elt})
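# Illustrative note (aliases and IDs assumed): each *_list property above
# originally stores element aliases such as ["mot01", "mot02"]; the loops
# rewrite them through reverse_elem_map into numeric IDs such as [3, 7], and
# leave the property untouched when every entry already parses as an integer.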
# 3.9 - For MeasurementGroup remove 'measurement_group_id' and add 'id'
yield "Storing MeasurementGroup information...", 45
for mg_dev_name in elems_dict.get("MeasurementGroup", []):
mg_dev_name = mg_dev_name.lower()
props = ('id', 'ct_list', 'zerodexpchannel_list', 'onedexpchannel_list',
'twodexpchannel_list', 'pseudocounter_list', 'motor_list',
'phys_group_elt', 'user_group_elt')
props = db.get_device_property(mg_dev_name, props)
id = int(props["id"][0])
mg_alias = db.get_alias(mg_dev_name).lower()
elem_map[id] = mg_alias
reverse_elem_map[mg_alias] = id
pending_put_properties[mg_dev_name] = {}
new_ct_list = []
skip = True
for name in props['ct_list']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_ct_list.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'ct_list': new_ct_list})
new_zerodexpchannel_list = []
skip = True
for name in props['zerodexpchannel_list']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_zerodexpchannel_list.append(
reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'zerodexpchannel_list': new_zerodexpchannel_list})
new_onedexpchannel_list = []
skip = True
for name in props['onedexpchannel_list']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_onedexpchannel_list.append(
reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'onedexpchannel_list': new_onedexpchannel_list})
new_twodexpchannel_list = []
skip = True
for name in props['twodexpchannel_list']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_twodexpchannel_list.append(
reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'twodexpchannel_list': new_twodexpchannel_list})
new_pseudocounter_list = []
skip = True
for name in props['pseudocounter_list']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_pseudocounter_list.append(
reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'pseudocounter_list': new_pseudocounter_list})
new_motor_list = []
skip = True
for name in props['motor_list']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_motor_list.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'motor_list': new_motor_list})
new_phys_group_elt = []
skip = True
for name in props['phys_group_elt']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_phys_group_elt.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'phys_group_elt': new_phys_group_elt})
new_user_group_elt = []
skip = True
for name in props['user_group_elt']:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
new_user_group_elt.append(reverse_elem_map.get(name, name))
if not skip:
pending_put_properties[mg_dev_name].update(
{'user_group_elt': new_user_group_elt})
# 3.10 - For PseudoMotor replace motor_list from a list of names to a
# list of IDs
yield "Storing PseudoMotor information...", 50
for pm_dev_name in elems_dict.get("PseudoMotor", []):
pm_dev_name = pm_dev_name.lower()
props = db.get_device_property(pm_dev_name, ("id", "ctrl_id", "axis",
"motor_group", "motor_group_id", "motor_list"))
id = int(props["id"][0])
pm_alias = db.get_alias(pm_dev_name).lower()
elem_map[id] = pm_alias
reverse_elem_map[pm_alias] = id
motor_group = props["motor_group"]
if not motor_group:
motor_group_id = motor_group = props["motor_group_id"]
if not motor_group_id:
print "WARNING: neither motor_group nor motor_group_id property is defined for %s." % pm_dev_name
else:
motor_group = motor_group[0].lower()
motor_group_id = reverse_elem_map[motor_group]
props["motor_group_id"] = [str(motor_group_id)]
db.put_device_property(pm_dev_name, props)
db.delete_device_property(
pm_dev_name, ["pseudo_motor_id", "motor_group", "role", "role_idx"])
skip = True
motor_ids = []
for name in props["motor_list"]:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
motor_ids.append(reverse_elem_map[name])
if not skip:
db.put_device_property(pm_dev_name, {"motor_list": motor_ids})
# 3.11 - For PseudoCounter replace channel_list from a list of names to
# a list of IDs
yield "Storing PseudoCounter information...", 60
for pc_dev_name in elems_dict.get("PseudoCounter", []):
pc_dev_name = pc_dev_name.lower()
props = db.get_device_property(pc_dev_name, ("id", "ctrl_id", "axis",
"channel_list"))
id = int(props["id"][0])
pc_alias = db.get_alias(pc_dev_name).lower()
elem_map[id] = pc_alias
reverse_elem_map[pc_alias] = id
db.delete_device_property(
pc_dev_name, ["pseudo_counter_id", "role", "role_idx"])
skip = True
channel_ids = []
for name in props["channel_list"]:
name = name.lower()
try:
int(name)
except Exception, e:
skip = False
channel_ids.append(reverse_elem_map[name])
if not skip:
db.put_device_property(
pc_dev_name, {"channel_list": channel_ids})
# 4. - Apply pending properties
yield "Applying pending properties...", 75
for dev, props in pending_put_properties.iteritems():
for name, val in props.iteritems():
for i, elem in enumerate(val):
if isinstance(elem, str):
val[i] = reverse_elem_map[elem]
db.put_device_property(dev, props)
# 5. - Finally update the version property in the database
yield "Updating the version | |
<filename>tutorial_7_a_star_solved.py<gh_stars>0
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2016 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
# A* Search Solution for Tutorial sample #7: The Maze Decorator
# Note: this was written quickly, with only a light pass for readability.
# It is almost certainly not as efficient or optimized as it could be, and
# there are several places it could be improved; feel free to do whatever you'd like with it.
import MalmoPython
import os
import sys
import time
import json
import copy
from MIDCA import goals
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
def GetMissionXML( seed, gp ):
return '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>Hello world!</Summary>
</About>
<ServerSection>
<ServerInitialConditions>
<Time>
<StartTime>1000</StartTime>
<AllowPassageOfTime>false</AllowPassageOfTime>
</Time>
<Weather>clear</Weather>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="3;7,44*49,73,35:1,159:4,95:13,35:13,159:11,95:10,159:14,159:6,35:6,95:6;12;"/>
<DrawingDecorator>
<DrawSphere x="-27" y="70" z="0" radius="30" type="air"/>
</DrawingDecorator>
<MazeDecorator>
<Seed>'''+str(seed)+'''</Seed>
<SizeAndPosition width="10" length="10" height="10" xOrigin="-32" yOrigin="69" zOrigin="-5"/>
<StartBlock type="emerald_block" fixedToEdge="true"/>
<EndBlock type="redstone_block" fixedToEdge="true"/>
<PathBlock type="diamond_block"/>
<FloorBlock type="air"/>
<GapBlock type="air"/>
<GapProbability>'''+str(gp)+'''</GapProbability>
<AllowDiagonalMovement>false</AllowDiagonalMovement>
</MazeDecorator>
<ServerQuitFromTimeUp timeLimitMs="30000"/>
<ServerQuitWhenAnyAgentFinishes/>
</ServerHandlers>
</ServerSection>
<AgentSection mode="Survival">
<Name>MalmoTutorialBot</Name>
<AgentStart>
<Placement x="0.5" y="56.0" z="0.5"/>
</AgentStart>
<AgentHandlers>
<DiscreteMovementCommands/>
<AgentQuitFromTouchingBlockType>
<Block type="redstone_block"/>
</AgentQuitFromTouchingBlockType>
<ObservationFromGrid>
<Grid name="front20x10">
<min x="-10" y="-1" z="0"/>
<max x="10" y="-1" z="10"/>
</Grid>
</ObservationFromGrid>
</AgentHandlers>
</AgentSection>
</Mission>'''
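# Usage sketch (illustrative): GetMissionXML("random", 0.4) fills the
# MazeDecorator with the literal seed string "random" and a GapProbability of
# 0.4; the mission loop at the bottom of this file sweeps the gap probability
# from 0.0 to 0.9 across the ten repeats.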
class Node():
'''
a node that will be used in A* search
'''
agent_loc = None # x, y coordinate
state = None # all_tiles in a grid
parent_node = []
actions_taken = [] # actions taken to reach this node
depth = 0
def __init__(self, agent_loc, state, parent_node, actions_taken):
self.agent_loc = agent_loc
self.state = state
self.parent_node = parent_node
if parent_node:
self.depth = parent_node.depth+1
else:
self.depth = 0
self.actions_taken = actions_taken
def __str__(self):
s = "aloc="+str(self.agent_loc)+"state_x_len="+str(len(self.state))+"state_y_len"+str(len(self.state[0]))
return s
def get_child_nodes(node, curr_nodes, already_visited_nodes):
if not node: # sanity check
return []
x = node.agent_loc[0]
y = node.agent_loc[1]
valid_child_nodes = []
# add each action
if x != len(grid[y]) - 1: # stay inside the east edge of the grid
# make a copy just to make sure we are not modifying some
# node some other place
e_actions = copy.deepcopy(node.actions_taken)
e_actions.append("movewest 1")
east_node = Node((x+1,y),grid,node,e_actions)
valid_child_nodes.append(east_node) #east node
if x != 0:
w_actions = copy.deepcopy(node.actions_taken)
w_actions.append("moveeast 1")
west_node = Node((x-1,y),grid,node,w_actions)
valid_child_nodes.append(west_node) # west node
if y != len(grid) - 1: # stay inside the south edge of the grid
s_actions = copy.deepcopy(node.actions_taken)
s_actions.append("movesouth 1")
south_node = Node((x,y+1),grid,node,s_actions)
valid_child_nodes.append(south_node) # south node
if y != 0:
n_actions = copy.deepcopy(node.actions_taken)
n_actions.append("movenorth 1")
north_node = Node((x,y-1),grid,node,n_actions)
valid_child_nodes.append(north_node) # north node
# filter out anything that is air
try:
valid_child_nodes = filter(lambda n: n.state[n.agent_loc[1]][n.agent_loc[0]] != u'air', valid_child_nodes)
except:
pass
# filter out anything that is already in curr_nodes (may not be necessary)
curr_node_locs = map(lambda n: n.agent_loc,curr_nodes)
valid_child_nodes = filter(lambda n: n.agent_loc not in curr_node_locs, valid_child_nodes)
# filter out anything that we have already visited (necessary, prevents cycles)
visited_node_locs = map(lambda n: n.agent_loc,already_visited_nodes)
valid_child_nodes = filter(lambda n: n.agent_loc not in visited_node_locs, valid_child_nodes)
return valid_child_nodes
def A_star_search(start_state):
# find the location of the emerald block, record this as agent's position and start
# also find location of redstone block, save as goal loc
agent_loc_x = -1
agent_loc_y = -1
goal_x = -1
goal_y = -1
found_emerald_block = False
found_redstone_block = False
for y in range(len(start_state)):
for x in range(len(start_state[y])):
if start_state[y][x] == u'emerald_block':
agent_loc_x = x
agent_loc_y = y
found_emerald_block = True
elif start_state[y][x] == u'redstone_block':
goal_x = x
goal_y = y
found_redstone_block = True
# just in case our state isn't valid
if not (found_emerald_block and found_redstone_block):
return []
print "agent_loc = " +str(agent_loc_x) + ","+str(agent_loc_y)
print "goal_loc = "+str(goal_x) + ","+str(goal_y)
# safety check, make sure start and goal are not the same
if str(agent_loc_x) + ","+str(agent_loc_y) == str(goal_x) + ","+str(goal_y):
return []
# root node
root_node = Node((agent_loc_x, agent_loc_y),grid,None,[])
def goal_reached(node):
reached = False
try:
reached = grid[node.agent_loc[1]][node.agent_loc[0]] == u'redstone_block'
except:
print "somehow it broked with y="+str(node.agent_loc[1])+", x="+str(node.agent_loc[1])
return reached
def manhattan_dist_to_goal(node):
#print "g(n)="+str((abs(goal_x - node.agent_loc[0])+abs(goal_y-node.agent_loc[1])))+",h(n)="+str(node.depth)
return (abs(goal_x - node.agent_loc[0])+abs(goal_y-node.agent_loc[1])) + node.depth
def depth_and_manhatten(node):
return manhattan_dist_to_goal(node)
# initialize the queue with the first node
curr_nodes = [root_node]
already_visited_nodes = []
while len(curr_nodes) > 0 and not goal_reached(curr_nodes[0]):
# get first node from our queue
curr_node = curr_nodes[0]
curr_nodes = curr_nodes[1:] # take the first node off
# save this node so we don't visit again
already_visited_nodes.append(curr_node)
# get child nodes
child_nodes = get_child_nodes(copy.deepcopy(curr_node),curr_nodes, already_visited_nodes)
# add child nodes to queue
curr_nodes += child_nodes
# sort queue based on manhattan distance + depth
curr_nodes = sorted(curr_nodes,key=depth_and_manhatten)
#print "already_visited_nodes = "+str(map(lambda n: n.agent_loc, already_visited_nodes))
print "Q = " +str(map(lambda n: "f(n)="+str(depth_and_manhatten(n))+",xy="+str(n.agent_loc), curr_nodes))
#print "queue (depths) = "+str(map(lambda n: n.depth, curr_nodes))
#print "queue size = " +str(len(curr_nodes))
#print "queue distances to goal: "+str(map(depth_and_manhatten,curr_nodes))
# sort nodes based on depth (bfs for now)
#time.sleep(0.1)
# make sure goal was reached
if not curr_nodes or not goal_reached(curr_nodes[0]):
print "ERROR: search terminated without finding a path"
else:
print "computed path:"
for action in curr_nodes[0].actions_taken:
print " " + str(action)
return curr_nodes[0].actions_taken if curr_nodes else []
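# Worked example of the ordering used above (numbers assumed for illustration):
# a node sitting 3 tiles east and 2 tiles south of the redstone goal that was
# reached after 4 moves scores
#     f(n) = manhattan + depth = (3 + 2) + 4 = 9
# and the queue is kept sorted by this value, so the cheapest estimated total
# path is always expanded first, which is what makes the search A* rather than
# plain greedy best-first.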
def pretty_print_grid_obs(grid):
'''
Displays the state used in A* nodes
'''
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == u'air':
print 'a',
elif grid[i][j] == u'diamond_block':
print 'd',
elif grid[i][j] == u'emerald_block':
print 'E',
elif grid[i][j] == u'redstone_block':
print 'R',
else:
print '?',
print ""
# Create default Malmo objects:
time.sleep(5) # helps recording the video
agent_host = MalmoPython.AgentHost()
try:
agent_host.parse( sys.argv )
except RuntimeError as e:
print 'ERROR:',e
print agent_host.getUsage()
exit(1)
if agent_host.receivedArgument("help"):
print agent_host.getUsage()
exit(0)
if agent_host.receivedArgument("test"):
num_repeats = 1
else:
num_repeats = 10
num_repeats = 10
for i in range(num_repeats):
my_mission = MalmoPython.MissionSpec(GetMissionXML("random", float(i/10.0)), True)
my_mission_record = MalmoPython.MissionRecordSpec()
#agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)
# Attempt to start a mission:
max_retries = 3
for retry in range(max_retries):
try:
agent_host.startMission( my_mission, my_mission_record )
break
except RuntimeError as e:
if retry == max_retries - 1:
print "Error starting mission:",e
exit(1)
else:
time.sleep(2)
# Loop until mission starts:
print "Waiting for the mission to start ",
world_state = agent_host.getWorldState()
while not world_state.is_mission_running:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
print "Error:",error.text
print
print "Mission running ",
# Loop until mission ends:
while world_state.is_mission_running:
sys.stdout.write(".")
time.sleep(0.1)
world_state = agent_host.getWorldState()
#print "observations are " + str(world_state.observations)
if world_state.number_of_observations_since_last_state > 0:
#print "Got " + str(world_state.number_of_observations_since_last_state) + " observations since last state."
msg = world_state.observations[-1].text
ob = json.loads(msg)
#print "ob is "+str(ob)
'''
I used a special observation to get 200 blocks in front of the agent,
which I know will contain the maze (and extra air tiles). I think perform
A* over this grid of blocks.
'''
# get the data in front of the agent ONCE
all_tiles = ob.get(u'front20x10',0)
grid = []
for i in range(10):
#print "looking at all_tiles["+str((i*20))+":"+str(((i+1)*20))+"][::1]"
# for some reason it's reverse (might have to do with agent's yaw)
reverse_row = (all_tiles[i*21:((i+1)*21)])[::-1]
#print "len(row) = " + str(len(reverse_row))
grid.append(reverse_row)
# lets see what it looks like
pretty_print_grid_obs(grid)
# run A* and compute plan
plan = A_star_search(grid)
if plan:
for a in plan:
<reponame>wenfeifei/miniblink49
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
class Base(unittest.TestCase):
# Note that all of these tests are written assuming the configuration
# being tested is Windows XP, Release build.
def __init__(self, testFunc):
host = MockHost()
self._port = host.port_factory.get('test-win-xp', None)
self._exp = None
unittest.TestCase.__init__(self, testFunc)
def get_basic_tests(self):
return ['failures/expected/text.html',
'failures/expected/image_checksum.html',
'failures/expected/crash.html',
'failures/expected/needsrebaseline.html',
'failures/expected/needsmanualrebaseline.html',
'failures/expected/missing_text.html',
'failures/expected/image.html',
'failures/expected/timeout.html',
'passes/text.html']
def get_basic_expectations(self):
return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ WontFix ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
Bug(test) failures/expected/image_checksum.html [ WontFix ]
Bug(test) failures/expected/image.html [ WontFix Mac ]
"""
def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
self._port.expectations_dict = lambda: expectations_dict
expectations_to_lint = expectations_dict if is_lint_mode else None
self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_dict=expectations_to_lint, is_lint_mode=is_lint_mode)
def assert_exp_list(self, test, results):
self.assertEqual(self._exp.get_expectations(test), set(results))
def assert_exp(self, test, result):
self.assert_exp_list(test, [result])
def assert_bad_expectations(self, expectations, overrides=None):
self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', FAIL)
self.assert_exp_list('failures/expected/image_checksum.html', [WONTFIX, SKIP])
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations('failures/expected/text.html'), set([FAIL, CRASH]))
def test_result_was_expected(self):
# test basics
self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
# test handling of SKIPped tests and results
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), True)
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK]), test_needs_rebaselining=False), True)
# test handling of MISSING results and the REBASELINE specifier
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
self.assertTrue(TestExpectations.result_was_expected(PASS, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(MISSING, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(IMAGE, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(IMAGE_PLUS_TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertTrue(TestExpectations.result_was_expected(AUDIO, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(TIMEOUT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(CRASH, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
self.assertFalse(TestExpectations.result_was_expected(LEAK, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
def test_remove_pixel_failures(self):
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
def test_suffixes_for_expectations(self):
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
def test_category_expectations(self):
# This test checks unknown tests are not present in the
# expectations and that known test part of a test category is
# present in the expectations.
exp_str = 'Bug(x) failures/expected [ WontFix ]'
self.parse_exp(exp_str)
test_name = 'failures/expected/unknown-test.html'
unknown_test = test_name
self.assertRaises(KeyError, self._exp.get_expectations,
unknown_test)
self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
def test_get_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_expectations_string('failures/expected/text.html'), 'FAIL')
def test_expectation_to_string(self):
# Normal cases are handled by other tests.
self.parse_exp(self.get_basic_expectations())
self.assertRaises(ValueError, self._exp.expectation_to_string,
-1)
def test_get_test_set(self):
# Handle some corner cases for this routine not covered by other tests.
self.parse_exp(self.get_basic_expectations())
s = self._exp.get_test_set(WONTFIX)
self.assertEqual(s, set(['failures/expected/crash.html', 'failures/expected/image_checksum.html']))
def test_needs_rebaseline_reftest(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline.html'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline-expected.html'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline.html'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline-expected.html'), 'content')
self.parse_exp("""Bug(user) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(user) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]""", is_lint_mode=True)
self.assertFalse(True, "ParseError wasn't raised")
except ParseError, e:
warnings = """expectations:1 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsrebaseline.html
expectations:2 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsmanualrebaseline.html"""
self.assertEqual(str(e), warnings)
def test_parse_warning(self):
try:
filesystem = self._port.host.filesystem
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'test-to-rebaseline.html'), 'content')
self.parse_exp("Bug(user) [ FOO ] failures/expected/text.html [ Failure ]\n"
"Bug(user) non-existent-test.html [ Failure ]\n"
"Bug(user) disabled-test.html-disabled [ ImageOnlyFailure ]\n"
"Bug(user) [ Release ] test-to-rebaseline.html [ NeedsRebaseline ]", is_lint_mode=True)
self.assertFalse(True, "ParseError wasn't raised")
except ParseError, e:
warnings = ("expectations:1 Unrecognized specifier 'foo' failures/expected/text.html\n"
"expectations:2 Path does not exist. non-existent-test.html\n"
"expectations:4 A test cannot be rebaselined for Debug/Release. test-to-rebaseline.html")
self.assertEqual(str(e), warnings)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
oc = OutputCapture()
try:
oc.capture_output()
self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
finally:
_, _, logs = oc.restore_output()
self.assertNotEquals(logs, '')
def test_error_on_different_platform(self):
# parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_error_on_different_build_type(self):
# parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_overrides(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
self.assert_exp_list('failures/expected/text.html', [FAIL, IMAGE])
def test_overrides__directory(self):
self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected [ Crash ]")
self.assert_exp_list('failures/expected/text.html', [FAIL, CRASH])
self.assert_exp_list('failures/expected/image.html', [CRASH])
def test_overrides__duplicate(self):
self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
"Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
"Bug(override) failures/expected/text.html [ Crash ]\n")
def test_pixel_tests_flag(self):
def match(test, result, pixel_tests_enabled):
return self._exp.matches_an_expected_result(
test, result, pixel_tests_enabled, sanitizer_is_enabled=False)
self.parse_exp(self.get_basic_expectations())
self.assertTrue(match('failures/expected/text.html', FAIL, True))
self.assertTrue(match('failures/expected/text.html', FAIL, False))
self.assertFalse(match('failures/expected/text.html', CRASH, True))
self.assertFalse(match('failures/expected/text.html', CRASH, False))
self.assertTrue(match('failures/expected/image_checksum.html', PASS, True))
self.assertTrue(match('failures/expected/image_checksum.html', PASS, False))
self.assertTrue(match('failures/expected/crash.html', PASS, False))
self.assertTrue(match('failures/expected/needsrebaseline.html', TEXT, True))
self.assertFalse(match('failures/expected/needsrebaseline.html', CRASH, True))
self.assertTrue(match('failures/expected/needsmanualrebaseline.html', TEXT, True))
self.assertFalse(match('failures/expected/needsmanualrebaseline.html', CRASH, True))
self.assertTrue(match('passes/text.html', PASS, False))
def test_sanitizer_flag(self):
def match(test, result):
return self._exp.matches_an_expected_result(
test, result, pixel_tests_are_enabled=False, sanitizer_is_enabled=True)
self.parse_exp("""
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
""")
self.assertTrue(match('failures/expected/crash.html', CRASH))
self.assertTrue(match('failures/expected/image.html', PASS))
self.assertTrue(match('failures/expected/text.html', PASS))
self.assertTrue(match('failures/expected/timeout.html', TIMEOUT))
def test_more_specific_override_resets_skip(self):
self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
"Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
self.assert_exp('failures/expected/text.html', IMAGE)
self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
'failures/expected/text.html') in
self._exp.get_tests_with_result_type(SKIP))
def test_bot_test_expectations(self):
"""Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
test_name1 = 'failures/expected/text.html'
test_name2 = 'passes/text.html'
expectations_dict = OrderedDict()
expectations_dict['expectations'] = "Bug(x) %s [ ImageOnlyFailure ]\nBug(x) %s [ Slow ]\n" % (test_name1, test_name2)
self._port.expectations_dict = lambda: expectations_dict
expectations = TestExpectations(self._port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations(test_name1), set([IMAGE]))
self.assertEqual(expectations.get_expectations(test_name2), set([SLOW]))
def bot_expectations():
return {test_name1: ['PASS', 'TIMEOUT'], test_name2: ['CRASH']}
self._port.bot_expectations = bot_expectations
self._port._options.ignore_flaky_tests = 'unexpected'
expectations = TestExpectations(self._port, self.get_basic_tests())
self.assertEqual(expectations.get_expectations(test_name1), set([PASS, IMAGE, TIMEOUT]))
self.assertEqual(expectations.get_expectations(test_name2), set([CRASH, SLOW]))
class SkippedTests(Base):
def check(self, expectations, overrides, skips, lint=False, expected_results=[WONTFIX, SKIP, FAIL]):
port = MockHost().port_factory.get('test-win-xp')
port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = expectations
if overrides:
expectations_dict['overrides'] = overrides
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(skips)
expectations_to_lint = expectations_dict if lint else None
exp = TestExpectations(port, ['failures/expected/text.html'], expectations_dict=expectations_to_lint, is_lint_mode=lint)
self.assertEqual(exp.get_expectations('failures/expected/text.html'), set(expected_results))
def test_skipped_tests_work(self):
self.check(expectations='', overrides=None, skips=['failures/expected/text.html'], expected_results=[WONTFIX, SKIP])
def test_duplicate_skipped_test_fails_lint(self):
self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
overrides=None, skips=['failures/expected/text.html'], lint=True)
def test_skipped_file_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_expectations(self):
self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected'])
def test_skipped_file_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_overrides(self):
self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected'])
def test_skipped_entry_dont_exist(self):
port = MockHost().port_factory.get('test-win-xp')
expectations_dict = OrderedDict()
expectations_dict['expectations'] = ''
port.expectations_dict = lambda: expectations_dict
port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
capture = OutputCapture()
capture.capture_output()
exp = TestExpectations(port)
_, _, logs = capture.restore_output()
self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
def test_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
notrun = 'failures/expected/text.html'
self._exp.add_extra_skipped_tests([notrun])
self.assertEqual('NOTRUN', self._exp.get_expectations_string(notrun))
class ExpectationSyntaxTests(Base):
def test_unrecognized_expectation(self):
self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
def test_macro(self):
exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
def assert_tokenize_exp(self, line, bugs=None, specifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
bugs = bugs or []
specifiers = specifiers or []
expectations = expectations or []
warnings = warnings or []
filename = 'TestExpectations'
line_number = '1'
expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
self.assertEqual(expectation_line.warnings, warnings)
self.assertEqual(expectation_line.name,
<filename>src/lotusCalender.py
import json
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, QTime, QDate, pyqtSignal, QRect, QRectF, QObject, pyqtProperty, QDateTime
from PyQt5.QtGui import QColor, QPen
from PyQt5.QtWidgets import QPushButton, QWidget, QDialogButtonBox, QVBoxLayout, QHBoxLayout, \
QDialog, QFormLayout, QSpinBox, QDateTimeEdit, QLineEdit, QTimeEdit, QRadioButton, QMessageBox, QLabel, \
QCalendarWidget, QStackedWidget, QColorDialog, QSizePolicy, QSpacerItem, QGridLayout, QCheckBox, QMenu, QAction
from src.constants import SCHEDULE_FILE_PATH
from src.lotusUtils import clear_layout, clear_grid, camel_case
DAYS = ["M", "T", "W", "R", "F", "Sa", "Su"]
def to_qdate(date_object:dict):
return QDate(date_object["year"], date_object["month"], date_object["day"])
def to_qtime(date_object:dict):
return QTime(date_object["hour"], date_object["minute"])
def to_qcolor(data_object:dict):
return QColor(data_object["r"], data_object["g"], data_object["b"])
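# Example of the schedule JSON shape these helpers assume (hypothetical entry,
# not taken from a real schedule file):
#     {"Biology 101": {
#         "type": "recurring event",
#         "day": "M",
#         "start": {"year": 2021, "month": 1, "day": 11},
#         "end": {"year": 2021, "month": 4, "day": 30},
#         "time": {"hour": 9, "minute": 30},
#         "color": {"r": 255, "g": 117, "b": 51}}}
# to_qdate(), to_qtime() and to_qcolor() convert the nested dicts above into
# the corresponding QDate, QTime and QColor objects.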
class Schedule(QObject):
updated = pyqtSignal()
connect_buttons = pyqtSignal(list)
def __init__(self):
super(QObject, self).__init__()
self.load_schedule()
self.schedule:dict = self._schedule
@property
def schedule(self):
return self._schedule
@schedule.setter
def schedule(self, schedule:dict):
with open(SCHEDULE_FILE_PATH, "w+") as schedule_file:
json.dump(schedule, schedule_file, indent=4, sort_keys=True)
schedule_file.close()
self._schedule:dict = schedule
self.updated.emit()
def load_schedule(self):
try:
schedule_file = open(SCHEDULE_FILE_PATH)
self._schedule = json.load(schedule_file)
schedule_file.close()
except FileNotFoundError as e:
with open(SCHEDULE_FILE_PATH, "w+") as schedule_file:
schedule_file.write("{}")
schedule_file.close()
with open(SCHEDULE_FILE_PATH, "r") as schedule_file:
self._schedule = json.load(schedule_file)
schedule_file.close()
def get_recurring_event_start_end_dates(self, event_data:dict):
return to_qdate(event_data["start"]), to_qdate(event_data["end"])
def is_recurring_event_date(self, event_data:dict, date:QDate):
event_type = event_data["type"]
start_date, end_date = self.get_recurring_event_start_end_dates(event_data)
if start_date <= date <= end_date:
day_of_week = DAYS[date.dayOfWeek() - 1]
if event_type == "class":
for b in event_data["blocks"]:
if b["day"] == day_of_week:
return True
elif event_type == "recurring event":
if event_data["day"] == day_of_week:
return True
return False
def get_event_stylesheet(self, event_name:str):
return "background-color: rgb({},{},{})".format(self._schedule[event_name]["color"]["r"],
self._schedule[event_name]["color"]["g"],
self._schedule[event_name]["color"]["b"])
def get_event_button(self, event_name:str, time:QTime, include_time=True):
time_string = time.toString("h:mm AP")
button_title = "{}{}".format(event_name, " " + time_string if include_time else "")
button = QPushButton(button_title)
button.setStyleSheet(self.get_event_stylesheet(event_name))
return button
def get_event_buttons(self, date:QDate, include_times=True):
buttons = []
for event_name, data in self._schedule.items():
event_type = data["type"]
if event_type in ["class", "recurring event"]:
start_date, end_date = self.get_recurring_event_start_end_dates(data)
if start_date <= date <= end_date:
if event_type == "class":
for b in data["blocks"]:
if b["day"] == DAYS[date.dayOfWeek()-1]:
# Class on this day
time = to_qtime(b["time"])
button = self.get_event_button(event_name, time, include_times)
buttons.append((button, event_name, date, time))
if event_type == "recurring event":
if data["day"] == DAYS[date.dayOfWeek()-1]:
time = to_qtime(data["time"])
button = self.get_event_button(event_name, time, include_times)
buttons.append((button, event_name, date, time))
elif event_type in ["one time class event", "one time event"]:
if to_qdate(data["date"]) == date:
time = to_qtime(data["time"])
button = self.get_event_button(event_name, time, include_times)
buttons.append((button, event_name, date, time))
self.connect_buttons.emit(buttons)
return buttons
def add_event(self, data:dict):
event_name = data["name"]
if event_name in self._schedule.keys():
return False
else:
data.pop("name")
self._schedule[event_name] = data
self.schedule = self._schedule
return True
def edit_event(self, data:dict):
event_name = data["name"]
overwritten_data = self._schedule[event_name]
data.pop("name")
if overwritten_data["type"] == "class":
self.delete_class_events(event_name)
self._schedule[event_name] = data
self.schedule = self._schedule
def delete_event(self, event_name:str):
deleted_data = self._schedule[event_name]
if deleted_data["type"] == "class":
self.delete_class_events(event_name)
self._schedule.pop(event_name)
self.schedule = self._schedule
def delete_class_events(self, class_name):
to_delete = []
for name, data in self._schedule.items():
if data["type"] == "one time class event":
if data["class_name"] == class_name:
to_delete.append(name)
for name in to_delete:
self._schedule.pop(name)
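# Usage sketch (illustrative, not part of the original module): Schedule
# persists every change to SCHEDULE_FILE_PATH through its property setter, so
# callers only go through add_event / edit_event / delete_event. The dict keys
# below mirror the ones read elsewhere in this file ("type", "date", "time",
# "color"); the concrete values are placeholders and the "date" sub-keys are
# assumed to match what to_qdate expects.
#
#   schedule = Schedule()
#   schedule.add_event({
#       "name": "Dentist",
#       "type": "one time event",
#       "date": {"year": 2021, "month": 5, "day": 12},
#       "time": {"hour": 9, "minute": 30},
#       "color": {"r": 200, "g": 100, "b": 50},
#   })
#   buttons = schedule.get_event_buttons(QDate(2021, 5, 12))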
class UICalendarWindow(QWidget):
def __init__(self, schedule:Schedule, parent=None):
super(UICalendarWindow, self).__init__(parent)
self.schedule = schedule
self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
self.layout = QtWidgets.QGridLayout()
self.bottom_left_layout = QtWidgets.QVBoxLayout()
self.schedule_table = ScheduleTable(self.schedule)
self.schedule.updated.connect(self.update_table)
self.layout.addWidget(self.schedule_table, 0, 0)
self.add_schedule_button = QPushButton("Add New Scheduled Notes")
self.add_schedule_button.clicked.connect(self.add_notes)
self.layout.addWidget(self.add_schedule_button, 1, 0)
self.setLayout(self.layout)
self.show()
def add_notes(self):
popup = Popup(self.schedule)
result = popup.exec_()
if result:
data = popup.get_data()
self.schedule.add_event(data)
def update_table(self):
self.layout.removeWidget(self.schedule_table)
new_table = ScheduleTable(self.schedule)
self.schedule_table.deleteLater()
self.schedule_table = new_table
self.layout.addWidget(self.schedule_table, 0, 0)
self.adjustSize()
class ScheduleTable(QWidget):
def __init__(self, schedule:Schedule):
super(ScheduleTable, self).__init__()
self.schedule = schedule
self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
self.class_grid_layout = QtWidgets.QGridLayout()
self.class_grid_layout.setVerticalSpacing(5)
self.class_grid_layout.setHorizontalSpacing(10)
self.class_grid_layout.setContentsMargins(0, 0, 0, 2 * self.class_grid_layout.verticalSpacing())
self.event_grid_layout = QtWidgets.QGridLayout()
self.event_grid_layout.setVerticalSpacing(5)
self.event_grid_layout.setHorizontalSpacing(10)
self.event_grid_layout.setContentsMargins(0, 0, 0, self.event_grid_layout.verticalSpacing())
self.add_layout_headers()
self.layout = QtWidgets.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
classes_label = QLabel("Scheduled Classes", self)
self.layout.addWidget(classes_label, alignment=Qt.AlignCenter)
self.layout.addItem(self.class_grid_layout)
events_label = QLabel("Scheduled Events", self)
self.layout.addWidget(events_label, alignment=Qt.AlignCenter)
self.layout.addItem(self.event_grid_layout)
self.setLayout(self.layout)
self.initialize_layout()
def edit_event(self, event_name:str, data:dict):
popup = Popup(self.schedule, edit_data=(event_name, data))
popup.exec_()
def get_edit_button(self, event_name:str, data:dict):
if data is None:
return None
button = QPushButton(event_name, self)
button.setStyleSheet(self.schedule.get_event_stylesheet(event_name))
button.clicked.connect(lambda state, x=event_name, y=data: self.edit_event(x, y))
return button
def add_layout_headers(self):
class_layout_headers = ["Class Name", "Block(s)", "Event(s)"]
for i in range(0, len(class_layout_headers)):
header_label = QLabel(class_layout_headers[i])
header_label.setAlignment(Qt.AlignLeft)
self.class_grid_layout.addWidget(header_label, 0, i)
event_layout_headers = ["Event Name", "Time(s)", "Type"]
for i in range(0, len(event_layout_headers)):
header_label = QLabel(event_layout_headers[i])
header_label.setAlignment(Qt.AlignLeft)
self.event_grid_layout.addWidget(header_label, 0, i)
def get_events_by_type(self):
classes = []
events = []
for (event_name, data) in self.schedule.schedule.items():
event_type = data["type"]
if event_type == "class":
classes.append((event_name, data))
elif event_type in ["recurring event", "one time event", "one time class event"]:
events.append((event_name, data))
# Sort lists by name
classes.sort(key = lambda x: x[0])
events.sort(key = lambda x: x[0])
return classes, events
def get_class_string(self, data:dict):
days = {}
blocks:list = data["blocks"]
for b in blocks:
if b["day"] in days.keys():
days[b["day"]].append(b)
else:
days[b["day"]] = [b]
class_string = ""
for j, day in enumerate(sorted(days.keys(), key=DAYS.index)):
day_string = day + ": "
for k, b in enumerate(days[day]):
day_string += QTime(b["time"]["hour"], b["time"]["minute"]).toString("hh:mmAP") + (
", " if k != len(days[day]) - 1 else "")
class_string += day_string + (" " if j != len(days.keys()) - 1 else "")
return class_string
def add_class_row(self, i, class_name, data):
button = self.get_edit_button(class_name, data)
self.class_grid_layout.addWidget(button, i + 1, 0)
self.class_grid_layout.addWidget(QLabel(self.get_class_string(data), self), i + 1, 1)
class_event_button_layout = QVBoxLayout()
class_event_button_layout.setSpacing(5)
self.class_grid_layout.addLayout(class_event_button_layout, i + 1, 2)
def get_event_label(self, event_type, data):
if event_type == "recurring event":
return data["day"] + ": " + to_qtime(data["time"]).toString("hh:mmAP")
else:
return to_qdate(data["date"]).toString("MMM dd yyyy ") + to_qtime(data["time"]).toString("hh:mmAP")
def add_event_row(self, i, event_name, data):
event_type: str = data["type"]
if event_type in ["one time event", "recurring event"]:
self.event_grid_layout.addWidget(self.get_edit_button(event_name, data), i + 1, 0)
self.event_grid_layout.addWidget(QLabel(camel_case(event_type), self), i + 1, 2)
self.event_grid_layout.addWidget(QLabel(self.get_event_label(event_type, data), self), i + 1, 1)
if event_type == "one time class event":
for row in range(self.class_grid_layout.rowCount()):
# noinspection PyTypeChecker
edit_button: QPushButton = self.class_grid_layout.itemAtPosition(row, 0).widget()
if edit_button.text() == data["class_name"]:
# noinspection PyTypeChecker
layout: QVBoxLayout = self.class_grid_layout.itemAtPosition(row, 2)
if layout is None:
continue
else:
layout.addWidget(self.get_edit_button(event_name, data))
def initialize_layout(self):
classes, events = self.get_events_by_type()
# Add Classes to Table
for i, (class_name, data) in enumerate(classes):
self.add_class_row(i, class_name, data)
# Add Events to Table
for i, (event_name, data) in enumerate(events):
self.add_event_row(i, event_name, data)
class DayViewer(QWidget):
back = pyqtSignal()
def __init__(self, date : QDate, schedule:Schedule, parent=None):
        super(DayViewer, self).__init__(parent)
layout = QGridLayout()
layout.setAlignment(Qt.AlignTop)
layout.setSpacing(10)
layout.setContentsMargins(0, 0, 0, 0)
back_button = QPushButton("Back")
back_button.clicked.connect(self.back.emit)
layout.addWidget(back_button, 0, 0)
layout.addWidget(QLabel(date.toString("Classes for MMMM d, yyyy")), 0, 1)
for i, (button, _, _, _) in enumerate(schedule.get_event_buttons(date)):
layout.addWidget(button, i+1, 0, 1, 3)
self.setLayout(layout)
class ScheduleCalendar(QCalendarWidget):
def __init__(self, schedule:Schedule, stack:QStackedWidget, parent=None):
super(ScheduleCalendar, self).__init__()
self.schedule = schedule
self.activated.connect(self.open_day_viewer)
self.stack = stack
self.setGridVisible(True)
self.day_viewer = None
def paintCell(self, painter, rect, date):
blocks = []
for event_name, data in self.schedule.schedule.items():
event_type = data["type"]
if event_type in ["class", "recurring event"]:
if self.schedule.is_recurring_event_date(data, date):
event_blocks = []
if event_type == "class":
event_blocks = data["blocks"]
else:
event_blocks.append({
"day": data["day"],
"time": data["time"]
})
for b in event_blocks:
if b["day"] == DAYS[date.dayOfWeek() - 1]:
blocks.append({
"type": "class",
"color": data["color"],
"time": b["time"]
})
else:
event_date = to_qdate(data["date"])
if date == event_date:
blocks.append({
"type": "event",
"color": data["color"],
"time": data["time"]
})
blocks.sort(key = lambda x: x["time"]["hour"])
for block in blocks:
color = block["color"]
qcolor = to_qcolor(color)
painter.setBrush(qcolor)
len_float = float(len(blocks))
atop = rect.top() + ((blocks.index(block) / len_float) * (rect.height()))
height = rect.height() / len_float
block_rect = QRectF(rect.left()+1, atop+1, rect.width()-3, height-3)
painter.setPen(Qt.NoPen if qcolor != Qt.white else Qt.lightGray)
painter.drawRect(block_rect)
painter.setPen(QPen())
# noinspection PyCallingNonCallable
painter.drawText(QRect(rect), Qt.TextSingleLine|Qt.AlignCenter, str(date.day()))
def open_day_viewer(self, date:QDate):
date_contains_event = False
for event_name, data in self.schedule.schedule.items():
event_type = data["type"]
if event_type in ["class", "recurring event"]:
if self.schedule.is_recurring_event_date(data, date):
date_contains_event = True
break
else:
event_date = to_qdate(data["date"])
if date == event_date:
date_contains_event = True
break
if date_contains_event:
self.day_viewer = DayViewer(date, self.schedule, parent=self)
self.day_viewer.back.connect(lambda: self.stack.removeWidget(self.day_viewer))
self.stack.addWidget(self.day_viewer)
self.stack.setCurrentWidget(self.day_viewer)
class DayPicker(QWidget):
def __init__(self):
super(DayPicker, self).__init__()
self.layout = QHBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
weekends = True
days = DAYS[0:len(DAYS) if weekends else 5]
self.buttons = []
for day in days:
if day is not None:
radio = QRadioButton()
radio.sizePolicy().setRetainSizeWhenHidden(False)
radio.setText(day)
self.buttons.append(radio)
self.layout.addWidget(radio)
def get_day(self):
for button in self.buttons:
if button.isChecked():
return button.text()
class ClassTimePicker(QWidget):
def __init__(self, parent=None):
super(ClassTimePicker, self).__init__(parent=parent)
self.time_selector = QTimeEdit()
self.time_selector.sizePolicy().setRetainSizeWhenHidden(False)
self.time_selector.setDisplayFormat("hh:mm AP")
self.time_selector.setTime(QTime(12, 0, 0))
self.day_picker = DayPicker()
self.day_picker.sizePolicy().setRetainSizeWhenHidden(False)
self.layout = QHBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.time_selector)
self.layout.addWidget(self.day_picker)
self.setLayout(self.layout)
def get_time(self):
return self.time_selector.time()
def set_time(self, time:QTime):
self.time_selector.setTime(time)
def set_day(self, day):
for b in self.day_picker.buttons:
if b.text() == day:
b.click()
def is_valid(self):
return self.day_picker.get_day() is not None
class DateTimePickerSeriesModel(QObject):
def __init__(self, parent=None):
super(DateTimePickerSeriesModel, self).__init__(parent)
self._content = QDateTime.currentDateTime()
dict of additional parameters for the radial basis function used for q-out (q-space of output feature map).
See q_radial_basis_params but only for q-out.
Defaults to q_radial_basis_params.
:param q_in_radial_basis_params: A dict of additional parameters for the radial basis function used for q-in (q-space of input feature map).
See q_radial_basis_params but only for q-in.
Defaults to q_radial_basis_params.
:param sub_kernel_selection_rule:
Rule defining for the TP filter which pairs of l_p and l_q to use for each l_filter.
Defaults to "TP\pm 1".
Options are:
- dict with string keys: defines some constraints which combinations to use.
The following constraint always holds:
\|l_p - l_q\| <= l_filter <= l_p + l_q
Additionally constraints can be defined by the following keys in the dict:
- "l_diff_to_out_max": Maximum difference between l_p and l_filter as well as l_q and l_filter.
            Defaults to 1 (as in "TP\pm 1")
- "l_max" (optional): Maximum value for l_p and l_q.
- "l_in_diff_max" (optional): Maximum difference between l_p and l_q.
- dict with ints as keys and list of int-pairs as values that defines
for each l_filter the used pairs of l_p and l_q.
E.g. {0: [(0, 0), (1, 1)], 1: [(0, 1), (1, 0), (1, 1)]}
Note that this parameter is ignored if no TP-filter basis is used.
For additional parameters see EquivariantPQLayer.
"""
type_in = SphericalTensorType.from_multiplicities_or_type(type_in)
type_out = SphericalTensorType.from_multiplicities_or_type(type_out)
if batch_norm_config is None:
batch_norm_config = {}
if non_linearity_config is None:
non_linearity_config = {}
if use_non_linearity:
type_non_lin_in, non_linearity = build_non_linearity(type_out, **non_linearity_config)
conv = EquivariantPQLayer(type_in, type_non_lin_in,
kernel_definition=kernel,
p_kernel_size=p_kernel_size,
q_sampling_schema_in=q_sampling_schema_in,
q_sampling_schema_out=q_sampling_schema_out,
transposed=transposed,
auto_recompute_kernel=auto_recompute,
**kernel_kwargs)
if use_batch_norm:
batch_norm = BatchNorm(type_non_lin_in.Rs, **batch_norm_config)
return nn.Sequential(
OrderedDict([('conv', conv), ('batch_norm', batch_norm), ('non_linearity', non_linearity)]))
else:
return nn.Sequential(OrderedDict([('conv', conv), ('non_linearity', non_linearity)]))
else:
conv = EquivariantPQLayer(type_in, type_out,
kernel_definition=kernel,
p_kernel_size=p_kernel_size,
q_sampling_schema_in=q_sampling_schema_in,
q_sampling_schema_out=q_sampling_schema_out,
transposed=transposed,
auto_recompute_kernel=auto_recompute,
**kernel_kwargs)
if use_batch_norm:
batch_norm = BatchNorm(type_out.Rs, **batch_norm_config)
return nn.Sequential(OrderedDict([('conv', conv), ('batch_norm', batch_norm)]))
else:
return conv
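# Usage sketch (assumptions flagged inline, not from the original source):
# building a pq-layer with 8 scalar and 4 vector output channels. The kernel
# name and the q-sampling schemas are placeholders; valid values depend on
# EquivariantPQLayer and Q_SamplingSchema elsewhere in the package.
#
#   layer = build_pq_layer([1], [8, 4], 5,       # type_in, type_out, p-kernel size
#                          kernel="pq_TP",        # placeholder kernel definition
#                          q_sampling_schema_in=q_in,    # placeholder schemas
#                          q_sampling_schema_out=q_out,
#                          use_non_linearity=True,
#                          use_batch_norm=True)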
def build_p_layer(type_in: Union[SphericalTensorType, List[int]],
type_out: Union[SphericalTensorType, List[int]],
kernel_size: int,
non_linearity_config=None,
use_non_linearity=True,
batch_norm_config=None,
use_batch_norm=True,
transposed=False,
auto_recompute=True,
**kernel_kwargs):
"""
Builds a p-layer consisting of an EquivariantPLayer followed by a nonlinearity (e.g. gated nonlinearity).
:param type_in: The spherical tensor type of the input feature map.
This defines how many channels of each tensor order the input feature map has.
        It can either be given as a SphericalTensorType object or as a List[int], where the element at index i of the list
defines the number of order-i channels,
e.g. the first element defines the number of order-0 (scalar) channels
and the second the number of order-1 (vector) channels and so on.
For all orders corresponding to out-of-range indices the number of channels is 0.
:param type_out: The spherical tensor type of the output feature map (after non-linearity).
This defines how many channels of each tensor order the output feature map has.
        It can either be given as a SphericalTensorType object or as a List[int], where the element at index i of the list
defines the number of order-i channels,
e.g. the first element defines the number of order-0 (scalar) channels
and the second the number of order-1 (vector) channels and so on.
For all orders corresponding to out-of-range indices the number of channels is 0.
    :param kernel_size: Size of the kernel in p-space.
Note that the kernel always covers the whole q-space (as it is not translationally equivariant),
so there is no q_kernel_size.
:param non_linearity_config: Dict with the following optional keys:
- tensor_non_lin: The nonlinearity to use for channels with l>0 (non-scalar channels).
Default (and currently only option) is "gated".
        - scalar_non_lin: The nonlinearity to use for channels with l=0 (scalar channels).
Valid options are "swish" and "relu".
Default is "swish".
:param use_non_linearity: Whether to use a nonlinearity.
:param batch_norm_config: Dict with the following optional keys:
- eps: avoid division by zero when we normalize by the variance
- momentum: momentum of the running average
- affine: do we have weight and bias parameters
        - reduce: method to contract over the spatial dimensions
:param use_batch_norm: Whether to use a batch normalization
:param transposed: Whether to perform a transposed convolution using the equivariant kernel
:param auto_recompute: Whether to automatically recompute the kernel in each forward pass.
By default it is recomputed each time.
If this parameter is set to false, it is not recomputed and the method recompute() needs to be called
explicitly after parameters of this nn.Module have been updated.
:param kernel_selection_rule: Rule defining which angular filter orders (l_filter) to use
        for the paths from input orders l_in to output orders l_out.
Defaults to using all possible filter orders,
i.e. all l_filter with \|l_in - l_out\| <= l_filter <= l_in + l_out.
Options are:
- dict with key "lmax" and int value which additionally defines a maximum l_filter.
- dict with int-pairs as keys and list of ints as values that defines
for each pair of l_in and l_out the list of l_filter to use.
E.g. {(0,0): [0], (1,1): [0,1], (0,1): [1]}
:param p_radial_basis_type: The radial basis function type used for p-space.
Valid options are "gaussian" (default), "cosine", "bessel".
Note that this parameter is ignored if there is no basis filter using p-space.
    :param p_radial_basis_params: An optional dict of additional parameters for the radial basis function used for p-space.
Valid keys in this dict are:
- num_layers: Number of layers in the FC applied to the radial basis function.
If num_layers = 0 (default) then no FC is applied to the radial basis function.
- num_units: Number of units (neurons) in each of the layer in the FC applied to the radial basis function.
No default, this parameter is required and must be >0 if num_layers > 0.
- activation_function: activation function used in the FC applied to the radial basis function,
valid are "relu" (default) or "swish"
Note that this parameter is ignored if there is no basis filter using p-space.
For additional parameters see EquivariantPLayer.
"""
return build_pq_layer(type_in, type_out, kernel_size,
kernel='p_space',
q_sampling_schema_in=None, q_sampling_schema_out=None,
non_linearity_config=non_linearity_config,
use_non_linearity=use_non_linearity,
batch_norm_config=batch_norm_config,
use_batch_norm=use_batch_norm,
transposed=transposed,
auto_recompute=auto_recompute,
**kernel_kwargs)
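# Usage sketch (illustrative): a p-only layer mapping [8, 4] -> [16, 8]
# channels with a 3-voxel kernel; as the implementation above shows, this just
# forwards to build_pq_layer with kernel='p_space' and no q-sampling schemas.
#
#   p_layer = build_p_layer([8, 4], [16, 8], kernel_size=3)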
def build_q_reduction_layer(type_in: Union[SphericalTensorType, List[int]], q_sampling_schema_in: Q_SamplingSchema,
reduction='length_weighted_average',
auto_recompute=True,
**kwargs):
"""
Builds a q-reduction layer to globally reduce q-space leaving only p-space.
:param type_in: The spherical tensor type of the input feature map.
This defines how many channels of each tensor order the input feature map has.
        It can either be given as a SphericalTensorType object or as a List[int], where the element at index i of the list
defines the number of order-i channels,
e.g. the first element defines the number of order-0 (scalar) channels
and the second the number of order-1 (vector) channels and so on.
For all orders corresponding to out-of-range indices the number of channels is 0.
:param q_sampling_schema_in: The q-sampling schema of input feature map.
The q-sampling schema may either be given as a Q_SamplingSchema object,
        a Tensor of size (Q_in, 3), or a list of length Q_in whose elements are lists of 3 floats (one per sampling vector).
        Note that Q_in is not given explicitly but is derived from the length of this parameter.
If this is None (default) then the input does not have q-space but only p-space.
:param reduction: The type of reduction to use. Valid options are:
- length_weighted_average: To use QLengthWeightedAvgPool (global length-weighted avg-pooling over q-space)
For additional parameters in param kwargs see QLengthWeightedAvgPool.
- mean: To use global avg-pooling over q-space.
- conv: To use an EquivariantPQLayer (and gated nonlinearity) without output q-space.
For additional parameters in param kwargs see build_pq_layer
(except the params type_out, q_sampling_schema_out).
:param auto_recompute: Whether to automatically recompute the kernels in each forward pass.
:return (reduction_layer, type_out):
- reduction_layer: The created q-reduction layer (nn.Module)
- type_out: The spherical tensor type of the output feature map.
"""
type_in = SphericalTensorType.from_multiplicities_or_type(type_in)
if reduction == 'length_weighted_average':
return QLengthWeightedAvgPool(type_in, q_sampling_schema_in,
auto_recompute=auto_recompute, **kwargs), type_in
elif reduction == 'mean':
return partial(torch.mean, dim=2), type_in
elif reduction == 'conv':
type_out = SphericalTensorType.from_multiplicities_or_type(kwargs.pop('type_out', type_in))
return build_pq_layer(type_in, type_out,
q_sampling_schema_in=q_sampling_schema_in,
q_sampling_schema_out=None,
**kwargs), type_out
else:
raise ValueError(f'q-reduction "{reduction}" not supported.')
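# Usage sketch (illustrative): reducing q-space after the last pq-layer. With
# reduction='mean' the returned callable is partial(torch.mean, dim=2), i.e.
# q-space is assumed to live on dimension 2, and the output type equals the
# input type.
#
#   reduction, type_after = build_q_reduction_layer([16, 8], q_in,  # q_in: placeholder schema
#                                                   reduction='mean')
#   # p_only_feature_map = reduction(feature_map)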
def build_non_linearity(type_out: SphericalTensorType, tensor_non_lin='gated',
""" Utilities for viewing MRI images and interactively identify electrodes
Includes version of OrthoSlicer3D code originally written by:
<NAME>, <NAME>, <NAME>
"""
from __future__ import division, print_function
from time import time
import itertools
from pandas import DataFrame as df
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from .models import ModelSurface
import numpy as np
import weakref
from nibabel.optpkg import optional_package
from nibabel.orientations import aff2axcodes, axcodes2ornt
class OrthoSlicer3D(object):
"""Orthogonal-plane slicer.
OrthoSlicer3d expects 3-dimensional data, and by default it creates a
figure with 3 axes, one for each slice orientation.
Clicking and dragging the mouse in any one axis will select out the
corresponding slices in the other two. Scrolling up and
down moves the slice up and down in the current axis.
Example
-------
>>> import numpy as np
>>> a = np.sin(np.linspace(0,np.pi,20))
>>> b = np.sin(np.linspace(0,np.pi*5,20))
>>> data = np.outer(a,b)[..., np.newaxis]*a
>>> OrthoSlicer3D(data).show() # doctest: +SKIP
"""
# Skip doctest above b/c not all systems have mpl installed
def __init__(self, data, affine=None, axes=None, cmap='gray',
pcnt_range=(1., 99.), figsize=(8, 8), title=None):
"""
Parameters
----------
data : ndarray
The data that will be displayed by the slicer. Should have 3+
dimensions.
affine : array-like | None
Affine transform for the data. This is used to determine
how the data should be sliced for plotting into the saggital,
coronal, and axial view axes. If None, identity is assumed.
The aspect ratio of the data are inferred from the affine
transform.
axes : tuple of mpl.Axes | None, optional
3 or 4 axes instances for the 3 slices plus volumes,
or None (default).
cmap : str | instance of cmap, optional
String or cmap instance specifying colormap.
pcnt_range : array-like, optional
Percentile range over which to scale image for display.
figsize : tuple
Figure size (in inches) to use if axes are None.
"""
# Nest imports so that matplotlib.use() has the appropriate
# effect in testing
plt, _, _ = optional_package('matplotlib.pyplot')
mpl_img, _, _ = optional_package('matplotlib.image')
mpl_patch, _, _ = optional_package('matplotlib.patches')
self._title = title
self._closed = False
data = np.asanyarray(data)
if data.ndim < 3:
raise ValueError('data must have at least 3 dimensions')
affine = np.array(affine, float) if affine is not None else np.eye(4)
if affine.ndim != 2 or affine.shape != (4, 4):
raise ValueError('affine must be a 4x4 matrix')
# determine our orientation
self._affine = affine.copy()
codes = axcodes2ornt(aff2axcodes(self._affine))
self._order = np.argsort([c[0] for c in codes])
self._flips = np.array([c[1] < 0 for c in codes])[self._order]
self._flips = list(self._flips) + [False] # add volume dim
self._scalers = np.abs(self._affine).max(axis=0)[:3]
self._inv_affine = np.linalg.inv(affine)
# current volume info
self._volume_dims = data.shape[3:]
self._current_vol_data = data[:, :, :, 0] if data.ndim > 3 else data
self._data = data
vmin, vmax = np.percentile(data, pcnt_range)
del data
if axes is None: # make the axes
# ^ +---------+ ^ +---------+
# | | | | | |
# | Sag | | Cor |
# S | 0 | S | 1 |
# | | | |
# | | | |
# +---------+ +---------+
# A --> <-- R
# ^ +---------+ +---------+
# | | | | |
# | Axial | | Vol |
# A | 2 | | 3 |
# | | | |
# | | | |
# +---------+ +---------+
# <-- R <-- t -->
fig, axes = plt.subplots(2, 2)
fig.set_size_inches(figsize, forward=True)
self._axes = [axes[0, 0], axes[0, 1], axes[1, 0], axes[1, 1]]
plt.tight_layout(pad=0.1)
if self.n_volumes <= 1:
fig.delaxes(self._axes[3])
self._axes.pop(-1)
if self._title is not None:
fig.canvas.set_window_title(str(title))
else:
self._axes = [axes[0], axes[1], axes[2]]
if len(axes) > 3:
self._axes.append(axes[3])
# Start midway through each axis, idx is current slice number
self._ims, self._data_idx = list(), list()
# set up axis crosshairs
self._crosshairs = [None] * 3
r = [self._scalers[self._order[2]] / self._scalers[self._order[1]],
self._scalers[self._order[2]] / self._scalers[self._order[0]],
self._scalers[self._order[1]] / self._scalers[self._order[0]]]
self._sizes = [self._data.shape[o] for o in self._order]
for ii, xax, yax, ratio, label in zip([0, 1, 2], [1, 0, 0], [2, 2, 1],
r, ('SAIP', 'SLIR', 'ALPR')):
ax = self._axes[ii]
d = np.zeros((self._sizes[yax], self._sizes[xax]))
im = self._axes[ii].imshow(d, vmin=vmin, vmax=vmax, aspect=1,
cmap=cmap, interpolation='nearest',
origin='lower')
self._ims.append(im)
vert = ax.plot([0] * 2, [-0.5, self._sizes[yax] - 0.5],
color=(0, 1, 0), linestyle='-')[0]
horiz = ax.plot([-0.5, self._sizes[xax] - 0.5], [0] * 2,
color=(0, 1, 0), linestyle='-')[0]
self._crosshairs[ii] = dict(vert=vert, horiz=horiz)
# add text labels (top, right, bottom, left)
lims = [0, self._sizes[xax], 0, self._sizes[yax]]
bump = 0.01
poss = [[lims[1] / 2., lims[3]],
[(1 + bump) * lims[1], lims[3] / 2.],
[lims[1] / 2., 0],
[lims[0] - bump * lims[1], lims[3] / 2.]]
anchors = [['center', 'bottom'], ['left', 'center'],
['center', 'top'], ['right', 'center']]
for pos, anchor, lab in zip(poss, anchors, label):
ax.text(pos[0], pos[1], lab,
horizontalalignment=anchor[0],
verticalalignment=anchor[1])
ax.axis(lims)
ax.set_aspect(ratio)
ax.patch.set_visible(False)
ax.set_frame_on(False)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
self._data_idx.append(0)
self._data_idx.append(-1) # volume
# Set up volumes axis
if self.n_volumes > 1 and len(self._axes) > 3:
ax = self._axes[3]
ax.set_axis_bgcolor('k')
ax.set_title('Volumes')
y = np.zeros(self.n_volumes + 1)
x = np.arange(self.n_volumes + 1) - 0.5
step = ax.step(x, y, where='post', color='y')[0]
ax.set_xticks(np.unique(np.linspace(0, self.n_volumes - 1,
5).astype(int)))
ax.set_xlim(x[0], x[-1])
yl = [self._data.min(), self._data.max()]
yl = [l + s * np.diff(lims)[0] for l, s in zip(yl, [-1.01, 1.01])]
patch = mpl_patch.Rectangle([-0.5, yl[0]], 1., np.diff(yl)[0],
fill=True, facecolor=(0, 1, 0),
edgecolor=(0, 1, 0), alpha=0.25)
ax.add_patch(patch)
ax.set_ylim(yl)
self._volume_ax_objs = dict(step=step, patch=patch)
self._figs = set([a.figure for a in self._axes])
for fig in self._figs:
fig.canvas.mpl_connect('scroll_event', self._on_scroll)
fig.canvas.mpl_connect('motion_notify_event', self._on_mouse)
fig.canvas.mpl_connect('button_press_event', self._on_mouse)
fig.canvas.mpl_connect('key_press_event', self._on_keypress)
fig.canvas.mpl_connect('close_event', self._cleanup)
# actually set data meaningfully
self._position = np.zeros(4)
self._position[3] = 1. # convenience for affine multn
self._changing = False # keep track of status to avoid loops
self._links = [] # other viewers this one is linked to
plt.draw()
for fig in self._figs:
fig.canvas.draw()
self._set_volume_index(0, update_slices=False)
self._set_position(0., 0., 0.)
self._draw()
def __repr__(self):
title = '' if self._title is None else ('%s ' % self._title)
vol = '' if self.n_volumes <= 1 else (', %s' % self.n_volumes)
r = ('<%s: %s(%s, %s, %s%s)>'
             % (self.__class__.__name__, title, self._sizes[0],
self._sizes[1], self._sizes[2], vol))
return r
# User-level functions ###################################################
def show(self):
"""Show the slicer in blocking mode; convenience for ``plt.show()``
"""
plt, _, _ = optional_package('matplotlib.pyplot')
plt.show()
def close(self):
"""Close the viewer figures
"""
self._cleanup()
plt, _, _ = optional_package('matplotlib.pyplot')
for f in self._figs:
plt.close(f)
def _cleanup(self):
"""Clean up before closing"""
self._closed = True
for link in list(self._links): # make a copy before iterating
self._unlink(link())
@property
def n_volumes(self):
"""Number of volumes in the data"""
return int(np.prod(self._volume_dims))
@property
def position(self):
"""The current coordinates"""
return self._position[:3].copy()
def link_to(self, other):
"""Link positional changes between two canvases
Parameters
----------
other : instance of OrthoSlicer3D
Other viewer to use to link movements.
"""
if not isinstance(other, self.__class__):
raise TypeError('other must be an instance of %s, not %s'
                            % (self.__class__.__name__, type(other)))
self._link(other, is_primary=True)
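    # Usage sketch (illustrative): linking two slicers keeps their crosshair
    # positions in sync in both directions.
    #
    #   v1 = OrthoSlicer3D(data_a)
    #   v2 = OrthoSlicer3D(data_b)
    #   v1.link_to(v2)
    #   v1.set_position(10., -5., 7.)   # v2 follows via _notify_links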
def _link(self, other, is_primary):
"""Link a viewer"""
ref = weakref.ref(other)
if ref in self._links:
return
self._links.append(ref)
if is_primary:
other._link(self, is_primary=False)
other.set_position(*self.position)
def _unlink(self, other):
"""Unlink a viewer"""
ref = weakref.ref(other)
if ref in self._links:
self._links.pop(self._links.index(ref))
ref()._unlink(self)
def _notify_links(self):
"""Notify linked canvases of a position change"""
for link in self._links:
link().set_position(*self.position[:3])
def set_position(self, x=None, y=None, z=None):
"""Set current displayed slice indices
Parameters
----------
x : float | None
X coordinate to use. If None, do not change.
y : float | None
Y coordinate to use. If None, do not change.
z : float | None
Z coordinate to use. If None, do not change.
"""
self._set_position(x, y, z)
self._draw()
def set_volume_idx(self, v):
"""Set current displayed volume index
Parameters
----------
v : int
Volume index.
"""
self._set_volume_index(v)
self._draw()
def _set_volume_index(self, v, update_slices=True):
"""Set the plot data using a volume index"""
v = self._data_idx[3] if v is None else int(round(v))
if v == self._data_idx[3]:
return
max_ = np.prod(self._volume_dims)
self._data_idx[3] = max(min(int(round(v)), max_ - 1), 0)
idx = (slice(None), slice(None), slice(None))
if self._data.ndim > 3:
idx = idx + tuple(np.unravel_index(self._data_idx[3],
self._volume_dims))
self._current_vol_data = self._data[idx]
# update all of our slice plots
if update_slices:
self._set_position(None, None, None, notify=False)
def _set_position(self, x, y, z, notify=True):
"""Set the plot data using a physical position"""
| |
#------------------------------------------------------------------------------
# Copyright (C) 1996-2010 Power System Engineering Research Center (PSERC)
# Copyright (C) 2007-2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
""" Defines a generalised OPF solver and an OPF model.
Based on opf.m from MATPOWER by <NAME>, developed at PSERC Cornell.
See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
import logging
import random
from time import time
from numpy import \
array, pi, diff, Inf, ones, r_, float64, zeros, arctan2, sin, cos
from scipy.sparse import lil_matrix, csr_matrix, hstack
from util import _Named, fair_max
from case import REFERENCE
from generator import PW_LINEAR
from solver import DCOPFSolver, PIPSSolver
#------------------------------------------------------------------------------
# Logging:
#------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# "OPF" class:
#------------------------------------------------------------------------------
class OPF(object):
""" Defines a generalised OPF solver.
Based on opf.m from MATPOWER by <NAME>, developed at PSERC
Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more info.
"""
def __init__(self, case, dc=True, ignore_ang_lim=True, opt=None):
""" Initialises a new OPF instance.
"""
#: Case under optimisation.
self.case = case
#: Use DC power flow formulation.
self.dc = dc
#: Ignore angle difference limits for branches even if specified.
self.ignore_ang_lim = ignore_ang_lim
#: Solver options (See pips.py for futher details).
self.opt = {} if opt is None else opt
#--------------------------------------------------------------------------
# Public interface:
#--------------------------------------------------------------------------
def solve(self, solver_klass=None):
""" Solves an optimal power flow and returns a results dictionary.
"""
# Start the clock.
t0 = time()
# Build an OPF model with variables and constraints.
om = self._construct_opf_model(self.case)
if om is None:
return {"converged": False, "output": {"message": "No Ref Bus."}}
# Call the specific solver.
# if self.opt["verbose"]:
# print '\nPYLON Version %s, %s', "0.4.2", "April 2010"
if solver_klass is not None:
result = solver_klass(om, opt=self.opt).solve()
elif self.dc:
# if self.opt["verbose"]:
# print ' -- DC Optimal Power Flow\n'
result = DCOPFSolver(om, opt=self.opt).solve()
else:
# if self.opt["verbose"]:
# print ' -- AC Optimal Power Flow\n'
result = PIPSSolver(om, opt=self.opt).solve()
result["elapsed"] = time() - t0
if self.opt.has_key("verbose"):
if self.opt["verbose"]:
logger.info("OPF completed in %.3fs." % result["elapsed"])
return result
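    # Usage sketch (illustrative): solving a DC OPF for an existing case.
    # Construction of the Case object is out of scope here, and the keys of
    # the result dict beyond "converged" and "elapsed" depend on the solver.
    #
    #   opf = OPF(case, dc=True, opt={"verbose": True})
    #   result = opf.solve()
    #   if result["converged"]:
    #       logger.info("solved in %.3fs" % result["elapsed"])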
#--------------------------------------------------------------------------
# Private interface:
#--------------------------------------------------------------------------
def _construct_opf_model(self, case):
""" Returns an OPF model.
"""
# Zero the case result attributes.
self.case.reset()
base_mva = case.base_mva
# Check for one reference bus.
oneref, refs = self._ref_check(case)
        if not oneref:
            # Without a single reference bus no model can be built;
            # solve() then reports "No Ref Bus."
            return None
# Remove isolated components.
bs, ln, gn = self._remove_isolated(case)
# Update bus indexes.
self.case.index_buses(bs)
# Convert single-block piecewise-linear costs into linear polynomial.
gn = self._pwl1_to_poly(gn)
# Set-up initial problem variables.
Va = self._get_voltage_angle_var(refs, bs)
Pg = self._get_pgen_var(gn, base_mva)
if self.dc: # DC model.
# Get the susceptance matrices and phase shift injection vectors.
B, Bf, Pbusinj, Pfinj = self.case.makeBdc(bs, ln)
# Power mismatch constraints (B*Va + Pg = Pd).
Pmis = self._power_mismatch_dc(bs, gn, B, Pbusinj, base_mva)
# Branch flow limit constraints.
Pf, Pt = self._branch_flow_dc(ln, Bf, Pfinj, base_mva)
else:
# Set-up additional AC-OPF problem variables.
Vm = self._get_voltage_magnitude_var(bs, gn)
Qg = self._get_qgen_var(gn, base_mva)
Pmis, Qmis, Sf, St = self._nln_constraints(len(bs), len(ln))
vl = self._const_pf_constraints(gn, base_mva)
# TODO: Generator PQ capability curve constraints.
# PQh, PQl = self._pq_capability_curve_constraints(gn)
# Branch voltage angle difference limits.
ang = self._voltage_angle_diff_limit(bs, ln)
if self.dc:
vars = [Va, Pg]
constraints = [Pmis, Pf, Pt, ang]
else:
vars = [Va, Vm, Pg, Qg]
constraints = [Pmis, Qmis, Sf, St, #PQh, PQL,
vl, ang]
# Piece-wise linear generator cost constraints.
y, ycon = self._pwl_gen_costs(gn, base_mva)
if ycon is not None:
vars.append(y)
constraints.append(ycon)
# Add variables and constraints to the OPF model object.
opf = OPFModel(case)
opf.add_vars(vars)
opf.add_constraints(constraints)
if self.dc: # user data
opf._Bf = Bf
opf._Pfinj = Pfinj
return opf
def _ref_check(self, case):
""" Checks that there is only one reference bus.
"""
refs = [bus._i for bus in case.buses if bus.type == REFERENCE]
if len(refs) == 1:
return True, refs
else:
logger.error("OPF requires a single reference bus.")
return False, refs
def _remove_isolated(self, case):
""" Returns non-isolated case components.
"""
# case.deactivate_isolated()
buses = case.connected_buses
branches = case.online_branches
gens = case.online_generators
return buses, branches, gens
def _pwl1_to_poly(self, generators):
""" Converts single-block piecewise-linear costs into linear
polynomial.
"""
for g in generators:
if (g.pcost_model == PW_LINEAR) and (len(g.p_cost) == 2):
g.pwl_to_poly()
return generators
#--------------------------------------------------------------------------
# Optimisation variables:
#--------------------------------------------------------------------------
def _get_voltage_angle_var(self, refs, buses):
""" Returns the voltage angle variable set.
"""
Va = array([b.v_angle * (pi / 180.0) for b in buses])
Vau = Inf * ones(len(buses))
Val = -Vau
Vau[refs] = Va[refs]
Val[refs] = Va[refs]
return Variable("Va", len(buses), Va, Val, Vau)
def _get_voltage_magnitude_var(self, buses, generators):
""" Returns the voltage magnitude variable set.
"""
Vm = array([b.v_magnitude for b in buses])
# For buses with generators initialise Vm from gen data.
for g in generators:
Vm[g.bus._i] = g.v_magnitude
Vmin = array([b.v_min for b in buses])
Vmax = array([b.v_max for b in buses])
return Variable("Vm", len(buses), Vm, Vmin, Vmax)
def _get_pgen_var(self, generators, base_mva):
""" Returns the generator active power set-point variable.
"""
Pg = array([g.p / base_mva for g in generators])
Pmin = array([g.p_min / base_mva for g in generators])
Pmax = array([g.p_max / base_mva for g in generators])
return Variable("Pg", len(generators), Pg, Pmin, Pmax)
def _get_qgen_var(self, generators, base_mva):
""" Returns the generator reactive power variable set.
"""
Qg = array([g.q / base_mva for g in generators])
Qmin = array([g.q_min / base_mva for g in generators])
Qmax = array([g.q_max / base_mva for g in generators])
return Variable("Qg", len(generators), Qg, Qmin, Qmax)
#--------------------------------------------------------------------------
# Constraints:
#--------------------------------------------------------------------------
def _nln_constraints(self, nb, nl):
""" Returns non-linear constraints for OPF.
"""
Pmis = NonLinearConstraint("Pmis", nb)
Qmis = NonLinearConstraint("Qmis", nb)
Sf = NonLinearConstraint("Sf", nl)
St = NonLinearConstraint("St", nl)
return Pmis, Qmis, Sf, St
def _power_mismatch_dc(self, buses, generators, B, Pbusinj, base_mva):
""" Returns the power mismatch constraint (B*Va + Pg = Pd).
"""
nb, ng = len(buses), len(generators)
# Negative bus-generator incidence matrix.
gen_bus = array([g.bus._i for g in generators])
neg_Cg = csr_matrix((-ones(ng), (gen_bus, range(ng))), (nb, ng))
Amis = hstack([B, neg_Cg], format="csr")
Pd = array([bus.p_demand for bus in buses])
Gs = array([bus.g_shunt for bus in buses])
bmis = -(Pd - Gs) / base_mva - Pbusinj
return LinearConstraint("Pmis", Amis, bmis, bmis, ["Va", "Pg"])
def _branch_flow_dc(self, branches, Bf, Pfinj, base_mva):
""" Returns the branch flow limit constraint. The real power flows
at the from end the lines are related to the bus voltage angles by
Pf = Bf * Va + Pfinj.
"""
# Indexes of constrained lines.
il = array([i for i,l in enumerate(branches) if 0.0 < l.rate_a < 1e10])
lpf = -Inf * ones(len(il))
rate_a = array([l.rate_a / base_mva for l in branches])
upf = rate_a[il] - Pfinj[il]
upt = rate_a[il] + Pfinj[il]
Pf = LinearConstraint("Pf", Bf[il, :], lpf, upf, ["Va"])
Pt = LinearConstraint("Pt", -Bf[il, :], lpf, upt, ["Va"])
return Pf, Pt
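    # Worked example for the bounds above (illustrative numbers): for a
    # constrained line with rate_a = 100 MVA, base_mva = 100 and Pfinj = 0,
    # the per-unit limits reduce to
    #   -inf <= Bf[il, :] * Va <= 1.0   (flow at the "from" end)
    #   -inf <= -Bf[il, :] * Va <= 1.0  (flow at the "to" end)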
def _const_pf_constraints(self, gn, base_mva):
""" Returns a linear constraint enforcing constant power factor for
dispatchable loads.
The power factor is derived from the original value of Pmin and either
Qmin (for inductive loads) or Qmax (for capacitive loads). If both Qmin
and Qmax are zero, this implies a unity power factor without the need
for an additional constraint.
"""
ivl = array([i for i, g in enumerate(gn)
if g.is_load and (g.q_min != 0.0 or g.q_max != 0.0)])
vl = [gn[i] for i in ivl]
nvl = len(vl)
ng = len(gn)
Pg = array([g.p for g in vl]) / base_mva
Qg = array([g.q for g in vl]) / base_mva
Pmin = array([g.p_min for g in vl]) / base_mva
Qmin = array([g.q_min for g in vl]) / base_mva
Qmax = array([g.q_max for g in vl]) / base_mva
# At least one of the Q limits must be zero (corresponding to Pmax==0).
for g in vl:
            if g.q_min != 0.0 and g.q_max != 0.0:
                logger.error("Either Qmin or Qmax
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureConflictHttpError,
AzureHttpError,
)
from ..constants import (
DEFAULT_HTTP_TIMEOUT,
DEV_QUEUE_HOST,
QUEUE_SERVICE_HOST_BASE,
X_MS_VERSION,
)
from .._common_error import (
_dont_fail_not_exist,
_dont_fail_on_exist,
_validate_not_none,
_ERROR_CONFLICT,
_ERROR_STORAGE_MISSING_INFO,
)
from .._common_serialization import (
xml_escape,
_convert_class_to_xml,
_get_request_body,
_parse_response_for_dict_filter,
_parse_response_for_dict_prefix,
_update_request_uri_query_local_storage,
_ETreeXmlToObject,
)
from .._common_conversion import (
_int_or_none,
_str,
_str_or_none,
)
from .._http import (
HTTPRequest,
)
from ..models import (
SignedIdentifiers,
StorageServiceProperties,
)
from .models import (
Queue,
QueueEnumResults,
QueueMessagesList,
)
from ..auth import (
StorageSASAuthentication,
StorageSharedKeyAuthentication,
)
from ..connection import (
StorageConnectionParameters,
)
from .._serialization import (
_convert_signed_identifiers_to_xml,
)
from ._serialization import (
_update_storage_queue_header,
)
from ..sharedaccesssignature import (
SharedAccessSignature,
)
from ..storageclient import _StorageClient
_HTTP_RESPONSE_NO_CONTENT = 204
class QueueService(_StorageClient):
'''
This is the main class managing queue resources.
'''
def __init__(self, account_name=None, account_key=None, protocol='https',
host_base=QUEUE_SERVICE_HOST_BASE, dev_host=DEV_QUEUE_HOST,
timeout=DEFAULT_HTTP_TIMEOUT, sas_token=None, connection_string=None,
request_session=None):
'''
account_name:
your storage account name, required for all operations.
account_key:
your storage account key, required for all operations.
protocol:
            Optional. Protocol. Defaults to https.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
dev_host:
Optional. Dev host url. Defaults to localhost.
timeout:
Optional. Timeout for the http request, in seconds.
sas_token:
Optional. Token to use to authenticate with shared access signature.
connection_string:
Optional. If specified, the first four parameters (account_name,
account_key, protocol, host_base) may be overridden
by values specified in the connection_string. The next three parameters
(dev_host, timeout, sas_token) cannot be specified with a
connection_string. See
http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
for the connection string format.
request_session:
Optional. Session object to use for http requests.
'''
if connection_string is not None:
connection_params = StorageConnectionParameters(connection_string)
account_name = connection_params.account_name
account_key = connection_params.account_key
protocol = connection_params.protocol.lower()
host_base = connection_params.host_base_queue
super(QueueService, self).__init__(
account_name, account_key, protocol, host_base, dev_host, timeout, sas_token, request_session)
if self.account_key:
self.authentication = StorageSharedKeyAuthentication(
self.account_name,
self.account_key,
)
elif self.sas_token:
self.authentication = StorageSASAuthentication(self.sas_token)
else:
raise ValueError(_ERROR_STORAGE_MISSING_INFO)
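    # Usage sketch (illustrative; the account name and key are placeholders,
    # not real credentials):
    #
    #   queue_service = QueueService(account_name='myaccount',
    #                                account_key='<base64 account key>')
    #   queue_service.create_queue('taskqueue')
    #   queue_service.put_message('taskqueue', 'hello')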
def generate_shared_access_signature(self, queue_name,
shared_access_policy=None,
sas_version=X_MS_VERSION):
'''
Generates a shared access signature for the queue.
Use the returned signature with the sas_token parameter of QueueService.
queue_name:
Required. Name of queue.
shared_access_policy:
Instance of SharedAccessPolicy class.
sas_version:
x-ms-version for storage service, or None to get a signed query
string compatible with pre 2012-02-12 clients, where the version
is not included in the query string.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('shared_access_policy', shared_access_policy)
_validate_not_none('self.account_name', self.account_name)
_validate_not_none('self.account_key', self.account_key)
sas = SharedAccessSignature(self.account_name, self.account_key)
return sas.generate_signed_query_string(
queue_name,
None,
shared_access_policy,
sas_version,
)
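    # Usage sketch (illustrative): the returned token can authenticate a
    # second, key-less client. How the SharedAccessPolicy object is built is
    # assumed here and may differ between library versions.
    #
    #   token = queue_service.generate_shared_access_signature('taskqueue', policy)
    #   sas_service = QueueService(account_name='myaccount', sas_token=token)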
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue Service, including
Windows Azure Storage Analytics.
timeout:
Optional. The timeout parameter is expressed in seconds.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
response = self._perform_request(request)
return _ETreeXmlToObject.parse_response(
response, StorageServiceProperties)
def list_queues(self, prefix=None, marker=None, maxresults=None,
include=None):
'''
Lists all of the queues in a given storage account.
prefix:
Filters the results to return only queues with names that begin
with the specified prefix.
marker:
A string value that identifies the portion of the list to be
returned with the next list operation. The operation returns a
NextMarker element within the response body if the list returned
was not complete. This value may then be used as a query parameter
in a subsequent call to request the next portion of the list of
queues. The marker value is opaque to the client.
maxresults:
Specifies the maximum number of queues to return. If maxresults is
not specified, the server will return up to 5,000 items.
include:
Optional. Include this parameter to specify that the container's
metadata be returned as part of the response body.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
('marker', _str_or_none(marker)),
('maxresults', _int_or_none(maxresults)),
('include', _str_or_none(include))
]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
response = self._perform_request(request)
return _ETreeXmlToObject.parse_enum_results_list(
response, QueueEnumResults, "Queues", Queue)
def create_queue(self, queue_name, x_ms_meta_name_values=None,
fail_on_exist=False):
'''
Creates a queue under the given account.
queue_name:
name of the queue.
x_ms_meta_name_values:
Optional. A dict containing name-value pairs to associate with the
queue as metadata.
fail_on_exist:
Specify whether throw exception when queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
if not fail_on_exist:
try:
response = self._perform_request(request)
if response.status == _HTTP_RESPONSE_NO_CONTENT:
return False
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
response = self._perform_request(request)
if response.status == _HTTP_RESPONSE_NO_CONTENT:
raise AzureConflictHttpError(
_ERROR_CONFLICT.format(response.message), response.status)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Permanently deletes the specified queue.
queue_name:
Name of the queue.
fail_not_exist:
Specify whether throw exception when queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue_metadata(self, queue_name):
'''
Retrieves user-defined metadata and queue properties on the specified
queue. Metadata is associated with the queue as name-values pairs.
queue_name:
Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
response = self._perform_request(request)
return _parse_response_for_dict_prefix(
response,
prefixes=['x-ms-meta', 'x-ms-approximate-messages-count'])
def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
'''
Sets user-defined metadata on the specified queue. Metadata is
associated with the queue as name-value pairs.
queue_name:
Name of the queue.
x_ms_meta_name_values:
Optional. A dict containing name-value pairs to associate with the
queue as metadata.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
self._perform_request(request)
def get_queue_acl(self, queue_name):
'''
Returns details about any stored access policies specified on the
queue that may be used with Shared Access Signatures.
queue_name:
Name of existing queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '?comp=acl'
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
response = self._perform_request(request)
return _ETreeXmlToObject.parse_response(response, SignedIdentifiers)
def set_queue_acl(self, queue_name, signed_identifiers=None):
'''
Sets stored access policies for the queue that may be used with
Shared Access Signatures.
queue_name:
Name of existing queue.
signed_identifiers:
SignedIdentifers instance
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(queue_name) + '?comp=acl'
request.body = _get_request_body(
_convert_signed_identifiers_to_xml(signed_identifiers))
request.path, request.query = _update_request_uri_query_local_storage(
request, self.use_local_storage)
request.headers = _update_storage_queue_header(
request, self.authentication)
self._perform_request(request)
def put_message(self, queue_name, message_text, visibilitytimeout=None,
messagettl=None):
'''
Adds a new message to the back of the message queue. A visibility
timeout can also be specified to make the message invisible until the
visibility timeout expires. A message must be in a format that can be
included in an XML request with UTF-8 encoding. The encoded message can
be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size
for previous versions.
queue_name:
Name of the queue.
message_text:
Message content.
visibilitytimeout:
Optional. If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The new value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibilitytimeout
            should be set to a value smaller than the time-to-live value.
# apps/logics/equipment.py
#-*- coding: utf-8 -*-
import math
import copy
import random
from apps.common import utils
from apps.config import game_config
from apps.models.user_equipments import UserEquipments
from apps.models.user_forge import UserForge
from apps.models.user_property import UserProperty
from apps.models.user_material import UserMaterial
from apps.models.user_cards import UserCards
from apps.models.user_teams import UserTeams
def puton(oc_user, params):
"""穿装备
"""
eqdbid = params['eqdbid'] #装备id
cid = params['cid'] #装备的角色
part = params['part'] #装备的部位
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
equipment = user_equipments_obj.equipments.get(eqdbid)
    # check that the equipment exists
if not equipment:
return 1,{"msg":"this equipment is not exist"}
    # check whether the equipment is already worn
if eqdbid in user_equipments_obj.get_puton_equip():
return 2,{"msg":"this equipment has puton yet"}
    # check whether this equipment category can be worn by the character
equip_config = game_config.equipment_config[equipment["eid"]]
equip_category = equip_config["category"]
if equip_category != '0':
if cid == '0':
user_cards_obj = UserCards.get(oc_user.uid)
category = user_cards_obj.category
else:
user_teams_obj = UserTeams.get(oc_user.uid)
category = user_teams_obj.category(cid)
if equip_category != category:
return 3,{"msg":"you can't put on this category equipment"}
    # check the level requirement
elv = equipment["lv"]
lv = oc_user.property_info.property_info["lv"]
if elv > (lv + 10):
return 4,{"msg":"lv > 10"}
user_equipments_obj.put_on(eqdbid,part,cid)
    # title (achievement) update
from apps.models.user_title import UserTitle
user_title_obj = UserTitle.get_instance(oc_user.uid)
user_title_obj.set_title('4')
return 0, {"msg": True}
def puton_all(oc_user, params):
"""一键穿装备
"""
eqdbid = params['eqdbid'] #装备id
cid = params['cid'] #装备的角色
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
equipments = user_equipments_obj.equipments
    # check whether each equipment type can be worn
eqdbid_list = eqdbid.split(',')
for eq in eqdbid_list:
if eq not in equipments:
continue
equipment = equipments[eq]
eid = equipment["eid"]
equip_config = game_config.equipment_config[eid]
equip_type = equip_config["type"]
user_equipments_obj.put_on(eq,equip_type,cid)
    # title (achievement) update
from apps.models.user_title import UserTitle
user_title_obj = UserTitle.get_instance(oc_user.uid)
user_title_obj.set_title('4')
return 0,{}
def takeoff(oc_user, params):
"""脱装备
"""
eqdbid = params['eqdbid']
cid = params['cid']
part = params['part']
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
user_equipments_obj.take_off(eqdbid, part, cid)
return 0,{"msg": True}
def getall(oc_user, params):
"""获取所有装备
"""
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
data = user_equipments_obj.equipments
return 0,{"equipments": data}
def singleSell(oc_user, params):
"""卖单个装备
"""
eqdbid = params['eqdbid']
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
money = user_equipments_obj.single_sell(eqdbid)
return 0, {"msg": money}
def batchSell(oc_user, params):
"""卖装备,按品质
"""
quality = int(params['quality'])
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
money = user_equipments_obj.batch_sell(quality)
return 0, {"msg": money}
def strengthen(oc_user, params):
"""装备强化
"""
eqdbid = params['eqdbid']
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
equipment = user_equipments_obj.equipments.get(eqdbid, {})
if not equipment:
return 1,{"msg":"this equipment does not exist"}
eid = equipment["eid"]
lv = equipment["lv"]
quality = equipment["quality"]
minilv = equipment["minilv"]
equipment_config = game_config.equipment_config[eid]
_type = equipment_config['type']
strengthen_config = game_config.equipment_strengthen_config
#base configured cost of strengthening for this equipment type
equip_influence = strengthen_config['equip_influence'][_type]
minilv += 1
if minilv > len(equip_influence):
return 2,{"msg":"this equipment can't be strengthened any further"}
equipment["minilv"] = minilv
eq_minilv_growth = equip_influence[str(minilv)]
class_influence = strengthen_config['class_influence'][str(quality)]
growth_multiplier = class_influence['growth_multiplier']
#coin cost scales with the equipment level
coin = int(
math.ceil(
( (lv - 1)
* eq_minilv_growth['coin_growth']
+ eq_minilv_growth['coin']
) * growth_multiplier
)
)
#now deduct the coin cost, then check the required material amount
property_obj = UserProperty.get(oc_user.uid)
if not property_obj.minus_coin(coin):
return 3,{"msg":"you do not have enough coin"}
mid, base_num = eq_minilv_growth["item"]
growth_num = eq_minilv_growth["item_growth"]
#material count scales with the equipment level
final_num = ((lv - 1) * growth_num + base_num) * growth_multiplier
final_num = int(math.ceil(final_num))
material_obj = UserMaterial.get(oc_user.uid)
if material_obj.materials.get(mid,{}).get("num",0) < final_num:
return 4,{"msg":"you do not have enough material"}
material_obj.minus_material(mid, final_num)
if "strenth_cast" not in equipment:
equipment["strenth_cast"] = ("0", 0)
equipment["strenth_cast"] = (mid, equipment["strenth_cast"][1] + final_num)
user_equipments_obj.put()
return 0, {"msg": equipment}
def roll(oc_user, params):
"""普通洗练
"""
eqdbid = params['eqdbid']
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
equipment = user_equipments_obj.equipments.get(eqdbid)
if not equipment:
return 1,{"msg":"this equipment does not exist"}
lv = equipment["lv"]
quality = equipment["quality"]
if quality < 3 or lv < 10:
return 2,{"msg":"this equipment can't be rerolled"}
#deduct the coin cost
equipment_forge_config = game_config.equipment_forge_config
cost_base = equipment_forge_config["shuffle_costBase"] #base coin cost of a reroll
cost_growth = equipment_forge_config["shuffle_costGrowth"] #coin cost growth per level
cost_coin = int(cost_base + (lv-1)* cost_growth)
user_property_obj = UserProperty.get(oc_user.uid)
if user_property_obj.property_info["coin"] < cost_coin:
return 3,{"msg":"you do not have enough coin"}
user_property_obj.minus_coin(cost_coin)
#re-randomize the secondary attributes
vice_attr = equipment["vice_attr"]
total = 0
keyary = []
for k, v in vice_attr.items():
total += v
keyary.append(k)
if len(vice_attr) == 2:
value_ary = []
#round up
purple_min = int(total * 0.0833 + 1)
purple_max = int(total * 0.833)
limit_up = total - purple_min # 2 - 1 1x
if limit_up > purple_max:
limit_up = purple_max
limit_down = total - purple_max
if limit_down > purple_min:
purple_min = limit_down
result = random.randint(purple_min, limit_up)
value_ary.append(result)
_total = total - result
value_ary.append(_total)
elif len(vice_attr) == 3:
value_ary = []
#round up
purple_min = int(total * 0.0625 + 1)
purple_max = int(total * 0.625)
limit_up = total - 2 * purple_min # 3 - 1
if limit_up > purple_max:
limit_up = purple_max
result = random.randint(purple_min, limit_up)
value_ary.append(result)
_total = total - result
limit_up = _total - purple_min
if limit_up > purple_max:
limit_up = purple_max
limit_down = _total - purple_max
if limit_down > purple_min:
purple_min = limit_down
result = random.randint(purple_min, limit_up)
value_ary.append(result)
_total = _total - result
value_ary.append(_total)
else:
value_ary = []
#round up
purple_min = int(total * 0.05 + 1)
purple_max = int(total * 0.5)
limit_up = total - 3 * purple_min # 4 - 1
if limit_up > purple_max:
limit_up = purple_max
result = random.randint(purple_min, limit_up) #first draw
value_ary.append(result)
_total = total - result
limit_up = _total - 2 * purple_min # 4 - 2
if limit_up > purple_max:
limit_up = purple_max
result = random.randint(purple_min, limit_up) #second draw
value_ary.append(result)
_total = _total - result
limit_up = _total - 1 * purple_min # 4 - 3
if limit_up > purple_max:
limit_up = purple_max
limit_down = _total - purple_max
if limit_down > purple_min:
purple_min = limit_down
result = random.randint(purple_min, limit_up) #third draw
value_ary.append(result)
_total = _total - result #fourth value is the remainder
value_ary.append(_total)
for x in range(len(vice_attr)):
key = keyary[x]
vice_attr[key] = value_ary[x]
user_equipments_obj.put()
return 0, {"msg": equipment}
def advanced_roll(oc_user, params):
"""高级洗练
"""
ueid = params['ueid']
eqdbid = ueid
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
equipment = user_equipments_obj.equipments.get(eqdbid)
if not equipment:
return 1,{"msg":"this equipment does not exist"}
lv = equipment["lv"]
quality = equipment["quality"]
if quality < 3 or lv < 10:
return 2,{"msg":"this equipment can't be rerolled"}
#deduct the coin cost
equipment_forge_config = game_config.equipment_forge_config
cost_base = equipment_forge_config["shuffle_costBase"] #base coin cost of a reroll
cost_growth = equipment_forge_config["shuffle_costGrowth"] #coin cost growth per level
cost_coin = int(cost_base + (lv-1)* cost_growth)
user_property_obj = UserProperty.get(oc_user.uid)
if user_property_obj.property_info["coin"] < cost_coin:
return 3,{"msg":"you do not have enough coin"}
user_property_obj.minus_coin(cost_coin)
#re-randomize the secondary attributes
vice_attr = equipment["vice_attr"]
total = 0
keyary = []
for k, v in vice_attr.items():
total += v
keyary.append(k)
if len(vice_attr) == 2:
value_ary = []
#round up
purple_min = int(total * 0.0833 + 1)
purple_max = int(total * 0.833)
limit_up = total - purple_min # 2 - 1 1x
if limit_up > purple_max:
limit_up = purple_max
limit_down = total - purple_max
if limit_down > purple_min:
purple_min = limit_down
result = random.randint(purple_min, limit_up)
value_ary.append(result)
_total = total - result
value_ary.append(_total)
elif len(vice_attr) == 3:
value_ary = []
#round up
purple_min = int(total * 0.0625 + 1)
purple_max = int(total * 0.625)
limit_up = total - 2 * purple_min # 3 - 1
if limit_up > purple_max:
limit_up = purple_max
result = random.randint(purple_min, limit_up)
value_ary.append(result)
_total = total - result
limit_up = _total - purple_min
if limit_up > purple_max:
limit_up = purple_max
limit_down = _total - purple_max
if limit_down > purple_min:
purple_min = limit_down
result = random.randint(purple_min, limit_up)
value_ary.append(result)
_total = _total - result
value_ary.append(_total)
else:
value_ary = []
#round up
purple_min = int(total * 0.05 + 1)
purple_max = int(total * 0.5)
limit_up = total - 3 * purple_min # 4 - 1
if limit_up > purple_max:
limit_up = purple_max
result = random.randint(purple_min, limit_up) #first draw
value_ary.append(result)
_total = total - result
limit_up = _total - 2 * purple_min # 4 - 2
if limit_up > purple_max:
limit_up = purple_max
result = random.randint(purple_min, limit_up) #second draw
value_ary.append(result)
_total = _total - result
limit_up = _total - 1 * purple_min # 4 - 3
if limit_up > purple_max:
limit_up = purple_max
limit_down = _total - purple_max
if limit_down > purple_min:
purple_min = limit_down
result = random.randint(purple_min, limit_up) #third draw
value_ary.append(result)
_total = _total - result #fourth value is the remainder
value_ary.append(_total)
for x in range(len(vice_attr)):
key = keyary[x]
vice_attr[key] = value_ary[x]
user_equipments_obj.put()
return 0, {"equipment": equipment}
def punch(oc_user, params):
"""打孔
"""
eqdbid = params['eqdbid']
index = int(params["index"])
user_equipments_obj = UserEquipments.get_instance(oc_user.uid)
equipment = user_equipments_obj.equipments.get(eqdbid)
#check that the equipment exists (use .get so a missing id does not raise KeyError)
if not equipment:
return 1,{"msg":"this equipment does not exist"}
#check whether this gem hole has already been opened
gem_hole = equipment["gem_hole"]
if gem_hole[index-1] != 0:
return 2,{"msg":u"this gem hole has already been opened"}
#check that the previous hole has been opened
if (index > 1) and (gem_hole[index-2] == 0):
return 3,{"msg":u"please open the previous hole first"}
gemSlot_unlock = game_config.gem_config["gemSlot_unlock"]
cfg = gemSlot_unlock[str(index)]
coin_base = cfg['coin_base']
item = cfg['item']
#check whether the user has enough coin
lv
criterion suggested by Steven:
if abs(flux_opt_old-flux_opt)/np.abs(fluxerr_opt) < epsilon:
break
flux_opt_old = flux_opt
# reject any discrepant values; use [nsigma_inner] as the
# rejection criterion for the inner region defined by
# [mask_inner]; outside of that use [nsigma_outer]
sigma2 = (D - flux_opt * P)**2
mask_pos = (V > 0)
sigma2[mask_pos] /= V[mask_pos]
mask_temp[mask_inner] = (sigma2[mask_inner] > nsigma_inner**2)
mask_temp[mask_outer] = (sigma2[mask_outer] > nsigma_outer**2)
mask_use[mask_temp] = False
if show:
log.info('no. of rejected pixels: {}'.format(np.sum(~mask_use)))
log.info('np.amax((D - flux_opt * P)**2 / V): {}'.format(np.amax(sigma2)))
ds9_arrays(data=D, psf=P, bkg_var=bkg_var, variance=V,
fluxoptP = flux_opt*P, data_min_fluxoptP=(D - flux_opt * P),
data_min_fluxoptP_squared_div_variance=sigma2,
mask_use=mask_use.astype(int))
return flux_opt, fluxerr_opt
################################################################################
def flux_optimal_s2n (P, bkg_var, s2n, fwhm=5., max_iters=15, epsilon=1e-7):
"""Similar to function [flux_optimal] above, but this function returns
the total flux [in e-] required for the point source to have a
particular signal-to-noise ratio [s2n], given the PSF image [P]
and the background variance [bkg_var] (=sky background + RN**2).
This function is used to estimate the limiting magnitude of an
image at a set of coordinates (in get_psfoptflux), and
also to estimate the flux of the fake stars that are being added
to the image with a required S/N [set_zogy.fakestar_s2n].
Note that the background-subtracted image itself should be added
to [bkg_var] in order to calculate the flux required to reach the
required S/N with respect to the image, i.e. taking into account
the poisson noise of any object present in the image.
"""
for i in range(max_iters):
if i==0:
# initial estimate of variance (scalar)
V = bkg_var
# and flux (see Eq. 13 of Naylor 1998)
flux_opt = s2n * fwhm * np.sqrt(np.median(V)) / np.sqrt(2*np.log(2)/np.pi)
else:
# estimate new flux based on fluxerr_opt of previous iteration
flux_opt = s2n * fluxerr_opt
# improved estimate of variance (2D list)
V = bkg_var + flux_opt * P
# new estimate of D
D = flux_opt * P
# get optimal flux, avoiding zeros in V
index = np.nonzero(V)
if len(index[0]) != 0:
flux_opt, fluxerr_opt = get_optflux (P[index], D[index], V[index])
else:
break
# also break out of loop if S/N sufficiently close
if fluxerr_opt != 0:
if abs(flux_opt/fluxerr_opt - s2n) / s2n < epsilon:
break
return flux_opt
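# Minimal usage sketch (hypothetical numbers, not taken from this module): the
# flux needed for a 5-sigma detection given a normalized Gaussian PSF stamp and
# a flat background variance. Relies on numpy being imported as np, as elsewhere
# in this module.
def _example_limiting_flux():
    size, fwhm = 25, 5.0
    sigma = fwhm / 2.355
    y, x = np.mgrid[:size, :size] - size // 2
    P = np.exp(-(x**2 + y**2) / (2. * sigma**2))
    P /= np.sum(P)  # PSF normalized to unit sum
    return flux_optimal_s2n(P, bkg_var=100., s2n=5., fwhm=fwhm)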
################################################################################
def clipped_stats(array, nsigma=3, max_iters=10, epsilon=1e-6, clip_upper_frac=0,
clip_zeros=True, get_median=True, get_mode=False, mode_binsize=0.1,
verbose=False, make_hist=False, name_hist=None, hist_xlabel=None,
use_median=False, log=None):
if verbose and get_par(set_zogy.timing,tel) and log is not None:
log.info('executing clipped_stats ...')
t = time.time()
# remove zeros
if clip_zeros:
array = array[array.nonzero()]
if clip_upper_frac != 0:
index_upper = int((1.-clip_upper_frac)*array.size+0.5)
array = np.sort(array.flatten(), kind='quicksort')[:index_upper]
mean_old = float('inf')
for i in range(max_iters):
if array.size > 0:
if not use_median:
mean = array.mean()
else:
mean = np.median(array)
std = array.std()
if abs(mean_old-mean)/abs(mean) < epsilon:
break
mean_old = mean
index = ((array>(mean-nsigma*std)) & (array<(mean+nsigma*std)))
array = array[index]
else:
array = np.zeros(1)
break
# make sure to calculate mean if median was used in clipping
if use_median:
mean = array.mean()
# add median
if get_median:
median = np.median(array)
# and mode
if get_mode:
bins = np.arange(mean-nsigma*std, mean+nsigma*std, mode_binsize)
hist, bin_edges = np.histogram(array, bins)
index = np.argmax(hist)
mode = (bins[index]+bins[index+1])/2.
if make_hist:
bins = np.linspace(mean-nsigma*std, mean+nsigma*std)
plt.hist(np.ravel(array), bins, color='tab:blue')
x1,x2,y1,y2 = plt.axis()
plt.plot([mean, mean], [y2,y1], color='black')
plt.plot([mean+std, mean+std], [y2,y1], color='black', linestyle='--')
plt.plot([mean-std, mean-std], [y2,y1], color='black', linestyle='--')
title = 'mean (black line): {:.3f}, std: {:.3f}'.format(mean, std)
if get_median:
plt.plot([median, median], [y2,y1], color='tab:orange')
title = '{}, median (orange line): {:.3f}'.format(title, median)
if get_mode:
plt.plot([mode, mode], [y2,y1], color='tab:red')
title = '{}, mode (red line): {:.3f}'.format(title, mode)
plt.title(title)
if hist_xlabel is not None:
plt.xlabel(hist_xlabel)
plt.ylabel('number')
if get_par(set_zogy.make_plots,tel):
if name_hist is None: name_hist = 'clipped_stats_hist.pdf'
plt.savefig(name_hist)
if get_par(set_zogy.show_plots,tel): plt.show()
plt.close()
if verbose and get_par(set_zogy.timing,tel) and log is not None:
log_timing_memory (t0=t, label='clipped_stats', log=log)
if get_mode:
if get_median:
return mean, std, median, mode
else:
return mean, std, mode
else:
if get_median:
return mean, std, median
else:
return mean, std
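# Usage sketch (synthetic data, illustrative only): sigma-clipped statistics of
# a Gaussian sample contaminated by a few strong outliers.
def _example_clipped_stats():
    rng = np.random.RandomState(0)
    sample = rng.normal(loc=100., scale=5., size=10000)
    sample[:10] = 1e4  # inject outliers that would bias a plain mean/std
    mean, std, median = clipped_stats(sample, nsigma=3, get_median=True)
    return mean, std, median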
################################################################################
def read_header(header, keywords, log=None):
# list with values to return
values = []
# loop keywords
for key in keywords:
# use function [get_keyvalue] (see below) to return the value
# from either the variable defined in settings file, or from
# the fits header using the keyword name defined in the
# settings file
value = get_keyvalue(key, header, log=log)
if key=='filter':
value = str(value)
values.append(value)
if len(values)==1:
return values[0]
else:
return values
################################################################################
def get_keyvalue (key, header, log=None):
# check if [key] is defined in settings file
var = 'set_zogy.{}'.format(key)
try:
value = eval(var)
except:
# if it does not work, try using the value of the keyword name
# (defined in settings file) from the fits header instead
try:
key_name = eval('set_zogy.key_{}'.format(key))
except:
if log is not None:
log.critical('either [{}] or [key_{}] needs to be defined in '
'[settings_file]'.format(key, key))
raise RuntimeError
else:
if key_name in header:
value = header[key_name]
else:
if log is not None:
log.critical('keyword {} not present in header'
.format(key_name))
raise RuntimeError
if log is not None:
log.info('keyword: {}, adopted value: {}'.format(key, value))
return value
################################################################################
def get_remap_name(new_name, ref_name, remap_name):
# in case full paths are provided for the input images and the new
# and ref directories are different, the remapped reference images
# should end up in the new directory
def get_dir_name (name):
dir_name = '.'
if '/' in name:
dir_name = '/'.join(name.split('/')[:-1])
return dir_name
new_dir = get_dir_name(new_name)
ref_dir = get_dir_name(ref_name)
if new_dir != ref_dir:
remap_name = '{}/{}'.format(new_dir, remap_name.split('/')[-1])
return remap_name
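# Illustrative behaviour (the paths below are made up): when the new and the
# reference image live in different directories, the remapped reference name is
# moved into the directory of the new image:
#   get_remap_name('/data/new/img1.fits', '/data/ref/img2.fits',
#                  '/data/ref/img2_remap.fits') -> '/data/new/img2_remap.fits'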
################################################################################
def prep_optimal_subtraction(input_fits, nsubs, imtype, fwhm, header, log,
fits_mask=None, data_cal=None, remap=False):
log.info('executing prep_optimal_subtraction ...')
t = time.time()
if imtype=='new':
base = base_new
else:
base = base_ref
# read input_fits
data_wcs = read_hdulist (input_fits, dtype='float32')
# get gain, pixscale and saturation level from header
keywords = ['gain', 'pixscale', 'satlevel']
gain, pixscale, satlevel = read_header(header, keywords, log=log)
ysize, xsize = np.shape(data_wcs)
# determine data_mask
if fits_mask is not None:
# read in mask image
data_mask = read_hdulist (fits_mask, dtype='uint8')
else:
data_mask = None
fits_mask = input_fits.replace('.fits', '_mask.fits')
# create new mask or modify an existing one
data_mask = create_modify_mask (data_wcs, satlevel, data_mask=data_mask)
# check if background was already subtracted from input_fits
if 'BKG-SUB' in header:
bkg_sub = header['BKG-SUB']
else:
bkg_sub = False
log.info('background already subtracted from {}?: {}'
.format(input_fits, bkg_sub))
# [interp_Xchan] determines whether interpolation is allowed
# across different channels in [mini2back]
if (tel not in ['ML1', 'BG2', 'BG3', 'BG4'] or
get_par(set_zogy.MLBG_chancorr,tel)):
interp_Xchan = True
else:
interp_Xchan = False
# for bkg_std, do not interpolate for ML/BG images
if tel in ['ML1', 'BG2', 'BG3', 'BG4']:
interp_Xchan_std = False
else:
interp_Xchan_std = True
# if not, then read in background image; N.B.: this if block below
# is not relevant anymore, since the background is subtracted from
# the image in [run_sextractor], but leave it be for now
if not bkg_sub:
fits_bkg = '{}_bkg.fits'.format(base)
if os.path.exists(fits_bkg):
data_bkg = read_hdulist (fits_bkg, dtype='float32')
else:
# if it does not exist, create it from the background mesh
fits_bkg_mini = '{}_bkg_mini.fits'.format(base)
data_bkg_mini, header_mini = read_hdulist (
fits_bkg_mini, get_header=True, dtype='float32')
if 'BKG-SIZE' in header_mini:
bkg_size = header_mini['BKG-SIZE']
else:
bkg_size = get_par(set_zogy.bkg_boxsize,tel)
data_bkg = mini2back (data_bkg_mini, data_wcs.shape,
order_interp=2, bkg_boxsize=bkg_size,
interp_Xchan=interp_Xchan,
timing=get_par(set_zogy.timing,tel),
log=log)
# subtract the background
data_wcs -= data_bkg
# edge pixels will now be negative, best to ensure that they
# are set to zero
value_edge = get_par(set_zogy.mask_value['edge'],tel)
mask_edge = (data_mask & value_edge == value_edge)
data_wcs[mask_edge] = 0
# update header with new background
if 'S-BKG' in header:
bkg_temp = header['S-BKG'] - np.median(data_bkg_mini)
header['S-BKG'] = (bkg_temp, '[e-] median background full image')
# 2020-06-07: create background-subtracted image for the
# reference image only for SWarp to work on below
header['DATEFILE'] = (Time.now().isot, 'UTC date of writing file')
fits_bkgsub = input_fits.replace('.fits', '_bkgsub.fits')
if imtype=='ref':
fits.writeto(fits_bkgsub, data_wcs, header, overwrite=True)
else:
# could also make one for the new image, but not needed
if False:
fits.writeto(fits_bkgsub, data_wcs, header, overwrite=True)
# read in background std image in
often gives
better results.
transformation: bool, default = False
When set to True, a power transformation is applied to make the data more normal /
Gaussian-like. This is useful for modeling issues related to heteroscedasticity or
other situations where normality is desired. The optimal parameter for stabilizing
variance and minimizing skewness is estimated through maximum likelihood.
transformation_method: str, default = 'yeo-johnson'
Defines the method for transformation. By default, the transformation method is set
to 'yeo-johnson'. The other available option is 'quantile' transformation. Both
methods transform the feature set to follow a Gaussian-like or normal
distribution. Note that the quantile transformer is non-linear and may distort
linear correlations between variables measured at the same scale.
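For illustration (assuming a dataframe named data and a target column named
'target'; both names are placeholders, not taken from this documentation):
>>> exp = setup(data = data, target = 'target', transformation = True, transformation_method = 'yeo-johnson')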
handle_unknown_categorical: bool, default = True
When set to True, unknown categorical levels in new / unseen data are replaced by
the most or least frequent level as learned in the training data. The method is
defined under the unknown_categorical_method param.
unknown_categorical_method: str, default = 'least_frequent'
Method used to replace unknown categorical levels in unseen data. Method can be
set to 'least_frequent' or 'most_frequent'.
pca: bool, default = False
When set to True, dimensionality reduction is applied to project the data into
a lower dimensional space using the method defined in pca_method param. In
supervised learning pca is generally performed when dealing with high feature
space and memory is a constraint. Note that not all datasets can be decomposed
efficiently using a linear PCA technique and that applying PCA may result in loss
of information. As such, it is advised to run multiple experiments with different
pca_methods to evaluate the impact.
pca_method: str, default = 'linear'
The 'linear' method performs Linear dimensionality reduction using Singular Value
Decomposition. The other available options are:
kernel : dimensionality reduction through the use of an RBF kernel.
incremental : replacement for 'linear' pca when the dataset to be decomposed is
too large to fit in memory
pca_components: int/float, default = 0.99
Number of components to keep. if pca_components is a float, it is treated as a
target percentage for information retention. When pca_components is an integer
it is treated as the number of features to be kept. pca_components must be strictly
less than the original number of features in the dataset.
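For illustration (placeholder dataframe and target names):
>>> exp = setup(data = data, target = 'target', pca = True, pca_method = 'incremental', pca_components = 10)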
ignore_low_variance: bool, default = False
When set to True, all categorical features with insignificant variances are
removed from the dataset. The variance is calculated using the ratio of unique
values to the number of samples, and the ratio of the most common value to the
frequency of the second most common value.
combine_rare_levels: bool, default = False
When set to True, all levels in categorical features below the threshold defined
in rare_level_threshold param are combined together as a single level. There must
be at least two levels under the threshold for this to take effect.
rare_level_threshold represents the percentile distribution of level frequency.
Generally, this technique is applied to limit a sparse matrix caused by high
numbers of levels in categorical features.
rare_level_threshold: float, default = 0.1
Percentile distribution below which rare categories are combined. Only comes into
effect when combine_rare_levels is set to True.
bin_numeric_features: list, default = None
When a list of numeric features is passed they are transformed into categorical
features using KMeans, where values in each bin have the same nearest center of a
1D k-means cluster. The number of clusters are determined based on the 'sturges'
method. It is only optimal for gaussian data and underestimates the number of bins
for large non-gaussian datasets.
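For illustration (the column names are placeholders):
>>> exp = setup(data = data, target = 'target', bin_numeric_features = ['age', 'income'])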
remove_outliers: bool, default = False
When set to True, outliers from the training data are removed using linear
dimensionality reduction (PCA via the Singular Value Decomposition technique).
outliers_threshold: float, default = 0.05
The percentage / proportion of outliers in the dataset can be defined using
the outliers_threshold param. By default, 0.05 is used which means 0.025 of the
values on each side of the distribution's tail are dropped from training data.
remove_multicollinearity: bool, default = False
When set to True, the variables with inter-correlations higher than the threshold
defined under the multicollinearity_threshold param are dropped. When two features
are highly correlated with each other, the feature that is less correlated with
the target variable is dropped.
multicollinearity_threshold: float, default = 0.9
Threshold used for dropping the correlated features. Only comes into effect when
remove_multicollinearity is set to True.
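For illustration (placeholder names):
>>> exp = setup(data = data, target = 'target', remove_multicollinearity = True, multicollinearity_threshold = 0.9)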
remove_perfect_collinearity: bool, default = True
When set to True, perfect collinearity (features with correlation = 1) is removed
from the dataset. When two features are 100% correlated, one of them is randomly
dropped from the dataset.
create_clusters: bool, default = False
When set to True, an additional feature is created where each instance is assigned
to a cluster. The number of clusters is determined using a combination of
Calinski-Harabasz and Silhouette criterion.
cluster_iter: int, default = 20
Number of iterations used to create a cluster. Each iteration represents cluster
size. Only comes into effect when create_clusters param is set to True.
polynomial_features: bool, default = False
When set to True, new features are created based on all polynomial combinations
that exist within the numeric features in a dataset to the degree defined in
polynomial_degree param.
polynomial_degree: int, default = 2
Degree of polynomial features. For example, if an input sample is two dimensional
and of the form [a, b], the polynomial features with degree = 2 are:
[1, a, b, a^2, ab, b^2].
trigonometry_features: bool, default = False
When set to True, new features are created based on all trigonometric combinations
that exist within the numeric features in a dataset to the degree defined in the
polynomial_degree param.
polynomial_threshold: float, default = 0.1
This is used to compress a sparse matrix of polynomial and trigonometric features.
Polynomial and trigonometric features whose feature importance based on the
combination of Random Forest, AdaBoost and Linear correlation falls within the
percentile of the defined threshold are kept in the dataset. Remaining features
are dropped before further processing.
group_features: list or list of list, default = None
When a dataset contains features that have related characteristics, group_features
param can be used for statistical feature extraction. For example, if a dataset has
numeric features that are related with each other (i.e 'Col1', 'Col2', 'Col3'), a
list containing the column names can be passed under group_features to extract
statistical information such as the mean, median, mode and standard deviation.
group_names: list, default = None
When group_features is passed, a name of the group can be passed into the
group_names param as a list containing strings. The length of a group_names
list must equal to the length of group_features. When the length doesn't
match or the name is not passed, new features are sequentially named such as
group_1, group_2 etc.
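For illustration (placeholder column and group names):
>>> exp = setup(data = data, target = 'target', group_features = ['Col1', 'Col2', 'Col3'], group_names = ['group_cols'])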
feature_selection: bool, default = False
When set to True, a subset of features are selected using a combination of various
permutation importance techniques including Random Forest, Adaboost and Linear
correlation with target variable. The size of the subset is dependent on the
feature_selection_param. Generally, this is used to constrain the feature space
in order to improve efficiency in modeling. When polynomial_features and
feature_interaction are used, it is highly recommended to define the
feature_selection_threshold param with a lower value. Feature selection algorithm
by default is 'classic' but could be 'boruta', which will lead PyCaret to
use the Boruta selection algorithm.
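For illustration (placeholder names; a lower threshold is used because feature
selection is combined with polynomial features, as recommended above):
>>> exp = setup(data = data, target = 'target', polynomial_features = True, feature_selection = True, feature_selection_threshold = 0.5)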
feature_selection_threshold: float, default = 0.8
| |
# Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_policy import policy as base_policy
from oslo_utils import uuidutils
from neutron import policy
from neutron.tests.unit.conf.policies import test_base as base
class PortAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(PortAPITestCase, self).setUp()
self.network = {
'id': uuidutils.generate_uuid(),
'project_id': self.project_id}
self.target = {
'project_id': self.project_id,
'tenant_id': self.alt_project_id,
'network_id': self.network['id'],
'ext_parent_network_id': self.network['id']}
self.alt_target = {
'project_id': self.alt_project_id,
'tenant_id': self.alt_project_id,
'network_id': self.network['id'],
'ext_parent_network_id': self.network['id']}
self.plugin_mock = mock.Mock()
self.plugin_mock.get_network.return_value = self.network
mock.patch(
'neutron_lib.plugins.directory.get_plugin',
return_value=self.plugin_mock).start()
class SystemAdminTests(PortAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_create_port(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port', self.alt_target)
def test_create_port_with_device_owner(self):
target = self.target.copy()
target['device_owner'] = 'network:test'
alt_target = self.alt_target.copy()
alt_target['device_owner'] = 'network:test'
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:device_owner',
target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:device_owner',
alt_target)
def test_create_port_with_mac_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:mac_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:mac_address',
self.alt_target)
def test_create_port_with_fixed_ips(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:fixed_ips',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:fixed_ips',
self.alt_target)
def test_create_port_with_fixed_ips_and_ip_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:fixed_ips:ip_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:fixed_ips:ip_address',
self.alt_target)
def test_create_port_with_fixed_ips_and_subnet_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:fixed_ips:subnet_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:fixed_ips:subnet_id',
self.alt_target)
def test_create_port_with_port_security_enabled(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:port_security_enabled',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:port_security_enabled',
self.alt_target)
def test_create_port_with_binding_host_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:binding:host_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:binding:host_id',
self.alt_target)
def test_create_port_with_binding_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:binding:profile',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:binding:profile',
self.alt_target)
def test_create_port_with_binding_vnic_type(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:binding:vnic_type',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'create_port:binding:vnic_type',
self.alt_target)
def test_create_port_with_allowed_address_pairs(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_port:allowed_address_pairs',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_port:allowed_address_pairs',
self.alt_target)
def test_create_port_with_allowed_address_pairs_and_mac_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:mac_address',
self.alt_target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:mac_address',
self.target)
def test_create_port_with_allowed_address_pairs_and_ip_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:ip_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:ip_address',
self.alt_target)
def test_get_port(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port', self.alt_target)
def test_get_port_binding_vif_type(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:vif_type',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:vif_type',
self.alt_target)
def test_get_port_binding_vif_details(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:vif_details',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:vif_details',
self.alt_target)
def test_get_port_binding_host_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:host_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:host_id',
self.alt_target)
def test_get_port_binding_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:profile',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:binding:profile',
self.alt_target)
def test_get_port_resource_request(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:resource_request',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'get_port:resource_request',
self.alt_target)
def test_update_port(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port', self.alt_target)
def test_update_port_with_device_owner(self):
target = self.target.copy()
target['device_owner'] = 'network:test'
alt_target = self.alt_target.copy()
alt_target['device_owner'] = 'network:test'
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:device_owner',
target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:device_owner',
alt_target)
def test_update_port_with_mac_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:mac_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:mac_address',
self.alt_target)
def test_update_port_with_fixed_ips(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:fixed_ips',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:fixed_ips',
self.alt_target)
def test_update_port_with_fixed_ips_and_ip_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:fixed_ips:ip_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:fixed_ips:ip_address',
self.alt_target)
def test_update_port_with_fixed_ips_and_subnet_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:fixed_ips:subnet_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:fixed_ips:subnet_id',
self.alt_target)
def test_update_port_with_port_security_enabled(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:port_security_enabled',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:port_security_enabled',
self.alt_target)
def test_update_port_with_binding_host_id(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:binding:host_id',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:binding:host_id',
self.alt_target)
def test_update_port_with_binding_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:binding:profile',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:binding:profile',
self.alt_target)
def test_update_port_with_binding_vnic_type(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:binding:vnic_type',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'update_port:binding:vnic_type',
self.alt_target)
def test_update_port_with_allowed_address_pairs(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:allowed_address_pairs',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:allowed_address_pairs',
self.alt_target)
def test_update_port_with_allowed_address_pairs_and_mac_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:allowed_address_pairs:mac_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:allowed_address_pairs:mac_address',
self.alt_target)
def test_update_port_with_allowed_address_pairs_and_ip_address(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:allowed_address_pairs:ip_address',
self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:allowed_address_pairs:ip_address',
self.alt_target)
def test_update_port_data_plane_status(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:data_plane_status', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_port:data_plane_status', self.alt_target)
def test_delete_port(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'delete_port', self.target)
self.assertRaises(
base_policy.InvalidScope,
policy.enforce, self.context, 'delete_port', self.alt_target)
class SystemMemberTests(SystemAdminTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
class SystemReaderTests(SystemMemberTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(PortAPITestCase):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
def test_create_port(self):
self.assertTrue(
policy.enforce(self.context, 'create_port', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port', self.alt_target)
def test_create_port_with_device_owner(self):
target = self.target.copy()
target['device_owner'] = 'network:test'
alt_target = self.alt_target.copy()
alt_target['device_owner'] = 'network:test'
self.assertTrue(
policy.enforce(self.context,
'create_port:device_owner', target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:device_owner',
alt_target)
def test_create_port_with_mac_address(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:mac_address', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:mac_address',
self.alt_target)
def test_create_port_with_fixed_ips(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:fixed_ips', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips',
self.alt_target)
def test_create_port_with_fixed_ips_and_ip_address(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:fixed_ips:ip_address', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips:ip_address',
self.alt_target)
def test_create_port_with_fixed_ips_and_subnet_id(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:fixed_ips:subnet_id', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips:subnet_id',
self.alt_target)
def test_create_port_with_port_security_enabled(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:port_security_enabled', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:port_security_enabled',
self.alt_target)
def test_create_port_with_binding_host_id(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:binding:host_id', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:host_id',
self.alt_target)
def test_create_port_with_binding_profile(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:binding:profile', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:profile',
self.alt_target)
def test_create_port_with_binding_vnic_type(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:binding:vnic_type', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:vnic_type',
self.alt_target)
def test_create_port_with_allowed_address_pairs(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:allowed_address_pairs', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs',
self.alt_target)
def test_create_port_with_allowed_address_pairs_and_mac_address(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:allowed_address_pairs:mac_address',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:mac_address',
self.alt_target)
def test_create_port_with_allowed_address_pairs_and_ip_address(self):
self.assertTrue(
policy.enforce(self.context,
'create_port:allowed_address_pairs:ip_address',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:ip_address',
self.alt_target)
def test_get_port(self):
self.assertTrue(
policy.enforce(self.context, 'get_port', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'get_port', self.alt_target)
def test_get_port_binding_vif_type(self):
self.assertTrue(
policy.enforce(
self.context, 'get_port:binding:vif_type', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'get_port:binding:vif_type',
self.alt_target)
def test_get_port_binding_vif_details(self):
self.assertTrue(
policy.enforce(
self.context, 'get_port:binding:vif_details', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'get_port:binding:vif_details',
self.alt_target)
def test_get_port_binding_host_id(self):
self.assertTrue(
policy.enforce(
self.context, 'get_port:binding:host_id', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'get_port:binding:host_id',
self.alt_target)
def test_get_port_binding_profile(self):
self.assertTrue(
policy.enforce(
self.context, 'get_port:binding:profile', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'get_port:binding:profile',
self.alt_target)
def test_get_port_resource_request(self):
self.assertTrue(
policy.enforce(
self.context, 'get_port:resource_request', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'get_port:resource_request',
self.alt_target)
def test_update_port(self):
self.assertTrue(
policy.enforce(self.context, 'update_port', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port', self.alt_target)
def test_update_port_with_device_owner(self):
target = self.target.copy()
target['device_owner'] = 'network:test'
alt_target = self.alt_target.copy()
alt_target['device_owner'] = 'network:test'
self.assertTrue(
policy.enforce(self.context,
'update_port:device_owner', target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:device_owner',
alt_target)
def test_update_port_with_mac_address(self):
self.assertTrue(
policy.enforce(
self.context, 'update_port:mac_address', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:mac_address',
self.alt_target)
def test_update_port_with_fixed_ips(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:fixed_ips', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:fixed_ips',
self.alt_target)
def test_update_port_with_fixed_ips_and_ip_address(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:fixed_ips:ip_address', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:fixed_ips:ip_address',
self.alt_target)
def test_update_port_with_fixed_ips_and_subnet_id(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:fixed_ips:subnet_id', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:fixed_ips:subnet_id',
self.alt_target)
def test_update_port_with_port_security_enabled(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:port_security_enabled', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:port_security_enabled',
self.alt_target)
def test_update_port_with_binding_host_id(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:binding:host_id', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:binding:host_id',
self.alt_target)
def test_update_port_with_binding_profile(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:binding:profile', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:binding:profile',
self.alt_target)
def test_update_port_with_binding_vnic_type(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:binding:vnic_type', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'update_port:binding:vnic_type',
self.alt_target)
def test_update_port_with_allowed_address_pairs(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:allowed_address_pairs', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_port:allowed_address_pairs',
self.alt_target)
def test_update_port_with_allowed_address_pairs_and_mac_address(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:allowed_address_pairs:mac_address',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_port:allowed_address_pairs:mac_address',
self.alt_target)
def test_update_port_with_allowed_address_pairs_and_ip_address(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:allowed_address_pairs:ip_address',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_port:allowed_address_pairs:ip_address',
self.alt_target)
def test_update_port_data_plane_status(self):
self.assertTrue(
policy.enforce(self.context,
'update_port:data_plane_status',
self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_port:data_plane_status', self.alt_target)
def test_delete_port(self):
self.assertTrue(
policy.enforce(self.context, 'delete_port', self.target))
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'delete_port', self.alt_target)
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
def test_create_port_with_device_owner(self):
target = self.target.copy()
target['device_owner'] = 'network:test'
alt_target = self.alt_target.copy()
alt_target['device_owner'] = 'network:test'
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:device_owner',
target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:device_owner',
alt_target)
def test_create_port_with_mac_address(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:mac_address',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:mac_address',
self.alt_target)
def test_create_port_with_fixed_ips(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips',
self.alt_target)
def test_create_port_with_fixed_ips_and_ip_address(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips:ip_address',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips:ip_address',
self.alt_target)
def test_create_port_with_fixed_ips_and_subnet_id(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips:subnet_id',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:fixed_ips:subnet_id',
self.alt_target)
def test_create_port_with_port_security_enabled(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:port_security_enabled',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:port_security_enabled',
self.alt_target)
def test_create_port_with_binding_host_id(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:host_id',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:host_id',
self.alt_target)
def test_create_port_with_binding_profile(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:profile',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce, self.context, 'create_port:binding:profile',
self.alt_target)
def test_create_port_with_allowed_address_pairs(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs',
self.alt_target)
def test_create_port_with_allowed_address_pairs_and_mac_address(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:mac_address',
self.target)
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_port:allowed_address_pairs:mac_address',
self.alt_target)
def test_create_port_with_allowed_address_pairs_and_ip_address(self):
self.assertRaises(
| |
# src/NLG/NonLocalGame.py
import math
from math import sqrt
import matplotlib.pyplot as plt
from qiskit.extensions import RYGate, RZGate, RXGate, IGate, CXGate
from sklearn.preprocessing import StandardScaler
from NLG.agents.BasicAgent import BasicAgent
from NLG.agents.DQNAgent import DQNAgent
from NLG.models.LinearModel import LinearModel
def get_scaler(env, N, ALL_POSSIBLE_ACTIONS, round_to=2):
""":returns scikit-learn scaler object to scale the states"""
# Note: you could also populate the replay buffer here
states = []
for _ in range(N):
action = np.random.choice(ALL_POSSIBLE_ACTIONS)
state, reward, done = env.step(action)
states.append(np.round(state, round_to))
if done:
break
scaler = StandardScaler()
scaler.fit(states)
return scaler
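# Usage sketch (assumes an already constructed environment `env` exposing the
# action list; both names are placeholders, not defined in this file):
#   scaler = get_scaler(env, N=1000, ALL_POSSIBLE_ACTIONS=env.ALL_POSSIBLE_ACTIONS)
#   scaled_state = scaler.transform([env.reset()])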
def show_plot_of(plot_this, label, place_line_at=()):
# plot relevant information
fig_dims = (10, 6)
fig, ax = plt.subplots(figsize=fig_dims)
for pl in place_line_at:
plt.axhline(y=pl, color='r', linestyle='-')
plt.xlabel('Epochs')
plt.ylabel(label)
plt.plot(plot_this)
plt.show()
def override(f): return f
from abc import ABC, abstractmethod
class abstractEnvironment(ABC):
""" abstract environment to create CHSH framework
actions are expected in this format
ACTIONS = [q + axis + "0" for axis in 'xyz' for q in 'ra']
ACTIONS = [q + axis + "0" for axis in 'y' for q in 'r']
PLAYER = ['a', 'b']
QUESTION = ['0', '1']
ALL_POSSIBLE_ACTIONS = [[p + q + a] for p in PLAYER for q in QUESTION for a in ACTIONS] # place one gate at some place
ALL_POSSIBLE_ACTIONS.append(["xxr0"])
# for 1 game with 2 EPR
ALL_POSSIBLE_ACTIONS.append(["a0cxnot"])
ALL_POSSIBLE_ACTIONS.append(["b0cxnot"])
#
# for xor parallel with 2 EPR
ALL_POSSIBLE_ACTIONS.append(["a0cxnotr"])
ALL_POSSIBLE_ACTIONS.append(["b0cxnotr"])"""
@abstractmethod
def reset(self):
"""Return initial_time_step."""
self.counter = 1
self.history_actions = []
self.state = self.initial_state.copy()
self.accuracy = self.calc_accuracy([self.measure_probabilities_analytically() for _ in range(len(self.game_type))])
self.repr_state = np.array([x for _ in range(len(self.game_type)) for x in self.state], dtype=np.complex64)
return self.repr_state
@abstractmethod
def step(self, action):
"""Apply action and return new time_step."""
pass
def measure_probabilities_analytically(self):
""" :returns probabilities of questions (e.g. 00,01,10,11) happening in matrix """
probabilities = [abs(a) ** 2 for a in self.state]
return probabilities
def calc_accuracy(self, result):
""" Calculates accurary by going through rules of the game given by game_type matrix
:returns winning probability / accuracy / win rate based on winning game_type """
win_rate = 0
for x, combination_of_questions in enumerate(self.game_type):
for y, query in enumerate(combination_of_questions):
win_rate += (query * result[x][y])
win_rate = win_rate * 1 / len(self.game_type)
return win_rate
# def EPR_result(self, result):
# """ If state is bigger than with 2 qubits, we must reduce state so that it matches the scale of the game.
# This functions reduces bigger states result to smaller one by taking the first bit. """
# if self.n_qubits <= 2: return result
#
# new_result = []
# for r, row in enumerate(result):
# new_result.append([])
# for c in range(0, len(row), self.reduce_by * 2):
# new_result[r].append(
# sum(result[r][c:(c + self.reduce_by // 2)]) +
# sum(result[r][c + self.reduce_by:(c + self.reduce_by + self.reduce_by // 2)])
# )
# new_result[r].append(
# sum(result[r][(c + self.reduce_by // 2): c + self.reduce_by]) +
# sum(result[r][(c + self.reduce_by + self.reduce_by // 2):(c + self.reduce_by * 2)])
# )
#
# return new_result
# def paralel_non_local(self, result):
# """ selects probabilities for paralel games """
#
# return self.calc_acc(result)
# # dividing_to_paralel = dict()
# for state in result:
# for x in range(len(state)):
# dividing_to_paralel[self.possible_states[x]] = state[x]
#
# new_result_1 = []
# new_result_2 = []
# for s in range(len(result)):
# paralel_1 = dict()
# paralel_2 = dict()
# for key in dividing_to_paralel.keys():
# try: paralel_1[str(key[0]) + str(key[2])] += dividing_to_paralel[key]
# except KeyError: paralel_1[str(key[0]) + str(key[2])] = dividing_to_paralel[key]
# try: paralel_2[str(key[1]) + str(key[3])] += dividing_to_paralel[key]
# except KeyError: paralel_2[str(key[1]) + str(key[3])] = dividing_to_paralel[key]
#
# new_result_1.append(list(paralel_1.values()))
# new_result_2.append(list(paralel_2.values()))
#
# return self.calc_acc(new_result_1) * self.calc_acc(new_result_2)
def n_qubits_from_state(self):
""" There are 2^n states of n qubits, to get the n, we need to make log2 from state"""
assert len(self.state) % 2 == 0
return int(math.log(len(self.state), 2))
def count_gates(self):
""" :returns count of relevant gates """
count = 0
for action in self.history_actions:
if action in {"xxr0"}: # ending action
pass
# elif action in {"smallerAngle", "biggerAngle"}:
# count += 0.5
else:
count += 1
return count
def get_gate(self, action):
""" :returns gate got from string code of action """
gate = action[2:4]
if gate == "rx" or gate == "ax":
return RXGate
elif gate == "ry" or gate == "ay":
return RYGate
elif gate == "rz" or gate == "az":
return RZGate
elif gate == "cx":
return CXGate
else:
return IGate
def reward_only_difference(self, difference):
# reward is the increase in winning probability
return difference
def reward_qubic(self, difference):
return (difference ** 3) * 1000
def reward_only_best(self, difference):
""" reward only if it its better than results before """
reward = difference * 100
if np.round(self.accuracy, 2) > np.round(self.max_acc, 2):
reward += 50 * (self.accuracy - self.max_acc)  # bonus proportional to the improvement over the previous best
self.min_gates = len(self.history_actions)
self.max_acc = self.accuracy
elif np.round(self.accuracy, 2) == np.round(self.max_acc, 2):
if self.min_gates > len(self.history_actions):
self.min_gates = len(self.history_actions)
if self.counter == self.max_gates or self.history_actions[-1] == "xxr0":
if np.round(self.max_acc, 2) == np.round(self.accuracy, 2) and self.min_gates == self.count_gates():
reward = 5000 * (1 / (self.count_gates() + 1)) * self.accuracy
elif np.round(self.max_acc, 2) == np.round(self.accuracy, 2):
reward -= 1000 * (self.count_gates() + 1) / self.accuracy
else:
reward -= 10000 * (self.count_gates() + 1) / self.accuracy  # or some other formula could be used here
return reward
def reward_combined(self, difference):
reward = difference
if np.round(self.accuracy, 2) >= np.round(self.max_acc, 2):
self.max_acc = self.accuracy
if self.history_actions[-1] == "xxr0":
reward += 80 * (1 / (self.count_gates() + 1)) * self.accuracy  # alternatively use len(history_actions) instead of count_gates()
# if self.counter == self.max_gates:
# reward += 50 * (1 / (self.count_gates() + 1))
return reward
def complex_array_to_real(self, inp_array):
""" decomposes complex array into array of real numbers with double size. """
return np.concatenate((np.real(inp_array), np.imag(inp_array)))
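# Hypothetical usage sketch of complex_array_to_real (input values are illustrative):
#
#   complex_array_to_real(np.array([1 + 2j, 3 - 4j]))
#   # -> array([ 1.,  3.,  2., -4.])   real parts first, then imaginary parts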
import random
import warnings
warnings.filterwarnings('ignore')
import pickle
import numpy as np
class Game:
""" creates framework for easier manipulation """
def __init__(self, scaler=None, round_to=2, batch_size=32):
self.scaler = scaler
self.round_to = round_to
self.batch_size = batch_size
def play_one_episode(self, agent, env, DO):
""" Plays one episode of CHSH training
:returns the last accuracy acquired and the accumulated reward from the whole episode """
# in this version we will NOT use "exploring starts" method
# instead we will explore using an epsilon-soft policy
state = env.reset()
if self.scaler is not None: state = self.scaler.transform([state])
else: state = np.array([np.around(state, self.round_to)], dtype=np.float32)
done = False
# be aware of the timing
# each triple is s(t), a(t), r(t)
# but r(t) results from taking action a(t-1) from s(t-1) and landing in s(t)
rew_accum = 0
while not done:
action = agent.act(state)
next_state, reward, done = env.step(action[0])
if self.scaler is not None: next_state = self.scaler.transform([np.around(next_state, self.round_to)])
else: next_state = np.array([np.around(next_state, self.round_to)], dtype=np.float32)
if DO == 'train':
if type(agent) == BasicAgent:
agent.train(state.copy(), action[1], reward, next_state.copy(), done)
elif type(agent) == DQNAgent:
agent.update_replay_memory(state.copy(), action[1], reward, next_state.copy(), done)
agent.replay(self.batch_size)
state = next_state.copy()
rew_accum += reward
try: print(env.memory_state[tuple(env.history_actions)][2])
except: print(env.history_actions)
# print("state: ", env.repr_state)
return env.accuracy, rew_accum
def evaluate_train(self, N, agent, env):
""" Performes the whole training of agent in env in N steps
:returns portfolio value and rewards for all episodes - serves to plot how it has trained"""
DO = "train"
portfolio_value = []
rewards = []
for e in range(N):
val, rew = self.play_one_episode(agent, env, DO)
print('episode:', end=' ')
print(e, end=' ')
print('acc:', end=' ')
print(val)
print('rew:', end=' ')
print(rew)
portfolio_value.append(val) # append episode end portfolio value
rewards.append(rew)
# save the weights when we are done
if DO == 'train':
# # save the DQN
agent.save(f'.training/linear.npz')
# save the scaler
with open(f'../.training/scaler.pkl', 'wb') as f:
pickle.dump(self.scaler, f)
return portfolio_value, rewards
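# A hedged sketch of how Game is typically driven; the agent and env construction is
# hypothetical and depends on the rest of the project:
#
#   game = Game(scaler=None, round_to=2, batch_size=32)
#   accuracies, rewards = game.evaluate_train(N=500, agent=agent, env=env)
#   acc, rew = game.play_one_episode(agent, env, DO="test")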
def evaluate_test(self, agent, env):
""" Tests what has the agent learnt in N=1 steps :returns accuracy and reward """
DO = "test"
portfolio_value = []
if DO == 'test':
N = 1
# then load the previous scaler
if self.scaler is not None:
with open(f'../.training/scaler.pkl', 'rb') as f:
self.scaler = pickle.load(f)
# make sure epsilon is not 1!
# no need to run multiple episodes if epsilon = 0; the policy is deterministic and always outputs what it has already learnt
agent.epsilon = 0
# load trained weights
agent.load(f'.training/linear.npz')
# play the game num_episodes times
for e in range(N):
val, rew = self.play_one_episode(agent, env, DO)
portfolio_value.append(val)
return portfolio_value, rew
"""
API for TensorFlow usage.
"""
import functools
import itertools
from typing import Any, Callable, List, Optional, Sequence, Tuple
import tensorflow as tf # type: ignore
from targetran._check import (
_check_shear_input,
_check_translate_input,
_check_crop_input,
_check_input_range,
)
from targetran._tf_functional import (
_tf_convert,
_tf_cast_to_int,
_tf_round_to_int,
_tf_resize_image,
_tf_pad_image,
_tf_gather_image,
)
from targetran._transform import (
_AffineDependency,
_affine_transform,
_flip_left_right,
_flip_up_down,
_rotate,
_shear,
_translate,
_get_crop_inputs,
_get_random_size_fractions,
_crop,
_resize,
_get_flip_left_right_mats,
_get_flip_up_down_mats,
_get_rotate_mats,
_get_shear_mats,
_get_translate_mats,
)
from targetran._typing import T
from targetran.utils import Interpolation
def to_tf(
image_seq: Sequence[T],
bboxes_seq: Sequence[T],
labels_seq: Sequence[T]
) -> Tuple[Sequence[tf.Tensor], Sequence[tf.Tensor], Sequence[tf.Tensor]]:
"""
Convert seqs to TF (eager) tensor seqs.
"""
tuples = [
(_tf_convert(image),
tf.reshape(_tf_convert(bboxes), (-1, 4)),
_tf_convert(labels))
for image, bboxes, labels in itertools.zip_longest(
image_seq, bboxes_seq, labels_seq, fillvalue=[]
)
]
tf_image_seq, tf_bboxes_seq, tf_labels_seq = tuple(zip(*tuples))
return tf_image_seq, tf_bboxes_seq, tf_labels_seq
def seqs_to_tf_dataset(
image_seq: Sequence[T],
bboxes_seq: Sequence[T],
labels_seq: Sequence[T]
) -> tf.data.Dataset:
tf_image_seq, tf_bboxes_seq, tf_labels_seq = to_tf(
image_seq, bboxes_seq, labels_seq
)
# Tensors of different shapes can be included in a TF Dataset
# as ragged-tensors.
ds = tf.data.Dataset.zip((
tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_image_seq)),
tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_bboxes_seq)),
tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_labels_seq))
))
# However, our transformations expect normal tensors, so the ragged-tensors
# have to be first converted back to tensors during mapping. Therefore,
# the whole point of using ragged-tensors is ONLY for building a Dataset...
# Note that the label ragged-tensors are of rank-0, so they are implicitly
# converted to tensors during mapping. Strange TF Dataset behaviour...
ds = ds.map(
lambda i, b, l: (
i if isinstance(i, tf.Tensor) else i.to_tensor(),
b if isinstance(b, tf.Tensor) else b.to_tensor(),
l
)
)
return ds
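# A minimal, hypothetical example of building a dataset from in-memory sequences and
# mapping one of the transforms below over it (the image/bboxes/labels contents are
# placeholders):
#
#   import numpy as np
#   images = [np.zeros((64, 64, 3)), np.zeros((32, 48, 3))]
#   bboxes = [np.array([[10.0, 10.0, 5.0, 5.0]]), np.zeros((0, 4))]
#   labels = [np.array([1]), np.array([])]
#   ds = seqs_to_tf_dataset(images, bboxes, labels)
#   ds = ds.map(tf_flip_left_right)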
def _tf_get_affine_dependency() -> _AffineDependency:
return _AffineDependency(
_tf_convert, tf.shape, tf.reshape, tf.expand_dims, tf.squeeze,
_tf_pad_image, tf.range, _tf_cast_to_int, _tf_round_to_int,
tf.repeat, tf.tile, tf.ones_like, tf.stack, tf.concat, tf.matmul,
tf.clip_by_value, tf.math.floor, tf.math.ceil, _tf_gather_image,
tf.identity, tf.reduce_max, tf.reduce_min,
tf.logical_and, tf.boolean_mask
)
def _tf_affine_transform(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
image_dest_tran_mat: tf.Tensor,
bboxes_tran_mat: tf.Tensor,
interpolation: Interpolation
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return _affine_transform(
image, bboxes, labels, image_dest_tran_mat, bboxes_tran_mat,
interpolation, _tf_get_affine_dependency()
)
def tf_flip_left_right(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return _flip_left_right(
image, bboxes, labels,
Interpolation.NEAREST, _tf_get_affine_dependency()
)
def tf_flip_up_down(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return _flip_up_down(
image, bboxes, labels,
Interpolation.NEAREST, _tf_get_affine_dependency()
)
def tf_rotate(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
angle_deg: float,
interpolation: Interpolation = Interpolation.BILINEAR
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return _rotate(
image, bboxes, labels, _tf_convert(angle_deg), tf.cos, tf.sin,
interpolation, _tf_get_affine_dependency()
)
def tf_shear(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
angle_deg: float,
interpolation: Interpolation = Interpolation.BILINEAR,
_check_input: bool = True
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
if _check_input:
_check_shear_input(angle_deg)
return _shear(
image, bboxes, labels, _tf_convert(angle_deg), tf.tan,
interpolation, _tf_get_affine_dependency()
)
def tf_translate(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
translate_height: int,
translate_width: int,
interpolation: Interpolation = Interpolation.BILINEAR,
_check_input: bool = True
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
if _check_input:
_check_translate_input(
image.get_shape(), translate_height, translate_width
)
return _translate(
image, bboxes, labels,
_tf_convert(translate_height), _tf_convert(translate_width),
interpolation, _tf_get_affine_dependency()
)
def _tf_get_crop_inputs(
image_height: int,
image_width: int,
height_fraction_range: Tuple[float, float],
width_fraction_range: Tuple[float, float],
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
return _get_crop_inputs(
image_height, image_width, height_fraction_range, width_fraction_range,
rand_fn, _tf_convert, _tf_round_to_int
)
def tf_crop(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
offset_height: int,
offset_width: int,
crop_height: int,
crop_width: int,
_check_input: bool = True
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
if _check_input:
_check_crop_input(image.get_shape(), offset_height, offset_width)
return _crop(
image, bboxes, labels,
_tf_convert(offset_height), _tf_convert(offset_width),
_tf_convert(crop_height), _tf_convert(crop_width),
_tf_convert, tf.shape, tf.reshape, tf.concat,
tf.logical_and, tf.squeeze, tf.clip_by_value, tf.boolean_mask
)
def tf_resize(
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
dest_size: Tuple[int, int]
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return _resize(
image, bboxes, labels, dest_size,
_tf_convert, tf.shape, tf.reshape, _tf_resize_image, tf.concat
)
class TFRandomTransform:
def __init__(
self,
tf_fn: Callable[..., Tuple[tf.Tensor, tf.Tensor, tf.Tensor]],
probability: float,
seed: Optional[int],
name: str,
is_affine: bool
) -> None:
self._tf_fn = tf_fn
self.probability = probability
self._rng = tf.random.Generator.from_seed(seed) if seed is not None \
else tf.random.Generator.from_non_deterministic_state()
self.name = name
self.is_affine = is_affine
def _rand_fn(self, shape: Sequence[int] = ()) -> tf.Tensor:
return self._rng.uniform(shape=shape)
def _get_mats(
self,
image: tf.Tensor,
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
pass
def __call__(
self,
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
*args: Any,
**kwargs: Any
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
# Make sure inputs are in the needed format.
image = _tf_convert(image)
bboxes = _tf_convert(bboxes)
labels = _tf_convert(labels)
if self._rand_fn() < _tf_convert(self.probability):
return self._tf_fn(image, bboxes, labels, *args, **kwargs)
return image, bboxes, labels
def _get_random_indices(
rng: tf.random.Generator,
num_indices: int,
num_selected_indices: int,
selected_probabilities: Optional[List[float]] = None,
) -> tf.Tensor:
"""
Roughly mimicking Numpy choice for getting indices, without replacement.
The indices always start from 0.
References:
https://github.com/tensorflow/tensorflow/issues/9260#issuecomment-437875125
"""
probs = selected_probabilities if selected_probabilities \
else tf.ones(num_indices)
logits = tf.math.log(probs)
z = -tf.math.log(-tf.math.log( # pylint: disable=invalid-unary-operand-type
rng.uniform(tf.shape(logits), 0, 1)
))
_, indices = tf.nn.top_k(logits + z, num_selected_indices)
return indices
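# The construction above is the Gumbel-max trick: adding -log(-log(U)) noise to the
# log-probabilities and taking the top-k gives k distinct indices sampled without
# replacement. A hedged standalone illustration (not used by the library code):
#
#   rng = tf.random.Generator.from_seed(0)
#   idx = _get_random_indices(rng, num_indices=5, num_selected_indices=2)
#   # idx is a length-2 int tensor holding two distinct values in [0, 5)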
class TFCombineAffine(TFRandomTransform):
def __init__(
self,
transforms: Sequence[TFRandomTransform],
num_selected_transforms: Optional[int] = None,
selected_probabilities: Optional[List[float]] = None,
interpolation: Interpolation = Interpolation.BILINEAR,
probability: float = 1.0,
seed: Optional[int] = None
) -> None:
not_affine_trans = list(filter(lambda t: not t.is_affine, transforms))
if not_affine_trans:
raise AssertionError(
f"Non-affine transforms cannot be included in TFCombineAffine: "
f"{[t.name for t in not_affine_trans]}"
)
if num_selected_transforms and selected_probabilities:
if len(selected_probabilities) != len(transforms):
raise ValueError(
"Number of items in selected_probabilities should be "
"the same as the number of items in transforms."
)
super().__init__(
_tf_affine_transform, probability, seed, "TFCombineAffine", True
)
self._transforms = transforms
self._num_selected_transforms = num_selected_transforms
self._selected_probabilities = selected_probabilities
self._interpolation = interpolation
self._identity_mat = tf.expand_dims(tf.constant([
[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]
]), axis=0)
def _combine_mats(
self,
image: tf.Tensor,
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
image_dest_tran_mats, bboxes_tran_mats, probs = tuple(zip(
*[(*t._get_mats(image, rand_fn), t.probability)
for i, t in enumerate(self._transforms)]
))
if self._num_selected_transforms:
indices = _get_random_indices(
self._rng,
len(self._transforms),
self._num_selected_transforms,
self._selected_probabilities
)
image_dest_tran_mats = tf.gather( # pylint: disable=no-value-for-parameter
image_dest_tran_mats, indices
)
bboxes_tran_mats = tf.gather( # pylint: disable=no-value-for-parameter
bboxes_tran_mats, indices
)
else:
conditions = tf.reshape(rand_fn() < probs, (len(probs), 1, 1))
image_dest_tran_mats = tf.where(
conditions, image_dest_tran_mats, self._identity_mat
)
bboxes_tran_mats = tf.where(
conditions, bboxes_tran_mats, self._identity_mat
)
image_dest_tran_mat = functools.reduce(
tf.matmul, tf.unstack(image_dest_tran_mats)
)
# Note the reversed order for the bboxes tran matrices.
bboxes_tran_mat = functools.reduce(
tf.matmul, tf.unstack(bboxes_tran_mats)[::-1]
)
return image_dest_tran_mat, bboxes_tran_mat
def __call__(
self,
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
*args: Any,
**kwargs: Any
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
image_dest_tran_mat, bboxes_tran_mat = self._combine_mats(
image, self._rand_fn
)
return super().__call__(
image, bboxes, labels, image_dest_tran_mat, bboxes_tran_mat,
self._interpolation
)
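# A hedged usage sketch: several affine random transforms are combined into a single
# matrix so the image is resampled only once (parameter values are illustrative):
#
#   combined = TFCombineAffine(
#       [TFRandomRotate(), TFRandomShear(), TFRandomFlipLeftRight()],
#       probability=1.0, seed=42
#   )
#   new_image, new_bboxes, new_labels = combined(image, bboxes, labels)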
class TFRandomFlipLeftRight(TFRandomTransform):
def __init__(
self,
probability: float = 0.5,
seed: Optional[int] = None
) -> None:
super().__init__(
tf_flip_left_right, probability, seed, "TFRandomFlipLeftRight", True
)
def _get_mats(
self,
image: tf.Tensor,
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
return _get_flip_left_right_mats(_tf_convert)
def __call__(
self,
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
*args: Any,
**kwargs: Any
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return super().__call__(image, bboxes, labels)
class TFRandomFlipUpDown(TFRandomTransform):
def __init__(
self,
probability: float = 0.5,
seed: Optional[int] = None
) -> None:
super().__init__(
tf_flip_up_down, probability, seed, "TFRandomFlipUpDown", True
)
def _get_mats(
self,
image: tf.Tensor,
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
return _get_flip_up_down_mats(_tf_convert)
def __call__(
self,
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
*args: Any,
**kwargs: Any
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return super().__call__(image, bboxes, labels)
class TFRandomRotate(TFRandomTransform):
def __init__(
self,
angle_deg_range: Tuple[float, float] = (-15.0, 15.0),
interpolation: Interpolation = Interpolation.BILINEAR,
probability: float = 0.9,
seed: Optional[int] = None
) -> None:
_check_input_range(angle_deg_range, None, "angle_deg_range")
super().__init__(tf_rotate, probability, seed, "TFRandomRotate", True)
self.angle_deg_range = angle_deg_range
self.interpolation = interpolation
def _get_angle_deg(self, rand_fn: Callable[..., tf.Tensor]) -> tf.Tensor:
return _tf_convert(self.angle_deg_range[1] - self.angle_deg_range[0]) \
* rand_fn() + _tf_convert(self.angle_deg_range[0])
def _get_mats(
self,
image: tf.Tensor,
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
return _get_rotate_mats(
self._get_angle_deg(rand_fn), _tf_convert, tf.cos, tf.sin
)
def __call__(
self,
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
*args: Any,
**kwargs: Any
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return super().__call__(
image, bboxes, labels, self._get_angle_deg(self._rand_fn),
self.interpolation
)
class TFRandomShear(TFRandomTransform):
def __init__(
self,
angle_deg_range: Tuple[float, float] = (-10.0, 10.0),
interpolation: Interpolation = Interpolation.BILINEAR,
probability: float = 0.9,
seed: Optional[int] = None
) -> None:
_check_input_range(angle_deg_range, (-90.0, 90.0), "angle_deg_range")
super().__init__(tf_shear, probability, seed, "TFRandomShear", True)
self.angle_deg_range = angle_deg_range
self.interpolation = interpolation
def _get_angle_deg(self, rand_fn: Callable[..., tf.Tensor]) -> tf.Tensor:
return _tf_convert(self.angle_deg_range[1] - self.angle_deg_range[0]) \
* rand_fn() + _tf_convert(self.angle_deg_range[0])
def _get_mats(
self,
image: tf.Tensor,
rand_fn: Callable[..., tf.Tensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
return _get_shear_mats(
self._get_angle_deg(rand_fn), _tf_convert, tf.tan
)
def __call__(
self,
image: tf.Tensor,
bboxes: tf.Tensor,
labels: tf.Tensor,
*args: Any,
**kwargs: Any
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
return super().__call__(
image, bboxes, labels, self._get_angle_deg(self._rand_fn),
self.interpolation, False
)
class TFRandomTranslate(TFRandomTransform):
def __init__(
| |
#!/usr/bin/env python
################################################################################
#
# file_name_parameters
#
#
# Copyright (c) 10/9/2009 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
"""
********************************************
:mod:`file_name_parameters` -- Overview
********************************************
.. moduleauthor:: <NAME> <<EMAIL>>
Handles file names for ruffus
"""
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import os,copy
import re
import glob
from operator import itemgetter
from itertools import groupby
from collections import defaultdict
from time import strftime, gmtime
if __name__ == '__main__':
import sys
sys.path.insert(0,".")
from ruffus_exceptions import *
#from file_name_parameters import *
from ruffus_utility import *
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import re
#_________________________________________________________________________________________
# get_readable_path_str
#_________________________________________________________________________________________
def get_readable_path_str(original_path, max_len):
"""
Truncates the path to at most max_len characters if necessary
If the result is a path within a nested directory, the partially truncated
leading directory name is removed
"""
if len(original_path) < max_len:
return original_path
truncated_name = original_path[-(max_len - 5):]
if "/" not in truncated_name:
return "[...]" + truncated_name
return "[...]" + re.sub("^[^/]+", "", truncated_name)
#_________________________________________________________________________________________
# epoch_seconds_to_str
#_________________________________________________________________________________________
def epoch_seconds_to_str (epoch_seconds):
"""
Converts seconds since the epoch into a readable date/time string, with the
seconds shown to two decimal places
"""
# returns 24 char long 25 May 2011 23:37:40.12
time_str = strftime("%d %b %Y %H:%M:%S", gmtime(epoch_seconds))
#
fraction_of_second_as_str = ("%.2f" % (epoch_seconds - int(epoch_seconds)))[1:]
# or fraction = ("%.2f" % (divmod(epoch_seconds, 1)[1]))[1:]
return (time_str + fraction_of_second_as_str)
err_msg_no_regex_match = ("No jobs were run because no files names matched. "
"Please make sure that the regular expression is correctly specified.")
err_msg_empty_files_parameter= ("@files() was empty, i.e. no files were specified. "
"Please make sure this is by design.")
#_________________________________________________________________________________________
# t_params_tasks_globs_run_time_data
#_________________________________________________________________________________________
class t_params_tasks_globs_run_time_data(object):
"""
After parameters are parsed into tasks, globs, runtime data
"""
def __init__ (self, params, tasks, globs, runtime_data_names):
self.params = params
self.tasks = tasks
self.globs = globs
self.runtime_data_names = runtime_data_names
def __str__ (self):
return str(self.params)
def param_iter (self):
for p in self.params:
yield t_params_tasks_globs_run_time_data(p, self.tasks, self.globs,
self.runtime_data_names)
def unexpanded_globs (self):
"""
do not expand globs
"""
return t_params_tasks_globs_run_time_data(self.params, self.tasks, [],
self.runtime_data_names)
def single_file_to_list (self):
"""
if the parameter is a simple string, wrap it in a list unless it is a glob
Useful for simple @transform cases
"""
if isinstance(self.params, basestring) and not is_glob(self.params):
self.params = [self.params]
return True
return False
def regex_replaced (self, filename, regex, regex_or_suffix = REGEX_SUBSTITUTE):
output_glob = regex_replace(filename, regex, self.globs, regex_or_suffix)
output_param = regex_replace(filename, regex, self.params, regex_or_suffix)
return t_params_tasks_globs_run_time_data(output_param, self.tasks, output_glob,
self.runtime_data_names)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# needs_update_func
# functions which are called to see if a job needs to be updated
#
# Each task is a series of parallel jobs
# each of which has the following pseudo-code
#
# for param in param_generator_func():
# if needs_update_func(*param):
# job_wrapper(*param)
#
# N.B. param_generator_func yields iterators of *sequences*
# if you are generating single parameters, turn them into lists:
#
# for a in alist:
# yield (a,)
#
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
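# A hedged, self-contained sketch of the protocol described above (names here are
# illustrative, not part of the ruffus API): each yielded params tuple is
# (input, output, ...), and the job runs only when the needs_update check says so.
#
#   def example_param_generator():
#       for infile, outfile in [("a.txt", "a.out"), ("b.txt", "b.out")]:
#           yield (infile, outfile)
#
#   def example_run_task(param_generator_func, needs_update_func, job_wrapper):
#       for param in param_generator_func():
#           update_needed, msg = needs_update_func(*param)
#           if update_needed:
#               job_wrapper(*param)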
#_________________________________________________________________________________________
# needs_update_check_directory_missing
# N.B. throws exception if this is an ordinary file, not a directory
#_________________________________________________________________________________________
def needs_update_check_directory_missing (dirs):
"""
Called per directory:
Does it exist?
Is it an ordinary file rather than a directory? (throws an exception)
"""
for d in dirs:
#print >>sys.stderr, "check directory missing %d " % os.path.exists(d) # DEBUG
if not os.path.exists(d):
return True, "Directory [%s] is missing" % d
if not os.path.isdir(d):
raise error_not_a_directory("%s already exists but as a file, not a directory" % d )
return False, "All directories exist"
#_________________________________________________________________________________________
# check_input_files_exist
#_________________________________________________________________________________________
def check_input_files_exist (*params):
"""
If inputs are missing then there is no way a job can run successfully.
Must throw exception.
This extra function is a hack to make sure input files exists right before
job is called for better error messages, and to save things from blowing
up inside the task function
"""
if len(params):
input_files = params[0]
for f in get_strings_in_nested_sequence(input_files):
if not os.path.exists(f):
raise MissingInputFileError("No way to run job: "+
"Input file ['%s'] does not exist" % f)
#_________________________________________________________________________________________
# needs_update_check_exist
#_________________________________________________________________________________________
def needs_update_check_exist (*params):
"""
Given input and output files, see if all exist
Each can be
#. string: assumed to be a filename "file1"
#. any other type
#. arbitrary nested sequence of (1) and (2)
"""
# missing output means build
if len(params) < 2:
return True, "i/o files not specified"
i, o = params[0:2]
i = get_strings_in_nested_sequence(i)
o = get_strings_in_nested_sequence(o)
#
# build: missing output file
#
if len(o) == 0:
return True, "Missing output file"
# missing input / output file means always build
missing_files = []
for io in (i, o):
for p in io:
if not os.path.exists(p):
missing_files.append(p)
if len(missing_files):
return True, "Missing file%s [%s]" % ("s" if len(missing_files) > 1 else "",
", ".join(missing_files))
#
# missing input -> build only if output absent
#
if len(i) == 0:
return False, "Missing input files"
return False, "Up to date"
#_________________________________________________________________________________________
# needs_update_check_modify_time
#_________________________________________________________________________________________
def needs_update_check_modify_time (*params):
"""
Given input and output files, see if all exist and whether output files are later than input files
Each can be
#. string: assumed to be a filename "file1"
#. any other type
#. arbitrary nested sequence of (1) and (2)
"""
needs_update, err_msg = needs_update_check_exist (*params)
if (needs_update, err_msg) != (False, "Up to date"):
return needs_update, err_msg
i, o = params[0:2]
i = get_strings_in_nested_sequence(i)
o = get_strings_in_nested_sequence(o)
#
# get sorted modified times for all input and output files
#
filename_to_times = [[], []]
file_times = [[], []]
#_____________________________________________________________________________________
# pretty_io_with_date_times
#_____________________________________________________________________________________
def pretty_io_with_date_times (filename_to_times):
# sort
for io in range(2) :
filename_to_times[io].sort()
#
# add asterisk for all files which are causing this job to be out of date
#
file_name_to_asterisk = dict()
oldest_output_mtime = filename_to_times[1][0][0]
for mtime, file_name in filename_to_times[0]:
file_name_to_asterisk[file_name] = "*" if mtime >= oldest_output_mtime else " "
newest_input_mtime = filename_to_times[0][-1][0]
for mtime, file_name in filename_to_times[1]:
file_name_to_asterisk[file_name] = "*" if mtime <= newest_input_mtime else " "
#
# try to fit in 100 - 15 = 85 char lines
# date time ~ 25 characters so limit file name to 55 characters
#
msg = "\n"
category_names = "Input", "Output"
for io in range(2):
msg += " %s files:\n" % category_names[io]
for mtime, file_name in filename_to_times[io]:
file_datetime_str = epoch_seconds_to_str(mtime)
msg += (" " + # indent
file_name_to_asterisk[file_name] + " " + # asterisked out of date files
file_datetime_str + ": " + # date time of file
get_readable_path_str(file_name, 55) + "\n") # file name truncated to 55
return msg
#
# Ignore output file if it is found in the list of input files
# By definition they have the same timestamp,
# and the job will otherwise appear to be out of date
#
# Symbolic links followed
real_input_file_names = set()
for input_file_name in i:
real_input_file_names.add(os.path.realpath(input_file_name))
mtime = os.path.getmtime(input_file_name)
filename_to_times[0].append((mtime, input_file_name))
file_times[0].append(mtime)
for output_file_name in o:
real_file_name = os.path.realpath(output_file_name)
mtime = os.path.getmtime(output_file_name)
if real_file_name not in real_input_file_names:
file_times[1].append(mtime)
filename_to_times[1].append((mtime, output_file_name))
#
# Debug: Force print modified file names and times
#
#if len(file_times[0]) and len (file_times[1]):
# print >>sys.stderr, pretty_io_with_date_times(filename_to_times), file_times, (max(file_times[0]) >= min(file_times[1]))
#else:
# print >>sys.stderr, i, o
#
# update if any input file >= (more recent) output file
#
if len(file_times[0]) and len (file_times[1]) and max(file_times[0]) >= min(file_times[1]):
return True, pretty_io_with_date_times(filename_to_times)
return False, "Up to date"
#_________________________________________________________________________________________
#
# is_file_re_combining
#
#_________________________________________________________________________________________
def is_file_re_combining (old_args):
"""
Helper function for @files_re
check if parameters are wrapped in combine
"""
combining_all_jobs = False
orig_args = []
for arg in old_args:
if isinstance(arg, combine):
combining_all_jobs = True
if len(arg.args) == 1:
orig_args.append(arg.args[0])
else:
orig_args.append(arg[0].args)
| |
asv_to_taxa_dict = {}
taxa_to_asv_dict = {}
for line in taxonomy:
if 'Feature' not in line:
asv, taxa, _confidence = line.split('\t')
taxa = asv_taxa_clean_up(taxa, level)
if taxa:
if asv not in asv_to_taxa_dict:
asv_to_taxa_dict[asv] = taxa
else:
print('warning: ASV %s is already assigned to a taxon; duplicate entry ignored' % asv)
if taxa not in taxa_to_asv_dict:
taxa_to_asv_dict[taxa] = set()
taxa_to_asv_dict[taxa].add(asv)
taxonomy.close()
assigned_asv = {}
select_taxa_list = open(args.select_taxa_list)
for line in select_taxa_list:
if line[0] != '#':
taxa = line.split('\t')[0]
#taxa = asv_taxa_clean_up(taxa, level)
if taxa:
if taxa in taxa_to_asv_dict:
assigned_asv[taxa] = taxa_to_asv_dict[taxa]
select_taxa_list.close()
#universal_ko_dict, universal_ko_lookup = load_ko(args.kegg_file)
#ko_super_set_type, ko_to_path_lookup = ko_to_pathway(universal_ko_dict, universal_ko_lookup)
contrib = open(args.nsti_file)
nsti_fun = {}
for line in contrib:
if 'NSTI' not in line:
#P1-1 K00001 01edd66886a699ad420ca0d8db401937 154.0 1.091424521615875 2 308.0 2.18284904323175
asv = line.split('\t')[0]
nsti = float(line.split('\t')[2])
process = False
if asv in asv_to_taxa_dict:
taxa = asv_to_taxa_dict[asv]
if taxa in assigned_asv:
if taxa not in nsti_fun:
nsti_fun[taxa] = []
nsti_fun[taxa].append(nsti)
contrib.close()
outfile_name = args.output_file
outfile = open(outfile_name,'w')
header = ('#taxa\tmean_nsti\tmedian_nsti\tstd_nsti\tmedian-std\n')
outfile.write(header)
for taxa in nsti_fun:
nsti_list = nsti_fun[taxa]
outline = ('{}\t{}\t{}\t{}\t{}\n').format(taxa, np.mean(nsti_list), np.median(nsti_list), np.std(nsti_list), np.median(nsti_list)-np.std(nsti_list))
print(outline)
outfile.write(outline)
outfile.close()
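# Worked (hypothetical) example of one output row: a taxon with NSTI values
# [0.10, 0.20, 0.30] would be written with mean=0.20, median=0.20, std~=0.0816 and
# median-std~=0.1184, tab-separated after the taxon name.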
def define_type(s_taxa, p_taxa, raw_s_fa, raw_p_fa):
coord_dict = {'S':[0,0], 'P':[0,0]}
s_fa = np.median(raw_s_fa)
p_fa = np.median(raw_p_fa)
coord_dict = {'S':[s_taxa, s_fa], 'P':[p_taxa, p_fa]}
return(coord_dict)
def plot_funbubbles():
import plotly.graph_objects as go
import pickle
file = open('C:/Gresham/Project_Gravimondo/Project_Impact_2/sigilo_results/pathway/pct_metagenome_contrib_family.p','rb')
fun_dict = pickle.load(file)
file.close()
condition_dict = {}
site_abundance_dict = {}
taxon_set = set()
for each in fun_dict:
taxa = each.split('_')
condition = taxa[-1]
if condition not in condition_dict:
condition_dict[condition] = set()
taxa_list = taxa[1:-1]
taxon = ''
for taxa in taxa_list:
taxon += taxa +'_'
taxon = taxon[:-1]
condition_dict[condition].add(taxon)
taxon_set.add(taxon)
for site in fun_dict[each]:
if site != 'ko':
if condition not in site_abundance_dict:
site_abundance_dict[condition] = {}
if taxon not in site_abundance_dict[condition]:
site_abundance_dict[condition][taxon] = {'S':0, 'P':0}
# val = np.median(fun_dict[each][site])
#
# if val == 0:
# val = 1
#
# val = np.log10(val)
#
# if val < 0:
# val = 0
site_abundance_dict[condition][taxon][site] += fun_dict[each][site]
for condition, taxa_set in condition_dict.items():
x_compound_dict = {'S':[], 'P':[]}
y_compound_dict = {'S':[], 'P':[]}
for taxon in taxa_set:
for site in site_abundance_dict[condition][taxon]:
val = site_abundance_dict[condition][taxon][site]
x_compound_dict[site].append(val)
y_compound_dict[site].append(taxon)
outfile_name = ('family_{}.pdf').format(condition)
#import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(go.Scatter(
x=x_compound_dict['S'],
y=y_compound_dict['S'],
marker=dict(color='rgba(128, 0, 128, 0.5)', size=6),
mode="markers",
name="Impact",
))
fig.add_trace(go.Scatter(
x=x_compound_dict['P'],
y=y_compound_dict['P'],
marker=dict(color='rgba(44, 160, 101, 0.5)', size=6),
mode="markers",
name="Conserved",
))
fig.update_layout(title=condition,
xaxis_title="Percent Relative Abundance",
yaxis_title="Taxa",
font_size=10,
width=1500,
height=600)
fig.show()
fig.write_image(outfile_name)
print(len(taxon_set))
def apply_versatility():
global ko_dict
versa_dict = {}
for site in ko_dict:
for level in ko_dict[site]:
if level not in versa_dict:
versa_dict[level] = {}
if site not in versa_dict[level]:
# a set of all taxa in a site at a level
# this is for normalization of the KO-specific numbers of taxa
versa_dict[level][site] = {'total_taxa': set()}
for ko in ko_dict[site][level]:
if ko not in versa_dict[level][site]:
# the KO of every site should have:
# how many taxa have the ko:
# versa_dict[level][site][ko]['taxa_num'].add(taxon)
# a taxa specific functional abundance:
# versa_dict[level][site][ko][taxon] += (ko_dict[site][level][ko][taxon])
# a distribution of all functional abundances
# versa_dict[level][site][ko]['fa_dist'].append(versa_dict[level][site][ko][taxon])
versa_dict[level][site][ko] = {'taxa_num':set(), 'fa_dist':[]}
for taxon in ko_dict[site][level][ko]:
versa_dict[level][site][ko]['taxa_num'].add(taxon)
versa_dict[level][site]['total_taxa'].add(taxon)
if taxon not in versa_dict[level][site][ko]:
versa_dict[level][site][ko][taxon] = 0
# The KO functional abundance of a specific taxon
# not normalised
# ko_dict[site][level][ko]['total']
versa_dict[level][site][ko][taxon] += (ko_dict[site][level][ko][taxon])
versa_dict[level][site][ko]['fa_dist'].append(versa_dict[level][site][ko][taxon])
fet_ct = 0
versatile_dict = {}
for level in versa_dict:
temp_site_dict = {}
site_list = {}
for site in versa_dict[level]:
# a set of all taxa in a site at a level
total_taxa = len(versa_dict[level][site]['total_taxa'])
for ko in versa_dict[level][site]:
#filters 'total_taxa'
if 'taxa_num' in versa_dict[level][site][ko]:
#only consider those sites with ko in each replicate
if ko not in site_list:
site_list[ko] = set()
site_list[ko].add(site)
# how many taxa have the ko:
taxa_num = len(versa_dict[level][site][ko]['taxa_num'])
fa_dist = versa_dict[level][site][ko]['fa_dist']
if ko not in temp_site_dict:
temp_site_dict[ko]={}
if site not in temp_site_dict[ko]:
temp_site_dict[ko][site]={'taxa_num': 0, 'total_taxa': total_taxa, 'fa_dist': []}
temp_site_dict[ko][site]['taxa_num'] += taxa_num
if len(fa_dist) > 1:
for each in fa_dist:
temp_site_dict[ko][site]['fa_dist'].append(each)
else:
temp_site_dict[ko][site]['fa_dist'].append(fa_dist[0])
#temp_site_dict[ko][site]['total_taxa'] = total_taxa
for ko in temp_site_dict:
process = True
#ko_sites = site_list[ko]
# print(ko_sites)
#for each in ['S', 'V', 'P']:
# for each in ['S1', 'S2', 'S3', 'V1', 'V2', 'V3', 'P1', 'P2', 'P3']:
# if each not in ko_sites:
# process = False
if process:
uname = ('{}_{}').format(level, ko)
print(uname)
# for x_sample_set in [['M1', 'M2', 'M3'], ['P1', 'P2', 'P3']]:
# for y_sample_set in [['M1', 'M2', 'M3'], ['P1', 'P2', 'P3']]:
# if x_sample_set != y_sample_set:
x_set = []
x_t_set = []
x_fa = []
x_ratio = []
y_set = []
y_t_set = []
y_fa = []
y_ratio = []
z_set = []
z_t_set = []
z_fa = []
z_ratio = []
for x in ['S1', 'S2', 'S3']:
if x in temp_site_dict[ko]:
x_num = temp_site_dict[ko][x]['taxa_num']
x_total = temp_site_dict[ko][x]['total_taxa']
x_fa_dist = temp_site_dict[ko][x]['fa_dist']
else:
x_num = 0
x_total = 1
x_fa_dist = [1]
x_set.append(x_num)
x_t_set.append(x_total)
if len(x_fa_dist) > 1:
for each in x_fa_dist:
x_fa.append(each)
x_ratio.append((x_num/x_total)/each)
else:
x_ratio.append((x_num/x_total)/x_fa_dist[0])
x_fa.append(x_fa_dist[0])
# for y in ['V1', 'V2']:
# if y in temp_site_dict[ko]:
# y_num = temp_site_dict[ko][y]['taxa_num']
# y_total = temp_site_dict[ko][y]['total_taxa']
# y_fa_dist = temp_site_dict[ko][y]['fa_dist']
# else:
# y_num = 0
# y_total = 1
# y_fa_dist = [1]
#
# y_set.append(y_num)
# y_t_set.append(y_total)
#
# if len(y_fa_dist) > 1:
# for each in y_fa_dist:
# y_fa.append(each)
# y_ratio.append((y_num/y_total)/each)
# else:
# y_ratio.append((y_num/y_total)/fa_dist[0])
# y_fa.append(fa_dist[0])
for z in ['P1', 'P2', 'P3']:
if z in temp_site_dict[ko]:
z_num = temp_site_dict[ko][z]['taxa_num']
z_total = temp_site_dict[ko][z]['total_taxa']
z_fa_dist = temp_site_dict[ko][z]['fa_dist']
else:
z_num = 0
z_total = 1
z_fa_dist = [1]
z_set.append(z_num)
z_t_set.append(z_total)
if len(z_fa_dist) > 1:
for each in z_fa_dist:
z_fa.append(each)
z_ratio.append((z_num/z_total)/each)
else:
z_ratio.append((z_num/z_total)/z_fa_dist[0])
z_fa.append(z_fa_dist[0])
print(x_ratio)
#print(y_ratio)
print(z_ratio)
#_u, mwu_pval_1 = run_mwu(x_ratio, y_ratio)
#_u, mwu_pval_2 = run_mwu(y_ratio, z_ratio)
_u, mwu_pval_3 = run_mwu(x_ratio, z_ratio)
if (mwu_pval_3) <= 0.05:
# obs = np.array([x_set, y_set, z_set])
# chi2, pval, dof, expected = stats.chi2_contingency(obs, correction=True)
# if pval <= 0.05:
# if uname not in versatile_dict:
# versatile_dict[uname] = {}
#
# versatile_dict[uname]['M_fa'] = x_fa
# versatile_dict[uname]['P_fa'] = y_fa
# versatile_dict[uname]['M_num'] = sum(x_set)
# versatile_dict[uname]['P_num'] = sum(y_set)
# versatile_dict[uname]['M_norm'] = sum(x_t_set)
# versatile_dict[uname]['P_norm'] = sum(y_t_set)
#_od, fet_pval= stats.fisher_exact([[sum(x_set), sum(x_t_set)-sum(x_set)],[sum(y_set), sum(y_t_set)-sum(y_set)]])
#if pval <= 0.05:
print(ko)
if uname not in versatile_dict:
versatile_dict[uname] = {}
#coord_dict = define_type((sum(x_set)/sum(x_t_set)), (sum(y_set)/sum(y_t_set)), x_fa, y_fa)
versatile_dict[uname]['S_fa'] = x_fa
#versatile_dict[uname]['V_fa'] = y_fa
versatile_dict[uname]['P_fa'] = z_fa
versatile_dict[uname]['S_num'] = sum(x_set)
#versatile_dict[uname]['V_num'] = sum(y_set)
versatile_dict[uname]['P_num'] = sum(z_set)
versatile_dict[uname]['S_norm'] = sum(x_t_set)
#versatile_dict[uname]['V_norm'] = sum(y_t_set)
versatile_dict[uname]['P_norm'] = sum(z_t_set)
#versatile_dict[uname]['istype'] = istype
#versatile_dict[uname]['ratio_taxa'] = ratio_taxa
#versatile_dict[uname]['ratio_fa'] = ratio_fa
versatile_dict[uname]['S_ratio'] = x_ratio
#versatile_dict[uname]['V_ratio'] = y_ratio
versatile_dict[uname]['P_ratio'] = z_ratio
# print(x_fa)
# print(y_fa)
fet_ct+=1
# if level not in versatile_dict:
# versatile_dict[level] = set()
# versatile_dict[level].add(ko)
#
#print(obs, pval, chi_ct, fet_pval, fet_ct)
pickle_name = ('plot_versatility.p').format()
pickle.dump(versatile_dict, open(pickle_name, 'wb'))
return(versa_dict)
def make_versatility_figures():
import plotly.graph_objects as go
convert_rank_to_taxa = {0:'kingdom', 1:'phylum', 2:'class', 3:'order', 4:'family', 5:'genus', 6:'species'}
file = open('plot_versatility.p','rb')
versatile_dict = pickle.load(file)
file.close()
for uname in versatile_dict:
i_tag = ('Impacted, n = {} of {}: {}. Median = {}').format(versatile_dict[uname]['M_num'], versatile_dict[uname]['M_norm'], round(versatile_dict[uname]['M_num']/versatile_dict[uname]['M_norm'],2), np.median(versatile_dict[uname]['M_fa']))
p_tag = ('Pristine, n = {} of {}: {}. Median = {}').format(versatile_dict[uname]['P_num'], versatile_dict[uname]['P_norm'], round(versatile_dict[uname]['P_num']/versatile_dict[uname]['P_norm'],2), np.median(versatile_dict[uname]['P_fa']))
#s_tag = ('Source').format(path)
#v_tag = ('Valley').format(path)
x_data = i_tag, p_tag
i_list = return_log10(versatile_dict[uname]['M_fa'])
p_list = return_log10(versatile_dict[uname]['P_fa'])
#s_list = return_log10(plot_round[uname]['S'])
#v_list = return_log10(plot_round[uname]['V'])
y_data = i_list, p_list
ko = uname.split('_')[1]
taxa_level = convert_rank_to_taxa[int(uname.split('_')[0])]
fig = go.Figure()
colors = 'rgba(255, 144, 14, 0.5)', 'rgba(44, 160, 101, 0.5)'
outfile_name = ('{}_{}_versatility.pdf').format(taxa_level, ko)
print(outfile_name)
for xd, yd, cls in zip(x_data, y_data, colors):
fig.add_trace(go.Box(
#,
y=yd,
name=xd,
boxpoints='all',
notched=True,
jitter=0.5,
whiskerwidth=0.2,
fillcolor=cls,
line_color=cls,
marker_size=5,
line_width=1,
showlegend=False)
)
fig.update_layout(
title=ko,
xaxis_title="Sample Site",
yaxis_title="Log10(Relative Functional Abundance)",
font=dict(
family="Courier New, monospace",
size=10,
color="#7f7f7f"
)
)
fig.show()
fig.write_image(outfile_name)
x_list = []
y_list = []
size_list = []
istype_list = []
uname_list = []
type_dict = {}
for uname in versatile_dict:
if 'K02591' in uname:
size_list.append(int(uname.split('_')[0])**3)
x_list.append(np.log10(versatile_dict[uname]['ratio_taxa']))
y_list.append(np.log10(versatile_dict[uname]['ratio_fa']))
istype = versatile_dict[uname]['istype']
istype_list.append(istype)
uname_list.append(uname)
if istype not in | |
#!/usr/bin/env/python
#
# -*- coding: utf-8 -*-
###
# Copyright (c) 2016, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import re
import sys
import time
import urllib
import sqlite3
import httplib
import threading
import dns.resolver
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.ircdb as ircdb
import supybot.world as world
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.commands as commands
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.schedule as schedule
import supybot.registry as registry
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Sigyn')
except:
_ = lambda x:x
def repetitions(s):
# yields (pattern, count) pairs, used to detect repeated patterns inside a single string.
r = re.compile(r"(.+?)\1+")
for match in r.finditer(s):
yield (match.group(1), len(match.group(0))/len(match.group(1)))
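# Illustrative example: repetitions("heheheho") yields ("he", 3), i.e. the substring
# "he" repeated three times at the start of the string.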
def isCloaked (prefix):
if not ircutils.isUserHostmask(prefix):
return False
(nick,ident,host) = ircutils.splitHostmask(prefix)
if '/' in host:
if host.startswith('gateway/') or host.startswith('nat/'):
return False
return True
return False
def compareString (a,b):
"""return 0 to 1 float percent of similarity ( 0.85 seems to be a good average )"""
if a == b:
return 1
sa, sb = set(a), set(b)
n = len(sa.intersection(sb))
if float(len(sa) + len(sb) - n) == 0:
return 0
jacc = n / float(len(sa) + len(sb) - n)
return jacc
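# Illustrative examples: compareString("spam", "spams") compares the character sets
# {s,p,a,m} and {s,p,a,m}, giving 1.0; compareString("abc", "abd") gives 2/4 = 0.5.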
def largestString (s1,s2):
"""return largest pattern available in 2 strings"""
# From https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring#Python2
# License: CC BY-SA
m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
longest, x_longest = 0, 0
for x in xrange(1, 1 + len(s1)):
for y in xrange(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
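# Illustrative example: largestString("freenode-spam", "spam-wave") returns "spam",
# the longest substring common to both inputs.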
def floatToGMT (t):
f = None
try:
f = float(t)
except:
return None
return time.strftime('%Y-%m-%d %H:%M:%S GMT',time.gmtime(f))
def _getRe(f):
def get(irc, msg, args, state):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
foo = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
state.args.append([s,f(s)])
else:
state.errorInvalid('regular expression', s)
except IndexError:
args[:] = original
state.errorInvalid('regular expression', s)
return get
getPatternAndMatcher = _getRe(utils.str.perlReToPythonRe)
addConverter('getPatternAndMatcher', getPatternAndMatcher)
class Ircd (object):
def __init__(self,irc):
self.irc = irc
# contains Chan instances
self.channels = {}
# contains Pattern instances
self.patterns = {}
# contains whowas requested for a short period of time
self.whowas = {}
# contains klines requested for a short period of time
self.klines = {}
# contains various TimeoutQueue for detection purpose
# usually keyed by host, each entry holding various TimeoutQueues and other elements
self.queues = {}
# flag or time
self.opered = False
# flag or time
self.defcon = False
# used for temporary storage of outgoing actions
self.pending = {}
self.logs = {}
# contains servers notices when full or in bad state
# [servername] = time.time()
self.limits = {}
# current ip to dline, one at time, due to testline limitation
self.dline = ''
# flag or time
self.efnet = False
# { ip : message }
self.digs = {}
# flag or time
self.netsplit = False
self.tors = {}
self.ping = None
self.servers = {}
self.resolving = {}
def __repr__(self):
return '%s(patterns=%r, queues=%r, channels=%r, pending=%r, logs=%r, digs=%r, limits=%r, whowas=%r, klines=%r)' % (self.__class__.__name__,
self.patterns, self.queues, self.channels, self.pending, self.logs, self.digs, self.limits, self.whowas, self.klines)
def restore (self,db):
c = db.cursor()
c.execute("""SELECT id, pattern, regexp, mini, life FROM patterns WHERE removed_at is NULL""")
items = c.fetchall()
if len(items):
for item in items:
(uid,pattern,regexp,limit,life) = item
if regexp == 1:
regexp = True
else:
regexp = False
self.patterns[uid] = Pattern(uid,pattern,regexp,limit,life)
c.close()
def add (self,db,prefix,pattern,limit,life,regexp):
c = db.cursor()
t = 0
if regexp:
t = 1
c.execute("""INSERT INTO patterns VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, NULL, NULL)""", (pattern,t,limit,life,prefix,'',0,float(time.time())))
uid = int(c.lastrowid)
self.patterns[uid] = Pattern(uid,pattern,regexp,limit,life)
db.commit()
c.close()
return uid
def count(self,db,uid):
uid = int(uid)
if uid in self.patterns:
c = db.cursor()
c.execute("""SELECT id, triggered FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
if len(items):
(uid,triggered) = items[0]
triggered = int(triggered + 1)
c.execute("""UPDATE patterns SET triggered=? WHERE id=?""",(triggered,uid))
db.commit()
c.close()
def ls (self,db,pattern,deep=False):
c = db.cursor()
glob = '*%s*' % pattern
like = '%'+pattern+'%'
i = None
try:
i = int(pattern)
except:
i = None
if i:
c.execute("""SELECT id, pattern, regexp, operator, at, triggered, removed_at, removed_by, comment, mini, life FROM patterns WHERE id=? LIMIT 1""",(i,))
else:
if deep:
c.execute("""SELECT id, pattern, regexp, operator, at, triggered, removed_at, removed_by, comment, mini, life FROM patterns WHERE id GLOB ? OR id LIKE ? OR pattern GLOB ? OR pattern LIKE ? OR comment GLOB ? OR comment LIKE ? ORDER BY id DESC""",(glob,like,glob,like,glob,like))
else:
c.execute("""SELECT id, pattern, regexp, operator, at, triggered, removed_at, removed_by, comment, mini, life FROM patterns WHERE (id GLOB ? OR id LIKE ? OR pattern GLOB ? OR pattern LIKE ? OR comment GLOB ? OR comment LIKE ?) and removed_at is NULL ORDER BY id DESC""",(glob,like,glob,like,glob,like))
items = c.fetchall()
c.close()
if len(items):
results = []
for item in items:
(uid,pattern,regexp,operator,at,triggered,removed_at,removed_by,comment,limit,life) = item
end = ''
if i:
if removed_by:
end = ' - disabled on %s by %s - ' % (floatToGMT(removed_at),removed_by.split('!')[0])
results.append('#%s "%s" by %s on %s (%s calls) %s/%ss%s(%s)' % (uid,pattern,operator.split('!')[0],floatToGMT(at),triggered,limit,life,end,comment))
else:
if removed_by:
end = ' (disabled)'
results.append('[#%s "%s" (%s calls) %s/%ss%s]' % (uid,pattern,triggered,limit,life,end))
return results
return []
def edit (self,db,uid,limit,life,comment):
c = db.cursor()
uid = int(uid)
c.execute("""SELECT id, life FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
if len(items):
if comment:
c.execute("""UPDATE patterns SET life=?, mini=?, comment=? WHERE id=? LIMIT 1""",(life,limit,comment,uid))
else:
c.execute("""UPDATE patterns SET life=?, mini=? WHERE id=? LIMIT 1""",(life,limit,uid))
db.commit()
if uid in self.patterns:
self.patterns[uid].life = life
self.patterns[uid].limit = limit
found = True
c.close()
return (len(items))
def toggle (self,db,uid,prefix,active):
c = db.cursor()
uid = int(uid)
c.execute("""SELECT id, pattern, regexp, mini, life, removed_at, removed_by FROM patterns WHERE id=? LIMIT 1""",(uid,))
items = c.fetchall()
updated = False
if len(items):
(id,pattern,regexp,limit,life,removed_at,removed_by) = items[0]
if active and removed_at:
c.execute("""UPDATE patterns SET removed_at=NULL, removed_by=NULL WHERE id=? LIMIT 1""",(uid,))
self.patterns[uid] = Pattern(uid,pattern,regexp == 1,limit,life)
updated = True
elif not removed_at:
c.execute("""UPDATE patterns SET removed_at=?, removed_by=? WHERE id=? LIMIT 1""",(float(time.time()),prefix,uid))
if uid in self.patterns:
del self.patterns[uid]
updated = True
db.commit()
c.close()
return updated
class Chan (object):
def __init__(self,channel):
self.channel = channel
self.patterns = None
self.buffers = {}
self.logs = {}
self.nicks = {}
self.called = False
def __repr__(self):
return '%s(channel=%r, patterns=%r, buffers=%r, logs=%r, nicks=%r)' % (self.__class__.__name__,
self.channel, self.patterns, self.buffers, self.logs, self.nicks)
class Pattern (object):
def __init__(self,uid,pattern,regexp,limit,life):
self.uid = uid
self.pattern = pattern
self.limit = limit
self.life = life
self._match = False
if regexp:
self._match = utils.str.perlReToPythonRe(pattern)
def match (self,text):
| |
import traceback, sys, time, signal, importlib, yaml, os, os.path, datetime
from pythreader import Task, TaskQueue, Primitive, synchronized, PyThread, LogFile
from webpie import Logged, Logger, HTTPServer, RequestProcessor, yaml_expand as expand, init_uid
from multiprocessing import Process, Pipe
import re, socket
setproctitle = None
try: from setproctitle import setproctitle
except: pass
RequestTask = RequestProcessor
#class RequestTask(RequestProcessor, Task):
#
# def __init__(self, wsgi_app, request, logger):
# #print("RequestTask.__init__: args:", wsgi_app, request, logger)
# Task.__init__(self, name=f"[RequestTask {request.Id}]")
# RequestProcessor.__init__(self, wsgi_app, request, logger)
class Service(Primitive, Logged):
def __init__(self, config, logger=None):
name = config["name"]
#print("Service(): config:", config)
self.ServiceName = name
Primitive.__init__(self, name=f"[service {name}]")
Logged.__init__(self, f"[app {name}]", logger, debug=True)
self.Config = None
self.Initialized = self.initialize(config)
@synchronized
def initialize(self, config=None):
config = config or self.Config
self.Config = config
reload_files = config.get("touch_reload", [])
if isinstance(reload_files, str):
reload_files = [reload_files]
self.ReloadFileTimestamps = {path: self.mtime(path) for path in reload_files}
self.Prefix = config.get("prefix", "/")
self.ReplacePrefix = config.get("replace_prefix")
self.Timeout = config.get("timeout", 10)
saved_path = sys.path[:]
saved_modules = set(sys.modules.keys())
saved_environ = os.environ.copy()
try:
args = None
if "file" in config:
print('*** Use of "file" parameter is deprecated. Use "module" instead')
self.ScriptFileName = fname = config.get("module", config.get("file"))
g = {}
extra_path = config.get("python_path")
if extra_path is not None:
if isinstance(extra_path, str):
extra_path = [extra_path]
sys.path = extra_path + sys.path
if "env" in config:
os.environ.update(config["env"])
try: exec(open(fname, "r").read(), g)
except:
tb = traceback.format_exc()
self.log_error(f"Error importing module {fname}:\n{tb}")
return False
if "create" in config:
# deprecated
print('*** Use of "create" parameter is deprecated. Use "application: function()" instead')
application = config["create"] + "()"
else:
application = config.get("application", "application")
if application.endswith("()"):
args = config.get("args")
fcn_name = application[:-2]
fcn = g.get(fcn_name)
if fcn is None:
self.log_error(f"Application creation function {fcn_name} not found in module {fname}")
return False
try:
if isinstance(args, dict):
app = fcn(**args)
elif isinstance(args, (list, tuple)):
app = fcn(*args)
elif args is None:
app = fcn()
else:
app = fcn(args)
except:
tb = traceback.format_exc()
self.log_error(f"Error calling the application initialization function:\n{tb}")
return False
if app is None:
self.log_error(f'Application creation function {fcn_name} returned None')
return False
else:
app = g.get(application)
if app is None:
self.log_error(f'Application object "{application}" not found in {fname}')
return False
self.AppArgs = args
self.WSGIApp = app
max_workers = config.get("max_workers", 5)
queue_capacity = config.get("queue_capacity", 10)
self.RequestQueue = TaskQueue(max_workers, capacity = queue_capacity,
delegate=self)
self.log("initiaized")
except:
tb = traceback.format_exc()
self.log_error(f"Error initializing application:\n{tb}")
return False
finally:
sys.path = saved_path
extra_modules = set(sys.modules.keys()) - set(saved_modules)
#print("loadApp: removing modules:", sorted(list(extra_modules)))
for m in extra_modules:
del sys.modules[m]
for n in set(os.environ.keys()) - set(saved_environ.keys()):
del os.environ[n]
os.environ.update(saved_environ)
return True
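# A hedged example of the per-service configuration consumed above; the keys follow
# the code in initialize(), the values are purely illustrative:
#
#   name:           metadata
#   prefix:         /metadata
#   module:         metadata_server.py
#   application:    create_application()
#   args:           { db_url: "postgresql://..." }
#   max_workers:    5
#   queue_capacity: 10
#   touch_reload:   [ metadata_server.py ]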
def taskFailed(self, queue, task, exc_type, exc_value, tb):
self.log_error("request failed:", "".join(traceback.format_exception(exc_type, exc_value, tb)))
try:
task.Request.close()
except:
pass
def accept(self, request):
#print(f"Service {self}: accept()")
if not self.Initialized:
return False
header = request.HTTPHeader
uri = header.URI
self.debug("accept: uri:", uri, " prefix:", self.Prefix)
#print("Sevice", self," accept: uri:", uri, " prefix:", self.Prefix)
if uri.startswith(self.Prefix):
uri = uri[len(self.Prefix):]
if not uri.startswith("/"): uri = "/" + uri
if self.ReplacePrefix:
uri = self.ReplacePrefix + uri
header.replaceURI(uri)
request.AppName = self.ServiceName
script_path = self.Prefix
while script_path and script_path.endswith("/"):
script_path = script_path[:-1]
request.Environ["SCRIPT_NAME"] = script_path
request.Environ["SCRIPT_FILENAME"] = self.ScriptFileName
self.RequestQueue.addTask(RequestTask(self.WSGIApp, request, self.Logger))
#print("Service", self, " accepted")
return True
else:
#print("Service", self, " rejected")
return False
def close(self):
self.RequestQueue.hold()
def join(self):
self.RequestQueue.join()
def mtime(self, path):
try: return os.path.getmtime(path)
except: return None
def reloadIfNeeded(self):
for path, old_timestamp in self.ReloadFileTimestamps.items():
mt = self.mtime(path)
if mt is not None and mt != old_timestamp:
ct = time.ctime(mt)
self.log(f"file {path} was modified at {ct}")
break
else:
return False
self.Initialized = self.initialize()
class MPLogger(PyThread):
def __init__(self, logger, queue_size=-1, debug=False, name=None):
import multiprocessing
PyThread.__init__(self, name=name, daemon=True)
self.Logger = logger
self.Queue = multiprocessing.Queue(queue_size)
self.Debug = debug
def run(self):
#
# master side
#
from queue import Empty
while True:
msg = self.Queue.get()
who, t = msg[:2]
parts = [str(p) for p in msg[2:]]
t = datetime.datetime.fromtimestamp(t)
process_timestamp = t.strftime("%m/%d/%Y %H:%M:%S") + ".%03d" % (t.microsecond//1000)
self.Logger.log(who, "%s: %s" % (process_timestamp, " ".join(parts)))
def log(self, who, *parts):
#
# subprocess side
#
parts = tuple(str(p) for p in parts)
self.Queue.put((who, time.time())+parts)
def debug(self, who, *parts):
#
# subprocess side
#
if self.Debug:
self.log(f"{who} [DEBUG]", *parts)
def error(self, who, *parts):
#
# subprocess side
#
self.log(f"{who} [ERROR]", *parts)
class MultiServerSubprocess(Process):
def __init__(self, port, sock, config_file, logger=None):
Process.__init__(self, daemon=True)
#print("MultiServerSubprocess.__init__: logger:", logger)
self.Sock = sock
self.Logger = logger
self.Port = port
self.Server = None
self.ConnectionToMaster, self.ConnectionToSubprocess = Pipe()
self.ConfigFile = config_file # path
self.ReconfiguredTime = 0
self.Services = []
self.MasterSide = True
self.Stop = False
self.MasterPID = os.getpid()
def log(self, *parts):
mypid = os.getpid()
self.Logger.log(f"[Subprocess {mypid}]", *parts)
def reconfigure(self):
#print("MultiServerSubprocess.reconfigure()...")
self.ReconfiguredTime = os.path.getmtime(self.ConfigFile)
self.Config = config = expand(yaml.load(open(self.ConfigFile, 'r'), Loader=yaml.SafeLoader))
templates = config.get("templates", {})
services = config.get("services", [])
service_list = []
assert isinstance(services, list)
for svc_cfg in services:
#print("svc_cfg:", svc_cfg)
svc = None
if "template" in svc_cfg:
template = templates.get(svc_cfg.get("template", "*"))
if template is not None:
c = {}
c.update(template)
c.update(svc_cfg)
svc_cfg = expand(c)
names = svc_cfg.get("names", [svc_cfg.get("name")])
for name in names:
c = svc_cfg.copy()
c["name"] = name
svc = Service(expand(c), self.Logger)
if svc.Initialized:
service_list.append(svc)
#print("Service", svc, "created and added to the list")
else:
self.log(f'service "{svc.ServiceName}" failed to initialize - removing from service list')
else:
#print("MultiServerSubprocess.reconfigure: svc_cfg:", svc_cfg)
#print("MultiServerSubprocess.reconfigure: expanded:", expand(svc_cfg))
svc = Service(expand(svc_cfg), self.Logger)
if not svc.Initialized:
#print("service not initialzed")
self.log(f'service "{svc.ServiceName}" failed to initialize - removing from service list')
else:
service_list.append(svc)
#print("Service", svc, "created and added to the list")
#print("--------")
names = ",".join(s.Name for s in service_list)
if self.Server is None:
self.Server = HTTPServer.from_config(self.Config, service_list, logger=self.Logger)
self.log(f"server created with services: {names}")
else:
self.Server.setServices(service_list)
self.log(f"server reconfigured with services: {names}")
self.Services = service_list
self.log("reconfigured")
#print("MultiServerSubprocess.reconfigure() done")
CheckConfigInterval = 5.0
def run(self):
init_uid(tag="%03d" % (os.getpid() % 1000,))
#print("MultiServerSubprocess.run()...")
if setproctitle is not None:
setproctitle("multiserver %s worker" % (self.Port,))
pid = os.getpid()
self.LogName = f"MultiServerSubprocess({pid})"
self.reconfigure()
self.MasterSide = False
self.Sock.settimeout(5.0)
last_check_config = 0
while not self.Stop:
# see if the parent process is still alive
try: os.kill(self.MasterPID, 0)
except:
print("master process died")
break
try: csock, caddr = self.Sock.accept()
except socket.timeout:
pass
else:
#print("run(): services:", [str(s) for s in self.Services])
self.Server.connection_accepted(csock, caddr)
if self.ConnectionToMaster.poll(0):
msg = self.ConnectionToMaster.recv()
self.log("message from master:", msg)
if msg == "stop":
self.Stop = True
elif msg == "reconfigure":
self.reconfigure()
if not self.Stop and time.time() > last_check_config + self.CheckConfigInterval:
if os.path.getmtime(self.ConfigFile) > self.ReconfiguredTime:
self.reconfigure()
else:
for svc in self.Services:
if isinstance(svc, Service):
svc.reloadIfNeeded()
last_check_config = time.time()
self.Server.close()
self.Server.join()
for svc in self.Services:
svc.close()
svc.join()
def stop(self):
if self.MasterSide:
self.ConnectionToSubprocess.send("stop")
else:
self.Stop = True
def request_reconfigure(self):
self.ConnectionToSubprocess.send("reconfigure")
class MPMultiServer(PyThread, Logged):
def __init__(self, config_file, logger=None, debug=False):
PyThread.__init__(self)
Logged.__init__(self, "[Multiserver]", logger, debug=debug)
self.ConfigFile = config_file
self.Server = None
self.Port = None
self.ReconfiguredTime = 0
self.Subprocesses = []
self.Sock = None
self.Stop = False
self.MPLogger = None
if logger is not None:
self.MPLogger = MPLogger(logger, debug=debug)
self.MPLogger.start()
self.Debug = debug
self.reconfigure()
@synchronized
def reconfigure(self, *ignore):
self.ReconfiguredTime = os.path.getmtime(self.ConfigFile)
self.Config = config = expand(yaml.load(open(self.ConfigFile, 'r'), Loader=yaml.SafeLoader))
port = self.Config["port"]
if self.Port is None:
self.Port = port
self.Sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.Sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.Sock.bind(('', self.Port))
self.Sock.listen(10)
elif port != self.Port:
print("Can not change port number")
sys.exit(1)
new_nprocesses = self.Config.get("processes", 1)
if new_nprocesses > len(self.Subprocesses):
for p in self.Subprocesses:
p.request_reconfigure()
for _ in range(new_nprocesses - len(self.Subprocesses)):
p = MultiServerSubprocess(self.Port, self.Sock, self.ConfigFile, logger=self.MPLogger)
p.start()
self.Subprocesses.append(p)
#self.log("started new subprocess")
elif new_nprocesses < len(self.Subprocesses):
while new_nprocesses < len(self.Subprocesses):
p = self.Subprocesses.pop()
p.stop()
#self.log("stopped a subprocess")
for p in self.Subprocesses:
p.request_reconfigure()
else:
for p in self.Subprocesses:
p.request_reconfigure()
#self.log("subprocesses running now:", len(self.Subprocesses))
def run(self):
if setproctitle is not None:
setproctitle("multiserver %s master" % (self.Port,))
while not self.Stop:
time.sleep(5)
if os.path.getmtime(self.ConfigFile) > self.ReconfiguredTime:
self.reconfigure()
self.check_children()
@synchronized
def check_children(self, *ignore):
#print("child died")
n_died = 0
alive = []
for p in self.Subprocesses:
if not p.is_alive():
print("subprocess died with status", p.exitcode, file=sys.stderr)
self.log("subprocess died with status", p.exitcode)
n_died += 1
else:
alive.append(p)
self.Subprocesses = alive
if n_died and not self.Stop:
#time.sleep(5) # do not restart subprocesses too often
for _ in range(n_died):
| |
"rili": 36472,
"schop": 36473,
"svar": 36474,
"ást": 36475,
"école": 36476,
"āva": 36477,
"ıdır": 36478,
"Л": 36479,
"ской": 36480,
"▁Akt": 36481,
"▁Apel": 36482,
"▁Dragnea": 36483,
"▁Flüge": 36484,
"▁Giardia": 36485,
"▁Höf": 36486,
"▁Intensiv": 36487,
"▁Kız": 36488,
"▁Libro": 36489,
"▁Mwana": 36490,
"▁Märk": 36491,
"▁Poesía": 36492,
"▁Semana": 36493,
"▁Treff": 36494,
"▁Umeå": 36495,
"▁acquire": 36496,
"▁bingo": 36497,
"▁concern": 36498,
"▁corte": 36499,
"▁definition": 36500,
"▁election": 36501,
"▁exploit": 36502,
"▁folklor": 36503,
"▁indonesia": 36504,
"▁infant": 36505,
"▁interne": 36506,
"▁judi": 36507,
"▁masala": 36508,
"▁mtu": 36509,
"▁nato": 36510,
"▁parodi": 36511,
"▁passe": 36512,
"▁peu": 36513,
"▁quis": 36514,
"▁rodo": 36515,
"▁shower": 36516,
"▁significant": 36517,
"▁sonda": 36518,
"▁taking": 36519,
"▁texture": 36520,
"▁understand": 36521,
"▁values": 36522,
"▁victim": 36523,
"▁Água": 36524,
"卫": 36525,
"岭": 36526,
"忍": 36527,
"胤": 36528,
"豫": 36529,
"래": 36530,
"석": 36531,
"혜": 36532,
"Kanak": 36533,
"Trøndelag": 36534,
"ZAK": 36535,
"angkat": 36536,
"cité": 36537,
"dhiya": 36538,
"ebat": 36539,
"estar": 36540,
"etara": 36541,
"ferd": 36542,
"fyll": 36543,
"idean": 36544,
"itario": 36545,
"ités": 36546,
"jame": 36547,
"lingan": 36548,
"ljub": 36549,
"osas": 36550,
"pavil": 36551,
"poana": 36552,
"poj": 36553,
"stati": 36554,
"stelle": 36555,
"vazi": 36556,
"yty": 36557,
"zeti": 36558,
"zlar": 36559,
"áis": 36560,
"ürü": 36561,
"нов": 36562,
"▁Bahnhof": 36563,
"▁Békés": 36564,
"▁Dreh": 36565,
"▁Escola": 36566,
"▁Greit": 36567,
"▁Helsingin": 36568,
"▁Hodi": 36569,
"▁Joka": 36570,
"▁Kaffe": 36571,
"▁Kambi": 36572,
"▁Khuda": 36573,
"▁Kikwete": 36574,
"▁Kristiansand": 36575,
"▁Lauf": 36576,
"▁Pensi": 36577,
"▁Potential": 36578,
"▁Rady": 36579,
"▁Reiz": 36580,
"▁Waar": 36581,
"▁Warna": 36582,
"▁abstract": 36583,
"▁batu": 36584,
"▁berte": 36585,
"▁bola": 36586,
"▁celebrate": 36587,
"▁commun": 36588,
"▁corri": 36589,
"▁dif": 36590,
"▁favorite": 36591,
"▁fermentum": 36592,
"▁hapa": 36593,
"▁hatin": 36594,
"▁humbur": 36595,
"▁koji": 36596,
"▁koo": 36597,
"▁lank": 36598,
"▁lav": 36599,
"▁mire": 36600,
"▁planning": 36601,
"▁ponto": 36602,
"▁rana": 36603,
"▁seu": 36604,
"▁slipper": 36605,
"▁spambots": 36606,
"▁strap": 36607,
"▁tonn": 36608,
"▁transf": 36609,
"▁vehicles": 36610,
"▁vini": 36611,
"▁จาก": 36612,
"交": 36613,
"怀": 36614,
"是": 36615,
"袁": 36616,
"貝": 36617,
"走": 36618,
"BUD": 36619,
"ISTI": 36620,
"[31]": 36621,
"blogg": 36622,
"broj": 36623,
"contract": 36624,
"egli": 36625,
"elni": 36626,
"fry": 36627,
"iiii": 36628,
"nagy": 36629,
"nija": 36630,
"owni": 36631,
"reje": 36632,
"sikia": 36633,
"tempe": 36634,
"tinggi": 36635,
"vatel": 36636,
"üstü": 36637,
"ή": 36638,
"Во": 36639,
"ское": 36640,
"▁Autónoma": 36641,
"▁Belang": 36642,
"▁Disse": 36643,
"▁Kadıköy": 36644,
"▁Kanun": 36645,
"▁Llobregat": 36646,
"▁Møller": 36647,
"▁Oficial": 36648,
"▁Perşembe": 36649,
"▁Rodzin": 36650,
"▁Sometimes": 36651,
"▁Västra": 36652,
"▁[0]": 36653,
"▁ataca": 36654,
"▁bisa": 36655,
"▁bless": 36656,
"▁costume": 36657,
"▁diverse": 36658,
"▁ecc": 36659,
"▁gull": 36660,
"▁handle": 36661,
"▁infrastructure": 36662,
"▁ish": 36663,
"▁limita": 36664,
"▁mandra": 36665,
"▁maza": 36666,
"▁minibar": 36667,
"▁nutri": 36668,
"▁outdoor": 36669,
"▁peuple": 36670,
"▁pira": 36671,
"▁rates": 36672,
"▁recog": 36673,
"▁respect": 36674,
"▁rhad": 36675,
"▁sont": 36676,
"▁taxa": 36677,
"▁timp": 36678,
"▁valde": 36679,
"▁Во": 36680,
"ホ": 36681,
"マン": 36682,
"仪": 36683,
"実": 36684,
"开": 36685,
"於": 36686,
"話": 36687,
"1⁄2": 36688,
"Nkosi": 36689,
"Saya": 36690,
"[32]": 36691,
"akit": 36692,
"alna": 36693,
"alternativ": 36694,
"dagar": 36695,
"eerd": 36696,
"exist": 36697,
"fluent": 36698,
"hasta": 36699,
"iiiiii": 36700,
"imine": 36701,
"irg": 36702,
"lakshmi": 36703,
"lő": 36704,
"tería": 36705,
"ért": 36706,
"üne": 36707,
"ūta": 36708,
"ен": 36709,
"ии": 36710,
"▁2014-": 36711,
"▁Although": 36712,
"▁György": 36713,
"▁Identifi": 36714,
"▁Kodu": 36715,
"▁Muharrem": 36716,
"▁Protest": 36717,
"▁Putih": 36718,
"▁Stjepan": 36719,
"▁Vestfold": 36720,
"▁Vrancea": 36721,
"▁capito": 36722,
"▁caput": 36723,
"▁commerce": 36724,
"▁compete": 36725,
"▁consider": 36726,
"▁dak": 36727,
"▁designed": 36728,
"▁efficiency": 36729,
"▁fait": 36730,
"▁habe": 36731,
"▁hahaha": 36732,
"▁hala": 36733,
"▁heu": 36734,
"▁invite": 36735,
"▁masse": 36736,
"▁mena": 36737,
"▁mental": 36738,
"▁perdu": 36739,
"▁prevent": 36740,
"▁regret": 36741,
"▁satu": 36742,
"▁seconda": 36743,
"▁slave": 36744,
"▁spela": 36745,
"▁trois": 36746,
"▁volgens": 36747,
"▁¿": 36748,
"別": 36749,
"店": 36750,
"振": 36751,
"揚": 36752,
"桑": 36753,
"猫": 36754,
"舍": 36755,
"解": 36756,
"難": 36757,
"세": 36758,
")))": 36759,
"KEL": 36760,
"addict": 36761,
"akkam": 36762,
"enean": 36763,
"eş": 36764,
"fyl": 36765,
"istik": 36766,
"klaus": 36767,
"neen": 36768,
"nuti": 36769,
"reik": 36770,
"vande": 36771,
"vuori": 36772,
"wunder": 36773,
"yör": 36774,
"ziale": 36775,
"zım": 36776,
"íd": 36777,
"łowie": 36778,
"ɣ": 36779,
"ха": 36780,
"ה": 36781,
"م": 36782,
"ย": 36783,
"ἀ": 36784,
"▁18.30": 36785,
"▁2556": 36786,
"▁=)": 36787,
"▁Adolescent": 36788,
"▁CLUB": 36789,
"▁Departamento": 36790,
"▁Fyr": 36791,
"▁Jugend": 36792,
"▁Nordea": 36793,
"▁Pazo": 36794,
"▁Sağ": 36795,
"▁Stö": 36796,
"▁Tenis": 36797,
"▁Yurt": 36798,
"▁abort": 36799,
"▁afar": 36800,
"▁agree": 36801,
"▁associa": 36802,
"▁astronom": 36803,
"▁campos": 36804,
"▁configuration": 36805,
"▁contrast": 36806,
"▁dab": 36807,
"▁diamant": 36808,
"▁dob": 36809,
"▁faith": 36810,
"▁hóa": 36811,
"▁inspir": 36812,
"▁javascript": 36813,
"▁kolo": 36814,
"▁krem": 36815,
"▁lack": 36816,
"▁llama": 36817,
"▁mie": 36818,
"▁netto": 36819,
"▁ohi": 36820,
"▁older": 36821,
"▁oy": 36822,
"▁paulista": 36823,
"▁pays": 36824,
"▁rece": 36825,
"▁requi": 36826,
"▁tamin": 36827,
"▁tribun": 36828,
"▁ón": 36829,
"▁Ā": 36830,
"▁Α": 36831,
"▁Β": 36832,
"▁η": 36833,
"▁♥": 36834,
"伝": 36835,
"卓": 36836,
"学院": 36837,
"弼": 36838,
"眼": 36839,
"香港": 36840,
"齐": 36841,
"단": 36842,
"법": 36843,
"용": 36844,
"재": 36845,
"//": 36846,
"AKO": 36847,
"CHER": 36848,
"HAMA": 36849,
"KTI": 36850,
"TIK": 36851,
"\\\\\\\\": 36852,
"ampu": 36853,
"dij": 36854,
"erings": 36855,
"financi": 36856,
"jadi": 36857,
"lasti": 36858,
"lugu": 36859,
"nö": 36860,
"oane": 36861,
"ovci": 36862,
"teko": 36863,
"tenta": 36864,
"tyt": 36865,
"ünden": 36866,
"̊": 36867,
"ми": 36868,
"сть": 36869,
"ُ": 36870,
"▁Gerne": 36871,
"▁Grup": 36872,
"▁Important": 36873,
"▁Pension": 36874,
"▁Proces": 36875,
"▁Sobre": 36876,
"▁Vantaa": 36877,
"▁Veter": 36878,
"▁amore": 36879,
"▁baja": 36880,
"▁calle": 36881,
"▁cinéma": 36882,
"▁dare": 36883,
"▁dias": 36884,
"▁forex": 36885,
"▁gaus": 36886,
"▁hei": 36887,
"▁indicate": 36888,
"▁integrat": 36889,
"▁liki": 36890,
"▁mear": 36891,
"▁modifi": 36892,
"▁parang": 36893,
"▁parking": 36894,
"▁sensu": 36895,
"▁serta": 36896,
"▁spart": 36897,
"▁temo": 36898,
"▁temple": 36899,
"▁vine": 36900,
"▁worldwide": 36901,
"▁zal": 36902,
"▁Çelik": 36903,
"▁На": 36904,
"▁Сер": 36905,
"▁محمد": 36906,
"半": 36907,
"少女": 36908,
"幽": 36909,
"界": 36910,
"考": 36911,
"蓮": 36912,
"載": 36913,
"을": 36914,
"Administr": 36915,
"RADI": 36916,
"UME": 36917,
"UNU": 36918,
"administra": 36919,
"carre": 36920,
"chová": 36921,
"euses": 36922,
"gén": 36923,
"hija": 36924,
"ografía": 36925,
"praw": 36926,
"riai": 36927,
"sled": 36928,
"sloven": 36929,
"stift": 36930,
"timet": 36931,
"urnar": 36932,
"wende": 36933,
"ész": 36934,
"ída": 36935,
"šte": 36936,
"ște": 36937,
"ей": 36938,
"บ": 36939,
"พระ": 36940,
"▁Andreea": 36941,
"▁Baat": 36942,
"▁Dawid": 36943,
"▁Gwe": 36944,
"▁Hordaland": 36945,
"▁Ils": 36946,
"▁Jär": 36947,
"▁Kaise": 36948,
"▁Osim": 36949,
"▁Rehabilita": 36950,
"▁Stiri": 36951,
"▁Straße": 36952,
"▁Vytautas": 36953,
"▁advertising": 36954,
"▁animals": 36955,
"▁bumi": 36956,
"▁donat": 36957,
"▁eating": 36958,
"▁exclu": 36959,
"▁exposure": 36960,
"▁extrem": 36961,
"▁fellow": 36962,
"▁grafit": 36963,
"▁growing": 36964,
"▁hid": 36965,
"▁hospital": 36966,
"▁kitchen": 36967,
"▁kuch": 36968,
"▁leto": 36969,
"▁marco": 36970,
"▁mende": 36971,
"▁minic": 36972,
"▁muerte": 36973,
"▁posts": 36974,
"▁praece": 36975,
"▁primer": 36976,
"▁privacy": 36977,
"▁reis": 36978,
"▁starting": 36979,
"▁straight": 36980,
"▁tablets": 36981,
"▁teach": 36982,
"▁venture": 36983,
"ざ": 36984,
"坡": 36985,
"太郎": 36986,
"汝": 36987,
"碧": 36988,
"营": 36989,
"離": 36990,
"Adresse": 36991,
"HAK": 36992,
"TIL": 36993,
"arrive": 36994,
"azione": 36995,
"dóttir": 36996,
"esten": 36997,
"hegy": 36998,
"isilla": 36999,
"jde": 37000,
"jë": 37001,
"manan": 37002,
"masing": 37003,
"mountain": 37004,
"rū": 37005,
"taler": 37006,
"tzak": 37007,
"tzu": 37008,
"zand": 37009,
"ála": 37010,
"þ": 37011,
"īd": 37012,
"Π": 37013,
"μα": 37014,
"от": 37015,
"ศ": 37016,
"▁17.30": 37017,
"▁Acord": 37018,
"▁Bä": 37019,
"▁Chambre": 37020,
"▁Devlet": 37021,
"▁Flirt": 37022,
"▁Kürt": 37023,
"▁Maret": 37024,
"▁Materi": 37025,
"▁Medicina": 37026,
"▁Mwanza": 37027,
"▁Ourense": 37028,
"▁Posta": 37029,
"▁STUDIO": 37030,
"▁Selatan": 37031,
"▁Viborg": 37032,
"▁Zeytin": 37033,
"▁agra": 37034,
"▁allan": 37035,
"▁antioxidant": 37036,
"▁buz": 37037,
"▁ceci": 37038,
"▁combination": 37039,
"▁cultural": 37040,
"▁halua": 37041,
"▁ito": 37042,
"▁kimi": 37043,
"▁likely": 37044,
"▁merk": 37045,
"▁obr": 37046,
"▁paprika": 37047,
"▁pos": 37048,
"▁prati": 37049,
"▁qual": 37050,
"▁religion": 37051,
"▁ringi": 37052,
"▁spas": 37053,
"▁summa": 37054,
"▁toalet": 37055,
"▁tut": 37056,
"▁usta": 37057,
"▁너": 37058,
"實": 37059,
"強": 37060,
"戴": 37061,
"潮": 37062,
"甫": 37063,
"略": 37064,
"竇": 37065,
"英雄": 37066,
"蒲": 37067,
"閔": 37068,
"OKA": 37069,
"Programm": 37070,
"acce": 37071,
"aḥ": 37072,
"endum": 37073,
"faire": 37074,
"graphique": 37075,
"inzi": 37076,
"läinen": 37077,
"matik": 37078,
"minimal": 37079,
"ndak": 37080,
"pyt": 37081,
"rades": 37082,
"ráz": 37083,
"siti": 37084,
"tej": 37085,
"which": 37086,
"xhi": 37087,
"ÉN": 37088,
"śli": 37089,
"ός": 37090,
"За": 37091,
"ن": 37092,
"▁(«": 37093,
"▁09.00": 37094,
"▁Ancora": 37095,
"▁Campionat": 37096,
"▁Dapur": 37097,
"▁Endre": 37098,
"▁Erste": 37099,
"▁FILM": 37100,
"▁Feminist": 37101,
"▁Geç": 37102,
"▁Korçë": 37103,
"▁Minimum": 37104,
"▁Norges": 37105,
"▁Perso": 37106,
"▁Pintu": 37107,
"▁SARA": 37108,
"▁Stoff": 37109,
"▁Tö": 37110,
"▁Uniti": 37111,
| |
for i, bh_k in enumerate(bh_l):
if bh_k in astroA.activity_ratios.keys():
activity_num_np[i] += (len(astroA.res_d['area'][astroA.event_subsets[bh_k]]) / len(astroA.indices_d[bh_k])) * astroA.minute_frames
activity_num_added[i] += 1
activity_num_np /= activity_num_added
activity_i = np.argsort(activity_num_np)
activity_num_k_s = np.array(bh_l)[activity_i]
activity_num_l_s = activity_num_np[activity_i]
activity_num_k_s[np.where(activity_num_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_num_k_s, y=activity_num_l_s, text_values=['']*len(activity_num_l_s),
text_size=20, title='Activity number',
x_title='', y_title='Events per minute in state', margin_b=150,
err_y=[], err_symmetric=None)
if with_stats:
#data = {k : areas[i] for i, k in enumerate(area_keys_s)}
return fig, {}
return fig
def get_behaviour_activity_number_dot_plot_all(self, astroA_l, bh_l, with_stats=False, lines=False):
activity_num_l = []
for bh in bh_l:
activity_bh_l = []
for i, astroA in enumerate(astroA_l):
if bh in astroA.event_subsets.keys():
num_events = len(astroA.res_d['area'][astroA.event_subsets[bh]])
num_frames = len(astroA.indices_d[bh])
activity_bh_l.append((num_events / num_frames) * astroA.minute_frames)
activity_num_l.append(activity_bh_l)
activity_means = [np.mean(activity_nums) for activity_nums in activity_num_l]
activity_i = np.argsort(activity_means)
x = np.array(bh_l)[activity_i]
y = []
for i in activity_i:
y.append(activity_num_l[i])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Activity number', x_title='', y_title='Events per minute in state', lines=lines, with_stats=True)
return fig, stats_d
def get_common_keys(self, astroA_l, bh_l):
s = set(bh_l)
for astroA in astroA_l:
s &= set(astroA.indices_d.keys())
return np.sort(list(s))
def get_all_signal_attribute_plot(self, astroA_l, bh_l, type_event='area', type_plot='bar',
y_range=None, divide_y=1, title='', x_title='', y_title='',
error_type='std', err_symmetric=True, with_stats=False):
areas = [[] for i in range(len(bh_l))]
for astroA in astroA_l:
for i, k in enumerate(bh_l):
if k in astroA.event_subsets.keys():
areas_k = astroA.res_d[type_event][astroA.event_subsets[k]]
areas[i].extend(areas_k)
areas_std = np.array([np.std(v_l) for v_l in areas])
areas_mean = np.array([np.mean(v_l) for v_l in areas])
areas_conf = []
for v_l in areas:
m, l, h = stat_utils.mean_confidence_interval(v_l, confidence=0.95)
areas_conf.append(m-l)
areas_conf = np.array(areas_conf)
areas_i = np.argsort(areas_mean)
area_keys_s = np.array(bh_l)[areas_i]
areas_s = np.array(areas)[areas_i]
areas_mean_s = np.array(areas_mean)[areas_i]
areas_std_s = np.array(areas_std)[areas_i]
areas_conf_s = np.array(areas_conf)[areas_i]
if type_plot == 'bar':
if error_type == 'std':
fig = plotly_utils.plot_bar(x=area_keys_s, y=areas_mean_s, text_values=[], text_size=20, title=title, x_title=x_title, y_title=y_title, margin_b=150, err_y=areas_std_s, err_symmetric=err_symmetric)
elif error_type == 'conf':
fig = plotly_utils.plot_bar(x=area_keys_s, y=areas_mean_s, text_values=[], text_size=20, title=title, x_title=x_title, y_title=y_title, margin_b=150, err_y=areas_conf_s, err_symmetric=err_symmetric)
elif type_plot == 'dot':
fig = plotly_utils.plot_point_box_revised(x=area_keys_s, y=areas_s, title=title, x_title=x_title, y_title=y_title, margin_b=150, y_range=y_range)
else:
return None
if with_stats:
data = {k : areas_s[i] for i, k in enumerate(area_keys_s)}
return fig, {'behaviour' : area_keys_s, 'mean' : areas_mean_s, 'std': areas_std_s, 'conf_95': areas_conf_s, 'data' : data}
return fig
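# A hypothetical call, for illustration only (the event keys 'area' and 'dffMax2' are the
# ones used elsewhere in this class; everything else is an example value):
#
#   fig, stats = self.get_all_signal_attribute_plot(
#       astroA_l, bh_l, type_event='dffMax2', type_plot='bar',
#       title='Amplitudes', y_title='df/f', error_type='conf', with_stats=True)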
def get_behaviour_area_plot(self, astroA):
area_keys = np.array(self.filter_keys(astroA))
area_l_mean = []
area_l_std = []
for k in area_keys:
area_k = astroA.res_d['area'][astroA.event_subsets[k]]
area_l_mean.append(np.mean(area_k))
area_l_std.append(np.std(area_k))
area_l_mean = np.array(area_l_mean)
area_l_std = np.array(area_l_std)
areas_i = np.argsort(area_l_mean)
area_keys_s = area_keys[areas_i]
area_l_mean_s = area_l_mean[areas_i]
area_l_std_s = area_l_std[areas_i]
fig = plotly_utils.plot_bar(x=area_keys_s, y=area_l_mean_s, text_values=[], text_size=20, title='Sizes of events', x_title='', y_title='Event sizes (\u03bcm<sup>2</sup>)', margin_b=150)
return fig
def get_behaviour_amplitude_bar_plot(self, astroA):
am_keys = np.array(self.filter_keys(astroA))
am_l_mean = []
for k in am_keys:
dff_res = astroA.res_d['dffMax2'][astroA.event_subsets[k]]
am_l_mean.append(np.mean(dff_res))
am_l_mean = np.array(am_l_mean)
am_i = np.argsort(am_l_mean)
am_keys_s = am_keys[am_i]
am_l_mean_s= am_l_mean[am_i]
fig = plotly_utils.plot_bar(x=am_keys_s, y=am_l_mean_s, text_values=[], text_size=20, title='Amplitude (df/f) of events', x_title='', y_title='df/f', margin_b=150)
return fig
def get_waterfall_delays_plot_all(self, astroA, return_results_only=False):
#Unique, no unique
#Num stick start non num stick start
#Half second non half second
#unique_args = [True, False]
unique_args = [True, False]
max_duration_args = [None, astroA.duration_small]
with_stick_num_args = [True]
figs = {}
figs_interp = {}
stick_id = 'stick_exact_start'
running_id = 'running_exact'
rest_id = 'rest_exact'
stick_v_l_d = {}
running_v_l_d = {}
no_running_v_l_d = {}
for un in unique_args:
for max_duration in max_duration_args:
for with_stick_num in with_stick_num_args:
delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
'min_delay' : -20,
'max_delay' : 50,
'max_duration' : max_duration,
'unique_events' : un
}
plot_id = '{}-{}-{}'.format('unique' if un else 'notunique',
'max_duration_None' if (max_duration is None) else 'max_duration_' + str(max_duration),
'stick_num_' + str(with_stick_num))
if with_stick_num:
rand_running = np.random.choice(list(set(astroA.indices_d[running_id]) - set(astroA.indices_d[stick_id])), size=len(astroA.indices_d[stick_id]), replace=False)
rand_no_running = np.random.choice(list(set(astroA.indices_d[rest_id]) - set(astroA.indices_d[stick_id])), size=len(astroA.indices_d[stick_id]), replace=False)
else:
rand_running = list(set(astroA.indices_d[running_id]) - set(astroA.indices_d[stick_id]))
rand_no_running = list(set(astroA.indices_d[rest_id]) - set(astroA.indices_d[stick_id]))
signal_delays_stick_np, peak_delays_stick_np = aqua_utils.get_delay_info_from_res(astroA.indices_d[stick_id], astroA.res_d, **delay_info_args)
signal_delays_running_np, peak_delays_running_np = aqua_utils.get_delay_info_from_res(rand_running, astroA.res_d, **delay_info_args)
signal_delays_no_running_np, peak_delays_no_running_np = aqua_utils.get_delay_info_from_res(rand_no_running, astroA.res_d, **delay_info_args)
stick_v = np.sort(signal_delays_stick_np)
running_v = np.sort(signal_delays_running_np)
no_running_v = np.sort(signal_delays_no_running_np)
stick_v_l_d[plot_id] = stick_v
running_v_l_d[plot_id] = running_v
no_running_v_l_d[plot_id] = no_running_v
figs[plot_id] = plotly_utils.plot_waterfall(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour', x_title='Delay (s)', y_title='Event id')
plotly_utils.apply_fun_axis_fig(figs[plot_id], lambda x : x / astroA.fr, axis='x')
figs_interp[plot_id] = plotly_utils.plot_waterfall_interpolate(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour (scaled)', x_title='Delay (s)', y_title='Event id')
plotly_utils.apply_fun_axis_fig(figs_interp[plot_id], lambda x : x / astroA.fr, axis='x')
if return_results_only:
return [stick_v_l_d, running_v_l_d, no_running_v_l_d]
return figs, figs_interp
def get_waterfall_delays_plot_all_mult(self, astroA_l):
figs_d = {}
figs_interp_d = {}
stick_v_l_d = {}
running_v_l_d = {}
no_running_v_l_d = {}
for astroA_i, astroA in enumerate(astroA_l):
stick_d, running_d, no_running_d = self.get_waterfall_delays_plot_all(astroA, return_results_only=True)
if astroA_i == 0:
stick_v_l_d = stick_d
running_v_l_d = running_d
no_running_v_l_d = no_running_d
k_0 = list(stick_d.keys())[0]
arrs = [stick_v_l_d, running_v_l_d, no_running_v_l_d]
for k in stick_d.keys():
for arr in arrs:
arr[k] = list(arr[k])
else:
k_0 = list(stick_d.keys())[0]
for k in stick_d.keys():
stick_v_l_d[k].extend(stick_d[k])
running_v_l_d[k].extend(running_d[k])
no_running_v_l_d[k].extend(no_running_d[k])
for k in stick_v_l_d.keys():
stick_v = np.sort(stick_v_l_d[k])
running_v = np.sort(running_v_l_d[k])
no_running_v = np.sort(no_running_v_l_d[k])
fig = plotly_utils.plot_waterfall(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour', x_title='Delay (s)', y_title='Event id')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA_l[0].fr, axis='x')
fig_interp = plotly_utils.plot_waterfall_interpolate(arrays_l=[stick_v, running_v, no_running_v], legend_names=['stick', 'running', 'rest'], title='Signal (event) delays after behaviour (scaled) All axons', x_title='Delay (s)', y_title='Event id')
plotly_utils.apply_fun_axis_fig(fig_interp, lambda x : x / astroA_l[0].fr, axis='x')
figs_d[k] = fig
figs_interp_d[k] = fig_interp
return figs_d, figs_interp_d
def get_transition_proportion_delays_plot_all(self, astroA_l, before_bh, inds_bh, after_bh,
before_range=20, after_range=50, avg_proportions=False,
delay_step_size=1):
'''
inds: the indices i to check
before_bh: for each i, make sure the behaviour before is before_bh, otherwise don't include i
after_bh: for each i, make sure the behaviour after is after_bh, otherwise don't include i
before_range: the range (in frames) in which we look for events before each index
after_range: the range (in frames) in which we look for events after each index
'''
#Unique, no unique
#Num stick start non num stick start
unique_args = [True, False]
max_duration_args = [None, astroA_l[0].duration_small]
figs = {}
for max_duration in max_duration_args:
#STICK
for un in unique_args:
plot_id = 'prop-{}-{}'.format('unique' if un else 'notunique', 'max_duration_None' if (max_duration is None) else 'max_duration_' + str(max_duration))
prop = np.zeros([(after_range+before_range+1)])
signal_delays_all_l = []
for astroA in astroA_l:
inds = astroA.indices_d[inds_bh]
#Filter indices
indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
if len(indices_filt) == 0:
continue
#print('Len indices {} len filt before {} len filt after {} len filt {}'.format(len(inds), len(indices_filt_before), len(indices_filt_after), len(indices_filt)))
delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
'min_delay' : -before_range,
'max_delay' : after_range,
'max_duration' : max_duration,
'unique_events' : un
}
signal_delays_np, peak_delays_np = aqua_utils.get_delay_info_from_res(indices_filt, astroA.res_d, **delay_info_args)
signal_delays_all_l.extend(list(signal_delays_np))
signal_delays_all = np.array(signal_delays_all_l)
print('Total signals {} {}-{} delay {} {}'.format(len(signal_delays_all), before_bh, after_bh, before_range, after_range))
for i, delay_x in enumerate(range(-before_range, after_range+1)):
if len(signal_delays_all) == 0:
prop[i] = 0
else:
prop[i] = float(np.sum(signal_delays_all == delay_x)) / len(signal_delays_all)
rem = len(prop) % delay_step_size
if rem != 0:
prop = prop[:-rem]
prop_step_sum = np.sum(prop.reshape([-1, delay_step_size]), axis=1)
x_l = [np.arange(-before_range, after_range+1, delay_step_size) for i in range(1)]
y_l = [prop_step_sum]
figs[plot_id] = plotly_utils.plot_scatter_mult(x_l, y_l, name_l=['{} to {}'.format(before_bh, after_bh)], mode='lines', title='scatter', x_title='Delay (s)', y_title='Events')
plotly_utils.apply_fun_axis_fig(figs[plot_id], lambda x : x / astroA.fr, axis='x')
return figs
def get_transition_proportion_delays_plot_all_alt(self, astroA_l, before_bh, inds_bh, after_bh,
before_range=20, after_range=50, y_title=None,
delay_step_size=1, fit=False, measure=None, fix_dff_interval=50, confidence=False,
duration_filter=[None, None]):
'''
Generate plots of transitions between behaviours lasting for some period of time
(e.g. 20 frames of rest (before_bh) followed by a transition to 30 frames of running
(after_bh) for valid indices in running_start_exact (inds_bh)). A measure such as size
or amplitude can be provided to plot that particular measure, or it can be left empty
to obtain the proportion of events taking place at each delay within these intervals.
inds: the indices i to check
before_bh: for each i, make sure the behaviour before is before_bh, otherwise don't include i
after_bh: for each i, make sure the behaviour after is after_bh, otherwise don't include i
before_range: the range (in frames) in which we look for events before each index
after_range: the range (in frames) in which we look for events after each index
before_delay: the length of the interval we require to be continuous before and after the behaviour (effectively a range)
'''
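# A hypothetical call, for illustration only (the behaviour keys follow the example in the
# docstring above and the keys used elsewhere in this class, e.g. 'rest_exact' and
# 'running_exact'; the numeric values are made up):
#
#   figs = self.get_transition_proportion_delays_plot_all_alt(
#       astroA_l, before_bh='rest_exact', inds_bh='running_start_exact',
#       after_bh='running_exact', before_range=20, after_range=50,
#       measure='dffMax2', delay_step_size=3)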
signal_delays_all_l_l = []
if measure is not None:
event_measure_all_l_l = []
#DFF max fix, to support both default and the fix
dff_max_to_fix = (measure == 'dffMax2')
if measure == 'dffMax2default':
measure = 'dffMax2'
#Fix dffMax by adding more range and delay
if dff_max_to_fix:
before_range += fix_dff_interval
after_range += fix_dff_interval
for astroA | |
<gh_stars>1-10
"""
Open Power System Data
Timeseries Datapackage
read.py : read time series files
"""
import pytz
import yaml
import os
import sys
import numpy as np
import pandas as pd
import logging
import zipfile
import csv
import re
from datetime import datetime, date, time, timedelta
import xlrd
from xml.sax import ContentHandler, parse
from .excel_parser import ExcelHandler
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
def read_entso_e_transparency(
areas, filepath, variable_name, url, headers, res_key, cols, stacked,
unstacked, geo, append_headers, **kwargs):
"""
Read a .csv file from ENTSO-E Transparency into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
variable_name : str
Name of variable, e.g. ``solar``
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
res_key : str
Resolution of the source data. Must be one of ['15min', '30min', '60min']
cols : dict
A mapping of column names to use from the input file and the new names
to rename them to. The new name is the header level whose corresponding
values are specified in that column
stacked : list
List of strings indicating the header levels that are reported
column-wise in the input files
unstacked : str
One string indicating the header level that is reported row-wise in the
input files
geo : str
The geographical concept (i.e. ``country`` or ``bidding zone``) for which
data should be extracted.
Records for other concepts (i.e. ``control areas``) will be ignored.
append_headers : dict
Map of header levels and values to append to the MultiIndex
kwargs: dict
placeholder for further named function arguments
Returns
----------
df: pandas.DataFrame
The content of one file from ENTSO-E Transparency
"""
df_raw = pd.read_csv(
filepath,
sep='\t',
encoding='utf-16',
header=0,
index_col='timestamp',
parse_dates={'timestamp': ['DateTime']},
date_parser=None,
dayfirst=False,
decimal='.',
thousands=None,
usecols=['DateTime', *cols.keys()],
# the column specifying the technology has a trailing space, which we
# cut off
converters={'ProductionType_Name': lambda x: x[:-1]},
)
if variable_name == 'Actual Generation per Production Type':
# keep only renewables columns
renewables = {
'Solar': 'solar',
'Wind Onshore': 'wind_onshore',
'Wind Offshore': 'wind_offshore'
}
df_raw = df_raw[df_raw['ProductionType_Name'].isin(renewables.keys())]
df_raw.replace({'ProductionType_Name': renewables}, inplace=True)
if variable_name == 'Day Ahead Prices':
# Omit polish price data reported in EUR (keeping PLN prices)
# (Before 2017-03-02, the data is very messy)
no_polish_euro = ~((df_raw['AreaName'] == 'PSE SA BZ') &
(df_raw.index < pd.to_datetime('2017-03-02 00:00:00')))
df_raw = df_raw.loc[no_polish_euro]
# keep only entries for selected geographic entities as specified in
# areas.csv + select regions with the same temporal resolution
time_and_place = areas[geo].loc[areas[res_key] == True].dropna()
df_raw = df_raw.loc[df_raw['AreaName'].isin(time_and_place)]
# based on the AreaName column, map the area names used throughout OPSD
lookup = areas.set_index(geo)['area ID'].dropna()
lookup = lookup[~lookup.index.duplicated()]
df_raw['region'] = df_raw['AreaName'].map(lookup)
df_raw.drop('AreaName', axis=1, inplace=True)
# rename columns to comply with other data
df_raw.rename(columns=cols, inplace=True)
# juggle the index and columns
df = df_raw
df.set_index(stacked, append=True, inplace=True)
# at this point, only the values we are interested in are left as
# columns
df.columns.rename(unstacked, inplace=True)
df = df.unstack(stacked)
# keep only columns that have at least some nonzero values
df = df.loc[:, (df > 0).any(axis=0)]
# add source and url to the columns.
# Note: pd.concat inserts new MultiIndex values in front of the old ones
df = pd.concat([df],
keys=[tuple([*append_headers.values(), url])],
names=[*append_headers.keys(), 'web'],
axis='columns')
# reorder and sort columns
df = df.reorder_levels(headers, axis=1)
df.sort_index(axis=1, inplace=True)
# throw out obs with wrong timestamp
#no_gaps = pd.DatetimeIndex(start=df.index[0],
# end=df.index[-1],
# freq=res_key)
#df = df.reindex(index=no_gaps)
return df
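# A hypothetical invocation, purely for illustration; in OPSD the real argument values
# (cols, stacked, unstacked, geo, append_headers, ...) come from an external source
# configuration that is not part of this file, so the ones below are invented:
#
#   df = read_entso_e_transparency(
#       areas=areas_df,
#       filepath='2018_01_AggregatedGenerationPerType.csv',
#       variable_name='Actual Generation per Production Type',
#       url='https://transparency.entsoe.eu/',
#       headers=['region', 'variable', 'attribute', 'source', 'web', 'unit'],
#       res_key='15min',
#       cols={'AreaName': 'region', 'ProductionType_Name': 'variable',
#             'ActualGenerationOutput': 'generation_actual'},
#       stacked=['region', 'variable'],
#       unstacked='attribute',
#       geo='bidding zone',
#       append_headers={'source': 'ENTSO-E Transparency', 'unit': 'MW'})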
def read_pse(filepath, variable_name, url, headers):
"""
Read a .csv file from PSE into a DataFrame.
Parameters
----------
filepath : str
Directory path of file to be read
variable_name : str
Name of variable, e.g. ``solar``
url : str
URL linking to the source website where this data comes from
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
Returns
----------
df: pandas.DataFrame
The content of one file from PSE
"""
df = pd.read_csv(
filepath,
sep=';',
encoding='cp1250',
header=0,
index_col=None,
parse_dates=None,
date_parser=None,
dayfirst=False,
decimal=',',
thousands=None,
# hours are indicated by their ending time. During fall DST,
# UTC 23:00-00:00 = CEST 1:00-2:00 is indicated by '02',
# UTC 00:00-01:00 = CEST 2:00-3:00 is indicated by '02A',
# UTC 01:00-02:00 = CET 2:00-3:00 is indicated by '03'.
# regular hours require backshifting by 1 period
converters={
'Time':
lambda x: '2:00' if x == '2A' else str(int(x) - 1) + ':00'
}
)
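# Worked example of the 'Time' converter above (values derived from the lambda itself):
#   '1'  -> '0:00'   (regular hours are backshifted by one period)
#   '02' -> '1:00'
#   '2A' -> '2:00'   (the duplicated autumn DST hour keeps its ending time)
#   '24' -> '23:00'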
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Copenhagen')._utc_transition_times
if d.year >= 2000 and d.month == 3]
# Account for an error where an hour is jumped in the data, incrementing
# the hour by one
#time_int = df['Time'].str[:-3].astype(int)
#if (time_int time_int.shift(1) - 1).
#if (time_int == 24).any():
# logger.info(filepath)
# df = df[time_int != 24]
if df['Date'][0] == 20130324:
df['Time'] = [str(num) + ':00' for num in range(24)]
# The hour from 01:00 - 02:00 (CET) should by PSE's logic be indexed
# by "02:00" (the endpoint), but at DST day in spring they use "03:00" in
# the files. Our routine requires it to be "01:00" (the start point).
df['proto_timestamp'] = pd.to_datetime(
df['Date'].astype(str) + ' ' + df['Time'])
slicer = df['proto_timestamp'].isin(dst_transitions_spring)
df.loc[slicer, 'Time'] = '1:00'
# create the actual timestamp from the corrected "Date"-column
df['timestamp'] = pd.to_datetime(
df['Date'].astype(str) + ' ' + df['Time'])
df.set_index('timestamp', inplace=True)
# 'ambiguous' refers to how the October dst-transition hour is handled.
# 'infer' will attempt to infer dst-transition hours based on order.
df.index = df.index.tz_localize('Europe/Berlin', ambiguous='infer')
df.index = df.index.tz_convert(None)
colmap = {
'Generation of Wind Farms': {
'region': 'PL',
'variable': 'wind_onshore',
'attribute': 'generation_actual',
'source': 'PSE',
'web': url,
'unit': 'MW'
}
}
# Drop any column not in colmap
df = df[list(colmap.keys())]
# Create the MultiIndex
tuples = [tuple(colmap[col][level] for level in headers)
for col in df.columns]
df.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
return df
def read_ceps(filepath, variable_name, url, headers):
'''Read a file from CEPS into a DataFrame'''
df = pd.read_csv(
# pd.read_excel(io=filepath,
#sheet_name='ČEPS report',
filepath,
sep=';',
header=2,
skiprows=None,
index_col=0,
usecols=[0, 1, 2]
)
df.index = pd.to_datetime(df.index.rename('timestamp'))
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
# Translate columns
colmap = {
'WPP [MW]': {
'region': 'CZ',
'variable': 'wind_onshore',
'attribute': 'generation_actual',
'source': 'CEPS',
'web': url,
'unit': 'MW'
},
'PVPP [MW]': {
'region': 'CZ',
'variable': 'solar',
'attribute': 'generation_actual',
'source': 'CEPS',
'web': url,
'unit': 'MW'
}
}
# Create the MultiIndex
tuples = [tuple(colmap[col][level] for level in headers)
for col in df.columns]
df.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
return df
def read_elia(filepath, variable_name, url, headers):
'''Read a file from Elia into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=None,
skiprows=4,
index_col=0,
usecols=None
)
colmap = {
'Day-Ahead forecast [MW]': {
'region': 'BE',
'variable': variable_name,
'attribute': 'generation_forecast',
'source': 'Elia',
'web': url,
'unit': 'MW'
},
'Corrected Upscaled Measurement [MW]': {
'region': 'BE',
'variable': variable_name,
'attribute': 'generation_actual',
'source': 'Elia',
'web': url,
'unit': 'MW'
},
'Monitored Capacity [MWp]': {
'region': 'BE',
'variable': variable_name,
'attribute': 'capacity',
'source': 'Elia',
'web': url,
'unit': 'MW'
}
}
# Drop any column not in colmap
df = df[list(colmap.keys())]
df.index = pd.to_datetime(df.index.rename('timestamp'))
df.index = df.index.tz_localize('Europe/Brussels', ambiguous='infer')
df.index = df.index.tz_convert(None)
# Create the MultiIndex
tuples = [tuple(colmap[col][level] for level in headers)
for col in df.columns]
df.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
return df
def read_energinet_dk(filepath, url, headers):
'''Read a file from energinet.dk into a DataFrame'''
df = pd.read_excel(
io=filepath,
header=2, # the column headers are taken from 3rd row.
# 2nd row also contains header info like in a multiindex,
# i.e. whether the columns are price or generation data.
# However, we will make our own column names below.
# Row 3 is enough to unambiguously identify the columns
skiprows=None,
index_col=None,
usecols=None, # None means: parse all columns
thousands=','
)
# pandas on its own authority sets the first 2 columns as index,
# probably because the columns' names are in merged cells
df.index.rename(['date', 'hour'], inplace=True)
df.reset_index(inplace=True)
df['timestamp'] = pd.to_datetime(
df['date'].astype(str) + ' ' +
(df['hour'] - 1).astype(str) + ':00')
df.set_index('timestamp', inplace=True)
# Create a list of spring-daylight savings time (DST)-transitions
dst_transitions_spring = [
d.replace(hour=2)
| |
import unittest
from io import StringIO
import tempfile
import sift_pyx12.error_handler
import sift_pyx12.errors
import sift_pyx12.segment
#from sift_pyx12.errors import *
import sift_pyx12.x12file
class X12fileTestCase(unittest.TestCase):
def _get_first_error(self, x12str, ftype=None):
fd = self._makeFd(x12str)
errors = []
err_cde = None
err_str = None
src = sift_pyx12.x12file.X12Reader(fd)
if ftype == '837':
src.check_837_lx = True
#src = sift_pyx12.x12file.X12Reader(fd)
for seg in src:
errors.extend(src.pop_errors())
errors.extend(src.pop_errors())
src.cleanup()
errors.extend(src.pop_errors())
if len(errors) > 0:
err_cde = errors[0][1]
err_str = errors[0][2]
return (err_cde, err_str)
def _makeFd(self, x12str=None):
try:
if x12str:
fd = StringIO(x12str)
else:
fd = StringIO()
except:
if x12str:
fd = StringIO(x12str, encoding='ascii')
else:
fd = StringIO(encoding='ascii')
fd.seek(0)
return fd
def test_binary_delimiters(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&^&00501&000010121&0&T&!+\n'
str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098+\n'
str1 += 'ST&837&11280001+\n'
str1 += 'TST&AA!1!1&BB!5+\n'
str1 += 'SE&3&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
str1 = str1.replace('&', chr(0x1C))
str1 = str1.replace('+', chr(0x1D))
str1 = str1.replace('!', chr(0x1E))
str1 = str1.replace('^', chr(0x1F))
fd = self._makeFd(str1)
errors = []
src = sift_pyx12.x12file.X12Reader(fd)
for seg in src:
errors.extend(src.pop_errors())
err_cde = None
if len(errors) > 0:
err_cde = errors[0][1]
self.assertEqual(err_cde, None)
self.assertEqual(src.subele_term, chr(0x1E))
self.assertEqual(src.ele_term, chr(0x1C))
self.assertEqual(src.seg_term, chr(0x1D))
self.assertEqual(src.repetition_term, chr(0x1F))
def test_trailing_ele_delim(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'ZZ*1***~\n'
fd = self._makeFd(str1)
src = sift_pyx12.x12file.X12Reader(fd)
err_cde = None
err_str = None
for seg in src:
if seg.get_seg_id() == 'ZZ':
errors = src.pop_errors()
if len(errors) > 0:
err_cde = errors[0][1]
err_str = errors[0][2]
self.assertEqual(err_cde, 'SEG1', err_str)
class ISA_header(X12fileTestCase):
def test_starts_with_ISA(self):
str1 = ' ISA~'
fd = self._makeFd(str1)
self.assertRaises(sift_pyx12.errors.X12Error, sift_pyx12.x12file.X12Reader, fd)
def test_at_least_ISA_len(self):
str1 = 'ISA~'
fd = self._makeFd(str1)
self.assertRaises(sift_pyx12.errors.X12Error, sift_pyx12.x12file.X12Reader, fd)
def test_repeat_ISA_loops(self):
str1 = """ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~
IEA*0*000010121~
ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010122*0*T*:~
IEA*0*000010122~
"""
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, None, err_str)
def test_Unique_Interchange_ID(self):
seg = None
str1 = """ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~
IEA*0*000010121~
ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~
IEA*0*000010121~
"""
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '025', err_str)
class IEA_Checks(X12fileTestCase):
def test_IEA_id_match_ISA_id(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*0*17~\n'
str1 += 'IEA*1*000010555~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '001', err_str)
def test_IEA_count(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*0*17~\n'
str1 += 'IEA*2*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '021', err_str)
def test_missing_IEA(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*0*17~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '023', err_str)
class GE_Checks(X12fileTestCase):
def test_GE_id_match_GS_id(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*0*555~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '4', err_str)
def test_GE_count(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*999*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '5', err_str)
def test_Unique_Functional_Group_ID(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*0*17~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'GE*0*17~\n'
str1 += 'IEA*2*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '6', err_str)
def test_missing_GE(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '024', err_str)
class SE_Checks(X12fileTestCase):
def test_SE_id_match_ST_id(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'SE*2*11280999~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '3', err_str)
def test_SE_count(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'SE*0*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '4', err_str)
def test_Unique_Transaction_Set_ID(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'SE*2*11280001~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'SE*2*11280001~\n'
str1 += 'GE*2*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '23', err_str)
def test_missing_SE(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '3', err_str)
class HL_Checks(X12fileTestCase):
"""
We can do minimal HL parent checks here
"""
def test_HL_increment_good(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'HL*1**20*1~\n'
str1 += 'HL*2*1*22*1~\n'
str1 += 'HL*3*2*23*1~\n'
str1 += 'HL*4*1*22*1~\n'
str1 += 'SE*6*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, None, err_str)
def test_HL_increment_bad(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'HL*1**20*1~\n'
str1 += 'HL*2*1*22*1~\n'
str1 += 'HL*3*2*23*1~\n'
str1 += 'HL*5*1*22*1~\n'
str1 += 'SE*6*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, 'HL1', err_str)
def test_HL_parent_good(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'HL*1**20*1~\n'
str1 += 'HL*2*1*22*1~\n'
str1 += 'HL*3*2*23*1~\n'
str1 += 'HL*4*1*22*1~\n'
str1 += 'SE*6*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, None, err_str)
def test_HL_parent_bad_invalid(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'HL*1**20*1~\n'
str1 += 'HL*2*1*22*1~\n'
str1 += 'HL*3*5*23*1~\n'
str1 += 'HL*4*2*22*1~\n'
str1 += 'SE*6*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, 'HL2', err_str)
def xtest_HL_parent_bad_blank(self):
seg = None
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
str1 += 'ST*837*11280001~\n'
str1 += 'HL*1**20*1~\n'
str1 += 'HL*2*1*22*1~\n'
str1 += 'HL*3**23*1~\n'
str1 += 'HL*4*2*22*1~\n'
str1 += 'SE*6*11280001~\n'
str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, 'HL2', err_str)
class Formatting(X12fileTestCase):
def test_identity(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
# str1 += 'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098~\n'
# str1 += 'ST*837*11280001~\n'
# str1 += 'HL*1**20*1~\n'
# str1 += 'HL*2*1*22*1~\n'
# str1 += 'HL*3*2*23*1~\n'
# str1 += 'HL*4*1*22*1~\n'
# str1 += 'SE*6*11280001~\n'
# str1 += 'GE*1*17~\n'
str1 += 'IEA*1*000010121~\n'
fd = self._makeFd(str1)
src = sift_pyx12.x12file.X12Reader(fd)
str_out = ''
for seg in src:
str_out += seg.format() + '\n'
self.assertMultiLineEqual(str1, str_out)
def test_strip_eol(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'IEA*1*000010121~\n'
fd = self._makeFd(str1)
src = sift_pyx12.x12file.X12Reader(fd)
str_out = ''
for seg in src:
str_out += seg.format()
str1 = str1.replace('\n', '')
self.assertMultiLineEqual(str1, str_out)
class Segment_ID_Checks(X12fileTestCase):
def test_segment_id_short(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'Z*0019~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '1', err_str)
def test_segment_last_space(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'ZZ*0019 ~\n'
fd = self._makeFd(str1)
val = None
src = sift_pyx12.x12file.X12Reader(fd)
for seg in src:
if seg.get_seg_id() == 'ZZ':
val = seg.get('ZZ01').format()
self.assertEqual(val, '0019 ')
def test_segment_id_long(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'ZZZZ*0019~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '1', err_str)
def test_segment_id_empty(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += '*1~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '1', err_str)
def test_segment_empty(self):
errh = sift_pyx12.error_handler.errh_null()
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~\n'
str1 += 'TST~\n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '8', err_str)
def test_segment_trailing_space(self):
str1 = 'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:~ \n'
str1 += 'ZZ*0019~ \n'
(err_cde, err_str) = self._get_first_error(str1)
self.assertEqual(err_cde, '1', err_str)
class FileString(X12fileTestCase):
def test_filename_open(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098+\n'
str1 += 'ST&837&11280001+\n'
str1 += 'TST&AA!1!1&BB!5+\n'
str1 += 'SE&3&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
fd = self._makeFd(str1)
errors = []
src = sift_pyx12.x12file.X12Reader(fd)
for seg in src:
errors.extend(src.pop_errors())
err_cde = None
if len(errors) > 0:
err_cde = errors[0][1]
self.assertEqual(err_cde, None)
self.assertEqual(src.subele_term, '!')
self.assertEqual(src.ele_term, '&')
self.assertEqual(src.seg_term, '+')
class X12WriterTest(X12fileTestCase):
def test_identity(self):
segs = [
'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:',
'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098',
'ST*837*11280001',
'HL*1**20*1',
'HL*2*1*22*1',
'HL*3*2*23*1',
'HL*4*1*22*1',
'SE*6*11280001',
'GE*1*17',
'IEA*1*000010121'
]
fd_out = self._makeFd()
wr = sift_pyx12.x12file.X12Writer(fd_out, '~', '*', ':', '\n')
output = ''
for seg_str in segs:
seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
wr.Write(seg_data)
output += seg_str + '~\n'
fd_out.seek(0)
newval = fd_out.read()
self.assertMultiLineEqual(output, newval)
def test_tempfile_ascii(self):
segs = [
'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:',
'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098',
'ST*837*11280001',
'HL*1**20*1',
'HL*2*1*22*1',
'HL*3*2*23*1',
'HL*4*1*22*1',
'SE*6*11280001',
'GE*1*17',
'IEA*1*000010121'
]
fd_out = tempfile.TemporaryFile(mode='w+', encoding='ascii')
wr = sift_pyx12.x12file.X12Writer(fd_out, '~', '*', ':', '\n')
output = ''
for seg_str in segs:
seg_data = sift_pyx12.segment.Segment(seg_str, '~', '*', ':')
wr.Write(seg_data)
output += seg_str + '~\n'
fd_out.seek(0)
newval = fd_out.read()
self.assertMultiLineEqual(output, newval)
def test_tempfile_fail_no_encoding(self):
segs = [
'ISA*00* *00* *ZZ*ZZ000 *ZZ*ZZ001 *030828*1128*U*00401*000010121*0*T*:',
'GS*HC*ZZ000*ZZ001*20030828*1128*17*X*004010X098',
'ST*837*11280001',
'HL*1**20*1',
'HL*2*1*22*1',
'HL*3*2*23*1',
'HL*4*1*22*1',
'SE*6*11280001',
'GE*1*17',
'IEA*1*000010121'
]
fd_out = tempfile.TemporaryFile()
wr = sift_pyx12.x12file.X12Writer(fd_out, | |
<reponame>Joukahainen/finmarketpy
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
import numpy as np
from financepy.market.curves.FinDiscountCurveFlat import FinDiscountCurveFlat
from financepy.finutils.FinDate import FinDate
# Future versions of FinancePy will roll FXFinVolSurfacePlus into FinFXVolSurface
try:
from financepy.market.volatility.FinFXVolSurfacePlus import FinFXVolSurfacePlus as FinFXVolSurface
from financepy.market.volatility.FinFXVolSurfacePlus import FinFXATMMethod
from financepy.market.volatility.FinFXVolSurfacePlus import FinFXDeltaMethod
from financepy.market.volatility.FinFXVolSurfacePlus import volFunction
from financepy.market.volatility.FinFXVolSurfacePlus import FinVolFunctionTypes
except:
from financepy.market.volatility.FinFXVolSurface import FinFXVolSurface
from financepy.market.volatility.FinFXVolSurface import FinFXATMMethod
from financepy.market.volatility.FinFXVolSurface import FinFXDeltaMethod
from financepy.market.volatility.FinFXVolSurface import volFunction
from financepy.market.volatility.FinFXVolSurface import FinVolFunctionTypes
from financepy.finutils.FinGlobalTypes import FinSolverTypes
from findatapy.util.dataconstants import DataConstants
from finmarketpy.curve.volatility.abstractvolsurface import AbstractVolSurface
from finmarketpy.util.marketconstants import MarketConstants
from finmarketpy.util.marketutil import MarketUtil
data_constants = DataConstants()
market_constants = MarketConstants()
class FXVolSurface(AbstractVolSurface):
"""Holds data for an FX vol surface and also interpolates vol surface, converts strikes to implied vols etc.
"""
def __init__(self, market_df=None, asset=None, field='close', tenors=market_constants.fx_options_tenor_for_interpolation,
vol_function_type=market_constants.fx_options_vol_function_type,
atm_method=market_constants.fx_options_atm_method,
delta_method=market_constants.fx_options_delta_method,
depo_tenor=market_constants.fx_options_depo_tenor,
solver=market_constants.fx_options_solver,
alpha=market_constants.fx_options_alpha,
tol=market_constants.fx_options_tol):
"""Initialises object, with market data and various market conventions
Parameters
----------
market_df : DataFrame
Market data with spot, FX volatility surface, FX forwards and base depos
asset : str
Eg. 'EURUSD'
field : str
Market data field to use
default - 'close'
tenors : list of str
Tenors to be used (we need to avoid tenors, where there might be NaNs)
vol_function_type : str
What type of interpolation scheme to use
default - 'CLARK5' (also 'CLARK', 'BBG' and 'SABR')
atm_method : str
How is the ATM quoted? Eg. delta neutral, ATMF etc.
default - 'fwd-delta-neutral-premium-adj'
delta_method : str
Spot delta, forward delta etc.
default - 'spot-delta'
solver : str
Which solver to use in FX vol surface calibration?
default - 'nelmer-mead'
alpha : float
Between 0 and 1 (default 0.5)
"""
self._market_df = market_df
self._tenors = tenors
self._asset = asset
self._field = field
self._depo_tenor = depo_tenor
self._market_util = MarketUtil()
self._dom_discount_curve = None
self._for_discount_curve = None
self._spot = None
self._value_date = None
self._fin_fx_vol_surface = None
self._df_vol_dict = None
for_name_base = asset[0:3]
dom_name_terms = asset[3:6]
field = '.' + field
# CAREFUL: need to divide by 100 for depo rate, ie. 0.0346 = 3.46%
self._forCCRate = market_df[for_name_base + depo_tenor + field].values / 100.0 # 0.03460 # EUR
self._domCCRate = market_df[dom_name_terms + depo_tenor + field].values / 100.0 # 0.02940 # USD
self._spot_history = market_df[asset + field].values
self._atm_vols = market_df[[asset + "V" + t + field for t in tenors]].values
self._market_strangle25DeltaVols = market_df[[asset + "25B" + t + field for t in tenors]].values
self._risk_reversal25DeltaVols = market_df[[asset + "25R" + t + field for t in tenors]].values
self._market_strangle10DeltaVols = market_df[[asset + "10B" + t + field for t in tenors]].values
self._risk_reversal10DeltaVols = market_df[[asset + "10R" + t + field for t in tenors]].values
if vol_function_type == 'CLARK':
self._vol_function_type = FinVolFunctionTypes.CLARK
elif vol_function_type == 'CLARK5':
self._vol_function_type = FinVolFunctionTypes.CLARK5
elif vol_function_type == 'BBG':
self._vol_function_type = FinVolFunctionTypes.BBG
# Note: currently SABR isn't fully implemented in FinancePy
elif vol_function_type == 'SABR':
self._vol_function_type = FinVolFunctionTypes.SABR
elif vol_function_type == 'SABR3':
self._vol_function_type = FinVolFunctionTypes.SABR3
# What does ATM mean? (for most FX pairs the market convention is delta-neutral)
if atm_method == 'fwd-delta-neutral': # ie. strike such that a straddle would be delta neutral
self._atm_method = FinFXATMMethod.FWD_DELTA_NEUTRAL
elif atm_method == 'fwd-delta-neutral-premium-adj':
self._atm_method = FinFXATMMethod.FWD_DELTA_NEUTRAL_PREM_ADJ
elif atm_method == 'spot': # ATM is spot
self._atm_method = FinFXATMMethod.SPOT
elif atm_method == 'fwd': # ATM is forward
self._atm_method = FinFXATMMethod.FWD
# How are the deltas quoted?
if delta_method == 'spot-delta':
self._delta_method = FinFXDeltaMethod.SPOT_DELTA
elif delta_method == 'fwd-delta':
self._delta_method = FinFXDeltaMethod.FORWARD_DELTA
elif delta_method == 'spot-delta-prem-adj':
self._delta_method = FinFXDeltaMethod.SPOT_DELTA_PREM_ADJ
elif delta_method == 'fwd-delta-prem-adj':
self._delta_method = FinFXDeltaMethod.FORWARD_DELTA_PREM_ADJ
# Which solver to use in FX vol surface calibration
if solver == 'nelmer-mead':
self._solver = FinSolverTypes.NELDER_MEAD
elif solver == 'nelmer-mead-numba':
self._solver = FinSolverTypes.NELDER_MEAD_NUMBA
elif solver == 'cg':
self._solver = FinSolverTypes.CONJUGATE_GRADIENT
self._alpha = alpha
self._tol = tol
def build_vol_surface(self, value_date):
"""Builds the implied volatility surface for a particular value date and calculates the benchmark strikes etc.
Before we do any sort of interpolation later, we need to build the implied vol surface.
Parameters
----------
value_date : str
Value date (need to have market data for this date)
asset : str
Asset name
depo_tenor : str
Depo tenor to use
default - '1M'
field : str
Market data field to use
default - 'close'
"""
self._value_date = self._market_util.parse_date(value_date)
value_fin_date = self._findate(self._value_date)
date_index = self._market_df.index == value_date
# TODO: add whole rates curve
dom_discount_curve = FinDiscountCurveFlat(value_fin_date, self._domCCRate[date_index])
for_discount_curve = FinDiscountCurveFlat(value_fin_date, self._forCCRate[date_index])
self._dom_discount_curve = dom_discount_curve
self._for_discount_curve = for_discount_curve
self._spot = float(self._spot_history[date_index][0])
# New implementation in FinancePy also uses 10d for interpolation
self._fin_fx_vol_surface = FinFXVolSurface(value_fin_date,
self._spot,
self._asset,
self._asset[0:3],
dom_discount_curve,
for_discount_curve,
self._tenors.copy(),
self._atm_vols[date_index][0],
self._market_strangle25DeltaVols[date_index][0],
self._risk_reversal25DeltaVols[date_index][0],
self._market_strangle10DeltaVols[date_index][0],
self._risk_reversal10DeltaVols[date_index][0],
self._alpha,
atmMethod=self._atm_method,
deltaMethod=self._delta_method,
volatilityFunctionType=self._vol_function_type,
finSolverType=self._solver,
tol=self._tol) # TODO add tol
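# Minimal usage sketch (added for illustration, not part of the original source).
# Assuming market_df holds the required spot, vol and depo columns for the chosen
# asset (e.g. 'EURUSDV1M.close', 'EURUSD25R1M.close' and so on, following the
# constructor above), the surface would typically be built and queried like this:
#
#   vol_surface = FXVolSurface(market_df=market_df, asset='EURUSD')
#   vol_surface.build_vol_surface('02 Jan 2020')
#   sigma = vol_surface.calculate_vol_for_strike_expiry(1.12, tenor='1M')
#
# The date string and strike above are purely illustrative.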
def calculate_vol_for_strike_expiry(self, K, expiry_date=None, tenor='1M'):
"""Calculates the implied volatility for a given strike and tenor (or expiry date, if specified). The
expiry date/broken dates are interpolated linearly in variance space.
Parameters
----------
K : float
Strike for which to find implied volatility
expiry_date : str (optional)
Expiry date of option
tenor : str (optional)
Tenor of option
default - '1M'
Returns
-------
float
"""
if expiry_date is not None:
expiry_date = self._findate(self._market_util.parse_date(expiry_date))
return self._fin_fx_vol_surface.volatilityFromStrikeDate(K, expiry_date)
else:
try:
tenor_index = self._get_tenor_index(tenor)
return self.get_vol_from_quoted_tenor(K, tenor_index)
except:
pass
return None
def calculate_vol_for_delta_expiry(self, delta_call, expiry_date=None):
"""Calculates the implied volatility for a given call delta and expiry date. The
expiry date/broken dates are interpolated linearly in variance space.
Parameters
----------
delta_call : float
Delta for the strike for which to find implied volatility
expiry_date : str (optional)
Expiry date of option
Returns
-------
float
"""
if expiry_date is not None:
expiry_date = self._findate(self._market_util.parse_date(expiry_date))
return self._fin_fx_vol_surface.volatilityFromDeltaDate(delta_call, expiry_date)
return None
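# Illustrative call (an assumption, not taken from the original source): once
# build_vol_surface has been run, a vol for a broken date could be requested as
#   vol_surface.calculate_vol_for_delta_expiry(0.25, expiry_date='15 Feb 2020')
# where 0.25 stands for a 25-delta call under whatever delta convention
# self._delta_method configures, and the date string is only an example.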
def extract_vol_surface(self, num_strike_intervals=60, low_K_pc=0.95, high_K_pc=1.05):
"""Creates an interpolated implied vol surface which can be plotted (in strike space) and in delta
space for the key strikes (ATM, 25d and 10d calls and puts); for those key strikes it also converts from delta to strike space.
Parameters
----------
num_strike_intervals : int
Number of points to interpolate in strike space
low_K_pc : float
Lower bound of the strike grid, as a fraction of the longest-dated 25d put strike (default 0.95)
high_K_pc : float
Upper bound of the strike grid, as a fraction of the longest-dated 25d call strike (default 1.05)
Returns
-------
dict
"""
## Modified from FinancePy code for plotting vol curves
# columns = tenors
df_vol_surface_strike_space = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
df_vol_surface_delta_space = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# columns = tenors
df_vol_surface_implied_pdf = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# Conversion between main deltas and strikes
df_deltas_vs_strikes = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# ATM, 10d + 25d market strangle and 25d risk reversals
df_vol_surface_quoted_points = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# Note: ATM, 25d and 10d quotes and strikes are all included below
quoted_strikes_names = ['ATM', 'STR_25D_MS', 'RR_25D_P', 'STR_10D_MS', 'RR_10D_P']
key_strikes_names = ['K_10D_P', 'K_10D_P_MS', 'K_25D_P', 'K_25D_P_MS', 'ATM', 'K_25D_C', 'K_25D_C_MS', 'K_10D_C', 'K_10D_C_MS']
# Get max/min strikes to interpolate (from the longest dated tenor)
low_K = self._fin_fx_vol_surface._K_25D_P[-1] * low_K_pc
high_K = self._fin_fx_vol_surface._K_25D_C[-1] * high_K_pc
if num_strike_intervals is not None:
# In case using old version of FinancePy
try:
implied_pdf_fin_distribution = self._fin_fx_vol_surface.impliedDbns(low_K, high_K, num_strike_intervals)
except:
pass
for tenor_index in range(0, self._fin_fx_vol_surface._numVolCurves):
# Get the quoted vol points
tenor_label = self._fin_fx_vol_surface._tenors[tenor_index]
atm_vol = self._fin_fx_vol_surface._atmVols[tenor_index] * 100
ms_25d_vol = self._fin_fx_vol_surface._mktStrangle25DeltaVols[tenor_index] * 100
rr_25d_vol = self._fin_fx_vol_surface._riskReversal25DeltaVols[tenor_index] * 100
ms_10d_vol = self._fin_fx_vol_surface._mktStrangle10DeltaVols[tenor_index] * 100
rr_10d_vol = self._fin_fx_vol_surface._riskReversal10DeltaVols[tenor_index] * 100
df_vol_surface_quoted_points[tenor_label] = pd.Series(index=quoted_strikes_names,
data=[atm_vol, ms_25d_vol, rr_25d_vol, ms_10d_vol, rr_10d_vol])
# Do interpolation in strike space for the implied vols (if intervals have been specified)
strikes = []
vols = []
if num_strike_intervals is not None:
K = low_K
dK = (high_K - low_K) / num_strike_intervals
for i in range(0, num_strike_intervals):
sigma = self.get_vol_from_quoted_tenor(K, tenor_index) * 100.0
strikes.append(K)
vols.append(sigma)
K = K + dK
df_vol_surface_strike_space[tenor_label] = pd.Series(index=strikes, data=vols)
try:
df_vol_surface_implied_pdf[tenor_label] = pd.Series(index=implied_pdf_fin_distribution[tenor_index]._x,
data=implied_pdf_fin_distribution[tenor_index]._densitydx)
except:
pass
# Extract strikes for the quoted points (ie. 10d, 25d and ATM)
key_strikes = []
key_strikes.append(self._fin_fx_vol_surface._K_10D_P[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_10D_P_MS[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_P[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_P_MS[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_ATM[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_C[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_C_MS[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_10D_C[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_10D_C_MS[tenor_index])
df_deltas_vs_strikes[tenor_label] = pd.Series(index=key_strikes_names, data=key_strikes)
# Put a conversion between quoted deltas and strikes
nodetypes.DependNode ):
attrNode = PyNode( attrNode )
# #-- Second Argument: Plug or Component
# # convert from string to _api objects.
# if isinstance(argObj,basestring) :
# argObj = _api.toApiObject( argObj, dagPlugs=False )
#
# # components
# elif isinstance( argObj, int ) or isinstance( argObj, slice ):
# argObj = attrNode._apiobject
else:
argObj = args[0]
# the order of the following 3 checks is important, as it is in increasing generality
if isinstance( argObj, Attribute ):
attrNode = argObj._node
argObj = argObj.__apiobjects__['MPlug']
elif isinstance( argObj, Component ):
try:
argObj = argObj._node.__apiobjects__[ 'MDagPath']
except KeyError:
argObj = argObj._node.__apiobjects__['MObjectHandle']
elif isinstance( argObj, PyNode ):
try:
argObj = argObj.__apiobjects__[ 'MDagPath']
except KeyError:
argObj = argObj.__apiobjects__['MObjectHandle']
elif hasattr( argObj, '__module__') and argObj.__module__.startswith( 'maya.OpenMaya' ) :
pass
#elif isinstance(argObj,basestring) : # got rid of this check because of nameparse objects
else:
# didn't match any known types. treat as a string
# convert to string then to _api objects.
try:
name = unicode(argObj)
except Exception:
raise MayaNodeError
else:
res = _api.toApiObject(name, dagPlugs=True)
# DagNode Plug
if isinstance(res, tuple):
# Plug or Component
#print "PLUG or COMPONENT", res
attrNode = PyNode(res[0])
argObj = res[1]
# There are some names which are both components and
# attributes: ie, scalePivot / rotatePivot
# toApiObject (and MSelectionList) will return the
# component in these ambiguous cases; therefore,
# if we're explicitly trying to make an Attribute - ie,
# Attribute('myCube.scalePivot')
# ... make sure to cast it to one in these cases
if issubclass(cls, Attribute) and \
isinstance(argObj, _api.MObject) and \
_api.MFnComponent().hasObj(argObj) and \
'.' in name:
attrName = name.split('.', 1)[1]
if attrNode.hasAttr(attrName):
return attrNode.attr(attrName)
# DependNode Plug
elif isinstance(res, _api.MPlug):
attrNode = PyNode(res.node())
argObj = res
# Other Object
elif res:
argObj = res
else:
# Removed ability to create components such as
# PyNode('myCube.vtx')
# because of inconsistency - in general, for
# PyNode(stringName)
# stringName should be a valid mel name, ie
# cmds.select(stringName)
# should work
# # Check if it's a component that's normally indexed,
# # but has no index specified - ie, myPoly.vtx,
# # instead of the (mel-valid) myPoly.vtx[*]
# dotSplit = name.split('.')
# if len(dotSplit) == 2:
# try:
# res = PyNode(dotSplit[0])
# except MayaObjectError:
# pass
# else:
# try:
# argObj = getattr(res, dotSplit[1])
# except AttributeError:
# pass
# else:
# if isinstance(argObj, cls):
# return argObj
# non-existent objects
# the object doesn't exist: raise an error
# note - at one point, I briefly changed things so
# that the code would check to see if the name
# existed, but had multiple matches, or didn't
# exist at all, and made it so MayaObjectError
# would give a more informative error message
# depending...
# ...but it had potential performance implications -
# at best, it was doing an extra cmds.objExists...
# ...and objExists wasn't fast enough, considering
# we will easily be trying to create 1000s of
# PyNodes, and the command gets slower as the size
# of the scene increases...
raise _objectError(name)
#-- Components
if validComponentIndexType(argObj):
#pymelType, obj, name = _getPymelType( attrNode._apiobject )
obj = {'ComponentIndex' : argObj }
# if we are creating a component class using an int or slice, then we must specify a class type:
# valid: MeshEdge( myNode, 2 )
# invalid: PyNode( myNode, 2 )
assert issubclass(cls,Component), "%s is not a Component class" % cls.__name__
#-- All Others
else:
pymelType, obj = _getPymelType( argObj, name )
if attrNode is None and issubclass(pymelType, Attribute):
attrNode = PyNode(obj['MPlug'].node())
#print pymelType, obj, name, attrNode
# Virtual (non-existent) objects will be cast to their own virtual type.
# so, until we make that, we're rejecting them
assert obj is not None# real objects only
#assert obj or name
else :
# create node if possible
if issubclass(cls,nodetypes.DependNode):
newNode = None
vClassInfo = _factories.virtualClasses.getVirtualClassInfo(cls)
#----------------------------------
# Pre Creation
#----------------------------------
postArgs = {}
if vClassInfo and vClassInfo.preCreate:
kwargs = vClassInfo.preCreate(**kwargs)
if isinstance(kwargs, tuple):
assert len(kwargs) == 2, "preCreate must return either 1 or 2 dictionaries of keyword arguments"
kwargs, postArgs = kwargs
assert isinstance(postArgs, dict), "preCreate second return value must be a dictionary of keyword arguments"
assert isinstance(kwargs, dict), "_preCreateVirtual must return a dictionary of keyword arguments"
#----------------------------------
# Creation
#----------------------------------
if vClassInfo and vClassInfo.create:
newNode = vClassInfo.create(**kwargs)
assert isinstance(newNode, basestring), "_createVirtual must return the name of the created node"
elif hasattr(cls, '__melcmd__') and not cls.__melcmd_isinfo__:
try:
_logger.debug( 'creating node of type %s using %s' % (cls.__melnode__, cls.__melcmd__.__name__ ) )
res = cls.__melcmd__(**kwargs)
except Exception, e:
_logger.debug( 'failed to create %s' % e )
pass
else:
if isinstance(res,list):
# we only want to return a single object
for x in res:
typ = cmds.nodeType(x)
if typ == cls.__melnode__:
newNode = x
break
elif typ == 'transform':
shape = cmds.listRelatives( x, s=1)
if shape and cmds.nodeType(shape[0]) == cls.__melnode__:
newNode = shape[0]
break
if newNode is None:
raise ValueError, "could not find type %s in result %s returned by %s" % ( cls.__name__, res, cls.__melcmd__.__name__ )
elif cls.__melnode__ == nodeType(res): #isinstance(res,cls):
newNode = res
else:
raise ValueError, "unexpected result %s returned by %s" % ( res, cls.__melcmd__.__name__ )
else:
_logger.debug( 'creating node of type %s using createNode' % cls.__melnode__ )
try:
newNode = createNode( cls.__melnode__, **kwargs )
except RuntimeError:
# FIXME: should we really be passing on this?
pass
#----------------------------------
# Post Creation
#----------------------------------
if newNode:
if vClassInfo and vClassInfo.postCreate:
vClassInfo.postCreate(newNode, **postArgs)
return cls(newNode)
raise ValueError, 'PyNode expects at least one argument: an object name, MObject, MObjectHandle, MDagPath, or MPlug'
# print "type:", pymelType
# print "PyNode __new__ : called with obj=%r, cls=%r, on object of type %s" % (obj, cls, pymelType)
# if an explicit class was given (ie: pyObj=DagNode(u'pCube1')) just check if actual type is compatible
# if none was given (ie generic pyObj=PyNode('pCube1')) then use the class corresponding to the type we found
newcls = None
if cls is not PyNode :
# a PyNode class was explicitly required, if an existing object was passed to init check that the object type
# is compatible with the required class, if no existing object was passed, create an empty PyNode of the required class
# There is one exception type: MeshVertex( Mesh( 'pSphere1') )
# TODO : can add object creation option in the __init__ if desired
if not pymelType or not issubclass( pymelType, cls ):
if issubclass( cls, Component ):
newcls = cls
else:
raise TypeError, "Determined type is %s, which is not a subclass of desired type %s" % ( pymelType.__name__, cls.__name__ )
else:
newcls = pymelType
else :
newcls = pymelType
if newcls :
self = super(PyNode, cls).__new__(newcls)
self._name = name
if attrNode:
self._node = attrNode
self.__apiobjects__ = obj
return self
else :
raise TypeError, "Cannot make a %s out of a %r object" % (cls.__name__, pymelType)
def __init__(self, *args, **kwargs):
# this prevents the _api class which is the second base, from being automatically instantiated. This __init__ should
# be overridden on subclasses of PyNode
pass
def __melobject__(self):
"""Special method for returning a mel-friendly representation."""
return self.name()
def __apimfn__(self):
try:
# if we have it, check that the mobject is still valid by calling
# __apimobject__
self.__apimobject__()
# ...if it is valid, go ahead and return the cached MFn
return self.__apiobjects__['MFn']
except KeyError:
if self.__apicls__:
# use whatever type is appropriate
obj = self.__apiobject__()
if obj:
try:
mfn = self.__apicls__(obj)
self.__apiobjects__['MFn'] = mfn
except RuntimeError:
# when using PyNodes in strange places, like node
# creation callbacks, the proper MFn does not work yet,
# so we default to a super class and we don't save it,
# so that we can get the right one later
if isinstance(obj, _api.MDagPath):
mfn = _api.MFnDagNode( obj )
_logger.warning( "Could not create desired MFn. Defaulting to MFnDagNode." )
elif isinstance(obj, _api.MObject):
mfn = _api.MFnDependencyNode( obj )
_logger.warning( "Could not create desired MFn. Defaulting to MFnDependencyNode." )
else:
raise
return mfn
def __repr__(self):
"""
:rtype: `unicode`
"""
data = {
"pools": [
{
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "valid"
},
{
"cbm": 0xf,
"cores": [3],
"id": 8,
"name": "invalid"
}
]
}
with mock.patch('common.PQOS_API.check_core', new=check_core):
with pytest.raises(ValueError, match="Invalid core 3"):
ConfigStore.validate(data)
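# For contrast (illustrative only, not asserted anywhere in this file): a
# config of the same shape with only valid cores, e.g.
#   {"pools": [{"cbm": 0xf0, "cores": [1], "id": 1, "name": "valid"}]}
# would be expected to pass ConfigStore.validate() under the mocked
# capability checks used in these tests.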
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_pool_duplicate_core(self):
data = {
"pools": [
{
"cbm": 0xf0,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
},
{
"cbm": 0xf,
"id": 10,
"cores": [3],
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="already assigned to another pool"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_pool_same_ids(self):
data = {
"pools": [
{
"cbm": 0xf0,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
},
{
"cbm": 0xf,
"id": 1,
"cores": [3],
"name": "pool 2"
}
]
}
with pytest.raises(ValueError, match="Pool 1, multiple pools with same id"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_pool_invalid_app(self):
data = {
"pools": [
{
"apps": [1, 3],
"cbm": 0xf0,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
],
"apps": [
{
"cores": [3],
"id": 1,
"name": "app 1",
"pids": [1]
}
]
}
with pytest.raises(KeyError, match="does not exist"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_pool_invalid_cbm(self):
data = {
"pools": [
{
"apps": [],
"cbm": 0x5,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="not contiguous"):
ConfigStore.validate(data)
data['pools'][0]['cbm'] = 0
with pytest.raises(ValueError, match="not contiguous"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=False))
def test_pool_cat_not_supported(self):
data = {
"pools": [
{
"apps": [],
"cbm": 0x4,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="CAT is not supported"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=False))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
def test_pool_cat_not_supported_mba(self):
data = {
"pools": [
{
"apps": [],
"cbm": 0x4,
"mba": 100,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="CAT is not supported"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
def test_pool_invalid_mba(self):
data = {
"pools": [
{
"mba": 101,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(jsonschema.exceptions.ValidationError, match="Failed validating 'maximum' in schema"):
ConfigStore.validate(data)
data['pools'][0]['mba'] = 0
with pytest.raises(jsonschema.exceptions.ValidationError, match="Failed validating 'minimum' in schema"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=False))
def test_pool_mba_not_supported(self):
data = {
"pools": [
{
"mba": 50,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="MBA is not supported"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_bw_supported", mock.MagicMock(return_value=False))
def test_pool_mba_bw_not_supported(self):
data = {
"pools": [
{
"mba_bw": 5000,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="MBA BW is not enabled/supported"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=False))
def test_pool_mba_not_supported_cat(self):
data = {
"pools": [
{
"cbm": 0xf,
"mba": 50,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="MBA is not supported"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_bw_supported", mock.MagicMock(return_value=False))
def test_pool_mba_bw_not_supported_cat(self):
data = {
"pools": [
{
"cbm": 0xf,
"mba_bw": 5000,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
}
]
}
with pytest.raises(ValueError, match="MBA BW is not enabled/supported"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_bw_supported", mock.MagicMock(return_value=True))
def test_pool_mba_mba_bw_enabled(self):
data = {
"rdt_iface": {"interface": "os"},
"mba_ctrl": {"enabled": True},
"pools": [
{
"cbm": 0xf,
"mba": 50,
"cores": [1, 3],
"id": 1,
"name": "pool 1"
},
{
"cbm": 0xf,
"mba": 70,
"cores": [2],
"id": 2,
"name": "pool 2"
}
]
}
with pytest.raises(ValueError, match="MBA % is not enabled. Disable MBA BW and try again"):
ConfigStore.validate(data)
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_invalid_core(self):
def check_core(core):
return core != 3
data = {
"pools": [
{
"apps": [1],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
}
],
"apps": [
{
"cores": [3],
"id": 1,
"name": "app 1",
"pids": [1]
}
]
}
with mock.patch('common.PQOS_API.check_core', new=check_core):
with pytest.raises(ValueError, match="Invalid core 3"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_core_does_not_match_pool(self):
data = {
"pools": [
{
"apps": [1],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
}
],
"apps": [
{
"cores": [3,4,5],
"id": 1,
"name": "app 1",
"pids": [1]
}
]
}
with pytest.raises(ValueError, match="App 1, cores {3, 4, 5} does not match Pool 1"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_without_pool(self):
data = {
"pools": [
{
"apps": [1],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
},
{
"cbm": 0xf0,
"cores": [2],
"id": 2,
"name": "pool 2"
}
],
"apps": [
{
"cores": [1],
"id": 1,
"name": "app 1",
"pids": [1]
},
{
"cores": [1],
"id": 2,
"name": "app 2",
"pids": [2]
}
]
}
with pytest.raises(ValueError, match="not assigned to any pool"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_multiple_pools(self):
data = {
"pools": [
{
"apps": [1],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
},
{
"apps": [1, 2],
"cbm": 0xf0,
"cores": [2],
"id": 2,
"name": "pool 2"
}
],
"apps": [
{
"cores": [1],
"id": 1,
"name": "app 1",
"pids": [1]
},
{
"cores": [2],
"id": 2,
"name": "app 2",
"pids": [2]
}
]
}
with pytest.raises(ValueError, match="App 1, Assigned to more than one pool"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_same_ids(self):
data = {
"pools": [
{
"apps": [1],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
},
],
"apps": [
{
"cores": [1],
"id": 1,
"name": "app 1",
"pids": [1]
},
{
"cores": [1],
"id": 1,
"name": "app 2",
"pids": [1]
}
]
}
with pytest.raises(ValueError, match="App 1, multiple apps with same id"):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_same_pid(self):
data = {
"pools": [
{
"apps": [1, 2],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
},
],
"apps": [
{
"cores": [1],
"id": 1,
"name": "app 1",
"pids": [1]
},
{
"cores": [1],
"id": 2,
"name": "app 2",
"pids": [1]
}
]
}
with pytest.raises(ValueError, match=r"App 2, PIDs \{1} already assigned to another App."):
ConfigStore.validate(data)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_l3_supported", mock.MagicMock(return_value=True))
def test_app_invalid_pid(self):
data = {
"pools": [
{
"apps": [1, 2],
"cbm": 0xf0,
"cores": [1],
"id": 1,
"name": "pool 1"
},
],
"apps": [
{
"cores": [1],
"id": 1,
"name": "app 1",
"pids": [1]
},
{
"cores": [1],
"id": 2,
"name": "app 2",
"pids": [99999]
}
]
}
with pytest.raises(ValueError, match="App 2, PID 99999 is not valid"):
ConfigStore.validate(data)
def test_power_profile_expert_mode_invalid(self):
data = {
"pools": [],
"apps": [],
"power_profiles_expert_mode": None
}
with pytest.raises(jsonschema.exceptions.ValidationError, match="None is not of type 'boolean'"):
ConfigStore.validate(data)
data['power_profiles_expert_mode'] = 1
with pytest.raises(jsonschema.exceptions.ValidationError, match="1 is not of type 'boolean'"):
ConfigStore.validate(data)
data['power_profiles_expert_mode'] = []
with pytest.raises(jsonschema.exceptions.ValidationError, match="\\[\\] is not of type 'boolean'"):
ConfigStore.validate(data)
def test_power_profile_verify_invalid(self):
data = {
"pools": [],
"apps": [],
"power_profiles_verify": None
}
with pytest.raises(jsonschema.exceptions.ValidationError, match="None is not of type 'boolean'"):
ConfigStore.validate(data)
data['power_profiles_verify'] = 1
with pytest.raises(jsonschema.exceptions.ValidationError, match="1 is not of type 'boolean'"):
ConfigStore.validate(data)
data['power_profiles_verify'] = []
with pytest.raises(jsonschema.exceptions.ValidationError, match="\\[\\] is not of type 'boolean'"):
ConfigStore.validate(data)
def test_rdt_iface_invalid(self):
data = {
"pools": [],
"apps": [],
"rdt_iface": "os"
}
with pytest.raises(jsonschema.exceptions.ValidationError, match="'os' is not of type 'object'"):
ConfigStore.validate(data)
data['rdt_iface'] = {}
with pytest.raises(jsonschema.exceptions.ValidationError, match="'interface' is a required property"):
ConfigStore.validate(data)
data['rdt_iface']['interface'] = None
with pytest.raises(jsonschema.exceptions.ValidationError, match="None is not of type 'string'"):
ConfigStore.validate(data)
data['rdt_iface']['interface'] = 2
with pytest.raises(jsonschema.exceptions.ValidationError, match="2 is not of type 'string'"):
ConfigStore.validate(data)
data['rdt_iface']['interface'] = "test_string"
with pytest.raises(jsonschema.exceptions.ValidationError, match="'test_string' is not one of \\['msr', 'os'\\]"):
ConfigStore.validate(data)
def test_mba_ctrl_invalid(self):
data = {
"pools": [],
"apps": [],
"mba_ctrl": True
}
with pytest.raises(jsonschema.exceptions.ValidationError, match="True is not of type 'object'"):
ConfigStore.validate(data)
data['mba_ctrl'] = {}
with pytest.raises(jsonschema.exceptions.ValidationError, match="'enabled' is a required property"):
ConfigStore.validate(data)
data['mba_ctrl']['enabled'] = None
with pytest.raises(jsonschema.exceptions.ValidationError, match="None is not of type 'boolean'"):
ConfigStore.validate(data)
data['mba_ctrl']['enabled'] = 2
with pytest.raises(jsonschema.exceptions.ValidationError, match="2 is not of type 'boolean'"):
ConfigStore.validate(data)
data['mba_ctrl']['enabled'] = "test_string"
with pytest.raises(jsonschema.exceptions.ValidationError, match="'test_string' is not of type 'boolean'"):
ConfigStore.validate(data)
data['mba_ctrl']['enabled'] = True
with pytest.raises(ValueError, match="MBA CTRL requires RDT OS interface"):
ConfigStore.validate(data)
data['rdt_iface'] = {"interface": "msr"}
with pytest.raises(ValueError, match="MBA CTRL requires RDT OS interface"):
ConfigStore.validate(data)
@mock.patch('config.ConfigStore.get_config')
@pytest.mark.parametrize("cfg, result", [
({}, "msr"),
({"rdt_iface": {"interface": "msr"}}, "msr"),
({"rdt_iface": {"interface": "msr_test"}}, "msr_test"),
({"rdt_iface": {"interface": "os_test"}}, "os_test"),
({"rdt_iface": {"interface": "os"}}, "os")
])
def test_get_rdt_iface(mock_get_config, cfg, result):
mock_get_config.return_value = cfg
config_store = ConfigStore()
assert config_store.get_rdt_iface() == result
@mock.patch('config.ConfigStore.get_config')
@pytest.mark.parametrize("cfg, result", [
({}, False),
({"mba_ctrl": {"enabled": True}}, True),
({"mba_ctrl": {"enabled": False}}, False)
])
def test_get_mba_ctrl_enabled(mock_get_config, cfg, result):
mock_get_config.return_value = cfg
from ...base import *
class HistoryMixin:
""" GeomDataObject class mix-in """
def __init__(self):
self._subobj_change = {"vert": {}, "edge": {}, "poly": {}}
def get_data_to_store(self, event_type="", prop_id="", info="", unique_id=False):
data = {}
unique_prop_ids = self._unique_prop_ids
obj_id = self.toplevel_obj.id
cur_time_id = Mgr.do("get_history_time")
extra_normal_data = None
extra_normal_lock_data = None
if event_type == "creation":
data["geom_data"] = {"main": self}
for prop_id in self._prop_ids:
data.update(self.get_property_to_store(prop_id, event_type))
self._normal_change = set()
prev_time_ids = (cur_time_id,)
for subobj_type in ("vert", "edge", "poly"):
subobjs = self._subobjs[subobj_type]
for subobj in subobjs.values():
subobj.creation_time = cur_time_id
pickled_objs = {s_id: pickle.dumps(s, -1) for s_id, s in subobjs.items()}
unique_prop_id = unique_prop_ids[f"{subobj_type}__extra__"]
extra_data = {unique_prop_id: {"created": pickled_objs}}
unique_prop_id = unique_prop_ids[f"{subobj_type}s"]
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
elif event_type == "deletion":
for prop_id in ("subobj_transform", "poly_tris", "uvs", "normals", "normal_lock"):
data.update(self.get_property_to_store(prop_id, event_type))
for subobj_type in ("vert", "edge", "poly"):
unique_prop_id = unique_prop_ids[f"{subobj_type}s"]
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
prev_time_ids += (cur_time_id,)
subobjs = self._subobjs[subobj_type].items()
creation_times = {s_id: s.creation_time for s_id, s in subobjs}
unique_prop_id = unique_prop_ids[f"{subobj_type}__extra__"]
extra_data = {unique_prop_id: {"deleted": creation_times}}
unique_prop_id = unique_prop_ids[f"{subobj_type}s"]
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
toplvl_obj = self.toplevel_obj
data["tangent space"] = {"main": toplvl_obj.get_property("tangent space")}
elif event_type == "subobj_change":
for prop_id in ("subobj_merge", "subobj_transform", "subobj_selection", "poly_tris", "uvs"):
data.update(self.get_property_to_store(prop_id, event_type))
subobj_change = self._subobj_change
deleted_verts = subobj_change["vert"].get("deleted")
if deleted_verts:
normal_data = self.get_property_to_store("normals", event_type)
data.update(normal_data)
unique_prop_id = unique_prop_ids["normal__extra__"]
extra_normal_data = list(normal_data.values())[0]["extra"][unique_prop_id]
normal_lock_data = self.get_property_to_store("normal_lock", event_type)
data.update(normal_lock_data)
unique_prop_id = unique_prop_ids["normal_lock__extra__"]
extra_normal_lock_data = list(normal_lock_data.values())[0]["extra"][unique_prop_id]
for subobj_type in ("vert", "edge", "poly"):
unique_prop_id = unique_prop_ids[f"{subobj_type}s"]
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
prev_time_ids += (cur_time_id,)
data_to_store = {}
if "deleted" in subobj_change[subobj_type]:
deleted_subobjs = subobj_change[subobj_type]["deleted"]
# creation times of the deleted subobjects
creation_times = {s.id: s.creation_time for s in deleted_subobjs}
data_to_store["deleted"] = creation_times
if "created" in subobj_change[subobj_type]:
created_subobjs = subobj_change[subobj_type]["created"]
pickled_objs = {}
for subobj in created_subobjs:
subobj.creation_time = cur_time_id
pickled_objs[subobj.id] = pickle.dumps(subobj, -1)
data_to_store["created"] = pickled_objs
extra_data = {unique_prop_ids[f"{subobj_type}__extra__"]: data_to_store}
data[unique_prop_ids[f"{subobj_type}s"]] = {"main": prev_time_ids, "extra": extra_data}
self._subobj_change = {"vert": {}, "edge": {}, "poly": {}}
elif event_type == "prop_change":
unique_prop_id = prop_id if unique_id else (unique_prop_ids[prop_id]
if prop_id in unique_prop_ids else None)
if unique_prop_id in self.get_property_ids(unique=True):
data = self.get_property_to_store(unique_prop_id, event_type, info, unique_id=True)
if self._normal_change:
normal_data = self.get_property_to_store("normals", "prop_change")
if extra_normal_data:
unique_prop_id = unique_prop_ids["normal__extra__"]
extra_data = list(normal_data.values())[0]["extra"][unique_prop_id]
extra_normal_data["prev"].update(extra_data["prev"])
extra_normal_data["normals"].update(extra_data["normals"])
else:
data.update(normal_data)
if self._normal_lock_change:
normal_lock_data = self.get_property_to_store("normal_lock", "prop_change")
if extra_normal_lock_data:
unique_prop_id = unique_prop_ids["normal_lock__extra__"]
extra_data = list(normal_lock_data.values())[0]["extra"][unique_prop_id]
extra_normal_lock_data["prev"].update(extra_data["prev"])
extra_normal_lock_data["normal_lock"].update(extra_data["normal_lock"])
else:
data.update(normal_lock_data)
if self._normal_sharing_change:
data.update(self.get_property_to_store("normal_sharing"))
if self._poly_smoothing_change:
data.update(self.get_property_to_store("smoothing"))
return data
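# Rough shape of the dict built above (inferred from this method, not from any
# original documentation): keys are unique property ids, each mapping to
# {"main": ..., "extra": {...}}. For example, a "creation" event contributes
# entries along the lines of
#   data[unique_prop_ids["verts"]] = {
#       "main": (cur_time_id,),
#       "extra": {unique_prop_ids["vert__extra__"]: {"created": pickled_objs}}
#   }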
def get_property_to_store(self, prop_id, event_type="", info="", unique_id=False):
data = {}
unique_prop_ids = self._unique_prop_ids
unique_prop_id = prop_id if unique_id else (unique_prop_ids[prop_id]
if prop_id in unique_prop_ids else None)
if unique_prop_id == unique_prop_ids["subobj_merge"]:
data[unique_prop_id] = {"main": (self.merged_verts, self.merged_edges)}
elif unique_prop_id == unique_prop_ids["uv_set_names"]:
data[unique_prop_id] = {"main": self._uv_set_names}
elif unique_prop_id == unique_prop_ids["normal_length"]:
data[unique_prop_id] = {"main": self._normal_length}
elif unique_prop_id == unique_prop_ids["normal_sharing"]:
data[unique_prop_id] = {"main": self.shared_normals}
self._normal_sharing_change = False
elif unique_prop_id == unique_prop_ids["smoothing"]:
data[unique_prop_id] = {"main": self._poly_smoothing}
self._poly_smoothing_change = False
elif unique_prop_id == unique_prop_ids["subobj_selection"]:
sel_subobj_ids = copy.deepcopy(self._selected_subobj_ids)
sel_subobj_ids.update(self._sel_subobj_ids_backup)
data[unique_prop_id] = {"main": sel_subobj_ids}
elif unique_prop_id == unique_prop_ids["subobj_transform"]:
obj_id = self.toplevel_obj.id
subobj_lvl = GD["active_obj_level"]
pos_data = {"prev": {}, "pos": {}}
extra_data = {unique_prop_ids["vert_pos__extra__"]: pos_data}
cur_time_id = Mgr.do("get_history_time")
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
verts = self._subobjs["vert"]
if prev_time_ids:
prev_time_ids += (cur_time_id,)
else:
prev_time_ids = (cur_time_id,)
if event_type == "creation":
for vert_id, vert in verts.items():
pos = vert.get_pos()
pos_data["pos"][vert_id] = pos
vert.set_previous_property_time("transform", cur_time_id)
elif event_type == "deletion":
for vert_id, vert in verts.items():
time_id = vert.get_previous_property_time("transform")
pos_data["prev"][vert_id] = time_id
elif event_type == "subobj_change":
deleted_verts = self._subobj_change["vert"].get("deleted", [])
for vert in deleted_verts:
time_id = vert.get_previous_property_time("transform")
pos_data["prev"][vert.id] = time_id
created_verts = self._subobj_change["vert"].get("created", [])
for vert in created_verts:
pos = vert.get_pos()
pos_data["pos"][vert.id] = pos
vert.set_previous_property_time("transform", cur_time_id)
elif event_type == "prop_change":
if info == "all":
xformed_verts = set(self.merged_verts.values())
elif info == "check":
xformed_verts = set(self._transformed_verts)
self._transformed_verts = set()
else:
xformed_verts = self._verts_to_transf[subobj_lvl]
for merged_vert in xformed_verts:
pos = merged_vert.get_pos()
for vert_id in merged_vert:
# since it can happen that a MergedVertex references both previously
# transformed Vertex objects and newly created ones, the MergedVertex
# cannot be relied upon to get a single previous transform time for
# all the Vertex objects it references, so this has to be retrieved
# per Vertex object
time_id = verts[vert_id].get_previous_property_time("transform")
pos_data["pos"][vert_id] = pos
pos_data["prev"][vert_id] = time_id
merged_vert.set_previous_property_time("transform", cur_time_id)
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
elif unique_prop_id == unique_prop_ids["poly_tris"]:
obj_id = self.toplevel_obj.id
subobj_lvl = GD["active_obj_level"]
tri_data = {"prev": {}, "tri_data": {}}
extra_data = {unique_prop_ids["tri__extra__"]: tri_data}
cur_time_id = Mgr.do("get_history_time")
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
polys = self._subobjs["poly"]
if prev_time_ids:
prev_time_ids += (cur_time_id,)
else:
prev_time_ids = (cur_time_id,)
if event_type == "creation":
for poly_id, poly in polys.items():
tris = poly[:]
tri_data["tri_data"][poly_id] = tris
poly.set_previous_property_time("tri_data", cur_time_id)
elif event_type == "deletion":
for poly_id, poly in polys.items():
time_id = poly.get_previous_property_time("tri_data")
tri_data["prev"][poly_id] = time_id
elif event_type == "subobj_change":
deleted_polys = self._subobj_change["poly"].get("deleted", [])
for poly in deleted_polys:
time_id = poly.get_previous_property_time("tri_data")
tri_data["prev"][poly.id] = time_id
created_polys = self._subobj_change["poly"].get("created", [])
for poly in created_polys:
tris = poly[:]
tri_data["tri_data"][poly.id] = tris
poly.set_previous_property_time("tri_data", cur_time_id)
elif event_type == "prop_change":
for poly_id in self._tri_change:
poly = polys[poly_id]
tris = poly[:]
time_id = poly.get_previous_property_time("tri_data")
tri_data["tri_data"][poly_id] = tris
tri_data["prev"][poly_id] = time_id
poly.set_previous_property_time("tri_data", cur_time_id)
self._tri_change = set()
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
elif unique_prop_id == unique_prop_ids["uvs"]:
obj_id = self.toplevel_obj.id
uv_data = {"prev": {}, "uvs": {}}
extra_data = {unique_prop_ids["uv__extra__"]: uv_data}
cur_time_id = Mgr.do("get_history_time")
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
verts = self._subobjs["vert"]
if prev_time_ids:
prev_time_ids += (cur_time_id,)
else:
prev_time_ids = (cur_time_id,)
if event_type == "creation":
for vert_id, vert in verts.items():
uvs = vert.get_uvs()
uv_data["uvs"][vert_id] = uvs
vert.set_previous_property_time("uvs", cur_time_id)
elif event_type == "deletion":
for vert_id, vert in verts.items():
time_id = vert.get_previous_property_time("uvs")
uv_data["prev"][vert_id] = time_id
elif event_type == "subobj_change":
created_verts = self._subobj_change["vert"].get("created", [])
for vert in created_verts:
uvs = vert.get_uvs()
uv_data["uvs"][vert.id] = uvs
vert.set_previous_property_time("uvs", cur_time_id)
deleted_verts = self._subobj_change["vert"].get("deleted", [])
for vert in deleted_verts:
time_id = vert.get_previous_property_time("uvs")
uv_data["prev"][vert.id] = time_id
elif event_type == "prop_change":
for vert_id in self._uv_change:
vert = verts[vert_id]
uvs = vert.get_uvs()
time_id = vert.get_previous_property_time("uvs")
uv_data["uvs"][vert_id] = uvs
uv_data["prev"][vert_id] = time_id
vert.set_previous_property_time("uvs", cur_time_id)
self._uv_change = set()
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
elif unique_prop_id == unique_prop_ids["normals"]:
obj_id = self.toplevel_obj.id
normal_data = {"prev": {}, "normals": {}}
extra_data = {unique_prop_ids["normal__extra__"]: normal_data}
cur_time_id = Mgr.do("get_history_time")
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
verts = self._subobjs["vert"]
if prev_time_ids:
prev_time_ids += (cur_time_id,)
else:
prev_time_ids = (cur_time_id,)
if event_type == "creation":
for vert_id, vert in verts.items():
normal = vert.normal
normal_data["normals"][vert_id] = normal
vert.set_previous_property_time("normal", cur_time_id)
elif event_type == "deletion":
for vert_id, vert in verts.items():
time_id = vert.get_previous_property_time("normal")
normal_data["prev"][vert_id] = time_id
elif event_type == "subobj_change":
created_verts = self._subobj_change["vert"].get("created", [])
for vert in created_verts:
normal = vert.normal
normal_data["normals"][vert.id] = normal
vert.set_previous_property_time("normal", cur_time_id)
deleted_verts = self._subobj_change["vert"].get("deleted", [])
for vert in deleted_verts:
time_id = vert.get_previous_property_time("normal")
normal_data["prev"][vert.id] = time_id
elif event_type == "prop_change":
if info == "all":
normal_change = verts
else:
normal_change = self._normal_change
for vert_id in normal_change:
vert = verts[vert_id]
normal = vert.normal
time_id = vert.get_previous_property_time("normal")
normal_data["normals"][vert_id] = normal
normal_data["prev"][vert_id] = time_id
vert.set_previous_property_time("normal", cur_time_id)
self._normal_change = set()
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
elif unique_prop_id == unique_prop_ids["normal_lock"]:
obj_id = self.toplevel_obj.id
lock_data = {"prev": {}, "normal_lock": {}}
extra_data = {unique_prop_ids["normal_lock__extra__"]: lock_data}
cur_time_id = Mgr.do("get_history_time")
prev_time_ids = Mgr.do("load_last_from_history", obj_id, unique_prop_id)
verts = self._subobjs["vert"]
if prev_time_ids:
prev_time_ids += (cur_time_id,)
else:
prev_time_ids = (cur_time_id,)
if event_type == "creation":
for vert_id, vert in verts.items():
locked = vert.has_locked_normal()
lock_data["normal_lock"][vert_id] = locked
vert.set_previous_property_time("normal_lock", cur_time_id)
elif event_type == "deletion":
for vert_id, vert in verts.items():
time_id = vert.get_previous_property_time("normal_lock")
lock_data["prev"][vert_id] = time_id
elif event_type == "subobj_change":
created_verts = self._subobj_change["vert"].get("created", [])
for vert in created_verts:
locked = vert.has_locked_normal()
lock_data["normal_lock"][vert.id] = locked
vert.set_previous_property_time("normal_lock", cur_time_id)
deleted_verts = self._subobj_change["vert"].get("deleted", [])
for vert in deleted_verts:
time_id = vert.get_previous_property_time("normal_lock")
lock_data["prev"][vert.id] = time_id
elif event_type == "prop_change":
if info == "all":
lock_change = verts
else:
lock_change = self._normal_lock_change
for vert_id in lock_change:
vert = verts[vert_id]
locked = vert.has_locked_normal()
time_id = vert.get_previous_property_time("normal_lock")
lock_data["normal_lock"][vert_id] = locked
lock_data["prev"][vert_id] = time_id
vert.set_previous_property_time("normal_lock", cur_time_id)
self._normal_lock_change = set()
data[unique_prop_id] = {"main": prev_time_ids, "extra": extra_data}
return data
def restore_data(self, data_ids, restore_type, old_time_id, new_time_id):
obj_id = self.toplevel_obj.id
if "self" in data_ids:
cancellable = True if GD["loading_scene"] else False
for prop_id in self.get_property_ids(unique=True):
self.__restore_property(prop_id, restore_type, old_time_id, new_time_id,
cancellable)
task = lambda: self.__recreate_geometry(old_time_id, new_time_id)
m = M(content)
expected1 = Project('testproject_v1', 'https://url1.com/testproject',
revision='v1.0')
expected2 = Project('testproject_v2', 'https://url1.com/testproject',
revision='v2.0')
check_proj_consistency(m.projects[1], expected1)
check_proj_consistency(m.projects[2], expected2)
def test_project_clone_depth():
ps = M('''\
projects:
- name: foo
url: u1
- name: bar
url: u2
clone-depth: 4
''').projects
assert ps[1].clone_depth is None
assert ps[2].clone_depth == 4
def test_project_west_commands():
# Projects may also specify subdirectories with west commands.
m = M('''\
projects:
- name: zephyr
url: https://foo.com
west-commands: some-path/west-commands.yml
''')
assert m.projects[1].west_commands == ['some-path/west-commands.yml']
def test_project_git_methods(tmpdir):
# Test the internal consistency of the various methods that call
# out to git.
# Just manually create a Project instance. We don't need a full
# Manifest.
path = tmpdir / 'project'
p = Project('project', 'ignore-this-url', topdir=tmpdir)
# Helper for getting the contents of a.txt at a revision.
def a_content_at(rev):
return p.git(f'show {rev}:a.txt', capture_stderr=True,
capture_stdout=True).stdout.decode('ascii')
# The project isn't cloned yet.
assert not p.is_cloned()
# Create it, then verify the API knows it's cloned.
# Cache the current SHA.
create_repo(path)
assert p.is_cloned()
start_sha = p.sha('HEAD')
# If a.txt doesn't exist at a revision, we can't read it. If it
# does, we can.
with pytest.raises(subprocess.CalledProcessError):
a_content_at('HEAD')
add_commit(path, 'add a.txt', files={'a.txt': 'a'})
a_sha = p.sha('HEAD')
with pytest.raises(subprocess.CalledProcessError):
a_content_at(start_sha)
assert a_content_at(a_sha) == 'a'
# Checks for read_at() and listdir_at().
add_commit(path, 'add b.txt', files={'b.txt': 'b'})
b_sha = p.sha('HEAD')
assert p.read_at('a.txt', rev=a_sha) == b'a'
with pytest.raises(subprocess.CalledProcessError):
p.read_at('a.txt', rev=start_sha)
assert p.listdir_at('', rev=start_sha) == []
assert p.listdir_at('', rev=a_sha) == ['a.txt']
assert sorted(p.listdir_at('', rev=b_sha)) == ['a.txt', 'b.txt']
# p.git() should be able to take a cwd kwarg which is a PathLike
# or a str.
p.git('log -1', cwd=path)
p.git('log -1', cwd=str(path))
# Basic checks for functions which operate on commits.
assert a_content_at(a_sha) == 'a'
assert p.is_ancestor_of(start_sha, a_sha)
assert not p.is_ancestor_of(a_sha, start_sha)
assert p.is_up_to_date_with(start_sha)
assert p.is_up_to_date_with(a_sha)
assert p.is_up_to_date_with(b_sha)
p.revision = b_sha
assert p.is_up_to_date()
p.git(f'reset --hard {a_sha}')
assert not p.is_up_to_date()
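# For reference (illustrative, not part of the test): in a real workspace the
# same Project API is driven against a cloned repository, e.g.
#   p = Project('zephyr', 'https://example.com/zephyr', topdir='/path/to/ws')
#   if p.is_cloned() and not p.is_up_to_date():
#       p.git('fetch')
# The name, URL and path above are made up for this example.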
def test_project_repr():
m = M('''\
projects:
- name: zephyr
url: https://foo.com
revision: r
west-commands: some-path/west-commands.yml
''')
assert repr(m.projects[1]) == \
'Project("zephyr", "https://foo.com", revision="r", path=\'zephyr\', clone_depth=None, west_commands=[\'some-path/west-commands.yml\'], topdir=None, groups=[], userdata=None)' # noqa: E501
def test_project_sha(tmpdir):
tmpdir = Path(os.fspath(tmpdir))
create_repo(tmpdir)
add_tag(tmpdir, 'test-tag')
expected_sha = rev_parse(tmpdir, 'HEAD^{commit}')
project = Project('name',
'url-do-not-fetch',
revision='test-tag',
path=tmpdir.name,
topdir=tmpdir.parent)
assert project.sha(project.revision) == expected_sha
def test_project_userdata(tmpdir):
m = M('''\
defaults:
remote: r
remotes:
- name: r
url-base: base
projects:
- name: foo
- name: bar
userdata: a-string
- name: baz
userdata:
key: value
''')
foo, bar, baz = m.get_projects(['foo', 'bar', 'baz'])
assert foo.userdata is None
assert bar.userdata == 'a-string'
assert baz.userdata == {'key': 'value'}
assert 'userdata' not in foo.as_dict()
assert 'a-string' == bar.as_dict()['userdata']
def test_self_userdata(tmpdir):
m = M('''
defaults:
remote: r
remotes:
- name: r
url-base: base
projects:
- name: bar
self:
path: foo
userdata:
key: value
''')
foo, bar = m.get_projects(['manifest', 'bar'])
assert m.userdata == {'key': 'value'}
assert foo.userdata == {'key': 'value'}
assert bar.userdata is None
assert 'userdata' in foo.as_dict()
assert 'userdata' not in bar.as_dict()
def test_self_missing_userdata(tmpdir):
m = M('''
defaults:
remote: r
remotes:
- name: r
url-base: base
projects:
- name: bar
self:
path: foo
''')
foo, bar = m.get_projects(['manifest', 'bar'])
assert m.userdata is None
assert foo.userdata is None
assert bar.userdata is None
assert 'userdata' not in foo.as_dict()
assert 'userdata' not in bar.as_dict()
def test_no_projects():
# An empty projects list is allowed.
m = Manifest.from_data('manifest: {}')
assert len(m.projects) == 1 # just ManifestProject
m = M('''
self:
path: foo
''')
assert len(m.projects) == 1 # just ManifestProject
#########################################
# Tests for the manifest repository
def test_manifest_project():
# Basic test that the manifest repository, when represented as a project,
# has attributes which make sense when loaded from data.
# Case 1: everything at defaults
m = M('''\
projects:
- name: name
url: url
''')
mp = m.projects[0]
assert mp.name == 'manifest'
assert mp.path is None
assert mp.topdir is None
assert mp.abspath is None
assert mp.posixpath is None
assert mp.url == ''
assert mp.revision == 'HEAD'
assert mp.clone_depth is None
# Case 2: path and west-commands are specified
m = M('''\
projects:
- name: name
url: url
self:
path: my-path
west-commands: cmds.yml
''')
mp = m.projects[0]
assert mp.name == 'manifest'
assert mp.path == 'my-path'
assert m.yaml_path == 'my-path'
assert mp.west_commands == ['cmds.yml']
assert mp.topdir is None
assert mp.abspath is None
assert mp.posixpath is None
assert mp.url == ''
assert mp.revision == 'HEAD'
assert mp.clone_depth is None
def test_self_tag():
# Manifests may contain a self section describing the manifest
# repository. It should work with multiple projects and remotes as
# expected.
m = M('''\
remotes:
- name: testremote1
url-base: https://example1.com
- name: testremote2
url-base: https://example2.com
projects:
- name: testproject1
remote: testremote1
revision: rev1
- name: testproject2
remote: testremote2
self:
path: the-manifest-path
west-commands: scripts/west_commands
''')
expected = [ManifestProject(path='the-manifest-path',
west_commands='scripts/west_commands'),
Project('testproject1', 'https://example1.com/testproject1',
revision='rev1'),
Project('testproject2', 'https://example2.com/testproject2')]
# Check the projects are as expected.
for p, e in zip(m.projects, expected):
check_proj_consistency(p, e)
# With a "self: path:" value, that will be available in the
# yaml_path attribute, but all other absolute and relative
# attributes are None since we aren't reading from a workspace.
assert m.abspath is None
assert m.relative_path is None
assert m.yaml_path == 'the-manifest-path'
assert m.repo_abspath is None
# If "self: path:" is missing, we won't have a yaml_path attribute.
m = M('''\
projects:
- name: p
url: u
''')
assert m.yaml_path is None
# Empty paths are an error.
with pytest.raises(MalformedManifest) as e:
M('''\
projects: []
self:
path:''')
assert 'must be nonempty if present' in str(e.value)
#########################################
# File system tests
#
# Parsing manifests from data is the base case that everything else
# reduces to, but parsing may also be done from files on the file
# system, or "as if" it were done from files on the file system.
def test_from_topdir(tmp_workspace):
# If you load from topdir along with some source data, you will
# get absolute paths.
#
# This is true of both projects and the manifest itself.
topdir = Path(str(tmp_workspace))
repo_abspath = topdir / 'mp'
relpath = Path('mp') / 'west.yml'
abspath = topdir / relpath
mf = topdir / relpath
# Case 1: manifest has no "self: path:".
with open(mf, 'w', encoding='utf-8') as f:
f.write('''
manifest:
projects:
- name: my-cool-project
url: from-manifest-dir
''')
m = Manifest.from_topdir(topdir=topdir)
# Path-related Manifest attribute tests.
assert Path(m.abspath) == mf
assert m.posixpath == mf.as_posix()
assert Path(m.relative_path) == relpath
assert m.yaml_path is None
assert Path(m.repo_abspath) == repo_abspath
assert m.repo_posixpath == repo_abspath.as_posix()
assert Path(m.topdir) == topdir
# Legacy ManifestProject tests.
mproj = m.projects[MANIFEST_PROJECT_INDEX]
assert Path(mproj.topdir) == topdir
assert Path(mproj.path) == Path('mp')
# Project tests.
p1 = m.projects[1]
assert Path(p1.topdir) == Path(topdir)
assert Path(p1.abspath) == Path(topdir / 'my-cool-project')
# Case 2: manifest has a "self: path:", which disagrees with the
# actual file system path.
with open(mf, 'w', encoding='utf-8') as f:
f.write('''
manifest:
projects:
- name: my-cool-project
url: from-manifest-dir
self:
path: something/else
''')
m = Manifest.from_topdir(topdir=topdir)
# Path-related Manifest attribute tests.
assert Path(m.abspath) == abspath
assert m.posixpath == abspath.as_posix()
assert Path(m.relative_path) == relpath
assert m.yaml_path == 'something/else'
assert Path(m.repo_abspath) == repo_abspath
assert m.repo_posixpath == repo_abspath.as_posix()
assert Path(m.topdir) == topdir
# Legacy ManifestProject tests.
mproj = m.projects[MANIFEST_PROJECT_INDEX]
assert Path(mproj.topdir).is_absolute()
assert Path(mproj.topdir) == topdir
assert Path(mproj.path) == Path('mp')
assert Path(mproj.abspath).is_absolute()
assert Path(mproj.abspath) == repo_abspath
# Project tests.
p1 = m.projects[1]
assert Path(p1.topdir) == Path(topdir)
assert Path(p1.abspath) == topdir / 'my-cool-project'
# Case 3: project has a path. This always takes effect.
with open(mf, 'w', encoding='utf-8') as f:
f.write('''
manifest:
projects:
- name: my-cool-project
url: from-manifest-dir
path: project-path
self:
path: something/else
''')
m = Manifest.from_topdir(topdir=topdir)
p1 = m.projects[1]
assert p1.path == 'project-path'
assert Path(p1.abspath) == topdir / 'project-path'
assert p1.posixpath == (topdir / 'project-path').as_posix()
def test_manifest_path_not_found(tmp_workspace):
# Make sure manifest_path() raises FileNotFoundError if the
# manifest file specified in .west/config doesn't exist.
# Here, we rely on tmp_workspace not actually creating the file.
with pytest.raises(FileNotFoundError) as e:
manifest_path()
assert e.value.filename == tmp_workspace / 'mp' / 'west.yml'
def test_manifest_path_conflicts(tmp_workspace):
# Project path conflicts with the manifest path are errors. This
# is true when we have an explicit file system path, but it is not
# true when loading from data, where | |
#!/usr/bin/env python3
from os import listdir
from os.path import join
from fnmatch import filter
from sys import exit
from datetime import datetime
from shared.random import Random
from shared.utils import *
from bot_utils import *
HELP_TEXT = """<b>Help:</b>
/help - This Message
/start - Start the Bot
/status - Bot Status
/uptime - Bot Uptime
/source - Nobotty's Source Code on Git
/speedsheets - Telegram SpeedSheets
/dev - Developer Sheets
/game - Game SpeedSheets
/8-ball - Ask 🎱
/doh - Doh!
/jibber - Jibber Jabber
/klaxon - Sound the Alarm!
/llama - Llama Llama Llama!
/norris - <NAME>
/tacos - Taco Tuesday
/who - Who knows?
"""
ALT_HELP_TEXT = """<b>Alt Help:</b>
/anvil - Anvil Rocks!
/banana - Bananas are good!
/banana-for-scale - Banana for scale.
/brick - Brick!
/cake - Is there cake?
/first - Who goes first?
/evil-bot
/glados - GLaDOS
/hal - HAL 9000
/insufficient-data - Insufficient Data!
/nobotty-knows - Who knows?
/sensible-chuckle
/unsupervised
/list-simpsons - List available Simpson quotes.
"""
SPEEDSHEETS_TEXT = """Telegram Speedsheets:
<a href="https://speedsheet.io/s/telegram">Telegram SpeedSheet</>
<a href="https://speedsheet.io/s/aiogram">aiogram SpeedSheet</>
"""
DEVELOPER_SPEEDSHEETS_TEXT = """<b>Developer Speedsheet Links:</b>
Dev Box (All Dev Sheets) <a href="https://speedsheet.io/s/dev_box">https://speedsheet.io/s/dev_box</>
Git <a href="https://speedsheet.io/s/git">https://speedsheet.io/s/git</>
Markdown <a href="https://speedsheet.io/s/markdown">https://speedsheet.io/s/markdown</>
Stash <a href="https://speedsheet.io/s/stash">https://speedsheet.io/s/stash</>
Bash Scripting <a href="https://speedsheet.io/s/bash">https://speedsheet.io/s/bash</>
Unix Commands <a href="https://speedsheet.io/s/unix">https://speedsheet.io/s/unix</>
Python <a href="https://speedsheet.io/s/python">https://speedsheet.io/s/python</>
Requests <a href="https://speedsheet.io/s/requests">https://speedsheet.io/s/requests</>
aiohttp <a href="https://speedsheet.io/s/aiohttp">https://speedsheet.io/s/aiohttp</>
aiogram <a href="https://speedsheet.io/s/aiogram">https://speedsheet.io/s/aiogram</>
MySql <a href="https://speedsheet.io/s/mysql">https://speedsheet.io/s/mysql</>
Redis <a href="https://speedsheet.io/s/redis">https://speedsheet.io/s/redis</>
Raspberry Pi <a href="https://speedsheet.io/s/raspberry_pi">https://speedsheet.io/s/raspberry_pi</>
"""
GAME_SPEEDSHEETS_TEXT = """<b>Game Speedsheet Links:</b>
All Game Sheets (Complete List) <a href="https://speedsheet.io/s/games/games">https://speedsheet.io/s/games/games</>
7 Wonders <a href="https://speedsheet.io/s/games/7_wonders">https://speedsheet.io/s/games/7_wonders</>
Age of Steam <a href="https://speedsheet.io/s/games/age_of_steam">https://speedsheet.io/s/games/age_of_steam</>
Castles of Burgundy <a href="https://speedsheet.io/s/games/castles_of_burgundy">https://speedsheet.io/s/games/castles_of_burgundy</>
Concordia <a href="https://speedsheet.io/s/games/concordia">https://speedsheet.io/s/games/concordia</>
Tabletop Simulator <a href="https://speedsheet.io/s/games/tabletop_simulator">https://speedsheet.io/s/games/tabletop_simulator</>
"""
BANANA_PHOTO_FILE = "banana.jpg"
BANANA_FOR_SCALE_PHOTO_FILE = "banana-for-scale.png"
DOH_PHOTO_FILE = "homer-doh.png"
CAKE_PHOTO_FILE = "cake.jpg"
GLADOS_PHOTO_FILE = "glados.png"
HAL_PHOTO_FILE = "hal-9000.png"
KLAXON_AUDIO_FILE = "klaxon.wav"
LLAMA_PHOTO_FILE = "llama.png"
NOBOTTY_KNOWS_PHOTO_FILE = 'nobotty-knows.jpg'
ANVIL_FILES = "anvil*.jpg"
BRICK_FILES = "brick*.jpg"
SIMPSON_FILES = "simpson*.wav"
TACO_FILES = "taco*.*"
CHUCK_NORRIS = [
'Chuck Norris died 30 years ago. Death just hasn\'t had the courage to tell him yet.',
'Chuck Norris has a grizzly bear carpet. The bear isn\'t dead. It is just too afraid to move.',
'Chuck Norris doesn\'t flush the toilet, he scares the shit out of it.',
'Death once had a near Chuck Norris experience.',
'When Chuck Norris crosses the road, cars look both ways.',
'Chuck Norris counted to infinity. Twice.',
'A cobra once bit Chuck Norris\'s leg. After 5 days of excruciating pain, the cobra died.',
'Chuck Norris can hear sign language.',
'Chuck Norris can kill two stones with one bird.',
'Chuck Norris doesn\'t dial wrong numbers, you pick up the wrong phone.',
'Chuck Norris can unscramble an egg.',
'Chuck Norris can hit so hard, your blood will bleed.',
'When the Boogeyman goes to sleep, he checks his closet for Chuck Norris.',
'Chuck Norris sleeps with a pillow beneath his gun.',
'Chuck Norris doesn\'t cheat death. He wins fair and square.',
'Chuck Norris\'s dog learned to clean up after itself. Chuck Norris doesn\'t take shit from anyone.',
'Chuck Norris can cut a knife with butter.',
'The only time Chuck Norris was wrong was when he thought he made a mistake.',
'Jesus can walk on water. Chuck Norris can swim on land.',
'Chuck Norris doesn\'t use the force. The force uses Chuck Norris.',
'There used to be a street called Chuck Norris but they changed the name because nobody crosses Chuck Norris and lives.'
]
EIGHT_BALL = [
"It is Certain!",
"It is decidedly so!",
"Without a doubt!",
"Yes definitely!",
"You may rely on it.",
"As I see it, yes.",
"Most likely.",
"Outlook good.",
"Yes.",
"Signs point to yes.",
"Reply hazy, try again.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don't count on it.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Very doubtful.",
]
MR_T = [
'I believe in the Golden Rule - The Man with the Gold... Rules.',
'Somedays you eat the bear, somedays the bear eats you.',
'When I was growing up, my family was so poor we couldn\'t afford to pay attention.',
'When I was old enough to change my name, I changed it to Mr. T so that the first word out of someone\'s mouth was \'Mister,\' a sign of respect.',
'I don\'t like magic - but I have been known to make guys disappear.',
'I\'m a Christian - I really don\'t believe in UFOs.',
'It takes a smart guy to play dumb.',
'Anger - use it, but don\'t lose it!',
'I pity the fool!',
'First name Mr, middle name \'period\', last name T!',
'<NAME> and <NAME> don\'t wear clothes with your name on it, so why should you wear their name?',
'People ask me what the "T" stands for in my name. If you\'re a man, the "T" stands for tough. If you\'re a woman or child, it stands for tender!'
]
PLAYERS = [
'Bill',
'Marco',
'Tim',
]
random = Random()
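# Return the names of files in data_dir matching a glob pattern (e.g. "anvil*.jpg").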
def read_files (data_dir, file_filter):
return filter (listdir (data_dir), file_filter)
## Bot Commands ###########################################
class BotCommands:
def __init__(self, bot, dispatcher, settings):
self.bot = bot
self.dispatcher = dispatcher
self.bot_name = settings.bot_name
self.data_dir = settings.data_dir
self.anvil = read_files(self.data_dir, ANVIL_FILES)
self.brick = read_files(self.data_dir, BRICK_FILES)
self.simpsons = read_files(self.data_dir, SIMPSON_FILES)
self.tacos = read_files(self.data_dir, TACO_FILES)
self._start_datetime = datetime.today()
self._configure_commands (dispatcher)
def _configure_commands(self, dispatcher):
dispatcher.register_message_handler (self.command_show_help, commands = {'help'})
dispatcher.register_message_handler (self.command_show_alt_help, commands = {'alt_help', 'alt-help', 'alt', 'alt.help'})
dispatcher.register_message_handler (self.command_start, commands = {'start'})
# dispatcher.register_message_handler (self.command_stop, commands = {'stop', 'exit'})
dispatcher.register_message_handler (self.command_status, commands = {'status', 'stat', 'stats'})
dispatcher.register_message_handler (self.command_uptime, commands = {'up-time', 'uptime', 'server-uptime'})
dispatcher.register_message_handler (self.command_ask_eight_ball, commands = {'8-ball', '8_ball', '8', '8ball', 'ball', '🎱'})
dispatcher.register_message_handler (self.command_anvil, commands = {'anvil'})
dispatcher.register_message_handler (self.command_banana, commands = {'banana'})
dispatcher.register_message_handler (self.command_banana_for_scale, commands = {'banana-for-scale', 'banana-for', 'for-scale'})
dispatcher.register_message_handler (self.command_brick, commands = {'brick', 'brick!'})
dispatcher.register_message_handler (self.command_cake, commands = {'cake', 'the', 'thecakeisalie', 'the_cake_is_a_lie', 'lie'})
dispatcher.register_message_handler (self.command_chuck_norris, commands = {'chuck', 'norris', 'chucknorris', 'chuck_norris'})
dispatcher.register_message_handler (self.command_developer_speedsheets, commands = {'dev-sheets', 'dev-speedsheets', 'dev-stashes', 'dev-stash', 'dev-box', 'dev', 'devs'})
dispatcher.register_message_handler (self.command_doh, commands = {'doh', 'doh!'})
dispatcher.register_message_handler (self.command_evil_bot, commands = {'bot', 'evil', 'evil-bot'})
dispatcher.register_message_handler (self.command_game_speedsheets, commands = {'game-sheets', 'game-speedsheets', 'game-stashes', 'game-stash', 'games'})
dispatcher.register_message_handler (self.command_glados, commands = {'glados'})
dispatcher.register_message_handler (self.command_hal, commands = {'hal', 'hal9000', 'hal-9000'})
dispatcher.register_message_handler (self.command_insufficient_data, commands = {'insufficient', 'insufficient-data', 'data'})
dispatcher.register_message_handler (self.command_jibber_jabber, commands = {'jibber', 'jabber', 'jibberjabber', 'jibber_jabber'})
dispatcher.register_message_handler (self.command_klaxon, commands = {'klaxon'})
dispatcher.register_message_handler (self.command_list_simpsons, commands = {'list-simpsons', 'list_simpsons', 'simpsons'})
dispatcher.register_message_handler (self.command_llama, commands = {'llama'})
dispatcher.register_message_handler (self.command_nobotty_knows, commands = {'nobotty', 'nobotty-knows'})
dispatcher.register_message_handler (self.command_sensible_chuckle, commands = {'sensible', 'sensible-chuckle', 'chuckle'})
dispatcher.register_message_handler (self.command_source, commands = {'git-source', 'git_source', 'git', 'github', 'gitsource', 'source', 'sourcecode', 'source_code'})
dispatcher.register_message_handler (self.command_tacos, commands = {'tacos', 'taco', 'taco-tuesday', 'taco_tuesday'})
dispatcher.register_message_handler (self.command_unsupervised, commands = {'defence', 'unsupervised'})
dispatcher.register_message_handler (self.command_who_is_first, commands = {'who-is-first', 'who_is_first', 'who-goes-first', 'who_goes_first', 'first', 'pick'})
dispatcher.register_message_handler (self.command_who_knows, commands = {'who', 'who_knows', 'who-knows'})
async def command_cake(self, message):
log_command ("cake", message)
await self._reply_photo (message, CAKE_PHOTO_FILE)
async def command_anvil(self, message):
log_command ("anvil", message)
await self._reply_photo (message, pick_one_and_print (self.anvil))
async def command_ask_eight_ball(self, message):
log_command ("ask 8 ball", message)
await reply (message, "🎱 " + pick_one_and_print (EIGHT_BALL))
async def command_banana(self, message):
log_command ("banana", message)
await self._reply_photo (message, BANANA_PHOTO_FILE)
async def command_banana_for_scale(self, message):
log_command ("banana for scale", message)
await self._reply_photo (message, BANANA_FOR_SCALE_PHOTO_FILE)
async def command_brick(self, message):
log_command ("brick", message)
await self._reply_photo (message, pick_one_and_print (self.brick))
async def command_chuck_norris(self, message):
log_command ("chuck norris", message)
await reply (message, pick_one_and_print (CHUCK_NORRIS))
async def command_doh(self, message):
log_command ("doh", message)
await self._reply_photo (message, DOH_PHOTO_FILE)
await self._reply_audio (message, pick_one_and_print(self.simpsons))
async def command_developer_speedsheets(self, message):
log_command ("show dev speedsheets", message)
await reply (message, DEVELOPER_SPEEDSHEETS_TEXT)
async def command_evil_bot(self, message):
log_command ("evil bot", message)
if should_i():
await self._reply_photo (message, GLADOS_PHOTO_FILE)
else:
await self._reply_photo (message, HAL_PHOTO_FILE)
async def command_game_speedsheets(self, message):
log_command ("show game speedsheets", message)
await reply (message, GAME_SPEEDSHEETS_TEXT)
async def command_jibber_jabber(self, message):
log_command ("jibber jabber", message)
command = parse_command (message)
if command.parameter:
await reply (message, f"Got your jibber jabber right here:\n\n{command.parameter}!")
else:
await reply (message, pick_one_and_print (MR_T))
await reply (message, "<i>- Mr. T</i>")
async def command_glados(self, message):
log_command ("glados", message)
await self._reply_photo (message, GLADOS_PHOTO_FILE)
async def command_hal(self, message):
log_command ("hal", message)
await self._reply_photo (message, HAL_PHOTO_FILE)
async def command_insufficient_data(self, message):
log_command ("insufficient data", message)
await self._reply_photo (message, "insufficient-data.jpg")
async def command_klaxon(self, message):
log_command ("Command - klaxon", message)
await self._reply_audio (message, KLAXON_AUDIO_FILE)
async def command_list_simpsons(self, message):
log_command ("list simpsons", message)
await reply (message, "Met The Simpson:\n\n" + "\n".join (self.simpsons))
async def command_llama(self, message):
log_command ("llama", message)
await self._reply_photo (message, LLAMA_PHOTO_FILE)
async def command_nobotty_knows(self, message):
log_command ("nobotty knows", message)
await self._reply_photo (message, NOBOTTY_KNOWS_PHOTO_FILE)
async def command_sensible_chuckle(self, message):
log_command ("sensible chuckle", message)
await self._reply_photo (message, "sensible-chuckle.gif")
async def command_show_alt_help(self, message):
log_command ("show alt help", message)
await reply (message, ALT_HELP_TEXT)
async def command_show_help(self, message):
log_command ("show help", message)
await reply (message, HELP_TEXT)
async def command_source(self, message):
log_command ("show source", message)
await reply (message, 'Source on Github:\n<a href="https://github.com/toconn/nobotty">https://github.com/toconn/nobotty</a>')
async def command_speedsheets(self, message):
log_command ("show speedsheets", message)
await reply (message, SPEEDSHEETS_TEXT)
async def command_start(self, message):
log_command ("start", message)
await reply (message, f"'ello {message.from_user.first_name}!")
await reply (message, HELP_TEXT)
async def command_status(self, message):
log_command ("start", message)
await reply (message, "I am completely operational, and all my circuits are functioning perfectly.")
async def command_stop(self, message):
print ("Stopping.")
await reply (message, "stopping.")
# self.dispatcher.stop_polling()
exit()  # sys.exit, imported at the top of this module
async def command_tacos (self, message):
log_command ("tacos", message)
await self._reply_photo (message, pick_one_and_print (self.tacos))
async def command_unsupervised(self, message):
log_command ("unsupervised", message)
await self._reply_photo (message, "unsupervised.png")
async def command_uptime (self, message):
log_command ("uptime", message)
await reply (message, self._uptime_message())
async def command_who_is_first(self, message):
log_command ("who is first", message)
await reply (message, pick_one_and_print (PLAYERS))
async def command_who_knows(self, message):
log_command ("Command - who knows", message)
if should_i_weighted(30):
if should_i_weighted(30):
await self._reply_photo (message, NOBOTTY_KNOWS_PHOTO_FILE)
else:
await reply (message, "Nobotty knows!")
else:
await reply (message, "Nobody knows!")
if should_i():
await reply (message, "Maybe aliens?")
def _path(self, file_name):
return join (self.data_dir, file_name)
async def _reply_animation (self, message, file_name):
await reply_animation (message, self._path(file_name))
async def _reply_audio (self, message, file_name):
await reply_audio (message, self._path(file_name))
async def _reply_photo (self, message, file_name):
await reply_photo (message, self._path(file_name))  # assumed helper, mirroring _reply_animation/_reply_audio
"""
- Sets the CIE Tristimulus Standard Observer Angle to 2 degrees or 10 degrees.
The default at power-on is always 2 degrees.
Parameters
----------
angle : `RS_7_1.OBSERVER_ANGLE`
(default to DEG_2)
CIE Tristimulus Standard Observer Angle.
"""
msg = f"SOB{angle.value}"
self._com_cmd(msg)
self._raise_debug(f"Standard Observer Angle set to {angle.value} degrees")
return None
#checked
def set_power_all(
self, power:float, unit:POWER_UNIT=POWER_UNIT.PERCENTAGE,
irr_distance_mm:int=0) -> None:
"""
- Set the output power of the overall system to specified unit and power.
Parameters:
----------
power : `float`
Power for all channels, in the specified unit.
unit : `RS_7_1.POWER_UNIT`
(default to Percentage)
Select the unit of the power from radiance, irradiance, luminance,
illuminance, and percentage.
irr_distance_mm : `int`
(default to 0)
Distance from the surface of the output port of the light source to
the desired imaging plane. Only used for irradiance or illuminance
power profile.
"""
self._set_power_unit(unit, irr_distance_mm)
msg = f"SCP 0,{power}"
self._com_cmd(msg)
self._raise_info(f"All LED channels' power set to {power}{self.POWER_UNIT.UNITS.value[unit.value]}.")
return None
#checked
def set_power_chans(self, chans:Union[list[int], NDArray[np.int_], int], powers:Union[list[float], NDArray[np.float_], float], unit:POWER_UNIT=POWER_UNIT.PERCENTAGE, irr_distance_mm:int=0) -> None:
"""
- Set the output power of the individual LED channel[s] to the
specified unit and power.
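Uses
----------
(Illustrative values only - the channel numbers shown are assumed to be
installed; check `LED_CHANNELS.LEN_CHANS` for your unit. A single power
value is applied to every listed channel.)
>>> set_power_chans([3, 7, 12], [5.0, 2.5, 10.0])
>>> set_power_chans([3, 7, 12], 5.0)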
Parameters
----------
chans : `list[int]` or 'int' or 'NDArray[int]'
Individual LED channel number[s], with a one-to-one
correspondence to the powers array.
powers : `list[float]` or 'float' or 'NDArray[float]'
Individual power setting[s], with a one-to-one
correspondence to the chans array.
unit : `RS_7_1.POWER_UNIT`
(default to Percentage)
Select the unit of the power from radiance, irradiance, luminance,
illuminance, and percentage.
irr_distance_mm : `int`
(default to 0)
Distance from the surface of the output port of the light source to
the desired imaging plane. ONLY used for irradiance or illuminance
power profile.
"""
self._set_power_unit(unit, irr_distance_mm)
if type(powers) is np.ndarray:
powers = list(powers)
if type(chans) is np.ndarray:
chans = list(chans)
if type(powers) is not list:
powers = list([powers])
if type(chans) is not list:
chans = list([chans])
if len(powers)==1:
powers = powers * len(chans)
if (len(chans)!=len(powers)):
self._raise_error("Provided list of channels doesn't have same amount of elemets as the list of power!")
for chan in chans:
if (chan not in self.LED_CHANNELS.LEN_CHANS.value):
self._raise_error(f"Provided LED channel {chan} is not installed!")
str_chans = [str(x) for x in chans]
str_powers = [str(f'{x:.4f}') for x in powers]
combined = str_chans+str_powers
combined[0::2] = str_chans
combined[1::2] = str_powers
combined_proc = ','.join(combined)
msg = f"SCP{combined_proc}"
self._raise_info("LED Channel[s] power set.")
self._com_cmd(msg)
#checked
def set_power_led_random(self, power_percentage:int=5) -> None:
"""
- Set the output spectrum to a randomly generated spectrum, with each
non-white LED channel's power drawn uniformly from 0 to `power_percentage`
percent.
Uses
----------
>>> set_power_led_random(power_percentage=5)
>>> set_power_output(30)
"""
spectrum = np.random.random([len(self.LED_CHANNELS.LEN_CHANS_NO_WHITE.value)])*power_percentage
self.set_power_chans(self.LED_CHANNELS.LEN_CHANS_NO_WHITE.value,spectrum)
self._raise_info("Output spectrum set to a randomly generated spectrum.")
return None
#checked
def set_power_output(
self, power:float, unit:POWER_UNIT=POWER_UNIT.RADIANCE,
irr_distance_mm:int=0, match_chrom:bool=False) -> None:
"""
- Set the output power of the overall system to specified unit and power.
- WARNING: Unknown chromaticity/spectrum accuracy! Always use in
conjunction with the `get_power_output()` and `get_spectrum_output()` functions
or with an external power meter and spectrometer! For better chromaticity/
spectrum accuracy, set your desired absolute (photometric/radiometric) unit
in the `set_spectrum_raw()` function!
- WARNING: For chromaticity/color based spectrum setting, follow the instructions
in `set_spectrum_rgb()`, `set_spectrum_CIExy()`, and `set_spectrum_pantone()`
to ensure chromaticity precision!
- WARNING: Using `match_chrom=True` may result in failure to change the power
level, since the chromaticity correction function may lower the output level
for unknown reasons; NO warning NOR error will be raised if this happens!
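Uses
----------
(Illustrative values only - verify the result with the read-back helpers
mentioned in the warnings above.)
>>> set_power_output(0.05, unit=POWER_UNIT.RADIANCE, match_chrom=True)
>>> get_power_output()
>>> get_spectrum_output()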
Parameters
----------
power : `float`
Overall output power, in the specified unit.
unit : `RS_7_1.POWER_UNIT`
(default to Radiance)
Select the unit of the power from radiance, irradiance, luminance,
illuminance, and percentage.
irr_distance_mm : `int`
(default to 0)
Distance from the surface of the output port of the light source to
the desired imaging plane. Only used for irradiance or illuminance
power profile.
match_chrom : `bool`
(default to False)
Note that changing the output level using `match_chrom = False` can
result in a chromaticity shift in the final output spectrum OSP, as
OUT merely adjusts all channels’ power level to match the requested
setting without regard to any wavelength shift caused by the change
in drive current.
"""
self._set_power_unit(unit, irr_distance_mm)
if match_chrom:
chrom = self.get_chromaticity_output()
msg = f'OUT{power:.3f}'
self._com_cmd(msg)
self._com_cmd(f"CCS{chrom[0]:.6f},{chrom[1]:.6f}")
else:
msg = f'OUT{power:.3f}'
self._com_cmd(msg)
self._raise_info(f"Output power set to: {power}{self.POWER_UNIT.UNITS.value[unit.value]}, irradiance distance: {irr_distance_mm}, chroma_correction: {match_chrom}.")
return None
#checked
def set_power_fixed_spectrum(
self, power:float, unit:POWER_UNIT=POWER_UNIT.RADIANCE,
irr_distance_mm:int=0) -> None:
"""
- Set the output power of the overall system to specified unit and power
while maintaining the current output spectrum.
Parameters
----------
power : `float`
Target output power, in the specified unit.
unit : `RS_7_1.POWER_UNIT`
(default to Radiance)
Select the unit of the power from radiance, irradiance, luminance,
illuminance, and percentage.
irr_distance_mm : `int`
(default to 0)
Distance from the surface of the output port of the light source to
the desired imaging plane. Only used for irradiance or illuminance
power profile.
"""
self.set_spectrum_raw(self.get_spectrum_output(),power=power,power_unit=unit,irr_distance_mm=irr_distance_mm)
self._raise_info(f"Output spectrum power set to: {power}{self.POWER_UNIT.UNITS.value[unit.value]}, irradiance distance: {irr_distance_mm}.")
return None
#checked
def set_spectrum_raw(
self,
spectrum:Union[list[float], NDArray[np.float_]],
*,
power:float=0,
power_unit:POWER_UNIT=POWER_UNIT.RADIANCE,
include_white:bool=True,
fit_max_pwr:bool=False,
chroma_correction:bool = False,
irr_distance_mm:int=0) -> float:
"""
- Set and fit the spectrum to be produced by the light source, with 1nm step size, and unit
of radiance or irradiance ONLY. Default range from 360nm to 1100nm, i.e. 741 points.
- For a lot of applications, it's easier to provide a normalized spectrum in a.u.
and set the arg. "fit_max_pwr" to True to get maximum power output for a specific
spectrum of interest.
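Uses
----------
(Illustrative values only - a flat, normalized a.u. spectrum over the
default 360nm to 1100nm range, i.e. 741 points, fitted at maximum power.)
>>> flat_spectrum = [1.0] * 741
>>> rms_error = set_spectrum_raw(flat_spectrum, fit_max_pwr=True)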
Parameters
----------
spectrum : `list[float]` or `NDArray[float]`
Spectrum data in the specified unit with 1nm step size.
power : `float`
(default = 0)
Target power to scale the fitted spectrum to; 0 leaves the spectrum unscaled.
power_unit : 'RS_7_1.POWER_UNIT'
(default = POWER_UNIT.RADIANCE)
include_white : `bool`
(default = True)
Whether to include white LEDs in the fitting process.
fit_max_pwr : `bool`
(default = False)
Whether to fit to the maximum possible power of the light source.
chroma_correction : `bool`
(default = False)
Whether to run chromaticity correction so the fitted spectrum matches the
chromaticity of the requested spectrum.
irr_distance_mm : `int`
(default to 0)
Distance from the surface of the output port of the light source to
the desired imaging plane. Only used for irradiance or illuminance
power profile.
Returns
--------
RMS_Error : `float`
Root-Mean-Square error for the fitted spectrum vs. the provided spectrum.
"""
if (power_unit is self.POWER_UNIT.PERCENTAGE):
self._raise_error("Only Radiometric and Photometric are supported for spectrum setting!")
self._set_power_unit(power_unit, irr_distance_mm)
spectrum = list(spectrum)
if len(spectrum) != (self._wavelength_max - self._wavelength_min + 1):
self._raise_error("Provided spectrum data's length doesn't match current wavelength min_max setting!")
msg_spectrum = ','.join(['{:.6f}'.format(x) for x in spectrum])
self._com_cmd(f"TSP{msg_spectrum}")
if power != 0:
self._com_cmd(f"STS{power:.4f}")
self._raise_debug(f"Target spectrum power scaled to {power}{self.POWER_UNIT.UNITS.value[power_unit.value]}.")
msg_cmd = "FTS"
if include_white:
msg_cmd = msg_cmd + "W"
if fit_max_pwr:
msg_cmd = msg_cmd + "M"
self._com_cmd(f"{msg_cmd}")
if chroma_correction:
self._com_cmd("CCS")
return self.get_E_rms_fitted_spectrum()
#checked
def set_spectrum_CIExy(
self, CIEx:float, CIEy:float,
power:float=0, power_unit:POWER_UNIT=POWER_UNIT.RADIANCE, irr_distance_mm:int=0
) -> tuple[float, float]:
"""
- Fit the output spectrum to a specified CIE 1931 x,y chromaticity setting.
- Set `match_chrom = True` when calling `set_power_fitted_spectrum()`
to maintain the chromaticity matching.
Uses
----------
>>> set_spectrum_CIExy(0.125, 0.246)
>>> set_power_output(20, match_chrom=True)
Parameters
----------
CIEx : 'float'
CIEx coordinate for chromaticity setting.
CIEy : 'float'
CIEy coordinate for chromaticity setting.
Returns:
----------
(A_CIEx, A_CIEy) : `[float, float]`
Actual fitted CIEx,y chromaticity in CIE 1931 standard.
"""
self.set_power_all(0.1)
self._com_cmd(f"CCS{CIEx:.6f},{CIEy:.6f}")
if power !=0:
self.set_power_output(power, power_unit, irr_distance_mm)
self._com_cmd(f"CCS{CIEx:.6f},{CIEy:.6f}")
self._raise_debug(f"Set output spectrum to CIExy chromaticity {CIEx:.6f},{CIEy:.6f}.")
return self.get_chromaticity_output()
#checked
def set_spectrum_black_body(
self, temp:int, power:float=0, power_unit:POWER_UNIT=POWER_UNIT.RADIANCE,
irr_distance_mm:int=0) -> float:
"""
- Set and fit the spectrum to be produced by the light source, with
1nm step size, and unit of radiance or irradiance ONLY. Default | |
from flask import Flask, render_template, Response, request, session
import sys
import pickle
from gibson2.robots.turtlebot_robot import Turtlebot
from gibson2.robots.fetch_robot import Fetch
from gibson2.simulator import Simulator
from gibson2.scenes.gibson_indoor_scene import StaticIndoorScene
from gibson2.scenes.igibson_indoor_scene import InteractiveIndoorScene
import gibson2
import os
from gibson2.objects.ycb_object import YCBObject
from gibson2.utils.utils import parse_config
from gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
import numpy as np
from gibson2.render.profiler import Profiler
import cv2
from PIL import Image
from io import BytesIO
import base64
import binascii
import multiprocessing
import traceback
import atexit
import time
import uuid
interactive = True
def pil_image_to_base64(pil_image):
buf = BytesIO()
pil_image.save(buf, format="JPEG")
return base64.b64encode(buf.getvalue())
class ProcessPyEnvironment(object):
"""Step a single env in a separate process for lock free paralellism."""
# Message types for communication via the pipe.
_READY = 1
_ACCESS = 2
_CALL = 3
_RESULT = 4
_EXCEPTION = 5
_CLOSE = 6
def __init__(self, env_constructor):
self._env_constructor = env_constructor
def start(self):
"""Start the process."""
self._conn, conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(target=self._worker,
args=(conn, self._env_constructor))
atexit.register(self.close)
self._process.start()
result = self._conn.recv()
if isinstance(result, Exception):
self._conn.close()
self._process.join(5)
raise result
assert result is self._READY, result
def __getattr__(self, name):
"""Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
:param name: attribute to access.
:return: value of the attribute.
"""
print('getting', name)
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
"""Asynchronously call a method of the external environment.
:param name: name of the method to call.
:param args: positional arguments to forward to the method.
:param kwargs: keyword arguments to forward to the method.
:return: promise object that blocks and provides the return value when called.
"""
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
"""Send a close message to the external process and join it."""
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join(5)
def step(self, action, blocking=True):
"""Step the environment.
:param action: the action to apply to the environment.
:param blocking: whether to wait for the result.
:return: (next_obs, reward, done, info) tuple when blocking, otherwise callable that returns that tuple
"""
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=True):
"""Reset the environment.
:param blocking: whether to wait for the result.
:return: next_obs when blocking, otherwise callable that returns next_obs
"""
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
"""Wait for a message from the worker process and return its payload.
:raise Exception: an exception was raised inside the worker process.
:raise KeyError: the received message is of an unknown type.
:return: payload object of the message.
"""
message, payload = self._conn.recv()
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
self.close()
raise KeyError(
'Received message of unexpected type {}'.format(message))
def _worker(self, conn, env_constructor):
"""The process waits for actions and sends back environment results.
:param conn: connection for communication to the main process.
:param env_constructor: env_constructor for the OpenAI Gym environment.
:raise KeyError: when receiving a message of unknown type.
"""
try:
np.random.seed()
env = env_constructor()
conn.send(self._READY) # Ready.
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
if name == 'step' or name == 'reset':
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
getattr(env, 'close')()
assert payload is None
break
raise KeyError(
'Received message of unknown type {}'.format(message))
except Exception: # pylint: disable=broad-except
etype, evalue, tb = sys.exc_info()
stacktrace = ''.join(traceback.format_exception(etype, evalue, tb))
message = 'Error in environment process: {}'.format(stacktrace)
conn.send((self._EXCEPTION, stacktrace))
finally:
conn.close()
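# Illustrative sketch (not used by the app): how ProcessPyEnvironment is meant
# to be driven. The constructor takes any zero-argument callable that builds an
# object exposing at least step() and close(); ToyEnv below is one such callable.
# The action values are arbitrary example wheel velocities.
def _process_env_example():
    env = ProcessPyEnvironment(ToyEnv)
    env.start()                   # spawn the worker process
    frame = env.step([0.5, 0.5])  # blocking step; returns the rendered frame
    env.close()                   # ask the worker to shut down and join it
    return frame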
class ToyEnv(object):
"""
ToyEnv is an example environment that wraps around the simulator. It doesn't follow
the OpenAI Gym interface, and only has step and close functions. It works with static
mesh scenes.
"""
def __init__(self):
config = parse_config(os.path.join(gibson2.example_config_path, 'turtlebot_demo.yaml'))
hdr_texture = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')
hdr_texture2 = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')
light_modulation_map_filename = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'Rs_int', 'layout', 'floor_lighttype_0.png')
background_texture = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'background', 'urban_street_01.jpg')
settings = MeshRendererSettings(enable_shadow=False, enable_pbr=False)
self.s = Simulator(mode='headless', image_width=400,
image_height=400, rendering_settings=settings)
scene = StaticIndoorScene('Rs')
self.s.import_scene(scene)
#self.s.import_ig_scene(scene)
self.robot = Turtlebot(config)
self.s.import_robot(self.robot)
for _ in range(5):
obj = YCBObject('003_cracker_box')
self.s.import_object(obj)
obj.set_position_orientation(np.random.uniform(
low=0, high=2, size=3), [0, 0, 0, 1])
print(self.s.renderer.instances)
def step(self, a):
# run simulation for one step and get an rgb frame
self.robot.apply_action(a)
self.s.step()
frame = self.s.renderer.render_robot_cameras(modes=('rgb'))[0]
return frame
def close(self):
# tear down the simulation
self.s.disconnect()
class ToyEnvInt(object):
"""
Same as ToyEnv, but works with interactive scenes.
"""
def __init__(self, robot='turtlebot', scene='Rs_int'):
config = parse_config(os.path.join(gibson2.example_config_path, 'turtlebot_demo.yaml'))
hdr_texture = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')
hdr_texture2 = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')
light_modulation_map_filename = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'Rs_int', 'layout', 'floor_lighttype_0.png')
background_texture = os.path.join(
gibson2.ig_dataset_path, 'scenes', 'background', 'urban_street_01.jpg')
scene = InteractiveIndoorScene(
scene, texture_randomization=False, object_randomization=False)
#scene._set_first_n_objects(5)
scene.open_all_doors()
settings = MeshRendererSettings(env_texture_filename=hdr_texture,
env_texture_filename2=hdr_texture2,
env_texture_filename3=background_texture,
light_modulation_map_filename=light_modulation_map_filename,
enable_shadow=True, msaa=True,
light_dimming_factor=1.0,
optimized=True)
self.s = Simulator(mode='headless', image_width=400,
image_height=400, rendering_settings=settings)
self.s.import_ig_scene(scene)
if robot=='turtlebot':
self.robot = Turtlebot(config)
else:
self.robot = Fetch(config)
self.s.import_robot(self.robot)
for _ in range(5):
obj = YCBObject('003_cracker_box')
self.s.import_object(obj)
obj.set_position_orientation(np.random.uniform(
low=0, high=2, size=3), [0, 0, 0, 1])
print(self.s.renderer.instances)
def step(self, a):
# run simulation for one step and get an rgb frame
action = np.zeros(self.robot.action_space.shape)
# for some reason, the wheel velocity of Fetch Robot needs to be reversed.
if isinstance(self.robot, Turtlebot):
action[0] = a[0]
action[1] = a[1]
else:
action[1] = a[0]
action[0] = a[1]
self.robot.apply_action(action)
self.s.step()
frame = self.s.renderer.render_robot_cameras(modes=('rgb'))[0]
return frame
def close(self):
# tear down the simulation
self.s.disconnect()
class iGFlask(Flask):
"""
iGFlask is a Flask app that handles environment creation and teardown.
"""
def __init__(self, args, **kwargs):
super(iGFlask, self).__init__(args, **kwargs)
self.action= {} # map uuid to input action
self.envs = {} # map uuid to environment instance
self.envs_inception_time = {} # map uuid to environment start time
def cleanup(self):
"""
Routine clean-up: find any environment that has been idle for more
than 200s and stop it.
"""
print(self.envs)
for k, v in list(self.envs_inception_time.items()):  # copy: stop_env() mutates this dict
if time.time() - v > 200:
# clean up an old environment
self.stop_env(k)
def prepare_env(self, uuid, robot, scene):
"""
This function creates an Env (ToyEnv or ToyEnvInt) in a subprocess.
"""
self.cleanup()
def env_constructor():
if interactive:
return ToyEnvInt(robot=robot, scene=scene)
else:
return ToyEnv()
self.envs[uuid] = ProcessPyEnvironment(env_constructor)
self.envs[uuid].start()
self.envs_inception_time[uuid] = time.time()
def stop_env(self, uuid):
# stop an environment (ToyEnv or ToyEnvInt) that lives in a subprocess.
self.envs[uuid].close()
del self.envs[uuid]
del self.envs_inception_time[uuid]
app = iGFlask(__name__)
@app.route('/')
def index():
id = uuid.uuid4()
return render_template('index.html', uuid=id)
@app.route('/demo')
def demo():
args = request.args
id = uuid.uuid4()
robot = args['robot']
scene = args['scene']
return render_template('demo.html', uuid=id, robot=robot, scene=scene)
"""
gen is a utility function that generates an image based on the user id
and user input (robot and scene), and sends it back to the user.
The images are streamed quickly enough that they appear as a video.
"""
def gen(app, unique_id, robot, scene):
image = np.array(Image.open("templates/loading.jpg").resize((400, 400))).astype(np.uint8)
loading_frame = pil_image_to_base64(Image.fromarray(image))
loading_frame = binascii.a2b_base64(loading_frame)
image = np.array(Image.open("templates/waiting.jpg").resize((400, 400))).astype(np.uint8)
waiting_frame = pil_image_to_base64(Image.fromarray(image))
waiting_frame = binascii.a2b_base64(waiting_frame)
image = np.array(Image.open("templates/finished.jpg").resize((400, 400))).astype(np.uint8)
finished_frame = pil_image_to_base64(Image.fromarray(image))
finished_frame = binascii.a2b_base64(finished_frame)
id = unique_id
if len(app.envs) < 3:
# if number of envs is smaller than 3, then create an environment and provide to the user
for i in range(5):
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + loading_frame + b'\r\n\r\n')
app.prepare_env(id, robot, scene)
try:
start_time = time.time()
if interactive:
timeout = 200
else:
timeout = 30
while time.time() - start_time < timeout:
# If the environment is still valid (before it should be destroyed)
# generate a frame from the Env and supply to the user.
frame = app.envs[id].step(app.action[id])
frame = (frame[:, :, :3] * 255).astype(np.uint8)
frame = pil_image_to_base64(Image.fromarray(frame))
frame = binascii.a2b_base64(frame)
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
except Exception:
pass
finally:
# on timeout, stop the environment and show a text prompt image telling
# the user the simulation has finished
app.stop_env(id)
for i in range(5):
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + finished_frame + b'\r\n\r\n')
else:
# If number of envs is >= 3, | |
lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return exclude
def func_fa5826c503454255b6e198c489d3338d(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return remaining_budget
def func_346669cf7fdb42f08a0d0e1ae00d3231(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return needed_budget
def func_9b879c5d032e4dc8a93bd1e06ad3c777(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return lowest_cnt
def func_9fce7021afb7428f97af9c8f9bfc453e(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return can_replicate
def func_74ddda17a48d4ad8a71f939e6f1d2a07(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return lowest
def func_934d186d2493471f97a636549a9d331d(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - 1, remaining_budget /
lowest_cnt)
else:
can_replicate = remaining_budget / lowest_cnt
if can_replicate > 0:
if lowest + can_replicate not in seen:
seen.add(lowest + can_replicate)
queue.append(lowest + can_replicate)
if lowest + can_replicate - 1 not in seen:
seen.add(lowest + can_replicate - 1)
queue.append(lowest + can_replicate - 1)
for exclude in xrange(0, min(remaining_budget, partial) + 1):
cand = get_expected(placed, lowest, exclude
) - exclude - needed_budget
ret = max(ret, cand)
return ret
def func_0c00f14afddb48daa8444e73770dc1d5(budget, placed):
ret = 0.0
queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
]
queue = sorted(set(queue))
seen = set(queue)
while queue:
lowest = queue.pop()
if lowest == 0:
continue
needed_budget = (37 - len(placed)) * lowest
for p in placed:
needed_budget += max(0, lowest - p)
if budget < needed_budget:
continue
remaining_budget = budget - needed_budget
partial = len([p for p in placed if p <= lowest])
lowest_cnt = 37 - len(placed) + partial
if lowest_cnt == 0:
continue
larger = [p for p in placed if p > lowest]
if larger:
next_larger = min(larger)
can_replicate = min(next_larger - lowest - | |
from copy import deepcopy
import json
import logging, logging.handlers
import sys
import uuid
from datetime import datetime
from io import BytesIO
from pprint import pformat
from time import time, sleep
from pymisp import MISPObject
###################################
def add_ips_to_cert_obj(mh, cert_data, ips):
existing_ips = []
ip_attrs = cert_data.get_attributes_by_relation('cert-ip')
# Check the IPs already in this cert_data object
for ip in ip_attrs:
if ip.value not in existing_ips:
existing_ips.append(ip.value)
if ip.value in ips:
update_timestamps(mh, ip)
# If any of the IPs we found weren't already in this misphunter-cert object
# add them, with a comment of where we found them.
added_ips = []
for hunt_type, hunt_ips in ips.items():
for ip in hunt_ips:
if ip not in existing_ips:
if ip not in added_ips:
added_ips.append(ip)
comment=f"{ip} discovered via {hunt_type} search for associated hosts."
attr = cert_data.add_attribute('cert-ip', ip, type='ip-dst', comment=comment, disable_correlation=False, to_ids=False, pythonify=True)
update_timestamps(mh, attr)
return cert_data
def add_json_attr(mh, checksum, raw_sorted_json_text, host_obj, json_type, comment=""):
json_filename = str(checksum)+".json"
mh.logger.info(f"Saving raw JSON blob {json_filename} to host_obj {host_obj.uuid}...")
pseudofile = BytesIO(raw_sorted_json_text.encode('utf-8'))
new_attr = host_obj.add_attribute(json_type, value=json_filename,
comment=comment, type='attachment', to_ids=False, data=pseudofile,
distribution=5, disable_correlation=True, pythonify=False)
update_timestamps(mh, new_attr)
mh.logger.debug(f"Object updated. Returning {host_obj}")
return host_obj
def blacklist_cert(mh, cert_data, comment):
cert_hash = get_attr_val_by_rel(cert_data, 'cert-sha256')
cert_hash_attr = get_attr_obj_by_rel(cert_data, 'cert-sha256')
ips = get_all_attrs_by_rel(cert_data, 'cert-ip')
cert_hash_attr.comment = comment
# mh.logger.warning(f"Search returned {len(ips)} IPs. Threshold set to > 1 and < {mh.cert_pivot_threshold}.")
mh.logger.warning(f"Disabling misphunter-cert object {cert_hash} - [{cert_data.uuid}] "
f"for future pivoting.")
blacklisted = get_attr_obj_by_rel(cert_data, 'blacklisted')
if int(blacklisted.value) == 0:
blacklisted.value = 1
# if this object already exists in the event, update it
cert_obj_exists = mh.misp.get_object(cert_data.uuid, pythonify=True)
if not isinstance(cert_obj_exists, dict):
cert_data = update_existing_object(mh, cert_data)
if 'auto_blacklists_added' not in mh.run_stats:
mh.run_stats['auto_blacklists_added'] = {}
if cert_data.event_id not in mh.run_stats['auto_blacklists_added']:
mh.run_stats['auto_blacklists_added'][cert_data.event_id] = {}
if 'misphunter-certs' not in mh.run_stats['auto_blacklists_added'][cert_data.event_id]:
mh.run_stats['auto_blacklists_added'][cert_data.event_id]['misphunter-certs'] = []
mh.run_stats['auto_blacklists_added'][cert_data.event_id]['misphunter-certs'].append(cert_hash)
mh.run_stats['auto_blacklists_added']['total']+=1
return cert_data
def blacklist_check_cert(mh, cert):
# mh.logger.debug(f"Checking misphunter-cert object {cert.uuid} to determine if it should be blacklisted.")
cert_ips = cert.get_attributes_by_relation('cert-ip')
sha256 = get_attr_val_by_rel(cert, 'cert-sha256')
if len(cert_ips) <= 1 or len(cert_ips) >= mh.cert_pivot_threshold:
comment = f"Cert {sha256} - {cert.uuid} had {len(cert_ips)} IPs associated with it.\n" \
f"\tPivot threshold currently set to {mh.cert_pivot_threshold}.\n"\
f"\tBlacklisting cert from future pivots!"
mh.logger.info(comment)
cert = blacklist_cert(mh, cert, comment)
else:
mh.logger.debug(f"Cert {sha256} - {cert.uuid} had {len(cert_ips)} IPs associated with it. Leaving blacklist val alone!")
return cert
def build_misphunter_cert(mh, cert, parent_obj, event, raw_data):
parsed_data = raw_data['parsed']
mh.logger.info(f"Building a new misphunter-cert object for {parsed_data['fingerprint_sha256']}!")
cert_temp = mh.misp.get_raw_object_template('misphunter-cert')
cert_obj = MISPObject('misphunter-cert', misp_objects_template_custom=cert_temp)
cert_obj.is_new = True
cert_obj.add_attribute('cert-sha256', parsed_data['fingerprint_sha256'], type="x509-fingerprint-sha256", disable_correlation=True, to_ids=False, pythonify=True)
cert_obj.add_attribute('cert-sha1', parsed_data['fingerprint_sha1'], type="x509-fingerprint-sha1", disable_correlation=True, to_ids=False, pythonify=True)
cert_obj.add_attribute('cert-md5', parsed_data['fingerprint_md5'], type="x509-fingerprint-md5", disable_correlation=True, to_ids=False, pythonify=True)
cert_obj.add_attribute('cert-issuer-dn', parsed_data['issuer_dn'], type="text", disable_correlation=True, to_ids=False, pythonify=True)
cert_obj.add_attribute('cert-subject-dn', parsed_data['subject_dn'], type="text", disable_correlation=True, to_ids=False, pythonify=True)
cert_obj.add_attribute('blacklisted', '0', type="boolean", disable_correlation=True, to_ids=False, pythonify=True)
attrs = parent_obj.get_attributes_by_relation('host-ip')
if len(attrs)>0:
ip = attrs[0].value
cert_obj.add_attribute('cert-ip', ip, type="ip-dst", disable_correlation=False, to_ids=False, pythonify=True)
# Add the raw results as a JSON file
raw_data_text = json.dumps(raw_data)
json_filename = f"{parsed_data['fingerprint_sha256']}.json"
pseudofile = BytesIO(raw_data_text.encode('utf-8'))
cert_obj.add_attribute('json', value=json_filename, data=pseudofile, type='attachment', disable_correlation=True, to_ids=False, pythonify=True)
# Add any names as domains
if 'names' in parsed_data:
for name in parsed_data['names']:
domain = name.lstrip("*.")
cert_obj.add_attribute('cert-domain', domain, type="domain", disable_correlation=False, to_ids=False, pythonify=True)
# Add relationship
# comment=f"Certificate was seen on {ip}"
# cert_obj.add_reference(parent_obj.uuid, "derived-from", comment=comment)
sha256 = parsed_data['fingerprint_sha256']
if 'certs_added' not in mh.run_stats:
mh.run_stats['certs_added'] = {str(event.id): [sha256]}
elif str(event.id) not in mh.run_stats['certs_added']:
mh.run_stats['certs_added'][str(event.id)] = [sha256]
else:
mh.run_stats['certs_added'][str(event.id)].append(sha256)
mh.run_stats['certs_added']['total']+=1
for attr in cert_obj.Attribute:
update_timestamps(mh, attr)
return cert_obj
def build_new_host_obj(mh, event, ip):
mh.logger.info(f"MISP Object for {ip} does not exist. Building a new one.")
try:
raw_template = mh.misp.get_raw_object_template('misphunter-host')
except Exception as e:
mh.logger.error(f"Could not get template for misphunter-host. Host {ip} WILL NOT BE ADDED: {e}")
return False
host_obj = MISPObject('misphunter-host', misp_objects_template_custom=raw_template)
host_obj.is_new = True
host_obj.add_attribute('host-ip', value=str(ip), type='ip-dst', to_ids=False)
host_obj.add_attribute('blacklisted', value=str(0), type='boolean', to_ids=False)
# Update timestamps
for attr in host_obj.Attribute:
update_timestamps(mh, attr)
if 'hosts_added' not in mh.run_stats:
mh.run_stats['hosts_added'] = {str(event.id) : [ip]}
elif str(event.id) not in mh.run_stats['hosts_added']:
mh.run_stats['hosts_added'][str(event.id)] = [ip]
else:
mh.run_stats['hosts_added'][str(event.id)].append(ip)
mh.run_stats['hosts_added']['total']+=1
return host_obj
def check_all_certs(mh, cert, event):
# Returns False if nothing found, otherwise returns the latest misphunter-cert MISPObject
cert_data = False
mh.logger.info(f"Saving API queries by checking if cert already exists in database...")
try:
all_cert_data = mh.misp.search(controller="objects", object_name="misphunter-cert", value=cert, pythonify=True)
except Exception as e:
mh.logger.error(f"FAILED attempt at searching for all existing certificates of value {cert}...")
return False
mh.logger.debug(f"Found {len(all_cert_data)} certs matching value {cert}.")
event_cert_uuids = []
for event_cert in mh.event_certs:
if event_cert.uuid not in event_cert_uuids:
event_cert_uuids.append(event_cert.uuid)
for obj in all_cert_data:
if obj.uuid in event_cert_uuids:
mh.logger.info(f"Found a cert object that already lives in this exact event, so that's the one we'll use: {obj.uuid}")
cert_data = obj
cert_data.is_new = False
cert_data.is_clone=False
return cert_data
if cert_data == False:
mh.logger.debug(f"Found first cert object for {cert}: {obj.uuid}")
cert_data = obj
else:
if int(obj.timestamp.timestamp()) > int(cert_data.timestamp.timestamp()):
mh.logger.debug(f"Found newer cert object [{obj.uuid}] to replace existing cert object {cert_data.uuid}")
cert_data = obj
if cert_data:
if str(cert_data.event_id)!=str(event.id):
mh.logger.info(f"Found existing cert object {cert_data.uuid} from a different event. Cloning object for this event!")
cert_data = clone_obj(mh, cert_data, event)
cert_data.is_clone=True
return cert_data
def check_tags(obj):
tags = []
if hasattr(obj, 'Tag'):
for tag in obj.Tag:
if tag.name not in tags:
tags.append(tag.name)
return tags
def check_json_freshness(mh, host_obj, service):
mh.logger.info(f"First checking to see if we have an existing json blob new enough to avoid using an API query.")
json_type = f"{service}-json"
last_json = get_latest_attr_by_rel(host_obj, json_type)
threshold_timestamp = int(time()) - (int(mh.update_threshold) * 60 * 60)
if last_json:
mh.logger.info(f"{last_json.value} found of type {json_type}. Checking to see if it's new enough...")
if int(last_json.last_seen.timestamp()) >= int(threshold_timestamp):
mh.logger.info(f"JSON timestamp of {int(last_json.last_seen.timestamp())} > {mh.update_threshold} hours ago ({int(threshold_timestamp)})."
"new enough to reuse!")
if not hasattr(host_obj, 'is_new'):
host_obj.is_new = False
if not host_obj.is_new:
mh.logger.debug(f"attempting to read {last_json.value}...")
if not last_json.data:
mh.logger.warning(f"Data missing from {last_json.value}... Trying some magic.")
last_json = mh.misp.get_attribute(last_json, pythonify=True)
if not last_json.data:
mh.logger.warning(f"Still no go. RETURNING FALSE!")
return False
else:
mh.logger.info(f"The magic appears to have worked!")
bytes_obj = last_json.data.read()
last_json.data.seek(0)
json_str = bytes_obj.decode('utf-8').replace("'", '"')
try:
raw = json.loads(json_str)
except Exception as e:
mh.logger.error(f"Error attempting to load json: {e}")
mh.logger.error(f"Something went wrong attempting to read {last_json.value}. Skipping re-use and "
"just going to hit the API again.")
return False
# set misphunter_processed to make sure we're not re-processing this JSON object
raw['misphunter_processed'] = True
return raw
else:
mh.logger.info(f"BUT WAIT - host_obj {host_obj.uuid} is_new! Better get a fresh JSON blob to be safe!")
return False
else:
mh.logger.info(f"It had a timestamp of {int(last_json.last_seen.timestamp())} and we wanted it to be > "
f"{mh.update_threshold} hrs ago ({int(threshold_timestamp)}). Getting fresh JSON blob for comparison.")
return False
else:
return False
def check_timer(mh, seed):
    # Returns seed with updated last-run time (or an updated object timestamp when ignoring timers in debugging mode)
mh.logger.info(f"Checking event timer to see if seed {seed.uuid} is ready to update")
update_seed_object = False
run_search = False
now = int(time())
last_run = get_attr_val_by_rel(seed, 'last-run')
# If no last_run time found, update event
if last_run:
mh.logger.info(f"seed last ran: {last_run}")
last_run_time = int(last_run.timestamp())
else:
mh.logger.info(f"Search does not have a record of when it was last run. Setting last run timestamp to 0.")
last_run_time = 0
if not mh.ignore_timers:
seed.add_attribute('last-run', now, type="datetime", disable_correlation=True, pythonify=True)
else:
mh.logger.info(f"ignore_timers flag is set, so not adding a last_run attribute to this event.")
mh.logger.debug(f"Setting update_seed_object to True.")
update_seed_object = True
freq = get_attr_val_by_rel(seed, 'update-freq')
mh.logger.info(f"Search frequency for seed {seed.uuid} set to run once every {freq} hours.")
# If no frequency time found, update event
if freq:
freq_time = int(freq) * 60 * 60
else:
mh.logger.info(f"Search does not have a frequency set. Defaulting to 24 hours.")
freq_time = 24 * 60 * 60
# Ignore frequency attribute if running in debugger mode (avoids having to delete it every time)
if not mh.ignore_timers:
seed.add_attribute('update-freq', '24', type='text', disable_correlation=True, comment="Search is re-run every time this many hours have passed.", pythonify=True)
else:
mh.logger.info(f"ignore_timers flag is set, so not adding a frequency attribute to this event.")
mh.logger.debug(f"Setting update_seed_object to True.")
update_seed_object = True
time_passed = now - int(last_run_time)
    # If no last_run_time was set, this is always true because time_passed (seconds since the epoch) exceeds any configured freq.
if time_passed >= freq_time:
update_seed_object = True
mh.logger.info(f"{str(time_passed/60/60)} hours have passed. Time to run the search again!")
    # Ignore last_run attribute
# repository: maxwellmattryan/cs-313e
# dependencies
import math
# point class used for triangle class
class Point(object):
# initialization of Point object
def __init__(self, x=0, y=0):
self.x = x
self.y = y
# handle print calling
def __str__(self):
return(f"({self.x}, {self.y})")
# given another point, other, find the distance to it
def dist(self, other):
return(math.hypot(self.x - other.x, self.y - other.y))
# Q1:
# A triangle is defined by the three vertices. Write the following functions of the Triangle class
# assuming that the Point class has already been written for you. You may add helper functions as needed.
class Triangle(object):
# default constructor assigning (0, 0), (1, 0), and (0, 1) as vertices unless specified
def __init__(self, v1_x=0, v1_y=0, v2_x=1, v2_y=0, v3_x=0, v3_y=1):
#if(abs(v1_x * (v2_y - v3_y)) - (v2_x * (v1_y - v3_y)) + (v3_x * (v1_y - v2_y)) < 1.0e-8):
self.v1 = Point(v1_x, v1_y)
self.v2 = Point(v2_x, v2_y)
self.v3 = Point(v3_x, v3_y)
self.tol = 1.0e-8 # used for correcting mathematical operations using floating point values
# calculate and return the area of the triangle
def area(self):
return(abs(
(self.v3.x + self.v1.x) * (self.v3.y - self.v1.y) +
(self.v1.x + self.v2.x) * (self.v1.y - self.v2.y) +
            (self.v2.x + self.v3.x) * (self.v2.y - self.v3.y)) / 2
)
# return True if the triangle is an isosceles right angled triangle
def isRight(self):
a = self.v1.dist(self.v2)
b = self.v2.dist(self.v3)
c = self.v3.dist(self.v1)
if(
            (abs(a - b) < self.tol and abs(math.hypot(a, b) - c) < self.tol) or
            (abs(b - c) < self.tol and abs(math.hypot(b, c) - a) < self.tol) or
            (abs(c - a) < self.tol and abs(math.hypot(c, a) - b) < self.tol)
):
return(True)
return(False)
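    # e.g. the default Triangle() with vertices (0, 0), (1, 0), (0, 1) has two legs of length 1
    # and a hypotenuse of sqrt(2), so isRight() returns True.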
    # calculate and return the perimeter of the triangle
def perimeter(self):
return(
self.v1.dist(self.v2) +
self.v2.dist(self.v3) +
self.v3.dist(self.v1)
)
# return True if a Point p is strictly inside the triangle or False otherwise
def pointInside(self, p):
totalArea = 0
totalArea += Triangle(self.v1.x, self.v1.y, self.v2.x, self.v2.y, p.x, p.y).area()
totalArea += Triangle(self.v2.x, self.v2.y, self.v3.x, self.v3.y, p.x, p.y).area()
totalArea += Triangle(self.v3.x, self.v3.y, self.v1.x, self.v1.y, p.x, p.y).area()
return(abs(totalArea - self.area()) < self.tol)
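    # Worked example (illustrative values): for Triangle(0, 0, 4, 0, 0, 4) (area 8), Point(1, 1)
    # splits it into sub-triangles of areas 2, 4 and 2, which sum to 8, so pointInside() returns
    # True; Point(5, 5) makes the sub-triangle areas sum to more than 8, so it returns False.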
# Q2:
# Given a 2D list filled with 0s and 1s, write the function largestRectangle() that finds the largest
# rectangle made up of only 1s and returns the area of this rectangle. You may solve this problem
# iteratively.
# Example:
# rect is a 2D list that is filled with 0s and 1s
# return an integer of the largest area of 1s
# rect = [
# [0, 0, 0, 0, 0],
# [0, 0, 1, 1, 0],
# [0, 1, 1, 1, 0]
# ]
# largestRectangle(rect) => 4
def largestRectangle(grid):
# get maximum area of histogram
def maxAreaInHistogram(hist):
stack = []
largest = 0
for index, height in enumerate(hist):
last = None
while(stack and stack[-1][1] > height):
last = stack.pop()
largest = max(largest, (index - last[0]) * last[1])
if last is not None:
stack.append((last[0], height))
else:
stack.append((index, height))
index = len(hist)
while stack:
last = stack.pop()
largest = max(largest, (index - last[0]) * last[1])
return(largest)
# convert grid into histogram
for i in range(1, len(grid)):
for j in range(len(grid[i])):
if(grid[i][j] == 1):
grid[i][j] = grid[i - 1][j] + 1
# find maximum area of each row of new histogram grid
maxArea = max([maxAreaInHistogram(row) for row in grid])
return(maxArea)
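# Worked example for the rect above: stacking consecutive 1s column-wise turns the rows into the
# histograms [0,0,0,0,0], [0,0,1,1,0], [0,1,2,2,0]; the best rectangle in the last histogram is
# the 2-wide block of height 2, so largestRectangle(rect) returns 4.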
# Q3:
# Given n, it is easy to get the factorial of n. It might be a big number, but you can still compute it.
# However, the inverse problem is difficult. Given some largest number, n, write the function,
# inverseFactorial(n), find x such that x! is equal to n. In other words
# abs(x! - n) < abs((x - 1)! - n) and abs(x! - n) < abs((x + 1)! - n). You may use Python's
# math.factorial() in your algorithm. Assume that n is a positive integer.
# inverseFactorial(40320) => 8
# inverseFactorial(115) => 5
def inverseFactorial(n):
if(n == 1 or n == 2):
return(n)
    lo = 1
    hi = n
    # binary search for the smallest x such that x! >= n
    while(lo < hi):
        x = (lo + hi) // 2
        if(math.factorial(x) < n):
            lo = x + 1
        else:
            hi = x
    # lo! >= n, so return whichever of lo and lo - 1 has the factorial closest to n
    if(lo > 1 and abs(math.factorial(lo - 1) - n) < abs(math.factorial(lo) - n)):
        return(lo - 1)
    return(lo)
# Q4:
# Given a string, s, return the length of the longest palindrome that is a substring of s. There could
# be two or more substrings that are the longest palindromes in s, then just return the length of any
# one. There are two edge cases that your function must be able to handle - the string s itself is a
# palindrome or there is no substring of length 2 or greater that is a palindrome. The string s will
# only be lowercase letters.
# longestPalindrome("radar") => 5
# longestPalindrome("abcde") => 1
# longestPalindrome("babad") => 3
def longestPalindrome(string):
# helper method to determine if string is a palindrome
    def isPalindrome(string):
        # compare characters from both ends moving inward (handles even and odd lengths)
        left = 0
        right = len(string) - 1
        while(left < right):
            if(string[left] != string[right]):
                return(False)
            left += 1
            right -= 1
        return(True)
string = string.replace(" ", "")
if(isPalindrome(string)):
        return(len(string))
maxPalindrome = ""
for Len in range(1, len(string)):
for i in range(len(string) - Len + 1):
j = i + Len - 1
tempString = ""
for k in range(i, j + 1):
tempString += string[k]
if(isPalindrome(tempString) and len(tempString) > len(maxPalindrome)):
maxPalindrome = tempString
    return(len(maxPalindrome))
# Q5 :
# A group of friends wants to do a secret santa with each friend being assigned to another and no
# friend can be assigned to themselves (this is called derangement). The friends are named A, E, I,
# O, U, and Y. Additionally, E does not like Y and A does not like E and each will get bad gifts for
# the other if they have the opportunity to do so. Write the findPairings() function that returns a list
# with all the assignments that do not assign any of the friends to themselves, nor pair E to Y or
# A to E. You may add helper functions as needed.
# Match the friends for secret santa
# friends = ["A", "E", "I", "O", "U", "Y"]
# findPairings(friends) => ["E:I", "I:O", "O:U", "U:Y", "Y:A"]
# def findPairings(friends):
# # given two friends, f1 and f2, return True if they are a valid match and False otherwise
# def canPair(f1, f2):
# if(f1 == f2):
# return(False)
# elif(f1 == "A" and f2 == "E") or (f1 == "E" and f2 == "A"):
# return(False)
# elif(f1 == "E" and f2 == "Y") or (f1 == "Y" and f2 == "E"):
# return(False)
# return(True)
# n = len(friends)
# grid = [[0 for j in range(n)] for i in range(n)]
# for i in range(len(friends)):
# for j in range(len(friends)):
# if(canPair(friends[i], friends[j])):
# grid[i][j] += 1
# pairings = []
# used = []
# for i in range(len(grid)):
# for j in range(len(grid[i])):
# if(grid[i][j] == 1 and j not in used):
# used.append(j)
# pairings.append(f"{friends[i]}:{friends[j]}")
# break
# return(pairings)
def findPairings(friends, pairingsSet):
# given two friends, f1 and f2, return True if they are a valid match and False otherwise
def canPair(f1, f2):
if(f1 == f2):
return(False)
# elif(f1 == "A" and f2 == "E") or (f1 == "E" and f2 == "A"):
# return(False)
# elif(f1 == "E" and f2 == "Y") or (f1 == "Y" and f2 == "E"):
# return(False)
return(True)
# returns True if no other sender has claimed that recipient
def isValidColumn(grid, row, col):
temp = row
while(temp >= 0):
if(grid[temp][col] == 2):
return(False)
temp -= 1
temp = row
while(temp < len(grid)):
if(grid[temp][col] == 2):
return(False)
temp += 1
return(True)
# recursive helper method once grid is made
def findPairingsHelper(grid, friends, pairings):
if(len(pairings) == len(friends)):
pairingsSet.append(pairings[:])
else:
row = len(pairings)
for j in range(len(grid[row])):
if(grid[row][j] == 1 and isValidColumn(grid, row, j)):
grid[row][j] += 1
pairings.append(f"{friends[row]}:{friends[j]}")
findPairingsHelper(grid, friends, pairings)
pairings.pop()
grid[row][j] -= 1
n = len(friends)
grid = [[0 for j in range(n)] for i in range(n)]
for i in range(len(friends)):
for j in range(len(friends)):
if(canPair(friends[i], friends[j])):
grid[i][j] += 1
findPairingsHelper(grid, friends, [])
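# Hedged usage sketch: results are collected in the caller-supplied list instead of being returned, e.g.
#   allPairings = []
#   findPairings(["A", "E", "I", "O", "U", "Y"], allPairings)
#   # allPairings now holds one list of "sender:recipient" strings per valid assignment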
# EC:
# Trace Ackermann's function
# if m = 0, f(m, n) => n
#!/usr/bin/python
# -*- coding: utf-8 -*-
import fileinput
import gettext
import os
import platform
import sqlite3
import subprocess
import sys
import tkinter as tk
from tkinter import *
if platform.system() == "Windows":
import winreg
if sys.version_info[0] < 3:
raise Exception("This bot works only with Python 3.x")
root = tk.Tk()
root.wm_title("Setup")
root.geometry("290x350")
root.resizable(width=False, height=False)
def startupinfo():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='console'")
query = cursor.fetchone()
console = "hide"
if query:
console = query["value"]
if console == "hide":
if platform.system() == "Windows":
value = subprocess.STARTUPINFO()
value.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
value = None
else:
value = None
return value
def db_and_co():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
    # The bot will automatically create the right db if it does not exist
config_table = "CREATE TABLE IF NOT EXISTS " \
"`config` ( `id` INTEGER UNIQUE, `name` TEXT ," \
" `value` TEXT, UNIQUE(name, value), PRIMARY KEY(`id`))"
users_table = "CREATE TABLE IF NOT EXISTS " \
"`users` ( `id` INTEGER UNIQUE, `name_first` TEXT," \
" `name_last` TEXT, `username` TEXT, `privs` INTEGER," \
" `last_use` INTEGER, `time_used` INTEGER," \
" `language` TEXT DEFAULT 'en', PRIMARY KEY(`id`))"
cursor.execute(config_table)
cursor.execute(users_table)
create_mo_files()
cursor.execute("SELECT value FROM config WHERE name='language'")
query = cursor.fetchone()
lang = "en"
if query:
lang = query["value"]
translate = gettext.translation(
"setup", localedir="locale", languages=[lang])
translate.install()
return lang
def en_lang():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='language'")
data = cursor.fetchall()
if len(data) == 0:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"INSERT INTO config(name, value) VALUES ('language', 'en')")
handle.commit()
restart_popup()
else:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("UPDATE config SET value='en' WHERE name='language'")
handle.commit()
restart_popup()
def it_lang():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='language'")
data = cursor.fetchall()
if len(data) == 0:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"INSERT INTO config(name, value) VALUES ('language', 'it')")
handle.commit()
restart_popup()
else:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("UPDATE config SET value='it' WHERE name='language'")
handle.commit()
restart_popup()
def botfather_token_check():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='BotFather_token'")
data = cursor.fetchall()
if len(data) == 0:
B1.configure(text=_("Confirm"))
else:
B1.configure(text=_("Change token"))
def imgur_token_check():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='Imgur_token'")
data = cursor.fetchall()
if len(data) == 0:
B2.configure(text=_("Confirm"))
else:
B2.configure(text=_("Change token"))
def botfather_token_set(val1):
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='BotFather_token'")
data = cursor.fetchall()
if len(data) == 0:
        if 45 <= len(val1) <= 50:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"INSERT INTO config(name, value)"
" VALUES ('BotFather_token', ?)", (val1,))
handle.commit()
token1.destroy()
B1.destroy()
L1_done.configure(text=_("Token saved!"),
font="Times 11", fg="green", justify=LEFT)
elif len(val1) == 0:
L1_done.configure(text=_("Your entry is empty"),
font="Times 11", fg="red", justify=LEFT)
else:
L1_done.configure(text=_("The inserted token is wrong"),
font="Times 11", fg="red", justify=LEFT)
else:
        if 45 <= len(val1) <= 50:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"UPDATE config SET value=? "
"WHERE name='BotFather_token'", (val1,))
handle.commit()
token1.destroy()
B1.destroy()
L1_done.configure(text=_("Token saved!"),
font="Times 11", fg="green", justify=LEFT)
elif len(val1) == 0:
L1_done.configure(text=_("Your entry is empty"),
font="Times 11", fg="red", justify=LEFT)
else:
L1_done.configure(text=_("The inserted token is wrong"),
font="Times 11", fg="red", justify=LEFT)
def imgur_token_set(val2):
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='Imgur_token'")
data = cursor.fetchall()
if len(data) == 0:
if len(val2) != 0:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"INSERT INTO config(name, value) "
"VALUES ('Imgur_token', ?)", (val2,))
handle.commit()
token2.destroy()
B2.destroy()
L2_done.configure(text=_("Token saved!"),
font="Times 11", fg="green", justify=LEFT)
else:
L2_done.configure(text=_("Your entry is empty"),
font="Times 11", fg="red", justify=LEFT)
else:
if len(val2) != 0:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"UPDATE config SET value=? WHERE name='Imgur_token'", (val2,))
handle.commit()
token2.destroy()
B2.destroy()
L2_done.configure(text=_("Token saved!"),
font="Times 11", fg="green", justify=LEFT)
else:
L2_done.configure(text=_("Your entry is empty"),
font="Times 11", fg="red", justify=LEFT)
def requirements_check():
if os.path.isfile("requirements_log.txt") is False:
B3.configure(text=_("Install the requirements"))
else:
B3.configure(text=_("Update the requirements"))
def requirements():
if platform.system() == "Windows":
subprocess.call(
"pip install -r requirements.txt > requirements_log.txt",
startupinfo=startupinfo(), shell=True)
else:
if sys.version_info[0] < 3:
subprocess.call(
"pip install -r requirements.txt > requirements_log.txt",
startupinfo=startupinfo(), shell=True)
else:
subprocess.call(
"pip3 install -r requirements.txt > requirements_log.txt",
startupinfo=startupinfo(), shell=True)
requirements_popup()
requirements_check()
def requirements_popup():
req = tk.Toplevel(root)
l_frame = tk.Frame(req)
l_frame.pack(fill=tk.X, side=tk.TOP)
b_frame = tk.Frame(req)
b_frame.pack(fill=tk.X, side=tk.BOTTOM)
b_frame.columnconfigure(0, weight=1)
b_frame.columnconfigure(1, weight=1)
lr = Label(l_frame, text=_("The requirements install process is done.\n"
"Do you want to take a look to the log?"),
font="Times 11", justify=LEFT)
lr.grid()
yes_b = tk.Button(b_frame, text=_("Yes"),
command=lambda: [req.destroy(), log_link()])
yes_b.grid(row=0, column=0, sticky=tk.W+tk.E)
no_b = tk.Button(b_frame, text=_("No"),
command=lambda: req.destroy())
no_b.grid(row=0, column=1, sticky=tk.W+tk.E)
def log_link():
if platform.system() == "Windows":
subprocess.call(
"requirements_log.txt", startupinfo=startupinfo(), shell=True)
else:
subprocess.call("xdg-open requirements_log.txt",
startupinfo=startupinfo(), shell=True)
def create_mo_files():
if os.path.isfile('locale/en/LC_MESSAGES/setup.mo') is False:
subprocess.call(
"pip install Babel", startupinfo=startupinfo(), shell=True)
subprocess.call('pybabel compile -D setup '
'-d locale -l en -i locale/en/LC_MESSAGES/setup.po',
startupinfo=startupinfo(), shell=True)
subprocess.call('pybabel compile -D setup '
'-d locale -l it -i locale/it/LC_MESSAGES/setup.po',
startupinfo=startupinfo(), shell=True)
if os.path.isfile('locale/en/LC_MESSAGES/pccontrol.mo') is False:
subprocess.call(
"pip install Babel", startupinfo=startupinfo(), shell=True)
subprocess.call(
'pybabel compile -D pccontrol '
'-d locale -l en -i locale/en/LC_MESSAGES/pccontrol.po',
startupinfo=startupinfo(), shell=True)
subprocess.call(
'pybabel compile -D pccontrol '
'-d locale -l it -i locale/it/LC_MESSAGES/pccontrol.po',
startupinfo=startupinfo(), shell=True)
elif os.path.isfile('locale/en/LC_MESSAGES/pccontrol.mo') is False:
subprocess.call(
"pip install Babel", startupinfo=startupinfo(), shell=True)
subprocess.call(
'pybabel compile -D pccontrol '
'-d locale -l en -i locale/en/LC_MESSAGES/pccontrol.po',
startupinfo=startupinfo(), shell=True)
subprocess.call(
'pybabel compile -D pccontrol '
'-d locale -l it -i locale/it/LC_MESSAGES/pccontrol.po',
startupinfo=startupinfo(), shell=True)
def bot_start():
root.withdraw()
create_mo_files()
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
if startupinfo() is not None:
if platform.system() == "Windows":
cursor.execute("SELECT value FROM config WHERE name='startup'")
query = cursor.fetchone()
startup = "false"
if query:
startup = query["value"]
if startup == "true":
subprocess.call(sys.executable + " bot.pyw",
creationflags=0x08000000,
shell=True)
else:
subprocess.call(sys.executable + " bot.py",
creationflags=0x08000000,
shell=True)
else:
cursor.execute("SELECT value FROM config WHERE name='startup'")
query = cursor.fetchone()
startup = "false"
if query:
startup = query["value"]
if startup == "true":
subprocess.call(sys.executable + " bot.pyw", shell=True)
else:
subprocess.call(sys.executable + " bot.py", shell=True)
else:
cursor.execute("SELECT value FROM config WHERE name='startup'")
query = cursor.fetchone()
startup = "false"
if query:
startup = query["value"]
if startup == "true":
subprocess.call(sys.executable + " bot.pyw", shell=True)
else:
subprocess.call(sys.executable + " bot.py", shell=True)
def privs_window():
privs = tk.Toplevel(root)
privs.wm_title(_("Permissions"))
usr_l = Label(privs, text=_("Username"),
font="Times 11 bold", justify=LEFT)
usr_l.pack()
usr_e = Entry(privs, bd=5)
usr_e.pack()
add_b = tk.Button(privs, text=_("Add permissions"),
command=lambda: add_privs(usr_e.get()))
add_b.pack()
rm_b = tk.Button(privs, text=_("Remove permissions"),
command=lambda: remove_privs(usr_e.get()))
rm_b.pack()
usr_done = Label(privs, text="")
usr_done.pack()
def add_privs(usr):
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT * FROM users WHERE username=?", (usr,))
data = cursor.fetchall()
if len(data) != 0:
cursor.execute("UPDATE users SET privs='-2' WHERE username=?",
(usr,))
handle.commit()
usr_e.destroy()
add_b.destroy()
rm_b.destroy()
usr_done.configure(text=_("Permissions for %s changed!") % (
usr), font="Times 11", fg="green", justify=LEFT)
else:
usr_done.configure(text=_("%s isn't in your database") % (
usr), font="Times 11", fg="red", justify=LEFT)
def remove_privs(usr):
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT * FROM users WHERE username=?", (usr,))
data = cursor.fetchall()
if len(data) != 0:
cursor.execute("UPDATE users SET privs='' WHERE username=?",
(usr,))
handle.commit()
usr_e.destroy()
add_b.destroy()
rm_b.destroy()
usr_done.configure(text=_("Permissions for %s changed!") % (
usr), font="Times 11", fg="green", justify=LEFT)
else:
usr_done.configure(text=_("%s isn't in your database") % (
usr), font="Times 11", fg="red", justify=LEFT)
def restart_popup():
privs = tk.Toplevel(root)
privs.wm_title(_("Restart"))
lp = Label(privs, text=_(
"Please restart bot_setup to apply the change"),
font="Times 11", justify=LEFT)
lp.pack()
add_b = tk.Button(privs, text=_("Restart"), command=lambda: restart())
add_b.pack()
def restart():
python = sys.executable
os.execl(python, python, *sys.argv)
def console_show():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='console'")
data = cursor.fetchall()
if len(data) == 0:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"INSERT INTO config(name, value) VALUES ('console', 'show')")
handle.commit()
restart_popup()
else:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("UPDATE config SET value='show' WHERE name='console'")
handle.commit()
restart_popup()
def console_hide():
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute("SELECT value FROM config WHERE name='console'")
data = cursor.fetchall()
if len(data) == 0:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
cursor = handle.cursor()
cursor.execute(
"INSERT INTO config(name, value) VALUES ('console', 'hide')")
handle.commit()
restart_popup()
else:
handle = sqlite3.connect('pccontrol.sqlite')
handle.row_factory = sqlite3.Row
        cursor = handle.cursor()
        cursor.execute("UPDATE config SET value='hide' WHERE name='console'")
        handle.commit()
        restart_popup()
from numpy import isnan, take, any, all, logical_or, logical_and, logical_not, atleast_1d, \
asarray, argmin, argsort, abs, isfinite, dot#where
import numpy as np
# for PyPy
from openopt.kernel.nonOptMisc import where
from bisect import bisect_right
from FuncDesigner.Interval import splitDomainForDiscreteVariable
try:
from bottleneck import nanmin
except ImportError:
from numpy import nanmin
def getTruncatedArrays(ind, y, e, indT, _s):
# TODO: rework it when numpy will have appropriate inplace function
s = ind.size
y = take(y, ind, axis=0, out=y[:s])
e = take(e, ind, axis=0, out=e[:s])
_s = _s[ind]
if indT is not None:
indT = indT[ind]
return y, e, indT, _s#, nlh, nlh_0
def adjustDiscreteVarBounds(y, e, p):
# TODO: rework it
#n = p.n
# TODO: remove the cycle, use vectorization
for i in p._discreteVarsNumList:
v = p._freeVarsList[i]
y[:, i], e[:, i] = splitDomainForDiscreteVariable(y[:, i], e[:, i], v)
# ind = y>e
# assert not any(ind)
# y[ind], e[ind] = e[ind], y[ind]
# Ind = any(y>e, 1)
# trunc_ind = where(logical_not(Ind))[0]
# # TODO: is it triggered? // updated: can be from MOP or cons
# if any(Ind):
# ind = where(logical_not(Ind))[0]
# s = ind.size
# y = take(y, ind, axis=0, out=y[:s])
# e = take(e, ind, axis=0, out=e[:s])
# _s = _s[ind]
# if indT is not None:
# indT = indT[ind]
return y, e#, trunc_ind#_s, indT
def func7(y, e, o, a, _s, indT, nlhc, residual):
r10 = logical_and(all(isnan(o), 1), all(isnan(a), 1))
if any(r10):
j = where(logical_not(r10))[0]
lj = j.size
y = take(y, j, axis=0, out=y[:lj])
e = take(e, j, axis=0, out=e[:lj])
o = take(o, j, axis=0, out=o[:lj])
a = take(a, j, axis=0, out=a[:lj])
_s = _s[j]
if indT is not None:
indT = indT[j]
if nlhc is not None:
nlhc = take(nlhc, j, axis=0, out=nlhc[:lj])
if residual is not None:
residual = take(residual, j, axis=0, out=residual[:lj])
return y, e, o, a, _s, indT, nlhc, residual
def func9(an, fo, g, p):
#ind = searchsorted(ar, fo, side='right')
if p.probType in ('NLSP', 'SNLE') and p.maxSolutions != 1:
mino = atleast_1d([node.key for node in an])
ind = mino > 0
if not any(ind):
return an, g
else:
g = nanmin((g, nanmin(mino[ind])))
ind2 = where(logical_not(ind))[0]
#an = take(an, ind2, axis=0, out=an[:ind2.size])
#an = asarray(an[ind2])
an = [an[i] for i in ind2]
return an, g
elif p.solver.dataHandling == 'sorted':
#OLD
mino = [node.key for node in an]
ind = bisect_right(mino, fo)
if ind == len(mino):
return an, g
else:
g = nanmin((g, nanmin(atleast_1d(mino[ind]))))
return an[:ind], g
elif p.solver.dataHandling == 'raw':
#NEW
mino = atleast_1d([node.key for node in an])
r10 = mino > fo
if not any(r10):
return an, g
else:
ind = where(r10)[0]
g = nanmin((g, nanmin(atleast_1d(mino)[ind])))
#an = asarray(an)
ind2 = where(logical_not(r10))[0]
#an = take(an, ind2, axis=0, out=an[:ind2.size])
an = [an[i] for i in ind2]
return an, g
# NEW 2
# curr_tnlh = [node.tnlh_curr for node in an]
# import warnings
# warnings.warn('! fix g')
return an, g
else:
assert 0, 'incorrect nodes remove approach'
def func5(an, nn, g, p):
m = len(an)
if m <= nn: return an, g
mino = np.array([node.key for node in an])
if nn == 1: # box-bound probs with exact interval analysis
ind = argmin(mino)
assert ind in (0, 1), 'error in interalg engine'
g = nanmin((mino[1-ind], g))
        an = [an[ind]]
elif m > nn:
if p.solver.dataHandling == 'raw':
ind = argsort(mino)
th = mino[ind[nn]]
ind2 = where(mino < th)[0]
g = nanmin((th, g))
#an = take(an, ind2, axis=0, out=an[:ind2.size])
an = [an[i] for i in ind2]#an[ind2]
else:
g = nanmin((mino[nn], g))
an = an[:nn]
return an, g
def func4(p, y, e, o, a, fo, tnlhf_curr = None):
# TODO: simplifications for all-bool probs
if fo is None and tnlhf_curr is None: return False# used in IP
if y.size == 0: return False
cs = (y + e)/2
n = y.shape[1]
    # TODO: a, o could be changed to +/- inf instead of duplicating values
if tnlhf_curr is not None:
tnlh_modL = tnlhf_curr[:, :n]
ind = logical_not(isfinite(tnlh_modL))
else:
s = o[:, :n]
ind = logical_or(s > fo, isnan(s)) # TODO: assert isnan(s) is same to isnan(a_modL)
# hasDiscreteVariables = len(p._discreteVarsNumList) != 0
indT = any(ind, 1)
if any(ind):
# if hasDiscreteVariables:
for j, v in enumerate(p._discreteVarsList):
i = p._discreteVarsNumList[j]
k = where(ind[:, i])[0]
if k.size == 0: continue
discr_mid1, discr_mid2 = splitDomainForDiscreteVariable(y[k, i], e[k, i], v)
cs[k, i] = discr_mid2
y[ind] = cs[ind]
# TODO: check is it ever called from MOP, implement if not
if p.probType != 'MOP':
a[:, :n][ind] = a[:, n:][ind]
o[:, :n][ind] = o[:, n:][ind]
if tnlhf_curr is not None:
tnlhf_curr[:, :n][ind] = tnlhf_curr[:, n:][ind]
if tnlhf_curr is not None:
tnlh_modU = tnlhf_curr[:, n:]
ind = logical_not(isfinite(tnlh_modU))
else:
q = o[:, n:]
ind = logical_or(q > fo, isnan(q)) # TODO: assert isnan(q) is same to isnan(a_modU)
indT = logical_or(any(ind, 1), indT)
if any(ind):
# copy is used to prevent y and e being same array, that may be buggy with discrete vars
# TODO: remove copy after new discrete vars handling implementation
for j, v in enumerate(p._discreteVarsList):
i = p._discreteVarsNumList[j]
k = where(ind[:, i])[0]
if k.size == 0: continue
discr_mid1, discr_mid2 = splitDomainForDiscreteVariable(y[k, i], e[k, i], v)
cs[k, i] = discr_mid1
e[ind] = cs[ind].copy()
# Changes
# ind = logical_and(ind, logical_not(isnan(a[:, n:])))
## ii = len(where(ind)[0])
## if ii != 0: print ii
if p.probType != 'MOP':
a[:, n:][ind] = a[:, :n][ind]
o[:, n:][ind] = o[:, :n][ind]
if tnlhf_curr is not None:
tnlhf_curr[:, n:][ind] = tnlhf_curr[:, :n][ind]
# for arr in arrays:
# if arr is not None:
# arr[:, n:2*n][ind] = arr[:, 0:n][ind]
return indT
def truncateByPlane(y, e, indT, A, b):
#!!!!!!!!!!!!!!!!!!!
# TODO: vectorize it by matrix A
#!!!!!!!!!!!!!!!!!!!
ind_trunc = True
# assert np.asarray(b).size <= 1, 'unimplemented yet'
m, n = y.shape
if m == 0:
assert e.size == 0, 'bug in interalg engine'
return y, e, indT, ind_trunc
ind_positive = where(A > 0)[0]
ind_negative = where(A < 0)[0]
# print ind_negative, ind_positive
A1 = A[ind_positive]
S1 = A1 * y[:, ind_positive]
A2 = A[ind_negative]
S2 = A2 * e[:, ind_negative]
# print A1.shape, A2.shape, e[:, ind_negative].shape
S = S1.sum(axis=1) + S2.sum(axis=1)
if ind_positive.size != 0:
S1_ = b - S.reshape(-1, 1) + S1
Alt_ub = S1_ / A1
ind = e[:, ind_positive] > Alt_ub #+ 1e-15*abs(Alt_ub)
if np.any(ind):
            _ind = where(ind.flatten())[0]
            # advanced indexing returns a copy, so write the tightened upper bounds back into e explicitly
            e_new = e[:, ind_positive]
            e_new.flat[_ind] = Alt_ub.flat[_ind]
            e[:, ind_positive] = e_new
            indT[any(ind, axis = 1)] = True
if ind_negative.size != 0:
S2_ = b - S.reshape(-1, 1) + S2
Alt_lb = S2_ / A2
ind = y[:, ind_negative] < Alt_lb #- 1e-15 * abs(Alt_lb)
if np.any(ind):
#y[:, ind_negative][ind] = Alt_lb[ind]
            _ind = where(ind.flatten())[0]
            # same here: write the tightened lower bounds back into y explicitly
            y_new = y[:, ind_negative]
            y_new.flat[_ind] = Alt_lb.flat[_ind]
            y[:, ind_negative] = y_new
            indT[any(ind, axis = 1)] = True
ind = all(e>=y, axis = 1)
if not all(ind):
ind_trunc = where(ind)[0]
lj = ind_trunc.size
y = take(y, ind_trunc, axis=0, out=y[:lj])
e = take(e, ind_trunc, axis=0, out=e[:lj])
indT = indT[ind_trunc]
return y, e, indT, ind_trunc
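# Worked example (illustrative numbers): for a single constraint A = [1, 1], b = 1 and one box with
# y = [0.5, 0.2], e = [1, 1], the lower bound of A*x over the box is S = 0.7; coordinate 0 can then
# be at most (b - S + A[0]*y[0]) / A[0] = 0.8 and coordinate 1 at most 0.5, so e is tightened from
# [1, 1] to [0.8, 0.5] and indT is set for that box.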
def _truncateByPlane(y, e, indT, A, b):
#!!!!!!!!!!!!!!!!!!!
# TODO: vectorize it by matrix A
#!!!!!!!!!!!!!!!!!!!
ind_trunc = slice(None)
# assert np.asarray(b).size <= 1, 'unimplemented yet'
m, n = y.shape
A = A.T
assert A.shape == y.shape
if m == 0:
assert e.size == 0, 'bug in interalg engine'
return y, e, indT, ind_trunc
# ind_positive = where(A > 0)[0]
# ind_negative = where(A < 0)[0]
# print ind_negative, ind_positive
#TODO: mb store ind_positive, ind_negative in prob for fixed A
ind_positive, ind_negative = where(A>0), where(A<0)
# S1 = dot(where(A>0, A, -A).T, y.T)
# S2 = dot(where(A<0, A, -A).T, e.T)
# S_y = dot(where(A>0, A, 0).T, y.T)
# S_y_ = dot(where(A<0, A, 0).T, y.T)
# S_e = dot(where(A>0, A, 0).T, e.T)
# S_e_ = dot(where(A<0, A, 0).T, e.T)
S_y = where(A>0, A, 0) * y
S_y_ = where(A<0, A, 0) * y
S_e = where(A>0, A, 0) * e
S_e_ = where(A<0, A, 0) * e
# _S = S1+S2
# 1
S1 = S_y
S2 = S_e_
S = S1.sum(axis=1) + S2.sum(axis=1)
    d = (b.reshape(-1, 1) - S.reshape(-1, 1)) + S1 # vector
# caldavclientlibrary/protocol/url.py
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import urllib
class URL(object):
eAbsolute = 0
eRelative = 1
eLastPath = 2
URLEscape = '%'
URLReserved = "/?:@&="
URLUnreserved = ( # Allowable URL chars
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, # 48 - 63
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLCharacter = ( # Allowable URL chars -- all
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 48 - 63
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLXCharacter = ( # Allowable URL chars (all)
# RFC2732 uses '[...]' for IPv6 addressing - [] are now allowed
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0 - 15
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 16 - 31
0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 32 - 47
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, # 48 - 63
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 64 - 79
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, # 80 - 95
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 96 - 111
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, # 112 - 127
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 128 - 143
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 144 - 159
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 160 - 175
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 176 - 191
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 192 - 207
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 208 - 223
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 224 - 239
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 240 - 255
)
URLSchemeDoubleSlash = ("http", "https", "webcal",)
def __init__(self, url=None, scheme=None, server=None, path=None, extended=None, decode=False):
self.scheme = ""
self.server = ""
self.path = ""
self.extended = ""
if not url:
self.scheme = scheme
self.server = server
self.path = path
if self.path and decode:
self.path = urllib.unquote(self.path)
self.extended = extended
if self.extended and decode:
self.extended = urllib.unquote_plus(self.extended)
else:
self._parse(url, decode)
def __str__(self):
return "URL: %s" % (self.toString(),)
def __repr__(self):
return "URL: %s" % (self.toString(),)
def __cmp__(self, other):
return cmp(self.toString(), other.toString())
def absoluteURL(self):
return self.toString()
def relativeURL(self):
return self.toString(conversion=URL.eRelative)
def toString(self, conversion=eAbsolute, encode=True):
result = ""
# Add scheme & host if not relative
if conversion == URL.eAbsolute and self.scheme and self.server:
result += self.scheme + ":"
if self.scheme in URL.URLSchemeDoubleSlash:
result += "//"
result += self.server
# Get path (or last part of it if required)
if self.path and conversion == URL.eLastPath:
path = self.path[self.path.rfind("/"):]
else:
path = self.path
# Now encode if required
if path:
result += (urllib.quote(path) if encode else path)
if self.extended:
result += (urllib.quote_plus(self.extended, "?&=") if encode else self.extended)
return result
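    # Illustrative example (assumed values): URL(scheme="http", server="example.com", path="/cal/home/")
    # gives "http://example.com/cal/home/" from toString(), while relativeURL() returns just the
    # encoded path "/cal/home/".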
def equal(self, comp):
        #
columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# Filter by latest installable revisions that contain tools with missing tool test components.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ):
changeset_revision = \
grids_util.filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) ) \
.join( model.User.table )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class MyWritableRepositoriesMissingToolTestComponentsGrid( RepositoriesMissingToolTestComponentsGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with missing tool test components"
columns = [ col for col in RepositoriesMissingToolTestComponentsGrid.columns ]
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ):
allow_push = repository.allow_push( trans.app )
if allow_push:
allow_push_usernames = allow_push.split( ',' )
if username in allow_push_usernames:
user_clause_list.append( model.Repository.table.c.id == repository.id )
if user_clause_list:
# We have the list of repositories that the current user is authorized to update, so filter
# further by latest installable revisions that contain tools with missing tool test components.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.filter( or_( *user_clause_list ) ):
changeset_revision = \
grids_util.filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.join( model.User.table ) \
.filter( or_( *user_clause_list ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class RepositoriesWithTestInstallErrorsGrid( RepositoryGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with tool test installation errors"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.LatestInstallableRevisionColumn( "Latest Installable Revision" ),
RepositoryGrid.UserColumn( "Owner",
key="User.username",
model_class=model.User,
link=( lambda item: dict( operation="repositories_by_user", id=item.id ) ),
attach_popup=False )
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
        # Filter by latest installable revisions that have tool test installation errors.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ):
changeset_revision = \
grids_util.filter_by_latest_downloadable_changeset_revision_that_has_test_install_errors( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) ) \
.join( model.User.table )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class MyWritableRepositoriesWithTestInstallErrorsGrid( RepositoriesWithTestInstallErrorsGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with tool test installation errors"
columns = [ col for col in RepositoriesWithTestInstallErrorsGrid.columns ]
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ):
allow_push = repository.allow_push( trans.app )
if allow_push:
allow_push_usernames = allow_push.split( ',' )
if username in allow_push_usernames:
user_clause_list.append( model.Repository.table.c.id == repository.id )
if user_clause_list:
# We have the list of repositories that the current user is authorized to update, so filter
            # further by latest installable revisions that have tool test installation errors.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.filter( or_( *user_clause_list ) ):
changeset_revision = \
grids_util.filter_by_latest_downloadable_changeset_revision_that_has_test_install_errors( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.join( model.User.table ) \
.filter( or_( *user_clause_list ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class RepositoriesWithSkipTestsCheckedGrid( RepositoryGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with skip tool tests checked"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.LatestInstallableRevisionColumn( "Latest Installable Revision" ),
RepositoryGrid.UserColumn( "Owner",
key="User.username",
model_class=model.User,
link=( lambda item: dict( operation="repositories_by_user", id=item.id ) ),
attach_popup=False )
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
        # Filter by latest installable revisions that have the skip tool tests setting checked.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ):
changeset_revision = \
grids_util.filter_by_latest_downloadable_changeset_revision_with_skip_tests_checked( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) ) \
.join( model.User.table )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class MyWritableRepositoriesWithSkipTestsCheckedGrid( RepositoriesWithSkipTestsCheckedGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with skip tool tests checked"
columns = [ col for col in RepositoriesWithSkipTestsCheckedGrid.columns ]
operations = []
use_paging = False
def build_initial_query( self, trans, **kwd ):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ):
allow_push = repository.allow_push( trans.app )
if allow_push:
allow_push_usernames = allow_push.split( ',' )
if username in allow_push_usernames:
user_clause_list.append( model.Repository.table.c.id == repository.id )
if user_clause_list:
# We have the list of repositories that the current user is authorized to update, so filter
            # further by latest installable revisions that have the skip tool tests setting checked.
revision_clause_list = []
for repository in trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.filter( or_( *user_clause_list ) ):
changeset_revision = \
grids_util.filter_by_latest_downloadable_changeset_revision_with_skip_tests_checked( trans, repository )
if changeset_revision:
revision_clause_list.append( model.RepositoryMetadata.table.c.changeset_revision == changeset_revision )
if revision_clause_list:
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deprecated == false(),
model.Repository.table.c.deleted == false() ) ) \
.join( model.User.table ) \
.filter( or_( *user_clause_list ) ) \
.join( model.RepositoryMetadata ) \
.filter( or_( *revision_clause_list ) )
# Return an empty query.
return trans.sa_session.query( model.Repository ) \
.filter( model.Repository.table.c.id < 0 )
class DeprecatedRepositoriesIOwnGrid( RepositoriesIOwnGrid ):
title = "Deprecated repositories I own"
columns = [
RepositoriesIOwnGrid.NameColumn( "Name",
key="name",
link=( lambda item: dict( operation="view_or_manage_repository", id=item.id ) ),
attach_popup=False ),
RepositoryGrid.TypeColumn( "Type" ),
RepositoriesIOwnGrid.MetadataRevisionColumn( "Metadata<br/>Revisions" ),
RepositoryGrid.ToolsFunctionallyCorrectColumn( "Tools or<br/>Package<br/>Verified" ),
RepositoriesIOwnGrid.CategoryColumn( "Category",
model_class=model.Category,
key="Category.name",
attach_popup=False ),
]
columns.append( grids.MulticolFilterColumn( "Search repository name",
cols_to_filter=[ columns[0] ],
key="free-text-search",
visible=False,
filterable="standard" ) )
use_paging = False
def build_initial_query( self, trans, **kwd ):
return trans.sa_session.query( model.Repository ) \
.filter( and_( model.Repository.table.c.deleted == false(),
model.Repository.table.c.user_id == trans.user.id,
model.Repository.table.c.deprecated == true() ) ) \
.join( model.User.table ) \
.outerjoin( model.RepositoryCategoryAssociation.table ) \
.outerjoin( model.Category.table )
class RepositoriesWithFailingToolTestsGrid( RepositoryGrid ):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with failing tool tests"
columns = [
RepositoryGrid.NameColumn( "Name",
key="name",
                                       link=( lambda
import dash
import dash_core_components as dcc
import dash_table
import dash_html_components as html
import dash_bootstrap_components as dbc
import pandas as pd
import os
from dash.exceptions import PreventUpdate
import dash_bio as dashbio
from src.processing import Processing
from src.dashView import initializeData
# files, which are processed
# read-only
file_list = None
struct_data = None
# starts dash
# file_list: input data
# sec_struct_data: input structural data
# port: port
def startDash(files, port, sec_struct_data):
global file_list
global struct_data
file_list = files
struct_data = sec_struct_data
app.run_server(debug=False, host='0.0.0.0', port=port)
# calculates slider ranges
# peak-boolean sets first value to 'none' (for peak-slider)
def markSliderRange(min_val, max_val, peak):
mark = {}
if peak:
min_val += 1
mark[0] = 'none'
for i in range(min_val, max_val + 1):
mark[i] = str(i)
return mark
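# e.g. markSliderRange(0, 3, False) -> {0: '0', 1: '1', 2: '2', 3: '3'}
# and markSliderRange(0, 3, True) -> {0: 'none', 1: '1', 2: '2', 3: '3'}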
# range() function for floats
# start: start-value which is head of list
# step: steps between two values
# run: number of loop runs
def float_range(start, step, run):
for_list = [start]
for i in range(1, run):
next_step = start + step * i
for_list.append(next_step)
return for_list
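# e.g. float_range(0.5, 0.25, 3) -> [0.5, 0.75, 1.0]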
# checks if custom normalization rates sum up to one
# parameters (e.g. ee,ss,etc. ..): rate for 2-mer
def check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs):
custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]
k_mer_sum = round(sum(custom_rates), 1)
check_passed = bool(k_mer_sum == 1)
return check_passed
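# e.g. passing 0.0625 for all sixteen 2-mer rates sums to exactly 1.0, so check_sum(...) returns True,
# while all-zero rates return False.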
# ------------------------------------------- Dash-Layout --------------------------------------------------------------
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = "k-Mer Dash"
app.layout = dbc.Container([
# ------------------------------------------ Store -----------------------------------------------------------------
dbc.Spinner(children=[dcc.Store(id='memory', storage_type='memory')],
color="primary", fullscreen=True),
# -------------------------------------------------------------------------------------------------------------------
dbc.Card([
dbc.Row([
dbc.Col(
dbc.CardBody([
html.H3("Menu"),
html.Br(),
# ------------------------------------- Select File1 And File 2 ------------------------------------
html.H6("Selected files:", id="sel_files_header"),
dbc.Select(
id="file1",
options=[],
value="0"),
dbc.Select(
id="file2",
options=[],
value="1"),
dbc.Tooltip(
"Files containing DNA nucleotide-sequences used for k-mer visualization",
target="sel_files_header"
),
html.Br(),
html.Br(),
# ------------------------------------- Select Structure Files -------------------------------------
html.H6("Selected structure files:", id="struc_files_header"),
dbc.Select(
id="file3",
options=[{"label": "-", "value": "0"}],
value="0"),
dbc.Select(
id="file4",
options=[{"label": "-", "value": "0"}],
value="1"),
dbc.Tooltip(
"Files containing element-strings used for RNA structure heatmaps(s)",
target="struc_files_header"
),
html.Br(),
html.Br(),
# ------------------------------------------- K ----------------------------------------------------
html.H6("K-mer length:", id="k_header"),
dcc.Slider(
id='k',
min=0,
max=10,
step=1,
value=3,
marks=markSliderRange(0, 10, False)
),
dbc.Tooltip(
"Length of visualized substrings (k-mer)",
target="k_header"
),
html.Br(),
# ----------------------------------------- Peak ---------------------------------------------------
html.H6("Peak-position:", id="peak_header"),
dcc.Slider(
id='peak',
min=1,
max=10,
step=1,
value=0,
marks=markSliderRange(0, 10, True)
),
dbc.Tooltip(
"Highlighted position in sequence (e.g. assumed binding position "
"of protein in given sequences)",
target="peak_header"
),
html.Br(),
# ------------------------------------------ top ---------------------------------------------------
html.H6("Top-values:", id="top_header"),
dbc.Select(
id='top',
options=[
{'label': '10', 'value': '0'},
{'label': '20', 'value': '1'},
{'label': '50', 'value': '2'},
{'label': '100', 'value': '3'}
],
value="0"
),
dbc.Tooltip(
"Number of <top> highest k-mer occurrences",
target="top_header"
),
html.Br(),
html.Br(),
# -------------------------------- Highlighted Feature ---------------------------------------------
html.H6("Highlighted feature:", id="feature_header"),
dbc.Select(
id="Feature",
options=[
{"label": "Frequency", "value": "1"},
{"label": "T Occurrences", "value": "2"},
{"label": "A Occurrences", "value": "3"},
{"label": "C Occurrences", "value": "4"},
{"label": "G Occurrences", "value": "5"},
],
value="1"
),
dbc.Tooltip(
"Highlighted/Colored property of PCAs",
target="feature_header"
),
html.Br(),
html.Br(),
# ------------------------------- Options structural data ------------------------------------------
dbc.ButtonGroup(
[dbc.Button("Extended options", id="opt_btn_open"),
# dbc.Button("Export PDF", id="ex_btn",disabled=True)
],
size="md",
className="mr-1",
),
dbc.Tooltip(
"Options for structural data visualization",
target="opt_btn_open"
),
dbc.Modal(
[
dbc.ModalHeader("Options for structural data visualization"),
dbc.ModalBody(children=[
dcc.Checklist(
id="sec_peak",
options=[{'label': 'show only peak positions', 'value': 'peaking'}],
inputStyle={'margin-right': '3px'},
),
dbc.Tooltip(
"Only show peak positions in RNA structure Heatmap(s)",
target="sec_peak"
),
html.Br(),
html.Div("Normalization:", id="norm_header",
style={'font-weight': 'bold', 'padding-bottom': '10px'}),
html.Div("ERROR: sum of custom rates should be equal to 1", id="error",
style={'font-weight': 'bold', 'color': 'red',
'padding-bottom': '10px'}, hidden=True),
html.Div("ERROR: only numerical values between zero and one allowed", id="error_type",
style={'font-weight': 'bold', 'color': 'red',
'padding-bottom': '10px'}, hidden=True),
dcc.RadioItems(
id="db",
options=[
{'label': 'none', 'value': 'none'},
{'label': 'use A.thaliana database', 'value': 'at_db'},
{'label': 'use custom k-mer rates', 'value': 'custom_vals'}
],
value='none',
labelStyle={'display': 'block'},
inputStyle={'margin-right': '3px'}
),
dbc.Tooltip(
"Used data for normalization of structural data",
target="norm_header"
),
html.Div(id="norm_input", children=[
html.Table(children=[
html.Tr(children=[
html.Td(children=[
html.Div("EE"),
dbc.Input(id="EE", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0)], ),
html.Td(children=[
html.Div("ES"),
dbc.Input(id="ES", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0)], ),
html.Td(children=[
html.Div("SS"),
dbc.Input(id="SS", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0)], )
]),
html.Tr(children=[
html.Td(children=[
html.Div("SI"),
dbc.Input(id="SI", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("IS"),
dbc.Input(id="IS", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("II"),
dbc.Input(id="II", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], )
]),
html.Tr(children=[
html.Td(children=[
html.Div("SH"),
dbc.Input(id="SH", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("HS"),
dbc.Input(id="HS", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("HH"),
dbc.Input(id="HH", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], )
]),
html.Tr(children=[
html.Td(children=[
html.Div("SM"),
dbc.Input(id="SM", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("MS"),
dbc.Input(id="MS", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("SE"),
dbc.Input(id="SE", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
]),
html.Tr(children=[
html.Td(children=[
html.Div("BB"),
dbc.Input(id="BB", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("BS"),
dbc.Input(id="BS", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[
html.Div("SB"),
dbc.Input(id="SB", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
]),
html.Tr(children=[
html.Td(children=[
html.Div("MM"),
dbc.Input(id="MM", type="number", style={'width': '100px'}, max=1,
min=0, step=0.001, value=0), ], ),
html.Td(children=[]),
html.Td(children=[
html.Br(),
dbc.Button("Reset", id="opt_btn_reset",
style={'margin': 'auto'})]),
dbc.Tooltip(
"Reset table",
target="opt_btn_reset"
),
])
], style={'width': '100%'}
)
], style={'display': 'block'}, hidden=True),
]),
dbc.ModalFooter(children=[
dbc.ButtonGroup(
[dbc.Button("Apply", id="opt_btn_apply"),
dbc.Button("Close", id="opt_btn_close")],
className="mr-1",
),
]
),
],
id="ex_options",
backdrop='static',
centered=True
),
], style={
'height': '100vh',
'left': '0px',
'background': 'lightgrey'}),
width=2,
style={"padding-right": '0px',
"padding-left": '0px',
'margin-right': '0px'}),
# --------------------------------------- ScatterPlot ------------------------------------------------------
dbc.Col([
dbc.Card([
dbc.Spinner(children=[
dcc.Tabs(value="s-tab", children=[
dcc.Tab(label="Scatterplot", value='s-tab', id="s-tab1", children=[
dcc.Graph(figure={}, id="scatter", style={'height': '40vh'})
]),
# -------------------------------------- FornaContainer ------------------------------------
dcc.Tab(value='r-tab', id="s-tab2", children=[
dbc.Card(
dashbio.FornaContainer(
id='forna', height='300', width='400', colorScheme='custom'
),
className="w-100 p-3",
),
]),
dcc.Tab(value='r-tab2', id="s-tab3", children=[
dbc.Card(
dashbio.FornaContainer(
id='forna2', height='300', width='400', colorScheme='custom'
),
className="w-100 p-3",
),
])
]),
dbc.Tooltip(
"Scatterplot of k-mer occurences from selected files containing "
"nucleotide sequences",
target="s-tab1"
),
dbc.Tooltip(
"Visualization of arbitrary RNA structure, highlighting k-mer occurrences of "
"element strings from first selected structural data file",
target="s-tab2"
),
dbc.Tooltip(
"Visualization of arbitrary RNA structure, highlighting k-mer occurrences of "
"element strings from second selected structural data file",
target="s-tab3"
),
],
color="primary", spinner_style={'position': 'absolute',
'top': '50%',
'left': '50%'
}),
], style={
'background': '#f2f2f2', 'height': '50vh'}, outline=True),
# -------------------------------------------- TopK ----------------------------------------------------
dbc.Spinner(children=[dbc.Card(id="topK", children=[], style={
'background': '#f2f2f2', 'height': '49vh', 'overflow-y': 'scroll'}, outline=True)],
color="primary", spinner_style={'position': 'absolute',
'top': '50%',
'left': '50%'
}),
],
width=5,
style={"padding-right": '5px',
"padding-left": '10px'}),
# ------------------------------------------------- PCAs ---------------------------------------------------
dbc.Col(
[dbc.Card([
dbc.Spinner(children=[
dcc.Tabs(id='tabs-example', value='Tab1', children=[
dcc.Tab(label="", value='Tab1', id="Tab1", children=[
dcc.Graph(figure={}, id="PCA1",
style={'height': '42vh'}
)
]),
dcc.Tab(label="", value='Tab2', id="Tab2", children=[
dcc.Graph(figure={}, id="PCA2",
style={'height': '42vh'}
)
]),
],
),
dbc.Tooltip(
"Principal component analysis (PCA) of first selected file containing nucleotide sequences",
target="Tab1"
),
dbc.Tooltip(
"Principal component analysis (PCA) of "
"second selected file containing nucleotide sequences",
target="Tab2"
),
], color="primary",
spinner_style={'position': 'absolute',
'top': '50%',
'left': '50%'
}),
], style={
'background': '#f2f2f2', 'height': '50vh'}, outline=True),
# ------------------------------------------- MSA --------------------------------------------------
dbc.Spinner(children=[dbc.Card(id="msa", children=[], style={
'background': '#f2f2f2', 'height': '49vh', 'overflow-y': 'scroll'}, outline=True)],
color="primary", spinner_style={'position': 'absolute',
'top': '50%',
'left': '50%'
}),
],
width=5,
style={"padding-right": '0px',
"padding-left": '0px'}
)
], style={'padding-top': '0px', 'padding-bottom': '0px', 'margin-top': '0px', 'margin-bottom': '0px',
'margin-left': '0px', 'padding-left': '0px'},
className="mw-100 mh-100"
),
],
className="mw-100 mh-100"),
], className="mw-100 mh-100", style={'left': '0px', 'margin-left': '0px', 'padding': '0px'})
# ------------------------------------ Store Callback ------------------------------------------------------------------
@app.callback(
[dash.dependencies.Output('memory', 'data')],
[dash.dependencies.Input('file1', 'value'),
dash.dependencies.Input('file2', 'value'),
dash.dependencies.Input('file3', 'value'),
dash.dependencies.Input('file4', 'value'),
dash.dependencies.Input('k', 'value'),
dash.dependencies.Input('peak', 'value'),
dash.dependencies.Input('top', 'value'),
dash.dependencies.Input('Feature', 'value'),
dash.dependencies.Input('opt_btn_apply', 'n_clicks'),
dash.dependencies.State('sec_peak', 'value'),
dash.dependencies.State('EE', 'value'),
dash.dependencies.State('SS', 'value'),
dash.dependencies.State('II', 'value'),
dash.dependencies.State('MM', 'value'),
dash.dependencies.State('BB', 'value'),
dash.dependencies.State('SI', 'value'),
dash.dependencies.State('IS', 'value'),
dash.dependencies.State('SM', 'value'),
dash.dependencies.State('MS', 'value'),
dash.dependencies.State('ES', 'value'),
dash.dependencies.State('SE', 'value'),
dash.dependencies.State('HH', 'value'),
dash.dependencies.State('HS', 'value'),
dash.dependencies.State('SH', 'value'),
dash.dependencies.State('SB', 'value'),
dash.dependencies.State('BS', 'value'),
dash.dependencies.State('db', 'value'),
dash.dependencies.State('memory', 'data')]
)
# calculates new data for tables/diagrams
# k: k-mer length
# peak: peak-position at which the sequences should be aligned
# top: number of top values to display
# pca_feature: feature highlighted in the PCAs (k-mer frequency or T/A/C/G occurrences)
# apply_options_btn: n_clicks of the apply-button within the options modal
# sec_peak: peak status (-1: no data, 0: False, 1: True) for structural data
# parameters (ee, ss, etc.): custom rates for the individual 2-mers
# norm_option: normalization option (none, for A.thaliana, custom)
# data: storage to share data between callbacks
def updateData(f1, f2, f3, f4, k, peak, top, pca_feature, apply_options_btn, sec_peak,
ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs, norm_option, data):
################################################################################
# julian.py - The Julian Library
#
# This is a set of routines for handling date and time conversions. It handles
# these time systems:
# UTC = Coordinated Universal Time, similar to Greenwich Mean Time, expressed
# by integer days since January 1, 2000 plus floating-point seconds
# since beginning of day. UTC can also be represented in various
# standard formats for a calendar date plus an optional time.
# TAI = International Atomic Time, which is the number of actual elapsed seconds
# since December 31, 1999 at 23:59:28. This running tally accounts for
# all leap seconds.
# TDB = Barycentric Dynamical Time, which is the number of elapsed seconds
# since noon (not midnight!) on January 1, 2000, and adjusted for
# relativistic effects that cause a clock on the Earth to vary in speed
# relative to one at the solar system barycenter. This quantity is
# equivalent to "ephemeris time" in the SPICE time system, although
# differences at the level of milliseconds can occur.
# TDT = Terrestrial Dynamical Time, which is the preferred time system for
# Earth-centered orbits. This is also defined in a manner consistent
# with that in the SPICE toolkit.
# JD = Julian Date as a number of elapsed days since noon (not midnight!) on
# Monday, January 1, 4713 BCE. Each period from one noon to the next
# counts as one day, regardless of whether that day contains leap
# seconds. As a result, some days are longer than others. (Note that
# leap seconds always occur at midnight, and therefore at the middle of
# a Julian day.)
# MJD = Modified Julian Date, defined to be JD minus 2400000.5.
# JED = Julian Ephemeris Date, defined to be TDB/86400 + 2451545. It is
# compatible with SPICE ephemeris time but in units of days rather than
# seconds.
# MJED = Modified Julian Ephemeris Date, defined as JED minus 2400000.5.
#
# Throughout the library, TAI is the intermediary time relative to which all
# others are defined. Note: The term "TAI" is also used infrequently in the
# SPICE Toolkit, but the SPICE value is smaller by exactly 43200 seconds. All
# other terms used here are essentially identical in meaning to their SPICE
# Toolkit equivalents.
#
# If the environment variable SPICE_LSK_FILEPATH is defined, then this SPICE
# leapseconds kernel is read at startup. Otherwise, leap seconds through 2020
# are always included, as defined in NAIF00012.TLS.
#
# The library also handles calendar conversions and both parses and formats
# strings that express time in UTC.
#
# This library duplicates much of the functionality of python's built-in
# datetime library, but is separate from them because the datetime library
# cannot handle leap seconds.
#
# This library duplicates some of the SPICE toolkit, but has the advantage of
# supporting array-based time operations, which can be much faster when
# processing large amounts of data. It is also pure Python, and so does not
# need to be linked with C or FORTRAN libraries.
#
# Aside from the I/O routines, every argument to every function can be either
# a scalar or something array-like, i.e., a NumPy array, a tuple or a list.
# Arguments other than scalars are converted to NumPy arrays, the arrays are
# broadcasted to the same shape if necessary, and the complete array(s) of
# results are returned.
#
# The Julian Library is compatible with both Python 2 and Python 3.
#
# <NAME>
# PDS Ring-Moon Systems Node, SETI Institute
# This software is licensed under Academic Free License ("AFL") v. 3.0.
# See https://opensource.org/licenses/afl-3.0.php
################################################################################
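# A quick numerical sketch of the day-based relations above (illustrative
# only, not part of the library's API): for TDB = 0.0 seconds,
#   JED  = 0.0 / 86400. + 2451545.   ->  2451545.0  (noon, January 1, 2000)
#   MJED = JED - 2400000.5           ->  51544.5
# and, analogously, MJD = JD - 2400000.5 for the UTC-based day count.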
from __future__ import print_function, division
import os
import datetime as dt
import numpy as np
import pyparsing
import unittest
import numbers
from . import textkernel as tk
from . import julian_dateparser as jdp
def _INT(arg):
"""Convert to int, works for for scalar, array, or array-like."""
if isinstance(arg, np.ndarray):
return arg.astype('int')
elif np.shape(arg):
return np.array(arg).astype('int')
else:
return int(arg)
def _FLOAT(arg):
"""Convert to floating-point, works for scalar, array, or array-like."""
if np.shape(arg):
return np.asfarray(arg)
else:
return float(arg)
def _ZEROS(arg):
"""Array of floating-point zeros matching the shape of arg."""
if np.shape(arg):
return np.zeros(np.shape(arg))
else:
return 0.
################################################################################
# Initialization
#
# At load time, this file looks for an environment variable SPICE_LSK_FILEPATH.
# If found, this file is used to initialize the module. Otherwise, the text
# defined internally as SPICE_LSK_TEXT is used.
################################################################################
# Define the text from the latest LSK file, NAIF00012.TLS
SPICE_LSK_DICT = {
"DELTA_T_A": 32.184,
"K": 1.657e-3,
"EB": 1.671e-2,
"M": (6.239996, 1.99096871e-7),
"DELTA_AT": (10, dt.date(1972,1,1),
11, dt.date(1972,7,1),
12, dt.date(1973,1,1),
13, dt.date(1974,1,1),
14, dt.date(1975,1,1),
15, dt.date(1976,1,1),
16, dt.date(1977,1,1),
17, dt.date(1978,1,1),
18, dt.date(1979,1,1),
19, dt.date(1980,1,1),
20, dt.date(1981,7,1),
21, dt.date(1982,7,1),
22, dt.date(1983,7,1),
23, dt.date(1985,7,1),
24, dt.date(1988,1,1),
25, dt.date(1990,1,1),
26, dt.date(1991,1,1),
27, dt.date(1992,7,1),
28, dt.date(1993,7,1),
29, dt.date(1994,7,1),
30, dt.date(1996,1,1),
31, dt.date(1997,7,1),
32, dt.date(1999,1,1),
33, dt.date(2006,1,1),
34, dt.date(2009,1,1),
35, dt.date(2012,7,1),
36, dt.date(2015,7,1),
37, dt.date(2017,1,1))}
# Define the static variables needed for TAI-ET conversions
global DELTET_T_A, DELTET_K, DELTET_EB, DELTET_M0, DELTET_M1
DELTET_T_A = 0. # indicates un-initialized data
DELTET_K = 0.
DELTET_EB = 0.
DELTET_M0 = 0.
DELTET_M1 = 0.
# Define the static variables needed for UTC-TAI conversions
global LS_YEAR0, LS_YEARS, LS_ARRAY1D, LS_ARRAY2D
LS_YEAR0 = 0 # indicates un-initialized data
LS_YEARS = 0
LS_ARRAY1D = None
LS_ARRAY2D = None
def load_from_dict(spicedict):
"""Loads the SPICE LSK parameters from the given dictionary. The dictionary
is that returned by textkernel.from_file()["DELTET"].
"""
global DELTET_T_A, DELTET_K, DELTET_EB, DELTET_M0, DELTET_M1
global LS_YEAR0, LS_YEARS, LS_ARRAY1D, LS_ARRAY2D
# Look up the needed variables and save them as globals
DELTET_T_A = spicedict["DELTA_T_A"]
DELTET_K = spicedict["K"]
DELTET_EB = spicedict["EB"]
(DELTET_M0, DELTET_M1) = spicedict["M"]
# Construct a static array of (TAI minus UTC), the number of elapsed leap
# seconds, and save them indexed by [year,halfyear]...
# Get the list of leapseconds from the kernel
delta_at = spicedict["DELTA_AT"]
LS_YEAR0 = delta_at[1].year - 1 # subtract one so the first tabulated
# year has zero leapseconds.
LS_YEARS = delta_at[-1].year - LS_YEAR0 + 1
# add one so years indexed is inclusive
# Construct an array indexed by halfyear
LS_ARRAY1D = np.zeros(2*LS_YEARS, dtype="int")
for i in range(0, len(delta_at), 2):
date = delta_at[i+1]
indx = 2 * (date.year - LS_YEAR0) + (date.month - 1)//6
LS_ARRAY1D[indx:] = delta_at[i]
# Convert to an array indexed by [year,halfyear]
# These arrays share the same data
LS_ARRAY2D = LS_ARRAY1D.reshape((LS_YEARS,2))
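# Illustrative indexing example (not from the original module): with the
# built-in table, LS_YEAR0 is 1971, so the leap second introduced on
# 1972-07-01 lands at LS_ARRAY1D[2*(1972-1971) + (7-1)//6] == LS_ARRAY1D[3],
# i.e. LS_ARRAY2D[1972 - LS_YEAR0] == [10, 11] (first and second half of 1972).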
def load_from_kernel(filespec):
"""Loads the SPICE LSK parameters from the given text kernel."""
# Load the kernel as a dictionary
load_from_dict(tk.from_file(filespec)["DELTET"])
# INITIALIZE PARAMETERS...
load_from_dict(SPICE_LSK_DICT)
try:
filespec = os.environ["SPICE_LSK_FILEPATH"]
load_from_kernel(filespec)
except KeyError:
pass
########################################
# UNIT TESTS
########################################
class Test_Initialize(unittest.TestCase):
def runTest(self):
self.assertEqual(DELTET_T_A, 32.184,
'DELTET_T_A value is wrong')
self.assertEqual(DELTET_M0, 6.239996,
'DELTET_M0 value is wrong')
self.assertEqual(LS_YEAR0, 1971,
'Leapseconds list does not start in 1971')
self.assertEqual(LS_ARRAY2D[0,0], 0,
'LS_ARRAY2D does not start at zero')
self.assertEqual(LS_ARRAY2D[1999 - LS_YEAR0,0], 32,
'LS_ARRAY2D returns the wrong value for early 1999')
self.assertEqual(LS_ARRAY2D[1998 - LS_YEAR0,1], 31,
'LS_ARRAY2D returns the wrong value for late 1998')
################################################################################
# Calendar conversions
# Algorithms from http://alcor.concordia.ca/~gpkatch/gdate-algorithm.html
#
# day = number of days elapsed since January 1, 2000
# month = number of months elapsed since January 2000
# (y,m,d) = year, month (1-12), day (1-31)
# (y,d) = year and day-of-year (1-366)
# (y,m) = year and month number (1-12)
#
# All functions operate on either scalars or arrays. If given scalars, they
# return scalars; if given anything array-like, they return arrays.
################################################################################
def day_from_ymd(y, m, d):
"""Day number from year, month and day. All must be integers. Supports
scalar or array arguments."""
y = _INT(y)
m = _INT(m)
d = _INT(d)
m = (m + 9) % 12
y = y - m//10
return 365*y + y//4 - y//100 + y//400 + (m*306 + 5)//10 + d - 730426
########################################
def ymd_from_day(day):
"""Year, month and day from day number. Inputs must be integers."""
day = _INT(day)
# Execute the magic algorithm
g = day + 730425
y = (10000*g + 14780)//3652425
ddd = g - (365*y + y//4 - y//100 + y//400)
# Use scalar version of test...
if np.shape(day):
y[ddd < 0] -= 1
elif ddd < 0:
y -= 1
ddd = g - (365*y + y//4 - y//100 + y//400)
mi = (100*ddd + 52)//3060
mm = (mi + 2)%12 + 1
y = y + (mi + 2)//12
dd = ddd - (mi*306 + 5)//10 + 1
return (y, mm, dd)
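# Illustrative round-trip check (not part of the original module): day 0 of
# this day count is January 1, 2000, and ymd_from_day() inverts day_from_ymd().
def _example_calendar_round_trip():
    assert day_from_ymd(2000, 1, 1) == 0
    assert ymd_from_day(0) == (2000, 1, 1)
    assert ymd_from_day(day_from_ymd(2017, 3, 14)) == (2017, 3, 14)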
########################################
def yd_from_day(day):
"""Year and day-of-year from day number."""
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines endpoints for the landing page.
TODO(shifucun): once this is well tested, can deprecate corresponding code
in chart.py and place.py
"""
import collections
import copy
import json
import logging
import urllib
from flask import Blueprint, current_app, Response, url_for, g
from flask_babel import gettext
from collections import defaultdict
from cache import cache
import services.datacommons as dc_service
import routes.api.place as place_api
import lib.range as lib_range
# Define blueprint
bp = Blueprint("api.landing_page", __name__, url_prefix='/api/landingpage')
BAR_CHART_TYPES = ['parent', 'similar', 'nearby', 'child']
MAX_DENOMINATOR_BACK_YEAR = 3
MIN_CHART_TO_KEEP_TOPICS = 30
OVERVIEW = 'Overview'
def get_landing_page_data(dcid, new_stat_vars):
response = dc_service.fetch_data('/landing-page', {
'place': dcid,
'newStatVars': new_stat_vars,
},
compress=False,
post=True,
has_payload=False)
return response
def build_url(dcids, statvar_to_denom, is_scaled=False):
anchor = '&place=' + ','.join(dcids)
parts = []
for statvar, denom in statvar_to_denom.items():
part = statvar
if denom:
part += '|' + denom
parts.append(part)
anchor += ('&statsVar=' + '__'.join(parts))
if is_scaled:
anchor = anchor + '&pc'
return urllib.parse.unquote(url_for('tools.timeline', _anchor=anchor))
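# Illustrative example (not from the original module): the generated fragment
# concatenates places and statvar|denominator pairs, e.g.
#   build_url(['geoId/06', 'geoId/08'],
#             {'Count_Person_Female': 'Count_Person'}, is_scaled=True)
# yields the timeline tool URL with the anchor
#   '#&place=geoId/06,geoId/08&statsVar=Count_Person_Female|Count_Person&pc'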
def fill_translation(chart):
chart['title'] = gettext(chart['titleId'])
del chart['titleId']
if 'description' in chart:
del chart['description']
return chart
# TODO: add test for chart_config for assumption that each combination of stat vars will only have one config in chart_config.
def build_spec(chart_config):
"""Builds hierachical spec based on chart config."""
spec = defaultdict(lambda: defaultdict(list))
# Map: category -> topic -> [config]
for conf in chart_config:
config = copy.deepcopy(conf)
config = fill_translation(config)
if 'relatedChart' in config and config['relatedChart']['scale']:
config['relatedChart'] = fill_translation(config['relatedChart'])
is_overview = ('isOverview' in config and config['isOverview'])
category = config['category']
if 'isOverview' in config:
del config['isOverview']
del config['category']
if is_overview:
spec[OVERVIEW][category].append(copy.deepcopy(config))
spec[category][config['title']].append(config)
return spec
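# Illustrative shape of the returned spec (not from the original module):
# {
#   'Overview':  {<category>: [config, ...]},      # charts flagged isOverview
#   <category>:  {<chart title>: [config, ...]},
# }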
def get_denom(cc, related_chart=False):
"""Get the numerator and denominator map."""
# If chart requires denominator, use it for both primary and related charts.
if 'denominator' in cc:
result = {}
if len(cc['denominator']) != len(cc['statsVars']):
raise ValueError('Denominator number not matching: %s' % cc)
for num, denom in zip(cc['statsVars'], cc['denominator']):
result[num] = denom
return result
# For related chart, use the denominator that is specified in the
# 'relatedChart' field if present.
if related_chart and cc.get('relatedChart', {}).get('scale', False):
return cc['relatedChart'].get('denominator', 'Count_Person')
return None
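# Illustrative example (not from the original module): for a chart config like
#   {'statsVars': ['Count_Person_Female'], 'denominator': ['Count_Person']}
# get_denom() returns {'Count_Person_Female': 'Count_Person'}; for a related
# chart with 'relatedChart': {'scale': True} and no explicit denominator it
# falls back to the string 'Count_Person'.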
def get_series(data, place, stat_vars):
"""Get time series from the landing page data.
Aggregate over all the stat vars and return an empty series if data for
any stat var is missing.
Returns:
series and sources.
"""
all_series = []
sources = set()
num_sv = len(stat_vars)
for sv in stat_vars:
if 'data' not in data[place] or sv not in data[place]['data']:
return {}, []
series = data[place]['data'][sv]
all_series.append(series['val'])
sources.add(series['metadata']['provenanceUrl'])
# One series, no need to aggregate
if num_sv == 1:
return all_series[0], sources
merged_series = defaultdict(list)
for series in all_series:
for date, value in series.items():
merged_series[date].append(value)
# Aggregate
agg_series = {}
for date, values in merged_series.items():
if len(values) == num_sv:
agg_series[date] = sum(values)
return agg_series, sources
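# Illustrative example (not from the original module): aggregating two stat
# vars keeps only the dates present for every stat var, e.g. the series
#   {'2018': 10, '2019': 5} and {'2018': 20}
# aggregate to {'2018': 30}.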
def get_stat_var_group(cc, data, places):
"""Get the stat var grouping for aggregation."""
if 'aggregate' in cc:
agg_type = lib_range.get_aggregate_config(cc['aggregate'])
place_stat_vars = defaultdict(list)
for place in places:
if place not in data or 'data' not in data[place]:
continue
for sv in cc['statsVars']:
if sv in data[place]['data']:
place_stat_vars[place].append(sv)
result = lib_range.aggregate_stat_var(place_stat_vars, agg_type)
for place in places:
if place not in result:
result[place] = {}
else:
result = {}
for place in places:
result[place] = {sv: [sv] for sv in cc['statsVars']}
return result
def get_snapshot_across_places(cc, data, places):
"""Get the snapshot used for bar data across a few places.
This will scale the value if required and pick the latest date that has the
most <place, stat_var> entries.
"""
if not places:
return {}, {}
# date_to_data is a dictionary from date to place and a tuple of
# (stat_var, value) pair.
# Example:
# {
# "2018": {
# "geoId/06":[("Count_Person", 200), ("Count_Person_Female", 100)],
# "geoId/08":[("Count_Person", 300), ("Count_Person_Female", 150)],
# },
# "2017": {
# "geoId/06":[("Count_Person", 300), ("Count_Person_Female", 150)],
# "geoId/08":[("Count_Person", 400), ("Count_Person_Female", 200)],
# },
# }
date_to_data = collections.defaultdict(
lambda: collections.defaultdict(list))
# TODO(shifucun/beets): add a unittest to ensure denominator is set
# explicitly when scale==True
num_denom = get_denom(cc, related_chart=True)
sources = set()
place_stat_var_group = get_stat_var_group(cc, data, places)
statvar_to_denom = {}
for place in places:
if place not in data:
continue
stat_var_group = place_stat_var_group[place]
for num_sv, sv_list in stat_var_group.items():
num_series, num_sources = get_series(data, place, sv_list)
if not num_series:
continue
sources.update(num_sources)
if num_denom:
if isinstance(num_denom, dict):
denom_sv = num_denom[num_sv]
else:
denom_sv = num_denom
statvar_to_denom[num_sv] = denom_sv
denom_series, denom_sources = get_series(
data, place, [denom_sv])
if not denom_series:
continue
sources.update(denom_sources)
result_series = scale_series(num_series, denom_series)
else:
result_series = num_series
statvar_to_denom[num_sv] = None
# Turn the value to be keyed by date.
for date, value in result_series.items():
date_to_data[date][place].append((num_sv, value))
# Pick a date that has the most series across places.
dates = sorted(date_to_data.keys(), reverse=True)
if not dates:
return {}, {}
count = 0
chosen_date = None
for date in dates:
if len(date_to_data[date]) > count:
count = len(date_to_data[date])
chosen_date = date
result = {'date': chosen_date, 'data': [], 'sources': list(sources)}
for place in places:
points = {}
for stat_var, value in date_to_data[chosen_date][place]:
points[stat_var] = value
if points:
result['data'].append({'dcid': place, 'data': points})
return result, statvar_to_denom
# TODO(shifucun): Add unittest for these helper functions
def get_bar(cc, data, places):
"""Get the bar data across a few places.
This will scale the value if required and pick the latest date that has the
most <place, stat_var> entries.
"""
result, statvar_denom = get_snapshot_across_places(cc, data, places)
if not result:
return {}
# Should have data other than the primary place. Return an empty struct
# so the client won't draw the chart.
if len(result['data']) <= 1:
return {}
is_scaled = (('relatedChart' in cc and
cc['relatedChart'].get('scale', False)) or
('denominator' in cc))
result['exploreUrl'] = build_url(places, statvar_denom, is_scaled)
return result
def get_trend(cc, data, place):
"""Get the time series data for a place."""
if place not in data:
return {}
result_series = {}
sources = set()
num_denom = get_denom(cc)
stat_var_group = get_stat_var_group(cc, data, [place])[place]
statvar_denom = {}
for num_sv, sv_list in stat_var_group.items():
num_series, num_sources = get_series(data, place, sv_list)
if not num_series:
continue
sources.update(num_sources)
if num_denom:
if isinstance(num_denom, dict):
denom_sv = num_denom[num_sv]
else:
denom_sv = num_denom
statvar_denom[num_sv] = denom_sv
denom_series, denom_sources = get_series(data, place, [denom_sv])
if not denom_series:
continue
sources.update(denom_sources)
result_series[num_sv] = scale_series(num_series, denom_series)
else:
result_series[num_sv] = num_series
statvar_denom[num_sv] = None
# filter out time series with single data point.
for sv in list(result_series.keys()):
if len(result_series[sv]) <= 1:
del result_series[sv]
if not result_series:
return {}
is_scaled = ('denominator' in cc)
return {
'series': result_series,
'sources': list(sources),
'exploreUrl': build_url([place], statvar_denom, is_scaled)
}
def get_year(date):
try:
return int(date.split('-')[0])
except (IndexError, ValueError):
raise ValueError('no valid date format found: %s' % date)
# TODO(shifucun): Add unittest.
def scale_series(numerator, denominator):
"""Scale two time series.
The dates of the two time series may not be exactly aligned. Here we use
year alignment to match the two dates. If no denominator is found for a
numerator, then that data point is removed.
"""
data = {}
for date, value in numerator.items():
if date in denominator:
if denominator[date] > 0:
data[date] = value / denominator[date]
else:
data[date] = 0
else:
try:
numerator_year = get_year(date)
for i in range(0, MAX_DENOMINATOR_BACK_YEAR + 1):
year = str(numerator_year - i)
if year in denominator:
if denominator[year] > 0:
data[date] = value / denominator[year]
else:
data[date] = 0
break
except ValueError:
return {}
return data
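# Illustrative example (not from the original module): a numerator date with
# no exact denominator match falls back to the same calendar year, up to
# MAX_DENOMINATOR_BACK_YEAR years earlier, e.g.
#   scale_series({'2010': 50.0, '2011-07': 60.0}, {'2010': 100.0})
#   -> {'2010': 0.5, '2011-07': 0.6}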
def get_i18n_all_child_places(raw_page_data):
all_child_places = raw_page_data.get('allChildPlaces', {})
all_dcids = []
for place_type in list(all_child_places.keys()):
for place in all_child_places[place_type]['places']:
all_dcids.append(place.get('dcid', ''))
i18n_names = place_api.get_i18n_name(all_dcids,
False) # Don't resolve en-only names
for place_type in list(all_child_places.keys()):
for place in all_child_places[place_type]['places']:
dcid = place.get('dcid')
i18n_name = i18n_names.get(dcid, '')
if i18n_name:
place['name'] = i18n_name
for place_type in list(all_child_places.keys()):
all_child_places[place_type] = all_child_places[place_type]['places']
return all_child_places
@bp.route('/data/<path:dcid>')
@cache.cached(timeout=3600 * 24, query_string=True) # Cache for one day.
def data(dcid):
"""
Get chart spec and stats data of the landing page for a given place.
"""