Dataset schema (per-record columns):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |

Record 1:
- blob_id: 4fc5cd909ace6525d66a306e4a947a7acf2d7448
- directory_id: 8f2f54e3337c13252fd466c3ceab7d8d7781a4ea
- path: /basal/base_model.py
- content_id: 9ca606036c8a62ae8df2d434169ccdab73ef6a7d
- detected_licenses: []
- license_type: no_license
- repo_name: dansand/crustyconvection
- snapshot_id: c4eded1598920b95c5a866f11a8683d7f88bbf7a
- revision_id: 6506bb525a50671e7d8c958d145d834aa8358b2d
- branch_name: refs/heads/master
- visit_date: 2021-01-15T08:05:07.182439
- revision_date: 2019-02-21T03:52:26
- committer_date: 2019-02-21T03:52:26
- github_id: 49,477,935
- star_events_count: 0
- fork_events_count: 0
- gha_license_id: null
- gha_event_created_at: null
- gha_created_at: null
- gha_language: null
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 45,027
- extension: py
- content:
# coding: utf-8
# Crameri-Tackley model
# =======
#
# From Crameri and Tackley (2015)
# --------
#
# References
# ====
#
# Load python functions needed for Underworld. Some additional python functions from os, math and numpy are used later on.
# In[19]:
import networkx as nx
import underworld as uw
import math
from underworld import function as fn
import glucifer
#import matplotlib.pyplot as pyplot
import time
import numpy as np
import os
import sys
import natsort
import shutil
from easydict import EasyDict as edict
import collections
import slippy2 as sp
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# In[20]:
#Display working directory info if in nb mode
if (len(sys.argv) > 1):
if (sys.argv[1] == '-f'):
get_ipython().system(u'pwd && ls')
# In[21]:
############
#Model name.
############
Model = "T"
ModNum = 12
if len(sys.argv) == 1:
ModIt = "Base"
elif sys.argv[1] == '-f':
ModIt = "Base"
else:
ModIt = str(sys.argv[1])
# Set physical constants and parameters, including the Rayleigh number (*RA*).
# In[22]:
###########
#Standard output directory setup
###########
outputPath = "results" + "/" + str(Model) + "/" + str(ModNum) + "/" + str(ModIt) + "/"
imagePath = outputPath + 'images/'
filePath = outputPath + 'files/'
checkpointPath = outputPath + 'checkpoint/'
dbPath = outputPath + 'gldbs/'
outputFile = 'results_model' + Model + '_' + str(ModNum) + '_' + str(ModIt) + '.dat'
if uw.rank()==0:
# make directories if they don't exist
if not os.path.isdir(outputPath):
os.makedirs(outputPath)
if not os.path.isdir(checkpointPath):
os.makedirs(checkpointPath)
if not os.path.isdir(imagePath):
os.makedirs(imagePath)
if not os.path.isdir(dbPath):
os.makedirs(dbPath)
if not os.path.isdir(filePath):
os.makedirs(filePath)
comm.Barrier() #Barrier here so no procs run the check in the next cell too early
# In[23]:
###########
#Check if starting from checkpoint
###########
checkdirs = []
for dirpath, dirnames, files in os.walk(checkpointPath):
if files:
print dirpath, 'has files'
checkpointLoad = True
checkdirs.append(dirpath)
if not files:
print dirpath, 'is empty'
checkpointLoad = False
# In[199]:
###########
#Physical parameters
###########
#The Slippy rheology class will contain dimensional and nondimensional values, linked in a self-consistent way by scaling parameters
#lowermantle.nondimensional['cohesion']
#Where lowermantle is a material class (generated within a rheology class); and non dimensional is a dictionary
#UW naming conventions:
#module_name, package_name, ClassName, function_name, method_name,
#ExceptionName, propertyName GLOBAL_CONSTANT_NAME, globalVarName, instanceVarName, functionParameterName, localVarName
###########
#dimensional parameter dictionary
dp = edict({'LS':2890.*1e3,
'rho':3300,
'g':9.81,
'eta0':1e23,
'k':10**-6,
'a':1.25*10**-5,
'TS':273.,
'TB':2773.,
'deltaT':2500,
'cohesion':1e7,
'E':240000.,
'R':8.314,
'V':6.34*(10**-7),
'StALS': 54*1e3})
#non-dimensional parameter dictionary
#One drawback of a dictionary structure is that variables cannot link to other variables
RAfac = 16.
Stressfac =(0.3*RAfac**(2/3.))
ndp = edict({'RA':1e6*RAfac,
'LS':1.,
'eta0':1.,
'StAeta0':0.01,
'k':1.,
'E':11.55,
'V':3.0,
'H':20.,
'TR':(1600./2500.),
'TS':(dp.TS/2500.),
'RD':1.,
'cohesion':1577.*RAfac,
'cohesion_reduce':10.,
'fc':0.1,
'low_visc':RAfac*1e-4,
'up_visc':1e5,
'random_temp': 0.05})
#A few parameters defining length scales; these affect material transitions etc.
MANTLETOCRUST = (27.*1e3)/dp.LS #Crust depth
CRUSTTOMANTLE = (300.*1e3)/dp.LS
LITHTOMANTLE = (660.*1e3)/dp.LS
MANTLETOLITH = (200.*1e3)/dp.LS
TOPOHEIGHT = (0.*1e3)/dp.LS #rock-air topography limits
AVGTEMP = 0.53 #Used to define lithosphere
#Compositional Rayleigh number of rock-air
ETAREF = dp.rho*dp.g*dp.a*dp.deltaT*((dp.LS)**3)/(ndp.RA*dp.k) #equivalent dimensional reference viscosity
#RC = (3300.*dp.g*(dp.LS)**3)/(ETAREF *dp.k) #Compositional Rayleigh number for rock-air buoyancy force
RC = (1650.*dp.g*(dp.LS)**3)/(ETAREF *dp.k) #Compositional Rayleigh number for rock-air buoyancy force
COMP_RA_FACT = RC/ndp.RA
#Additional dimensionless parameters
ndp["StA"] = ndp.RA*COMP_RA_FACT
#######################To be replaced soon
#Physical parameters that can be defined with STDIN,
#The == '-f' check is a hack to cover the notebook case
if len(sys.argv) == 1:
ndp.cohesion = ndp.cohesion
elif sys.argv[1] == '-f':
ndp.cohesion = ndp.cohesion
else:
    ndp.cohesion = float(sys.argv[1])
# In[201]:
# In[25]:
###########
#Model setup parameters
###########
stickyAir = True
MINX = -1.
MINY = 0.
MAXX = 1.0
#MAXY = 1.035
MAXY = 1.
if MINX == 0.:
squareModel = True
else:
squareModel = False
dim = 2 # number of spatial dimensions
#MESH STUFF
RES = 64
if MINX == 0.:
Xres = RES
else:
Xres = 2*RES
if stickyAir:
Yres = RES
    MAXY = 1. + dp.StALS/dp.LS #add the sticky air layer (dp.StALS = 54 km) on top
else:
Yres = RES
MAXY = 1.
periodic = [True, False]
elementType = "Q1/dQ0"
#elementType ="Q2/DPC1"
refineMesh = True
#System/Solver stuff
PIC_integration=False
# In[26]:
###########
#Model Runtime parameters
###########
swarm_update = 10
swarm_repop = 10
files_output = 1e6
gldbs_output = 5
images_output = 1e6
checkpoint_every = 1e6
metric_output = 1e6
sticky_air_temp = 10
comm.Barrier() #Barrier here so no procs run the check in the next cell too early
#assert metric_output <= checkpoint_every, 'Checkpointing should run less often than or as often as metric output'
assert (metric_output >= swarm_update), 'Swarm update should happen at least as often as metric output'
assert metric_output >= sticky_air_temp, 'Sticky air temp should be updated more frequently than metrics'
# In[27]:
###########
#Model output parameters
###########
#Do you want to write hdf5 files - Temp, RMS, viscosity, stress?
writeFiles = True
loadTemp = True
# In[28]:
mesh = uw.mesh.FeMesh_Cartesian( elementType = ("Q1/dQ0"),
elementRes = (Xres, Yres),
minCoord = (MINX,MINY),
maxCoord=(MAXX,MAXY), periodic=periodic)
velocityField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=dim )
pressureField = uw.mesh.MeshVariable( mesh=mesh.subMesh, nodeDofCount=1 )
temperatureField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )
temperatureDotField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )
# In[29]:
print("mesh size", mesh.data.shape, mesh.elementRes)
# In[30]:
Xres, Yres, MINX,MAXX,MINY,MAXY, periodic, elementType, dim
# ##Refine mesh
# In[31]:
#X-Axis
if refineMesh:
mesh.reset()
axis = 0
origcoords = np.linspace(mesh.minCoord[axis], mesh.maxCoord[axis], mesh.elementRes[axis] + 1)
edge_rest_lengths = np.diff(origcoords)
deform_lengths = edge_rest_lengths.copy()
min_point = (abs(mesh.maxCoord[axis]) - abs(mesh.minCoord[axis]))/2.
el_reduction = 0.5001
dx = mesh.maxCoord[axis] - min_point
deform_lengths = deform_lengths - ((1.-el_reduction) *deform_lengths[0]) + abs((origcoords[1:] - min_point))*((0.5*deform_lengths[0])/dx)
#print(edge_rest_lengths.shape, deform_lengths.shape)
sp.deform_1d(deform_lengths, mesh,axis = 'x',norm = 'Min', constraints = [])
# In[32]:
axis = 1
orgs = np.linspace(mesh.minCoord[axis], mesh.maxCoord[axis], mesh.elementRes[axis] + 1)
value_to_constrain = 1.
yconst = [(sp.find_closest(orgs, value_to_constrain), np.array([value_to_constrain,0]))]
# In[33]:
#Y-Axis
if refineMesh:
#Y-Axis
axis = 1
origcoords = np.linspace(mesh.minCoord[axis], mesh.maxCoord[axis], mesh.elementRes[axis] + 1)
edge_rest_lengths = np.diff(origcoords)
deform_lengths = edge_rest_lengths.copy()
min_point = (mesh.maxCoord[axis])
el_reduction = 0.5
dx = mesh.maxCoord[axis]
deform_lengths = deform_lengths - ((1.-el_reduction)*deform_lengths[0]) + abs((origcoords[1:] - min_point))*((0.5*deform_lengths[0])/dx)
#print(edge_rest_lengths.shape, deform_lengths.shape)
sp.deform_1d(deform_lengths, mesh,axis = 'y',norm = 'Min', constraints = [])
# In[34]:
figMesh = glucifer.Figure(figsize=(1200,600),antialias=1)
#figMesh.append( glucifer.objects.Mesh(mesh.subMesh, nodeNumbers=True) )
figMesh.append( glucifer.objects.Mesh(mesh) )
figMesh.show()
# # ICs and BCs
# In[185]:
# Get the actual sets
#
# HJJJJJJH
# I I
# I I
# I I
# HJJJJJJH
#
# Note that H = I & J
# Note that we use operator overloading to combine sets
# send boundary condition information to underworld
IWalls = mesh.specialSets["MinI_VertexSet"] + mesh.specialSets["MaxI_VertexSet"]
JWalls = mesh.specialSets["MinJ_VertexSet"] + mesh.specialSets["MaxJ_VertexSet"]
TWalls = mesh.specialSets["MaxJ_VertexSet"]
BWalls = mesh.specialSets["MinJ_VertexSet"]
AWalls = IWalls + JWalls
# In[186]:
# Initialise data.. Note that we are also setting boundary conditions here
velocityField.data[:] = [0.,0.]
pressureField.data[:] = 0.
temperatureField.data[:] = 0.
temperatureDotField.data[:] = 0.
def wbl(x, w0 = 0.06):
delx = 1- abs(x)
Wbl = w0*math.sqrt(delx)
if Wbl== 0.: #to avoid division by zero
Wbl = 1e-8
return Wbl
def tempf(z,w,t0=0.64):
temp = t0*math.erf((1-z)/w)
return temp
# In[187]:
age_asymmetry = 2.
for index, coord in enumerate(mesh.data):
if coord[0] > 0.:
w = wbl(coord[0])
t = tempf(coord[1], w)
temperatureField.data[index] = t
else:
w = wbl(coord[0])/age_asymmetry
t = tempf(coord[1], w)
temperatureField.data[index] = t
for index, coord in enumerate(mesh.data):
if abs(coord[0]) < wbl(0)/2. and coord[1] > 0.5:
w = wbl(0)/2.
d = w - abs(coord[0])
t = tempf(d, coord[1], w)
temperatureField.data[index] = t
#Set sticky air Temp to zero
for index, coord in enumerate(mesh.data):
if coord[1] > 1.:
temperatureField.data[index] = 0.
# In[188]:
#For notebook runs
#ModIt = "96"
# In[189]:
# Now setup the dirichlet boundary condition
# Note that through this object, we are flagging to the system
# that these nodes are to be considered as boundary conditions.
# Also note that we provide a tuple of sets.. One for the Vx, one for Vy.
freeslipBC = uw.conditions.DirichletCondition( variable = velocityField,
indexSetsPerDof = (TWalls, JWalls) )
# also set dirichlet for temp field
tempBC = uw.conditions.DirichletCondition( variable=temperatureField,
indexSetsPerDof=(JWalls,) )
# set initial conditions (and boundary values)
velocityField.data[:] = [0.,0.]
pressureField.data[:] = 0.
#
# ##Add a 125 K (dimensionless A = 0.05) temperature perturbation
#
# In[190]:
# Setup temperature initial condition via numpy arrays
A = 0.05
#Note that width = height = 1
tempNump = temperatureField.data
for index, coord in enumerate(mesh.data):
pertCoeff = temperatureField.data[index] + A*(1. - coord[1])*(1 - math.cos( math.pi/2. * (coord[0] + 1.)))
temperatureField.data[index] = pertCoeff;
if coord[1] > 1:
tempNump[index] = 0.
# ##Reset bottom Dirichlet conds.
# In[191]:
# Set temp boundaries
# on the boundaries
for index in mesh.specialSets["MinJ_VertexSet"]:
temperatureField.data[index] = ndp.TR
for index in mesh.specialSets["MaxJ_VertexSet"]:
temperatureField.data[index] = ndp.TS
# In[192]:
#temperatureField.evaluate(IWalls).min()
# In[193]:
figVelocityMag = glucifer.Figure(figsize=(1024,384))
figVelocityMag.append( glucifer.objects.Surface(mesh, temperatureField) )
figVelocityMag.show()
# #Particles
# In[43]:
###########
#Material Swarm and variables
###########
gSwarm = uw.swarm.Swarm(mesh=mesh)
materialVariable = gSwarm.add_variable( dataType="char", count=1 )
rockIntVar = gSwarm.add_variable( dataType="double", count=1 )
airIntVar = gSwarm.add_variable( dataType="double", count=1 )
lithIntVar = gSwarm.add_variable( dataType="double", count=1 )
varlist = [materialVariable, rockIntVar, airIntVar, lithIntVar]
varnames = ['materialVariable', 'rockIntVar', 'airIntVar', 'lithIntVar']
# In[26]:
###########
#Swarms for surface integrals when using sticky air
###########
snum = 1000.
dx = (mesh.data[:,0].max()- mesh.data[:,0].min())/snum
yp = 0.9947 #1. - yelsize/2. at res = 96
mesh.data[:,0].max()
xps = np.linspace(mesh.data[:,0].min(),mesh.data[:,0].max(), snum)
yps = [yp for i in xps]
surfintswarm = uw.swarm.Swarm( mesh=mesh )
dumout = surfintswarm.add_particles_with_coordinates(np.array((xps,yps)).T)
yps = [ 1.- yp for i in xps]
baseintswarm = uw.swarm.Swarm( mesh=mesh)
dumout = baseintswarm.add_particles_with_coordinates(np.array((xps,yps)).T)
# #Initialise swarm variables, or Swarm checkpoint load
# In[27]:
mantleIndex = 0
lithosphereIndex = 1
crustIndex = 2
airIndex = 3
if checkpointLoad:
checkpointLoadDir = natsort.natsorted(checkdirs)[-1]
temperatureField.load(os.path.join(checkpointLoadDir, "temperatureField" + ".hdf5"))
pressureField.load(os.path.join(checkpointLoadDir, "pressureField" + ".hdf5"))
velocityField.load(os.path.join(checkpointLoadDir, "velocityField" + ".hdf5"))
gSwarm.load(os.path.join(checkpointLoadDir, "swarm" + ".h5"))
for ix in range(len(varlist)):
varb = varlist[ix]
varb.load(os.path.join(checkpointLoadDir,varnames[ix] + ".h5"))
else:
# Layouts are used to populate the swarm across the whole domain
# Create the layout object
#layout = uw.swarm.layouts.GlobalSpaceFillerLayout( swarm=gSwarm, particlesPerCell=20)
#layout = uw.swarm.layouts.PerCellRandomLayout(swarm=gSwarm, particlesPerCell=15)
layout = uw.swarm.layouts.PerCellRandomLayout(swarm=gSwarm, particlesPerCell=25)
# Now use it to populate.
gSwarm.populate_using_layout( layout=layout )
# Lets initialise the 'materialVariable' data to represent different materials
# Set the material to heavy everywhere via the numpy array
materialVariable.data[:] = mantleIndex
#Set initial air and crust materials (allow the graph to take care of lithosphere)
#########
#This initial material setup will be model dependent
#########
for particleID in range(gSwarm.particleCoordinates.data.shape[0]):
if (1. - gSwarm.particleCoordinates.data[particleID][1]) < 0:
materialVariable.data[particleID] = airIndex
elif (1. - gSwarm.particleCoordinates.data[particleID][1]) < MANTLETOCRUST:
materialVariable.data[particleID] = crustIndex
# #Material Graphs
# In[28]:
##############
#Important: This is a quick fix for a bug that arises in parallel runs
##############
material_list = [0,1,2,3]
# In[29]:
print( "unique values after swarm has loaded:" + str(np.unique(materialVariable.data[:])))
# In[30]:
#All depth conditions are given as (km/D) where D is the length scale,
#note that 'model depths' are used, e.g. 1-z, where z is the vertical Underworld coordinate
#All temp conditions are in dimensionless temp. [0. - 1.]
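#For example, with dp.LS = 2890 km the 300 km crust-to-mantle threshold becomes CRUSTTOMANTLE = 300e3/dp.LS ~ 0.104 in model-depth units (1 - z).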
#######Graph object
DG = nx.DiGraph(field="Depth")
#######Nodes
#Note that the order of materials, deepest to shallowest is important
DG.add_node(0, mat='mantle')
DG.add_node(1, mat='lithosphere')
DG.add_node(2, mat='crust')
DG.add_node(3, mat='air')
labels=dict((n,d['mat']) for n,d in DG.nodes(data=True))
pos=nx.spring_layout(DG)
#######Edges
#anything to air
DG.add_edges_from([(0,3),(1,3), (2,3)])
DG[0][3]['depthcondition'] = -1*TOPOHEIGHT
DG[1][3]['depthcondition'] = -1*TOPOHEIGHT
DG[2][3]['depthcondition'] = -1*TOPOHEIGHT
#Anything to mantle
DG.add_edges_from([(2,0), (1,0)])
DG[2][0]['depthcondition'] = CRUSTTOMANTLE
DG[1][0]['depthcondition'] = LITHTOMANTLE #This means we're going to kill lithosphere at the 660.
#Anything to lithosphere
DG.add_edges_from([(0,1),(3,1)])
DG[0][1]['depthcondition'] = MANTLETOLITH
DG[0][1]['avgtempcondition'] = 0.75*AVGTEMP #definition of thermal lithosphere
#Anything to crust
DG.add_edges_from([(0,2), (1,2), (3,2)])
DG[0][2]['depthcondition'] = MANTLETOCRUST
DG[1][2]['depthcondition'] = MANTLETOCRUST
DG[3][2]['depthcondition'] = TOPOHEIGHT
# In[31]:
DG.nodes()
# In[32]:
remove_nodes = []
for node in DG.nodes():
if not node in material_list:
remove_nodes.append(node)
for rmnode in remove_nodes:
DG.remove_node(rmnode)
# In[33]:
DG.nodes()
# In[34]:
#remove_nodes = []
#for node in DG.nodes_iter():
# if not node in material_list:
# remove_nodes.append(node)
#for rmnode in remove_nodes:
# DG.remove_node(rmnode)
# In[35]:
#A Dictionary to map strings in the graph (e.g. 'depthcondition') to particle data arrays
particledepths = 1. - gSwarm.particleCoordinates.data[:,1]
particletemps = temperatureField.evaluate(gSwarm)[:,0]
conditionmap = {}
conditionmap['depthcondition'] = {}
conditionmap['depthcondition']['data'] = particledepths
conditionmap['avgtempcondition'] = {}
conditionmap['avgtempcondition']['data'] = particletemps
# In[36]:
def update_swarm(graph, particleIndex):
"""
This function takes the materials graph (networkx.DiGraph), and a particle index,
then determines if a material update is required
and if so, returns the new materialindex
Args:
graph (networkx.DiGraph): Directed multigraph representing the transformation of material types
particleIndex (int): the particle index corresponding to the index in the swarm data arrays
Returns:
if an update is required the function returns the new material variable (int)
else returns None
Raises:
TypeError: not implemented
ValueError: not implemented
"""
##Edge gives links to other materials; we then query the conditions to see if we should change materials
matId = materialVariable.data[particleIndex][0]
innerchange = False
outerchange = False
for edge in graph[matId]:
if outerchange:
break
for cond in graph[matId][edge].keys():
outerchange = False
if innerchange: #found a complete transition, break inner loop
break
currentparticlevalue = conditionmap[cond]['data'][particleIndex]
crossover = graph[matId][edge][cond]
if ((matId > edge) and (currentparticlevalue > crossover)):
innerchange = False # continue on,
if graph[matId][edge].keys()[-1] == cond:
outerchange = True
innerchange = edge
break
elif ((matId < edge) and (currentparticlevalue < crossover)):
innerchange = False
if graph[matId][edge].keys()[-1] == cond:
outerchange = True
innerchange = edge
break
else:
#condition not met, break outer loop, go to next edge, outerchange should still be False
break
if type(innerchange) == int:
return innerchange
# In[37]:
#fn.branching.conditional?
# In[38]:
#Cleanse the swarm of its sins
#For some Material Graphs, the graph may have to be traversed more than once
check = -1
number_updated = 1
while number_updated != 0:
number_updated = 0
for particleID in range(gSwarm.particleCoordinates.data.shape[0]):
check = update_swarm(DG, particleID)
if check > -1:
number_updated += 1
materialVariable.data[particleID] = check
# In[39]:
## Here we'll play around with some different crust-perturbations
##Would include this in the Graph update function, but for now keep it separate
#MANTLETOCRUST
centre = 0.0
#CRUSTTOMANTLE
square_shape = np.array([ (MANTLETOCRUST ,1. ), (-1.*MANTLETOCRUST ,1. ), ((-1.*MANTLETOCRUST) ,
(1.0 - CRUSTTOMANTLE/2.) ), ((MANTLETOCRUST),(1.0 - CRUSTTOMANTLE/2.))])
square_shape = fn.shape.Polygon( square_shape)
sub_zone1 = np.array([ ((2*MANTLETOCRUST + 0.5*MANTLETOLITH),1. ), ((-2.*MANTLETOCRUST + 0.5*MANTLETOLITH) ,1. ),
((-2.*MANTLETOCRUST - 0.5*MANTLETOLITH ) ,(1.0 - CRUSTTOMANTLE/2.) ), ((2*MANTLETOCRUST - 0.5*MANTLETOLITH ),(1.0 - CRUSTTOMANTLE/2. )) ])
shape1 = fn.shape.Polygon( sub_zone1)
if not checkpointLoad:
for particleID in range( gSwarm.particleCoordinates.data.shape[0] ):
if shape1.evaluate(tuple(gSwarm.particleCoordinates.data[particleID])):
# #print "true"
materialVariable.data[particleID] = crustIndex
#elif shape2.evaluate(tuple(gSwarm.particleCoordinates.data[particleID])):
# materialVariable.data[particleID] = crustIndex
# In[40]:
#shape.evaluate(
# In[41]:
figSwarm = glucifer.Figure(figsize=(1024,384))
figSwarm.append( glucifer.objects.Points(gSwarm,materialVariable, colours='brown white blue red'))
#figSwarm.append( glucifer.objects.Mesh(mesh))
figSwarm.append( glucifer.objects.Surface(mesh, temperatureField))
figSwarm.save_database('test.gldb')
figSwarm.show()
# ## Set the values for the masking swarms
# In[42]:
#Set up masking Swarm variables for the integrations.
#These should be rebuilt at the same frequency as the metric calculations
rockIntVar.data[:] = 0.
notair = np.where(materialVariable.data != airIndex)
rockIntVar.data[notair] = 1.
airIntVar.data[:] = 0.
notrock = np.where(materialVariable.data == airIndex)
airIntVar.data[notrock] = 1.
lithIntVar.data[:] = 0.
islith = np.where((materialVariable.data == lithosphereIndex) | (materialVariable.data == crustIndex))
lithIntVar.data[islith] = 1.
# #Material properties
#
# In the paper, Crameri and Tackley give the dimensionless cohesion as well as the dimensionless yield stress gradient. But the latter is given as a function of dimensionless (lithostatic) pressure, whereas it is easier to use dimensionless depth. This is easy: multiply the dimensionless depth by $\rho g D$, divide by the stress scale, $\frac{\eta \kappa}{D^2}$, then use the same dimensionless yield stress gradient ($\mu$).
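# Written out, with $z'$ the dimensionless depth ($z' = 1 - z$), the yield stress used below is
#
# $$\sigma_y = C + \mu \, z' \, \frac{\rho g D^3}{\eta_0 \kappa}$$
#
# where $\rho g D^3 / (\eta_0 \kappa)$ is the lithopressuregrad factor computed in the next cell and $\mu$ corresponds to ndp.fc.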
# In[43]:
# The yielding of the upper slab is dependent on the strain rate.
strainRate_2ndInvariant = fn.tensor.second_invariant(
fn.tensor.symmetric(
velocityField.fn_gradient ))
coordinate = fn.input()
depth = 1. - coordinate[1]
#Determine yield criterion for depth (rather than pressure as given in Crameri)
#Scaling is same as van Heck and Tackley, EPSL, 2011
lithopressuregrad = dp.rho*dp.g*(dp.LS)**3/(dp.eta0*dp.k)
# In[44]:
#Check important parameters
print(ndp.E, ndp.V,ndp.TS,ndp.RD, ndp.TR, ndp.cohesion)
# In[45]:
#ndp.up_visc/ndp.low_visc
# In[46]:
############
#Mantle
############
#Linear viscosity
arhennius = fn.misc.min(ndp.up_visc,fn.math.exp(((ndp.E + ndp.V*(1.-coordinate[1]))/
(temperatureField + ndp.TS)) - ((ndp.E + ndp.V*(1.- ndp.RD))/(ndp.TR + ndp.TS))))
#Pseudo-plastic
ys = ndp.cohesion + (depth*ndp.fc*lithopressuregrad)
#ys = ndp.fc*lithopressuregrad*(30e3/dp.LS) # this is the byerlee strength at 30 km
yss = fn.misc.max(ndp.cohesion, ys)
plasticvisc = yss*(math.sqrt(2))/(strainRate_2ndInvariant*2.)
plastic = fn.misc.max(ndp.low_visc,plasticvisc)
#combine these
mantleviscosityFn = fn.exception.SafeMaths(fn.misc.min(arhennius, plastic))
############
#crust
############
ysc = (ndp.cohesion/ndp.cohesion_reduce) + (depth*(ndp.fc/100.)*lithopressuregrad)
#ysc = ys/100.
ycs = fn.misc.max((ndp.cohesion/ndp.cohesion_reduce), ysc)
crustplasticvisc = ycs*(math.sqrt(2))/(strainRate_2ndInvariant*2.)
crustplastic = fn.misc.max(ndp.low_visc,crustplasticvisc)
crustviscosityFn = fn.exception.SafeMaths(fn.misc.min(arhennius, crustplastic))
# Note that the rheology implemented manually above can also be set up using an Underworld2 function, as follows:
# ```python
# fn_stress = 2.*arhennius*uw.function.tensor.symmetric(velocityField.fn_gradient )
# plasticvisc = fn.rheology.stress_limiting_viscosity(fn_stress,ys,arhennius)
# plastic = fn.misc.max(1e-4,plasticvisc)
# mantleviscosityFn = fn.exception.SafeMaths(fn.misc.min(arhennius, plastic))
# stokesPIC.fn_viscosity = mantleviscosityFn
# ```
# Set up material properties
# ====
#
# Here the functions for density, viscosity etc. are set. These functions and/or values are preserved for the entire simulation time.
# In[47]:
# Map each material index to its viscosity function (and, below, its density function)
viscosityMapFn = fn.branching.map( fn_key = materialVariable,
mapping = {airIndex:ndp.StAeta0,
lithosphereIndex:mantleviscosityFn,
crustIndex:crustviscosityFn,
mantleIndex:mantleviscosityFn} )
densityMapFn = fn.branching.map( fn_key = materialVariable,
mapping = {airIndex:ndp.StA,
lithosphereIndex:ndp.RA*temperatureField,
crustIndex:ndp.RA*temperatureField,
mantleIndex:ndp.RA*temperatureField} )
# Define our gravity using a python tuple (this will be automatically converted to a function)
gravity = ( 0.0, 1.0 )
buoyancyFn = gravity*densityMapFn
# In[ ]:
# Build the Stokes system, solvers, advection-diffusion
# ------
#
# Setup linear Stokes system to get the initial velocity.
# In[48]:
stokesPIC = uw.systems.Stokes( velocityField = velocityField,
pressureField = pressureField,
#swarm = gSwarm,
conditions = [freeslipBC,],
fn_viscosity = arhennius,
fn_bodyforce = buoyancyFn,
swarm=gSwarm)
# In[49]:
#We do one solve with linear viscosity to get the initial strain rate invariant.
#This solve step also calculates a 'guess' of the velocity field based on the linear system,
#which is used later in the non-linear solver.
solver = uw.systems.Solver(stokesPIC)
# If not doing a restart, do a solve on the non-plastic system
if not checkpointLoad:
solver.solve()
# In[50]:
solver.options
# In[51]:
print(solver.options.A11.list())
print(solver.options.scr.list())
print(solver.options.mg.list())
print(solver.options.mg_accel.list())
print(solver.options.main.list())
print(solver.options.rhsA11.list())
print(solver.options.backsolveA11.list())
# In[52]:
####################
#Add the non-linear viscosity to the Stokes system
stokesPIC.fn_viscosity = viscosityMapFn
###################
#Set more advanced solver options
solver.options.main.Q22_pc_type='gkgdiag'
#solver.options.A11.ksp_rtol=1e-2
#solver.options.scr.ksp_rtol=1e-3
#solver.options.A11.ksp_type="cg"
solver.options.scr.use_previous_guess = True
#solver.options.scr.ksp_set_min_it_converge = 1
#solver.options.main.penalty=10.0
#solver.options.mg.levels = 3
#solver.options.main.remove_constant_pressure_null_space=True
#solver.options.main.penalty = 1e2
solver.options.A11.ksp_rtol=1e-4
solver.options.scr.ksp_rtol=1e-4
solver.options.A11.ksp_monitor=''
solver.options.A11.ksp_converged_reason=''
# In[ ]:
# In[53]:
print "## Solver Config"
print solver.options.main.list()
print "### A11 Config"
print solver.options.A11.list()
# Solve non-linear system for pressure and velocity using Picard iteration
#
# In[54]:
solver.solve(nonLinearIterate=True)
# In[55]:
#Now check the stress.
fn_stress = 2.*mantleviscosityFn*uw.function.tensor.symmetric(velocityField.fn_gradient)
fn_minmax_inv = fn.view.min_max(fn.tensor.second_invariant(fn_stress))
ignore = fn_minmax_inv.evaluate(gSwarm)
# In[56]:
fn_minmax_inv.max_global()
# In[58]:
#np.isclose(fn_minmax_inv.max_global(), ys, rtol=1e-03)
# In[61]:
#figTemp = glucifer.Figure()
#figTemp.append( glucifer.objects.Surface(mesh, temperatureField))
#figTemp.append( glucifer.objects.VectorArrows(mesh,velocityField, arrowHead=0.2, scaling=0.0005))
#figTemp.show()
# Create an advective-diffusive system
# =====
#
# Setup the system in underworld by flagging the temperature and velocity field variables.
# In[62]:
#Create advdiff system
advDiff = uw.systems.AdvectionDiffusion( phiField = temperatureField,
phiDotField = temperatureDotField,
velocityField = velocityField,
fn_sourceTerm = 0.0,
fn_diffusivity = 1.0,
conditions = [tempBC, ] )
advector = uw.systems.SwarmAdvector( swarm = gSwarm,
velocityField = velocityField,
order = 1)
#Switch particle escape on, this will also trigger the inflow population control
gSwarm.particleEscape = True
# Metrics for benchmark
# =====
#
# Define functions to be used in the time loop. For cases 1-4, participants were asked to report a number of diagnostic quantities to be measured after reaching steady state:
#
# * Average temp... $$ \langle T \rangle = \int^1_0 \int^1_0 T \, dxdy $$
# * Top and bottom Nusselt numbers... $$N = \int^1_0 \frac{\partial T}{\partial y} \rvert_{y=0/1} \, dx$$
# * RMS velocity over the whole domain, surface and max velocity at surface
# * max and min viscosity over the whole domain
# * average rate of work done against gravity...$$\langle W \rangle = \int^1_0 \int^1_0 T u_y \, dx dy$$
# * and the average rate of viscous dissipation...$$\langle \Phi \rangle = \int^1_0 \int^1_0 \tau_{ij} \dot \epsilon_{ij} \, dx dy$$
#
# * In steady state, if thermal energy is accurately conserved, the difference between $\langle W \rangle$ and $\langle \Phi \rangle / Ra$ must vanish, so also reported is the percentage error:
#
# $$ \delta = \frac{\lvert \langle W \rangle - \frac{\langle \Phi \rangle}{Ra} \rvert}{\max \left( \langle W \rangle, \frac{\langle \Phi \rangle}{Ra}\right)} \times 100\% $$
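# A minimal sketch of this energy-balance check, assuming the work and dissipation integrals defined in the next cell (dwint for $\langle W \rangle$, vdint for $\langle \Phi \rangle$) and the Rayleigh number ndp.RA:
#
# ```python
# def energy_balance_error(work_integral, dissipation_integral, Ra):
#     """Percentage mismatch between <W> and <Phi>/Ra; should vanish at steady state."""
#     W = work_integral.evaluate()[0]
#     phi_over_Ra = dissipation_integral.evaluate()[0] / Ra
#     return 100. * abs(W - phi_over_Ra) / max(W, phi_over_Ra)
#
# delta = energy_balance_error(dwint, vdint, ndp.RA)
# ```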
# In[63]:
#Setup some Integrals. We want these outside the main loop...
tempVariable = gSwarm.add_variable( dataType="double", count=1 )
tempVariable.data[:] = temperatureField.evaluate(gSwarm)[:]
tempint = uw.utils.Integral((tempVariable*rockIntVar), mesh)
areaint = uw.utils.Integral((1.*rockIntVar),mesh)
v2int = uw.utils.Integral(fn.math.dot(velocityField,velocityField)*rockIntVar, mesh)
dwint = uw.utils.Integral(temperatureField*velocityField[1]*rockIntVar, mesh)
sinner = fn.math.dot(strainRate_2ndInvariant,strainRate_2ndInvariant)
vdint = uw.utils.Integral((4.*viscosityMapFn*sinner)*rockIntVar, mesh)
vdintair = uw.utils.Integral((4.*viscosityMapFn*sinner)*airIntVar, mesh)
vdintlith = uw.utils.Integral((4.*viscosityMapFn*sinner)*lithIntVar, mesh)
# In[64]:
#These should differ if the map function assigns different properties to bulk mantle
print(uw.utils.Integral((4.*mantleviscosityFn*sinner)*rockIntVar, mesh).evaluate()[0])
print(uw.utils.Integral((4.*viscosityMapFn*sinner)*rockIntVar, mesh).evaluate()[0])
print(uw.utils.Integral((4.*mantleviscosityFn*sinner)*airIntVar, mesh).evaluate()[0])
print(uw.utils.Integral((4.*viscosityMapFn*sinner)*airIntVar, mesh).evaluate()[0])
print(uw.utils.Integral((4.*mantleviscosityFn*sinner)*lithIntVar, mesh).evaluate()[0])
print(uw.utils.Integral((4.*viscosityMapFn*sinner)*lithIntVar, mesh).evaluate()[0])
# In[65]:
def avg_temp():
return tempint.evaluate()[0]/areaint.evaluate()[0]
#This one gets cleaned up when Surface integrals are available
def nusselt(tempfield, swarm, dx):
#Update the swarm variable
tempgrad = tempfield.fn_gradient
valcheck = tempgrad[1].evaluate(swarm)
if valcheck is None:
vals = np.array(0, dtype='float64')
else:
vals = valcheck.sum()*dx
return vals
def rms():
return math.sqrt(v2int.evaluate()[0]/areaint.evaluate()[0])
#This one gets cleaned up when Surface integrals are available
def rms_surf(swarm, dx):
rmsmaxfn = fn.math.dot(velocityField,velocityField)
rmscheck = rmsmaxfn.evaluate(swarm)
if rmscheck is None:
#print "watch out"
rmsvals = np.array(0, dtype='float64')
else:
rmsvals = np.sqrt(rmscheck.sum()*dx)
#print "okay"
return rmsvals
def max_vx_surf(velfield, swarm):
check = velfield[0].evaluate(swarm)
if check is None:
return 0.
else:
return check.max()
#def max_vy_surf(velfield, swarm):
# surfvelxmaxfn = fn.view.min_max(velfield[1])
# surfvelxmaxfn.evaluate(swarm)
# return surfvelxmaxfn.max_global()
def gravwork(workfn):
return workfn.evaluate()[0]
def viscdis(vdissfn):
return vdissfn.evaluate()[0]
def visc_extr(viscfn):
vuviscfn = fn.view.min_max(viscfn)
vuviscfn.evaluate(gSwarm)
return vuviscfn.max_global(), vuviscfn.min_global()
# In[66]:
#Fields for saving data / fields
rmsField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1)
rmsfn = fn.math.sqrt(fn.math.dot(velocityField,velocityField))
rmsdata = rmsfn.evaluate(mesh)
rmsField.data[:] = rmsdata
viscField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1)
viscdata = mantleviscosityFn.evaluate(mesh)
viscField.data[:] = viscdata
strainrateField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1)
srtdata = fn.tensor.second_invariant(
fn.tensor.symmetric(
velocityField.fn_gradient ))
rostfield = srtdata.evaluate(mesh)
strainrateField.data[:] = rostfield
viscVariable = gSwarm.add_variable( dataType="float", count=1 )
viscVariable.data[:] = viscosityMapFn.evaluate(gSwarm)
# In[67]:
#Images
figEta = glucifer.Figure()
figEta.append( glucifer.objects.Points(gSwarm,viscVariable, logScale=True))
figMat = glucifer.Figure()
figMat.append( glucifer.objects.Points(gSwarm,materialVariable, colours='brown white blue red'))
figMat.append( glucifer.objects.Mesh(mesh))
figStrainRate = glucifer.Figure()
figStrainRate.append( glucifer.objects.Surface(mesh, strainRate_2ndInvariant, logScale=True))
figVelocityMag = glucifer.Figure()
figVelocityMag.append( glucifer.objects.Surface(mesh, fn.math.dot(velocityField,velocityField))
)
figTemp = glucifer.Figure()
figTemp.append( glucifer.objects.Surface(mesh, temperatureField))
#Pack some stuff into a database as well
figDb = glucifer.Figure()
figDb.append( glucifer.objects.Points(gSwarm,viscVariable, logScale=True, colours='brown white blue'))
figDb.append( glucifer.objects.Points(gSwarm,materialVariable, colours='brown white blue red'))
figDb.append( glucifer.objects.Mesh(mesh))
figDb.append( glucifer.objects.VectorArrows(mesh,velocityField, arrowHead=0.2, scaling=0.002))
figDb.append( glucifer.objects.Surface(mesh, strainRate_2ndInvariant, logScale=True, colours='brown white blue'))
figDb.append( glucifer.objects.Surface(mesh, temperatureField))
# Main simulation loop
# =======
#
# The main time stepping loop begins here. Before this the time and timestep are initialised to zero and the output statistics arrays are set up. The output frequencies were set above in the model runtime parameters (metric_output, gldbs_output, checkpoint_every, etc.).
#
# In[62]:
pics = uw.swarm.PICIntegrationSwarm(gSwarm)
# In[63]:
def checkpoint1(step, checkpointPath,filename, filewrites):
path = checkpointPath + str(step)
os.mkdir(path)
##Write and save the file, if not already a writing step
if not step % filewrites == 0:
filename.write((13*'%-15s ' + '\n') % (realtime, Viscdis, float(Nu0glob), float(Nu1glob), Avg_temp,
Rms,Rmsurfglob,Max_vx_surf,Gravwork, etamax, etamin, Viscdisair, Viscdislith))
filename.close()
shutil.copyfile(os.path.join(outputPath, outputFile), os.path.join(path, outputFile))
def checkpoint2(step, checkpointPath, swarm, filename, varlist = [materialVariable], varnames = ['materialVariable']):
path = checkpointPath + str(step)
velfile = "velocityField" + ".hdf5"
tempfile = "temperatureField" + ".hdf5"
pressfile = "pressureField" + ".hdf5"
velocityField.save(os.path.join(path, velfile))
temperatureField.save(os.path.join(path, tempfile))
pressureField.save(os.path.join(path, pressfile))
swarm.save(os.path.join(path, "swarm.h5") )
for ix in range(len(varlist)):
varb = varlist[ix]
varb.save(os.path.join(path,varnames[ix] + ".h5"))
# In[64]:
advector.get_max_dt(), advDiff.get_max_dt()
# In[67]:
# initialise timer for computation
start = time.clock()
# setup summary output file (name above)
if checkpointLoad:
if uw.rank() == 0:
shutil.copyfile(os.path.join(checkpointLoadDir, outputFile), outputPath+outputFile)
comm.Barrier()
f_o = open(os.path.join(outputPath, outputFile), 'a')
prevdata = np.genfromtxt(os.path.join(outputPath, outputFile), skip_header=0, skip_footer=0)
if len(prevdata.shape) == 1: #this is in case there is only one line in previous file
realtime = prevdata[0]
else:
realtime = prevdata[prevdata.shape[0]-1, 0]
step = int(checkpointLoadDir.split('/')[-1])
timevals = [0.]
else:
f_o = open(outputPath+outputFile, 'w')
realtime = 0.
step = 0
timevals = [0.]
# In[ ]:
# In[95]:
# initialise timer for computation
startMain = time.clock()
# Perform steps
while realtime < 0.05:
#while step < 5:
#Enter non-linear loop
print step
solver.solve(nonLinearIterate=True)
dt = advDiff.get_max_dt()
if step == 0:
dt = 0.
#start = time.clock()
advDiff.integrate(dt)
#machine_time = (time.clock()-start)
#print("total advDiff time is: " + str(machine_time))
# Advect swarm using this timestep size
#start = time.clock()
advector.integrate(dt)
#machine_time = (time.clock()-start)
#print("total advector time is: " + str(machine_time))
# Increment
realtime += dt
step += 1
timevals.append(realtime)
################
#Update temperature field in the air region
################
if (step % sticky_air_temp == 0):
for index, coord in enumerate(mesh.data):
if coord[1] >= 1.:
temperatureField.data[index] = 0.
################
#Particle update
###############
if (step % swarm_update == 0) or (step % metric_output == 0): #These updates should be done before any metric output
#These swarm variables get updated first, as they are used to determine material changes
particledepths = 1. - gSwarm.particleCoordinates.data[:,1]
particletemps = temperatureField.evaluate(gSwarm)[:,0]
conditionmap['depthcondition']['data'] = particledepths
conditionmap['avgtempcondition']['data'] = particletemps
################
number_updated = 0
for particleID in range(gSwarm.particleCoordinates.data.shape[0]):
check = update_swarm(DG, particleID)
if check > -1:
number_updated += 1
#if check == 0:
# print "from " + str(materialVariable.data[particleID]) + " to " + str(check)
materialVariable.data[particleID] = check
else:
pass
#Also update those integration swarms
rockIntVar.data[:] = 0.
notair = np.where(materialVariable.data != airIndex)
rockIntVar.data[notair] = 1.
airIntVar.data[:] = 0.
notrock = np.where(materialVariable.data == airIndex)
airIntVar.data[notrock] = 1.
lithIntVar.data[:] = 0.
islith = np.where((materialVariable.data == lithosphereIndex) | (materialVariable.data == crustIndex))
lithIntVar.data[islith] = 1.
################
# Calculate the Metrics, only on 1 of the processors:
################
if (step % metric_output == 0):
###############
#Swarm - based Metrics
###############
tempVariable.data[:] = temperatureField.evaluate(gSwarm)[:]
Avg_temp = avg_temp()
Rms = rms()
Gravwork = gravwork(dwint)
Viscdis = viscdis(vdint)
Viscdisair = viscdis(vdintair)
Viscdislith = viscdis(vdintlith)
etamax, etamin = visc_extr(viscosityMapFn)
#These are the ones that need mpi4py treatment
Nu0loc = nusselt(temperatureField, baseintswarm, dx)
Nu1loc = nusselt(temperatureField, surfintswarm, dx)
Rmsurfloc = rms_surf(surfintswarm, dx)
Max_vx_surfloc = np.array(max_vx_surf(velocityField, surfintswarm),'d') #This float needed to be an array to play with mpi4py
#Setup the global output arrays
dTp = Nu0loc.dtype
Nu0glob = np.array(0, dtype=dTp)
dTp = Nu1loc.dtype
Nu1glob = np.array(0, dtype=dTp)
dTp = Rmsurfloc.dtype
Rmsurfglob = np.array(0, dtype=dTp)
dTp = Max_vx_surfloc.dtype
Max_vx_surfglob = np.array(0.0,dtype=dTp)
#Do global operation ... sum, or max
comm.Allreduce(Nu0loc, Nu0glob, op=MPI.SUM)
comm.Allreduce(Nu1loc, Nu1glob, op=MPI.SUM)
comm.Allreduce(Rmsurfloc, Rmsurfglob, op=MPI.SUM)
comm.Allreduce([Max_vx_surfloc, MPI.DOUBLE],[Max_vx_surfglob, MPI.DOUBLE],op=MPI.MAX)
# output to summary text file
if uw.rank()==0:
f_o.write((13*'%-15s ' + '\n') % (realtime, Viscdis, float(Nu0glob), float(Nu1glob), Avg_temp,
Rms,Rmsurfglob,Max_vx_surfglob,Gravwork, etamax, etamin, Viscdisair, Viscdislith))
################
#Gldb output
################
if (step % gldbs_output == 0) & (writeFiles == True):
#Rebuild any necessary swarm variables
viscVariable.data[:] = viscosityMapFn.evaluate(gSwarm)
#Write gldbs
fnamedb = "dbFig" + "_" + str(ModIt) + "_" + str(step) + ".gldb"
fullpath = os.path.join(outputPath + "gldbs/" + fnamedb)
figDb.show()
figDb.save_database(fullpath)
################
#Also repopulate entire swarm periodically
################
if step % swarm_repop == 0:
pics.repopulate()
################
#Checkpoint
################
if step % checkpoint_every == 0:
if uw.rank() == 0:
checkpoint1(step, checkpointPath,f_o, metric_output)
checkpoint2(step, checkpointPath, gSwarm, f_o, varlist = varlist, varnames = varnames)
f_o = open(os.path.join(outputPath, outputFile), 'a') #is this line supposed to be here?
f_o.close()
print 'step =',step
# In[ ]:
#figTemp = glucifer.Figure()
#figTemp.append( glucifer.objects.Surface(mesh, temperatureField))
#figTemp.append( glucifer.objects.Mesh(mesh))
#figTemp.append( glucifer.objects.VectorArrows(mesh,velocityField, arrowHead=0.2, scaling=0.01))
#figTemp.save_database('test.gldb')
#figTemp.show()
# In[ ]:
#figVelocityMag = glucifer.Figure(figsize=(1024,384))
#figVelocityMag.append( glucifer.objects.Surface(mesh, fn.math.dot(velocityField,velocityField), logScale=True))
#figVelocityMag.show()
# In[ ]:
#figVelocityMag = glucifer.Figure(figsize=(1024,384))
#figVelocityMag.append( glucifer.objects.Surface(mesh, temperatureField, logScale=True, valueRange=[1e-15,5e-8]) )
#figVelocityMag.show()
# In[ ]:
#figVelocityMag
# In[67]:
#figSwarm = glucifer.Figure(figsize=(1024,384))
#figSwarm.append( glucifer.objects.Points(gSwarm,materialVariable, colours='brown white blue red'))
#figSwarm.append( glucifer.objects.Mesh(mesh))
#figSwarm.show()
#figSwarm.save_database('test.gldb')
# In[70]:
figDb.show()
figDb.save_database('test.gldb')
# In[53]:
machine_time = (time.clock()-start)
print("total time is: " + str(machine_time))
# In[54]:
#Sanity check for periodic conditions
#velocityField.evaluate(TWalls)[:,0].max()
# In[31]:
#print("TWalls x vel is: " + str(velocityField.evaluate(TWalls)[:,0].max()))
#print("IWalls x vel is: " + str(velocityField.evaluate(IWalls)[:,0].max()))
# In[ ]:
#ts = ((D*1e3)**2)/k
#secperyear = (3600*24*365)
#dt*ts/secperyear
#2.25895987733e-05*ts/secperyear
- authors: ["[email protected]"]
- author_id:

Record 2:
- blob_id: 1ccfedd89dc3598e0b125d7c90fb2f24a5542eed
- directory_id: 0a2cc497665f2a14460577f129405f6e4f793791
- path: /sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/aio/operations/_service_units_operations.py
- content_id: 5809b54b4000f26ae07611e01667bfe29872d3b4
- detected_licenses: ["MIT", "LGPL-2.1-or-later", "LicenseRef-scancode-generic-cla"]
- license_type: permissive
- repo_name: hivyas/azure-sdk-for-python
- snapshot_id: 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b
- revision_id: 8b3258fa45f5dc25236c22ad950e48aa4e1c181c
- branch_name: refs/heads/master
- visit_date: 2023-06-17T12:01:26.392186
- revision_date: 2021-05-18T19:56:01
- committer_date: 2021-05-18T19:56:01
- github_id: 313,761,277
- star_events_count: 1
- fork_events_count: 1
- gha_license_id: MIT
- gha_event_created_at: 2020-12-02T17:48:22
- gha_created_at: 2020-11-17T22:42:00
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 21,101
- extension: py
- content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceUnitsOperations:
"""ServiceUnitsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.deploymentmanager.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
service_topology_name: str,
service_name: str,
service_unit_name: str,
service_unit_info: "_models.ServiceUnitResource",
**kwargs
) -> "_models.ServiceUnitResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceUnitResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'serviceUnitName': self._serialize.url("service_unit_name", service_unit_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(service_unit_info, 'ServiceUnitResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('ServiceUnitResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}/serviceUnits/{serviceUnitName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_topology_name: str,
service_name: str,
service_unit_name: str,
service_unit_info: "_models.ServiceUnitResource",
**kwargs
) -> AsyncLROPoller["_models.ServiceUnitResource"]:
"""Creates or updates a service unit under the service in the service topology.
This is an asynchronous operation and can be polled to completion using the operation resource
returned by this operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param service_topology_name: The name of the service topology .
:type service_topology_name: str
:param service_name: The name of the service resource.
:type service_name: str
:param service_unit_name: The name of the service unit resource.
:type service_unit_name: str
:param service_unit_info: The service unit resource object.
:type service_unit_info: ~azure.mgmt.deploymentmanager.models.ServiceUnitResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceUnitResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.deploymentmanager.models.ServiceUnitResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceUnitResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_topology_name=service_topology_name,
service_name=service_name,
service_unit_name=service_unit_name,
service_unit_info=service_unit_info,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('ServiceUnitResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'serviceUnitName': self._serialize.url("service_unit_name", service_unit_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}/serviceUnits/{serviceUnitName}'} # type: ignore
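    # A minimal usage sketch (assumed client construction and placeholder resource names, not values from this file):
    #
    #   client = AzureDeploymentManager(credential, subscription_id)
    #   poller = await client.service_units.begin_create_or_update(
    #       "my-rg", "my-topology", "my-service", "my-service-unit", service_unit_info)
    #   service_unit = await poller.result()  # waits for the long-running operation to complete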
async def get(
self,
resource_group_name: str,
service_topology_name: str,
service_name: str,
service_unit_name: str,
**kwargs
) -> "_models.ServiceUnitResource":
"""Gets the service unit.
Gets the service unit.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param service_topology_name: The name of the service topology .
:type service_topology_name: str
:param service_name: The name of the service resource.
:type service_name: str
:param service_unit_name: The name of the service unit resource.
:type service_unit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceUnitResource, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.ServiceUnitResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceUnitResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'serviceUnitName': self._serialize.url("service_unit_name", service_unit_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceUnitResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}/serviceUnits/{serviceUnitName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
service_topology_name: str,
service_name: str,
service_unit_name: str,
**kwargs
) -> None:
"""Deletes the service unit.
Deletes the service unit.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param service_topology_name: The name of the service topology .
:type service_topology_name: str
:param service_name: The name of the service resource.
:type service_name: str
:param service_unit_name: The name of the service unit resource.
:type service_unit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'serviceUnitName': self._serialize.url("service_unit_name", service_unit_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}/serviceUnits/{serviceUnitName}'} # type: ignore
async def list(
self,
resource_group_name: str,
service_topology_name: str,
service_name: str,
resource_group_name1: str,
service_topology_name1: str,
service_name1: str,
**kwargs
) -> List["_models.ServiceUnitResource"]:
"""Lists the service units under a service in the service topology.
Lists the service units under a service in the service topology.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
        :param service_topology_name: The name of the service topology.
:type service_topology_name: str
:param service_name: The name of the service resource.
:type service_name: str
:param resource_group_name1: The name of the resource group. The name is case insensitive.
:type resource_group_name1: str
        :param service_topology_name1: The name of the service topology.
:type service_topology_name1: str
:param service_name1: The name of the service resource.
:type service_name1: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ServiceUnitResource, or the result of cls(response)
:rtype: list[~azure.mgmt.deploymentmanager.models.ServiceUnitResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ServiceUnitResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serviceTopologyName': self._serialize.url("service_topology_name", service_topology_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name1", resource_group_name1, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serviceTopologyName': self._serialize.url("service_topology_name1", service_topology_name1, 'str'),
'serviceName': self._serialize.url("service_name1", service_name1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ServiceUnitResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/serviceTopologies/{serviceTopologyName}/services/{serviceName}/serviceUnits'} # type: ignore
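# Hedged usage sketch for the operations above. The aio client import path and
# the `service_units` attribute name are assumptions based on the usual
# autorest layout for this package; they are not taken from this module.
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.deploymentmanager.aio import AzureDeploymentManager
#
#   async def main():
#       async with DefaultAzureCredential() as cred:
#           async with AzureDeploymentManager(cred, "<subscription-id>") as client:
#               # list() deserializes to a plain list of ServiceUnitResource,
#               # not a pager, so it can be iterated directly
#               units = await client.service_units.list(
#                   "my-rg", "my-topology", "my-service",
#                   "my-rg", "my-topology", "my-service")
#               for unit in units:
#                   print(unit.name)
#
#   asyncio.run(main())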
| [
"[email protected]"
] | |
977cd26e8a9ce77960696f23c7267eaec7fa0dd3 | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/tests/functional/v3/test_instance_usage_audit_log.py | 6a882a19ad76b2c75966c710359475c274d32493 | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_config import cfg
from patron.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'patron.api.openstack.compute.extensions')
class InstanceUsageAuditLogJsonTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-instance-usage-audit-log"
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(InstanceUsageAuditLogJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('patron.api.openstack.compute.'
'contrib.instance_usage_audit_log.'
'Instance_usage_audit_log')
return f
def test_show_instance_usage_audit_log(self):
response = self._do_get('os-instance_usage_audit_log/%s' %
urllib.quote('2012-07-05 10:00:00'))
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('inst-usage-audit-log-show-get-resp',
subs, response, 200)
def test_index_instance_usage_audit_log(self):
response = self._do_get('os-instance_usage_audit_log')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('inst-usage-audit-log-index-get-resp',
subs, response, 200)
| [
"[email protected]"
] | |
306694ae20d0761a8ceb55a2d83fc0495e190317 | ff182eeaf59b16f79b7d306eef72ddaadf0f4e71 | /Vaffle_interface/testcase/MessageModule/Message_test21_question_answerquestionlist.py | cca8da534dbe3a9cae3189adbf5e7b7bdc28b965 | [] | no_license | heyu1229/vaffle | 04d6f8b0d3bd0882ff1cdea54d18d5fdde7933b9 | 2c1c040f78094cf3cfc68f08627a958c4aa5e1d5 | refs/heads/master | 2023-06-05T09:55:21.894344 | 2021-03-12T07:26:45 | 2021-03-12T07:26:45 | 381,248,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | # -*- coding:UTF-8 -*-
import unittest,time,json
from Vaffle_interface.public_1.func_requests import FuncRequests
#---------------QA answer list---------------------
class Message(unittest.TestCase):
def setUp(self):
self.r = FuncRequests()
    #-----------------QA answer list---------------------------------
def testcase_001(self):
sheet_index = 5
row = 22
print("testcase_001 QA答题列表:")
member_id = 'b9f73f23-7bc6-4de6-9f9b-df2c98076221'
payload = {'type':'recommend','page':1}
result=self.r.interface_requests_payload(member_id,sheet_index,row,payload)
self.assertEqual(10000, result["code"])
print("code返回值:10000")
if __name__=="__main__":
unittest.main() | [
"[email protected]"
] | |
dd53c3727ad67dbcc8f4cc1ff5e0b523226dd686 | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /build/android/developer_recommended_flags.gypi | 79c201deccb3b99bf444200fbc6d40cbfa1cec79 | [
"BSD-3-Clause"
] | permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 2,722 | gypi | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is the set of recommended gyp variable settings for Chrome for Android development.
#
# These can be used by copying this file to $CHROME_SRC/chrome/supplement.gypi.
#
# Even better, create chrome/supplement.gypi containing the following:
# {
# 'includes': [ '../build/android/developer_recommended_flags.gypi' ]
# }
# and you'll get new settings automatically.
# When using this method, you can override individual settings by setting them unconditionally (with
# no %) in chrome/supplement.gypi.
# I.e. to disable gyp_managed_install but use everything else:
# {
# 'variables': {
# 'gyp_managed_install': 0,
# },
# 'includes': [ '../build/android/developer_recommended_flags.gypi' ]
# }
{
'variables': {
'variables': {
# Set component to 'shared_library' to enable the component build. This builds native code as
# many small shared libraries instead of one monolithic library. This slightly reduces the time
# required for incremental builds.
'component%': 'shared_library',
},
'component%': '<(component)',
# When gyp_managed_install is set to 1, building an APK will install that APK on the connected
# device(/emulator). To install on multiple devices (or onto a new device), build the APK once
# with each device attached. This greatly reduces the time required for incremental builds.
#
# This comes with some caveats:
# Only works with a single device connected (it will print a warning if
# zero or multiple devices are attached).
# Device must be flashed with a user-debug unsigned Android build.
# Some actions are always run (i.e. ninja will never say "no work to do").
'gyp_managed_install%': 1,
# With gyp_managed_install, we do not necessarily need a standalone APK.
# When create_standalone_apk is set to 1, we will build a standalone APK
# anyway. For even faster builds, you can set create_standalone_apk to 0.
'create_standalone_apk%': 1,
# Set clang to 1 to use the clang compiler. Clang has much (much, much) better warning/error
# messages than gcc.
# TODO(cjhopman): Enable this when http://crbug.com/156420 is addressed. Until then, users can
# set clang to 1, but Android stack traces will sometimes be incomplete.
#'clang%': 1,
# Set fastbuild to 1 to build with less debugging information. This can greatly decrease linking
# time. The downside is that stack traces will be missing useful information (like line
# numbers).
#'fastbuild%': 1,
},
}
| [
"[email protected]"
] | |
5f9b9da363ca1b7046a85b9ca50ffd5a17546d0a | 6a1e30c699125d05caf4b3443b83c47d1a733851 | /EMS/migrations/0002_auto_20200623_1446.py | 2cd6aa0e0db237346a0fe2c980b78533bd1e4711 | [] | no_license | Subhrans/Keekers | 373b259c04bcd277a91e6e7679da5da2b799e184 | b971b1a1f5c2436b3068b5003d220ca28fdced0f | refs/heads/main | 2023-04-16T17:26:43.207550 | 2021-04-07T18:10:49 | 2021-04-07T18:10:49 | 355,639,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | # Generated by Django 3.0.1 on 2020-06-23 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('EMS', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='e_experience',
field=models.CharField(default='Year:0 Months:0 Days:0 ', max_length=22),
),
]
| [
"[email protected]"
] | |
b90768d806834c51f4937d283dcb46b436f8db8a | 4e7db10524c938c8c6e687521def2889e20ec646 | /P16/1-16.py | a1030b875a6397384fd635bad46dc8338e9e1d13 | [] | no_license | mpigrobot/python | e5cf60ca438e0d5e63a1e87a266a9e255bc07271 | bf9262657a7401f37de38318db768e630fab97a9 | refs/heads/master | 2020-03-15T15:37:03.228080 | 2018-03-31T07:33:07 | 2018-03-31T07:33:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | #! /usr/bin/env python
#coding:utf-8
def del_space(string):
split_string = string.split(" ")
string_list = [i for i in split_string if i!=""]
result_string = " ".join(string_list)
return result_string
if __name__=="__main__":
one_str = "Hello, I am Qiwsir."
string = del_space(one_str)
print one_str
print string | [
"[email protected]"
] | |
91f4d3ccee46519af255d5bac95200dd39e71f50 | dc86022781700c04d143736d401138881d98fe14 | /student_profile/urls.py | ccd89f5b0862df465e50872ee2d29c60807607f2 | [] | no_license | SymaxxDigital/college-journal | d4b25901788221793e8c3ce25f7fefd6e150df4b | c188511ad38fc2fe1b3d5cf907b2ce7be46aad5e | refs/heads/master | 2023-06-17T00:00:16.057099 | 2021-07-12T16:45:21 | 2021-07-12T16:45:21 | 372,991,491 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py | from django.urls import path
from .views import (
ProfileView,
ProfileCreateView,
ProfileUpdateView,
DemographicListView,
DemographicCreateView,
DemographicUpdateView,
FamilyListView,
FamilyCreateView,
FamilyUpdateView,
EducationListView,
EducationCreateView,
EducationUpdateView,
PersonalessayListView,
PersonalEssayCreateView,
PersonalEssayUpdateView,
ProfileLinkView,
)
app_name = 'student_profile'
urlpatterns = [
path("", ProfileView.as_view(), name="profile"),
path("add/", ProfileCreateView.as_view(), name="profile_create"),
path("edit/<uuid:pk>/", ProfileUpdateView.as_view(), name="profile_update"),
path("demographics/", DemographicListView.as_view(), name="demographics"),
path("demographic/add", DemographicCreateView.as_view(), name="demographic_create"),
path("demographic/edit/<uuid:pk>", DemographicUpdateView.as_view(), name="demographic_update"),
path("family", FamilyListView.as_view(), name="family"),
path("family/add", FamilyCreateView.as_view(), name="family_create"),
path("family/edit/<uuid:pk>", FamilyUpdateView.as_view(), name="family_update"),
path("education", EducationListView.as_view(), name="education"),
path("education/add", EducationCreateView.as_view(), name="education_create"),
path("education/edit/<uuid:pk>", EducationUpdateView.as_view(), name="education_update"),
path("essays/", PersonalessayListView.as_view(), name="personalessays"),
path("personal-essay/add", PersonalEssayCreateView.as_view(), name="personal_essay_create"),
path("personal-essay/edit/<uuid:pk>", PersonalEssayUpdateView.as_view(), name="personal_essay_update"),
path("links/", ProfileLinkView.as_view(), name="profile_links")
]
| [
"[email protected]"
] | |
36674f77a3891e30e9a9320ef731c81cc8fc7b77 | ec7591c3f478c43e76257aaa500d8f6a2e763d74 | /stanza/utils/datasets/sentiment/process_airline.py | edacb874ed99653b6fc0f5adde34b14c3f9fb636 | [
"Apache-2.0"
] | permissive | stanfordnlp/stanza | 5cc3dbe70a96dd565639b7dae1efde6b4fa76985 | c530c9af647d521262b56b717bcc38b0cfc5f1b8 | refs/heads/main | 2023-09-01T12:01:38.980322 | 2023-03-14T16:10:05 | 2023-03-14T16:10:05 | 104,854,615 | 4,281 | 599 | NOASSERTION | 2023-09-10T00:31:36 | 2017-09-26T08:00:56 | Python | UTF-8 | Python | false | false | 2,631 | py | """
Airline tweets from Kaggle
from https://www.kaggle.com/crowdflower/twitter-airline-sentiment/data#
Some ratings seem questionable, but it doesn't hurt performance much, if at all
Files in the airline repo are csv, with quotes in "..." if they contained commas themselves.
Accordingly, we use the csv module to read the files and output them in the format
<class> <sentence>
Run using
python3 process_airline.py <in_directory> <out_directory> <short_name>
where <in_directory> contains the Kaggle Tweets.csv file.
If the first word is an @, it is removed, and after that, leading @ or # are removed.
For example:
@AngledLuffa you must hate having Mox Opal #banned
->
you must hate having Mox Opal banned
"""
import csv
import os
import sys
from stanza.models.classifiers.data import SentimentDatum
import stanza.utils.datasets.sentiment.process_utils as process_utils
def get_phrases(in_directory):
in_filename = os.path.join(in_directory, "Tweets.csv")
with open(in_filename, newline='') as fin:
cin = csv.reader(fin, delimiter=',', quotechar='"')
lines = list(cin)
phrases = []
for line in lines[1:]:
sentiment = line[1]
if sentiment == 'negative':
sentiment = '0'
elif sentiment == 'neutral':
sentiment = '1'
elif sentiment == 'positive':
sentiment = '2'
else:
raise ValueError("Unknown sentiment: {}".format(sentiment))
# some of the tweets have \n in them
utterance = line[10].replace("\n", " ")
phrases.append(SentimentDatum(sentiment, utterance))
return phrases
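# Illustrative mapping (the tweet text below is made up, not from the real CSV):
# a row whose sentiment column reads 'negative' and whose text column holds
# "@VirginAmerica plane was late again" becomes
# SentimentDatum('0', '@VirginAmerica plane was late again');
# the leading @-mention is only stripped later, by clean_tokenized_tweet.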
def get_tokenized_phrases(in_directory):
phrases = get_phrases(in_directory)
phrases = process_utils.get_ptb_tokenized_phrases(phrases)
phrases = [SentimentDatum(x.sentiment, process_utils.clean_tokenized_tweet(x.text)) for x in phrases]
print("Found {} phrases in the airline corpus".format(len(phrases)))
return phrases
def main(in_directory, out_directory, short_name):
phrases = get_tokenized_phrases(in_directory)
os.makedirs(out_directory, exist_ok=True)
out_filename = os.path.join(out_directory, "%s.train.json" % short_name)
# filter leading @United, @American, etc from the tweets
process_utils.write_list(out_filename, phrases)
# something like this would count @s if you cared enough to count
# would need to update for SentimentDatum()
#ats = Counter()
#for line in lines:
# ats.update([x for x in line.split() if x[0] == '@'])
if __name__ == '__main__':
in_directory = sys.argv[1]
out_directory = sys.argv[2]
short_name = sys.argv[3]
main(in_directory, out_directory, short_name)
| [
"[email protected]"
] | |
c959374bd1150ba5cf9665a077bc81075a46cbde | 933e89e2d45803ff021cc684b4b78cd2e14a5b15 | /AllBehaviours/agent_util.py | 289676eb5c22b11ea1fdb417fd764da9c32b681b | [] | no_license | Occy88/MultiAgents | 8123d2718df5f3ad202410097f821c6d69eadd7b | c49d89caf2db49dcc3571d87fa4fb4bade516226 | refs/heads/master | 2022-12-14T18:36:02.343056 | 2021-01-01T13:38:10 | 2021-01-01T13:38:10 | 214,148,130 | 0 | 0 | null | 2022-11-22T04:36:34 | 2019-10-10T10:00:00 | Python | UTF-8 | Python | false | false | 5,716 | py | from enum import Enum
from vacuumworld.vwc import action, direction
import math
# RESOLVED AS SINGLE CHARACTERS TO SAVE COMMUNICATION BANDWIDTH
class AgentPercepts(Enum):
LEFT = '1'
RIGHT = '2'
TOP = '3'
FRONT_LEFT = '4'
FRONT_RIGHT = '5'
class AgentActions(Enum):
TURN_LEFT = '6'
TURN_RIGHT = '7'
FORWARD = '8'
class CommunicationKeys(Enum):
GRID_SIZE = 'a'
POSITION = 'b'
NEXT_ACTION = 'c'
OBSERVATIONS = 'd'
class GridDirections(Enum):
"""
Cannot be modified, (used to find orientation)
"""
TOP = 'north'
RIGHT = 'east'
LEFT = 'west'
BOTTOM = 'south'
class GridLocation:
def __init__(self):
self.dirt = None
self.agent = None
self.currently_observed = False
self.age = 0
def encode(self):
agent_details_to_send = self.agent
dirt_details_to_send = self.dirt
if self.agent is not None:
agent_details_to_send = [self.agent[0].split('-')[1], self.agent[1]]
if self.dirt is not None:
dirt_details_to_send = [self.dirt[0].split('-')[1], self.dirt[1]]
return [agent_details_to_send, dirt_details_to_send]
def decode(self, observation):
self.agent = observation[1]
self.dirt = observation[2]
if observation[1] is not None:
self.agent = ['A-' + str(observation[1][0]), observation[1][1]]
if observation[2] is not None:
self.dirt = ['D-' + str(observation[2][0]), observation[2][1]]
self.currently_observed = True
self.age = 0
def update(self):
self.age += 1
def draw(self):
string = '|'
if self.dirt is not None:
string += 'D'
else:
string += ' '
if self.agent is not None:
string += self.agent[0].split('-')[1]
else:
string += ' '
if self.currently_observed:
string += 'X'
else:
string += ' '
age = str(self.age)
age = age.ljust(3)
print(string + ' ' + age, end='')
class GridState:
def __init__(self):
self.size = 0
self.locations = []
def set_size(self, n):
self.size = n
self.locations = []
for y in range(n):
self.locations.append([])
for x in range(n):
self.locations[y].append(GridLocation())
def update(self):
for y in self.locations:
for l in y:
l.update()
def decode(self, observations):
"""
[[7, 3], ['a-1', 'orange', 'north'], None], [[6, 3], None, None], [[7, 2], ['a-2', 'orange', 'north'], None], [[6, 2], None, None]]
:param d:
:return:
"""
try:
for obs in observations:
coords = obs[0]
x = int(coords[0])
y = int(coords[1])
cell = self.locations[y][x]
cell.decode(obs)
except Exception as e:
print(e)
def encode_location(self, x, y):
"""
returns position in the format of an observation
:param x:
:param y:
:return:
"""
l = self.locations[y][x]
return [[x, y]] + l.encode()
def draw(self):
for l in self.locations:
for location in l:
location.draw()
print('')
def split_grid(grid_size):
    """
    Splits grid into points that need to be explored to cover the
    whole grid.
    :param grid_size:
    :return:
    """
    points_to_explore = []
    for y in range(1, grid_size):
        # build each row locally and append it, so indexing can never run
        # past the end of points_to_explore
        row = []
        for x in range(grid_size):
            row.append(GridLocation())
        points_to_explore.append(row)
    return points_to_explore
def get_cam_detections(observation):
"""
returns all squares seen by the camera
:param observation:
:return:[(obs,observation.direction)]
"""
obs_dir_list = {}
if observation.left is not None:
obs_dir_list[AgentPercepts.LEFT.value] = observation.left
if observation.forwardleft is not None:
obs_dir_list[AgentPercepts.FRONT_LEFT.value] = observation.forwardleft
if observation.forward is not None:
obs_dir_list[AgentPercepts.TOP.value] = observation.forward
if observation.forwardright is not None:
obs_dir_list[AgentPercepts.FRONT_RIGHT.value] = observation.forwardright
if observation.right is not None:
obs_dir_list[AgentPercepts.RIGHT.value] = observation.right
return obs_dir_list
def get_closest_agent(agent_list, point):
closest_distance = math.inf
if agent_list is None or len(agent_list)<=0:
return
closest_agent = agent_list[0]
for agent in agent_list:
agent_pos = agent[1]
distance = dist(agent_pos, point)
if distance < closest_distance:
closest_distance = distance
closest_agent = agent
elif distance == closest_distance:
if int(agent[0][0].split('-')[1]) > int(closest_agent[0][0].split('-')[1]):
closest_agent = agent
return closest_agent
def get_agents(observation):
"""
Gets all agents in the observation
returns a list
:param observation:
:return: [(agent,observation.direction)...]
"""
directions = get_cam_detections(observation)
agents = {}
for key, val in directions.items():
if val.agent is not None:
agents[key] = val.agent
return agents
def dist(p1, p2):
summation = 0
for i, v in enumerate(p1):
summation += (v - p2[i]) ** 2
return math.sqrt(summation)
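# Minimal sketch of the helpers above (agent entries follow the
# [[id, colour], (x, y)] layout that get_closest_agent indexes into;
# the values are illustrative):
#
#   agents = [(['A-1', 'orange'], (2, 3)),
#             (['A-2', 'orange'], (4, 3))]
#   dist((2, 3), (0, 3))               # -> 2.0
#   get_closest_agent(agents, (3, 3))  # both agents are 1.0 away; the tie-break
#                                      # keeps the larger id suffix, so A-2 wins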
| [
"[email protected]"
] | |
6d33d07aa654f0785f0b7306fa9ba31354982ae1 | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-Fundamentals/Final-Exam/Practice-Exams/Exam-04-04-2020/01_password_reset.py | 9a2e5e197669e3de86bca07fdc4681b322e7edb2 | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | def take_odd(password):
result = ""
for i in range(len(password)):
if i % 2 != 0:
result += password[i]
return result
def cut(index_1, length_1, password):
sub_string = password[index_1:index_1+length_1]
password = password.replace(sub_string, "", 1)
return password
def substitute(old, new, password):
if old in password:
password = password.replace(old, new)
return password
else:
return "Nothing to replace!"
string = input()
command = input()
while command != "Done":
data = command.split(maxsplit=1)
if data[0] == "TakeOdd":
string = take_odd(string)
print(string)
elif data[0] == "Cut":
index, length = data[1].split()
index = int(index)
length = int(length)
string = cut(index, length, string)
print(string)
elif data[0] == "Substitute":
sub_str, replace_str = data[1].split()
if substitute(sub_str, replace_str, string) == "Nothing to replace!":
print(substitute(sub_str, replace_str, string))
else:
string = substitute(sub_str, replace_str, string)
print(string)
command = input()
print(f"Your password is: {string}")
| [
"[email protected]"
] | |
a389ef6945b30b5b0a5c5c6f45ca52d6239e2db7 | f1ddb0bb268b3b1c3fe512b4753ff60fab5c78cb | /datapipe/image/kill_isolated_pixels.py | b31140cf0a14e5185b5940ec1561c30ed30fc096 | [
"MIT"
] | permissive | jjlk/ctapipe-wavelet-cleaning | 18ef70f2a68414974b0e5a187855694631f78121 | 091a657598a0cd493e8494319854bfc1dba3cea0 | refs/heads/master | 2021-05-11T10:43:55.292322 | 2018-01-17T14:24:29 | 2018-01-17T14:24:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,928 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__all__ = ['get_islands',
'kill_isolated_pixels',
'kill_isolated_pixels_stats',
'number_of_islands']
import numpy as np
import scipy.ndimage as ndimage
# See: https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.ndimage.measurements.label.html
def get_islands(array, threshold=0.2):
"""
...
Parameters
----------
array : Numpy array
The input image to clean.
threshold : float
The "level of the sea" before island cleaning.
Returns
-------
Numpy array
``filtered_array`` the input image with all pixels below ``threshold`` put to 0 (may contain NaN values).
Numpy array
``label_array`` define the island id each pixel belongs to (doesn't contain NaN values).
Integer
``num_labels`` the number of islands.
"""
array = array.astype('float64', copy=True)
filtered_array = np.copy(array)
# Put NaN pixels to 0
# This is OK as long as it is made temporary and internally to avoid issues
# with scipy
filtered_array[np.isnan(filtered_array)] = 0.
# Put to 0 pixels that are below 'threshold'
if threshold is not None:
filtered_array[filtered_array < threshold] = 0.
mask = filtered_array > 0
# Detect islands ("label")
label_array, num_labels = ndimage.label(mask)#, structure=np.ones((5, 5)))
# Put back NaN in filtered_array (required to avoid bugs in others
# functions (e.g. uncoherent dimensions with pixels_positions).
filtered_array[np.isnan(array)] = np.nan
return filtered_array, label_array, num_labels
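# Quick sketch of the return values for a toy array (assumes scipy's default
# 4-connected labelling):
#
#   a = np.array([[0.5, 0.0, 0.0, 0.3],
#                 [0.6, 0.0, 0.0, 0.0]])
#   filtered, labels, n = get_islands(a, threshold=0.2)
#   # filtered == a (every non-zero value is >= 0.2, so nothing is zeroed)
#   # labels marks the 0.5/0.6 blob as island 1 and the lone 0.3 as island 2
#   # n == 2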
def kill_isolated_pixels(array, threshold=0.2):
"""
...
Parameters
----------
array : Numpy array
The input image to clean.
threshold : float
The "level of the sea" before island cleaning.
Returns
-------
Numpy array
The input image ``array`` with isolated islands removed.
Only keeping the biggest islands (the largest surface).
"""
array = array.astype('float64', copy=True)
filtered_array, label_array, num_labels = get_islands(array, threshold)
# Put NaN pixels to 0
# This is OK as long as it is made temporary and internally to avoid issues
# with scipy
filtered_array[np.isnan(filtered_array)] = 0.
# Count the number of pixels for each island
num_pixels_per_island = ndimage.sum(filtered_array, label_array, range(num_labels + 1))
# Only keep the biggest island
mask_biggest_island = num_pixels_per_island < np.max(num_pixels_per_island)
remove_pixel = mask_biggest_island[label_array]
filtered_array[remove_pixel] = 0
# Put back NaN in filtered_array (required to avoid bugs in others
# functions (e.g. uncoherent dimensions with pixels_positions).
filtered_array[np.isnan(array)] = np.nan
return filtered_array
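# Continuing the toy example from get_islands above: only the island with the
# largest summed intensity survives (note that ndimage.sum adds up pixel
# values, so "biggest" here means largest total intensity, not pixel count).
#
#   kill_isolated_pixels(a, threshold=0.2)
#   # -> the 0.5/0.6 blob is kept, the isolated 0.3 pixel is zeroed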
def kill_isolated_pixels_stats(array, threshold=0.2):
array = array.astype('float64', copy=True)
filtered_array = kill_isolated_pixels(array, threshold=threshold)
delta_pe = np.nansum(array - filtered_array)
delta_abs_pe = np.nansum(np.abs(array - filtered_array))
array[np.isfinite(array) & (array != 0)] = 1 # May genereate warnings on NaN values
filtered_array[np.isfinite(filtered_array) & (filtered_array != 0)] = 1 # May genereate warnings on NaN values
delta_num_pixels = np.nansum(array - filtered_array)
return float(delta_pe), float(delta_abs_pe), float(delta_num_pixels)
def number_of_islands(array, threshold=0.2):
filtered_array, label_array, num_labels = get_islands(array, threshold)
return num_labels
| [
"[email protected]"
] | |
f930dcaa01af621049c16c525a67e528ced24dd0 | bcfa02c21a73798872bbb28303233d1f0039cf00 | /server/www/packages/packages-darwin/x64/qrcode/tests/test_qrcode.py | 32612e954908ff9d9bc64d772e9c9d534a53b38a | [
"Apache-2.0"
] | permissive | zhoulhb/teleport | 6301cd50c951bcbac21cbe24017eb8421ff57adc | 54da194697898ef77537cfe7032d774555dc1335 | refs/heads/master | 2021-11-10T17:10:59.661130 | 2021-11-09T11:16:19 | 2021-11-09T11:16:19 | 192,643,069 | 0 | 0 | Apache-2.0 | 2019-06-19T02:20:53 | 2019-06-19T02:20:52 | null | UTF-8 | Python | false | false | 8,891 | py | import warnings
import six
import sys
import qrcode
import qrcode.util
import qrcode.image.svg
try:
import qrcode.image.pure
import pymaging_png # ensure that PNG support is installed
except ImportError: # pragma: no cover
pymaging_png = None
import qrcode
from qrcode.image.base import BaseImage
from qrcode.exceptions import DataOverflowError
from qrcode.util import (
QRData, MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE)
from qrcode.tests.svg import SvgImageWhite
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
UNICODE_TEXT = u'\u03b1\u03b2\u03b3'
class QRCodeTests(unittest.TestCase):
def test_basic(self):
qr = qrcode.QRCode(version=1)
qr.add_data('a')
qr.make(fit=False)
def test_large(self):
qr = qrcode.QRCode(version=27)
qr.add_data('a')
qr.make(fit=False)
def test_invalid_version(self):
qr = qrcode.QRCode(version=41)
self.assertRaises(ValueError, qr.make, fit=False)
def test_overflow(self):
qr = qrcode.QRCode(version=1)
qr.add_data('abcdefghijklmno')
self.assertRaises(DataOverflowError, qr.make, fit=False)
def test_add_qrdata(self):
qr = qrcode.QRCode(version=1)
data = QRData('a')
qr.add_data(data)
qr.make(fit=False)
def test_fit(self):
qr = qrcode.QRCode()
qr.add_data('a')
qr.make()
self.assertEqual(qr.version, 1)
qr.add_data('bcdefghijklmno')
qr.make()
self.assertEqual(qr.version, 2)
def test_mode_number(self):
qr = qrcode.QRCode()
qr.add_data('1234567890123456789012345678901234', optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_NUMBER)
def test_mode_alpha(self):
qr = qrcode.QRCode()
qr.add_data('ABCDEFGHIJ1234567890', optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_ALPHA_NUM)
def test_regression_mode_comma(self):
qr = qrcode.QRCode()
qr.add_data(',', optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit(self):
qr = qrcode.QRCode()
qr.add_data(u'abcABC' + UNICODE_TEXT, optimize=0)
qr.make()
self.assertEqual(qr.version, 1)
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_mode_8bit_newline(self):
qr = qrcode.QRCode()
qr.add_data('ABCDEFGHIJ1234567890\n', optimize=0)
qr.make()
self.assertEqual(qr.data_list[0].mode, MODE_8BIT_BYTE)
def test_render_pil(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image()
img.save(six.BytesIO())
def test_qrcode_bad_factory(self):
self.assertRaises(
TypeError, qrcode.QRCode, image_factory='not_BaseImage')
self.assertRaises(
AssertionError, qrcode.QRCode, image_factory=dict)
def test_qrcode_factory(self):
class MockFactory(BaseImage):
drawrect = mock.Mock()
qr = qrcode.QRCode(image_factory=MockFactory)
qr.add_data(UNICODE_TEXT)
qr.make_image()
self.assertTrue(MockFactory.drawrect.called)
def test_render_svg(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgImage)
img.save(six.BytesIO())
def test_render_svg_path(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
img.save(six.BytesIO())
def test_render_svg_fragment(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.svg.SvgFragmentImage)
img.save(six.BytesIO())
def test_render_svg_with_background(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=SvgImageWhite)
img.save(six.BytesIO())
@unittest.skipIf(not pymaging_png, "Requires pymaging with PNG support")
def test_render_pymaging_png(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.pure.PymagingImage)
with warnings.catch_warnings():
if six.PY3:
warnings.simplefilter('ignore', DeprecationWarning)
img.save(six.BytesIO())
@unittest.skipIf(not pymaging_png, "Requires pymaging")
def test_render_pymaging_png_bad_kind(self):
qr = qrcode.QRCode()
qr.add_data(UNICODE_TEXT)
img = qr.make_image(image_factory=qrcode.image.pure.PymagingImage)
self.assertRaises(ValueError, img.save, six.BytesIO(), kind='FISH')
def test_optimize(self):
qr = qrcode.QRCode()
text = 'A1abc12345def1HELLOa'
qr.add_data(text, optimize=4)
qr.make()
self.assertEqual(
[d.mode for d in qr.data_list],
[
MODE_8BIT_BYTE, MODE_NUMBER, MODE_8BIT_BYTE, MODE_ALPHA_NUM,
MODE_8BIT_BYTE
]
)
self.assertEqual(qr.version, 2)
def test_optimize_short(self):
qr = qrcode.QRCode()
text = 'A1abc1234567def1HELLOa'
qr.add_data(text, optimize=7)
qr.make()
self.assertEqual(len(qr.data_list), 3)
self.assertEqual(
[d.mode for d in qr.data_list],
[MODE_8BIT_BYTE, MODE_NUMBER, MODE_8BIT_BYTE]
)
self.assertEqual(qr.version, 2)
def test_optimize_size(self):
text = 'A1abc12345123451234512345def1HELLOHELLOHELLOHELLOa' * 5
qr = qrcode.QRCode()
qr.add_data(text)
qr.make()
self.assertEqual(qr.version, 10)
qr = qrcode.QRCode()
qr.add_data(text, optimize=0)
qr.make()
self.assertEqual(qr.version, 11)
def test_qrdata_repr(self):
data = b'hello'
data_obj = qrcode.util.QRData(data)
self.assertEqual(repr(data_obj), repr(data))
def test_print_ascii_stdout(self):
qr = qrcode.QRCode()
stdout_encoding = sys.stdout.encoding
with mock.patch('sys.stdout') as fake_stdout:
# Python 2.6 needs sys.stdout.encoding to be a real string.
sys.stdout.encoding = stdout_encoding
fake_stdout.isatty.return_value = None
self.assertRaises(OSError, qr.print_ascii, tty=True)
self.assertTrue(fake_stdout.isatty.called)
def test_print_ascii(self):
qr = qrcode.QRCode(border=0)
f = six.StringIO()
qr.print_ascii(out=f)
printed = f.getvalue()
f.close()
expected = u'\u2588\u2580\u2580\u2580\u2580\u2580\u2588'
self.assertEqual(printed[:len(expected)], expected)
f = six.StringIO()
f.isatty = lambda: True
qr.print_ascii(out=f, tty=True)
printed = f.getvalue()
f.close()
expected = (
u'\x1b[48;5;232m\x1b[38;5;255m' +
u'\xa0\u2584\u2584\u2584\u2584\u2584\xa0')
self.assertEqual(printed[:len(expected)], expected)
def test_print_tty_stdout(self):
qr = qrcode.QRCode()
with mock.patch('sys.stdout') as fake_stdout:
fake_stdout.isatty.return_value = None
self.assertRaises(OSError, qr.print_tty)
self.assertTrue(fake_stdout.isatty.called)
def test_print_tty(self):
qr = qrcode.QRCode()
f = six.StringIO()
f.isatty = lambda: True
qr.print_tty(out=f)
printed = f.getvalue()
f.close()
BOLD_WHITE_BG = '\x1b[1;47m'
BLACK_BG = '\x1b[40m'
WHITE_BLOCK = BOLD_WHITE_BG + ' ' + BLACK_BG
EOL = '\x1b[0m\n'
expected = (
BOLD_WHITE_BG + ' '*23 + EOL +
WHITE_BLOCK + ' '*7 + WHITE_BLOCK)
self.assertEqual(printed[:len(expected)], expected)
def test_get_matrix(self):
qr = qrcode.QRCode(border=0)
qr.add_data('1')
self.assertEqual(qr.get_matrix(), qr.modules)
def test_get_matrix_border(self):
qr = qrcode.QRCode(border=1)
qr.add_data('1')
matrix = [row[1:-1] for row in qr.get_matrix()[1:-1]]
self.assertEqual(matrix, qr.modules)
def test_negative_size_at_construction(self):
self.assertRaises(ValueError, qrcode.QRCode, box_size=-1)
def test_negative_size_at_usage(self):
qr = qrcode.QRCode()
qr.box_size = -1
self.assertRaises(ValueError, qr.make_image)
class ShortcutTest(unittest.TestCase):
def runTest(self):
qrcode.make('image')
| [
"[email protected]"
] | |
94b87efb8bbc64ecfe8f2e4dc896d8dd597d68d0 | 64a646a8c72c5a0a6c981a1864a2021c6526f025 | /tests/test_printer.py | e95cf30018b49e2a9efed0993940e56173a451f5 | [
"BSD-3-Clause"
] | permissive | vmagamedov/kinko | 6b11d8bf38196a4100ed5c7679204f55461b7fd1 | b65f8ae97bbf5d056781e90e22d2a369f440ed4c | refs/heads/master | 2020-04-06T06:58:54.300766 | 2016-06-29T14:51:32 | 2016-06-29T14:51:32 | 38,629,823 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | import difflib
from textwrap import dedent
from kinko.nodes import Tuple, Symbol, Keyword, String, List, Placeholder
from kinko.printer import Printer
from .base import TestCase
class TestPrinter(TestCase):
def assertPrints(self, node, output):
first = Printer.dumps(node)
second = dedent(output).strip() + '\n'
if first != second:
msg = ('Printed code is not equal:\n\n{}'
.format('\n'.join(difflib.ndiff(first.splitlines(),
second.splitlines()))))
raise self.failureException(msg)
def testSimple(self):
self.assertPrints(
Tuple([Symbol('html'),
Keyword('foo'), String('bar'), Symbol('baz')]),
"""
html :foo "bar" baz
""",
)
def testNested(self):
self.assertPrints(
Tuple([Symbol('html'),
Keyword('foo'), String('bar'),
Tuple([Symbol('head')])]),
"""
html :foo "bar"
head
""",
)
def testJoin(self):
self.assertPrints(
Tuple([Symbol('html'),
Keyword('foo'), String('bar'),
Tuple([Symbol('join'), List([
Tuple([Symbol('head')]),
Tuple([Symbol('body')]),
])])]),
"""
html :foo "bar"
head
body
""",
)
def testGet(self):
self.assertPrints(
Tuple([Symbol('html'),
Keyword('foo'), Tuple([Symbol('get'), Symbol('bar'),
Symbol('baz')])]),
"""
html :foo bar.baz
""",
)
self.assertPrints(
Tuple([Symbol('html'),
Keyword('foo'), Tuple([Symbol('get'), Placeholder('bar'),
Symbol('baz')])]),
"""
html :foo #bar.baz
""",
)
| [
"[email protected]"
] | |
c080be714d98599463d46bacf36758234d7faf91 | 0e9e2bfc8d4b5c297ceb4908eb4ca55c4101ea5b | /function_views/migrations/0003_auto_20200420_2102.py | e59be5a859cb9efc08590d349ec9d9af9543c697 | [
"MIT"
] | permissive | cyrilleAdelphe/django-views-tutorial | 776ee1b08f0c6fc68fb56cf64f57bf20c557b9d0 | 71cd7fdb0846a61fcff7f1822d58b0e862be352a | refs/heads/master | 2023-06-30T10:09:47.021727 | 2020-12-08T03:50:14 | 2020-12-08T03:50:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.0.5 on 2020-04-21 02:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("function_views", "0002_auto_20200420_2052"),
]
operations = [
migrations.AlterField(
model_name="user",
name="bio",
field=models.TextField(null=True),
),
]
| [
"[email protected]"
] | |
824a8bfdd632b45637dadcb4532b085dc7756e6e | 2d276785c3663d4798be462115291c4706dbd255 | /Python从菜鸟到高手/chapter4/demo4.20.py | f4c7eb45f4e343ffc86089f644684daa5542aa05 | [] | no_license | bupthl/Python | 81c92433bd955663e6cda5fe7cab5ea3d067c3de | bdb33aeeb179a43100b9ef7129a925c63a133fd3 | refs/heads/master | 2022-02-21T11:02:40.195265 | 2019-08-16T05:49:18 | 2019-08-16T05:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | '''
--------Source code for "Python从菜鸟到高手" (Python: From Novice to Master)------------
Copyright 欧瑞科技 (Orui Tech). All rights reserved.
Author: 李宁 (Li Ning)
For any technical question, join the QQ discussion group: 264268059,
or follow the "极客起源" subscription account or the "欧瑞科技" service account;
the QR codes to follow both accounts are in the source-code root directory.
If the QQ group is full, visit https://geekori.com, check the right-hand side for
the latest QQ group, and scan the code there to follow the official account.
"欧瑞学院" is Orui Tech's online IT school and hosts a large number of video courses
on cutting-edge IT topics; visit http://geekori.com/edu or follow the accounts above
to reach the mobile version of the school.
"极客题库" is Orui Tech's online question bank; scan the mini-program code in the
source-code root directory to install the "极客题库" mini program.
For more information, please visit the page below:
https://geekori.com/help/videocourse/readme.html
'''
print('hello' * 5)
print([20] * 10)
print([None] * 6)
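# The three statements above demonstrate sequence repetition with `*`.
# Expected output:
#   hellohellohellohellohello
#   [20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
#   [None, None, None, None, None, None]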
| [
"[email protected]"
] | |
15729ac399f50b317aa8bb686eb760418a3e65cf | 1b5f653955779f45e78ca6dda925518779d09e8f | /submissions/2779.py | ae704befd917231ea60204a5bca5bfbc3b93854e | [] | no_license | LeonardoSaid/uri-py-solutions | ad285f552934ead54ad2410e23113e84b0724f72 | 43c10c0e99e99d22b4b5ae2871e5d897f8823b42 | refs/heads/master | 2020-08-11T00:28:48.661578 | 2020-04-23T20:21:39 | 2020-04-23T20:21:39 | 214,453,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | n = int(input())
c = int(input())
m = list()
while c > 0:
x = int(input())
if x not in m:
m.append(x)
c -= 1
print(n-len(m)) | [
"[email protected]"
] | |
7694ec591f3fb364781ff62e28267ea8d3b26a4e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /pAFxfge35bT3zj4Bs_19.py | d59e6f7d09df6fd8ccc31e7672ecb60bc30a3dd0 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | """
Write a function that accepts `base` (decimal), `height` (decimal) and `shape`
("triangle", "parallelogram") as input and calculates the area of that shape.
### Examples
area_shape(2, 3, "triangle") ➞ 3
area_shape(8, 6, "parallelogram") ➞ 48
area_shape(2.9, 1.3, "parallelogram") ➞ 3.77
### Notes
* Area of a triangle is `0.5 * b * h`
* Area of a parallelogram is `b * h`
* Assume triangle and parallelogram are the only inputs for `shape`.
"""
def area_shape(base, height, shape):
if shape == "triangle":
area = 0.5 * (base * height)
elif shape == "parallelogram":
area = base * height
return area
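# Quick sanity checks against the examples in the docstring above (kept as
# comments so the submitted solution's behaviour is unchanged):
#   area_shape(2, 3, "triangle")           # -> 3.0
#   area_shape(8, 6, "parallelogram")      # -> 48
#   area_shape(2.9, 1.3, "parallelogram")  # -> 3.77 (up to float rounding)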
| [
"[email protected]"
] | |
4269b68f681e3630513770809d1de919fa9e353e | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res_bw/scripts/common/lib/bsddb/test/test_dbshelve.py | 723a44f72eb9db34edb2fb3745e46003fdd76de4 | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 10,771 | py | # 2016.02.14 12:46:57 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/bsddb/test/test_dbshelve.py
"""
TestCases for checking dbShelve objects.
"""
import os, string, sys
import random
import unittest
from test_all import db, dbshelve, test_support, verbose, get_new_environment_path, get_new_database_path
class DataClass:
def __init__(self):
self.value = random.random()
def __repr__(self):
return 'DataClass %f' % self.value
def __cmp__(self, other):
return cmp(self.value, other)
class DBShelveTestCase(unittest.TestCase):
if sys.version_info < (2, 7) or sys.version_info >= (3, 0) and sys.version_info < (3, 2):
def assertIn(self, a, b, msg = None):
return self.assertTrue(a in b, msg=msg)
def setUp(self):
if sys.version_info[0] >= 3:
from test_all import do_proxy_db_py3k
self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
self.filename = get_new_database_path()
self.do_open()
def tearDown(self):
if sys.version_info[0] >= 3:
from test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
self.do_close()
test_support.unlink(self.filename)
def mk(self, key):
"""Turn key into an appropriate key type for this db"""
if sys.version_info[0] < 3:
return key
else:
return bytes(key, 'iso8859-1')
def populateDB(self, d):
for x in string.letters:
d[self.mk('S' + x)] = 10 * x
d[self.mk('I' + x)] = ord(x)
d[self.mk('L' + x)] = [x] * 10
inst = DataClass()
inst.S = 10 * x
inst.I = ord(x)
inst.L = [x] * 10
d[self.mk('O' + x)] = inst
def do_open(self):
self.d = dbshelve.open(self.filename)
def do_close(self):
self.d.close()
def test01_basics(self):
if verbose:
print '\n', '-=' * 30
print 'Running %s.test01_basics...' % self.__class__.__name__
self.populateDB(self.d)
self.d.sync()
self.do_close()
self.do_open()
d = self.d
l = len(d)
k = d.keys()
s = d.stat()
f = d.fd()
if verbose:
print 'length:', l
print 'keys:', k
print 'stats:', s
self.assertEqual(0, d.has_key(self.mk('bad key')))
self.assertEqual(1, d.has_key(self.mk('IA')))
self.assertEqual(1, d.has_key(self.mk('OA')))
d.delete(self.mk('IA'))
del d[self.mk('OA')]
self.assertEqual(0, d.has_key(self.mk('IA')))
self.assertEqual(0, d.has_key(self.mk('OA')))
self.assertEqual(len(d), l - 2)
values = []
for key in d.keys():
value = d[key]
values.append(value)
if verbose:
print '%s: %s' % (key, value)
self.checkrec(key, value)
dbvalues = d.values()
self.assertEqual(len(dbvalues), len(d.keys()))
if sys.version_info < (2, 6):
values.sort()
dbvalues.sort()
self.assertEqual(values, dbvalues)
else:
values.sort(key=lambda x: str(x))
dbvalues.sort(key=lambda x: str(x))
self.assertEqual(repr(values), repr(dbvalues))
items = d.items()
self.assertEqual(len(items), len(values))
for key, value in items:
self.checkrec(key, value)
self.assertEqual(d.get(self.mk('bad key')), None)
self.assertEqual(d.get(self.mk('bad key'), None), None)
self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])
d.set_get_returns_none(0)
self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
d.set_get_returns_none(1)
d.put(self.mk('new key'), 'new data')
self.assertEqual(d.get(self.mk('new key')), 'new data')
self.assertEqual(d[self.mk('new key')], 'new data')
return
def test02_cursors(self):
if verbose:
print '\n', '-=' * 30
print 'Running %s.test02_cursors...' % self.__class__.__name__
self.populateDB(self.d)
d = self.d
count = 0
c = d.cursor()
rec = c.first()
while rec is not None:
count = count + 1
if verbose:
print rec
key, value = rec
self.checkrec(key, value)
rec = getattr(c, 'next')()
del c
self.assertEqual(count, len(d))
count = 0
c = d.cursor()
rec = c.last()
while rec is not None:
count = count + 1
if verbose:
print rec
key, value = rec
self.checkrec(key, value)
rec = c.prev()
self.assertEqual(count, len(d))
c.set(self.mk('SS'))
key, value = c.current()
self.checkrec(key, value)
del c
return
def test03_append(self):
if verbose:
print '\n', '-=' * 30
print 'Running %s.test03_append...' % self.__class__.__name__
self.assertRaises(dbshelve.DBShelveError, self.d.append, 'unit test was here')
def test04_iterable(self):
self.populateDB(self.d)
d = self.d
keys = d.keys()
keyset = set(keys)
self.assertEqual(len(keyset), len(keys))
for key in d:
self.assertIn(key, keyset)
keyset.remove(key)
self.assertEqual(len(keyset), 0)
def checkrec(self, key, value):
if sys.version_info[0] >= 3:
if isinstance(key, bytes):
key = key.decode('iso8859-1')
x = key[1]
if key[0] == 'S':
self.assertEqual(type(value), str)
self.assertEqual(value, 10 * x)
elif key[0] == 'I':
self.assertEqual(type(value), int)
self.assertEqual(value, ord(x))
elif key[0] == 'L':
self.assertEqual(type(value), list)
self.assertEqual(value, [x] * 10)
elif key[0] == 'O':
if sys.version_info[0] < 3:
from types import InstanceType
self.assertEqual(type(value), InstanceType)
else:
self.assertEqual(type(value), DataClass)
self.assertEqual(value.S, 10 * x)
self.assertEqual(value.I, ord(x))
self.assertEqual(value.L, [x] * 10)
else:
self.assertTrue(0, 'Unknown key type, fix the test')
class BasicShelveTestCase(DBShelveTestCase):
def do_open(self):
self.d = dbshelve.DBShelf()
self.d.open(self.filename, self.dbtype, self.dbflags)
def do_close(self):
self.d.close()
class BTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
class HashShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class ThreadBTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE | db.DB_THREAD
class ThreadHashShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
class BasicEnvShelveTestCase(DBShelveTestCase):
def do_open(self):
self.env = db.DBEnv()
self.env.open(self.homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
self.filename = os.path.split(self.filename)[1]
self.d = dbshelve.DBShelf(self.env)
self.d.open(self.filename, self.dbtype, self.dbflags)
def do_close(self):
self.d.close()
self.env.close()
def setUp(self):
self.homeDir = get_new_environment_path()
DBShelveTestCase.setUp(self)
def tearDown(self):
if sys.version_info[0] >= 3:
from test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
self.do_close()
test_support.rmtree(self.homeDir)
class EnvBTreeShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
class EnvHashShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE | db.DB_THREAD
class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
class RecNoShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_RECNO
dbflags = db.DB_CREATE
def setUp(self):
BasicShelveTestCase.setUp(self)
self.key_pool = list(range(1, 5000))
self.key_map = {}
self.intkey_map = {}
def mk(self, key):
if key not in self.key_map:
self.key_map[key] = self.key_pool.pop(0)
self.intkey_map[self.key_map[key]] = key
return self.key_map[key]
def checkrec(self, intkey, value):
key = self.intkey_map[intkey]
BasicShelveTestCase.checkrec(self, key, value)
def test03_append(self):
if verbose:
print '\n', '-=' * 30
print 'Running %s.test03_append...' % self.__class__.__name__
self.d[1] = 'spam'
self.d[5] = 'eggs'
self.assertEqual(6, self.d.append('spam'))
self.assertEqual(7, self.d.append('baked beans'))
self.assertEqual('spam', self.d.get(6))
self.assertEqual('spam', self.d.get(1))
self.assertEqual('baked beans', self.d.get(7))
self.assertEqual('eggs', self.d.get(5))
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBShelveTestCase))
suite.addTest(unittest.makeSuite(BTreeShelveTestCase))
suite.addTest(unittest.makeSuite(HashShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(RecNoShelveTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\bsddb\test\test_dbshelve.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:46:58 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
fdc6ea27d7950c594da38246cc2d8663b4d8363b | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/proc/applicationcpuhist15min.py | f7bb3a84414b139644153e14fb5cb1aafa198085 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,906 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ApplicationCPUHist15min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.proc.ApplicationCPUHist15min", "Application CPU utilization")
counter = CounterMeta("current", CounterCategory.GAUGE, "percentage", "Application CPU usage")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "currentMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "currentMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "currentAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "currentSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "currentThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "currentTr"
meta._counters.append(counter)
meta.moClassName = "procApplicationCPUHist15min"
meta.rnFormat = "HDprocApplicationCPU15min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Application CPU utilization stats in 15 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.proc.App")
meta.parentClasses.add("cobra.model.proc.Container")
meta.superClasses.add("cobra.model.proc.ApplicationCPUHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDprocApplicationCPU15min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "currentAvg", "currentAvg", 30353, PropCategory.IMPLICIT_AVG)
prop.label = "Application CPU usage average value"
prop.isOper = True
prop.isStats = True
meta.props.add("currentAvg", prop)
prop = PropMeta("str", "currentMax", "currentMax", 30352, PropCategory.IMPLICIT_MAX)
prop.label = "Application CPU usage maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("currentMax", prop)
prop = PropMeta("str", "currentMin", "currentMin", 30351, PropCategory.IMPLICIT_MIN)
prop.label = "Application CPU usage minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("currentMin", prop)
prop = PropMeta("str", "currentSpct", "currentSpct", 30354, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Application CPU usage suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("currentSpct", prop)
prop = PropMeta("str", "currentThr", "currentThr", 30355, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Application CPU usage thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("currentThr", prop)
prop = PropMeta("str", "currentTr", "currentTr", 30356, PropCategory.IMPLICIT_TREND)
prop.label = "Application CPU usage trend"
prop.isOper = True
prop.isStats = True
meta.props.add("currentTr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 30326, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
432eda245cde6554464c98cca73dea1dbe401983 | 2c5b25d0b5d6ba66d013251f93ebf4c642fd787b | /wrong_answer_codes/Next_Permutation/Next Permutation_279803526.py | 0f2f44ba7c182ed06e0f91406f928fa3d8ab7085 | [] | no_license | abhinay-b/Leetcode-Submissions | da8099ac54b5d36ae23db42580064d0f9d9bc63b | d034705813f3f908f555f1d1677b827af751bf42 | refs/heads/master | 2022-10-15T22:09:36.328967 | 2020-06-14T15:39:17 | 2020-06-14T15:39:17 | 259,984,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | class Solution:
def nextPermutation(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if len(nums) < 2:
return
idx = len(nums) - 1
found = False
while idx > 0:
if nums[idx-1] < nums[idx]:
nums[idx-1],nums[idx] = nums[idx], nums[idx-1]
found = True
break
idx -= 1
if found == False:
nums = nums[::-1]
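            # Note (this file sits under wrong_answer_codes): rebinding the local name here
            # does not reverse the caller's list in place (the docstring asks for in-place
            # modification; `nums[:] = nums[::-1]` would do that), and a single adjacent swap
            # is not the full next-permutation algorithm, so some inputs give wrong results.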
| [
"[email protected]"
] | |
292f6b296954e7cea39ad9a0858abbaba0e99edc | 5f86944bdf1b810a84c63adc6ed01bbb48d2c59a | /kubernetes/test/test_v1beta1_certificate_signing_request_status.py | c35e4425ee58df047c2b093e26ce28ab6d3998da | [
"Apache-2.0"
] | permissive | m4ttshaw/client-python | 384c721ba57b7ccc824d5eca25834d0288b211e2 | 4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1 | refs/heads/master | 2021-01-13T06:05:51.564765 | 2017-06-21T08:31:03 | 2017-06-21T08:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_certificate_signing_request_status import V1beta1CertificateSigningRequestStatus
class TestV1beta1CertificateSigningRequestStatus(unittest.TestCase):
""" V1beta1CertificateSigningRequestStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CertificateSigningRequestStatus(self):
"""
Test V1beta1CertificateSigningRequestStatus
"""
model = kubernetes.client.models.v1beta1_certificate_signing_request_status.V1beta1CertificateSigningRequestStatus()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1ecd3331a0fce54de551858fff25267360267370 | b8f77ec8e4c6da4a2117140a3eeac0201ab08678 | /seleniumbase/plugins/selenium_plugin.py | 0516847bad855e38692c817c5c8f90e306b35431 | [
"MIT"
] | permissive | CyrilShch/SeleniumBase | b07571172c173278eaf49b88ee24dd0eac779936 | 269e367d2b7bcd3741840241c5da184d7a3b2bc7 | refs/heads/master | 2023-06-11T20:42:13.325171 | 2021-07-04T12:56:30 | 2021-07-04T12:56:30 | 381,798,302 | 0 | 0 | MIT | 2021-06-30T18:29:27 | 2021-06-30T18:29:27 | null | UTF-8 | Python | false | false | 27,689 | py | # -*- coding: utf-8 -*-
""" This is the Nosetest plugin for setting Selenium test configuration. """
import sys
from nose.plugins import Plugin
from seleniumbase import config as sb_config
from seleniumbase.config import settings
from seleniumbase.core import proxy_helper
from seleniumbase.fixtures import constants
class SeleniumBrowser(Plugin):
"""
This plugin adds the following command-line options to nosetests:
--browser=BROWSER (The web browser to use. Default: "chrome".)
--user-data-dir=DIR (Set the Chrome user data directory to use.)
--protocol=PROTOCOL (The Selenium Grid protocol: http|https.)
--server=SERVER (The Selenium Grid server/IP used for tests.)
--port=PORT (The Selenium Grid port used by the test server.)
--cap-file=FILE (The web browser's desired capabilities to use.)
--cap-string=STRING (The web browser's desired capabilities to use.)
--proxy=SERVER:PORT (Connect to a proxy server:port for tests.)
--proxy=USERNAME:PASSWORD@SERVER:PORT (Use authenticated proxy server.)
--agent=STRING (Modify the web browser's User-Agent string.)
--mobile (Use the mobile device emulator while running tests.)
--metrics=STRING (Set mobile metrics: "CSSWidth,CSSHeight,PixelRatio".)
--chromium-arg=ARG (Add a Chromium arg for Chrome/Edge, comma-separated.)
--firefox-arg=ARG (Add a Firefox arg for Firefox, comma-separated.)
--firefox-pref=SET (Set a Firefox preference:value set, comma-separated.)
--extension-zip=ZIP (Load a Chrome Extension .zip|.crx, comma-separated.)
--extension-dir=DIR (Load a Chrome Extension directory, comma-separated.)
--headless (Run tests headlessly. Default mode on Linux OS.)
--headed (Run tests with a GUI on Linux OS.)
--locale=LOCALE_CODE (Set the Language Locale Code for the web browser.)
--interval=SECONDS (The autoplay interval for presentations & tour steps)
--start-page=URL (The starting URL for the web browser when tests begin.)
--time-limit=SECONDS (Safely fail any test that exceeds the time limit.)
--slow (Slow down the automation. Faster than using Demo Mode.)
--demo (Slow down and visually see test actions as they occur.)
--demo-sleep=SECONDS (Set the wait time after Demo Mode actions.)
--highlights=NUM (Number of highlight animations for Demo Mode actions.)
--message-duration=SECONDS (The time length for Messenger alerts.)
--check-js (Check for JavaScript errors after page loads.)
--ad-block (Block some types of display ads after page loads.)
--block-images (Block images from loading during tests.)
--verify-delay=SECONDS (The delay before MasterQA verification checks.)
--disable-csp (Disable the Content Security Policy of websites.)
--disable-ws (Disable Web Security on Chromium-based browsers.)
--enable-ws (Enable Web Security on Chromium-based browsers.)
--enable-sync (Enable "Chrome Sync".)
--use-auto-ext (Use Chrome's automation extension.)
--remote-debug (Enable Chrome's Remote Debugger on http://localhost:9222)
--swiftshader (Use Chrome's "--use-gl=swiftshader" feature.)
--incognito (Enable Chrome's Incognito mode.)
--guest (Enable Chrome's Guest mode.)
--devtools (Open Chrome's DevTools when the browser opens.)
--maximize (Start tests with the web browser window maximized.)
--save-screenshot (Save a screenshot at the end of each test.)
--visual-baseline (Set the visual baseline for Visual/Layout tests.)
--timeout-multiplier=MULTIPLIER (Multiplies the default timeout values.)
"""
name = "selenium" # Usage: --with-selenium
def options(self, parser, env):
super(SeleniumBrowser, self).options(parser, env=env)
parser.add_option(
"--browser",
action="store",
dest="browser",
choices=constants.ValidBrowsers.valid_browsers,
default=constants.Browser.GOOGLE_CHROME,
help="""Specifies the web browser to use. Default: Chrome.
If you want to use Firefox, explicitly indicate that.
Example: (--browser=firefox)""",
)
parser.add_option(
"--browser_version",
"--browser-version",
action="store",
dest="browser_version",
default="latest",
help="""The browser version to use. Explicitly select
a version number or use "latest".""",
)
parser.add_option(
"--cap_file",
"--cap-file",
action="store",
dest="cap_file",
default=None,
help="""The file that stores browser desired capabilities
for BrowserStack or Sauce Labs web drivers.""",
)
parser.add_option(
"--cap_string",
"--cap-string",
dest="cap_string",
default=None,
help="""The string that stores browser desired
capabilities for BrowserStack, Sauce Labs,
and other remote web drivers to use.
Enclose cap-string in single quotes.
Enclose parameter keys in double quotes.
Example: --cap-string='{"name":"test1","v":"42"}'""",
)
parser.add_option(
"--user_data_dir",
"--user-data-dir",
action="store",
dest="user_data_dir",
default=None,
help="""The Chrome User Data Directory to use. (Chrome Profile)
If the directory doesn't exist, it'll be created.""",
)
parser.add_option(
"--protocol",
action="store",
dest="protocol",
choices=(
constants.Protocol.HTTP,
constants.Protocol.HTTPS,
),
default=constants.Protocol.HTTP,
help="""Designates the Selenium Grid protocol to use.
Default: http.""",
)
parser.add_option(
"--server",
action="store",
dest="servername",
default="localhost",
help="""Designates the Selenium Grid server to use.
Use "127.0.0.1" to connect to a localhost Grid.
If unset or set to "localhost", Grid isn't used.
Default: "localhost".""",
)
parser.add_option(
"--port",
action="store",
dest="port",
default="4444",
help="""Designates the Selenium Grid port to use.
Default: 4444.""",
)
parser.add_option(
"--proxy",
action="store",
dest="proxy_string",
default=None,
help="""Designates the proxy server:port to use.
Format: servername:port. OR
username:password@servername:port OR
A dict key from proxy_list.PROXY_LIST
Default: None.""",
)
parser.add_option(
"--agent",
"--user-agent",
"--user_agent",
action="store",
dest="user_agent",
default=None,
help="""Designates the User-Agent for the browser to use.
Format: A string.
Default: None.""",
)
parser.add_option(
"--mobile",
"--mobile-emulator",
"--mobile_emulator",
action="store_true",
dest="mobile_emulator",
default=False,
help="""If this option is enabled, the mobile emulator
will be used while running tests.""",
)
parser.add_option(
"--metrics",
"--device-metrics",
"--device_metrics",
action="store",
dest="device_metrics",
default=None,
help="""Designates the three device metrics of the mobile
emulator: CSS Width, CSS Height, and Pixel-Ratio.
Format: A comma-separated string with the 3 values.
Example: "375,734,3"
Default: None. (Will use default values if None)""",
)
parser.add_option(
"--chromium_arg",
"--chromium-arg",
action="store",
dest="chromium_arg",
default=None,
help="""Add a Chromium argument for Chrome/Edge browsers.
Format: A comma-separated list of Chromium args.
If an arg doesn't start with "--", that will be
added to the beginning of the arg automatically.
Default: None.""",
)
parser.add_option(
"--firefox_arg",
"--firefox-arg",
action="store",
dest="firefox_arg",
default=None,
help="""Add a Firefox argument for Firefox browser runs.
Format: A comma-separated list of Firefox args.
If an arg doesn't start with "--", that will be
added to the beginning of the arg automatically.
Default: None.""",
)
parser.add_option(
"--firefox_pref",
"--firefox-pref",
action="store",
dest="firefox_pref",
default=None,
help="""Set a Firefox preference:value combination.
Format: A comma-separated list of pref:value items.
Example usage:
--firefox-pref="browser.formfill.enable:True"
--firefox-pref="pdfjs.disabled:False"
--firefox-pref="abc.def.xyz:42,hello.world:text"
Boolean and integer values to the right of the ":"
will be automatically converted into proper format.
If there's no ":" in the string, then True is used.
Default: None.""",
)
parser.add_option(
"--extension_zip",
"--extension-zip",
"--crx",
action="store",
dest="extension_zip",
default=None,
help="""Designates the Chrome Extension ZIP file to load.
Format: A comma-separated list of .zip or .crx files
containing the Chrome extensions to load.
Default: None.""",
)
parser.add_option(
"--extension_dir",
"--extension-dir",
action="store",
dest="extension_dir",
default=None,
help="""Designates the Chrome Extension folder to load.
Format: A directory containing the Chrome extension.
(Can also be a comma-separated list of directories.)
Default: None.""",
)
parser.add_option(
"--headless",
action="store_true",
dest="headless",
default=False,
help="""Using this makes Webdriver run web browsers headlessly,
which is required on headless machines.
Default: False on Mac/Windows. True on Linux.""",
)
parser.add_option(
"--headed",
"--gui",
action="store_true",
dest="headed",
default=False,
help="""Using this makes Webdriver run web browsers with
a GUI when running tests on Linux machines.
(The default setting on Linux is headless.)
(The default setting on Mac or Windows is headed.)""",
)
parser.add_option(
"--locale_code",
"--locale-code",
"--locale",
action="store",
dest="locale_code",
default=None,
help="""Designates the Locale Code for the web browser.
A Locale is a specific version of a spoken Language.
The Locale alters visible text on supported websites.
See: https://seleniumbase.io/help_docs/locale_codes/
Default: None. (The web browser's default mode.)""",
)
parser.add_option(
"--interval",
action="store",
dest="interval",
default=None,
help="""This globally overrides the default interval,
(in seconds), of features that include autoplay
functionality, such as tours and presentations.
Overrides from methods take priority over this.
(Headless Mode skips tours and presentations.)""",
)
parser.add_option(
"--start_page",
"--start-page",
"--url",
action="store",
dest="start_page",
default=None,
help="""Designates the starting URL for the web browser
when each test begins.
Default: None.""",
)
parser.add_option(
"--time_limit",
"--time-limit",
"--timelimit",
action="store",
dest="time_limit",
default=None,
help="""Use this to set a time limit per test, in seconds.
If a test runs beyond the limit, it fails.""",
)
parser.add_option(
"--slow_mode",
"--slow-mode",
"--slow",
action="store_true",
dest="slow_mode",
default=False,
help="""Using this slows down the automation.""",
)
parser.add_option(
"--demo_mode",
"--demo-mode",
"--demo",
action="store_true",
dest="demo_mode",
default=False,
help="""Using this slows down the automation and lets you
visually see what the tests are actually doing.""",
)
parser.add_option(
"--demo_sleep",
"--demo-sleep",
action="store",
dest="demo_sleep",
default=None,
help="""Setting this overrides the Demo Mode sleep
time that happens after browser actions.""",
)
parser.add_option(
"--highlights",
action="store",
dest="highlights",
default=None,
help="""Setting this overrides the default number of
highlight animation loops to have per call.""",
)
parser.add_option(
"--message_duration",
"--message-duration",
action="store",
dest="message_duration",
default=None,
help="""Setting this overrides the default time that
messenger notifications remain visible when reaching
assert statements during Demo Mode.""",
)
parser.add_option(
"--check_js",
"--check-js",
action="store_true",
dest="js_checking_on",
default=False,
help="""The option to check for JavaScript errors after
every page load.""",
)
parser.add_option(
"--ad_block",
"--ad-block",
"--block_ads",
"--block-ads",
action="store_true",
dest="ad_block_on",
default=False,
help="""Using this makes WebDriver block display ads
that are defined in ad_block_list.AD_BLOCK_LIST.""",
)
parser.add_option(
"--block_images",
"--block-images",
action="store_true",
dest="block_images",
default=False,
help="""Using this makes WebDriver block images from
loading on web pages during tests.""",
)
parser.add_option(
"--verify_delay",
"--verify-delay",
action="store",
dest="verify_delay",
default=None,
help="""Setting this overrides the default wait time
before each MasterQA verification pop-up.""",
)
parser.add_option(
"--disable_csp",
"--disable-csp",
action="store_true",
dest="disable_csp",
default=False,
help="""Using this disables the Content Security Policy of
websites, which may interfere with some features of
SeleniumBase, such as loading custom JavaScript
libraries for various testing actions.
Setting this to True (--disable-csp) overrides the
value set in seleniumbase/config/settings.py""",
)
parser.add_option(
"--disable_ws",
"--disable-ws",
"--disable-web-security",
action="store_true",
dest="disable_ws",
default=False,
help="""Using this disables the "Web Security" feature of
Chrome and Chromium-based browsers such as Edge.""",
)
parser.add_option(
"--enable_ws",
"--enable-ws",
"--enable-web-security",
action="store_true",
dest="enable_ws",
default=False,
help="""Using this enables the "Web Security" feature of
Chrome and Chromium-based browsers such as Edge.""",
)
parser.add_option(
"--enable_sync",
"--enable-sync",
action="store_true",
dest="enable_sync",
default=False,
help="""Using this enables the "Chrome Sync" feature.""",
)
parser.add_option(
"--use_auto_ext",
"--use-auto-ext",
"--auto-ext",
action="store_true",
dest="use_auto_ext",
default=False,
help="""Using this enables Chrome's Automation Extension.
It's not required, but some commands & advanced
features may need it.""",
)
parser.add_option(
"--no_sandbox",
"--no-sandbox",
action="store_true",
dest="no_sandbox",
default=False,
help="""Using this enables the "No Sandbox" feature.
(This setting is now always enabled by default.)""",
)
parser.add_option(
"--disable_gpu",
"--disable-gpu",
action="store_true",
dest="disable_gpu",
default=False,
help="""Using this enables the "Disable GPU" feature.
(This setting is now always enabled by default.)""",
)
parser.add_option(
"--remote_debug",
"--remote-debug",
action="store_true",
dest="remote_debug",
default=False,
help="""This enables Chromium's remote debugger.
To access the remote debugging interface, go to:
http://localhost:9222 while Chromedriver is running.
Info: chromedevtools.github.io/devtools-protocol/""",
)
parser.add_option(
"--swiftshader",
action="store_true",
dest="swiftshader",
default=False,
help="""Using this enables the "--use-gl=swiftshader"
feature when running tests on Chrome.""",
)
parser.add_option(
"--incognito",
"--incognito_mode",
"--incognito-mode",
action="store_true",
dest="incognito",
default=False,
help="""Using this enables Chrome's Incognito mode.""",
)
parser.add_option(
"--guest",
"--guest_mode",
"--guest-mode",
action="store_true",
dest="guest_mode",
default=False,
help="""Using this enables Chrome's Guest mode.""",
)
parser.add_option(
"--devtools",
"--open_devtools",
"--open-devtools",
action="store_true",
dest="devtools",
default=False,
help="""Using this opens Chrome's DevTools.""",
)
parser.add_option(
"--maximize_window",
"--maximize-window",
"--maximize",
"--fullscreen",
action="store_true",
dest="maximize_option",
default=False,
help="""The option to start with the web browser maximized.""",
)
parser.add_option(
"--save_screenshot",
"--save-screenshot",
action="store_true",
dest="save_screenshot",
default=False,
help="""(DEPRECATED) - Screenshots are enabled by default now.
This option saves screenshots during test failures.
(Added to the "latest_logs/" folder.)""",
)
parser.add_option(
"--visual_baseline",
"--visual-baseline",
action="store_true",
dest="visual_baseline",
default=False,
help="""Setting this resets the visual baseline for
Automated Visual Testing with SeleniumBase.
When a test calls self.check_window(), it will
rebuild its files in the visual_baseline folder.""",
)
parser.add_option(
"--timeout_multiplier",
"--timeout-multiplier",
action="store",
dest="timeout_multiplier",
default=None,
help="""Setting this overrides the default timeout
by the multiplier when waiting for page elements.
Unused when tests override the default value.""",
)
def configure(self, options, conf):
super(SeleniumBrowser, self).configure(options, conf)
self.enabled = True # Used if test class inherits BaseCase
self.options = options
self.headless_active = False # Default setting
proxy_helper.remove_proxy_zip_if_present()
def beforeTest(self, test):
test.test.browser = self.options.browser
test.test.cap_file = self.options.cap_file
test.test.cap_string = self.options.cap_string
test.test.headless = self.options.headless
test.test.headed = self.options.headed
test.test.locale_code = self.options.locale_code
test.test.interval = self.options.interval
test.test.start_page = self.options.start_page
test.test.protocol = self.options.protocol
test.test.servername = self.options.servername
test.test.port = self.options.port
test.test.user_data_dir = self.options.user_data_dir
test.test.extension_zip = self.options.extension_zip
test.test.extension_dir = self.options.extension_dir
test.test.chromium_arg = self.options.chromium_arg
test.test.firefox_arg = self.options.firefox_arg
test.test.firefox_pref = self.options.firefox_pref
test.test.proxy_string = self.options.proxy_string
test.test.user_agent = self.options.user_agent
test.test.mobile_emulator = self.options.mobile_emulator
test.test.device_metrics = self.options.device_metrics
test.test.time_limit = self.options.time_limit
test.test.slow_mode = self.options.slow_mode
test.test.demo_mode = self.options.demo_mode
test.test.demo_sleep = self.options.demo_sleep
test.test.highlights = self.options.highlights
test.test.message_duration = self.options.message_duration
test.test.js_checking_on = self.options.js_checking_on
test.test.ad_block_on = self.options.ad_block_on
test.test.block_images = self.options.block_images
test.test.verify_delay = self.options.verify_delay # MasterQA
test.test.disable_csp = self.options.disable_csp
test.test.disable_ws = self.options.disable_ws
test.test.enable_ws = self.options.enable_ws
if not self.options.disable_ws:
test.test.enable_ws = True
test.test.enable_sync = self.options.enable_sync
test.test.use_auto_ext = self.options.use_auto_ext
test.test.no_sandbox = self.options.no_sandbox
test.test.disable_gpu = self.options.disable_gpu
test.test.remote_debug = self.options.remote_debug
test.test.swiftshader = self.options.swiftshader
test.test.incognito = self.options.incognito
test.test.guest_mode = self.options.guest_mode
test.test.devtools = self.options.devtools
test.test.maximize_option = self.options.maximize_option
test.test.save_screenshot_after_test = self.options.save_screenshot
test.test.visual_baseline = self.options.visual_baseline
test.test.timeout_multiplier = self.options.timeout_multiplier
test.test.use_grid = False
test.test.dashboard = False
test.test._multithreaded = False
test.test._reuse_session = False
if test.test.servername != "localhost":
# Use Selenium Grid (Use --server="127.0.0.1" for localhost Grid)
test.test.use_grid = True
if "linux" in sys.platform and (
not self.options.headed and not self.options.headless
):
print(
"(Running with --headless on Linux. "
"Use --headed or --gui to override.)"
)
self.options.headless = True
test.test.headless = True
if not self.options.headless:
self.options.headed = True
test.test.headed = True
if self.options.headless:
try:
# from pyvirtualdisplay import Display # Skip for own lib
from seleniumbase.virtual_display.display import Display
self.display = Display(visible=0, size=(1440, 1880))
self.display.start()
self.headless_active = True
except Exception:
# pyvirtualdisplay might not be necessary anymore because
# Chrome and Firefox now have built-in headless displays
pass
sb_config._is_timeout_changed = False
sb_config._SMALL_TIMEOUT = settings.SMALL_TIMEOUT
sb_config._LARGE_TIMEOUT = settings.LARGE_TIMEOUT
# The driver will be received later
self.driver = None
test.test.driver = self.driver
def finalize(self, result):
""" This runs after all tests have completed with nosetests. """
proxy_helper.remove_proxy_zip_if_present()
def afterTest(self, test):
try:
# If the browser window is still open, close it now.
self.driver.quit()
except AttributeError:
pass
except Exception:
pass
if self.options.headless:
if self.headless_active:
try:
self.display.stop()
except AttributeError:
pass
except Exception:
pass
| [
"[email protected]"
] | |
720356dcaeadc6446daf29ea65dc5897ed2614cf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_227/ch25_2020_03_09_19_08_42_543739.py | 28620a9b8ede3883991958f34592cf33371e7b28 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import math
velocidade = float(input("What is the jackfruit's speed? "))
ângulo = float(input("What is the jackfruit's launch angle? "))
distância = (velocidade**2)*(math.sin(math.radians(2*ângulo)))/(9.8)
if distância < 98 :
    print('Too close')
elif 98 <= distância <= 102 :
    print('On target!')
else :
    print('Too far') | [
"[email protected]"
] | |
f8f31a1459502f000a301088ec73cfa9af65dd6d | 673517e68db4b2540ac3a908a6374aaaa72e0f27 | /tests/src/event/MakeBeansProfitEvent.py | 06350a8843655b3b21df11747f98205409c96e13 | [
"Apache-2.0"
] | permissive | buxx/synergine | 3a977b69bc35c1a5af1056f98028f8b7412795d2 | da05d762cdbc993362807d4851e1ca74784438ae | refs/heads/master | 2021-07-03T19:57:24.486164 | 2017-09-04T09:19:45 | 2017-09-04T09:19:45 | 23,734,878 | 6 | 2 | Apache-2.0 | 2021-06-10T14:15:26 | 2014-09-06T13:15:07 | Python | UTF-8 | Python | false | false | 232 | py | from tests.src.event.TestEvent import TestEvent
from tests.src.TestSynergyObject import TestSynergyObject
class MakeBeansProfitEvent(TestEvent):
def _prepare(self, object_id, context, parameters={}):
return parameters | [
"[email protected]"
] | |
4b514a998a289f87349c4cdfda7c3e9cb179742c | f5b4d2f294085565395cb060d0508cc60b331526 | /vi/vn_express/spider.py | da6e99ead84c2fcf983dd2bcea95a235f8a1ba71 | [] | no_license | HaManhDong/crawler_news | 72fd8a18f0fe3358d1033667ee9857755dd07ac8 | 56171952a7bffef26ccfd397b08936ee259d52aa | refs/heads/master | 2021-01-22T20:26:14.036281 | 2016-11-24T03:33:39 | 2016-11-24T03:33:39 | 85,321,142 | 0 | 0 | null | 2017-03-17T14:33:45 | 2017-03-17T14:33:45 | null | UTF-8 | Python | false | false | 5,742 | py | import datetime
import scrapy
from scrapy.exceptions import CloseSpider
from database import database_connection
from database.database_connection import NewsData
class NewspaperItem(scrapy.Item):
url = scrapy.Field()
title = scrapy.Field()
summary = scrapy.Field()
content = scrapy.Field()
class NewsItem:
def __init__(self, url, title, summary, content):
self.url = url
self.title = title
self.summary = summary
self.content = content
class VNExpressNewspaperSpider(scrapy.Spider):
db_session = database_connection.connect_to_database()
name = "VnExpress"
# start_urls = ['http://vnexpress.net/', ]
start_urls = \
[
'http://vnexpress.net/', ]
url_set = set(start_urls)
crawled_page = 0
def parse(self, response):
# title = response.xpath()
news_title = VNExpressNewspaperSpider.get_title(response)
news_time = VNExpressNewspaperSpider.get_time(response)
summary = VNExpressNewspaperSpider.get_summary(response)
content = VNExpressNewspaperSpider.get_content(response)
url = response.url
if news_title is not None and summary is not None and content is not None and news_time is not None:
news_vn_express_data = NewsData(url=url, title=news_title, summary=summary, content=content,
time=news_time)
VNExpressNewspaperSpider.db_session.add(news_vn_express_data)
VNExpressNewspaperSpider.db_session.commit()
# filename = "result.txt"
# with open(filename, 'ab') as outfile:
# outfile.write(json.dumps(NewsItem(url, news_title, summary, content).__dict__).encode("UTF-8"))
# outfile.write("\n")
VNExpressNewspaperSpider.crawled_page += 1
if VNExpressNewspaperSpider.crawled_page > 500:
raise CloseSpider('Search Exceeded 500')
next_link_list = []
href_element = response.xpath("//*[contains(@href,'vnexpress.net')]")
mobile_href_element = response.xpath("//*[contains(@data-mobile-href,'vnexpress.net')]")
data_href_element = response.xpath("//*[contains(@data-href,'vnexpress.net')]")
for link in href_element:
link_url = link.xpath("./@href").extract_first()
if link_url not in VNExpressNewspaperSpider.url_set:
VNExpressNewspaperSpider.url_set.add(link_url)
next_link_list.append(link_url)
for link in mobile_href_element:
link_url = link.xpath("./@data-mobile-href").extract_first()
if link_url not in VNExpressNewspaperSpider.url_set:
VNExpressNewspaperSpider.url_set.add(link_url)
next_link_list.append(link_url)
for link in data_href_element:
link_url = link.xpath("./@data-href").extract_first()
if link_url not in VNExpressNewspaperSpider.url_set:
VNExpressNewspaperSpider.url_set.add(link_url)
next_link_list.append(link_url)
for next_link in next_link_list:
yield scrapy.Request(next_link, callback=self.parse)
# yield scrapy.Request(next_page, callback=self.parse)
#
# with open(filename, 'ab') as f:
# f.write()
@staticmethod
def get_title(response):
news_title_element = response.xpath('//div[@class="title_news"]/h1/text()')
if len(news_title_element) > 0:
return news_title_element.extract_first()
return None
@staticmethod
def get_summary(response):
summary_element = response.xpath('//*[contains(@class,"short_intro")]/text()')
if len(summary_element) > 0:
return summary_element.extract_first()
return None
@staticmethod
def get_content(response):
content_block_element = response.xpath('//*[contains(@class,"fck_detail")]')
if len(content_block_element) <= 0:
content_block_element = response.xpath('//*[contains(@class,"block_content_slide_showdetail")]')
if len(content_block_element) > 0:
return_text = ''
text_nodes = content_block_element[0].xpath(".//*[text()]")
for text_node in text_nodes:
return_text += text_node.xpath("./text()").extract_first()
return return_text
return None
@staticmethod
def get_time(response):
# content_block_element =
# response.xpath("//div[contains(@class, 'block_timer_share') and contains(@class, 'class2')]")
content_block_element = response.xpath("//div[contains(@class, 'block_timer_share')]" +
"/div[contains(@class, 'block_timer')]")
if len(content_block_element) > 0:
try:
datetime_data = content_block_element.xpath("./text()").extract()
date_data = datetime_data[0].split(",")[1].split("/")
time_data = datetime_data[1].split(" ")[0].split(":")
if len(date_data) == 3 and len(time_data) == 2:
try:
check_date = datetime.datetime(
int(date_data[2]), int(date_data[1]), int(date_data[0]),
int(time_data[0]), int(time_data[1]))
return str(check_date.year) + '/' + str(check_date.month) + '/' + str(check_date.day) +\
'/' + str(check_date.hour) + '/' + str(check_date.minute)
except ValueError:
return None
except Exception:
return None
return None
| [
"[email protected]"
] | |
a2ac3234cd4def8f830856f03044bd7161fbd180 | 8ff60c41c1cb387271a19b839ddb4ee951b41057 | /message/migrations/0001_initial.py | 3c0cfe80cb066405982f5f16be2f4526e2d184cb | [] | no_license | ajaypythonmate/codecrux | 579306b7691898fe6bce43ac6ba385bb38c89d90 | 31a0e3b8b92ab3693efe7f053ba3ba94ef614b39 | refs/heads/main | 2023-04-02T09:51:35.834632 | 2021-04-06T04:52:27 | 2021-04-06T04:52:27 | 355,058,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py | # Generated by Django 3.1.3 on 2021-02-01 10:29
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_message_datetime', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 688446), null=True)),
('subject', models.CharField(max_length=1200)),
('start_date_time', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 688602), null=True)),
('end_date_time', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 688628), null=True)),
('participant_web_link', models.CharField(blank=True, max_length=1200, null=True)),
('teamviewr_id', models.CharField(blank=True, max_length=1200, null=True)),
('access_token', models.CharField(blank=True, max_length=1200, null=True)),
('refresh_token', models.CharField(blank=True, max_length=1200, null=True)),
('conference_call_information', models.CharField(blank=True, max_length=1200, null=True)),
('password', models.CharField(blank=True, max_length=1200, null=True)),
('archived_by', models.ManyToManyField(blank=True, null=True, related_name='conversation_archived', to=settings.AUTH_USER_MODEL)),
('includes', models.ManyToManyField(blank=True, null=True, related_name='conversation_includes', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(blank=True, default=datetime.datetime(2021, 2, 1, 10, 29, 54, 691297), null=True)),
('message', models.CharField(max_length=1200)),
('is_read', models.BooleanField(default=False)),
('attachment', models.FileField(blank=True, null=True, upload_to='uploads/')),
('conversation', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='conversation_message', to='message.conversation')),
('read_by', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
('sent_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sent_message', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('date',),
},
),
]
| [
"[email protected]"
] | |
184a87884146cb9b01e533a853cb3669cca251cf | 50a8942417f2124ffeeedb527bb036d3227e4f52 | /src/briefcase/__init__.py | 531000413b17257869aed573d1eded5004db2a08 | [
"BSD-3-Clause"
] | permissive | Viridescence/briefcase | b711b3334ef8e5fad7cded9eb60076437c94972d | a04adcc0091cbcc83d9c12d57bb6cc9fa705de42 | refs/heads/master | 2023-03-09T22:52:42.711239 | 2021-03-01T02:09:28 | 2021-03-01T02:09:28 | 342,579,930 | 0 | 0 | BSD-3-Clause | 2021-02-26T13:12:43 | 2021-02-26T13:12:42 | null | UTF-8 | Python | false | false | 380 | py | __all__ = [
'__version__',
]
# Examples of valid version strings
# __version__ = '1.2.3.dev1' # Development release 1
# __version__ = '1.2.3a1' # Alpha Release 1
# __version__ = '1.2.3b1' # Beta Release 1
# __version__ = '1.2.3rc1' # RC Release 1
# __version__ = '1.2.3' # Final Release
# __version__ = '1.2.3.post1' # Post Release 1
__version__ = '0.3.4'
| [
"[email protected]"
] | |
2ef807073708068bda7360e6552b4bd687dc2821 | b38264a28f59248454972c54d19e4136aa3f181b | /tp3/algorithm_em.py | c1f399bc42fd5dfca969b15fb951c6e87392bae7 | [] | no_license | icannos/machine-learning-medical-data | 0ed8e4c47712daae081c2a8536766415b15505e2 | 8d79db4965a0297e1094f49780e920a75071e4bb | refs/heads/master | 2020-12-15T10:01:46.479424 | 2020-03-18T14:10:34 | 2020-03-18T14:10:34 | 235,068,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,465 | py | import numpy as np
import random
from scipy.stats import multivariate_normal, norm
class myEM:
def __init__(self, n_components=1, dim=1):
self.dim = dim
self.n_components = n_components
self.mu = None
self.sigma = None
self.reset()
def reset(self):
self.sigma = np.random.uniform(-1, 1, size=(self.n_components, self.dim, self.dim))
for i in range(self.n_components):
self.sigma[i] = np.matmul(self.sigma[i], np.transpose(self.sigma[i]))
self.mu = np.random.uniform(-3, 3, size=(self.n_components, self.dim))
def fit(self, data, nb_iteration=100):
# Learning procedure (optimization)
for iter in range(1, nb_iteration):
hat_mu = self.update_mu(data)
hat_sigma = self.update_sigma(data)
self.mu = hat_mu
self.sigma = hat_sigma + 1e-13
def fit_predict(self, X):
self.fit(X)
return self.predict(X)
def predict(self, X):
return np.argmax(self.predict_proba(X), axis=1)
def predict_proba(self, X):
y = []
for i in range(X.shape[0]):
y.append([multivariate_normal(mean=self.mu[j], cov=self.sigma[j]).pdf(X[i])
for j in range(self.n_components)])
return np.array(y)
def update_mu(self, X):
pnk = self.proba_nk(X)
mu = np.zeros((self.n_components, *X.shape[1:]))
for k in range(self.n_components):
mu[k] = np.sum(pnk[:, k].reshape(-1,1)*X, axis=0) / (np.sum(pnk[:, k]).reshape(-1,1)+1E-10)
return mu
def update_sigma(self, X):
sigma = np.zeros((self.n_components, self.dim, self.dim))
pnk = self.proba_nk(X)
for k in range(self.n_components):
sigma[k] = np.cov(np.transpose(X), aweights=pnk[:, k]+1E-10)
return sigma
def proba_x(self, X):
probs = self.predict_proba(X)
probk = self.proba_k(X)
p = np.zeros(X.shape[0])
for k in range(self.n_components):
p += probs[:, k] * probk[k]
return p
def proba_nk(self, X):
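        # E-step responsibilities via Bayes' rule: p(k | x_n) = p(x_n | k) p(k) / p(x_n)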
px = self.proba_x(X)
pk = self.proba_k(X)
p = self.predict_proba(X)
p = p * pk
pnk = p / px.reshape((-1,1))
return pnk
def proba_k(self, X):
probs = self.predict_proba(X)
normalization = np.sum(probs, axis=0)
return normalization / np.sum(normalization)
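
# Illustrative usage sketch (not part of the original lab file): fit a two-component
# mixture to synthetic 2-D data and read back hard assignments. The data sizes, means
# and component count below are arbitrary choices for the demo.
if __name__ == "__main__":
    X_demo = np.vstack([
        np.random.multivariate_normal([0.0, 0.0], np.eye(2), 60),
        np.random.multivariate_normal([3.0, 3.0], np.eye(2), 60),
    ])
    em = myEM(n_components=2, dim=2)
    labels = em.fit_predict(X_demo)  # runs the EM iterations, then hard-assigns points
    print(labels[:10], em.mu)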
| [
"[email protected]"
] | |
23a3ae57a468e1f126e12d677509104c6dd7bca9 | 78d23de227a4c9f2ee6eb422e379b913c06dfcb8 | /Interview-prep/merge_list.py | 5261ca9770974f95bb58bed058d70e103a38271e | [] | no_license | siddharthcurious/Pythonic3-Feel | df145293a3f1a7627d08c4bedd7e22dfed9892c0 | 898b402b7a65073d58c280589342fc8c156a5cb1 | refs/heads/master | 2020-03-25T05:07:42.372477 | 2019-09-12T06:26:45 | 2019-09-12T06:26:45 | 143,430,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | import itertools
L = [[1,2,3], [4,5], [6,7,8], [9,10,11]]
merged = itertools.chain.from_iterable(L)
for a in merged:
print(a) | [
"[email protected]"
] | |
b5a23212cb338feb0206ac68fd0138470c6b4ef2 | 08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2 | /kubernetes/test/test_v2beta1_object_metric_status.py | dbd605d082a20f5aee5371947ac2a7b38d3ef172 | [
"Apache-2.0"
] | permissive | ex3cv/client-python | 5c6ee93dff2424828d064b5a2cdbed3f80b74868 | 2c0bed9c4f653472289324914a8f0ad4cbb3a1cb | refs/heads/master | 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2beta1_object_metric_status import V2beta1ObjectMetricStatus
class TestV2beta1ObjectMetricStatus(unittest.TestCase):
""" V2beta1ObjectMetricStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta1ObjectMetricStatus(self):
"""
Test V2beta1ObjectMetricStatus
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v2beta1_object_metric_status.V2beta1ObjectMetricStatus()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
d07c88d41d056a346ca71e8dd0c968788a908880 | 64b33c5c68b2b668f086fd218a1ce0e381f7fb40 | /binary_image/PASCAL_VOC/NDUDE.py | 59336883d98fe5377da828aee95b6b35171f3a2a | [] | no_license | hongjoon0805/ICE-N-DUDE | ce1f9c498ae490a76259be4d796e252a1ea6dedd | 138e76ca56d39bc88a90a70e9c6d08d1c3a24b39 | refs/heads/master | 2020-04-20T11:43:55.646416 | 2019-02-02T11:50:54 | 2019-02-02T11:50:54 | 148,457,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,724 | py |
# coding: utf-8
# In[1]:
from core import *
from tools import *
import numpy as np
from numpy import *
import os
import tensorflow as tf
import keras as K
import sys
import argparse
# In[4]:
# ICML_2019/image
class State_Estimation_Process:
def __init__(self, n, k, nb_x_classes, nb_z_classes, x, z, param_name = 'test'):
self.n, self.k, self.x, self.z, self.nb_x_classes, self.nb_z_classes = n, k, x, z, nb_x_classes, nb_z_classes
self.param_name = param_name
self.raw_error = error_rate(x,z)
self.C = make_context(z, k, nb_z_classes, n)
self.train_batch_size = 100 + 200 * (nb_x_classes - 2)
self.test_batch_size = 3000
self.epochs = nb_z_classes * 5
def denoise(self, pred_prob): # Estimate latent variables using softmax output
n, k, x, z = self.n, self.k, self.x, self.z
"""
pred_class[0] = Say What You See(s[0]=z[i]) = -1
pred_class[i+1] = Always Say i(s[i+1]=i) = i
"""
# s(z) = z
pred_class = np.argmax(pred_prob, axis = -1) - 1
# mask Say What You see
mask = pred_class == -1
# mask-> Say What You see || others-> 0,1,2,3
x_hat = z[k:n-k] * mask + (mask^1)*pred_class
x_hat = np.hstack((z[:k], x_hat, z[n-k:n]))
error = normalized_error_rate(x,x_hat,self.raw_error)
return error, x_hat
def N_DUDE(self, PI): # Denoising process
n, k, nb_x_classes, nb_z_classes, z, param_name, C = self.n, self.k, self.nb_x_classes, self.nb_z_classes, self.z, self.param_name, self.C
epochs, train_batch_size, test_batch_size = self.epochs, self.train_batch_size, self.test_batch_size
iteration = 3
# fine-tuning the weights from ICE process
L_new = L_NEW(PI, nb_x_classes, nb_z_classes)
Y = make_pseudo_label(z, k, L_new, nb_z_classes, n)
model = ICE_N_DUDE_model(nb_x_classes, nb_z_classes, k, lr = 0.0001)
model.load_weights("weights/"+param_name+".hd5")
# model training...
hist = model.fit(C, Y, epochs=epochs // 2, batch_size=train_batch_size*4, verbose=1, validation_data=(C, Y))
model.load_weights("weights/"+param_name+".hd5")
pred_prob = model.predict(C, batch_size = test_batch_size*4, verbose = 0)
return self.denoise(pred_prob)
# In[ ]:
img_arr = ['2012_000003.jpg', '2012_000004.jpg', '2012_000007.jpg', '2012_000010.jpg', '2012_000014.jpg', '2012_000015.jpg', '2012_000016.jpg', '2012_000019.jpg', '2012_000025.jpg', '2012_000027.jpg', '2012_000028.jpg', '2012_000029.jpg', '2012_000030.jpg', '2012_000031.jpg', '2012_000032.jpg', '2012_000035.jpg', '2012_000036.jpg', '2012_000040.jpg', '2012_000042.jpg', '2012_000044.jpg', '2012_000045.jpg', '2012_000049.jpg', '2012_000050.jpg', '2012_000051.jpg', '2012_000055.jpg', '2012_000056.jpg', '2012_000058.jpg', '2012_000059.jpg', '2012_000060.jpg', '2012_000065.jpg', '2012_000067.jpg', '2012_000069.jpg', '2012_000070.jpg', '2012_000071.jpg', '2012_000072.jpg', '2012_000074.jpg', '2012_000078.jpg', '2012_000083.jpg', '2012_000084.jpg', '2012_000085.jpg', '2012_000086.jpg', '2012_000087.jpg', '2012_000089.jpg', '2012_000100.jpg', '2012_000102.jpg', '2012_000104.jpg', '2012_000105.jpg', '2012_000106.jpg', '2012_000108.jpg', '2012_000113.jpg']
try:
parser = argparse.ArgumentParser()
parser.add_argument("--t", help="PI type", type=int)
parser.add_argument("--i", help="image number: 0~67", type=int)
args = parser.parse_args()
result_name = sys.argv[0]
type_num = args.t
nb_x_classes = 2
nb_z_classes = nb_x_classes
img_num = args.i
k = 50
except:
result_name = "test"
type_num = 1
nb_x_classes = 2
nb_z_classes = nb_x_classes
img_num = 0
k = 50
PI_type_arr = ['20%', '30%', '10%']
# In[ ]:
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='0, 1, 2, 3'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
K.backend.set_session(session)
# In[ ]:
PI_type = PI_type_arr[type_num]
PI = load_channel('true', nb_x_classes, 1, type_num)
print(PI)
f = open('results/' + '%d_'%nb_x_classes + PI_type + '_' + result_name,'a')
x,z = load_img(PI, [img_arr[img_num]])
param_name = "NDUDE_%d"%(type_num)
n = len(x)
print(x[:20])
print(z[:20])
print(n)
print(error_rate(x,z))
# In[ ]:
# State Estimation Process
SE = State_Estimation_Process(n, k, nb_x_classes, nb_z_classes, x, z, param_name = param_name)
error, x_hat = SE.N_DUDE(PI)
f.write("%d %.5f\n"%(img_num, error))
print('%d %.5f'%(img_num, error))
| [
"[email protected]"
] | |
dbbe06ceccb250198a84188d034663571ac01ebe | d423ac8072d7796ed512a41ee0c4221b0c98325c | /web/raw.py | 62e96e88024a6186cce1802c5c7eb0ac52a40929 | [
"Apache-2.0"
] | permissive | MagiCircles/frgl | fa579e965c451434e85b1bd7b1704869906fce2c | f8da4d6ff688862bfef0902e8315160ae1049e0a | refs/heads/master | 2021-05-29T21:15:33.324305 | 2015-11-01T01:15:10 | 2015-11-01T01:15:10 | 42,368,540 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 903 | py |
# -1 means the value is currently unknown
# -2 means the value differs depending on the card
cards_data = {
'C': {
'stages': 1,
'experiences': [0, 117, 417],
'performances': [60, 132, 200],
'levels': [0, 10, 20],
'max_level_reward': 10,
},
'R': {
'stages': 2,
'experiences': [0, 568, 1423, 2858],
'performances': [66, 210, 258, 299],
'levels': [0, 20, 30, 40],
'max_level_reward': 20,
},
'SR': {
'stages': 3,
'experiences': [0, 1532, 3083, 6800, -1],
'performances': [72, 273, 316, 368, 400],
'levels': [0, 30, 40, 55, 70],
'max_level_reward': 30,
},
'UR': {
'stages': 3,
'experiences': [0, 3303, -1, -1, -1],
'performances': [80, 341, -1, -1, -1],
'levels': [0, 40, 55, 80, 100],
'max_level_reward': 40,
},
}
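
# Illustrative helper (not part of the original module): read the top-stage performance
# for a rarity while treating the sentinels documented above (-1 unknown, -2 card-dependent)
# as missing values.
def max_performance(rarity):
    value = cards_data[rarity]['performances'][-1]
    return None if value in (-1, -2) else value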
| [
"[email protected]"
] | |
e2a001631a36104800fc4d40f2e65499ec59a9f7 | b7546fccec3f417ece54d8fd9da369176f9190a8 | /yt_auth.py | 05ffc67d1740ba122cac09613e557a83a5383596 | [] | no_license | Botmasher/yt-captions-languages | 327f396c6357fb4ba1a50087b192c838f9e145d9 | fa71069fabcfac5e6bfb5aaf9dda469b87c38037 | refs/heads/master | 2020-06-12T14:12:29.992320 | 2019-07-01T20:44:15 | 2019-07-01T20:44:15 | 194,325,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # -*- coding: utf-8 -*-
# Instructions for running these code samples locally:
# https://developers.google.com/explorer-help/guides/code_samples#python
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
client_secrets_dir = os.path.dirname(os.path.realpath(__file__))
client_secrets_filename = "client_secrets.json"
def youtube():
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
api_service_name = "youtube"
api_version = "v3"
# Get credentials and create an API client
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(
f"{client_secrets_dir}/{client_secrets_filename}",
scopes
)
credentials = flow.run_console()
youtube = googleapiclient.discovery.build(
api_service_name, api_version, credentials=credentials
)
return youtube
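
# Illustrative usage sketch (not part of the original module): build the client and list
# caption tracks for a placeholder video ID. The videoId value is a stand-in, and
# captions.list may require a broader scope than youtube.readonly.
if __name__ == "__main__":
    client = youtube()
    response = client.captions().list(part="snippet", videoId="VIDEO_ID_HERE").execute()
    print(response)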
| [
"[email protected]"
] | |
e3c8b16e273d41aa26174d2de74d18df804737f3 | 183e4126b2fdb9c4276a504ff3ace42f4fbcdb16 | /II семестр/Дискретна математика/Лаби/2016-17/Мазан 7109/2/Algorythms.py | 4321fec35960e5e539d712a72179c4177fbdbb32 | [] | no_license | Computer-engineering-FICT/Computer-engineering-FICT | ab625e2ca421af8bcaff74f0d37ac1f7d363f203 | 80b64b43d2254e15338060aa4a6d946e8bd43424 | refs/heads/master | 2023-08-10T08:02:34.873229 | 2019-06-22T22:06:19 | 2019-06-22T22:06:19 | 193,206,403 | 3 | 0 | null | 2023-07-22T09:01:05 | 2019-06-22T07:41:22 | HTML | UTF-8 | Python | false | false | 3,467 | py | from Names import names
import networkx as nx
import pylab as plt
from random import sample,randint
class graphs_gen():
def __init__(self,A,B,relation):
self.relation = relation
self.A = A
self.B = B
self.graph = nx.DiGraph()
def define_graph(self):
self.graph.add_nodes_from(self.A)
self.graph.add_nodes_from(self.B)
self.graph.add_edges_from(self.relation)
self.color_list = []
self.pos_list = {}
for i in range(len(self.A)):
self.color_list.append("crimson")
self.pos_list[self.A[i]]=(i,15)
for i in range(len(self.B)):
self.color_list.append("orange")
self.pos_list[self.B[i]] = (i,10)
return self.graph
def draw(self):
self.define_graph()
nx.draw(self.graph,self.pos_list,node_size = 500,node_color = self.color_list,font_size = 10,with_labels = True)
plt.show()
class algorythms:
def __init__(self, A:"list", B:"list"):
self.A = A
self.B = B
self.universal = self.decart_multiplication()
self.male_names = names().male_names
self.fem_names = names().fem_names
self.A_males = []
self.A_females = []
self.relation_S = []
self.children = []
self.godmothers = []
self.godmother()
self.compadre()
#basic operations
def decart_multiplication(self):
multiplication = [(i,j)for i in self.A for j in self.B]
return multiplication
def union(self,relation_1, relation_2):
return list(set(relation_1).union(set(relation_2)))
def intersection(self,relation_1, relation_2):
return list(set(relation_1).intersection(set(relation_2)))
def difference(self,relation_1, relation_2):
return list(set(relation_1).difference(set(relation_2)))
def complement(self,universal,relation):
return list(set(universal).difference(set(relation)))
def inverted_relation(self,relation):
return [(i[1], i[0]) for i in relation]
#operations according to my variant
def godmother(self):
self.A_males = list(set(self.A).intersection(set(self.male_names)))
self.A_females = list(set(self.A).intersection(set(self.fem_names)))
if len(self.A_females)<len(self.B):
self.godmothers = sample(self.A_females,randint(1,len(self.A_females)-1))
else:
self.godmothers = sample(self.A_females,randint(1,len(self.B)-1))
self.relation_S = list(zip(self.godmothers,self.B))
self.children = [i[1] for i in self.relation_S]
return self.relation_S
def compadre(self):
self.unmarked_in_B = list(set(self.B).difference(set(self.children)))
self.B_males = list(set(self.B).intersection(set(self.male_names)))
self.B_females = list(set(self.B).intersection(set(self.fem_names)))
self.fathers = list(set(self.B_males).intersection(self.unmarked_in_B))
self.mothers = list(set(self.B_females).intersection(self.unmarked_in_B))
self.pairs = list(zip(self.mothers,self.fathers))
alpha_R = list(zip(self.godmothers,self.pairs))
self.relation_R = []
#alpha_R represents relation of godmother to pair,
# not relation of godmother to distinct parent
for i in alpha_R:
self.relation_R.append((i[0],i[1][0]))
self.relation_R.append((i[0],i[1][1]))
| [
"[email protected]"
] | |
45e245a4a7fedb645ce234ffef4508e98fc3999e | 7170e8a71c85bf88c43ae7524ffab25cf853b916 | /tests/test_athena_parquet.py | c878725b68b27b5cf81729239b39a1c0b048d876 | [
"Apache-2.0"
] | permissive | ibanmarco/aws-data-wrangler | b18aa898e2c0f33f225c44cdebf11b25f6637f63 | e99937296075c671e5f8a0998b430879c808687d | refs/heads/master | 2022-12-29T21:18:18.351632 | 2020-10-19T19:13:58 | 2020-10-19T19:13:58 | 289,549,549 | 0 | 0 | Apache-2.0 | 2020-10-19T19:13:59 | 2020-08-22T19:01:24 | Python | UTF-8 | Python | false | false | 26,492 | py | import datetime
import logging
import math
import time
from decimal import Decimal
import boto3
import numpy as np
import pandas as pd
import pytest
import awswrangler as wr
from ._utils import ensure_data_types, get_df, get_df_cast, get_df_list
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
def test_parquet_catalog(path, path2, glue_table, glue_table2, glue_database):
with pytest.raises(wr.exceptions.UndetectedType):
wr.s3.to_parquet(
df=pd.DataFrame({"A": [None]}), path=path, dataset=True, database=glue_database, table=glue_table
)
df = get_df_list()
with pytest.raises(wr.exceptions.InvalidArgumentCombination):
wr.s3.to_parquet(
df=df,
path=path[:-1],
use_threads=True,
dataset=False,
mode="overwrite",
database=glue_database,
table=glue_table,
)
wr.s3.to_parquet(
df=df, path=path, use_threads=True, dataset=True, mode="overwrite", database=glue_database, table=glue_table
)
wr.s3.to_parquet(
df=df,
path=path2,
index=True,
use_threads=True,
dataset=True,
mode="overwrite",
database=glue_database,
table=glue_table2,
partition_cols=["iint8", "iint16"],
)
columns_types, partitions_types = wr.s3.read_parquet_metadata(path=path2, dataset=True)
assert len(columns_types) == 18
assert len(partitions_types) == 2
columns_types, partitions_types, partitions_values = wr.s3.store_parquet_metadata(
path=path2, database=glue_database, table=glue_table2, dataset=True
)
assert len(columns_types) == 18
assert len(partitions_types) == 2
assert len(partitions_values) == 2
assert wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table) is True
assert wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table2) is True
@pytest.mark.parametrize("use_threads", [True, False])
@pytest.mark.parametrize("max_rows_by_file", [None, 0, 40, 250, 1000])
@pytest.mark.parametrize("partition_cols", [None, ["par0"], ["par0", "par1"]])
def test_file_size(path, glue_table, glue_database, use_threads, max_rows_by_file, partition_cols):
df = get_df_list()
df = pd.concat([df for _ in range(100)])
paths = wr.s3.to_parquet(
df=df,
path=path,
index=False,
dataset=True,
database=glue_database,
table=glue_table,
max_rows_by_file=max_rows_by_file,
use_threads=use_threads,
partition_cols=partition_cols,
)["paths"]
if max_rows_by_file is not None and max_rows_by_file > 0:
assert len(paths) >= math.floor(300 / max_rows_by_file)
wr.s3.wait_objects_exist(paths, use_threads=use_threads)
df2 = wr.s3.read_parquet(path=path, dataset=True, use_threads=use_threads)
ensure_data_types(df2, has_list=True)
assert df2.shape == (300, 19)
assert df.iint8.sum() == df2.iint8.sum()
df2 = wr.athena.read_sql_table(database=glue_database, table=glue_table, use_threads=use_threads)
ensure_data_types(df2, has_list=True)
assert df2.shape == (300, 19)
assert df.iint8.sum() == df2.iint8.sum()
def test_parquet_catalog_duplicated(path, glue_table, glue_database):
df = pd.DataFrame({"A": [1], "a": [1]})
with pytest.raises(wr.exceptions.InvalidDataFrame):
wr.s3.to_parquet(
df=df, path=path, index=False, dataset=True, mode="overwrite", database=glue_database, table=glue_table
)
def test_parquet_catalog_casting(path, glue_database):
paths = wr.s3.to_parquet(
df=get_df_cast(),
path=path,
index=False,
dataset=True,
mode="overwrite",
database=glue_database,
table="__test_parquet_catalog_casting",
dtype={
"iint8": "tinyint",
"iint16": "smallint",
"iint32": "int",
"iint64": "bigint",
"float": "float",
"double": "double",
"decimal": "decimal(3,2)",
"string": "string",
"date": "date",
"timestamp": "timestamp",
"bool": "boolean",
"binary": "binary",
"category": "double",
"par0": "bigint",
"par1": "string",
},
)["paths"]
wr.s3.wait_objects_exist(paths=paths)
df = wr.s3.read_parquet(path=path)
assert df.shape == (3, 16)
ensure_data_types(df=df, has_list=False)
df = wr.athena.read_sql_table(table="__test_parquet_catalog_casting", database=glue_database, ctas_approach=True)
assert df.shape == (3, 16)
ensure_data_types(df=df, has_list=False)
df = wr.athena.read_sql_table(table="__test_parquet_catalog_casting", database=glue_database, ctas_approach=False)
assert df.shape == (3, 16)
ensure_data_types(df=df, has_list=False)
wr.s3.delete_objects(path=path)
assert wr.catalog.delete_table_if_exists(database=glue_database, table="__test_parquet_catalog_casting") is True
def test_parquet_catalog_casting_to_string_with_null(path, glue_table, glue_database):
data = [{"A": "foo"}, {"A": "boo", "B": "bar"}]
df = pd.DataFrame(data)
paths = wr.s3.to_parquet(
df, path, dataset=True, database=glue_database, table=glue_table, dtype={"A": "string", "B": "string"}
)["paths"]
wr.s3.wait_objects_exist(paths=paths)
df = wr.s3.read_parquet(path=path)
assert df.shape == (2, 2)
for dtype in df.dtypes.values:
assert str(dtype) == "string"
assert pd.isna(df[df["a"] == "foo"].b.iloc[0])
df = wr.athena.read_sql_table(table=glue_table, database=glue_database, ctas_approach=True)
assert df.shape == (2, 2)
for dtype in df.dtypes.values:
assert str(dtype) == "string"
assert pd.isna(df[df["a"] == "foo"].b.iloc[0])
df = wr.athena.read_sql_query(
f"SELECT count(*) as counter FROM {glue_table} WHERE b is NULL ", database=glue_database
)
assert df.counter.iloc[0] == 1
@pytest.mark.parametrize("compression", [None, "gzip", "snappy"])
def test_parquet_compress(path, glue_table, glue_database, compression):
paths = wr.s3.to_parquet(
df=get_df(),
path=path,
compression=compression,
dataset=True,
database=glue_database,
table=glue_table,
mode="overwrite",
)["paths"]
wr.s3.wait_objects_exist(paths=paths)
df2 = wr.athena.read_sql_table(glue_table, glue_database)
ensure_data_types(df2)
df2 = wr.s3.read_parquet(path=path)
wr.s3.delete_objects(path=path)
assert wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table) is True
ensure_data_types(df2)
def test_parquet_char_length(path, glue_database, glue_table):
df = pd.DataFrame(
{"id": [1, 2], "cchar": ["foo", "boo"], "date": [datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)]}
)
wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
mode="overwrite",
partition_cols=["date"],
dtype={"cchar": "char(3)"},
)
df2 = wr.s3.read_parquet(path, dataset=True)
assert len(df2.index) == 2
assert len(df2.columns) == 3
assert df2.id.sum() == 3
df2 = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert len(df2.index) == 2
assert len(df2.columns) == 3
assert df2.id.sum() == 3
@pytest.mark.parametrize("col2", [[1, 1, 1, 1, 1], [1, 2, 3, 4, 5], [1, 1, 1, 1, 2], [1, 2, 2, 2, 2]])
@pytest.mark.parametrize("chunked", [True, 1, 2, 100])
def test_parquet_chunked(path, glue_database, glue_table, col2, chunked):
wr.s3.delete_objects(path=path)
values = list(range(5))
df = pd.DataFrame({"col1": values, "col2": col2})
paths = wr.s3.to_parquet(
df,
path,
index=False,
dataset=True,
database=glue_database,
table=glue_table,
partition_cols=["col2"],
mode="overwrite",
)["paths"]
wr.s3.wait_objects_exist(paths=paths)
dfs = list(wr.s3.read_parquet(path=path, dataset=True, chunked=chunked))
assert sum(values) == pd.concat(dfs, ignore_index=True).col1.sum()
if chunked is not True:
assert len(dfs) == int(math.ceil(len(df) / chunked))
for df2 in dfs[:-1]:
assert chunked == len(df2)
assert chunked >= len(dfs[-1])
else:
assert len(dfs) == len(set(col2))
dfs = list(wr.athena.read_sql_table(database=glue_database, table=glue_table, chunksize=chunked))
assert sum(values) == pd.concat(dfs, ignore_index=True).col1.sum()
if chunked is not True:
assert len(dfs) == int(math.ceil(len(df) / chunked))
for df2 in dfs[:-1]:
assert chunked == len(df2)
assert chunked >= len(dfs[-1])
wr.s3.delete_objects(path=paths)
assert wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table) is True
def test_unsigned_parquet(path, glue_database, glue_table):
wr.s3.delete_objects(path=path)
df = pd.DataFrame({"c0": [0, 0, (2 ** 8) - 1], "c1": [0, 0, (2 ** 16) - 1], "c2": [0, 0, (2 ** 32) - 1]})
df["c0"] = df.c0.astype("uint8")
df["c1"] = df.c1.astype("uint16")
df["c2"] = df.c2.astype("uint32")
paths = wr.s3.to_parquet(
df=df, path=path, dataset=True, database=glue_database, table=glue_table, mode="overwrite"
)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert df.c0.sum() == (2 ** 8) - 1
assert df.c1.sum() == (2 ** 16) - 1
assert df.c2.sum() == (2 ** 32) - 1
schema = wr.s3.read_parquet_metadata(path=path)[0]
assert schema["c0"] == "smallint"
assert schema["c1"] == "int"
assert schema["c2"] == "bigint"
df = wr.s3.read_parquet(path=path)
assert df.c0.sum() == (2 ** 8) - 1
assert df.c1.sum() == (2 ** 16) - 1
assert df.c2.sum() == (2 ** 32) - 1
df = pd.DataFrame({"c0": [0, 0, (2 ** 64) - 1]})
df["c0"] = df.c0.astype("uint64")
with pytest.raises(wr.exceptions.UnsupportedType):
wr.s3.to_parquet(df=df, path=path, dataset=True, database=glue_database, table=glue_table, mode="overwrite")
wr.s3.delete_objects(path=path)
wr.catalog.delete_table_if_exists(database=glue_database, table=glue_table)
def test_parquet_overwrite_partition_cols(path, glue_database, glue_table):
df = pd.DataFrame({"c0": [1, 2, 1, 2], "c1": [1, 2, 1, 2], "c2": [2, 1, 2, 1]})
paths = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
mode="overwrite",
partition_cols=["c2"],
)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert len(df.index) == 4
assert len(df.columns) == 3
assert df.c0.sum() == 6
assert df.c1.sum() == 6
assert df.c2.sum() == 6
paths = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
mode="overwrite",
partition_cols=["c1", "c2"],
)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert len(df.index) == 4
assert len(df.columns) == 3
assert df.c0.sum() == 6
assert df.c1.sum() == 6
assert df.c2.sum() == 6
@pytest.mark.parametrize("partition_cols", [None, ["c2"], ["c1", "c2"]])
def test_store_metadata_partitions_dataset(glue_database, glue_table, path, partition_cols):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [3, 4, 5], "c2": [6, 7, 8]})
paths = wr.s3.to_parquet(df=df, path=path, dataset=True, partition_cols=partition_cols)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
wr.s3.store_parquet_metadata(path=path, database=glue_database, table=glue_table, dataset=True)
df2 = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert len(df.index) == len(df2.index)
assert len(df.columns) == len(df2.columns)
assert df.c0.sum() == df2.c0.sum()
assert df.c1.sum() == df2.c1.astype(int).sum()
assert df.c2.sum() == df2.c2.astype(int).sum()
@pytest.mark.parametrize("partition_cols", [None, ["c2"], ["c1", "c2"]])
def test_store_metadata_partitions_sample_dataset(glue_database, glue_table, path, partition_cols):
num_files = 10
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [3, 4, 5], "c2": [6, 7, 8]})
for _ in range(num_files):
paths = wr.s3.to_parquet(df=df, path=path, dataset=True, partition_cols=partition_cols)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
wr.s3.store_parquet_metadata(
path=path,
database=glue_database,
table=glue_table,
dtype={"c1": "bigint", "c2": "smallint"},
sampling=0.25,
dataset=True,
)
df2 = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert len(df.index) * num_files == len(df2.index)
assert len(df.columns) == len(df2.columns)
assert df.c0.sum() * num_files == df2.c0.sum()
assert df.c1.sum() * num_files == df2.c1.sum()
assert df.c2.sum() * num_files == df2.c2.sum()
@pytest.mark.parametrize("partition_cols", [None, ["c1"], ["c2"], ["c1", "c2"], ["c2", "c1"]])
def test_to_parquet_reverse_partitions(glue_database, glue_table, path, partition_cols):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [3, 4, 5], "c2": [6, 7, 8]})
paths = wr.s3.to_parquet(
df=df, path=path, dataset=True, database=glue_database, table=glue_table, partition_cols=partition_cols
)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df2 = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert df.shape == df2.shape
assert df.c0.sum() == df2.c0.sum()
assert df.c1.sum() == df2.c1.sum()
assert df.c2.sum() == df2.c2.sum()
def test_to_parquet_nested_append(glue_database, glue_table, path):
df = pd.DataFrame(
{
"c0": [[1, 2, 3], [4, 5, 6]],
"c1": [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
"c2": [[["a", "b"], ["c", "d"]], [["e", "f"], ["g", "h"]]],
"c3": [[], [[[[[[[[1]]]]]]]]],
"c4": [{"a": 1}, {"a": 1}],
"c5": [{"a": {"b": {"c": [1, 2]}}}, {"a": {"b": {"c": [3, 4]}}}],
}
)
paths = wr.s3.to_parquet(df=df, path=path, dataset=True, database=glue_database, table=glue_table)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df2 = wr.athena.read_sql_query(sql=f"SELECT c0, c1, c2, c4 FROM {glue_table}", database=glue_database)
assert len(df2.index) == 2
assert len(df2.columns) == 4
paths = wr.s3.to_parquet(df=df, path=path, dataset=True, database=glue_database, table=glue_table)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df2 = wr.athena.read_sql_query(sql=f"SELECT c0, c1, c2, c4 FROM {glue_table}", database=glue_database)
assert len(df2.index) == 4
assert len(df2.columns) == 4
def test_to_parquet_nested_cast(glue_database, glue_table, path):
df = pd.DataFrame({"c0": [[1, 2, 3], [4, 5, 6]], "c1": [[], []], "c2": [{"a": 1, "b": 2}, {"a": 3, "b": 4}]})
paths = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
dtype={"c0": "array<double>", "c1": "array<string>", "c2": "struct<a:bigint, b:double>"},
)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df = pd.DataFrame({"c0": [[1, 2, 3], [4, 5, 6]], "c1": [["a"], ["b"]], "c2": [{"a": 1, "b": 2}, {"a": 3, "b": 4}]})
paths = wr.s3.to_parquet(df=df, path=path, dataset=True, database=glue_database, table=glue_table)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=False)
df2 = wr.athena.read_sql_query(sql=f"SELECT c0, c2 FROM {glue_table}", database=glue_database)
assert len(df2.index) == 4
assert len(df2.columns) == 2
def test_parquet_catalog_casting_to_string(path, glue_table, glue_database):
for df in [get_df(), get_df_cast()]:
paths = wr.s3.to_parquet(
df=df,
path=path,
index=False,
dataset=True,
mode="overwrite",
database=glue_database,
table=glue_table,
dtype={
"iint8": "string",
"iint16": "string",
"iint32": "string",
"iint64": "string",
"float": "string",
"double": "string",
"decimal": "string",
"string": "string",
"date": "string",
"timestamp": "string",
"timestamp2": "string",
"bool": "string",
"binary": "string",
"category": "string",
"par0": "string",
"par1": "string",
},
)["paths"]
wr.s3.wait_objects_exist(paths=paths)
df = wr.s3.read_parquet(path=path)
assert df.shape == (3, 16)
for dtype in df.dtypes.values:
assert str(dtype) == "string"
df = wr.athena.read_sql_table(table=glue_table, database=glue_database, ctas_approach=True)
assert df.shape == (3, 16)
for dtype in df.dtypes.values:
assert str(dtype) == "string"
df = wr.athena.read_sql_table(table=glue_table, database=glue_database, ctas_approach=False)
assert df.shape == (3, 16)
for dtype in df.dtypes.values:
assert str(dtype) == "string"
@pytest.mark.parametrize("use_threads", [True, False])
@pytest.mark.parametrize("partition_cols", [["c2"], ["c1", "c2"]])
def test_read_parquet_filter_partitions(path, glue_table, glue_database, use_threads, partition_cols):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [0, 1, 2], "c2": [0, 1, 2]})
paths = wr.s3.to_parquet(
df,
path,
dataset=True,
partition_cols=partition_cols,
use_threads=use_threads,
table=glue_table,
database=glue_database,
)["paths"]
wr.s3.wait_objects_exist(paths=paths, use_threads=use_threads)
for i in range(3):
df2 = wr.s3.read_parquet_table(
table=glue_table,
database=glue_database,
partition_filter=lambda x: True if x["c2"] == str(i) else False,
use_threads=use_threads,
)
assert df2.shape == (1, 3)
assert df2.c0.iloc[0] == i
assert df2.c1.iloc[0] == i
assert df2.c2.iloc[0] == i
@pytest.mark.parametrize("use_threads", [True, False])
def test_read_parquet_mutability(path, glue_table, glue_database, use_threads):
sql = "SELECT timestamp '2012-08-08 01:00' AS c0"
df = wr.athena.read_sql_query(sql, "default", use_threads=use_threads)
df["c0"] = df["c0"] + pd.DateOffset(months=-2)
assert df.c0[0].value == 1339117200000000000
def test_glue_number_of_versions_created(path, glue_table, glue_database):
df = pd.DataFrame({"c0": [0, 1, 2], "c1": [0, 1, 2]})
for _ in range(5):
wr.s3.to_parquet(
df, path, dataset=True, table=glue_table, database=glue_database,
)
assert wr.catalog.get_table_number_of_versions(table=glue_table, database=glue_database) == 1
def test_sanitize_index(path, glue_table, glue_database):
df = pd.DataFrame({"id": [1, 2], "DATE": [datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)]})
df.set_index("DATE", inplace=True, verify_integrity=True)
wr.s3.to_parquet(df, path, dataset=True, index=True, database=glue_database, table=glue_table, mode="overwrite")
df = pd.DataFrame({"id": [1, 2], "DATE": [datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)]})
df.set_index("DATE", inplace=True, verify_integrity=True)
wr.s3.to_parquet(df, path, dataset=True, index=True, database=glue_database, table=glue_table, mode="append")
df2 = wr.athena.read_sql_table(database=glue_database, table=glue_table)
assert df2.shape == (4, 2)
assert df2.id.sum() == 6
assert list(df2.columns) == ["id", "date"]
def test_to_parquet_sanitize(path, glue_database):
df = pd.DataFrame({"C0": [0, 1], "camelCase": [2, 3], "c**--2": [4, 5]})
table_name = "TableName*!"
paths = wr.s3.to_parquet(
df, path, dataset=True, database=glue_database, table=table_name, mode="overwrite", partition_cols=["c**--2"]
)["paths"]
wr.s3.wait_objects_exist(paths)
df2 = wr.athena.read_sql_table(database=glue_database, table=table_name)
wr.catalog.delete_table_if_exists(database=glue_database, table="table_name_")
assert df.shape == df2.shape
assert list(df2.columns) == ["c0", "camel_case", "c_2"]
assert df2.c0.sum() == 1
assert df2.camel_case.sum() == 5
assert df2.c_2.sum() == 9
def test_schema_evolution_disabled(path, glue_table, glue_database):
wr.s3.to_parquet(
df=pd.DataFrame({"c0": [1]}),
path=path,
dataset=True,
database=glue_database,
table=glue_table,
schema_evolution=False,
)
with pytest.raises(wr.exceptions.InvalidArgumentValue):
wr.s3.to_parquet(
df=pd.DataFrame({"c0": [2], "c1": [2]}),
path=path,
dataset=True,
database=glue_database,
table=glue_table,
schema_evolution=False,
)
paths = wr.s3.to_parquet(
df=pd.DataFrame({"c0": [2]}),
path=path,
dataset=True,
database=glue_database,
table=glue_table,
schema_evolution=False,
)["paths"]
wr.s3.wait_objects_exist(paths)
df2 = wr.athena.read_sql_table(database=glue_database, table=glue_table)
assert df2.shape == (2, 1)
assert df2.c0.sum() == 3
def test_date_cast(path, glue_table, glue_database):
df = pd.DataFrame(
{
"c0": [
datetime.date(4000, 1, 1),
datetime.datetime(2000, 1, 1, 10),
"2020",
"2020-01",
1,
None,
pd.NA,
pd.NaT,
np.nan,
np.inf,
]
}
)
df_expected = pd.DataFrame(
{
"c0": [
datetime.date(4000, 1, 1),
datetime.date(2000, 1, 1),
datetime.date(2020, 1, 1),
datetime.date(2020, 1, 1),
datetime.date(1970, 1, 1),
None,
None,
None,
None,
None,
]
}
)
paths = wr.s3.to_parquet(
df=df, path=path, dataset=True, database=glue_database, table=glue_table, dtype={"c0": "date"}
)["paths"]
wr.s3.wait_objects_exist(paths)
df2 = wr.s3.read_parquet(path=path)
assert df_expected.equals(df2)
df3 = wr.athena.read_sql_table(database=glue_database, table=glue_table)
assert df_expected.equals(df3)
@pytest.mark.parametrize("use_threads", [True, False])
@pytest.mark.parametrize("partition_cols", [None, ["par0"], ["par0", "par1"]])
def test_partitions_overwrite(path, glue_table, glue_database, use_threads, partition_cols):
df = get_df_list()
wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
use_threads=use_threads,
partition_cols=partition_cols,
mode="overwrite_partitions",
)
paths = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
use_threads=use_threads,
partition_cols=partition_cols,
mode="overwrite_partitions",
)["paths"]
wr.s3.wait_objects_exist(paths, use_threads=use_threads)
df2 = wr.athena.read_sql_table(database=glue_database, table=glue_table, use_threads=use_threads)
ensure_data_types(df2, has_list=True)
assert df2.shape == (3, 19)
assert df.iint8.sum() == df2.iint8.sum()
@pytest.mark.parametrize("use_threads", [True, False])
def test_empty_column(path, glue_table, glue_database, use_threads):
df = pd.DataFrame({"c0": [1, 2, 3], "c1": [None, None, None], "par": ["a", "b", "c"]})
df["c0"] = df["c0"].astype("Int64")
df["par"] = df["par"].astype("string")
with pytest.raises(wr.exceptions.UndetectedType):
wr.s3.to_parquet(df, path, dataset=True, table=glue_table, database=glue_database, partition_cols=["par"])
@pytest.mark.parametrize("use_threads", [True, False])
def test_mixed_types_column(path, glue_table, glue_database, use_threads):
df = pd.DataFrame({"c0": [1, 2, 3], "c1": [1, 2, "foo"], "par": ["a", "b", "c"]})
df["c0"] = df["c0"].astype("Int64")
df["par"] = df["par"].astype("string")
with pytest.raises(TypeError):
wr.s3.to_parquet(df, path, dataset=True, table=glue_table, database=glue_database, partition_cols=["par"])
@pytest.mark.parametrize("use_threads", [True, False])
def test_failing_catalog(path, glue_table, glue_database, use_threads):
df = pd.DataFrame({"c0": [1, 2, 3]})
try:
wr.s3.to_parquet(df, path, max_rows_by_file=1, dataset=True, table=glue_table, database="foo")
except boto3.client("glue").exceptions.EntityNotFoundException:
pass
time.sleep(3)
assert len(wr.s3.list_objects(path)) == 0
def test_cast_decimal(path, glue_table, glue_database):
df = pd.DataFrame(
{"c0": [100.1], "c1": ["100.1"], "c2": [Decimal((0, (1, 0, 0, 1), -1))], "c3": [Decimal((0, (1, 0, 0, 1), -1))]}
)
paths = wr.s3.to_parquet(
df=df,
path=path,
dataset=True,
database=glue_database,
table=glue_table,
dtype={"c0": "decimal(4,1)", "c1": "decimal(4,1)", "c2": "decimal(4,1)", "c3": "string"},
)["paths"]
wr.s3.wait_objects_exist(paths=paths)
df2 = wr.athena.read_sql_table(table=glue_table, database=glue_database)
assert df2.shape == (1, 4)
assert df2["c0"].iloc[0] == Decimal((0, (1, 0, 0, 1), -1))
assert df2["c1"].iloc[0] == Decimal((0, (1, 0, 0, 1), -1))
assert df2["c2"].iloc[0] == Decimal((0, (1, 0, 0, 1), -1))
assert df2["c3"].iloc[0] == "100.1"
| [
"[email protected]"
] | |
df4bd70ae363af51471337895bdf0d0e88de410a | e78154abbb8bacf5afccda9da371684cbeabad36 | /envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/web2/dav/method/put.py | 17bbf984ea913e62fbe9dbcba0185cc46d7d0d2f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | enterstudio/popego | 1a196fabc374c0f45764e5c74bd7752236424040 | 2d09e793d9d2f297139edb325b8a70ddda9b2705 | refs/heads/master | 2021-04-09T16:39:40.781634 | 2016-10-14T16:53:47 | 2016-10-14T16:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,519 | py | # -*- test-case-name: twisted.web2.dav.test.test_put -*-
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, [email protected]
##
"""
WebDAV PUT method
"""
__all__ = ["preconditions_PUT", "http_PUT"]
from twisted.python import log
from twisted.web2 import responsecode
from twisted.web2.http import HTTPError, StatusResponse
from twisted.web2.dav.fileop import put
def preconditions_PUT(self, request):
if self.fp.exists():
if not self.fp.isfile():
log.err("Unable to PUT to non-file: %s" % (self.fp.path,))
raise HTTPError(StatusResponse(
responsecode.FORBIDDEN,
"The requested resource exists but is not backed by a regular file."
))
resource_is_new = False
else:
if not self.fp.parent().isdir():
log.err("No such directory: %s" % (self.fp.path,))
raise HTTPError(StatusResponse(
responsecode.CONFLICT,
"Parent collection resource does not exist."
))
resource_is_new = True
#
# HTTP/1.1 (RFC 2068, section 9.6) requires that we respond with a Not
# Implemented error if we get a Content-* header which we don't
# recognize and handle properly.
#
for header, value in request.headers.getAllRawHeaders():
if header.startswith("Content-") and header not in (
#"Content-Base", # Doesn't make sense in PUT?
#"Content-Encoding", # Requires that we decode it?
"Content-Language",
"Content-Length",
#"Content-Location", # Doesn't make sense in PUT?
"Content-MD5",
#"Content-Range", # FIXME: Need to implement this
"Content-Type",
):
log.err("Client sent unrecognized content header in PUT request: %s"
% (header,))
raise HTTPError(StatusResponse(
responsecode.NOT_IMPLEMENTED,
"Unrecognized content header %r in request." % (header,)
))
def http_PUT(self, request):
"""
Respond to a PUT request. (RFC 2518, section 8.7)
"""
log.msg("Writing request stream to %s" % (self.fp.path,))
#
# Don't pass in the request URI, since PUT isn't specified to be able
# to return a MULTI_STATUS response, which is WebDAV-specific (and PUT is
# not).
#
return put(request.stream, self.fp)
| [
"[email protected]"
] | |
12ff03f8aed23af658ae9feafae2a89b573e0010 | 0ec51a4bf5013e8601d5fa621f6cde88eeb7b4b5 | /CUDA_Python/课程3/solutions/col_sums_solution.py | be380447aa903d7bf52c47c924b00125a632c2dd | [
"MIT"
] | permissive | bobo0810/PytorchNetHub | 6228b0dd69ab2b22b81867c99262a0b1675759a1 | 15312703bc5bf2e1771de1c96402366fa3e727d4 | refs/heads/master | 2023-08-30T14:35:59.369145 | 2023-08-14T03:22:58 | 2023-08-14T03:22:58 | 132,425,167 | 383 | 95 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | from numba import cuda  # import added so the @cuda.jit decorator below resolves
@cuda.jit
def col_sums(a, sums, ds):
idx = cuda.grid(1)
sum = 0.0
for i in range(ds):
sum += a[i][idx]
sums[idx] = sum
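# ---------------------------------------------------------------------------
# Host-side usage sketch (added for illustration; not part of the original
# solution file).  It assumes NumPy is installed and a CUDA-capable GPU is
# visible to Numba, and it launches exactly one thread per column.
if __name__ == "__main__":
    import numpy as np

    n_rows, n_cols = 1024, 1024
    a = np.random.rand(n_rows, n_cols).astype(np.float32)
    sums = np.zeros(n_cols, dtype=np.float32)

    d_a = cuda.to_device(a)            # copy the input matrix to the device
    d_sums = cuda.to_device(sums)      # output vector, one slot per column

    threads_per_block = 256
    blocks = n_cols // threads_per_block   # n_cols chosen as a multiple of 256
    col_sums[blocks, threads_per_block](d_a, d_sums, n_rows)

    np.testing.assert_allclose(d_sums.copy_to_host(), a.sum(axis=0), rtol=1e-3)
    print("column sums match the NumPy reference")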
| [
"[email protected]"
] | |
91ab54c692b193ef15b70f550d25cc8e939e0c69 | f68cd225b050d11616ad9542dda60288f6eeccff | /testscripts/RDKB/component/CMAgent/TS_CMAGENT_GetParamValues.py | 1f450f59ff0d7bd463c9d4fb7aac9399f70ba71d | [
"Apache-2.0"
] | permissive | cablelabs/tools-tdkb | 18fb98fadcd169fa9000db8865285fbf6ff8dc9d | 1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69 | refs/heads/master | 2020-03-28T03:06:50.595160 | 2018-09-04T11:11:00 | 2018-09-05T00:24:38 | 147,621,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,681 | py | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>9</version>
<name>TS_CMAGENT_GetParamValues</name>
<primitive_test_id/>
<primitive_test_name>CMAgent_Get</primitive_test_name>
<primitive_test_version>2</primitive_test_version>
<status>FREE</status>
<synopsis>TC_CMAGENT_6 :: Get Parameter API Validation for CM Agent</synopsis>
<groups_id>4</groups_id>
<execution_time>2</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMAGENT_5</test_case_id>
<test_objective>To Validate
Get Param Values Function for
CM Agent</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
TS_CMAGENT_GetParamValues
Input
1.Parameter Path (paramName)( eg: "Device.X_CISCO_COM_CableModem." )</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(TS_CMAGENT_GetParamValues - func name - "If not exists already"
cmagent - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overridden automatically by Test Manager with provided arguments in configure page (TS_CMAGENT_GetParamValues.py)
3.Execute the generated Script(TS_CMAGENT_GetParamValues.py) using the execution page of Test Manager GUI
4.cmagentstub, which is a part of the TDK Agent process, will be in listening mode to execute the TDK Component function named TS_CMAGENT_GetParamValues through the registered TDK cmagentstub function along with the necessary Path Name as arguments
5.TS_CMAGENT_GetParamValues function will call the Ccsp Base Function named "CcspBaseIf_getParameterValues", which in turn will call the CM Agent Library Function "CcspCcMbi_GetParameterValues" along with the provided path name
6.Response(s)(printf) from TDK Component,Ccsp Library function and cmagentstub would be logged in Agent Console log based on the debug info redirected to agent console.
7.cmagentstub will validate the available result (from agent console log and Pointer to instance as updated) with expected result (Eg:"Values for Requested Param" along with info) and the same is updated to agent console log.
8.TestManager will publish the result in GUI as PASS/FAILURE based on the response from cmagentstub.</automation_approch>
<except_output>CheckPoint 1:
Parameter values from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution page</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_CMAGENT_GetParamValues</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmagent","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMAGENT_GetParamValues');
#Get the result of connection with test component and STB
loadModuleresult =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadModuleresult;
loadStatusExpected = "SUCCESS"
if loadStatusExpected not in loadModuleresult.upper():
print "[Failed To Load CM Agent Stub from env TDK Path]"
print "[Exiting the Script]"
exit();
expectedresult = "SUCCESS";
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CMAgent_Get');
#Input Parameters
tdkTestObj.addParameter("paramName","Device.X_CISCO_COM_CableModem.");
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution as success
tdkTestObj.setResultStatus("SUCCESS");
else:
#Set the result status of execution as failure
tdkTestObj.setResultStatus("FAILURE");
print "[TEST EXECUTION RESULT] : %s" %resultDetails ;
obj.unloadModule("cmagent");
| [
"[email protected]"
] | |
d1c71a3a09bf1ab60e9939f0fc2e6fa3d0844338 | 10d98fecb882d4c84595364f715f4e8b8309a66f | /rouge/test_util.py | 041dfba5fa55d03239266f5a3c3157d65409c920 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 1,310 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils for ROUGE."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
_TESTDATA_PREFIX = os.path.join(os.path.dirname(__file__), "testdata")
TARGETS_FILE = os.path.join(_TESTDATA_PREFIX, "target.txt")
PREDICTIONS_FILE = os.path.join(_TESTDATA_PREFIX, "prediction.txt")
LARGE_TARGETS_FILE = os.path.join(_TESTDATA_PREFIX, "target_large.txt")
LARGE_PREDICTIONS_FILE = os.path.join(_TESTDATA_PREFIX, "prediction_large.txt")
DELIMITED_FILE = os.path.join(_TESTDATA_PREFIX, "delimited.txt")
PYROUGE_DIR = os.path.join(_TESTDATA_PREFIX, "pyrouge_files")
def get_text(fname):
with open(fname) as f:
return f.read()
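# Illustrative usage (added; not part of the original util).  It assumes the
# testdata files referenced above ship alongside this module, as they do in
# the google-research rouge package.
if __name__ == "__main__":
  targets = get_text(TARGETS_FILE).splitlines()
  predictions = get_text(PREDICTIONS_FILE).splitlines()
  print("targets: %d lines, predictions: %d lines" % (len(targets), len(predictions)))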
| [
"[email protected]"
] | |
2825a5aac7cc59f516e43a1498d6e501bcb4853d | e0fc7493f4339145792f54bcd7124acea500ca45 | /cpc/ir/Case.py | 9e62727bce0a942a4cb172aec455d5c2ebb9d7ae | [
"BSD-3-Clause"
] | permissive | U-Ar/Cpresto | d52d99e8d44ed01c87c8911614d744cae695d6aa | f723458fb237c9e3e8bc8a6afdf7c81858a65363 | refs/heads/main | 2023-05-14T15:28:38.449783 | 2021-06-06T15:07:14 | 2021-06-06T15:07:14 | 364,445,894 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from .IRDumpable import IRDumpable
class Case(IRDumpable):
def __init__(self,value,label):
self._value = value
self._label = label
def dump(self, dumper):
dumper.print_class(self)
dumper.print_member("value",self._value)
dumper.print_member("label",self._label) | [
"[email protected]"
] | |
4e201647bd6eeb238b8a73851f58decd7d0a62c2 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/BEGEMOT-MIB.py | d439eec5efeb941a97f775f616d9774521e94746 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 2,166 | py | #
# PySNMP MIB module BEGEMOT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BEGEMOT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:36:58 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
fokus, = mibBuilder.importSymbols("FOKUS-MIB", "fokus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, iso, Counter32, Integer32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, IpAddress, Gauge32, Bits, NotificationType, ModuleIdentity, ObjectIdentity, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "iso", "Counter32", "Integer32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "IpAddress", "Gauge32", "Bits", "NotificationType", "ModuleIdentity", "ObjectIdentity", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
begemot = ModuleIdentity((1, 3, 6, 1, 4, 1, 12325, 1))
if mibBuilder.loadTexts: begemot.setLastUpdated('200201300000Z')
if mibBuilder.loadTexts: begemot.setOrganization('Fraunhofer FOKUS, CATS')
if mibBuilder.loadTexts: begemot.setContactInfo(' Hartmut Brandt Postal: Fraunhofer Institute for Open Communication Systems Kaiserin-Augusta-Allee 31 10589 Berlin Germany Fax: +49 30 3463 7352 E-mail: [email protected]')
if mibBuilder.loadTexts: begemot.setDescription('The root of the Begemot subtree of the fokus tree.')
mibBuilder.exportSymbols("BEGEMOT-MIB", begemot=begemot, PYSNMP_MODULE_ID=begemot)
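# Illustrative note (added): this generated module is not imported directly;
# pysnmp's MibBuilder injects the `mibBuilder` global when it loads the file.
# A minimal sketch, assuming the module sits on the MIB search path:
#
#   from pysnmp.smi import builder
#   mib_builder = builder.MibBuilder()
#   mib_builder.loadModules('BEGEMOT-MIB')
#   begemot, = mib_builder.importSymbols('BEGEMOT-MIB', 'begemot')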
| [
"[email protected]"
] | |
4f1a576b5379b63b33bfb71ad1943f8617ab700a | 5eaef75ca4638702e79863fff688394d8afdd7bc | /testRunner-res.py | 4b1bbc797c93a0c1b3d5a3daf1903873be7efbe3 | [
"MIT"
] | permissive | tejastank/domainThing | 34045004332947e8ca881b0d0814d1118033b76a | 28e1d3f6fc23d88a0487fb85a102b80c6a135117 | refs/heads/master | 2021-05-30T15:39:39.501489 | 2016-03-17T23:26:42 | 2016-03-17T23:26:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from domainParser import domainReader
from domainResolver2 import domainResolver
from domainConsumer import domainConsumer
import logging
from multiprocessing import Process, current_process, active_children
if __name__ == "__main__":
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(3)
print("Starting to resolve")
resolver = domainResolver()
resolver.singleRun()
print("===============================")
# reader = domainReader('verisign', 1)
# Process(target=reader.getZoneFiles).start()
# reader.getZoneFiles()
# reader.getZoneFiles()
print("===============================")
#consumer = domainConsumer("testing-1")
#consumer.singleRun()
print("DONE")
# active_children()
# exit(0)
| [
"[email protected]"
] | |
9a975581de93e01be4828e18a85b4e49b68404c0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /b36bBpsnzyDbd4mzF_0.py | 370edaffbb2d97407a6bf21f3389a97d8d057256 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | """
Create a function that calculates the chance of being an imposter. The formula
for the chances of being an imposter is `100 × (i / p)` where `i` is the
imposter count and `p` is the player count. Make sure to round the value to
the nearest integer and return the value as a percentage.
### Examples
imposter_formula(1, 10) ➞ "10%"
imposter_formula(2, 5) ➞ "40%"
imposter_formula(1, 8) ➞ "13%"
### Notes
The player limit is `10` and the imposter count can only go up to `3`.
"""
def imposter_formula(i, p):
    # Round half up so 1/8 -> "13%", matching the docstring examples above
    # ('{:.0%}' uses round-half-to-even and would give "12%").
    return '{}%'.format(int(100 * i / p + 0.5))
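# Quick self-check (added for illustration): the expected strings are the
# examples given in the docstring above.
if __name__ == "__main__":
    assert imposter_formula(1, 10) == "10%"
    assert imposter_formula(2, 5) == "40%"
    assert imposter_formula(1, 8) == "13%"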
| [
"[email protected]"
] | |
df939de128f6f9c89f7761cd09c1e121e032004c | 37e47457fa95ef39ce18d618c6150bdf3a287161 | /python/mono_stack/s42_dp.py | d5089c39aa7a84d5c89d57c71d0d7572b51e4f6d | [] | no_license | TQCAI/Algorithm | 7e9c05055b02748c92e134861c78a74857346465 | 82a6cfdee5f02d56b884cb65872c3d820146ba7b | refs/heads/master | 2023-03-19T08:42:00.423944 | 2021-03-11T04:10:12 | 2021-03-11T04:10:12 | 305,140,106 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | from typing import List
class Solution:
def trap(self, height: List[int]) -> int:
sum = 0
n = len(height)
        max_left = [0] * n    # max_left[i]  = tallest bar in height[:i]
        max_right = [0] * n   # max_right[i] = tallest bar in height[i+1:]
for i in range(1, n - 1):
max_left[i] = max(max_left[i - 1], height[i - 1])
for i in reversed(range(1, n - 1)): # range(n - 2, 0, -1):
max_right[i] = max(max_right[i + 1], height[i + 1])
for i in range(1, n - 1):
min_height = min(max_left[i], max_right[i])
sum += max(0, min_height - height[i])
return sum
ans = Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1])
print(ans)  # 6 units of trapped rain water for the sample elevation map
| [
"[email protected]"
] | |
7b1c9612fb7a17754a7b6b29764c37752e059f84 | 8fe4f275cfc1e4f81addcde950c7d50582ddab6b | /SellerUI/migrations/0006_vehicle_bought.py | 404a56d72ae46afeb490603db671f0b031ad4863 | [] | no_license | strange-hawk/ResaleValuePrediction | 5d09b075fcd8367198f02f69efa0402b435b3531 | c0e6cec56e0c3bcf5dcb7e3b46a7ee9152893382 | refs/heads/master | 2022-11-11T04:50:23.610331 | 2020-06-26T17:00:35 | 2020-06-26T17:00:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # Generated by Django 3.0.5 on 2020-06-26 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SellerUI', '0005_auto_20200626_1807'),
]
operations = [
migrations.AddField(
model_name='vehicle',
name='bought',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
94eec03dcff4ef92243ac73ed5bd0b161c8f41f1 | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/netapp_e_asup.py | bb82400d45423fc844e431e87b2d50a769a4f04e | [
"CC0-1.0",
"GPL-1.0-or-later",
"MIT",
"GPL-3.0-or-later",
"GPL-3.0-only"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 11,242 | py | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_asup
short_description: NetApp E-Series manage auto-support settings
description:
- Allow the auto-support settings to be configured for an individual E-Series storage-system
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.ontap.netapp.eseries
options:
state:
description:
- Enable/disable the E-Series auto-support configuration.
- When this option is enabled, configuration, logs, and other support-related information will be relayed
to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
be collected.
default: enabled
choices:
- enabled
- disabled
aliases:
- asup
- auto_support
- autosupport
active:
description:
- Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
possible that the bundle did not contain all of the required information at the time of the event.
Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
              of support data in order to resolve the problem.
- Only applicable if I(state=enabled).
default: yes
type: bool
start:
description:
- A start hour may be specified in a range from 0 to 23 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- start_time
default: 0
end:
description:
- An end hour may be specified in a range from 1 to 24 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- end_time
default: 24
days:
description:
- A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
of the provided days.
choices:
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- sunday
required: no
aliases:
- days_of_week
- schedule_days
verbose:
description:
- Provide the full ASUP configuration in the return.
default: no
required: no
type: bool
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
disabled if desired.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
'''
EXAMPLES = """
- name: Enable ASUP and allow pro-active retrieval of bundles
netapp_e_asup:
state: enabled
active: yes
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
netapp_e_asup:
start: 17
end: 20
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
asup:
description:
- True if ASUP is enabled.
returned: on success
sample: True
type: bool
active:
description:
- True if the active option has been enabled.
returned: on success
sample: True
type: bool
cfg:
description:
- Provide the full ASUP configuration.
returned: on success when I(verbose=true).
type: complex
contains:
asupEnabled:
description:
- True if ASUP has been enabled.
type: bool
onDemandEnabled:
description:
- True if ASUP active monitoring has been enabled.
type: bool
daysOfWeek:
description:
- The days of the week that ASUP bundles will be sent.
type: list
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Asup(object):
DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
choices=['enabled', 'disabled']),
active=dict(type='bool', required=False, default=True, ),
days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
choices=self.DAYS_OPTIONS),
start=dict(type='int', required=False, default=0, aliases=['start_time']),
end=dict(type='int', required=False, default=24, aliases=['end_time']),
verbose=dict(type='bool', required=False, default=False),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.asup = args['state'] == 'enabled'
self.active = args['active']
self.days = args['days']
self.start = args['start']
self.end = args['end']
self.verbose = args['verbose']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.start >= self.end:
self.module.fail_json(msg="The value provided for the start time is invalid."
" It must be less than the end time.")
if self.start < 0 or self.start > 23:
self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
else:
self.start = self.start * 60
if self.end < 1 or self.end > 24:
self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
else:
self.end = min(self.end * 60, 1439)
if not self.days:
self.days = self.DAYS_OPTIONS
def get_configuration(self):
try:
(rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
if not (result['asupCapable'] and result['onDemandCapable']):
self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.asup:
body = dict(asupEnabled=True)
if not config['asupEnabled']:
update = True
if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
update = True
body.update(dict(onDemandEnabled=self.active,
remoteDiagsEnabled=self.active))
self.days.sort()
config['schedule']['daysOfWeek'].sort()
body['schedule'] = dict(daysOfWeek=self.days,
dailyMinTime=self.start,
dailyMaxTime=self.end,
weeklyMinTime=self.start,
weeklyMaxTime=self.end)
if self.days != config['schedule']['daysOfWeek']:
update = True
if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
update = True
elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
update = True
elif config['asupEnabled']:
body = dict(asupEnabled=False)
update = True
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'device-asup', method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_configuration()
cfg = self.get_configuration()
if self.verbose:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
else:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Asup()
settings()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
b83fe49dd4b24404a9c5d47f5d730ca86340f1e2 | ef701a68d9cd1f1f2e3699cc0b98b138eeb11934 | /job.py | f335dbe5a778b9003eb95d2d69d87ff2dc514f8d | [] | no_license | yangxuserene/BBSimulator | b920344bc8abb78835a4ff5f5a17d516bbb241bd | 684822d275bb4d180016a9e1a4f4673f7d3755b3 | refs/heads/master | 2021-01-17T12:30:47.256223 | 2016-02-23T19:54:02 | 2016-02-23T19:54:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,068 | py | #!/usr/bin/env python
from enum import Enum
import logging
class BBJobTimeStamp(object):
"""timing statistics"""
def __init__(self, submit):
super(BBJobTimeStamp, self).__init__()
self.submit = float(submit) # when job goes into input queue
self.start_in = 0.0
self.finish_in = 0.0 # when job goes into run queue
self.start_run = 0.0
self.finish_run = 0.0 # when job goes into out queue
self.start_out = 0.0
self.finish_out = 0.0
class BBJobDemand(object):
"""demand statistics"""
def __init__(self, num_core, bb_in, bb, data_out):
super(BBJobDemand, self).__init__()
self.num_core = float(num_core)
self.bb_in = float(bb_in)
self.bb = float(bb)
# additional trace data
self.data_in = float(bb_in)
self.data_out = float(data_out)
def __str__(self):
return "dv = [%d cores, %.2f in_buffer, %.2f buffer, %.2f out_data]" % \
(self.num_core, self.bb_in, self.bb, self.data_out)
class BBJobStatus(Enum):
"""job status"""
WaitInput = 1
Inputing = 2
WaitRun = 3
Running = 4
WaitOutput = 5
Outputing = 6
Complete = 7
class BBJob(object):
"""jobs with burst buffer demand"""
def __init__(self, job_id, submit, demand, rt):
super(BBJob, self).__init__()
self.job_id = job_id
ts = BBJobTimeStamp(submit)
self.ts = ts
self.demand = demand
self.runtime = float(rt)
self.status = BBJobStatus.WaitInput
def jobStatus(self):
if self.status == BBJobStatus.WaitInput:
return 'Wait Input'
elif self.status == BBJobStatus.Inputing:
return 'Inputing'
elif self.status == BBJobStatus.WaitRun:
return 'Wait Run'
elif self.status == BBJobStatus.Running:
return 'Running'
elif self.status == BBJobStatus.WaitOutput:
return 'Wait Out'
elif self.status == BBJobStatus.Outputing:
return 'Outputing'
else:
return 'Complete'
def __str__(self):
return 'job_%d, %s [%s]' % (self.job_id,
self.demand, self.jobStatus())
def dumpTimeStatistic(self):
if self.status == BBJobStatus.Complete:
submit = self.ts.submit
waiting_in = self.ts.start_in - self.ts.submit
waiting_run = self.ts.start_run - self.ts.finish_in
waiting_out = self.ts.start_out - self.ts.finish_run
inputing = self.ts.finish_in - self.ts.start_in
running = self.ts.finish_run - self.ts.start_run
outputing = self.ts.finish_out - self.ts.start_out
complete = self.ts.finish_out
total_wait = waiting_in + waiting_run + waiting_out
response = complete - submit
return [self.job_id, submit, waiting_in, inputing,
waiting_run, running, waiting_out, outputing,
complete, total_wait, response]
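# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module).  It only exercises the constructors and string helpers above,
# with made-up numbers.
if __name__ == '__main__':
    demand = BBJobDemand(num_core=64, bb_in=2.5, bb=8.0, data_out=4.0)
    job = BBJob(job_id=1, submit=0.0, demand=demand, rt=3600.0)
    print(job)               # job_1, dv = [64 cores, 2.50 in_buffer, 8.00 buffer, 4.00 out_data] [Wait Input]
    job.status = BBJobStatus.Complete
    print(job.jobStatus())   # Complete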
| [
"[email protected]"
] | |
03ce106bae9066ec66a3695dd6a7d4c52c838338 | d4d7c65bd4a4283b60caf1d8cea69a4aec9e594d | /04. Mid Exam Tasks/03. Froggy Squad.py | 0af494f78e673e0361b1e6c04525249814f6f788 | [] | no_license | tony-andreev94/Python-Fundamentals | db806d05168c1bb61763e5b3d27f495f045e7667 | 32032d37e3a8f70e156db6ccad2d9f25aac62c23 | refs/heads/master | 2021-08-07T12:32:27.624459 | 2020-12-24T12:47:05 | 2020-12-24T12:47:05 | 233,568,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,521 | py | # Create a program that helps you keep track of the frogs that are on the riverside.
# Because you are an extreme animal lover, you also name them.
# You will receive the names of the frogs that are already on the riverside on a single line,
# separated by a single space in the following format:
# "{frog1} {frog2} {frog3}… {frogn}"
# Then you will receive commands that describe their action. There are five possible commands:
# "Join {name}", "Jump {name} {index}", "Dive {index}", "First {count}" and "Last {count}".
# A final "Print Normal" / "Print Reversed" command prints the frogs and ends the program.
frog_names = input().split(' ')
while True:
command = input().split(' ')
if command[0] == 'Join':
frog_names.append(command[1])
if command[0] == 'Jump':
frog_names.insert(int(command[2]), command[1])
if command[0] == 'Dive':
# Check index
frog_names.remove(frog_names[int(command[1])])
if command[0] == 'First':
for index in range(int(command[1])):
print(frog_names[index], end=" ")
if index == len(frog_names) - 1:
break
if command[0] == 'Last':
for index in range(-1, -(int(command[1]) + 1), -1):
print(frog_names[index], end=" ")
            if -index == len(frog_names):  # stop once the whole list has been printed
break
if command[0] == 'Print':
print()
print("Frogs:", end=" ")
if command[1] == 'Normal':
for each_frog in frog_names:
print(each_frog, end=" ")
break
if command[1] == 'Reversed':
for each_frog in reversed(frog_names):
print(each_frog, end=" ")
break
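# Illustrative session (added; not part of the original solution):
#   input : "Frogger Robert Patrick", "Join Sam", "First 2", "Print Normal"
#   output: "Frogger Robert " on one line, then "Frogs: Frogger Robert Patrick Sam "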
| [
"[email protected]"
] | |
f41df66c6e1adf697bf2f3d6c03b719db4aa44b3 | 1f41b828fb652795482cdeaac1a877e2f19c252a | /maya_menus/_MAINMENU_PMC_Rigging/19.Selection/08.Select Destination Connection( ctls, attrs ).py | f8de72daebe0d35b572f115cc222baf4738737b2 | [] | no_license | jonntd/mayadev-1 | e315efe582ea433dcf18d7f1e900920f5590b293 | f76aeecb592df766d05a4e10fa2c2496f0310ca4 | refs/heads/master | 2021-05-02T07:16:17.941007 | 2018-02-05T03:55:12 | 2018-02-05T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import pymel.core
sels = pymel.core.ls( sl=1 )
selAttrs = pymel.core.channelBox( 'mainChannelBox', q=1, sma=1 )
targets = []
for sel in sels:
for attr in selAttrs:
targets += sel.attr( attr ).listConnections( s=0, d=1 )
pymel.core.select( targets ) | [
"[email protected]"
] | |
1548cc5c67c89921883b09489b577fe4434dd082 | 9e1bda53da4c5e98190f5f25235f528d692ee5a8 | /project_3/wsgi.py | 9b1534bf529f2f0539fb6479d4dcf2f5817e9563 | [] | no_license | Jumayev-A/Project-3 | 3d373181af6a87e3fe319a13d28fcd18941167b7 | 34ddd009726cbba9ae52e74a46d554fd735566e2 | refs/heads/main | 2023-06-10T11:02:06.446151 | 2021-07-07T06:19:11 | 2021-07-07T06:19:11 | 350,375,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for project_3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project_3.settings')
application = get_wsgi_application()
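# Example (added for illustration): any WSGI server can serve this callable,
# e.g. `gunicorn project_3.wsgi:application` from the project root (assuming
# gunicorn is installed); Django's development server resolves it through the
# WSGI_APPLICATION setting as well.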
| [
"[email protected]"
] | |
252b884b0834fea22df3ad412b0761120fe6d714 | 6841787dc208198535c90ee76d035d8837f8b973 | /alipay/aop/api/domain/AlipayCommerceLogisticsOrderInstantdeliveryCreateModel.py | 746bac03311ec824b36480abc385429d1b55f82c | [
"Apache-2.0"
] | permissive | demlution/alipay-sdk-python-all | e56ea12651717476f940212c281b6c2199dd3fa6 | c3cb81aa5980913e7cd51edcf754a804495a9a30 | refs/heads/master | 2022-10-09T02:08:28.789764 | 2022-09-20T04:32:03 | 2022-09-20T04:32:03 | 263,779,638 | 0 | 1 | null | 2020-05-14T00:58:51 | 2020-05-14T00:58:51 | null | UTF-8 | Python | false | false | 8,366 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ConsumerNotifyIstd import ConsumerNotifyIstd
from alipay.aop.api.domain.GoodsDetailIstd import GoodsDetailIstd
from alipay.aop.api.domain.GoodsInfoIstd import GoodsInfoIstd
from alipay.aop.api.domain.OrderExtIstd import OrderExtIstd
from alipay.aop.api.domain.ReceiverIstd import ReceiverIstd
from alipay.aop.api.domain.SenderIstd import SenderIstd
class AlipayCommerceLogisticsOrderInstantdeliveryCreateModel(object):
def __init__(self):
self._consumer_id = None
self._consumer_notify = None
self._consumer_source = None
self._goods_details = None
self._goods_info = None
self._logistics_code = None
self._logistics_token = None
self._order_ext_istd = None
self._out_order_no = None
self._receiver = None
self._sender = None
self._shop_no = None
@property
def consumer_id(self):
return self._consumer_id
@consumer_id.setter
def consumer_id(self, value):
self._consumer_id = value
@property
def consumer_notify(self):
return self._consumer_notify
@consumer_notify.setter
def consumer_notify(self, value):
if isinstance(value, ConsumerNotifyIstd):
self._consumer_notify = value
else:
self._consumer_notify = ConsumerNotifyIstd.from_alipay_dict(value)
@property
def consumer_source(self):
return self._consumer_source
@consumer_source.setter
def consumer_source(self, value):
self._consumer_source = value
@property
def goods_details(self):
return self._goods_details
@goods_details.setter
def goods_details(self, value):
if isinstance(value, list):
self._goods_details = list()
for i in value:
if isinstance(i, GoodsDetailIstd):
self._goods_details.append(i)
else:
self._goods_details.append(GoodsDetailIstd.from_alipay_dict(i))
@property
def goods_info(self):
return self._goods_info
@goods_info.setter
def goods_info(self, value):
if isinstance(value, GoodsInfoIstd):
self._goods_info = value
else:
self._goods_info = GoodsInfoIstd.from_alipay_dict(value)
@property
def logistics_code(self):
return self._logistics_code
@logistics_code.setter
def logistics_code(self, value):
self._logistics_code = value
@property
def logistics_token(self):
return self._logistics_token
@logistics_token.setter
def logistics_token(self, value):
self._logistics_token = value
@property
def order_ext_istd(self):
return self._order_ext_istd
@order_ext_istd.setter
def order_ext_istd(self, value):
if isinstance(value, OrderExtIstd):
self._order_ext_istd = value
else:
self._order_ext_istd = OrderExtIstd.from_alipay_dict(value)
@property
def out_order_no(self):
return self._out_order_no
@out_order_no.setter
def out_order_no(self, value):
self._out_order_no = value
@property
def receiver(self):
return self._receiver
@receiver.setter
def receiver(self, value):
if isinstance(value, ReceiverIstd):
self._receiver = value
else:
self._receiver = ReceiverIstd.from_alipay_dict(value)
@property
def sender(self):
return self._sender
@sender.setter
def sender(self, value):
if isinstance(value, SenderIstd):
self._sender = value
else:
self._sender = SenderIstd.from_alipay_dict(value)
@property
def shop_no(self):
return self._shop_no
@shop_no.setter
def shop_no(self, value):
self._shop_no = value
def to_alipay_dict(self):
params = dict()
if self.consumer_id:
if hasattr(self.consumer_id, 'to_alipay_dict'):
params['consumer_id'] = self.consumer_id.to_alipay_dict()
else:
params['consumer_id'] = self.consumer_id
if self.consumer_notify:
if hasattr(self.consumer_notify, 'to_alipay_dict'):
params['consumer_notify'] = self.consumer_notify.to_alipay_dict()
else:
params['consumer_notify'] = self.consumer_notify
if self.consumer_source:
if hasattr(self.consumer_source, 'to_alipay_dict'):
params['consumer_source'] = self.consumer_source.to_alipay_dict()
else:
params['consumer_source'] = self.consumer_source
if self.goods_details:
if isinstance(self.goods_details, list):
for i in range(0, len(self.goods_details)):
element = self.goods_details[i]
if hasattr(element, 'to_alipay_dict'):
self.goods_details[i] = element.to_alipay_dict()
if hasattr(self.goods_details, 'to_alipay_dict'):
params['goods_details'] = self.goods_details.to_alipay_dict()
else:
params['goods_details'] = self.goods_details
if self.goods_info:
if hasattr(self.goods_info, 'to_alipay_dict'):
params['goods_info'] = self.goods_info.to_alipay_dict()
else:
params['goods_info'] = self.goods_info
if self.logistics_code:
if hasattr(self.logistics_code, 'to_alipay_dict'):
params['logistics_code'] = self.logistics_code.to_alipay_dict()
else:
params['logistics_code'] = self.logistics_code
if self.logistics_token:
if hasattr(self.logistics_token, 'to_alipay_dict'):
params['logistics_token'] = self.logistics_token.to_alipay_dict()
else:
params['logistics_token'] = self.logistics_token
if self.order_ext_istd:
if hasattr(self.order_ext_istd, 'to_alipay_dict'):
params['order_ext_istd'] = self.order_ext_istd.to_alipay_dict()
else:
params['order_ext_istd'] = self.order_ext_istd
if self.out_order_no:
if hasattr(self.out_order_no, 'to_alipay_dict'):
params['out_order_no'] = self.out_order_no.to_alipay_dict()
else:
params['out_order_no'] = self.out_order_no
if self.receiver:
if hasattr(self.receiver, 'to_alipay_dict'):
params['receiver'] = self.receiver.to_alipay_dict()
else:
params['receiver'] = self.receiver
if self.sender:
if hasattr(self.sender, 'to_alipay_dict'):
params['sender'] = self.sender.to_alipay_dict()
else:
params['sender'] = self.sender
if self.shop_no:
if hasattr(self.shop_no, 'to_alipay_dict'):
params['shop_no'] = self.shop_no.to_alipay_dict()
else:
params['shop_no'] = self.shop_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceLogisticsOrderInstantdeliveryCreateModel()
if 'consumer_id' in d:
o.consumer_id = d['consumer_id']
if 'consumer_notify' in d:
o.consumer_notify = d['consumer_notify']
if 'consumer_source' in d:
o.consumer_source = d['consumer_source']
if 'goods_details' in d:
o.goods_details = d['goods_details']
if 'goods_info' in d:
o.goods_info = d['goods_info']
if 'logistics_code' in d:
o.logistics_code = d['logistics_code']
if 'logistics_token' in d:
o.logistics_token = d['logistics_token']
if 'order_ext_istd' in d:
o.order_ext_istd = d['order_ext_istd']
if 'out_order_no' in d:
o.out_order_no = d['out_order_no']
if 'receiver' in d:
o.receiver = d['receiver']
if 'sender' in d:
o.sender = d['sender']
if 'shop_no' in d:
o.shop_no = d['shop_no']
return o
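# Minimal usage sketch (not part of the SDK file): the field values below are
# illustrative assumptions; only the class and attribute names come from above.
if __name__ == '__main__':
    _model = AlipayCommerceLogisticsOrderInstantdeliveryCreateModel()
    _model.out_order_no = 'OUT-0001'
    _model.logistics_code = 'EXPRESS'
    _model.shop_no = 'SHOP-42'
    # to_alipay_dict() emits only the fields that were set, and
    # from_alipay_dict() rebuilds an equivalent object from that dict.
    _copy = AlipayCommerceLogisticsOrderInstantdeliveryCreateModel.from_alipay_dict(_model.to_alipay_dict())
    print(_copy.out_order_no)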
| [
"[email protected]"
] | |
55f8bb4cc636189aed727dbdcd2d0918046f96db | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_quantizer/xnnc4xir/xnnc/proto/tf_pb2/reader_base_pb2.py | 1578860ab1cd6daaf243152a58058981a8251456 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 5,019 | py | """
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/reader_base.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="tensorflow/core/framework/reader_base.proto",
package="tensorflow",
syntax="proto3",
serialized_options=_b(
"\n\030org.tensorflow.frameworkB\020ReaderBaseProtosP\001Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\370\001\001"
),
serialized_pb=_b(
'\n+tensorflow/core/framework/reader_base.proto\x12\ntensorflow"r\n\x0fReaderBaseState\x12\x14\n\x0cwork_started\x18\x01 \x01(\x03\x12\x15\n\rwork_finished\x18\x02 \x01(\x03\x12\x1c\n\x14num_records_produced\x18\x03 \x01(\x03\x12\x14\n\x0c\x63urrent_work\x18\x04 \x01(\x0c\x42p\n\x18org.tensorflow.frameworkB\x10ReaderBaseProtosP\x01Z=github.com/tensorflow/tensorflow/tensorflow/go/core/framework\xf8\x01\x01\x62\x06proto3'
),
)
_READERBASESTATE = _descriptor.Descriptor(
name="ReaderBaseState",
full_name="tensorflow.ReaderBaseState",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="work_started",
full_name="tensorflow.ReaderBaseState.work_started",
index=0,
number=1,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="work_finished",
full_name="tensorflow.ReaderBaseState.work_finished",
index=1,
number=2,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="num_records_produced",
full_name="tensorflow.ReaderBaseState.num_records_produced",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="current_work",
full_name="tensorflow.ReaderBaseState.current_work",
index=3,
number=4,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=59,
serialized_end=173,
)
DESCRIPTOR.message_types_by_name["ReaderBaseState"] = _READERBASESTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ReaderBaseState = _reflection.GeneratedProtocolMessageType(
"ReaderBaseState",
(_message.Message,),
dict(
DESCRIPTOR=_READERBASESTATE,
__module__="tensorflow.core.framework.reader_base_pb2"
# @@protoc_insertion_point(class_scope:tensorflow.ReaderBaseState)
),
)
_sym_db.RegisterMessage(ReaderBaseState)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
c17d73e8b28549a190e9b51caacf123d8b11d9ec | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/isis/db.py | dfa245b2a4813380fba88996e099bc64cdb6034d | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,562 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Db(Mo):
"""
The IS-IS database information.
"""
meta = ClassMeta("cobra.model.isis.Db")
meta.moClassName = "isisDb"
meta.rnFormat = "db-%(type)s"
meta.category = MoCategory.REGULAR
meta.label = "IS-IS Database"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.isis.GrpRec")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.isis.LspRec")
meta.childClasses.add("cobra.model.isis.Nexthop")
meta.childClasses.add("cobra.model.isis.NodeRec")
meta.childClasses.add("cobra.model.isis.Route")
meta.childClasses.add("cobra.model.isis.DTEp")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.NodeRec", "node-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.DTEp", "dtep-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.GrpRec", "grp-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.LspRec", "lsp-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.Nexthop", "nh-"))
meta.childNamesAndRnPrefix.append(("cobra.model.isis.Route", "rt-"))
meta.parentClasses.add("cobra.model.isis.Dom")
meta.parentClasses.add("cobra.model.isis.DomLvl")
meta.superClasses.add("cobra.model.l3.Db")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.nw.Db")
meta.superClasses.add("cobra.model.nw.Item")
meta.superClasses.add("cobra.model.nw.GEp")
meta.rnPrefixes = [
('db-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14494, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 16436, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 3216, PropCategory.REGULAR)
prop.label = "Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 1
prop.defaultValueStr = "ls"
prop._addConstant("dtep", "dynamic-tunnel-endpoint-database", 6)
prop._addConstant("fn", "fabric-node-database", 5)
prop._addConstant("ls", "link-state-dtabase", 1)
prop._addConstant("mcgrp", "multicast-group-database", 4)
prop._addConstant("nh", "nexthop-database", 3)
prop._addConstant("rt", "route-database", 2)
meta.props.add("type", prop)
meta.namingProps.append(getattr(meta.props, "type"))
def __init__(self, parentMoOrDn, type, markDirty=True, **creationProps):
namingVals = [type]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
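# Usage sketch, appended for illustration only (the parent DN string is an assumed
# value, not taken from the generated file): the naming property "type" must be one
# of the constants registered above, and the RN becomes "db-<type>" per rnFormat.
#   db = Db("sys/isis/inst-default/dom-overlay-1", type="ls")   # -> rn "db-ls"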
| [
"[email protected]"
] | |
ff6b877cf1ff67b550b56f6ef180a404c3b60ca2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2304/60775/297440.py | 2a5c35b05a3f2272ea4b464e9c0bc37c673d30cf | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py | in1 = input().split(' ')
num_v = int(in1[0])
root = int(in1[1])
tree = [[] for i in range(100)]
for i in range(num_v):
in2 = input().split(' ')
tree[int(in2[0])] = [int(in2[1]),int(in2[2])]
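# Illustrative input sketch (an assumption inferred from the parsing above): first
# line "num_v root", then num_v lines of "node left_child right_child" where 0
# marks a missing child, e.g.
#   3 1
#   1 2 3
#   2 0 0
#   3 0 0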
# Breadth-first (level-order) traversal; the list is used as a FIFO queue
stack = []
stack.append(root)
this_layer_nodes = 1
layer = 1
while stack != []:
next_layer_nodes = 0
print("Level", layer, ":",end='')
while this_layer_nodes > 0:
tmp = stack.pop(0)
print(" " +str(tmp),end='')
this_layer_nodes -= 1
if tree[tmp][0] != 0:
stack.append(tree[tmp][0])
next_layer_nodes += 1
if tree[tmp][1] != 0:
stack.append(tree[tmp][1])
next_layer_nodes += 1
this_layer_nodes = next_layer_nodes
layer += 1
print()
# Zigzag traversal: the print direction alternates on every level
stack = []
stack.append(root)
this_layer_nodes = 1
layer = 1
while stack != []:
next_layer_nodes = 0
if layer % 2 == 1:
print("Level", layer, "from left to right:", end='')
while this_layer_nodes > 0:
tmp = stack.pop(0)
print(" " +str(tmp),end='')
this_layer_nodes -= 1
if tree[tmp][0] != 0:
stack.append(tree[tmp][0])
next_layer_nodes += 1
if tree[tmp][1] != 0:
stack.append(tree[tmp][1])
next_layer_nodes += 1
this_layer_nodes = next_layer_nodes
layer += 1
else:
print("Level", layer, "from right to left:", end='')
        tmp_nodes = []
        while this_layer_nodes > 0:
            tmp = stack.pop(0)
            tmp_nodes.append(tmp)
            this_layer_nodes -= 1
            if tree[tmp][0] != 0:
                stack.append(tree[tmp][0])
                next_layer_nodes += 1
            if tree[tmp][1] != 0:
                stack.append(tree[tmp][1])
                next_layer_nodes += 1
        this_layer_nodes = next_layer_nodes
        layer += 1
        # The nodes were collected left to right, so print them in reverse to get
        # the right-to-left order for this level (the extra outer while loop in the
        # original drained every remaining level under a single heading).
        for i in range(len(tmp_nodes)-1,-1,-1):
            print(' '+str(tmp_nodes[i]),end='')
print() | [
"[email protected]"
] | |
6db40a61a9d2f970b2620229d396218da4bbba9a | e8cc093ce857f65882e25f4bfae94a395ffc2fe5 | /PESA-BACK/remitapi/utils.py | 15b7d841ed5d3fec3b88484e78393b7201794401 | [] | no_license | mandelashaban593/Chatting-App | f6d0f7ac3785da690f52a7c1427353956699af4c | 0e6e5d9edb0a4f0c91a40391ae5916549e87ec7b | refs/heads/master | 2021-01-22T06:01:42.649403 | 2017-03-20T08:08:55 | 2017-03-20T08:08:55 | 92,512,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,063 | py | from remitapi.tasks import send_email, send_sms
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.conf import settings
from django.template import RequestContext
import sys
from datetime import datetime
# Imports below were missing but are required by error_message/success_message and
# render_to_pdf further down; pisa is assumed to come from the xhtml2pdf package.
from django.contrib import messages
from django.http import HttpResponse
from django.template import Context
from django.template.loader import get_template
from django.utils.html import escape
import StringIO
from xhtml2pdf import pisa
def mailer(request, subject, template, content, to, sender=False):
if settings.DISABLE_COMMS:
return True
if not sender:
sender = "%s via %s" % (settings.APP_NAME, settings.APP_EMAILS['info'])
try:
content['STATIC_URL'] = "%semail/" % settings.STATIC_URL
html_content = render_to_string(
template, content, context_instance=RequestContext(request))
# this strips the html, so people will have the text as well.
text_content = strip_tags(html_content)
# create the email, and attach the HTML version as well.
send_email(subject, text_content, sender, to, html_content)
except Exception, e:
print e
return True
def send_msg_notification(msg, request):
if msg.is_note:
template = settings.EMAIL_TEMPLATE_DIR + 'new_note.html'
else:
template = settings.EMAIL_TEMPLATE_DIR + 'new_message.html'
data = {'msg': msg}
email = False
subject = "New Message from 199Fix"
try:
email = msg.recipient.email
if not subject:
subject = '%s via 199Fix' % msg.sender_profile().short_name
except Exception, e:
print e
mailer(request, subject, template, data, email)
def send_job_notification(job, request):
if not job.status == '1':
template = settings.EMAIL_TEMPLATE_DIR + 'new_job_status.html'
else:
template = settings.EMAIL_TEMPLATE_DIR + 'job.html'
data = {'job': job}
email = False
subject = "%s via 199Fix [ %s ]" % (
job.app.owner_profile().short_name,
job.name
)
try:
email = job.app.user.email
except Exception:
pass
if not email:
email = "[email protected]"
mailer(request, subject, template, data, email)
def debug(e, txt=False, log='debug'):
txt = "%s %s" % (e, txt)
if settings.DEBUG_API:
if not txt:
txt = ''
print >> sys.stderr, 'Debuging____________________ %s' % txt
print >> sys.stderr, e
else:
try:
old_stdout = sys.stdout
log_file = open("%slogs/%s.log" % (settings.LOG_DIR, log), "a")
sys.stdout = log_file
print '%s: Debuging_____________%s' % (
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
txt
)
sys.stdout = old_stdout
log_file.close()
except Exception, e:
print e
def balance_low_email(request, wallet, transaction):
'''
balance is low
'''
template = settings.EMAIL_TEMPLATE_DIR + 'balance_low_email.html'
data = {'transaction': transaction}
email = "[email protected]"
subject = "Balance for %s is low" % wallet
mailer(request, subject, template, data, email)
def error_message(request, msgtype, data={}):
template = settings.BASE_DIR + 'templates/error_messages.html'
data['type'] = msgtype
text = render_to_string(
template, data, context_instance=RequestContext(request))
messages.error(request, text)
def success_message(request, msgtype, data={}):
template = settings.BASE_DIR + 'templates/success_messages.html'
data['type'] = msgtype
text = render_to_string(
template, data, context_instance=RequestContext(request))
messages.success(request, text)
def admin_mail(request, code, data=False, e=False):
'''admin email template'''
template = settings.EMAIL_TEMPLATE_DIR + 'admin.html'
subjects = {
'pending_transaction': 'Pending Transaction',
'complete_transaction': 'Transaction Complete',
'user_verification': 'User Pending Verification',
'user_verification_update': 'User Updated Verification Details',
'new_user': '',
'rates_error': 'An error occurred while fetching the rates',
'server_error': 'Dude your App Just Broke',
'contact_us': 'New Contact Message',
}
if settings.DEBUG:
emails = settings.DEBUG_EMAILS
if code == 'server_error':
emails = {'[email protected]'}
elif code == 'contact_us':
emails = {'[email protected]'}
else:
emails = {'[email protected]'}
response = False
if code in subjects:
#emails = {'[email protected]'}
subject = subjects[code]
extradata = {}
extradata['data'] = data
extradata['code'] = code
# if e:
# extradata['e'] = repr(e)
sender = settings.APP_EMAILS['info']
        # The original test ('contact_us' in subjects) is always true because it is
        # a dict key; the sender should only change for the contact-form code.
        if code == 'contact_us':
sender = settings.APP_EMAILS['contact_us']
for email in emails:
response = mailer(request, subject, template,
extradata, email, sender)
return response
def sendsms(to, template, content):
'''backward compatibility ,move this to tasks.py'''
return send_sms(to, template, content)
# return True
COUNTRY_CHOICES = (
('UG', 'Uganda'),
('KE', 'Kenya'),
('TZ', 'Tanzania'),
('RW', 'Rwanda'),
)
NETWORK_CHOICES = (
('MTN', 'MTN Mobile Money'),
('AIRTEL', 'Airtel Money'),
('UTL', 'M-Sente'),
)
def render_to_pdf(template_src, context_dict):
template = get_template(template_src)
context = Context(context_dict)
html = template.render(context)
result = StringIO.StringIO()
#data = html.encode("ISO-8859-1")
data = html.encode('utf-8')
pdf = pisa.pisaDocument(StringIO.StringIO(data), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
def log_unauthorized_access(request):
debug(request, 'log_unauthorized_access')
def insufficient_account_balance(transaction):
print "insufficient_account_balance"
| [
"[email protected]"
] | |
0b7242d707c8fad8e136bfc83589b9de21a0b2a5 | bdadee64ca8c143a925ec84736e3966ef0183bce | /Analysis/Tools/timeSeries_returnOuterVariables/FIG8_T_cut2_new_static_argv_chunk.py | 8b7b37efaaec670e0393b9c6266677eafa85a50d | [] | no_license | snow-stone/python | fcb69950f5984d654f555ca591583c0a0763bef4 | 0e79a0fb265a22a409309b1c0f992f00ef588f95 | refs/heads/master | 2020-03-11T10:51:53.268373 | 2019-10-18T14:24:45 | 2019-10-18T14:24:45 | 129,953,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,613 | py | import matplotlib
matplotlib.use('agg')
import sys
sys.path.append('/home/hluo/work/git/thesis/Thesis_hluo_new/reference_database') # for rdb
import reference_database as rdb
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
plt.style.use('seaborn-white') # from default
plt.rcParams.update({'font.size': 20})
plt.rcParams['savefig.dpi'] = 200
def D1_Dai_EAU_mean(ax):
x1,y1 = rdb.Dai_thesis.Fig4p9a('EAU')
ax.plot(-x1+0.5, y1, label='D1-Dai-EAU', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='mediumvioletred', color='mediumvioletred', markeredgewidth=2)
x2,y2 = rdb.Dai_thesis.Fig4p9a('XG')
ax.plot(-x2+0.5, y2, label='D1-Dai-XG', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='orange', color='orange', markeredgewidth=2)
def D1_Dai_EAU_rms(ax):
x2,y2 = rdb.Dai_thesis.Fig4p12a('EAU')
ax.plot(-x2+0.5, y2, label='D1-Dai-EAU', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='mediumvioletred', color='mediumvioletred', markeredgewidth=2)
x2,y2 = rdb.Dai_thesis.Fig4p12a('XG')
ax.plot(-x2+0.5, y2, label='D1-Dai-XG', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='orange', color='orange', markeredgewidth=2)
def D2_Dai_EAU_mean(ax):
x1,y1 = rdb.Dai_thesis.Fig5p5b('EAU')
ax.plot(-x1+0.5, y1, label=r'$N^2_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='mediumvioletred', color='mediumvioletred', markeredgewidth=2)
#x2,y2 = rdb.Dai_thesis.Fig5p5b('XG')
#ax.plot(-x2+0.5, y2, label=r'$NN^2_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='orange', color='orange', markeredgewidth=2)
def D2_Dai_EAU_rms(ax):
x2,y2 = rdb.Dai_thesis.Fig5p10b('EAU')
ax.plot(-x2+0.5, y2, label=r'$N^2_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='mediumvioletred', color='mediumvioletred', markeredgewidth=2)
#x2,y2 = rdb.Dai_thesis.Fig5p10b('XG')
#ax.plot(-x2+0.5, y2, label=r'$NN^2_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='orange', color='orange', markeredgewidth=2)
def D3_Dai_EAU_mean(ax):
x1,y1 = rdb.Dai_thesis.Fig5p6c('EAU')
ax.plot(-x1+0.5, y1, label=r'$N^3_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='mediumvioletred', color='mediumvioletred', markeredgewidth=2)
x2,y2 = rdb.Dai_thesis.Fig5p6c('XG')
ax.plot(-x2+0.5, y2, label=r'$NN^3_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='orange', color='orange', markeredgewidth=2)
def D3_Dai_EAU_rms(ax):
x1,y1 = rdb.Dai_thesis.Fig5p10c('EAU')
ax.plot(-x1+0.5, y1, label=r'$N^3_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='mediumvioletred', color='mediumvioletred', markeredgewidth=2)
x2,y2 = rdb.Dai_thesis.Fig5p10c('XG')
ax.plot(-x2+0.5, y2, label=r'$NN^3_{Exp}$', marker='s', markerfacecolor='none', linewidth=1, linestyle='--', markersize=16, markeredgecolor='orange', color='orange', markeredgewidth=2)
def main():
import timeSeriesReader_ReturnOuterVariables_T as tsR
import os
parameterFileBasename = sys.argv[1]
saveDir = sys.argv[2]
ifLocalControl = sys.argv[3]
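    # Illustrative invocation (an assumption based on the three sys.argv reads above):
    #   python FIG8_T_cut2_new_static_argv_chunk.py D2-NN-1k_syn results/D2-NN-1k_syn/ ControlFig_usingParameters
    # which expects a matching parameters_D2-NN-1k_syn.py module on the import path.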
simu_module = __import__("parameters_"+parameterFileBasename)
simu_parameters = simu_module.parameters
alias_dict={
"D2-NN-1j_test_from0" : r"$NN^{2,A}_{syn}$",
# "D2-NN-1j_test_from0p3_forcingStep_St1_A_eq_0p05" : r"$NN^{2,A}_{Step,St=1,A=0.05}$",
"D2-NN-1j_test_from0p3_forcingSinus_St3p2_A_eq_0p05" : r"$NN^{2,A}_{syn,St=3.2,A=0.05}$",
"D1-1j_mapped":r"$N^{1,A}_{mapped}$",
"D2-1j_mapped":r"$N^{2,A}_{mapped}$",
"D3-1j_mapped":r"$N^{3,A}_{mapped}$",
# "D2-1j_syn" : r"$N^{2,A}_{syn-mean}$",
# "D2-NN-1j_syn":r"$NN^{2,A}_{syn-mean-'}$",
"D2-NN-1k_syn":r"$NN^{2,B}_{syn}$",
"D2-NN-1k_syn_forcing":r"$NN^{2,B}_{syn,St=3.2,A=0.025}$"
}
fig1,ax1 = plt.subplots()
fig2,ax2 = plt.subplots()
# get data
simu_parameters['sampling']['dataShape']=simu_parameters['sampling']['TdataShape2']
fileListSimu = tsR.pre_check(simu_parameters,"Dai_lines_typeFace_cell_T-2")
dataBase2Plot = tsR.process(simu_parameters,validDataList=fileListSimu,colonNb=1)
# reference plot
if (parameterFileBasename[0:2] == 'D2'):
D2_Dai_EAU_mean(ax1)
D2_Dai_EAU_rms(ax2)
#Ux_bulk_Dai=1
elif (parameterFileBasename[0:2] == 'D1'):
D1_Dai_EAU_mean(ax1)
D1_Dai_EAU_rms(ax2)
#Ux_bulk_Dai=1
elif (parameterFileBasename[0:2] == 'D3'):
D3_Dai_EAU_mean(ax1)
D3_Dai_EAU_rms(ax2)
#Ux_bulk_Dai=1
if (parameterFileBasename[3:5] == 'NN'):
simu_color='orange'
else:
simu_color='mediumvioletred'
# No-dimnesionize and plot
#ax1.plot(dataBase2Plot['rByD'],dataBase2Plot['mean']/Ux_bulk_Dai,label=simu_parameters['alias'],linewidth=4, color=simu_color)
#for i in range(len(dataBase2Plot['chunkedMean'])):
# ax1.plot(dataBase2Plot['rByD'],dataBase2Plot['chunkedMean'][i]/Ux_bulk_Dai,label=str(i),linewidth=4)
y0 = dataBase2Plot['chunkedMean'][3]+dataBase2Plot['chunkedMean'][4]
y0 = y0/2.0
ax1.plot(dataBase2Plot['rByD'],y0,label=alias_dict[parameterFileBasename],linewidth=4)
#ax2.plot(dataBase2Plot['rByD'],dataBase2Plot['std']/Ux_bulk_Dai,label=simu_parameters['alias'],linewidth=4, color=simu_color)
#for i in range(len(dataBase2Plot['chunkedMean'])):
# ax2.plot(dataBase2Plot['rByD'],dataBase2Plot['chunkedStd'][i]/Ux_bulk_Dai,label=str(i),linewidth=4)
y1 = dataBase2Plot['chunkedStd'][3]+dataBase2Plot['chunkedStd'][4]
y1 = y1/2.0
ax2.plot(dataBase2Plot['rByD'],y1,label=alias_dict[parameterFileBasename],linewidth=4)
# plot settings
ax1.set_xlim(0,1)
ax1.set_ylim(-0.25,1.5)
if ifLocalControl == "ControlFig_usingParameters":
print "====================================="
print "For fig1 :"
print "In file " + os.path.basename(__file__)
print "applying ControlFig_usingParameters pre-described in file " + os.path.basename(simu_module.__file__)
print "====================================="
ax1.legend(bbox_to_anchor=simu_parameters['plot']['legendPosition2a'], ncol=1, fancybox=True, shadow=True)
else:
print "====================================="
print "For fig1 :"
print "In file " + os.path.basename(__file__)
print "applying local constant control on legend positioning"
print "====================================="
ax1.legend(bbox_to_anchor=(1, 1.4), ncol=1, fancybox=True, shadow=True)
ax1.set_xlabel(r'$r/D$')
ax1.set_ylabel(r'$\overline{c}$')
#ax1.set_title(alias_dict[parameterFileBasename])
ax2.set_xlim(0,1)
ax2.set_ylim(0,0.5)
if ifLocalControl == "ControlFig_usingParameters":
print "====================================="
print "For fig2 :"
print "In file " + os.path.basename(__file__)
print "applying ControlFig_usingParameters pre-described in file " + os.path.basename(simu_module.__file__)
print "====================================="
ax2.legend(bbox_to_anchor=simu_parameters['plot']['legendPosition2b'], ncol=1, fancybox=True, shadow=True)
else:
print "====================================="
print "For fig2 :"
print "In file " + os.path.basename(__file__)
print "applying local constant control on legend positioning"
print "====================================="
ax2.legend(bbox_to_anchor=(1, 1.3), ncol=1, fancybox=True, shadow=True)
ax2.set_xlabel(r'$r/D$')
ax2.set_ylabel(r'$rms(c)$')
#ax2.set_title(alias_dict[parameterFileBasename])
im = plt.imread('cutPositions_cropped_cut2.png')
rect=[0.1, 0.8, 0.3, 0.3]
ax1_new = fig1.add_axes(rect, anchor='NE', zorder=-1)
ax1_new.imshow(im)
ax1_new.axis('off')
#ax1.get_legend().remove()
ax2.get_legend().remove()
fig1.savefig(saveDir+"T_cut2a.png", bbox_inches='tight')
fig2.savefig(saveDir+"T_cut2b.png", bbox_inches='tight')
main()
| [
"[email protected]"
] | |
3f1a9f74909affc32a3e4eead313c53c10cab52b | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/tests/unit/gapic/googleads.v6/services/test_topic_constant_service.py | 7b78557082a66257e87e02f0bfec735694281464 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,911 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.ads.googleads.v6.resources.types import topic_constant
from google.ads.googleads.v6.services.services.topic_constant_service import TopicConstantServiceClient
from google.ads.googleads.v6.services.services.topic_constant_service import transports
from google.ads.googleads.v6.services.types import topic_constant_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TopicConstantServiceClient._get_default_mtls_endpoint(None) is None
assert TopicConstantServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert TopicConstantServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_topic_constant_service_client_from_service_account_info():
creds = credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = TopicConstantServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_topic_constant_service_client_from_service_account_file():
creds = credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = TopicConstantServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = TopicConstantServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_topic_constant_service_client_get_transport_class():
transport = TopicConstantServiceClient.get_transport_class()
assert transport == transports.TopicConstantServiceGrpcTransport
transport = TopicConstantServiceClient.get_transport_class("grpc")
assert transport == transports.TopicConstantServiceGrpcTransport
@mock.patch.object(TopicConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TopicConstantServiceClient))
def test_topic_constant_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.TopicConstantServiceClient.get_transport_class') as gtc:
transport = transports.TopicConstantServiceGrpcTransport(
credentials=credentials.AnonymousCredentials()
)
client = TopicConstantServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.TopicConstantServiceClient.get_transport_class') as gtc:
client = TopicConstantServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = TopicConstantServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = TopicConstantServiceClient()
@mock.patch.object(TopicConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TopicConstantServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_topic_constant_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = TopicConstantServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = TopicConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_topic_constant_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = TopicConstantServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_topic_constant(transport: str = 'grpc', request_type=topic_constant_service.GetTopicConstantRequest):
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_topic_constant),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = topic_constant.TopicConstant(
resource_name='resource_name_value',
id=205,
topic_constant_parent='topic_constant_parent_value',
path=['path_value'],
)
response = client.get_topic_constant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == topic_constant_service.GetTopicConstantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, topic_constant.TopicConstant)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.topic_constant_parent == 'topic_constant_parent_value'
assert response.path == ['path_value']
def test_get_topic_constant_from_dict():
test_get_topic_constant(request_type=dict)
def test_get_topic_constant_field_headers():
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = topic_constant_service.GetTopicConstantRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_topic_constant),
'__call__') as call:
call.return_value = topic_constant.TopicConstant()
client.get_topic_constant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_topic_constant_flattened():
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_topic_constant),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = topic_constant.TopicConstant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_topic_constant(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_topic_constant_flattened_error():
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_topic_constant(
topic_constant_service.GetTopicConstantRequest(),
resource_name='resource_name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TopicConstantServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TopicConstantServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
client = TopicConstantServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TopicConstantServiceGrpcTransport(
credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.TopicConstantServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.TopicConstantServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_topic_constant_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.TopicConstantServiceTransport(
credentials=credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_topic_constant',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_topic_constant_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(auth, 'default') as adc, mock.patch('google.ads.googleads.v6.services.services.topic_constant_service.transports.TopicConstantServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (credentials.AnonymousCredentials(), None)
transport = transports.TopicConstantServiceTransport()
adc.assert_called_once()
def test_topic_constant_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
TopicConstantServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_topic_constant_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
transports.TopicConstantServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_topic_constant_service_host_no_port():
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_topic_constant_service_host_with_port():
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_topic_constant_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.TopicConstantServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.TopicConstantServiceGrpcTransport])
def test_topic_constant_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.TopicConstantServiceGrpcTransport,])
def test_topic_constant_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_topic_constant_path():
topic_id = "squid"
expected = "topicConstants/{topic_id}".format(topic_id=topic_id, )
actual = TopicConstantServiceClient.topic_constant_path(topic_id)
assert expected == actual
def test_parse_topic_constant_path():
expected = {
"topic_id": "clam",
}
path = TopicConstantServiceClient.topic_constant_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_topic_constant_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = TopicConstantServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = TopicConstantServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder, )
actual = TopicConstantServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = TopicConstantServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization, )
actual = TopicConstantServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = TopicConstantServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project, )
actual = TopicConstantServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = TopicConstantServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = TopicConstantServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = TopicConstantServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TopicConstantServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.TopicConstantServiceTransport, '_prep_wrapped_messages') as prep:
client = TopicConstantServiceClient(
credentials=credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.TopicConstantServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = TopicConstantServiceClient.get_transport_class()
transport = transport_class(
credentials=credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
5f562dcaa59d40c74a34cfe55ed08f30174a169d | 49185bd5cf7e2f5190ce22b5189a09fe1ab6bb0f | /Proper/proper/prop_circular_obscuration.py | 626faec83727f75711d6b3eb83c90439547c57bc | [
"MIT"
] | permissive | RupertDodkins/MEDIS | c3f55d8adb6a8c4120593ba6552c9dfe3784d4e2 | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | refs/heads/master | 2021-07-05T20:06:44.162517 | 2019-09-05T22:16:12 | 2019-09-05T22:16:12 | 160,850,558 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,649 | py | # Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
import proper
def prop_circular_obscuration(wf, radius, xc = 0.0, yc = 0.0, **kwargs):
"""Multiply the wavefront by a circular obscuration.
Parameters
----------
wf : obj
WaveFront class object
radius : float
Radius of aperture in meters, unless norm is specified
xc : float
X-center of aperture relative to center of wavefront. Default is 0.0
yc : float
Y-center of aperture relative to center of wavefront. Default is 0.0
Returns
-------
Multiplies current wavefront in "wf" by a circular obscuration (0 inside,
1 outside).
Other Parameters
----------------
NORM : bool
        If set to True, the specified radius and xc, yc aperture centers are
assumed to be normalized to the current beam radius (e.g. radius is 1.0
means the aperture is the same size as the current beam). xc, yc = 0,0
is the center of the wavefront. Default is False.
"""
if ("NORM" in kwargs and kwargs["NORM"]):
norm = True
else:
norm = False
wf.wfarr *= proper.prop_shift_center(proper.prop_ellipse(wf, radius, radius, xc, yc, NORM = norm, DARK = True))
return
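# A minimal usage sketch (illustrative only; the wavefront setup values below are
# assumptions, not part of this module):
#
#   import proper
#   wfo = proper.prop_begin(0.1, 0.5e-6, 256)           # 10 cm beam, 500 nm, 256x256 grid
#   proper.prop_circular_aperture(wfo, 0.05)            # 5 cm aperture
#   prop_circular_obscuration(wfo, 0.01)                 # 1 cm central obscuration
#   prop_circular_obscuration(wfo, 0.2, NORM=True)       # radius given relative to the beam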
| [
"[email protected]"
] | |
c255da1783f7d52da5f07fbf950c77042766e954 | 0f6a6edcfbfe5669a78748422c7fc0343d532f42 | /src/events/adapters/celery.py | 7284036cbe2d0d304bec18de7ae5555363a8f92d | [
"MIT"
] | permissive | jonatasoli/the-eye | f734f814d0cf12ee157f1e69f95f6e2bf73a7a83 | 0a11334bc24c99dcb7158238bc2ae63a26dc63b1 | refs/heads/main | 2023-08-22T06:55:27.893110 | 2021-10-09T14:35:27 | 2021-10-09T14:35:27 | 415,181,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | import celery
from loguru import logger
from flask import Flask
celery_obj = celery.Celery(__name__)
def init_app(app: Flask):
logger.info(app.config['CELERY_BROKER_URL'])
logger.info(app.config['CELERY_RESULT_BACKEND'])
celery_obj.conf.broker_url = app.config['CELERY_BROKER_URL']
celery_obj.conf.result_backend = app.config['CELERY_RESULT_BACKEND']
TaskBase = celery_obj.Task
class AppContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
    celery_obj.Task = AppContextTask  # bind the context-aware Task class to the Celery app instance (not the module)
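# Wiring sketch (illustrative; the broker/backend URLs are assumptions, only the
# CELERY_BROKER_URL / CELERY_RESULT_BACKEND config keys are read by init_app above):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'
#   app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'
#   init_app(app)   # tasks defined on celery_obj now execute inside the app context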
| [
"[email protected]"
] | |
087414c9fb5074b4342290c052f598198db0bc75 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/2460.py | bb3b758e7f02863f07f01b7e76c0a3ec1bdca277 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import numpy as np
T = int(raw_input())
def solve():
S = len(s)
# print s, K, S
cnt = 0
for ii in xrange(S-K+1):
if s[ii] == '-':
cnt+=1
for jj in xrange(K):
s[ii+jj] = '+' if s[ii+jj] == '-' else '-'
if s.count('-') > 0:
return 'IMPOSSIBLE'
else:
return cnt
for i in xrange(T):
s, K = raw_input().split()
K = int(K)
s = [c for c in s]
sol = solve()
print "Case #%d: %s"%(i+1,sol)
| [
"[email protected]"
] | |
4d1740d100a8a38bb1f5f3a400475d62046b4555 | afbaa5685bf737ec7d16fee2bab54ae13caf96f9 | /geekbang/data_analysis/ch04/numpy3.py | 7556b5147749407b51b42d1bcc8a0f52d3aa9d31 | [] | no_license | ykdsg/myPython | 9dcc9afe6f595e51b72257875d66ada1ba04bba6 | 77d2eaa2acb172664b632cc2720cef62dff8f235 | refs/heads/master | 2023-06-10T20:11:08.061075 | 2023-06-03T11:39:53 | 2023-06-03T11:39:53 | 10,655,956 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(np.amin(a))
# minimum along axis=0
print(np.amin(a, 0))
print(np.amin(a, 1))
print(np.amax(a))
print(np.amax(a, 0))
print(np.amax(a, 1))
x = np.array([[0, 1], [2, 3]])
print(np.sum(x, axis=0))
print(np.sum(x, axis=1))
print(np.amin(x, axis=0))
print(np.amin(x, axis=1))
| [
"[email protected]"
] | |
a30322bdb05ecc03bd4eae8d604f413144cb093d | 0aea9408dceec38dfa4d52be13256cd84367c15f | /my_blog/main/__init__.py | 2c91492717aac4766ba8b72f7ad0e954e8ee4d4c | [] | no_license | ezirmusitua/my-blog-with-flask | 54d11f4cb0326be5a2dbbebf078282dd68b43e44 | d7c2bda06d12359261fe25a24527138f437cef41 | refs/heads/master | 2021-01-16T21:05:34.267515 | 2016-06-21T11:09:17 | 2016-06-21T11:09:17 | 61,627,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Blueprint
main = Blueprint('main', __name__)
from . import views
| [
"[email protected]"
] | |
8c5ce3513996a713016395c9da9fc122a5f8709a | 39e1320c74bcf0bbebb855645b4f538e9ef361f4 | /src/genui/accounts/urls.py | 6a3bd9d8c8926d8932b1dba59f7fc2685aaf3b8f | [
"MIT"
] | permissive | Tontolda/genui | 4c684e08e78b848e5afa7e4333bbea46c30d9d51 | c5b7da7c5a99fc16d34878e2170145ac7c8e31c4 | refs/heads/master | 2023-04-14T12:57:31.774323 | 2021-01-29T08:01:30 | 2021-01-29T08:01:30 | 344,443,814 | 0 | 0 | NOASSERTION | 2021-04-24T14:56:35 | 2021-03-04T11:00:54 | null | UTF-8 | Python | false | false | 657 | py | """
urls
Created by: Martin Sicho
On: 4/30/20, 7:59 PM
"""
from django.urls import path, include, re_path
from django.contrib import admin
from allauth.account.views import ConfirmEmailView
urlpatterns = []
urlpatterns += [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
re_path(r'^accounts/registration/account-confirm-email/(?P<key>[-:\w]+)/$', ConfirmEmailView.as_view(),
name='account_confirm_email'),
path(f'api/accounts/rfauth/', include('rest_framework.urls')),
path('api/accounts/', include('rest_auth.urls')),
path('api/accounts/registration/', include('rest_auth.registration.urls')),
] | [
"[email protected]"
] | |
4a4363b17be5d1d57768b191fe9a685a4a95b7a5 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/stdlib-464.py | 6fd0b1b2c131801d99e2804b1f0a3ef65bd17f03 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
            return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
n:int = 10
# Run [-nc, nc] with step size c
s:str = ""
i:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
| [
"[email protected]"
] | |
702470c1af3221320eb66ea71b131c25c4d4499f | 2da708faf710bcca9b6c75b900bdb98dd4e03b2d | /lib/python/celery/app/task.py | c793fc65b94ca4bf7d50f5103ad95c4f243e7cfe | [] | no_license | mozilla/FlightDeck-lib | 7dc8250a863bc92a096ef4939d6b0a5c15646b2f | c9e5405789630c273dca1916b007033bac7f507a | refs/heads/master | 2023-07-03T16:19:30.648487 | 2013-02-25T13:14:11 | 2013-02-25T13:14:11 | 1,096,066 | 0 | 3 | null | 2019-03-29T04:42:18 | 2010-11-19T22:35:44 | Python | UTF-8 | Python | false | false | 30,260 | py | # -*- coding: utf-8 -*-
"""
celery.app.task
~~~~~~~~~~~~~~~
Task Implementation: Task request context, and the base task class.
"""
from __future__ import absolute_import
from __future__ import with_statement
from celery import current_app
from celery import states
from celery.__compat__ import class_property
from celery._state import get_current_worker_task, _task_stack
from celery.datastructures import ExceptionInfo
from celery.exceptions import MaxRetriesExceededError, RetryTaskError
from celery.result import EagerResult
from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
from celery.utils.functional import mattrgetter, maybe_list
from celery.utils.imports import instantiate
from celery.utils.mail import ErrorMail
from .annotations import resolve_all as resolve_all_annotations
from .registry import _unpickle_task
#: extracts attributes related to publishing a message from an object.
extract_exec_options = mattrgetter(
'queue', 'routing_key', 'exchange',
'immediate', 'mandatory', 'priority', 'expires',
'serializer', 'delivery_mode', 'compression',
)
class Context(object):
# Default context
logfile = None
loglevel = None
hostname = None
id = None
args = None
kwargs = None
retries = 0
eta = None
expires = None
is_eager = False
delivery_info = None
taskset = None # compat alias to group
group = None
chord = None
utc = None
called_directly = True
callbacks = None
errbacks = None
timeouts = None
_children = None # see property
_protected = 0
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
self.__dict__.update(*args, **kwargs)
def clear(self):
self.__dict__.clear()
def get(self, key, default=None):
try:
return getattr(self, key)
except AttributeError:
return default
def __repr__(self):
return '<Context: %r>' % (vars(self, ))
@property
def children(self):
        # children must be an empty list for every thread
if self._children is None:
self._children = []
return self._children
class TaskType(type):
"""Meta class for tasks.
Automatically registers the task in the task registry, except
if the `abstract` attribute is set.
    If no `name` attribute is provided, the name is automatically
    set to the name of the module it was defined in, and the class name.
"""
def __new__(cls, name, bases, attrs):
new = super(TaskType, cls).__new__
task_module = attrs.get('__module__') or '__main__'
# - Abstract class: abstract attribute should not be inherited.
if attrs.pop('abstract', None) or not attrs.get('autoregister', True):
return new(cls, name, bases, attrs)
# The 'app' attribute is now a property, with the real app located
# in the '_app' attribute. Previously this was a regular attribute,
# so we should support classes defining it.
_app1, _app2 = attrs.pop('_app', None), attrs.pop('app', None)
app = attrs['_app'] = _app1 or _app2 or current_app
# - Automatically generate missing/empty name.
task_name = attrs.get('name')
if not task_name:
attrs['name'] = task_name = gen_task_name(app, name, task_module)
# - Create and register class.
# Because of the way import happens (recursively)
# we may or may not be the first time the task tries to register
# with the framework. There should only be one class for each task
# name, so we always return the registered version.
tasks = app._tasks
if task_name not in tasks:
tasks.register(new(cls, name, bases, attrs))
instance = tasks[task_name]
instance.bind(app)
return instance.__class__
def __repr__(cls):
if cls._app:
return '<class %s of %s>' % (cls.__name__, cls._app, )
if cls.__v2_compat__:
return '<unbound %s (v2 compatible)>' % (cls.__name__, )
return '<unbound %s>' % (cls.__name__, )
class Task(object):
"""Task base class.
When called tasks apply the :meth:`run` method. This method must
be defined by all tasks (that is unless the :meth:`__call__` method
is overridden).
"""
__metaclass__ = TaskType
__trace__ = None
__v2_compat__ = False # set by old base in celery.task.base
ErrorMail = ErrorMail
MaxRetriesExceededError = MaxRetriesExceededError
#: Execution strategy used, or the qualified name of one.
Strategy = 'celery.worker.strategy:default'
#: This is the instance bound to if the task is a method of a class.
__self__ = None
#: The application instance associated with this task class.
_app = None
#: Name of the task.
name = None
#: If :const:`True` the task is an abstract base class.
abstract = True
#: If disabled the worker will not forward magic keyword arguments.
#: Deprecated and scheduled for removal in v4.0.
accept_magic_kwargs = False
#: Maximum number of retries before giving up. If set to :const:`None`,
#: it will **never** stop retrying.
max_retries = 3
#: Default time in seconds before a retry of the task should be
#: executed. 3 minutes by default.
default_retry_delay = 3 * 60
#: Rate limit for this task type. Examples: :const:`None` (no rate
#: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks
#: a minute),`'100/h'` (hundred tasks an hour)
rate_limit = None
#: If enabled the worker will not store task state and return values
#: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT`
#: setting.
ignore_result = None
#: When enabled errors will be stored even if the task is otherwise
#: configured to ignore results.
store_errors_even_if_ignored = None
#: If enabled an email will be sent to :setting:`ADMINS` whenever a task
#: of this type fails.
send_error_emails = None
#: The name of a serializer that are registered with
#: :mod:`kombu.serialization.registry`. Default is `'pickle'`.
serializer = None
#: Hard time limit.
#: Defaults to the :setting:`CELERY_TASK_TIME_LIMIT` setting.
time_limit = None
#: Soft time limit.
#: Defaults to the :setting:`CELERY_TASK_SOFT_TIME_LIMIT` setting.
soft_time_limit = None
#: The result store backend used for this task.
backend = None
#: If disabled this task won't be registered automatically.
autoregister = True
#: If enabled the task will report its status as 'started' when the task
#: is executed by a worker. Disabled by default as the normal behaviour
#: is to not report that level of granularity. Tasks are either pending,
#: finished, or waiting to be retried.
#:
#: Having a 'started' status can be useful for when there are long
#: running tasks and there is a need to report which task is currently
#: running.
#:
#: The application default can be overridden using the
#: :setting:`CELERY_TRACK_STARTED` setting.
track_started = None
#: When enabled messages for this task will be acknowledged **after**
#: the task has been executed, and not *just before* which is the
#: default behavior.
#:
#: Please note that this means the task may be executed twice if the
#: worker crashes mid execution (which may be acceptable for some
#: applications).
#:
#: The application default can be overridden with the
#: :setting:`CELERY_ACKS_LATE` setting.
acks_late = None
#: Default task expiry time.
expires = None
#: Some may expect a request to exist even if the task has not been
#: called. This should probably be deprecated.
_default_request = None
__bound__ = False
from_config = (
('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'),
('serializer', 'CELERY_TASK_SERIALIZER'),
('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'),
('track_started', 'CELERY_TRACK_STARTED'),
('acks_late', 'CELERY_ACKS_LATE'),
('ignore_result', 'CELERY_IGNORE_RESULT'),
('store_errors_even_if_ignored',
'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
)
__bound__ = False
# - Tasks are lazily bound, so that configuration is not set
# - until the task is actually used
@classmethod
def bind(self, app):
was_bound, self.__bound__ = self.__bound__, True
self._app = app
conf = app.conf
for attr_name, config_name in self.from_config:
if getattr(self, attr_name, None) is None:
setattr(self, attr_name, conf[config_name])
if self.accept_magic_kwargs is None:
self.accept_magic_kwargs = app.accept_magic_kwargs
if self.backend is None:
self.backend = app.backend
# decorate with annotations from config.
if not was_bound:
self.annotate()
from celery.utils.threads import LocalStack
self.request_stack = LocalStack()
# PeriodicTask uses this to add itself to the PeriodicTask schedule.
self.on_bound(app)
return app
@classmethod
def on_bound(self, app):
"""This method can be defined to do additional actions when the
task class is bound to an app."""
pass
@classmethod
def _get_app(self):
if not self.__bound__ or self._app is None:
# The app property's __set__ method is not called
# if Task.app is set (on the class), so must bind on use.
self.bind(current_app)
return self._app
app = class_property(_get_app, bind)
@classmethod
def annotate(self):
for d in resolve_all_annotations(self.app.annotations, self):
for key, value in d.iteritems():
if key.startswith('@'):
self.add_around(key[1:], value)
else:
setattr(self, key, value)
@classmethod
def add_around(self, attr, around):
orig = getattr(self, attr)
if getattr(orig, '__wrapped__', None):
orig = orig.__wrapped__
meth = around(orig)
meth.__wrapped__ = orig
setattr(self, attr, meth)
def __call__(self, *args, **kwargs):
_task_stack.push(self)
self.push_request()
try:
# add self if this is a bound task
if self.__self__ is not None:
return self.run(self.__self__, *args, **kwargs)
return self.run(*args, **kwargs)
finally:
self.pop_request()
_task_stack.pop()
    # - tasks are pickled into the name of the task only, and the receiver
# - simply grabs it from the local registry.
def __reduce__(self):
return (_unpickle_task, (self.name, ), None)
def run(self, *args, **kwargs):
"""The body of the task executed by workers."""
raise NotImplementedError('Tasks must define the run method.')
def start_strategy(self, app, consumer):
return instantiate(self.Strategy, self, app, consumer)
def delay(self, *args, **kwargs):
"""Star argument version of :meth:`apply_async`.
Does not support the extra options enabled by :meth:`apply_async`.
:param \*args: positional arguments passed on to the task.
:param \*\*kwargs: keyword arguments passed on to the task.
        :returns: :class:`celery.result.AsyncResult`
"""
return self.apply_async(args, kwargs)
def apply_async(self, args=None, kwargs=None,
task_id=None, producer=None, connection=None, router=None,
link=None, link_error=None, publisher=None,
add_to_parent=True, **options):
"""Apply tasks asynchronously by sending a message.
:keyword args: The positional arguments to pass on to the
task (a :class:`list` or :class:`tuple`).
:keyword kwargs: The keyword arguments to pass on to the
task (a :class:`dict`)
:keyword countdown: Number of seconds into the future that the
task should execute. Defaults to immediate
execution (do not confuse with the
`immediate` flag, as they are unrelated).
:keyword eta: A :class:`~datetime.datetime` object describing
the absolute time and date of when the task should
be executed. May not be specified if `countdown`
is also supplied. (Do not confuse this with the
`immediate` flag, as they are unrelated).
:keyword expires: Either a :class:`int`, describing the number of
seconds, or a :class:`~datetime.datetime` object
that describes the absolute time and date of when
the task should expire. The task will not be
executed after the expiration time.
:keyword connection: Re-use existing broker connection instead
of establishing a new one.
:keyword retry: If enabled sending of the task message will be retried
in the event of connection loss or failure. Default
is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY`
setting. Note you need to handle the
producer/connection manually for this to work.
:keyword retry_policy: Override the retry policy used. See the
:setting:`CELERY_TASK_PUBLISH_RETRY` setting.
:keyword routing_key: Custom routing key used to route the task to a
worker server. If in combination with a
``queue`` argument only used to specify custom
routing keys to topic exchanges.
:keyword queue: The queue to route the task to. This must be a key
present in :setting:`CELERY_QUEUES`, or
:setting:`CELERY_CREATE_MISSING_QUEUES` must be
enabled. See :ref:`guide-routing` for more
information.
:keyword exchange: Named custom exchange to send the task to.
Usually not used in combination with the ``queue``
argument.
:keyword priority: The task priority, a number between 0 and 9.
Defaults to the :attr:`priority` attribute.
:keyword serializer: A string identifying the default
serialization method to use. Can be `pickle`,
`json`, `yaml`, `msgpack` or any custom
serialization method that has been registered
with :mod:`kombu.serialization.registry`.
Defaults to the :attr:`serializer` attribute.
:keyword compression: A string identifying the compression method
to use. Can be one of ``zlib``, ``bzip2``,
or any custom compression methods registered with
:func:`kombu.compression.register`. Defaults to
the :setting:`CELERY_MESSAGE_COMPRESSION`
setting.
:keyword link: A single, or a list of subtasks to apply if the
task exits successfully.
:keyword link_error: A single, or a list of subtasks to apply
if an error occurs while executing the task.
        :keyword producer: :class:`~celery.app.amqp.TaskProducer` instance to use.
:keyword add_to_parent: If set to True (default) and the task
is applied while executing another task, then the result
will be appended to the parent tasks ``request.children``
attribute.
:keyword publisher: Deprecated alias to ``producer``.
Also supports all keyword arguments supported by
:meth:`kombu.messaging.Producer.publish`.
.. note::
If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will
be replaced by a local :func:`apply` call instead.
"""
producer = producer or publisher
app = self._get_app()
router = router or self.app.amqp.router
conf = app.conf
# add 'self' if this is a bound method.
if self.__self__ is not None:
args = (self.__self__, ) + tuple(args)
if conf.CELERY_ALWAYS_EAGER:
return self.apply(args, kwargs, task_id=task_id, **options)
options = dict(extract_exec_options(self), **options)
options = router.route(options, self.name, args, kwargs)
if connection:
producer = app.amqp.TaskProducer(connection)
with app.producer_or_acquire(producer) as P:
task_id = P.publish_task(self.name, args, kwargs,
task_id=task_id,
callbacks=maybe_list(link),
errbacks=maybe_list(link_error),
**options)
result = self.AsyncResult(task_id)
if add_to_parent:
parent = get_current_worker_task()
if parent:
parent.request.children.append(result)
return result
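    # Usage sketch (illustrative only; ``add`` is a hypothetical task, not defined here):
    #
    #     @task()
    #     def add(x, y):
    #         return x + y
    #
    #     result = add.apply_async(args=(2, 2), countdown=10)
    #     result.get()    # waits for the result backend to report the value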
def subtask_from_request(self, request=None, args=None, kwargs=None,
**extra_options):
request = self.request if request is None else request
args = request.args if args is None else args
kwargs = request.kwargs if kwargs is None else kwargs
delivery_info = request.delivery_info or {}
options = {
'task_id': request.id,
'link': request.callbacks,
'link_error': request.errbacks,
'exchange': delivery_info.get('exchange'),
'routing_key': delivery_info.get('routing_key')
}
return self.subtask(args, kwargs, options, type=self, **extra_options)
def retry(self, args=None, kwargs=None, exc=None, throw=True,
eta=None, countdown=None, max_retries=None, **options):
"""Retry the task.
:param args: Positional arguments to retry with.
:param kwargs: Keyword arguments to retry with.
:keyword exc: Optional exception to raise instead of
:exc:`~celery.exceptions.MaxRetriesExceededError`
when the max restart limit has been exceeded.
:keyword countdown: Time in seconds to delay the retry for.
:keyword eta: Explicit time and date to run the retry at
(must be a :class:`~datetime.datetime` instance).
:keyword max_retries: If set, overrides the default retry limit.
:keyword \*\*options: Any extra options to pass on to
meth:`apply_async`.
:keyword throw: If this is :const:`False`, do not raise the
:exc:`~celery.exceptions.RetryTaskError` exception,
that tells the worker to mark the task as being
retried. Note that this means the task will be
marked as failed if the task raises an exception,
or successful if it returns.
:raises celery.exceptions.RetryTaskError: To tell the worker that
the task has been re-sent for retry. This always happens,
unless the `throw` keyword argument has been explicitly set
to :const:`False`, and is considered normal operation.
**Example**
.. code-block:: python
>>> @task()
>>> def tweet(auth, message):
... twitter = Twitter(oauth=auth)
... try:
... twitter.post_status_update(message)
... except twitter.FailWhale, exc:
... # Retry in 5 minutes.
... raise tweet.retry(countdown=60 * 5, exc=exc)
Although the task will never return above as `retry` raises an
        exception to notify the worker, we use `raise` in front of the retry
to convey that the rest of the block will not be executed.
"""
request = self.request
retries = request.retries + 1
max_retries = self.max_retries if max_retries is None else max_retries
# Not in worker or emulated by (apply/always_eager),
# so just raise the original exception.
if request.called_directly:
maybe_reraise() # raise orig stack if PyErr_Occurred
raise exc or RetryTaskError('Task can be retried', None)
if not eta and countdown is None:
countdown = self.default_retry_delay
S = self.subtask_from_request(
request, args, kwargs,
countdown=countdown, eta=eta, retries=retries,
)
if max_retries is not None and retries > max_retries:
if exc:
maybe_reraise()
raise self.MaxRetriesExceededError(
"""Can't retry %s[%s] args:%s kwargs:%s""" % (
self.name, request.id, S.args, S.kwargs))
# If task was executed eagerly using apply(),
# then the retry must also be executed eagerly.
S.apply().get() if request.is_eager else S.apply_async()
ret = RetryTaskError(exc=exc, when=eta or countdown)
if throw:
raise ret
return ret
def apply(self, args=None, kwargs=None, **options):
"""Execute this task locally, by blocking until the task returns.
:param args: positional arguments passed on to the task.
:param kwargs: keyword arguments passed on to the task.
:keyword throw: Re-raise task exceptions. Defaults to
the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS`
setting.
:rtype :class:`celery.result.EagerResult`:
"""
# trace imports Task, so need to import inline.
from celery.task.trace import eager_trace_task
app = self._get_app()
args = args or ()
# add 'self' if this is a bound method.
if self.__self__ is not None:
args = (self.__self__, ) + tuple(args)
kwargs = kwargs or {}
task_id = options.get('task_id') or uuid()
retries = options.get('retries', 0)
throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS',
options.pop('throw', None))
# Make sure we get the task instance, not class.
task = app._tasks[self.name]
request = {'id': task_id,
'retries': retries,
'is_eager': True,
'logfile': options.get('logfile'),
'loglevel': options.get('loglevel', 0),
'delivery_info': {'is_eager': True}}
if self.accept_magic_kwargs:
default_kwargs = {'task_name': task.name,
'task_id': task_id,
'task_retries': retries,
'task_is_eager': True,
'logfile': options.get('logfile'),
'loglevel': options.get('loglevel', 0),
'delivery_info': {'is_eager': True}}
supported_keys = fun_takes_kwargs(task.run, default_kwargs)
extend_with = dict((key, val)
for key, val in default_kwargs.items()
if key in supported_keys)
kwargs.update(extend_with)
tb = None
retval, info = eager_trace_task(task, task_id, args, kwargs,
request=request, propagate=throw)
if isinstance(retval, ExceptionInfo):
retval, tb = retval.exception, retval.traceback
state = states.SUCCESS if info is None else info.state
return EagerResult(task_id, retval, state, traceback=tb)
def AsyncResult(self, task_id):
"""Get AsyncResult instance for this kind of task.
:param task_id: Task id to get result for.
"""
return self._get_app().AsyncResult(task_id, backend=self.backend,
task_name=self.name)
def subtask(self, *args, **kwargs):
"""Returns :class:`~celery.subtask` object for
this task, wrapping arguments and execution options
for a single task invocation."""
from celery.canvas import subtask
return subtask(self, *args, **kwargs)
def s(self, *args, **kwargs):
"""``.s(*a, **k) -> .subtask(a, k)``"""
return self.subtask(args, kwargs)
def si(self, *args, **kwargs):
"""``.si(*a, **k) -> .subtask(a, k, immutable=True)``"""
return self.subtask(args, kwargs, immutable=True)
def chunks(self, it, n):
"""Creates a :class:`~celery.canvas.chunks` task for this task."""
from celery import chunks
return chunks(self.s(), it, n)
def map(self, it):
"""Creates a :class:`~celery.canvas.xmap` task from ``it``."""
from celery import xmap
return xmap(self.s(), it)
def starmap(self, it):
"""Creates a :class:`~celery.canvas.xstarmap` task from ``it``."""
from celery import xstarmap
return xstarmap(self.s(), it)
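    # Sketch of the helpers above (illustrative; ``add`` is a hypothetical two-argument task):
    #
    #     add.s(2, 2)                                    # a reusable signature (subtask)
    #     add.starmap([(2, 2), (4, 4)])                  # add(*args) for each tuple
    #     add.chunks(zip(range(100), range(100)), 10)    # same work, split into chunks of 10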
def update_state(self, task_id=None, state=None, meta=None):
"""Update task state.
:keyword task_id: Id of the task to update, defaults to the
id of the current task
:keyword state: New state (:class:`str`).
:keyword meta: State metadata (:class:`dict`).
"""
if task_id is None:
task_id = self.request.id
self.backend.store_result(task_id, meta, state)
def on_success(self, retval, task_id, args, kwargs):
"""Success handler.
Run by the worker if the task executes successfully.
:param retval: The return value of the task.
:param task_id: Unique id of the executed task.
:param args: Original arguments for the executed task.
:param kwargs: Original keyword arguments for the executed task.
The return value of this handler is ignored.
"""
pass
def on_retry(self, exc, task_id, args, kwargs, einfo):
"""Retry handler.
This is run by the worker when the task is to be retried.
:param exc: The exception sent to :meth:`retry`.
:param task_id: Unique id of the retried task.
:param args: Original arguments for the retried task.
:param kwargs: Original keyword arguments for the retried task.
:keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
instance, containing the traceback.
The return value of this handler is ignored.
"""
pass
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Error handler.
This is run by the worker when the task fails.
:param exc: The exception raised by the task.
:param task_id: Unique id of the failed task.
:param args: Original arguments for the task that failed.
:param kwargs: Original keyword arguments for the task
that failed.
:keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
instance, containing the traceback.
The return value of this handler is ignored.
"""
pass
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""Handler called after the task returns.
:param status: Current task state.
:param retval: Task return value/exception.
:param task_id: Unique id of the task.
:param args: Original arguments for the task that failed.
:param kwargs: Original keyword arguments for the task
that failed.
:keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
instance, containing the traceback (if any).
The return value of this handler is ignored.
"""
pass
def send_error_email(self, context, exc, **kwargs):
if self.send_error_emails and \
not getattr(self, 'disable_error_emails', None):
self.ErrorMail(self, **kwargs).send(context, exc)
def execute(self, request, pool, loglevel, logfile, **kwargs):
"""The method the worker calls to execute the task.
:param request: A :class:`~celery.worker.job.Request`.
:param pool: A task pool.
:param loglevel: Current loglevel.
:param logfile: Name of the currently used logfile.
:keyword consumer: The :class:`~celery.worker.consumer.Consumer`.
"""
request.execute_using_pool(pool, loglevel, logfile)
def push_request(self, *args, **kwargs):
self.request_stack.push(Context(*args, **kwargs))
def pop_request(self):
self.request_stack.pop()
def __repr__(self):
"""`repr(task)`"""
return '<@task: %s>' % (self.name, )
def _get_request(self):
"""Get current request object."""
req = self.request_stack.top
if req is None:
# task was not called, but some may still expect a request
# to be there, perhaps that should be deprecated.
if self._default_request is None:
self._default_request = Context()
return self._default_request
return req
request = property(_get_request)
@property
def __name__(self):
return self.__class__.__name__
BaseTask = Task # compat alias
| [
"[email protected]"
] | |
b55369c3e074890c1d761da477257d3372efcd96 | d2e029233e08ea2b7f806728fb6fdb4313992d1d | /Python Fundamentals/for loop.py | 303b1615818d69f5568318b74b3521dc1fecb333 | [] | no_license | pvr30/Python-Tutorial | f0ccc6c6af2346afc656e5f1f98bae69a58bda6d | 3c4b968d0e0efbf454fbf9a9f98cd630a288b2d9 | refs/heads/master | 2023-06-02T10:08:50.891627 | 2021-06-21T16:36:11 | 2021-06-21T16:36:11 | 378,997,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # for loop
for index in range(10):
print(index)
print("\n")
for index in range(5,10):
print(index)
print("\n")
for i in range(1,20,2):
    print(i) # prints the odd numbers from 1 to 19 (start 1, stop 20, step 2)
# Print list.
name = ["Harsh","Sanjay","Sahil","Manthan"]
for index in name:
print(index)
# Print list of dictionary .
student_grade = [
{"name": "Harsh", "grade": 100},
{"name": "Sahil", "grade": 80},
{"name": "Manthan", "grade": 10},
]
for student in student_grade:
name = student["name"]
grade = student["grade"]
print(f"{name} got {grade} marks in Exam.")
| [
"[email protected]"
] | |
3cb38ba06488f204e037b03a48de04afff964cc6 | ddefb1ad0ba5939b66e3f5e8fae5fb8a88f03d4a | /四级/四级.py | 56a7d411d403c8ba85fa9f1d76dcb5e3d96676c2 | [] | no_license | github3332422/case | 45ea6a8f7528affa9b724bb31088c8c8c9f0cdf2 | fb86da7f94c8ce24f142e1d34d2cec1f6e21a81f | refs/heads/master | 2020-05-19T13:28:37.379740 | 2019-05-05T14:15:49 | 2019-05-05T14:15:49 | 185,039,295 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Time:2018/8/24 9:37
# @Author: wardseptember
# @File: CountHighFrequencyWords.py
import re
excludes = ['the', 'of', 'to', 'and', 'in', 'a', 'is', 'were', 'was', 'you',
'I', 'he', 'his', 'there', 'those', 'she', 'her', 'their',
'that', '[a]', '[b]', '[c]', '[d]', 'them', 'or','for','as',
'are','on','it','be','with','by','have','from','not','they',
'more','but','an','at','we','has','can','this','your','which','will',
'one','should','points)','________','________.','all','than','what',
'people','if','been','its','new','our','would','part','may','some','i',
'who','answer','when','most','so','section','no','into','do','only',
'each','other','following','had','such','much','out','--','up','these',
'even','how','directions:','use','because','(10','time','(15','[d].',
'-','it.','[b],','[a],','however,','1','c','1.','2.','b','d','a','(10',
'2','12.','13.','29.','3.','4.','5.','6.','7.','8.','9.','10.','11.','14.',
'15.']
# Filter out further common words yourself; the list above is not exhaustive
def getTxt():
txt = open('86_17_1_2.txt').read()
txt = txt.lower()
    for ch in '!"@#$%^&*()+,-./:;<=>?@[]_`~{|}': # replace special characters with spaces
        txt = txt.replace(ch, ' ')  # str.replace returns a new string, so reassign it
return txt
# 1. Get the text
EngTxt = getTxt()
# 2. Split it into a list of words
txtArr = EngTxt.split()
# 3. Count the words, skipping the excluded ones
counts = {}
for word in txtArr:
    if word in excludes:
        continue
    counts[word] = counts.get(word, 0) + 1
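# Note: the counting loop above is equivalent to using collections.Counter,
# shown here only as an alternative sketch:
#   from collections import Counter
#   counts = Counter(w for w in txtArr if w not in excludes)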
# 4. Convert the dict to a list of (word, count) pairs so it can be sorted and printed
countsList = list(counts.items())
countsList.sort(key=lambda x: x[1], reverse=True)  # sort by count, descending
# 5. Write the output to a file
with open('output_3.txt', 'a+') as f:
    for word, count in countsList:
        f.write(word + ' : ' + str(count) + ' times\n')
#print('{0:<10}{1:>5}'.format(word,count)) | [
"[email protected]"
] | |
dec8f4a34c5465e586dbdbcddfbb3d2c85cc27bc | 8f81a0ff4a5bef52bdc23decfdd21183040451f1 | /docs/conf.py | e411946184148cfbcbb42fbc600b02373eb15ba1 | [
"MIT"
] | permissive | shimw6828/sramongo | 22faad838376a2b264cb003c937522b72b67d6d0 | 7b73a2a71c9c12160da7d0aaa1537a615a356136 | refs/heads/master | 2022-11-14T18:33:17.122167 | 2020-07-02T19:42:09 | 2020-07-02T19:42:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,163 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sramongo documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 5 14:49:04 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sramongo'
copyright = '2017, Justin Fear'
author = 'Justin Fear'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `_todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sramongodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sramongo.tex', 'sramongo Documentation',
'Justin Fear', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sramongo', 'sramongo Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sramongo', 'sramongo Documentation',
author, 'sramongo', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Options for Napoleon ----------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| [
"[email protected]"
] | |
15ef14fad6965eb4f3742e852d29527029f1f162 | 09df68c721f724bdfa1d00f18d5ee6fffba548df | /NK_25.py | f53bf49e6d6c9a353293f13fe6d37c671183f4ed | [] | no_license | NeilWangziyu/JZOffer | c8cde80cf54545244ebeb3a9fc88d5ac6b832622 | ce29ea836bd20841d69972180273e4d4ec11514d | refs/heads/master | 2020-04-23T06:19:48.434829 | 2019-09-25T12:35:38 | 2019-09-25T12:35:38 | 170,969,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | n,m = list(map(int,input().split()))
def getChildCount(pre,mx):
ans = 1
p = 10
while pre * p <= mx:
if pre * p + p - 1 <= mx:
ans += p
else:
ans += mx - pre * p + 1
p *= 10
return ans
def find(m,mx):
ans = 1
while m != 0:
v = getChildCount(ans, mx)
if v < m:
ans += 1
m -= v
continue
if m == 1:
break
ans *= 10
m -= 1
return ans
print(find(m,n))
# Find the number at a given position when 1..n are listed in lexicographic (dictionary) order
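# Worked example (inputs assumed for illustration): with n=13, m=2 the numbers
# 1..13 in dictionary order are 1, 10, 11, 12, 13, 2, 3, ..., 9, so the 2nd
# value is 10, and find(2, 13) indeed returns 10.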
# http://www.lyqhahaha.xyz/P/64/ | [
"[email protected]"
] | |
73c1a9a4086a951478d1b79d38eca31ad7308602 | 84046429e40890d1c0e1064726db0cf359dbadcf | /django_git_deploy/__init__.py | e8400d0c61ee787ebe7d4c3f9c382fed2532bc11 | [] | no_license | joakim-hove/django-git-deploy | 8cb346ccff632209df8a5d5fcb69eb6b1b7d6191 | 6413fa7d1b71258a2f176b1630dbc4ac602bbb49 | refs/heads/master | 2022-06-07T05:42:53.915611 | 2022-05-23T21:32:59 | 2022-05-23T21:32:59 | 183,873,411 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,858 | py | import sys
import os
import os.path
import subprocess
import shutil
import time
import yaml
import fnmatch
from contextlib import contextmanager
@contextmanager
def env_context(env):
env0 = os.environ.copy()
for key,value in env.items():
if value is None:
if key in os.environ:
del os.environ[key]
else:
os.environ[key] = value
yield
    # restore the original environment in full (mutating os.environ while iterating
    # over it is unsafe, and variables removed inside the context must come back too)
    os.environ.clear()
    os.environ.update(env0)
@contextmanager
def pushd(path):
cwd0 = os.getcwd()
if path:
os.chdir(path)
yield
os.chdir(cwd0)
class Config(object):
config_file = "deploy_config.yml"
def __init__(self, config_file = "hooks/{}".format(config_file)):
self.data = yaml.safe_load(open(config_file))
for config_branch,config in self.data.items():
if not "path" in config:
raise OSError("Must have a path setting in the branch payload")
path = config["path"]
if not os.path.isdir(path):
print("path: {} does not exist".format(path))
raise OSError("The path setting must point to an existing directory")
if os.path.isdir(os.path.join(path, ".git")):
raise OSError("Target path should not be the git repository")
self.repo, _ = os.path.splitext( os.path.basename( os.getcwd() ))
self.repo_path = os.path.dirname( os.getcwd() )
def config_branch(self, git_branch):
for config_branch in self.data.keys():
if fnmatch.fnmatch(git_branch, config_branch):
return config_branch
return None
def path(self, config_branch):
return self.data[config_branch]["path"]
def script(self, config_branch):
return self.data[config_branch].get("script")
def env(self, config_branch):
return self.data[config_branch].get("env", {})
def reload_apache():
subprocess.call( ["sudo", "systemctl", "reload", "apache2"])
def update_wc(git_branch, conf):
config_branch = conf.config_branch(git_branch)
if config_branch is None:
return
path = conf.path(config_branch)
env = {"GIT_DIR" : None, "GIT_WORK_TREE": None}
env.update(conf.env(config_branch))
with env_context(env):
with pushd(path):
if not os.path.isdir(conf.repo):
subprocess.call(["git", "clone", "--recursive" , "{}/{}".format(conf.repo_path, conf.repo)])
os.chdir(conf.repo)
cmd_list = [["git" , "fetch" , "origin"],
["git" , "reset" , "--hard","origin/%s" % git_branch]]
static_source = os.path.join( path , conf.repo, "staticfiles" )
if not os.path.isdir( static_source ):
os.mkdir( static_source )
for cmd in cmd_list:
print("[{}/{}]: {}".format(path, conf.repo, " ".join(cmd)))
subprocess.call( cmd ,
stdout = open(os.devnull , "w") ,
stderr = open(os.devnull , "w") )
script = conf.script(config_branch)
if script:
if os.path.isfile(script) and os.access(script, os.X_OK):
path, f = os.path.split(script)
with pushd(path):
subprocess.call([os.path.abspath(f)])
else:
print("script path: {} does not exist".format(script))
raise OSError("Script does not exist")
def post_receive():
conf = Config()
for line in sys.stdin.readlines():
(_, _, ref) = line.split()
git_branch = ref.split("/")[-1]
update_wc(git_branch, conf)
reload_apache()
def deploy(branch):
conf = Config()
update_wc(branch, conf)
reload_apache()
def make_hook():
_, hook_path = os.path.split(os.path.abspath(os.getcwd()))
if hook_path != "hooks":
raise OSError("The make_hook script must be invoked from the hooks/ directory in a git repo")
if os.path.exists("post-receive.sample"):
print("Removing existing post-receive.sample file")
if os.path.exists("post-receive.sample"):
os.unlink("post-receive.sample")
with open("post-receive", "w") as f:
f.write("""#!/usr/bin/env python3
from django_git_deploy import post_receive
post_receive()
""")
os.chmod("post-receive", 0o755)
if not os.path.exists(Config.config_file):
d = {"master": {"path": "/path/to/deploy/master",
"env": {"KEY1" : "VALUE1"}}}
with open(Config.config_file, "w") as f:
f.write(yaml.dump(d))
print("Sample configuration stored in: {}".format(Config.config_file))
| [
"[email protected]"
] | |
4aeedb82b2fb9519b3d200ad0e488617b17fae76 | b194dbc7889e8175993a76f2d2f65a4b81c5d725 | /statistics_hypothesis_testing/18_one_sided_p_value.py | 1b94a09124221130a1bc99028b8c1cb0ed6514c0 | [] | no_license | rahulsaini/oreilly_math_fundamentals_data_science | f96d40f5684c7b1cd993993d82931694afea6976 | 888326dfdea37f5a9b336d37e127a72ae4512a5a | refs/heads/main | 2023-05-18T22:32:32.928472 | 2021-06-08T13:07:22 | 2021-06-08T13:07:22 | 372,832,069 | 1 | 0 | null | 2021-06-01T13:02:53 | 2021-06-01T13:02:52 | null | UTF-8 | Python | false | false | 197 | py | from scipy.stats import norm
# A cold has an 18-day mean recovery time with a 1.5-day standard deviation
mean = 18
std_dev = 1.5
# Probability of recovering in 16 or fewer days
x = norm.cdf(16, mean, std_dev)
print(x) # 0.09121121972586788
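# For the opposite (upper) tail, e.g. recovery taking 21 or more days, the
# one-sided p-value would use the survival function instead (a sketch, not part
# of the original exercise):
#   norm.sf(21, mean, std_dev)    # same as 1 - norm.cdf(21, mean, std_dev)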
| [
"[email protected]"
] | |
28643ca7f0873fb86a9b6ce8a36119c0b6719ae3 | f1600240f3bbadfa8f190c165bd40b0f74110652 | /model/core/XML_CNN.py | 77d8c627d999b273d9fe1bfd6474da3313acf7df | [] | no_license | shubhampachori12110095/XMTC | b45801e143710e97ad8098ee028b4c44b22cb110 | b93a8a78c7799461b4853006f5cd7a0fc4fcdc67 | refs/heads/master | 2020-04-22T23:26:19.869742 | 2018-04-29T01:56:55 | 2018-04-29T01:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,965 | py | '''
Created on Jan, 2018
@author: FrancesZhou
'''
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
class XML_CNN(object):
def __init__(self, max_seq_len, word_embedding, filter_sizes, label_output_dim, hidden_dim, args):
self.max_seq_len = max_seq_len
self.word_embedding_dim = word_embedding.shape[-1]
self.filter_sizes = filter_sizes
self.label_output_dim = label_output_dim
self.num_filters = args.num_filters
self.pooling_units = args.pooling_units
self.hidden_dim = hidden_dim
self.batch_size = args.batch_size
self.dropout_keep_prob = args.dropout_keep_prob
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer()
self.word_embedding = tf.constant(word_embedding, dtype=tf.float32)
#self.x = tf.placeholder(tf.float32, [self.batch_size, self.max_seq_len, self.word_embedding_dim])
self.x = tf.placeholder(tf.int32, [self.batch_size, self.max_seq_len])
self.y = tf.placeholder(tf.float32, [self.batch_size, self.label_output_dim])
def build_model(self):
# x: [batch_size, self.max_seq_len, self.embedding_dim]
# y: [batch_size, self.label_output_dim]
x = tf.nn.embedding_lookup(self.word_embedding, self.x)
x_expand = tf.expand_dims(x, axis=-1)
y = self.y
# dropout
#with tf.name_scope('dropout'):
# x_expand = tf.nn.dropout(x_expand, keep_prob=0.25)
conv_outputs = []
for i, filter_size in enumerate(self.filter_sizes):
with tf.name_scope('convolution-pooling-{0}'.format(filter_size)):
# ============= convolution ============
filter = tf.get_variable('filter-{0}'.format(filter_size),
[filter_size, self.word_embedding_dim, 1, self.num_filters],
initializer=self.weight_initializer)
conv = tf.nn.conv2d(x_expand, filter, strides=[1,1,1,1], padding='VALID', name='conv')
b = tf.get_variable('b-{0}'.format(filter_size), [self.num_filters])
conv_b = tf.nn.relu(tf.nn.bias_add(conv, b), 'relu')
# conv_b: [batch_size, seqence_length-filter_size+1, 1, num_filters]
# ============= dynamic max pooling =================
pool_size = (self.max_seq_len - filter_size + 1) // self.pooling_units
pool_out = tf.nn.max_pool(conv_b, ksize=[1, pool_size, 1, 1],
strides=[1, pool_size, 1, 1], padding='VALID', name='dynamic-max-pooling')
# pool_out: [batch_size, pooling_units, 1, num_filters]
pool_out = tf.reshape(pool_out, [self.batch_size, -1])
conv_outputs.append(pool_out)
all_features = tf.concat(conv_outputs, -1)
# dropout
# with tf.name_scope('dropout'):
# fea_dropout = tf.nn.dropout(all_features, keep_prob=self.dropout_keep_prob)
with tf.name_scope('output'):
fea_dim = all_features.get_shape().as_list()[-1]
# bottlenetck layer
w_b = tf.get_variable('bottleneck_w', [fea_dim, self.hidden_dim], initializer=self.weight_initializer)
l_hidden = tf.nn.relu(tf.matmul(all_features, w_b), 'relu')
# dropout layer
l_hidden_dropout = tf.nn.dropout(l_hidden, keep_prob=self.dropout_keep_prob)
# output layer
w_o = tf.get_variable('output_w', [self.hidden_dim, self.label_output_dim], initializer=self.weight_initializer)
#y_ = tf.nn.relu(tf.matmul(l_hidden_dropout, w_o), 'relu')
y_ = tf.matmul(l_hidden_dropout, w_o)
# loss
loss = tf.losses.sigmoid_cross_entropy(y, y_)
#print loss.get_shape().as_list()
return y_, y_, loss
| [
"[email protected]"
] | |
96df624ea6bd582ebb0abe1224a3de5593c6665c | 393c8e9d26f89c9b4f1f24a160970dee437a36db | /shortener/forms.py | 262f395e21bc7cd33d5826e02a21863f9680c29b | [] | no_license | victorsemenov1980/Shorten-URL-Django | eb12b04573e7cd562e46510afb24062e0383eb39 | 5a854d8e061ea88c5b7abe992d6414dea1e06120 | refs/heads/master | 2022-11-05T06:52:24.551215 | 2020-06-17T11:13:34 | 2020-06-17T11:13:34 | 272,881,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 00:10:37 2020
@author: user
"""
from django import forms
from .models import url
class UrlForm(forms.Form):
url = forms.URLField(label='URL',)
hash_ = forms.CharField(label='Hash value - optional', max_length=15,required=False,)
| [
"[email protected]"
] | |
eb584a69683572fc29dd1442da6c274d9cd1a144 | 026991d5749c55910f4c33cc6f35a778494ef89e | /Laboratory Works/Lab_7/Problems/Informatics/Loops/While/3061.py | 879e83a2205f46104461678f04a9db5bdac7ec7b | [
"MIT"
] | permissive | diable201/WEB-development | e832df0e35a837fc5464d6b0dada1c8fd8c9783b | 370bd731b9a65a1658033a60c63abece11d4e259 | refs/heads/master | 2023-06-02T03:46:20.786310 | 2021-06-28T15:57:11 | 2021-06-28T15:57:11 | 336,358,294 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | n = int(input())
k = 0
while 2 ** k < n:
k += 1
print(k)
| [
"[email protected]"
] | |
b5c607c76cf485cef3717ca12fb01cacd49003ca | c78ce4f66cc964c230ad60fbf2ced6b4811eab89 | /0x04-python-more_data_structures/8-simple_delete.py | 80e9e4f0e0199a69a96ccc4a5f11e897b5024a38 | [] | no_license | jebichii/holbertonschool-higher_level_programming-1 | 89026557909851dd775ae355f036db89ebd9adb9 | 741953aa479af90e8eac6f1315415eff4a20224f | refs/heads/master | 2023-03-15T14:58:27.062528 | 2020-06-11T07:21:23 | 2020-06-11T07:21:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | #!/usr/bin/python3
def simple_delete(a_dictionary, key=""):
""" Delete a dictionary entry
"""
if a_dictionary is not None:
try:
del a_dictionary[key]
except KeyError:
pass
return a_dictionary
return None
| [
"[email protected]"
] | |
c326be87999ac18c9ef83bfc0529c64deb3029dd | 84e3fa6f0111a48c56184b1239c0ad3e4feada99 | /cina_env/bin/django-admin.py | 2e78a97b2fa1f113d82502632a5cc1685828ae9e | [] | no_license | shin04/cina_backend | 75d0d8383c5e26df860abfa8ab8d6f380b97440f | a918be430f41049b4a154da8c9ab73633d2b4d50 | refs/heads/master | 2023-08-15T11:05:30.187250 | 2020-05-24T06:09:47 | 2020-05-24T06:09:47 | 254,606,663 | 0 | 0 | null | 2021-09-22T18:51:43 | 2020-04-10T10:24:48 | Python | UTF-8 | Python | false | false | 177 | py | #!/Users/kajiwarataishin/Desktop/cina/cina_backend/cina_env/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
e0e4a5522cfe5345bb65f2fdec29cfc93476dd42 | 3c19870b8e937e5e360ad3cb97453e7e0d0e755f | /smartphoniker_wiki/urls.py | 35b952496d8a0d687a254508c0fce7ab94d72c5a | [] | no_license | M0r13n/wiki | 37862d63f52565efc692c41cc762700de686b236 | 9b263771bb4554c67e52c07b38a845f53d6e97b7 | refs/heads/master | 2022-11-15T22:51:42.615490 | 2020-07-11T09:35:51 | 2020-07-11T09:35:51 | 278,831,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.http.response import HttpResponse
from django.urls import include
from django.urls import re_path
from django.views.static import serve as static_serve
admin.autodiscover()
urlpatterns = [
re_path(r"^admin/", admin.site.urls),
re_path(r"^robots.txt", lambda _: HttpResponse("User-agent: *\nDisallow: /")),
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += [
re_path(
r"^media/(?P<path>.*)$",
static_serve,
{"document_root": settings.MEDIA_ROOT},
),
]
if settings.DEBUG:
try:
import debug_toolbar
urlpatterns = [
re_path("__debug__/", include(debug_toolbar.urls)),
# For django versions before 2.0:
# url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
except ImportError:
pass
urlpatterns += [
re_path(r"^notify/", include("django_nyt.urls")),
re_path(r"", include("wiki.urls")),
]
handler500 = "smartphoniker_wiki.views.server_error"
handler404 = "smartphoniker_wiki.views.page_not_found"
| [
"[email protected]"
] | |
10e66a88c03e25b636ca4c2dfb9e251d60931935 | 8ccbadcfaebb9148b60978fc89290291625c14ec | /resolwe/storage/tests/test_views.py | 3a61280c490fa45e4e5ce80f30f6490d8fb66848 | [
"Apache-2.0"
] | permissive | romunov/resolwe | f7e426c5965917045e23608c6c52a44b78d735c9 | 11a06a9d741dcc999253246919a0abc12127fd2a | refs/heads/master | 2021-07-11T16:07:17.015683 | 2021-02-21T18:33:07 | 2021-02-25T13:49:02 | 229,769,285 | 0 | 0 | Apache-2.0 | 2019-12-23T14:36:02 | 2019-12-23T14:36:01 | null | UTF-8 | Python | false | false | 1,265 | py | """Test resolwe.storage.views."""
import json
from unittest.mock import MagicMock, patch
from django.test import TestCase
from rest_framework import status
from resolwe.storage.views import UriResolverView
class UriResolverViewTest(TestCase):
"""Test UriResolverView."""
@patch("resolwe.storage.views.DataBrowseView._get_datum")
@patch("resolwe.storage.views.DataBrowseView._get_response")
def test_get(self, get_response_mock, get_datum_mock):
"""Test get method."""
get_datum_mock.return_value = MagicMock()
get_response_mock.side_effect = [
("signed_url1", True),
("dir_structure", False),
("signed_url2", True),
]
request = MagicMock()
request.GET.getlist.return_value = [
"123/file1.txt",
"456/dir",
"789/dir/file2.txt",
]
response = UriResolverView().get(request)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
json.loads(response.content.decode("utf-8")),
{
"123/file1.txt": "signed_url1",
"456/dir": "dir_structure",
"789/dir/file2.txt": "signed_url2",
},
)
| [
"[email protected]"
] | |
b0e870438ce45f557046f6ba32280a0ed8293072 | bee9ab30f3230831030a74ecc4698d951011c0d2 | /scripts/cnocr_predict.py | ab3b94ccf9178faec1c4bb57b4e74e16cd0bae31 | [
"NCSA",
"Zlib",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause-Views",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | DCMMC/cnocr | 2493f5f175cc4847cd75783eb2922a9c2a4ca789 | ea66f8e87c155ff1959fdc1c3e526acd0dac8c74 | refs/heads/master | 2022-04-24T15:27:14.393242 | 2020-04-25T13:28:27 | 2020-04-25T13:28:27 | 258,252,551 | 0 | 0 | Apache-2.0 | 2020-04-23T15:43:07 | 2020-04-23T15:43:06 | null | UTF-8 | Python | false | false | 1,963 | py | # coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" An example of predicting CAPTCHA image data with a LSTM network pre-trained with a CTC loss"""
from __future__ import print_function
import sys
import os
import logging
import argparse
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cnocr import CnOcr
from cnocr.utils import set_logger
logger = set_logger(log_level=logging.INFO)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name", help="model name", type=str, default='conv-lite-fc'
)
parser.add_argument("--model_epoch", type=int, default=None, help="model epoch")
parser.add_argument("-f", "--file", help="Path to the image file")
parser.add_argument(
"-s",
"--single-line",
default=False,
help="Whether the image only includes one-line characters",
)
args = parser.parse_args()
ocr = CnOcr(model_name=args.model_name, model_epoch=args.model_epoch)
if args.single_line:
res = ocr.ocr_for_single_line(args.file)
else:
res = ocr.ocr(args.file)
logger.info("Predicted Chars: %s", res)
if __name__ == '__main__':
main()
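# Example invocation from a shell (illustrative only; the image path below is an assumption,
# not part of the original script):
#   python scripts/cnocr_predict.py --model_name conv-lite-fc -f examples/sample.png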
| [
"[email protected]"
] | |
5d19425b9f79dca344d62f5713da2454035970cf | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /tests/unit/module_utils/common/validation/test_check_type_bool.py | 193622d5bc7dbf0eb681bea33c6c8f96045faacc | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible_collections.ansible.community.plugins.module_utils._text import to_native
from ansible_collections.ansible.community.plugins.module_utils.common.validation import check_type_bool
def test_check_type_bool():
test_cases = (
(True, True),
(False, False),
('1', True),
('on', True),
(1, True),
('0', False),
(0, False),
('n', False),
('f', False),
('false', False),
('true', True),
('y', True),
('t', True),
('yes', True),
('no', False),
('off', False),
)
for case in test_cases:
assert case[1] == check_type_bool(case[0])
def test_check_type_bool_fail():
default_test_msg = 'cannot be converted to a bool'
test_cases = (
({'k1': 'v1'}, 'is not a valid bool'),
(3.14159, default_test_msg),
(-1, default_test_msg),
(-90810398401982340981023948192349081, default_test_msg),
(90810398401982340981023948192349081, default_test_msg),
)
for case in test_cases:
with pytest.raises(TypeError) as e:
check_type_bool(case)
assert 'cannot be converted to a bool' in to_native(e.value)
| [
"[email protected]"
] | |
b9eb17b596550fa1751c638e5d554839479d80dd | 46279163a543cd8820bdc38133404d79e787c5d2 | /test/distributed/_pipeline/sync/skip/test_stash_pop.py | 7a5b16a39cff59a886e16432f0db67c0bc9af3fa | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | erwincoumans/pytorch | 31738b65e7b998bfdc28d0e8afa7dadeeda81a08 | ae9f39eb580c4d92157236d64548b055f71cf14b | refs/heads/master | 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 | NOASSERTION | 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null | UTF-8 | Python | false | false | 3,050 | py | # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from torch import nn
from torch.distributed._pipeline.sync.skip import pop, skippable, stash
from torch.distributed._pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
@pytest.fixture(autouse=True)
def skip_tracker():
skip_tracker = SkipTracker()
with use_skip_tracker(skip_tracker):
yield skip_tracker
def test_stash(skip_tracker):
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa
l1 = Stash()
assert len(skip_tracker.tensors) == 0
with use_skip_tracker(skip_tracker):
l1(torch.tensor(42))
assert len(skip_tracker.tensors) == 1
def test_pop():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo # noqa
l1 = Stash()
l2 = Pop()
output = l2(l1(torch.tensor(42)))
assert output.item() == 42
def test_declare_but_not_use():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
return input * 2
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
return input * 3
l1 = Stash()
l2 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(torch.tensor(42))
def test_stash_not_declared():
@skippable()
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa
l1 = Stash()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_pop_not_declared():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", input)
return input * 2 # noqa
@skippable()
class Pop(nn.Module):
def forward(self, input):
foo = yield pop("foo")
return foo # noqa
l1 = Stash()
l2 = Pop()
latent = l1(torch.tensor(42))
with pytest.raises(RuntimeError):
l2(latent)
def test_pop_not_stashed():
@skippable(pop=["foo"])
class Pop(nn.Module):
def forward(self, input):
yield pop("foo")
l1 = Pop()
with pytest.raises(RuntimeError):
l1(torch.tensor(42))
def test_stash_none():
@skippable(stash=["foo"])
class Stash(nn.Module):
def forward(self, input):
yield stash("foo", None)
return input * 2 # noqa
l1 = Stash()
l1(torch.tensor(42))
| [
"[email protected]"
] | |
9cf468cb3abdeaa9dd1f3c9098ef2ee86b1a3b82 | 3c522665e79f45ce93728f62201a893261a9fb85 | /FlaskDemo05/run01.py | d83d1a1c95cb9fbbca2fc319438f9a042c2de16d | [] | no_license | Chenhuaqi6/Flask | b9cdc33f43f1d5e5aa9a660f051366e56707cb6a | d2c229b429aabb217eb1de781f5f976357a6ab6c | refs/heads/master | 2020-04-17T10:54:42.664504 | 2019-01-19T06:16:32 | 2019-01-19T06:16:32 | 166,517,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,224 | py | from _operator import or_
from flask import Flask, render_template, request, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
from sqlalchemy import func,or_
app = Flask(__name__)
#First create a database in MySQL ---- create database flask05 default charset utf8 collate utf8_general_ci;
#Configure the connection
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@localhost:3306/flask05'
#Run the app in debug mode
app.config['DEBUG']= True
#Enable automatic commit
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
#Create the database instance
db = SQLAlchemy(app)
#Create the Manager object and specify which app it manages
manager = Manager(app)
#Create the Migrate object and specify the app and db to associate
migrate = Migrate(app,db)
#Add a command to manager: enables the data table migration commands
manager.add_command('db',MigrateCommand)
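# Typical flask-script / flask-migrate workflow enabled by the command above
# (illustrative shell commands, not part of the original file):
#   python run01.py db init
#   python run01.py db migrate
#   python run01.py db upgrade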
class Users(db.Model):
__tablename__ = "users"
id =db.Column(db.Integer,primary_key=True)
username = db.Column(db.String(80),unique=True)
age = db.Column(db.Integer,nullable=True)
email = db.Column(db.String(200),unique=True)
birth = db.Column(db.Date)
isActive = db.Column(db.Boolean,default=True)
class Student(db.Model):
__tablename__ = 'student'
id = db.Column(db.Integer,primary_key=True)
sname = db.Column(db.String(30))
sage = db.Column(db.Integer)
isActive = db.Column(db.Boolean,default=True)
class Teacher(db.Model):
__tablename__ = 'teacher'
id = db.Column(db.Integer,primary_key=True)
tname = db.Column(db.String(30),nullable=True)
tage = db.Column(db.Integer,nullable=False)
    #Add the reference to the Course class (the "one" side of the relation)
course_id = db.Column(db.Integer,db.ForeignKey('course.id'))
    #Add the many-to-many relation to Student
students = db.relationship('Student',
secondary='teacher_student',
lazy='dynamic',
backref=db.backref('teachers',lazy='dynamic'))
class Course(db.Model):
__tablename__ = 'course'
id = db.Column(db.Integer,primary_key=True)
cname = db.Column(db.String(50),nullable=True)
    #Add the relationship attribute and the backref attribute
teachers = db.relationship('Teacher',
backref='course',
lazy='dynamic'
)
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer,primary_key=True)
uname = db.Column(db.String(50))
uage = db.Column(db.Integer)
uemail = db.Column(db.String(200))
isActive = db.Column(db.Boolean,default=True)
    #Add the relationship attribute and the backref attribute
    wife = db.relationship('Wife',backref='user',uselist=False)
    def __repr__(self):
        return "<User:%s>" % self.uname
#Create the Wife entity class
class Wife(db.Model):
__tablename__ = 'wife'
id = db.Column(db.Integer,primary_key=True)
wname = db.Column(db.String(30))
    wage = db.Column(db.Integer)
    #Add the one-to-one reference: references the primary key id of user
user_id = db.Column(db.Integer,db.ForeignKey('user.id'),unique=True)
def __repr__(self):
return "<Wife:%r>" % self.wname
#Create the TeacherStudent entity class (the third/association table)
class TeacherStudent(db.Model):
__tablename__ = 'teacher_student'
id = db.Column(db.Integer,primary_key=True)
teacher_id = db.Column(db.Integer, db.ForeignKey('teacher.id'))
student_id = db.Column(db.Integer,db.ForeignKey('student.id'))
#Create the goods table, representing products
class Goods(db.Model):
__tablename__ = 'goods'
id = db.Column(db.Integer,primary_key=True)
gname = db.Column(db.String(50))
price = db.Column(db.Float)
    #Add the many-to-many relation to User
users = db.relationship('User',secondary='Shoppingcart',lazy='dynamic',backref=db.backref('goods',lazy='dynamic'))
#Create the ShoppingCart table, representing the shopping cart
class ShoppingCart(db.Model):
__tablename__ = 'Shoppingcart'
id = db.Column(db.Integer,primary_key=True)
user_id = db.Column(db.Integer,db.ForeignKey('user.id'))
goods_id = db.Column(db.Integer,db.ForeignKey('goods.id'))
count = db.Column(db.Integer,nullable=False)
@app.route('/')
def index():
return "This is my first page"
@app.route('/01-adduser')
def addUser_views():
    #1. Create a User entity object
    user = User()
    user.uname = 'Wangwc'
    user.uage = 28
    user.uemail = '[email protected]'
    #2. Save the entity object through db.session.add()
    db.session.add(user)
    #3. Commit back to the database
    # db.session.commit()
    return 'Data submitted successfully'
@app.route('/02-query')
def query_virews():
    #1. Test what db.session.query() does
    #Query all columns of the User table
    # query = db.session.query(User)
    #Query the id and uname columns of the User table
    # query = db.session.query(User.id,User.uname)
    #Query all data of the user and student tables after joining them
    # query = db.session.query(User,Student)
    # print(type(query))
    # print(query)
    ##2. Test the query execution functions
    #####################
    ##2.1 db.session.query().all()
    # users = db.session.query(User).all()
    # for u in users:
    #     print('id:%d,uname:%s,uage:%d,uemail:%s' % (u.id,u.uname,u.uage,u.uemail))
    ##2.2 Query the id and uname columns of all rows in the user table and print them to the terminal
    # users = db.session.query(User.id,User.uname).all()
    # for u in users:
    #     print('id:%d,uname:%s' % (u.id,u.uname))
    ##2.3 Query the first row of the user table
    # u = db.session.query(User).first()
    #
    # print('id:%d,uname:%s,uage:%d,uemail:%s' % (u.id,u.uname,u.uage,u.uemail))
    #
    # ##2.4 Query how many rows the user table contains
    # count = db.session.query(User).count()
    # print('the user table contains %d rows' % count)
    ###########################
    ###3. Test the filter() function
    ############################
    ###3.1 Query the users whose age is greater than 17
    # users = db.session.query(User).filter(User.uage > 17).all()
    # print(users)
    ###3.2 Query the users whose age is greater than 17 and whose id is greater than 2
    # users = db.session.query(User).filter(User.uage > 17,User.id > 2).all()
    # print(users)
    ###3.3 Query the users whose age is greater than 17 or whose id is greater than 1
    # users = db.session.query(User).filter(or_(User.uage > 17,User.id>1)).all()
    # print(users)
    ###3.4 Query the user whose id equals 2
    # user = db.session.query(User).filter(User.id == 2).first()
    # print(user)
    ##3.5 Query the users whose uemail contains the given characters
    # users = db.session.query(User).filter(User.uemail.like('%CH%')).all()
    # print(users)
    #4.1 Test filter_by()
    # users = db.session.query(User).filter_by(id=1).first()
    # print(users)
    ###############
    #5. Test the limit() / offset() functions
    ######################
    # users = db.session.query(User).limit(1).all()
    # print(users)
    #Skip the first 3 rows and take one row
    # users = db.session.query(User).limit(1).offset(3).all()
    # print(users)
    ################
    #6. Test the order_by() function
    #################
    # users = db.session.query(User).order_by('id desc').all()
    # print(users)
    #Ascending order; 'asc' can be omitted
    users = db.session.query(User).order_by('uage,id desc').all()
    print(users)
    return '<script> alert("Query succeeded");</script>'
@app.route('/03-queryall')
def queryall_views():
users = db.session.query(User).all()
return render_template('03-queryall.html',users=users)
@app.route('/04-update',methods=['GET','POST'])
def update_views():
if request.method == 'GET':
        #Receive the user id passed from the front end
        id = request.args['id']
        user = db.session.query(User).filter_by(id=id).first()
        #Read out the corresponding user's information by id
        #Send the retrieved entity object to 04-update.html for display
        return render_template('04-update.html',user = user)
    else:
        # Receive the four values passed from the front end (id, uname, uage, uemail)
        id = request.form['id']
        print(id)
        uname = request.form['uname']
        uage = request.form['uage']
        uemail = request.form['uemail']
        # Query the corresponding user by id
        user = User.query.filter_by(id=id).first()
        # Update the user object with the new values
        user.uname = uname
        user.uage = uage
        user.uemail = uemail
        # Save the user's information back to the database
        db.session.add(user)
        # Response: redirect back to /03-queryall
        return redirect('/03-queryall')
@app.route('/05-query')
def query05_views():
###############
    ##1. Using aggregate functions
    ##1.1 Query the average of all ages in the user table
    # result = db.session.query(func.avg(User.uage)).first()
    # print(result[0])
    # print(type(result))
    ##1.2 Group the user table by uage and compute the average age and the sum of uage for each group
    # result = db.session.query(User.uage,func.avg(User.uage),func.sum(User.uage)).group_by('uage').all()
    # # print(result)
    # for r in result:
    #     print("group:",r[0],'average age:',r[1],'total age:',r[2])
    result = db.session.query(User.isActive,func.count(User.isActive)).group_by('isActive').all()
    for r in result:
        print(r[0],'count:',r[1])
    return "<script>alert('Query succeeded');</script>"
@app.route('/06-delete')
def delete_views():
id = request.args.get('id')
user = User.query.filter_by(id=id).first()
db.session.delete(user)
return redirect('/03-queryall')
@app.route("/07-update")
def update06_views():
    #Modify the information of the user with id 2
user=User.query.filter_by(id=2).first()
print(user)
user.uname = "Zhexue Lv"
user.uage = 30
user.uemail = "[email protected]"
user.isActive = True
db.session.add(user)
return "<script>alert('修改成功');</script>"
@app.route('/08-addteacher')
def addteacher_views():
    # Explanation:
    # Every teacher object has a course_id attribute (added manually)
    # Every teacher object has a course attribute (added through the backref)
    #
    # Method 1: add data through the backref relationship attribute
    # 1.1 Get the course object with id 4
    course = Course.query.filter_by(id=4).first()
    #1.2 Create a teacher object and set its fields
    teacher = Teacher()
    teacher.tname = '陈'
    teacher.tage = 21
    #1.3 Assign the related course object to the teacher object
    teacher.course = course
    print(teacher.course)
    #1.4 Save the teacher back to the database
    db.session.add(teacher)
    # return 'Data added successfully'
    # Method 2: add data through the foreign key column
    # teacher = Teacher()
    # teacher.tname = '王老师'
    # teacher.tage = 31
    # #Add data through the foreign key column
    # teacher.course_id = 1
    # db.session.add(teacher)
    return 'Data added successfully'
@app.route('/09-regteacher',methods=['POST','GET'])
def regteacher_views():
if request.method == 'GET':
        #Query all courses
courses = Course.query.all()
return render_template('09-regteacher.html',courses=courses)
else:
teacher = Teacher()
teacher.tname = request.form['tname']
teacher.tage = request.form['tage']
teacher.course_id = request.form['course_id']
db.session.add(teacher)
return redirect('/05-showTea')
@app.route('/05-showTea')
def showTea():
teachers = Teacher.query.all()
return render_template('05-showTea.html',teachers = teachers)
#Get the teachers through a course
#Get the course through a teacher
@app.route('/10-getcourse')
def getcourse_views():
    #Get the teachers through a course
    #1. Get the information of the course with id 1
    # course = Course.query.filter_by(id = 1).first()
    # #2. Get the related teachers
    # teaList = course.teachers.all()
    # for tea in teaList:
    #     print('name:%s,age:%d' % (tea.tname,tea.tage))
    # return 'Data retrieved successfully'
    #Get the corresponding course through a teacher
    #1. Get the information of the teacher with id 1
    teacher = Teacher.query.filter_by(id = 1).first()
    print('teacher name:',teacher.tname)
    #2. Then find the corresponding course through the backref
    course = teacher.course
    print('course taught:',course.cname)
    return 'Data retrieved successfully'
@app.route('/11-showteachers')
def showteacher_views():
courses = Course.query.all()
    #1. Check whether the request has the parameter; if there is no parameter or it equals -1, query all teachers
if 'id' not in request.args or request.args['id'] == "-1":
teachers = Teacher.query.all()
else:
id = request.args['id']
course = Course.query.filter_by(id = id).first()
teachers = course.teachers.all()
return render_template('11-showteachers.html',params = locals())
@app.route('/12-addwife')
def addwife_views():
    #1. Associate user and wife through the foreign key attribute user_id
    # wife = Wife()
    # wife.wname = 'WC夫人'
    # wife.wage = 38
    # wife.user_id = 6
    # db.session.add(wife)
    #2. Associate user and wife through the backref relationship attribute
    user = User.query.filter_by(id=7).first()
    wife = Wife()
    wife.wname = '王夫人'
    wife.wage = 46
    wife.user = user
    db.session.add(wife)
    return 'Added successfully'
@app.route('/13-queryuser')
def query_views():
    #Check whether the request contains the parameter uname
    if 'uname' in request.args:
        #Get the parameter
        uname = request.args['uname']
        #Build the condition from the parameter and query the data
users = User.query.filter(User.uname.like('%'+uname+'%')).all()
else:
users = User.query.all()
return render_template('13-queryuser.html',params=locals())
@app.route('/14-regstudent')
def regstudent_views():
    #Query the information of the Teacher with id 1
    tea = Teacher.query.filter_by(id=1).first()
    #Query the information of the Student with id 1
    stu = Student.query.filter_by(id=1).first()
    tea.students.append(stu)
    return 'Associated data added successfully'
@app.route('/15-queryteacher')
def queryteacher_views():
    #Query the students associated with the teacher whose id is 1
    tea = Teacher.query.filter_by(id = 1).first()
    students = tea.students.all()
    for stu in students:
        print('name:%s,age:%s' % (stu.sname,stu.sage))
    return 'Query succeeded'
if __name__ == '__main__':
    #Start the program through manager
manager.run() | [
"[email protected]"
] | |
cc7446671e306d6a6e487bb6ec3c82bb71b00587 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/display_board_20201103142917.py | 8c63f44d74fa4ec5b0c7cf268870c0b69d958f52 | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,433 | py | from config import *
import pygame as pg
class Display_board():
def __init__(self, screen):
self.screen = screen
self.font_num = pg.font.SysFont("comicsans", NUMBER_SIZE)
self.font_cdt = pg.font.SysFont("comicsans", CANDIDATE_SIZE)
def draw_val(self, val, x, y):
text1 = self.font_num.render(str(val), 1, BLACK)
self.screen.blit(text1, (x * BLOCK_SIZE + 15, y * BLOCK_SIZE + 15))
def draw_cdt(self, val, x, y):
text1 = self.font_cdt.render(str(val), 1, BLACK)
self.screen.blit(text1, (x * BLOCK_SIZE + 1, y * BLOCK_SIZE + 1))
def on_mouse_press(self, x, y, symbol, modifier):
pass
def draw(self, grid):
for i in range (9):
for j in range (9):
if grid[i][j] != 0:
if type(grid[i][j]) != int:
new_line = 1
iteration = 0
for number in grid[i][j]:
if iteration % 3 == 1:
print("pos: 3, 6, 9")
new_line += 1
iteration = 0
elif (iteration - 1) % 3 == 1:
print("pos: 1, 4, 7")
text1 = self.font_cdt.render(str(number), 1, BLACK)
self.screen.blit(text1, ((TOP_LX + i * BLOCK_SIZE) + 10, (TOP_LY + j * BLOCK_SIZE) + new_line * CELL_SIZE))
else:
text1 = self.font_cdt.render(str(number), 1, BLACK)
self.screen.blit(text1, ((TOP_LX + i * BLOCK_SIZE) + iteration * CELL_SIZE, (TOP_LY + j * BLOCK_SIZE) + new_line * CELL_SIZE))
iteration += 1
else:
text1 = self.font_num.render(str(grid[i][j]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + i * BLOCK_SIZE + 15, TOP_LY + j * BLOCK_SIZE + 15))
for i in range(10):
if i %3 == 0:
thick = 7
else:
thick = 1
pg.draw.line(self.screen, BLACK, (TOP_LX,
TOP_LY + i * BLOCK_SIZE),
(TOP_RX,
TOP_RY + i * BLOCK_SIZE), thick)
pg.draw.line(self.screen, BLACK, (TOP_LX + i * BLOCK_SIZE,
TOP_LY),
(BOT_LX + i * BLOCK_SIZE,
BOT_LY), thick)
def update(self, grid, row, col, blk):
font_val = pg.font.SysFont("comicsans", BOLD)
if row != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[row[0]][row[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 15, TOP_LY + row[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[row[0]][row[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + row[0] * BLOCK_SIZE + 14, TOP_LY + row[1] * BLOCK_SIZE + 10))
if col != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[col[0]][col[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 15, TOP_LY + col[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[col[0]][col[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + col[0] * BLOCK_SIZE + 14, TOP_LY + col[1] * BLOCK_SIZE + 10))
if blk != (-1, -1):
# Remove old number
text1 = self.font_num.render(str(grid[blk[0]][blk[1]]), 1, WHITE)
self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 15, TOP_LY + blk[1] * BLOCK_SIZE + 15))
# Rewrite in bigger font
text1 = font_val.render(str(grid[blk[0]][blk[1]]), 1, BLACK)
self.screen.blit(text1, (TOP_LX + blk[0] * BLOCK_SIZE + 14, TOP_LY + blk[1] * BLOCK_SIZE + 10))
def find_cell(self, x, y):
# Only applies glow when a cell is selected
if x == -1 and y == -1:
return
width = BLOCK_SIZE
height = BLOCK_SIZE
# Adjustment in size if bordering a thick line
if x % 3 == 0: # If thick line on the left
start_pos_x = TOP_LX + x * BLOCK_SIZE + 4
width = BLOCK_SIZE - 4
else:
start_pos_x = TOP_LX + x * BLOCK_SIZE + 1
if (x + 1) % 3 == 0: # If thick line on the right
width = BLOCK_SIZE - 3.5
if y % 3 == 0: # If thick line on the top
start_pos_y = TOP_LY + y * BLOCK_SIZE + 4
height = BLOCK_SIZE - 4
else:
start_pos_y = TOP_LY + y * BLOCK_SIZE + 1
if (y + 1) % 3 == 0: # If thick line on the bottom
height = BLOCK_SIZE - 3.5
return (start_pos_x, start_pos_y, width, height)
def blink(self, alpha, a_change):
if a_change:
alpha += BLINK_SPEED
if alpha >= 175:
a_change = False
elif a_change == False:
alpha += -BLINK_SPEED
if alpha <= 30:
a_change = True
return (alpha, a_change)
| [
"[email protected]"
] | |
c695f796fffd594178ab5a19127ff197690acb7c | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_08_31_polycrystal_FIP/plot_err_v_pc.py | ba4dcd807183302d80eb21fd3d85c224651fb99c | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,561 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from constants import const
from matplotlib.ticker import AutoMinorLocator
import h5py
def plterr(par, upbnd, deg, Tvec, Hvec):
C = const()
colormat = cm.plasma(np.linspace(0, .8, len(Hvec)))
# colormat = cm.rainbow(np.linspace(0, .9, len(Hvec)))
linemat = ['-', '--', ':']
fig, ax = plt.subplots(figsize=[7, 5])
for ii in xrange(len(Hvec)):
for jj in xrange(len(Tvec)):
f = h5py.File("regression_results_L%s.hdf5" % Hvec[ii], 'r')
rlen = f.get('order_%s' % par).shape[0]
n_fac = f.get('Rpred_cal_%s' % par)[...].mean()
"""plot the prediction error versus number of pc"""
plotmat = np.zeros((rlen, 3))
plotmat[:, :2] = f.get('order_%s' % par)[...]
plotmat[:, 2] = f.get('meanerr_%s_%s' % (Tvec[jj], par))[...]
pc_range = len(np.unique(plotmat[:, 0]))
poly_range = len(np.unique(plotmat[:, 1]))
plotmat_ = plotmat.reshape((pc_range, poly_range, 3))
err = 100*plotmat_[..., deg-1, 2]/n_fac
if Tvec[jj] == 'cal':
label = 'calibration, L=%s'
elif Tvec[jj] == 'LOOCV':
label = 'LOOCV, L=%s'
elif Tvec[jj] == 'val':
label = 'validation, L=%s'
else:
label = 'L=%s'
plt.plot(np.arange(C['n_pc_max'])+1, err[:C['n_pc_max']],
marker='', markersize=8,
color=colormat[ii, :], alpha=0.7,
linestyle=linemat[jj], linewidth=2,
label=label % Hvec[ii])
f.close()
spc = np.int16(np.ceil(C['n_pc_max']/15.))
plt.xticks(np.arange(0, C['n_pc_max']+spc, spc))
minor_locator = AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minor_locator)
plt.grid(linestyle='-', alpha=0.15)
plt.grid(which='minor', linestyle='-', alpha=0.2)
plt.axis([.5, C['n_pc_max']+.5, 0, upbnd])
plt.legend(loc='upper right', shadow=True, fontsize='small', ncol=3)
plt.xlabel("number of PCs")
plt.ylabel("mean error (%)")
plt.tight_layout()
typs = ''.join(Tvec)
fig_name = 'selection_%s_%s_npc%s.png' % (typs, par, C['n_pc_max'])
fig.canvas.set_window_title(fig_name)
plt.savefig(fig_name)
if __name__ == '__main__':
C = const()
par = "strength"
upbnd = 3
Tvec = ['cal']
Hvec = [6, 15, 41, 90]
    deg = 1  # polynomial degree to plot (assumed value; the original call passed C and omitted deg)
    plterr(par, upbnd, deg, Tvec, Hvec)
plt.show()
| [
"[email protected]"
] | |
83ed30f264265a018a2915a912dc401fb61247ad | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2630/60636/251138.py | 6a6bbb79cd34380643c6e73cf9718a1721f76e69 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | def location(source,a):
    for i in range(len(source)):
        for j in range(len(source[0])-1):
if(source[i][j]==a):
return [i,j]
def find(source,i,j,alls):
if i==0 and j==0:
return min([source[0][1],source[1][0]])
elif i==0 and j==len(source[i])-1:
if(not location(alls,min([source[0][j-1],source[1][j]])) in alls):
            return min([source[0][j-1],source[1][j]])
elif j==0 and i==len(source)-1:
if(not location(alls,min([source[i-1][0],source[i][1]])) in alls):
return min([source[i-1][0],source[i][1]])
elif i==0:
if(not location(alls,min([source[i][j-1],source[i][j+1],source[i+1][j]])) in alls):
return min([source[i][j-1],source[i][j+1],source[i+1][j]])
elif j==0:
if(not location(alls,min([source[i-1][j],source[i+1][j],source[i][j+1]])) in alls):
return min([source[i-1][j],source[i+1][j],source[i][j+1]])
elif i==len(source)-1:
if(not location(alls,min([source[i-1][j],source[i][j-1],source[i][j+1]])) in alls):
return min([source[i-1][j],source[i][j-1],source[i][j+1]])
elif j==len(source[0])-1:
if(not location(alls,min([source[i-1][j],source[i+1][j],source[i][j-1]])) in alls):
return min([source[i-1][j],source[i+1][j],source[i][j-1]])
else:
if(not location(alls,min([source[i-1][j],source[i+1][j],source[i][j-1],source[i][j+1]])) in alls):
return min([source[i-1][j],source[i+1][j],source[i][j-1],source[i][j+1]])
grid=eval(input())
alls=[]
alls.append([0,0])
result=[]
result.append(grid[0][0])
while(True):
possiable=[]
print(alls)
for a in alls:
possiable.append(find(grid,a[0],a[1],alls))
result.append(min(possiable))
for i in range(len(grid)):
for j in range(len(grid[0])):
if(grid[i][j] in possiable):
alls.append([i,j])
if(grid[len(grid)-1][len(grid[0])-1] in result):
break
print(result)
| [
"[email protected]"
] | |
1e3d6016c25d1b2a8f583b4da27b2234c064d97e | d08b0a2ea1365e96c2143a3076d6f1cfce178321 | /learnPython-master/news/ithome/demo.py | ab355ef2de9b7770e0ebab59189751875d274e5d | [] | no_license | xueyes/py3_study | f64060e5dbfcbf11c8d61de8561ce90bbb4e3c19 | a7d83b58ef95806f061f375952db604afe98bc13 | refs/heads/master | 2022-12-11T05:56:03.540612 | 2019-05-06T13:07:55 | 2019-05-06T13:07:55 | 162,883,421 | 1 | 0 | null | 2022-12-08T02:28:21 | 2018-12-23T11:02:31 | HTML | UTF-8 | Python | false | false | 381 | py | from lxml import etree
import requests
url = 'https://dyn.ithome.com/ithome/getajaxdata.aspx'
data = {
'newsID': '326707',
'hash': 'A379730C87B522EA',
'type': 'commentpage',
'page': '3',
'order': 'false',
}
html = requests.post(url, data=data).text
print(html)
# urls = 'https://dyn.ithome.com/comment/326707'
# html =requests.post(urls).text
# print(html)
| [
"[email protected]"
] | |
64e61e45e0a348bd5d50ae86d13b5f1dd0a34442 | 484a348682d9fa515666b94a5cd3a13b1b725a9e | /Data Structure/考试.py | 00a3ea41fd9be666a23b1b977479e92d1ce53653 | [] | no_license | joseph-mutu/Codes-of-Algorithms-and-Data-Structure | 1a73772825c3895419d86d6f1f506d58617f3ff0 | d62591683d0e2a14c72cdc64ae1a36532c3b33db | refs/heads/master | 2020-12-29T17:01:55.097518 | 2020-04-15T19:25:43 | 2020-04-15T19:25:43 | 238,677,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | #include <stdio.h>
#include <stdlib.h>
using namespace std;
int N;
typedef struct BiTree
{
int Data;
struct BiTree* left, *right;
}BiTree;
BiTree* CreatTree(int data1[], int data2[], int start, int ends, int &ind)
{
int mid = data1[ind];
int i;
for(i = ends; i >= start; i--)
{
if(mid == data2[i])
break;
}
BiTree *root = (BiTree*)malloc(sizeof(BiTree));
root->Data = mid, root->right = NULL, root->left = NULL;
if(i + 1 <= ends)
{
root->right = CreatTree(data1, data2, i + 1, ends, --ind);
}
if(i - 1 >= start)
{
root->left = CreatTree(data1, data2, start, i - 1, --ind);
}
return root;
}
void Preorder(BiTree* key)
{
if(key)
{
printf(" %d", key->Data);
Preorder(key->left);
Preorder(key->right);
}
}
int main()
{
scanf("%d", &N);
    int K1[100], K2[100];// K1 postorder, K2 inorder
int i;
for(i = 0; i < N; i++)
{
int n;
scanf("%d", &n);
K1[i] = n;
}
for(i = 0; i < N; i++)
{
int n;
scanf("%d", &n);
K2[i] = n;
}
BiTree *One;
int k = N - 1;
    One = CreatTree(K1, K2, 0, N - 1, k);//build the binary tree
printf("Preorder:");
    Preorder(One);//traverse the binary tree in preorder
printf("\n");
return 0;
} | [
"[email protected]"
] | |
5b7db5c2f10832e93c05e4accf948b198fd38fc5 | 5c4515960dcbfd3861d06d90b8c9bde0bdf3ecf5 | /Iserlab/migrations/0044_auto_20170306_0956.py | 649dec546e754ed99d60b1d6b99ffde77ce3b431 | [] | no_license | Mathilda1992/mcysite | 66bb2f51de622b7f7c450664c798eb11ce195cae | def82e43474ecc734c6cbb26842bd87f698b2b88 | refs/heads/master | 2021-01-11T19:58:23.611196 | 2017-06-26T08:58:11 | 2017-06-26T08:58:11 | 79,434,975 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-06 09:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Iserlab', '0043_score_socre_time'),
]
operations = [
migrations.AlterField(
model_name='score',
name='score',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
ce924970e5c11232b741a9273bf79d4aec02ecf8 | 42ed6d4e67172522f79ab6f3c8cb650f4234be90 | /grpc/client/main.py | 55d9fa17b36fef38b4fffb7106dd8f1ebf9684f2 | [] | no_license | Hylan129/Self-Learning | 81a5143015850c33d5226c4da43d110150661dc7 | 06ccdc202f62629395900658909e1363a32632fd | refs/heads/master | 2020-06-12T19:45:50.479677 | 2020-05-26T15:38:26 | 2020-05-26T15:38:26 | 194,405,633 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import grpc
from example import data_pb2, data_pb2_grpc
_HOST = 'localhost'
_PORT = '8080'
def run():
conn = grpc.insecure_channel(_HOST + ':' + _PORT)
client = data_pb2_grpc.FormatDataStub(channel=conn)
response = client.DoFormat(data_pb2.Data(text='hello,world!'))
print("received: " + response.text)
if __name__ == '__main__':
run() | [
"[email protected]"
] | |
b4953030c41f9a1add974681fbeb2f552d932ccf | beb4d7c16ea8d8da9747b94298891cf01d9466f6 | /users/urls.py | c7b3b09544316033c571df99cedc218e2fa6a055 | [] | no_license | Chirag-Django/nonstopio_assignment | 48985445a19f8d01c1f0565e8058cd032942d903 | b1f1561e841857ea64d9a5170974704a347cc0e3 | refs/heads/master | 2023-03-05T07:05:49.963018 | 2021-02-20T18:35:29 | 2021-02-20T18:35:29 | 340,347,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
app_name='users'
urlpatterns = [
path('register/', views.register, name='register'),
path('update_profile/',views.update_profile,name='update_profile'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/',auth_views.LogoutView.as_view(template_name='logout.html'),name='logout'),
] | [
"[email protected]"
] | |
ba0317152df2c5141cbc08cfba662a6ad4943890 | e7c1f3af60c030b97916e4f431172ebdbc07b2a9 | /django_mindscape/tests/test_dependencies.py | c02066d0898bbc0482536e51e8faff115d00fe83 | [] | no_license | podhmo/django-mindscape | d1df56c217750fee3cf3c24a49591bf98bbb6e89 | 2da92e155b7aaf465d631258f9799eb2e3d59671 | refs/heads/master | 2016-08-03T12:04:42.090545 | 2015-03-10T19:12:14 | 2015-03-10T19:12:14 | 31,464,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,128 | py | # -*- coding:utf-8 -*-
import unittest
from evilunit import test_target
@test_target("django_mindscape:Walker")
class ForeignKeyTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
from django.db import models
class Group(models.Model):
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myapp"
class Member(models.Model):
group = models.ForeignKey(Group)
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myapp"
cls.Group = Group
cls.Member = Member
def _get_models(self):
return [self.Group, self.Member]
def test_dependecies__member(self):
walker = self._makeOne(self._get_models())
walker.walkall()
member_dependencies = [node.to.model for node in walker[self.Member].dependencies]
self.assertEqual(member_dependencies, [self.Group])
def test_dependecies__group(self):
walker = self._makeOne(self._get_models())
walker.walkall()
self.assertEqual(walker[self.Group].dependencies, [])
def test_relation_type(self):
walker = self._makeOne(self._get_models())
walker.walkall()
reltype = walker[self.Member].dependencies[0].type
self.assertEqual(reltype, "M1")
def test_relation_name(self):
walker = self._makeOne(self._get_models())
walker.walkall()
name = walker[self.Member].dependencies[0].name
self.assertEqual(name, "group")
def test_relation_backref(self):
walker = self._makeOne(self._get_models())
walker.walkall()
backref = walker[self.Member].dependencies[0].backref
self.assertEqual(backref, "member_set")
@test_target("django_mindscape:Walker")
class RelatedNameTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
from django.db import models
class Group(models.Model):
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myappp"
class Member(models.Model):
group = models.ForeignKey(Group, related_name="+")
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myappp"
cls.Group = Group
cls.Member = Member
def _get_models(self):
return [self.Group, self.Member]
def test_relation_name(self):
walker = self._makeOne(self._get_models())
walker.walkall()
name = walker[self.Member].dependencies[0].name
self.assertEqual(name, "group")
def test_relation_backref(self):
walker = self._makeOne(self._get_models())
walker.walkall()
backref = walker[self.Member].dependencies[0].backref
self.assertEqual(backref, None)
@test_target("django_mindscape:Walker")
class ManyToManyTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
from django.db import models
class Group(models.Model):
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myapp2"
class Member(models.Model):
group_set = models.ManyToManyField(Group, through="GroupToMember")
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myapp2"
class GroupToMember(models.Model):
member = models.ForeignKey(Member)
group = models.ForeignKey(Group)
class Meta:
app_label = "myapp2"
cls.Group = Group
cls.Member = Member
cls.GroupToMember = GroupToMember
def _get_models(self):
return [self.Group, self.Member]
def test_relation_type(self):
walker = self._makeOne(self._get_models())
walker.walkall()
reltype = walker[self.Member].dependencies[0].type
self.assertEqual(reltype, "MM")
def test_relation_name(self):
walker = self._makeOne(self._get_models())
walker.walkall()
name = walker[self.Member].dependencies[0].name
self.assertEqual(name, "group_set")
def test_relation_backref(self):
walker = self._makeOne(self._get_models())
walker.walkall()
backref = walker[self.Member].dependencies[0].backref
self.assertEqual(backref, "member_set")
def test_relation_through(self):
walker = self._makeOne(self._get_models())
walker.walkall()
through = walker[self.Member].dependencies[0].through
self.assertEqual(through.model, self.GroupToMember)
@test_target("django_mindscape:Walker")
class OneToOneTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
from django.db import models
class Group(models.Model):
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myapp3"
class Member(models.Model):
group = models.OneToOneField(Group)
name = models.CharField(max_length=255, null=False, default="")
class Meta:
app_label = "myapp3"
cls.Group = Group
cls.Member = Member
def _get_models(self):
return [self.Group, self.Member]
def test_relation_type(self):
walker = self._makeOne(self._get_models())
walker.walkall()
reltype = walker[self.Member].dependencies[0].type
self.assertEqual(reltype, "11")
def test_relation_name(self):
walker = self._makeOne(self._get_models())
walker.walkall()
name = walker[self.Member].dependencies[0].name
self.assertEqual(name, "group")
def test_relation_backref(self):
walker = self._makeOne(self._get_models())
walker.walkall()
backref = walker[self.Member].dependencies[0].backref
self.assertEqual(backref, "member")
| [
"[email protected]"
] | |
aef95b48318caf1b38584776aca1d4f9936f4928 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_2_1_neat/16_2_1_Math_getting_digit.py | ba10364f80fe35b453998f9f7dfea738adce6858 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,233 | py | #!/usr/bin/python3
import sys
def remove_letters(letters, word, ref_letter):
if (ref_letter in letters and letters[ref_letter] > 0):
number = letters[ref_letter]
for ll in word:
letters[ll] -= number
return(number)
else:
return(0)
def solve_number(SS, case):
letters = dict()
for ll in SS:
if (not ll in letters):
letters[ll] = 1
else:
letters[ll] += 1
numbers = [0 for ii in range(10)]
numbers[0] = remove_letters(letters, 'ZERO', 'Z')
numbers[2] = remove_letters(letters, 'TWO', 'W')
    numbers[8] = remove_letters(letters, 'EIGHT', 'G')
numbers[6] = remove_letters(letters, 'SIX', 'X')
numbers[7] = remove_letters(letters, 'SEVEN', 'S')
numbers[5] = remove_letters(letters, 'FIVE', 'V')
numbers[4] = remove_letters(letters, 'FOUR', 'F')
numbers[9] = remove_letters(letters, 'NINE', 'I')
numbers[1] = remove_letters(letters, 'ONE', 'N')
numbers[3] = remove_letters(letters, 'THREE', 'T')
print("Case #" + str(case) + ": " + ''.join([ str(ii) for ii in range(10) for jj in range(numbers[ii]) ]))
TT = int(input())
for ii in range(TT):
SS = input()
solve_number(SS, ii+1)
| [
"[[email protected]]"
] | |
7458e345152467d791ea25abac7dba7b3fe448a1 | bc6ebddfd13b36d5e394f7b5eb15444f7467f5e5 | /sundial/utils.py | 2670fc6d9ad15404fea7729d884697854ba0c8d8 | [
"MIT"
] | permissive | Surgo/django-sundial | 28bbe8da723ca45fe926750e78ae568b1c5f0092 | e6a3f69d61a49a5d7ae2b053cdd79289e11a8a73 | refs/heads/master | 2021-01-17T06:09:01.327135 | 2015-03-12T09:13:12 | 2015-03-12T09:13:12 | 32,067,215 | 0 | 0 | null | 2015-03-12T09:01:48 | 2015-03-12T09:01:48 | Python | UTF-8 | Python | false | false | 709 | py | from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.text import force_text
from django.utils.translation import ugettext_lazy as _
import pytz
TIMEZONE_SESSION_KEY = getattr(
settings, 'SUNDIAL_TIMEZONE_SESSION_KEY', '_timezone'
)
def set_session_timezone(session, zone):
session[TIMEZONE_SESSION_KEY] = force_text(zone)
def get_session_timezone(session):
return session.get(TIMEZONE_SESSION_KEY)
def coerce_timezone(zone):
try:
return pytz.timezone(zone)
except pytz.UnknownTimeZoneError:
raise ValidationError(
_('Unknown timezone.'), code='invalid'
)
| [
"[email protected]"
] | |
27cfac09bd2fc5432cc9463f81553242ffb3e67f | 0de5fbd2c992388d572bfb4c114f82741a351b8e | /0x06-Basic_authentication/api/v1/auth/auth.py | 55326846343b39f19a21e2e490782c8f2570f96d | [] | no_license | veromejia/holbertonschool-web_back_end | 3fac1ad905d02bb6d351092210d005a211053df4 | 44258bb53104da95c6df8a69dd066233cb0daae0 | refs/heads/main | 2023-02-10T01:49:31.439587 | 2021-01-05T00:44:46 | 2021-01-05T00:44:46 | 305,465,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | #!/usr/bin/env python3
""" Define Auth class """
from flask import request
from typing import List, TypeVar
class Auth:
"""Auth Class that handle public methods"""
def require_auth(self, path: str, excluded_paths: List[str]) -> bool:
"""Check if path requires authentication"""
if path is None or excluded_paths is None:
return True
        if path[-1] != '/':
path += '/'
wildcards = [p[:-1] for p in excluded_paths if p[-1] == '*']
for p in wildcards:
if path.startswith(p):
return False
return False if path in excluded_paths else True
def authorization_header(self, request=None) -> str:
"""Check if request is authorized"""
if request is None or 'Authorization' not in request.headers:
return None
return request.headers.get('Authorization')
def current_user(self, request=None) -> TypeVar('User'):
""" Return current user """
| [
"[email protected]"
] | |
626e573ed0bbb8e22c948eeb92c9e96d8ffb4782 | be6e6d8af85adf044bf79676b7276c252407e010 | /spec/construct/test_switch_manual_enum_invalid.py | 66cd2a7da5b985e6bb318912bb2ad2cc7e62bf3c | [
"MIT"
] | permissive | kaitai-io/kaitai_struct_tests | 516e864d29d1eccc5fe0360d1b111af7a5d3ad2b | 3d8a6c00c6bac81ac26cf1a87ca84ec54bf1078d | refs/heads/master | 2023-08-19T19:42:47.281953 | 2023-08-04T20:26:50 | 2023-08-04T20:26:50 | 52,155,797 | 12 | 41 | MIT | 2023-07-30T23:30:30 | 2016-02-20T13:55:39 | Ruby | UTF-8 | Python | false | false | 531 | py | # Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from switch_manual_enum_invalid import _schema
class TestSwitchManualEnumInvalid(unittest.TestCase):
def test_switch_manual_enum_invalid(self):
r = _schema.parse_file('src/enum_negative.bin')
self.assertEqual(len(r.opcodes), 2)
self.assertEqual(r.opcodes[0].code, 255)
self.assertIsNone(r.opcodes[0].body)
self.assertEqual(r.opcodes[1].code, 1)
self.assertIsNone(r.opcodes[1].body)
| [
"[email protected]"
] | |
f71da1fd302633769fc6a1663d9ca71769b093c6 | eddbfe4eb1aa3052cb8d03097cca2673ae207ec0 | /books/migrations/0003_auto_20210429_1839.py | 63f82e9831d65f0c47c2a14623a7f4bae6ac1eab | [] | no_license | luiz158/CRUD_Django | f40d630f09916a0cd2d06a076415873122c02098 | 852242b34c7ff4781c9704df032865e83ded9bf3 | refs/heads/master | 2023-07-02T08:58:45.637229 | 2021-08-04T21:57:03 | 2021-08-04T21:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Generated by Django 3.2 on 2021-04-29 18:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('books', '0002_alter_book_date_inclusion'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='author',
),
migrations.AddField(
model_name='book',
name='author',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='books.author'),
),
]
| [
"[email protected]"
] | |
4efd9e8caa0fa82f83d43e5d2702eae94ce0a814 | 2db7597686f33a0d700f7082e15fa41f830a45f0 | /Python/BinaryTree/1448. 统计二叉树中好节点的数目.py | f1622e85dcf75ab740d30faccdc8934d48e9b3c6 | [] | no_license | Leahxuliu/Data-Structure-And-Algorithm | 04e0fc80cd3bb742348fd521a62bc2126879a70e | 56047a5058c6a20b356ab20e52eacb425ad45762 | refs/heads/master | 2021-07-12T23:54:17.785533 | 2021-05-17T02:04:41 | 2021-05-17T02:04:41 | 246,514,421 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | '''
1448. Count Good Nodes in Binary Tree
Given a binary tree rooted at root, return the number of good nodes in the tree.
A node X is a "good node" if, on the path from the root to X, no node has a value greater than X's value.
'''
# Definition for a BT node
class TreeNode:
def __init__(self, x = 0):
self.val = x
self.left = None
self.right = None
def goodNodes(root):
if root == None:
return 0
    res = 0
    def find(root, maxVal):
        nonlocal res
        if root == None:
            return
        next_maxVal = maxVal
        if root.val >= maxVal:
            res += 1
            next_maxVal = root.val
        find(root.left, next_maxVal)
        find(root.right, next_maxVal)
        return
    find(root, float('-inf'))
    return res
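# Minimal usage sketch (added for illustration; the tree below is the LeetCode sample, built by hand):
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(1)
    root.right = TreeNode(4)
    root.left.left = TreeNode(3)
    root.right.left = TreeNode(1)
    root.right.right = TreeNode(5)
    print(goodNodes(root))  # expected output: 4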
| [
"[email protected]"
] | |
3f9a50bca0bdd49ad832b81e37cc2acb7ef94337 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /aQWEQDoWiNbryG8xs_13.py | 74b473ad6cce9d4cfa59205686035d2b90514e34 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | """
Create a function that takes a number `num` and returns the first 10 multiples
of `num` with 1 added to it, separated by commas.
### Examples
n_tables_plus_one(7) ➞ "8,15,22,29,36,43,50,57,64,71"
n_tables_plus_one(1) ➞ "2,3,4,5,6,7,8,9,10,11"
n_tables_plus_one(3) ➞ "4,7,10,13,16,19,22,25,28,31"
### Notes
There is no comma after the last number.
"""
def n_tables_plus_one(num):
Final = ''
for item in range(1,10+1):
Final += str((num*item)+1)+','
return Final[:-1]
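# Quick check against the examples in the docstring (added for illustration):
if __name__ == '__main__':
    print(n_tables_plus_one(7))  # 8,15,22,29,36,43,50,57,64,71
    print(n_tables_plus_one(1))  # 2,3,4,5,6,7,8,9,10,11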
| [
"[email protected]"
] | |
6cb50044646a8ed42437fa9dd67f027154040955 | 241e8be8fbd310384e5669b64142cf173ecf692b | /mickey/admin.py | 65504f3ffe946c80f308fdd06f1d942f53fbe379 | [
"MIT"
] | permissive | raihanba13/mickey | a1e61cf71419b61acfc2240336c8a8cebf712ceb | 6a151145d11687760eae14749b7ee150c9d5a044 | refs/heads/master | 2022-06-10T03:22:18.869733 | 2020-05-07T11:31:20 | 2020-05-07T11:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,993 | py | from django.contrib import admin
from django.utils.html import format_html
from django.contrib import messages
from django.contrib.contenttypes.admin import GenericTabularInline
from mickey.models import *
from mickey.widgets import *
import logging
logger = logging.getLogger(__name__)
# Register your models here.
def get_message_bit(rows_updated, model_name):
if model_name == "category":
message_bit = "1 category was" if rows_updated == 1 else "%s categories were" % rows_updated
elif model_name == "post":
message_bit = "1 post was" if rows_updated == 1 else "%s posts were" % rows_updated
elif model_name == "tag":
message_bit = "1 tag was" if rows_updated == 1 else "%s tags were" % rows_updated
elif model_name == "comment":
message_bit = "1 comment was" if rows_updated == 1 else "%s comment were" % rows_updated
return message_bit
class PostAdminForm(forms.ModelForm):
model = Post
class Meta:
fields = '__all__'
widgets = {
'content': HtmlEditor(attrs={'style': 'width: 90%; height: 100%;'}),
'short_content': HtmlEditor(attrs={'style': 'width: 90%; height: 100%;'}),
}
@admin.register(Media)
class MediaAdmin(admin.ModelAdmin):
list_display = ("id", "image","md_image","sm_image", "created_by", "created_at")
fieldsets = (
("Required Information", {
"description": "These fields are required for each Media",
"fields": (
('image', 'image_tag'),
),
}),
("Optional Information", {
'classes': ('collapse',),
'fields': (
('md_image','sm_image'),
)
})
)
readonly_fields = ('image_tag',)
def image_tag(self, obj):
logger.critical(obj.image.url)
return format_html('<img src="{}" width="160" height="135"/>'.format(obj.image.url))
image_tag.short_description = 'Image'
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
fields = (('parent','name'),'active')
list_display = ("id", "name", "parent", "active" ,"created_by")
actions = ['make_category_active', 'make_category_deactivate']
def make_category_active(self, request, queryset):
rows_updated = queryset.update(active=True)
message_bit = "1 category was" if rows_updated == 1 else "%s categories were" % rows_updated
self.message_user(request, "%s activated successfully." % message_bit)
def make_category_deactivate(self, request, queryset):
rows_updated = queryset.update(active=False)
message_bit = "1 category was" if rows_updated == 1 else "%s categories were" % rows_updated
self.message_user(request, "%s deactivated successfully." % message_bit)
make_category_active.short_description = "Active selected categories"
make_category_deactivate.short_description = "Deactivate selected categories"
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ("id", "name", "created_by")
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
form = PostAdminForm
list_display = ("title", "category", "published", "archive", "created_by", "created_at")
search_fields = ['title','category__name','published']
list_filter = ('category__name', 'published', 'archive','created_at')
fieldsets = (
("Required Information", {
"description": "These fields are required for each post",
"fields": (
('category', 'title'), ('content',), ('tags',)
),
}),
("Optional Information", {
'classes': ('collapse',),
'fields': (
('cover_image','image_tag'),
('published','archive',),
('short_content',),
('slug',)
)
})
)
actions = ['make_archive','remove_archive','publish_post','unpublish_post']
readonly_fields = ('image_tag',)
def make_archive(self, request, queryset):
rows_updated = queryset.update(archive=True)
self.message_user(request, "%s archived successfully." % get_message_bit(rows_updated,'post'))
def remove_archive(self, request, queryset):
rows_updated = queryset.update(archive=False)
self.message_user(request, "%s published from archive successfully." % get_message_bit(rows_updated,'post'))
def unpublish_post(self, request, queryset):
rows_updated = queryset.update(published=False)
self.message_user(request, "%s unpublished successfully." % get_message_bit(rows_updated,'post'))
def publish_post(self, request, queryset):
rows_updated = queryset.update(published=True)
self.message_user(request, "%s published successfully." % get_message_bit(rows_updated,'post'))
def image_tag(self, obj):
try:
return format_html('<img src="{}" width="160" height="135"/>'.format(obj.cover_image.image.url))
except:
return ""
image_tag.short_description = 'Post Image'
make_archive.short_description = "Archive selected post"
remove_archive.short_description = "Publish selected post from archive"
publish_post.short_description = "Publish selected post"
unpublish_post.short_description = "Unpublish selected post"
@admin.register(React)
class ReactAdmin(admin.ModelAdmin):
list_display = ("id","blog", "type", "amount",)
search_fields = ['blog__title', 'type']
list_filter = ('blog', 'type',)
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ("id","post", "parent", "name", "active", "created_at")
fields = (('post','parent'),('name', ), 'body', ('active',))
list_filter = ('post', 'name', 'active')
actions = ['make_activate','make_deactivate',]
def make_activate(self, request, queryset):
rows_updated = queryset.update(active=True)
self.message_user(request, "%s activated successfully." % get_message_bit(rows_updated,'comment'))
def make_deactivate(self, request, queryset):
rows_updated = queryset.update(active=False)
self.message_user(request, "%s deactivated successfully." % get_message_bit(rows_updated,'comment'))
make_activate.short_description = "Active selected comments"
make_deactivate.short_description = "Deactivate selected comments"
@admin.register(SiteInformation)
class DJSiteAdmin(admin.ModelAdmin):
list_display = ("title", "tagline", "created_by")
fieldsets = (
("Required Information", {
"description": "These fields are required for DJ Site Information",
"fields": (
('title', ),
),
}),
("Optional Information", {
'classes': ('collapse',),
'fields': (
('header_title','tagline',),
'footer_text'
)
})
)
| [
"[email protected]"
] |